/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                   struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
                                 struct lpfc_iocbq *pwqeq,
                                 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - initial_xfer_len is variable */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}
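
/*
 * Illustrative sketch only, not part of the driver: the FCP IO build
 * paths are expected to start from one of the templates above and then
 * fill in only the variable words called out in the comments, roughly
 * (field and variable names hypothetical):
 *
 *	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *	wqe->fcp_iread.total_xfer_len = xfer_len;                 // Word 4
 *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);  // Word 9
 */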

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. On 64-bit little-endian hosts native and SLI
 * endianness match, so this is a straight 64-bit copy; all other
 * configurations fall back to lpfc_sli_pcimem_bcopy(), which also
 * converts each word to SLI endianness. This function can be
 * called with or without lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
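
/*
 * Illustrative sketch only, not part of the driver: @cnt above is a
 * byte count, so callers pass the queue entry size directly, e.g.
 *
 *	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
 *
 * as lpfc_sli4_wq_put() does below.
 */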

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;

        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                        q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                        q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                            q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                            q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
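
/*
 * Illustrative sketch only, not part of the driver: a caller posts a
 * WQE with the appropriate lock held and must handle a full queue,
 * e.g. (error handling hypothetical):
 *
 *	lockdep_assert_held(&phba->hbalock);
 *	rc = lpfc_sli4_wq_put(wq, wqe);
 *	if (rc == -EBUSY)
 *		// leave the request on the txq and retry later
 */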

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return;

        q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}
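
/*
 * Illustrative sketch only, not part of the driver:
 * lpfc_sli4_eq_get() pairs with __lpfc_sli4_consume_eqe() in a drain
 * loop, after which the doorbell write releases the consumed entries:
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq))) {
 *		// ... handle the eqe ...
 *		__lpfc_sli4_consume_eqe(phba, eq, eqe);
 *		consumed++;
 *	}
 *	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
 *					LPFC_QUEUE_REARM);
 *
 * lpfc_sli4_eqcq_flush() and lpfc_sli4_process_eq() below follow this
 * pattern.
 */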

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                     uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                        (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        int cqid = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                            LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eq_count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     uint8_t rearm)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        xchg(&eq->queue_claimed, 0);

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}
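
/*
 * Illustrative sketch only, not part of the driver: an interrupt
 * handler would drain and re-arm an EQ with
 *
 *	count = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_REARM);
 *
 * while a polling path would pass LPFC_QUEUE_NOARM and re-arm the EQ
 * only when polling stops.
 */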

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. If anything, it is more important here,
         * as a CQE is a multi-word structure (vs the single-word EQE).
         */
        mb();
        return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                     uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
                        (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                         uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
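
/*
 * Illustrative sketch only, not part of the driver: header and data
 * RQEs are posted in lock step, and a negative return means the pair
 * was not posted, e.g. (error handling hypothetical):
 *
 *	rc = lpfc_sli4_rq_put(hq, dq, &hrqe, &drqe);
 *	if (rc < 0)
 *		// no room; repost the receive buffer later
 *
 * A non-negative return is the header-queue index that received the
 * entry.
 */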

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        /* Lookup DID to verify if DID is still active on this vport */
        if (rrq->vport)
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function checks if stop_time (ratov from setting rrq active)
 * has been reached; if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                                rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport != vport)
                        continue;

                if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
                        list_move(&rrq->list, &rrq_list);

        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                        uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 = rrq was activated for this xri
 *         < 0 = no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
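
/*
 * Illustrative sketch only, not part of the driver: the active bit set
 * here is paired with a later clear once the RRQ window expires or the
 * RRQ ELS completes, e.g.
 *
 *	lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
 *	// ...
 *	lpfc_clr_rrq_active(phba, xritag, rrq);	// also frees @rrq
 *
 * lpfc_handle_rrq_active() above drives the clear from the rrq timer.
 */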
1238
1239 /**
1240  * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
1241  * @phba: Pointer to HBA context object.
1242  * @piocbq: Pointer to the iocbq.
1243  *
1244  * The driver calls this function with either the nvme ls ring lock
1245  * or the fc els ring lock held depending on the iocb usage.  This function
1246  * gets a new driver sglq object from the sglq list. If the list is not empty
1247  * then it is successful, it returns pointer to the newly allocated sglq
1248  * object else it returns NULL.
1249  **/
1250 static struct lpfc_sglq *
1251 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1252 {
1253         struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
1254         struct lpfc_sglq *sglq = NULL;
1255         struct lpfc_sglq *start_sglq = NULL;
1256         struct lpfc_io_buf *lpfc_cmd;
1257         struct lpfc_nodelist *ndlp;
1258         struct lpfc_sli_ring *pring = NULL;
1259         int found = 0;
1260
1261         if (piocbq->cmd_flag & LPFC_IO_NVME_LS)
1262                 pring =  phba->sli4_hba.nvmels_wq->pring;
1263         else
1264                 pring = lpfc_phba_elsring(phba);
1265
1266         lockdep_assert_held(&pring->ring_lock);
1267
1268         if (piocbq->cmd_flag &  LPFC_IO_FCP) {
1269                 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
1270                 ndlp = lpfc_cmd->rdata->pnode;
1271         } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
1272                         !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
1273                 ndlp = piocbq->context_un.ndlp;
1274         } else  if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
1275                 if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
1276                         ndlp = NULL;
1277                 else
1278                         ndlp = piocbq->context_un.ndlp;
1279         } else {
1280                 ndlp = piocbq->context1;
1281         }
1282
1283         spin_lock(&phba->sli4_hba.sgl_list_lock);
1284         list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
1285         start_sglq = sglq;
1286         while (!found) {
1287                 if (!sglq)
1288                         break;
1289                 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1290                     test_bit(sglq->sli4_lxritag,
1291                     ndlp->active_rrqs_xri_bitmap)) {
1292                         /* This xri has an rrq outstanding for this DID.
1293                          * put it back in the list and get another xri.
1294                          */
1295                         list_add_tail(&sglq->list, lpfc_els_sgl_list);
1296                         sglq = NULL;
1297                         list_remove_head(lpfc_els_sgl_list, sglq,
1298                                                 struct lpfc_sglq, list);
1299                         if (sglq == start_sglq) {
1300                                 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1301                                 sglq = NULL;
1302                                 break;
1303                         } else
1304                                 continue;
1305                 }
1306                 sglq->ndlp = ndlp;
1307                 found = 1;
1308                 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1309                 sglq->state = SGL_ALLOCATED;
1310         }
1311         spin_unlock(&phba->sli4_hba.sgl_list_lock);
1312         return sglq;
1313 }
1314
1315 /**
1316  * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1317  * @phba: Pointer to HBA context object.
1318  * @piocbq: Pointer to the iocbq.
1319  *
1320  * This function is called with the sgl_list lock held. This function
1321  * gets a new driver sglq object from the sglq list. If the
1322  * list is not empty then it is successful, it returns pointer to the newly
1323  * allocated sglq object else it returns NULL.
1324  **/
1325 struct lpfc_sglq *
1326 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1327 {
1328         struct list_head *lpfc_nvmet_sgl_list;
1329         struct lpfc_sglq *sglq = NULL;
1330
1331         lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1332
1333         lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1334
1335         list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1336         if (!sglq)
1337                 return NULL;
1338         phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1339         sglq->state = SGL_ALLOCATED;
1340         return sglq;
1341 }
1342
1343 /**
1344  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
1345  * @phba: Pointer to HBA context object.
1346  *
1347  * This function is called with no lock held. This function
1348  * allocates a new driver iocb object from the iocb pool. If the
1349  * allocation is successful, it returns pointer to the newly
1350  * allocated iocb object else it returns NULL.
1351  **/
1352 struct lpfc_iocbq *
1353 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1354 {
1355         struct lpfc_iocbq * iocbq = NULL;
1356         unsigned long iflags;
1357
1358         spin_lock_irqsave(&phba->hbalock, iflags);
1359         iocbq = __lpfc_sli_get_iocbq(phba);
1360         spin_unlock_irqrestore(&phba->hbalock, iflags);
1361         return iocbq;
1362 }
1363
1364 /**
1365  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1366  * @phba: Pointer to HBA context object.
1367  * @iocbq: Pointer to driver iocb object.
1368  *
1369  * This function is called to release the driver iocb object
1370  * to the iocb pool. The iotag in the iocb object
1371  * does not change for each use of the iocb object. This function
1372  * clears all other fields of the iocb object when it is freed.
1373  * The sglq structure that holds the xritag and the physical and virtual
1374  * mappings for the scatter gather list is retrieved from the
1375  * active array of sglq. The get of the sglq pointer also clears
1376  * the entry in the array. If the status of the IO indicates that
1377  * this IO was aborted then the sglq entry is put on the
1378  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1379  * IO has good status or fails for any other reason then the sglq
1380  * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1381  * asserted held in the code path calling this routine.
1382  **/
1383 static void
1384 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1385 {
1386         struct lpfc_sglq *sglq;
1387         size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
1388         unsigned long iflag = 0;
1389         struct lpfc_sli_ring *pring;
1390
1391         if (iocbq->sli4_xritag == NO_XRI)
1392                 sglq = NULL;
1393         else
1394                 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1395
1396
1397         if (sglq) {
1398                 if (iocbq->cmd_flag & LPFC_IO_NVMET) {
1399                         spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1400                                           iflag);
1401                         sglq->state = SGL_FREED;
1402                         sglq->ndlp = NULL;
1403                         list_add_tail(&sglq->list,
1404                                       &phba->sli4_hba.lpfc_nvmet_sgl_list);
1405                         spin_unlock_irqrestore(
1406                                 &phba->sli4_hba.sgl_list_lock, iflag);
1407                         goto out;
1408                 }
1409
1410                 if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
1411                     (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
1412                     sglq->state != SGL_XRI_ABORTED) {
1413                         spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1414                                           iflag);
1415
1416                         /* Check if we can get a reference on ndlp */
1417                         if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1418                                 sglq->ndlp = NULL;
1419
1420                         list_add(&sglq->list,
1421                                  &phba->sli4_hba.lpfc_abts_els_sgl_list);
1422                         spin_unlock_irqrestore(
1423                                 &phba->sli4_hba.sgl_list_lock, iflag);
1424                 } else {
1425                         spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1426                                           iflag);
1427                         sglq->state = SGL_FREED;
1428                         sglq->ndlp = NULL;
1429                         list_add_tail(&sglq->list,
1430                                       &phba->sli4_hba.lpfc_els_sgl_list);
1431                         spin_unlock_irqrestore(
1432                                 &phba->sli4_hba.sgl_list_lock, iflag);
1433                         pring = lpfc_phba_elsring(phba);
1434                         /* Check if TXQ queue needs to be serviced */
1435                         if (pring && (!list_empty(&pring->txq)))
1436                                 lpfc_worker_wake_up(phba);
1437                 }
1438         }
1439
1440 out:
1441         /*
1442          * Clean all volatile data fields, preserve iotag and node struct.
1443          */
1444         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1445         iocbq->sli4_lxritag = NO_XRI;
1446         iocbq->sli4_xritag = NO_XRI;
1447         iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
1448                               LPFC_IO_NVME_LS);
1449         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1450 }
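
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the offsetof()-based partial memset used above:
 * every member laid out before the chosen field survives the wipe, and
 * the rest of the object is zeroed for reuse.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct demo_iocbq {
	int iotag;		/* preserved across releases */
	int cmd_flag;		/* first "volatile" field: cleared */
	char payload[32];	/* cleared */
};

static void demo_release(struct demo_iocbq *q)
{
	size_t start_clean = offsetof(struct demo_iocbq, cmd_flag);

	memset((char *)q + start_clean, 0, sizeof(*q) - start_clean);
}

int main(void)
{
	struct demo_iocbq q = { .iotag = 42, .cmd_flag = 7 };

	demo_release(&q);
	printf("iotag=%d cmd_flag=%d\n", q.iotag, q.cmd_flag); /* 42 0 */
	return 0;
}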
1451
1452
1453 /**
1454  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1455  * @phba: Pointer to HBA context object.
1456  * @iocbq: Pointer to driver iocb object.
1457  *
1458  * This function is called to release the driver iocb object to the
1459  * iocb pool. The iotag in the iocb object does not change for each
1460  * use of the iocb object. This function clears all other fields of
1461  * the iocb object when it is freed. The hbalock is asserted held in
1462  * the code path calling this routine.
1463  **/
1464 static void
1465 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1466 {
1467         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1468
1469         /*
1470          * Clean all volatile data fields, preserve iotag and node struct.
1471          */
1472         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1473         iocbq->sli4_xritag = NO_XRI;
1474         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1475 }
1476
1477 /**
1478  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1479  * @phba: Pointer to HBA context object.
1480  * @iocbq: Pointer to driver iocb object.
1481  *
1482  * This function is called with hbalock held to release driver
1483  * iocb object to the iocb pool. The iotag in the iocb object
1484  * does not change for each use of the iocb object. This function
1485  * clears all other fields of the iocb object when it is freed.
1486  **/
1487 static void
1488 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1489 {
1490         lockdep_assert_held(&phba->hbalock);
1491
1492         phba->__lpfc_sli_release_iocbq(phba, iocbq);
1493         phba->iocb_cnt--;
1494 }
1495
1496 /**
1497  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1498  * @phba: Pointer to HBA context object.
1499  * @iocbq: Pointer to driver iocb object.
1500  *
1501  * This function is called with no lock held to release the iocb to
1502  * iocb pool.
1503  **/
1504 void
1505 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1506 {
1507         unsigned long iflags;
1508
1509         /*
1510          * Clean all volatile data fields, preserve iotag and node struct.
1511          */
1512         spin_lock_irqsave(&phba->hbalock, iflags);
1513         __lpfc_sli_release_iocbq(phba, iocbq);
1514         spin_unlock_irqrestore(&phba->hbalock, iflags);
1515 }
1516
1517 /**
1518  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1519  * @phba: Pointer to HBA context object.
1520  * @iocblist: List of IOCBs.
1521  * @ulpstatus: ULP status in IOCB command field.
1522  * @ulpWord4: ULP word-4 in IOCB command field.
1523  *
1524  * This function is called with a list of IOCBs to cancel. It cancels each IOCB
1525  * on the list by invoking the completion callback function associated with the
1526  * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
1527  * fields.
1528  **/
1529 void
1530 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1531                       uint32_t ulpstatus, uint32_t ulpWord4)
1532 {
1533         struct lpfc_iocbq *piocb;
1534
1535         while (!list_empty(iocblist)) {
1536                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1537                 if (piocb->cmd_cmpl) {
1538                         if (piocb->cmd_flag & LPFC_IO_NVME) {
1539                                 lpfc_nvme_cancel_iocb(phba, piocb,
1540                                                       ulpstatus, ulpWord4);
1541                         } else {
1542                                 /* Set the cancel status in the IOCB
1543                                  * command fields before completing. */
1544                                 piocb->iocb.ulpStatus = ulpstatus;
1545                                 piocb->iocb.un.ulpWord[4] = ulpWord4;
1546                                 (piocb->cmd_cmpl)(phba, piocb, piocb);
1547                         }
1548                 } else {
1549                         lpfc_sli_release_iocbq(phba, piocb);
1550                 }
1551         }
1552         return;
1553 }
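
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the cancel pattern above: every queued request is
 * drained from the list and finished through its own completion callback
 * with an error status, so no submitter is left waiting on an I/O that
 * will never run.
 */
#include <stdio.h>

struct demo_req {
	struct demo_req *next;
	int status;
	void (*done)(struct demo_req *);
};

static void demo_cancel_all(struct demo_req **list, int err)
{
	struct demo_req *r;

	while ((r = *list) != NULL) {
		*list = r->next;
		r->status = err;	/* record why it never ran */
		if (r->done)
			r->done(r);	/* complete it, don't just free it */
	}
}

static void demo_done(struct demo_req *r)
{
	printf("request cancelled, status %d\n", r->status);
}

int main(void)
{
	struct demo_req b = { NULL, 0, demo_done };
	struct demo_req a = { &b, 0, demo_done };
	struct demo_req *list = &a;

	demo_cancel_all(&list, -5);	/* e.g. an -EIO style status */
	return 0;
}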
1554
1555 /**
1556  * lpfc_sli_iocb_cmd_type - Get the iocb type
1557  * @iocb_cmnd: iocb command code.
1558  *
1559  * This function is called by ring event handler function to get the iocb type.
1560  * This function translates the iocb command to an iocb command type used to
1561  * decide the final disposition of each completed IOCB.
1562  * The function returns
1563  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1564  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1565  * LPFC_ABORT_IOCB   if it is an abort iocb
1566  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1567  *
1568  * The caller is not required to hold any lock.
1569  **/
1570 static lpfc_iocb_type
1571 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1572 {
1573         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1574
1575         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1576                 return LPFC_UNKNOWN_IOCB;
1577
1578         switch (iocb_cmnd) {
1579         case CMD_XMIT_SEQUENCE_CR:
1580         case CMD_XMIT_SEQUENCE_CX:
1581         case CMD_XMIT_BCAST_CN:
1582         case CMD_XMIT_BCAST_CX:
1583         case CMD_ELS_REQUEST_CR:
1584         case CMD_ELS_REQUEST_CX:
1585         case CMD_CREATE_XRI_CR:
1586         case CMD_CREATE_XRI_CX:
1587         case CMD_GET_RPI_CN:
1588         case CMD_XMIT_ELS_RSP_CX:
1589         case CMD_GET_RPI_CR:
1590         case CMD_FCP_IWRITE_CR:
1591         case CMD_FCP_IWRITE_CX:
1592         case CMD_FCP_IREAD_CR:
1593         case CMD_FCP_IREAD_CX:
1594         case CMD_FCP_ICMND_CR:
1595         case CMD_FCP_ICMND_CX:
1596         case CMD_FCP_TSEND_CX:
1597         case CMD_FCP_TRSP_CX:
1598         case CMD_FCP_TRECEIVE_CX:
1599         case CMD_FCP_AUTO_TRSP_CX:
1600         case CMD_ADAPTER_MSG:
1601         case CMD_ADAPTER_DUMP:
1602         case CMD_XMIT_SEQUENCE64_CR:
1603         case CMD_XMIT_SEQUENCE64_CX:
1604         case CMD_XMIT_BCAST64_CN:
1605         case CMD_XMIT_BCAST64_CX:
1606         case CMD_ELS_REQUEST64_CR:
1607         case CMD_ELS_REQUEST64_CX:
1608         case CMD_FCP_IWRITE64_CR:
1609         case CMD_FCP_IWRITE64_CX:
1610         case CMD_FCP_IREAD64_CR:
1611         case CMD_FCP_IREAD64_CX:
1612         case CMD_FCP_ICMND64_CR:
1613         case CMD_FCP_ICMND64_CX:
1614         case CMD_FCP_TSEND64_CX:
1615         case CMD_FCP_TRSP64_CX:
1616         case CMD_FCP_TRECEIVE64_CX:
1617         case CMD_GEN_REQUEST64_CR:
1618         case CMD_GEN_REQUEST64_CX:
1619         case CMD_XMIT_ELS_RSP64_CX:
1620         case DSSCMD_IWRITE64_CR:
1621         case DSSCMD_IWRITE64_CX:
1622         case DSSCMD_IREAD64_CR:
1623         case DSSCMD_IREAD64_CX:
1624         case CMD_SEND_FRAME:
1625                 type = LPFC_SOL_IOCB;
1626                 break;
1627         case CMD_ABORT_XRI_CN:
1628         case CMD_ABORT_XRI_CX:
1629         case CMD_CLOSE_XRI_CN:
1630         case CMD_CLOSE_XRI_CX:
1631         case CMD_XRI_ABORTED_CX:
1632         case CMD_ABORT_MXRI64_CN:
1633         case CMD_XMIT_BLS_RSP64_CX:
1634                 type = LPFC_ABORT_IOCB;
1635                 break;
1636         case CMD_RCV_SEQUENCE_CX:
1637         case CMD_RCV_ELS_REQ_CX:
1638         case CMD_RCV_SEQUENCE64_CX:
1639         case CMD_RCV_ELS_REQ64_CX:
1640         case CMD_ASYNC_STATUS:
1641         case CMD_IOCB_RCV_SEQ64_CX:
1642         case CMD_IOCB_RCV_ELS64_CX:
1643         case CMD_IOCB_RCV_CONT64_CX:
1644         case CMD_IOCB_RET_XRI64_CX:
1645                 type = LPFC_UNSOL_IOCB;
1646                 break;
1647         case CMD_IOCB_XMIT_MSEQ64_CR:
1648         case CMD_IOCB_XMIT_MSEQ64_CX:
1649         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1650         case CMD_IOCB_RCV_ELS_LIST64_CX:
1651         case CMD_IOCB_CLOSE_EXTENDED_CN:
1652         case CMD_IOCB_ABORT_EXTENDED_CN:
1653         case CMD_IOCB_RET_HBQE64_CN:
1654         case CMD_IOCB_FCP_IBIDIR64_CR:
1655         case CMD_IOCB_FCP_IBIDIR64_CX:
1656         case CMD_IOCB_FCP_ITASKMGT64_CX:
1657         case CMD_IOCB_LOGENTRY_CN:
1658         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1659                 printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
1660                        __func__, iocb_cmnd);
1661                 type = LPFC_UNKNOWN_IOCB;
1662                 break;
1663         default:
1664                 type = LPFC_UNKNOWN_IOCB;
1665                 break;
1666         }
1667
1668         return type;
1669 }
1670
1671 /**
1672  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1673  * @phba: Pointer to HBA context object.
1674  *
1675  * This function is called from SLI initialization code
1676  * to configure every ring of the HBA's SLI interface. The
1677  * caller is not required to hold any lock. This function issues
1678  * a config_ring mailbox command for each ring.
1679  * This function returns zero if successful else returns a negative
1680  * error code.
1681  **/
1682 static int
1683 lpfc_sli_ring_map(struct lpfc_hba *phba)
1684 {
1685         struct lpfc_sli *psli = &phba->sli;
1686         LPFC_MBOXQ_t *pmb;
1687         MAILBOX_t *pmbox;
1688         int i, rc, ret = 0;
1689
1690         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1691         if (!pmb)
1692                 return -ENOMEM;
1693         pmbox = &pmb->u.mb;
1694         phba->link_state = LPFC_INIT_MBX_CMDS;
1695         for (i = 0; i < psli->num_rings; i++) {
1696                 lpfc_config_ring(phba, i, pmb);
1697                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1698                 if (rc != MBX_SUCCESS) {
1699                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1700                                         "0446 Adapter failed to init (%d), "
1701                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1702                                         "ring %d\n",
1703                                         rc, pmbox->mbxCommand,
1704                                         pmbox->mbxStatus, i);
1705                         phba->link_state = LPFC_HBA_ERROR;
1706                         ret = -ENXIO;
1707                         break;
1708                 }
1709         }
1710         mempool_free(pmb, phba->mbox_mem_pool);
1711         return ret;
1712 }
1713
1714 /**
1715  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1716  * @phba: Pointer to HBA context object.
1717  * @pring: Pointer to driver SLI ring object.
1718  * @piocb: Pointer to the driver iocb object.
1719  *
1720  * The driver calls this function with the hbalock held for SLI3 ports or
1721  * the ring lock held for SLI4 ports. The function adds the
1722  * new iocb to txcmplq of the given ring. This function always returns
1723  * 0. If this function is called for ELS ring, this function checks if
1724  * there is a vport associated with the ELS command. This function also
1725  * starts els_tmofunc timer if this is an ELS command.
1726  **/
1727 static int
1728 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1729                         struct lpfc_iocbq *piocb)
1730 {
1731         if (phba->sli_rev == LPFC_SLI_REV4)
1732                 lockdep_assert_held(&pring->ring_lock);
1733         else
1734                 lockdep_assert_held(&phba->hbalock);
1735
1736         BUG_ON(!piocb);
1737
1738         list_add_tail(&piocb->list, &pring->txcmplq);
1739         piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1740         pring->txcmplq_cnt++;
1741
1742         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1743            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1744            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1745                 BUG_ON(!piocb->vport);
1746                 if (!(piocb->vport->load_flag & FC_UNLOADING))
1747                         mod_timer(&piocb->vport->els_tmofunc,
1748                                   jiffies +
1749                                   msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1750         }
1751
1752         return 0;
1753 }
1754
1755 /**
1756  * lpfc_sli_ringtx_get - Get first element of the txq
1757  * @phba: Pointer to HBA context object.
1758  * @pring: Pointer to driver SLI ring object.
1759  *
1760  * This function is called with hbalock held to get next
1761  * iocb in txq of the given ring. If there is any iocb in
1762  * the txq, the function returns first iocb in the list after
1763  * removing the iocb from the list, else it returns NULL.
1764  **/
1765 struct lpfc_iocbq *
1766 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1767 {
1768         struct lpfc_iocbq *cmd_iocb;
1769
1770         lockdep_assert_held(&phba->hbalock);
1771
1772         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1773         return cmd_iocb;
1774 }
1775
1776 /**
1777  * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1778  * @phba: Pointer to HBA context object.
1779  * @cmdiocb: Pointer to driver command iocb object.
1780  * @rspiocb: Pointer to driver response iocb object.
1781  *
1782  * This routine will inform the driver of any BW adjustments we need
1783  * to make. These changes will be picked up during the next CMF
1784  * timer interrupt. In addition, any BW changes will be logged
1785  * with LOG_CGN_MGMT.
1786  **/
1787 static void
1788 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1789                    struct lpfc_iocbq *rspiocb)
1790 {
1791         union lpfc_wqe128 *wqe;
1792         uint32_t status, info;
1793         struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1794         uint64_t bw, bwdif, slop;
1795         uint64_t pcent, bwpcent;
1796         int asig, afpin, sigcnt, fpincnt;
1797         int wsigmax, wfpinmax, cg, tdp;
1798         char *s;
1799
1800         /* First check for error */
1801         status = bf_get(lpfc_wcqe_c_status, wcqe);
1802         if (status) {
1803                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1804                                 "6211 CMF_SYNC_WQE Error "
1805                                 "req_tag x%x status x%x hwstatus x%x "
1806                                 "tdatap x%x parm x%x\n",
1807                                 bf_get(lpfc_wcqe_c_request_tag, wcqe),
1808                                 bf_get(lpfc_wcqe_c_status, wcqe),
1809                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
1810                                 wcqe->total_data_placed,
1811                                 wcqe->parameter);
1812                 goto out;
1813         }
1814
1815         /* Gather congestion information on a successful cmpl */
1816         info = wcqe->parameter;
1817         phba->cmf_active_info = info;
1818
1819         /* See if firmware info count is valid or has changed */
1820         if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1821                 info = 0;
1822         else
1823                 phba->cmf_info_per_interval = info;
1824
1825         tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1826         cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1827
1828         /* Get BW requirement from firmware */
1829         bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1830         if (!bw) {
1831                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1832                                 "6212 CMF_SYNC_WQE x%x: NULL bw\n",
1833                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
1834                 goto out;
1835         }
1836
1837         /* Gather information needed for logging if a BW change is required */
1838         wqe = &cmdiocb->wqe;
1839         asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1840         afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1841         fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1842         sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1843         if (phba->cmf_max_bytes_per_interval != bw ||
1844             (asig || afpin || sigcnt || fpincnt)) {
1845                 /* Are we increasing or decreasing BW */
1846                 if (phba->cmf_max_bytes_per_interval <  bw) {
1847                         bwdif = bw - phba->cmf_max_bytes_per_interval;
1848                         s = "Increase";
1849                 } else {
1850                         bwdif = phba->cmf_max_bytes_per_interval - bw;
1851                         s = "Decrease";
1852                 }
1853
1854                 /* What is the change percentage */
1855                 slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
1856                 pcent = div64_u64(bwdif * 100 + slop,
1857                                   phba->cmf_link_byte_count);
1858                 bwpcent = div64_u64(bw * 100 + slop,
1859                                     phba->cmf_link_byte_count);
1860                 if (asig) {
1861                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1862                                         "6237 BW Threshold %lld%% (%lld): "
1863                                         "%lld%% %s: Signal Alarm: cg:%d "
1864                                         "Info:%u\n",
1865                                         bwpcent, bw, pcent, s, cg,
1866                                         phba->cmf_active_info);
1867                 } else if (afpin) {
1868                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1869                                         "6238 BW Threshold %lld%% (%lld): "
1870                                         "%lld%% %s: FPIN Alarm: cg:%d "
1871                                         "Info:%u\n",
1872                                         bwpcent, bw, pcent, s, cg,
1873                                         phba->cmf_active_info);
1874                 } else if (sigcnt) {
1875                         wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1876                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1877                                         "6239 BW Threshold %lld%% (%lld): "
1878                                         "%lld%% %s: Signal Warning: "
1879                                         "Cnt %d Max %d: cg:%d Info:%u\n",
1880                                         bwpcent, bw, pcent, s, sigcnt,
1881                                         wsigmax, cg, phba->cmf_active_info);
1882                 } else if (fpincnt) {
1883                         wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1884                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1885                                         "6240 BW Threshold %lld%% (%lld): "
1886                                         "%lld%% %s: FPIN Warning: "
1887                                         "Cnt %d Max %d: cg:%d Info:%u\n",
1888                                         bwpcent, bw, pcent, s, fpincnt,
1889                                         wfpinmax, cg, phba->cmf_active_info);
1890                 } else {
1891                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1892                                         "6241 BW Threshold %lld%% (%lld): "
1893                                         "CMF %lld%% %s: cg:%d Info:%u\n",
1894                                         bwpcent, bw, pcent, s, cg,
1895                                         phba->cmf_active_info);
1896                 }
1897         } else if (info) {
1898                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1899                                 "6246 Info Threshold %u\n", info);
1900         }
1901
1902         /* Save BW change to be picked up during next timer interrupt */
1903         phba->cmf_last_sync_bw = bw;
1904 out:
1905         lpfc_sli_release_iocbq(phba, cmdiocb);
1906 }
1907
1908 /**
1909  * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1910  * @phba: Pointer to HBA context object.
1911  * @ms:   ms to set in WQE interval, 0 means use init op
1912  * @total: Total rcv bytes for this interval
1913  *
1914  * This routine is called every CMF timer interrupt. Its purpose is
1915  * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1916  * that may indicate we have congestion (FPINs or Signals). Upon
1917  * completion, the firmware will indicate any BW restrictions the
1918  * driver may need to take.
1919  **/
1920 int
1921 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1922 {
1923         union lpfc_wqe128 *wqe;
1924         struct lpfc_iocbq *sync_buf;
1925         unsigned long iflags;
1926         u32 ret_val;
1927         u32 atot, wtot, max;
1928
1929         /* First address any alarm / warning activity */
1930         atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1931         wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1932
1933         /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1934         if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1935             phba->link_state == LPFC_LINK_DOWN)
1936                 return 0;
1937
1938         spin_lock_irqsave(&phba->hbalock, iflags);
1939         sync_buf = __lpfc_sli_get_iocbq(phba);
1940         if (!sync_buf) {
1941                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1942                                 "6244 No available WQEs for CMF_SYNC_WQE\n");
1943                 ret_val = ENOMEM;
1944                 goto out_unlock;
1945         }
1946
1947         wqe = &sync_buf->wqe;
1948
1949         /* WQEs are reused.  Clear stale data and set key fields to zero */
1950         memset(wqe, 0, sizeof(*wqe));
1951
1952         /* If this is the very first CMF_SYNC_WQE, issue an init operation */
1953         if (!ms) {
1954                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1955                                 "6441 CMF Init %d - CMF_SYNC_WQE\n",
1956                                 phba->fc_eventTag);
1957                 bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1958                 bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1959                 goto initpath;
1960         }
1961
1962         bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1963         bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1964
1965         /* Check for alarms / warnings */
1966         if (atot) {
1967                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1968                         /* We hit a Signal alarm condition */
1969                         bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1970                 } else {
1971                         /* We hit a FPIN alarm condition */
1972                         bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1973                 }
1974         } else if (wtot) {
1975                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1976                     phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1977                         /* We hit a Signal warning condition */
1978                         max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1979                                 lpfc_acqe_cgn_frequency;
1980                         bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1981                         bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1982                 } else {
1983                         /* We hit a FPIN warning condition */
1984                         bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1985                         bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1986                 }
1987         }
1988
1989         /* Update total read blocks during previous timer interval */
1990         wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
1991
1992 initpath:
1993         bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
1994         wqe->cmf_sync.event_tag = phba->fc_eventTag;
1995         bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
1996
1997         /* Setup reqtag to match the wqe completion. */
1998         bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
1999
2000         bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
2001
2002         bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
2003         bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
2004         bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
2005
2006         sync_buf->vport = phba->pport;
2007         sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
2008         sync_buf->context1 = NULL;
2009         sync_buf->context2 = NULL;
2010         sync_buf->context3 = NULL;
2011         sync_buf->sli4_xritag = NO_XRI;
2012
2013         sync_buf->cmd_flag |= LPFC_IO_CMF;
2014         ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2015         if (ret_val) {
2016                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2017                                 "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2018                                 ret_val);
2019                 __lpfc_sli_release_iocbq(phba, sync_buf);
2020         }
2021 out_unlock:
2022         spin_unlock_irqrestore(&phba->hbalock, iflags);
2023         return ret_val;
2024 }
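
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the read-and-reset counter idiom above: the
 * kernel's atomic_xchg(&cnt, 0) drains the event count in one atomic
 * step, so increments from the interrupt path are never lost between the
 * read and the clear.  C11 atomic_exchange() is the userspace equivalent.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint demo_warn_cnt;

static unsigned int demo_drain(void)
{
	return atomic_exchange(&demo_warn_cnt, 0);	/* fetch old, store 0 */
}

int main(void)
{
	atomic_fetch_add(&demo_warn_cnt, 3);		/* producer side */
	printf("drained %u\n", demo_drain());		/* 3 */
	printf("drained %u\n", demo_drain());		/* 0 */
	return 0;
}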
2025
2026 /**
2027  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2028  * @phba: Pointer to HBA context object.
2029  * @pring: Pointer to driver SLI ring object.
2030  *
2031  * This function is called with hbalock held and the caller must post the
2032  * iocb without releasing the lock. If the caller releases the lock,
2033  * iocb slot returned by the function is not guaranteed to be available.
2034  * The function returns pointer to the next available iocb slot if there
2035  * is available slot in the ring, else it returns NULL.
2036  * If the get index of the ring is ahead of the put index, the function
2037  * will post an error attention event to the worker thread to take the
2038  * HBA to offline state.
2039  **/
2040 static IOCB_t *
2041 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2042 {
2043         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2044         uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
2045
2046         lockdep_assert_held(&phba->hbalock);
2047
2048         if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2049            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2050                 pring->sli.sli3.next_cmdidx = 0;
2051
2052         if (unlikely(pring->sli.sli3.local_getidx ==
2053                 pring->sli.sli3.next_cmdidx)) {
2054
2055                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2056
2057                 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2058                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2059                                         "0315 Ring %d issue: portCmdGet %d "
2060                                         "is bigger than cmd ring %d\n",
2061                                         pring->ringno,
2062                                         pring->sli.sli3.local_getidx,
2063                                         max_cmd_idx);
2064
2065                         phba->link_state = LPFC_HBA_ERROR;
2066                         /*
2067                          * All error attention handlers are posted to
2068                          * worker thread
2069                          */
2070                         phba->work_ha |= HA_ERATT;
2071                         phba->work_hs = HS_FFER3;
2072
2073                         lpfc_worker_wake_up(phba);
2074
2075                         return NULL;
2076                 }
2077
2078                 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2079                         return NULL;
2080         }
2081
2082         return lpfc_cmd_iocb(phba, pring);
2083 }
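
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the ring-full test above: the producer may not
 * advance its put index onto the consumer's get index, so one slot is
 * sacrificed to distinguish a full ring from an empty one.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_RING_ENTRIES 8

struct demo_ring {
	unsigned int putidx;	/* next slot the producer fills */
	unsigned int getidx;	/* next slot the consumer drains */
};

static bool demo_slot_available(const struct demo_ring *r)
{
	unsigned int next = (r->putidx + 1) % DEMO_RING_ENTRIES;

	return next != r->getidx;	/* full when put would catch get */
}

int main(void)
{
	struct demo_ring r = { .putidx = 6, .getidx = 7 };

	printf("available: %d\n", demo_slot_available(&r));	/* 0, full */
	r.getidx = 0;
	printf("available: %d\n", demo_slot_available(&r));	/* 1 */
	return 0;
}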
2084
2085 /**
2086  * lpfc_sli_next_iotag - Get an iotag for the iocb
2087  * @phba: Pointer to HBA context object.
2088  * @iocbq: Pointer to driver iocb object.
2089  *
2090  * This function gets an iotag for the iocb. If there is no unused iotag and
2091  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
2092  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
2093  * The function returns the allocated iotag if successful, else returns zero.
2094  * Zero is not a valid iotag.
2095  * The caller is not required to hold any lock.
2096  **/
2097 uint16_t
2098 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2099 {
2100         struct lpfc_iocbq **new_arr;
2101         struct lpfc_iocbq **old_arr;
2102         size_t new_len;
2103         struct lpfc_sli *psli = &phba->sli;
2104         uint16_t iotag;
2105
2106         spin_lock_irq(&phba->hbalock);
2107         iotag = psli->last_iotag;
2108         if (++iotag < psli->iocbq_lookup_len) {
2109                 psli->last_iotag = iotag;
2110                 psli->iocbq_lookup[iotag] = iocbq;
2111                 spin_unlock_irq(&phba->hbalock);
2112                 iocbq->iotag = iotag;
2113                 return iotag;
2114         } else if (psli->iocbq_lookup_len < (0xffff
2115                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2116                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2117                 spin_unlock_irq(&phba->hbalock);
2118                 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2119                                   GFP_KERNEL);
2120                 if (new_arr) {
2121                         spin_lock_irq(&phba->hbalock);
2122                         old_arr = psli->iocbq_lookup;
2123                         if (new_len <= psli->iocbq_lookup_len) {
2124                                 /* highly improbable case */
2125                                 kfree(new_arr);
2126                                 iotag = psli->last_iotag;
2127                                 if (++iotag < psli->iocbq_lookup_len) {
2128                                         psli->last_iotag = iotag;
2129                                         psli->iocbq_lookup[iotag] = iocbq;
2130                                         spin_unlock_irq(&phba->hbalock);
2131                                         iocbq->iotag = iotag;
2132                                         return iotag;
2133                                 }
2134                                 spin_unlock_irq(&phba->hbalock);
2135                                 return 0;
2136                         }
2137                         if (psli->iocbq_lookup)
2138                                 memcpy(new_arr, old_arr,
2139                                        ((psli->last_iotag  + 1) *
2140                                         sizeof (struct lpfc_iocbq *)));
2141                         psli->iocbq_lookup = new_arr;
2142                         psli->iocbq_lookup_len = new_len;
2143                         psli->last_iotag = iotag;
2144                         psli->iocbq_lookup[iotag] = iocbq;
2145                         spin_unlock_irq(&phba->hbalock);
2146                         iocbq->iotag = iotag;
2147                         kfree(old_arr);
2148                         return iotag;
2149                 }
2150         } else
2151                 spin_unlock_irq(&phba->hbalock);
2152
2153         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2154                         "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
2155                         psli->last_iotag);
2156
2157         return 0;
2158 }
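
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the table-growth pattern above: the lock is
 * released for the allocation, then retaken, and the length is re-checked
 * before the new array is swapped in, since another thread may have grown
 * the table while the lock was dropped.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static void **demo_table;
static size_t demo_table_len;

static int demo_grow(size_t new_len)
{
	void **new_arr, **old_arr;

	new_arr = calloc(new_len, sizeof(*new_arr));	/* no lock held */
	if (!new_arr)
		return -1;

	pthread_mutex_lock(&demo_lock);
	if (new_len <= demo_table_len) {	/* lost the race: discard */
		pthread_mutex_unlock(&demo_lock);
		free(new_arr);
		return 0;
	}
	old_arr = demo_table;
	if (old_arr)
		memcpy(new_arr, old_arr,
		       demo_table_len * sizeof(*new_arr));
	demo_table = new_arr;
	demo_table_len = new_len;
	pthread_mutex_unlock(&demo_lock);
	free(old_arr);			/* safe: no longer reachable */
	return 0;
}

int main(void)
{
	return demo_grow(64);
}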
2159
2160 /**
2161  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2162  * @phba: Pointer to HBA context object.
2163  * @pring: Pointer to driver SLI ring object.
2164  * @iocb: Pointer to iocb slot in the ring.
2165  * @nextiocb: Pointer to driver iocb object which need to be
2166  *            posted to firmware.
2167  *
2168  * This function is called to post a new iocb to the firmware. This
2169  * function copies the new iocb to ring iocb slot and updates the
2170  * ring pointers. It adds the new iocb to txcmplq if there is
2171  * a completion call back for this iocb else the function will free the
2172  * iocb object.  The hbalock is asserted held in the code path calling
2173  * this routine.
2174  **/
2175 static void
2176 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2177                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2178 {
2179         /*
2180          * Set up an iotag
2181          */
2182         nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2183
2184
2185         if (pring->ringno == LPFC_ELS_RING) {
2186                 lpfc_debugfs_slow_ring_trc(phba,
2187                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
2188                         *(((uint32_t *) &nextiocb->iocb) + 4),
2189                         *(((uint32_t *) &nextiocb->iocb) + 6),
2190                         *(((uint32_t *) &nextiocb->iocb) + 7));
2191         }
2192
2193         /*
2194          * Issue iocb command to adapter
2195          */
2196         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2197         wmb();
2198         pring->stats.iocb_cmd++;
2199
2200         /*
2201          * If there is no completion routine to call, we can release the
2202          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2203          * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2204          */
2205         if (nextiocb->cmd_cmpl)
2206                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2207         else
2208                 __lpfc_sli_release_iocbq(phba, nextiocb);
2209
2210         /*
2211          * Let the HBA know what IOCB slot will be the next one the
2212          * driver will put a command into.
2213          */
2214         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2215         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2216 }
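
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the ordering enforced above by wmb() before the
 * writel() doorbell: the payload stores must be globally visible before
 * the index store that publishes them.  A C11 release fence stands in
 * for the kernel's barrier; a real driver would follow with the MMIO
 * doorbell write.
 */
#include <stdatomic.h>
#include <stdint.h>

static uint32_t demo_slots[16];
static atomic_uint demo_put_idx;

static void demo_post(uint32_t idx, uint32_t payload)
{
	demo_slots[idx % 16] = payload;		/* fill the command slot */
	atomic_thread_fence(memory_order_release);
	/* publish: consumers that see the new index also see the payload */
	atomic_store_explicit(&demo_put_idx, idx + 1, memory_order_relaxed);
}

int main(void)
{
	demo_post(0, 0xabcd);
	return 0;
}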
2217
2218 /**
2219  * lpfc_sli_update_full_ring - Update the chip attention register
2220  * @phba: Pointer to HBA context object.
2221  * @pring: Pointer to driver SLI ring object.
2222  *
2223  * The caller is not required to hold any lock for calling this function.
2224  * This function updates the chip attention bits for the ring to inform firmware
2225  * that there is pending work to be done for this ring and requests an
2226  * interrupt when there is space available in the ring. This function is
2227  * called when the driver is unable to post more iocbs to the ring due
2228  * to unavailability of space in the ring.
2229  **/
2230 static void
2231 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2232 {
2233         int ringno = pring->ringno;
2234
2235         pring->flag |= LPFC_CALL_RING_AVAILABLE;
2236
2237         wmb();
2238
2239         /*
2240          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2241          * The HBA will tell us when an IOCB entry is available.
2242          */
2243         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2244         readl(phba->CAregaddr); /* flush */
2245
2246         pring->stats.iocb_cmd_full++;
2247 }
2248
2249 /**
2250  * lpfc_sli_update_ring - Update chip attention register
2251  * @phba: Pointer to HBA context object.
2252  * @pring: Pointer to driver SLI ring object.
2253  *
2254  * This function updates the chip attention register bit for the
2255  * given ring to inform HBA that there is more work to be done
2256  * in this ring. The caller is not required to hold any lock.
2257  **/
2258 static void
2259 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2260 {
2261         int ringno = pring->ringno;
2262
2263         /*
2264          * Tell the HBA that there is work to do in this ring.
2265          */
2266         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2267                 wmb();
2268                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2269                 readl(phba->CAregaddr); /* flush */
2270         }
2271 }
2272
2273 /**
2274  * lpfc_sli_resume_iocb - Process iocbs in the txq
2275  * @phba: Pointer to HBA context object.
2276  * @pring: Pointer to driver SLI ring object.
2277  *
2278  * This function is called with hbalock held to post pending iocbs
2279  * in the txq to the firmware. This function is called when driver
2280  * detects space available in the ring.
2281  **/
2282 static void
2283 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2284 {
2285         IOCB_t *iocb;
2286         struct lpfc_iocbq *nextiocb;
2287
2288         lockdep_assert_held(&phba->hbalock);
2289
2290         /*
2291          * Check to see if:
2292          *  (a) there is anything on the txq to send
2293          *  (b) link is up
2294          *  (c) link attention events can be processed (fcp ring only)
2295          *  (d) IOCB processing is not blocked by the outstanding mbox command.
2296          */
2297
2298         if (lpfc_is_link_up(phba) &&
2299             (!list_empty(&pring->txq)) &&
2300             (pring->ringno != LPFC_FCP_RING ||
2301              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2302
2303                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2304                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2305                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2306
2307                 if (iocb)
2308                         lpfc_sli_update_ring(phba, pring);
2309                 else
2310                         lpfc_sli_update_full_ring(phba, pring);
2311         }
2312
2313         return;
2314 }
2315
2316 /**
2317  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2318  * @phba: Pointer to HBA context object.
2319  * @hbqno: HBQ number.
2320  *
2321  * This function is called with hbalock held to get the next
2322  * available slot for the given HBQ. If there is free slot
2323  * available slot for the given HBQ. If there is a free slot
2324  * available for the HBQ it will return a pointer to the next available
2325  **/
2326 static struct lpfc_hbq_entry *
2327 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2328 {
2329         struct hbq_s *hbqp = &phba->hbqs[hbqno];
2330
2331         lockdep_assert_held(&phba->hbalock);
2332
2333         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2334             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2335                 hbqp->next_hbqPutIdx = 0;
2336
2337         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2338                 uint32_t raw_index = phba->hbq_get[hbqno];
2339                 uint32_t getidx = le32_to_cpu(raw_index);
2340
2341                 hbqp->local_hbqGetIdx = getidx;
2342
2343                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2344                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2345                                         "1802 HBQ %d: local_hbqGetIdx "
2346                                         "%u is greater than hbqp->entry_count %u\n",
2347                                         hbqno, hbqp->local_hbqGetIdx,
2348                                         hbqp->entry_count);
2349
2350                         phba->link_state = LPFC_HBA_ERROR;
2351                         return NULL;
2352                 }
2353
2354                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2355                         return NULL;
2356         }
2357
2358         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2359                         hbqp->hbqPutIdx;
2360 }
2361
2362 /**
2363  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2364  * @phba: Pointer to HBA context object.
2365  *
2366  * This function is called with no lock held to free all the
2367  * hbq buffers while uninitializing the SLI interface. It also
2368  * frees the HBQ buffers returned by the firmware but not yet
2369  * processed by the upper layers.
2370  **/
2371 void
2372 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2373 {
2374         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2375         struct hbq_dmabuf *hbq_buf;
2376         unsigned long flags;
2377         int i, hbq_count;
2378
2379         hbq_count = lpfc_sli_hbq_count();
2380         /* Return all memory used by all HBQs */
2381         spin_lock_irqsave(&phba->hbalock, flags);
2382         for (i = 0; i < hbq_count; ++i) {
2383                 list_for_each_entry_safe(dmabuf, next_dmabuf,
2384                                 &phba->hbqs[i].hbq_buffer_list, list) {
2385                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2386                         list_del(&hbq_buf->dbuf.list);
2387                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2388                 }
2389                 phba->hbqs[i].buffer_count = 0;
2390         }
2391
2392         /* Mark the HBQs not in use */
2393         phba->hbq_in_use = 0;
2394         spin_unlock_irqrestore(&phba->hbalock, flags);
2395 }
2396
2397 /**
2398  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2399  * @phba: Pointer to HBA context object.
2400  * @hbqno: HBQ number.
2401  * @hbq_buf: Pointer to HBQ buffer.
2402  *
2403  * This function is called with the hbalock held to post a
2404  * hbq buffer to the firmware. If the function finds an empty
2405  * slot in the HBQ, it will post the buffer. The function will return
2406  * zero if it successfully posts the buffer, else it will return
2407  * an error.
2408  **/
2409 static int
2410 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2411                          struct hbq_dmabuf *hbq_buf)
2412 {
2413         lockdep_assert_held(&phba->hbalock);
2414         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2415 }
2416
2417 /**
2418  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2419  * @phba: Pointer to HBA context object.
2420  * @hbqno: HBQ number.
2421  * @hbq_buf: Pointer to HBQ buffer.
2422  *
2423  * This function is called with the hbalock held to post a hbq buffer to the
2424  * firmware. If the function finds an empty slot in the HBQ, it will post the
2425  * buffer and place it on the hbq_buffer_list. The function will return zero if
2426  * it successfully posts the buffer, else it will return an error.
2427  **/
2428 static int
2429 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2430                             struct hbq_dmabuf *hbq_buf)
2431 {
2432         struct lpfc_hbq_entry *hbqe;
2433         dma_addr_t physaddr = hbq_buf->dbuf.phys;
2434
2435         lockdep_assert_held(&phba->hbalock);
2436         /* Get next HBQ entry slot to use */
2437         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2438         if (hbqe) {
2439                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2440
2441                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2442                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2443                 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2444                 hbqe->bde.tus.f.bdeFlags = 0;
2445                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2446                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2447                                 /* Sync SLIM */
2448                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2449                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2450                                 /* flush */
2451                 readl(phba->hbq_put + hbqno);
2452                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2453                 return 0;
2454         } else
2455                 return -ENOMEM;
2456 }
2457
2458 /**
2459  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2460  * @phba: Pointer to HBA context object.
2461  * @hbqno: HBQ number.
2462  * @hbq_buf: Pointer to HBQ buffer.
2463  *
2464  * This function is called with the hbalock held to post an RQE to the SLI4
2465  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2466  * the hbq_buffer_list and return zero, otherwise it will return an error.
2467  **/
2468 static int
2469 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2470                             struct hbq_dmabuf *hbq_buf)
2471 {
2472         int rc;
2473         struct lpfc_rqe hrqe;
2474         struct lpfc_rqe drqe;
2475         struct lpfc_queue *hrq;
2476         struct lpfc_queue *drq;
2477
2478         if (hbqno != LPFC_ELS_HBQ)
2479                 return 1;
2480         hrq = phba->sli4_hba.hdr_rq;
2481         drq = phba->sli4_hba.dat_rq;
2482
2483         lockdep_assert_held(&phba->hbalock);
2484         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2485         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2486         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2487         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2488         rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2489         if (rc < 0)
2490                 return rc;
2491         hbq_buf->tag = (rc | (hbqno << 16));
2492         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2493         return 0;
2494 }
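
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the buffer-tag packing above, which
 * lpfc_sli_hbqbuf_find() later undoes with "tag >> 16": the HBQ number
 * rides in the high 16 bits and the per-queue buffer index in the low
 * 16 bits of a single 32-bit tag.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_make_tag(uint32_t hbqno, uint32_t index)
{
	return index | (hbqno << 16);
}

int main(void)
{
	uint32_t tag = demo_make_tag(1, 37);

	printf("hbqno=%u index=%u\n", tag >> 16, tag & 0xffff); /* 1 37 */
	return 0;
}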
2495
2496 /* HBQ for ELS and CT traffic. */
2497 static struct lpfc_hbq_init lpfc_els_hbq = {
2498         .rn = 1,
2499         .entry_count = 256,
2500         .mask_count = 0,
2501         .profile = 0,
2502         .ring_mask = (1 << LPFC_ELS_RING),
2503         .buffer_count = 0,
2504         .init_count = 40,
2505         .add_count = 40,
2506 };
2507
2508 /* Array of HBQs */
2509 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2510         &lpfc_els_hbq,
2511 };
2512
2513 /**
2514  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2515  * @phba: Pointer to HBA context object.
2516  * @hbqno: HBQ number.
2517  * @count: Number of HBQ buffers to be posted.
2518  *
2519  * This function is called with no lock held to post more hbq buffers to the
2520  * given HBQ. The function returns the number of HBQ buffers successfully
2521  * posted.
2522  **/
2523 static int
2524 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2525 {
2526         uint32_t i, posted = 0;
2527         unsigned long flags;
2528         struct hbq_dmabuf *hbq_buffer;
2529         LIST_HEAD(hbq_buf_list);
2530         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2531                 return 0;
2532
2533         if ((phba->hbqs[hbqno].buffer_count + count) >
2534             lpfc_hbq_defs[hbqno]->entry_count)
2535                 count = lpfc_hbq_defs[hbqno]->entry_count -
2536                                         phba->hbqs[hbqno].buffer_count;
2537         if (!count)
2538                 return 0;
2539         /* Allocate HBQ entries */
2540         for (i = 0; i < count; i++) {
2541                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2542                 if (!hbq_buffer)
2543                         break;
2544                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2545         }
2546         /* Check whether HBQ is still in use */
2547         spin_lock_irqsave(&phba->hbalock, flags);
2548         if (!phba->hbq_in_use)
2549                 goto err;
2550         while (!list_empty(&hbq_buf_list)) {
2551                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2552                                  dbuf.list);
2553                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2554                                       (hbqno << 16));
2555                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2556                         phba->hbqs[hbqno].buffer_count++;
2557                         posted++;
2558                 } else
2559                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2560         }
2561         spin_unlock_irqrestore(&phba->hbalock, flags);
2562         return posted;
2563 err:
2564         spin_unlock_irqrestore(&phba->hbalock, flags);
2565         while (!list_empty(&hbq_buf_list)) {
2566                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2567                                  dbuf.list);
2568                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2569         }
2570         return 0;
2571 }
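
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the allocate-then-commit pattern above: buffers are
 * allocated on a private list with no lock held, then moved onto the
 * shared structure under the lock, and discarded if the target was torn
 * down while the lock was not held.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct demo_buf {
	struct demo_buf *next;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_buf *demo_shared;
static bool demo_in_use = true;

static int demo_fill(int count)
{
	struct demo_buf *local = NULL, *b;
	int i, posted = 0;

	for (i = 0; i < count; i++) {		/* allocate with no lock */
		b = calloc(1, sizeof(*b));
		if (!b)
			break;
		b->next = local;
		local = b;
	}

	pthread_mutex_lock(&demo_lock);
	while (local) {
		b = local;
		local = b->next;
		if (demo_in_use) {		/* commit to shared list */
			b->next = demo_shared;
			demo_shared = b;
			posted++;
		} else {
			free(b);		/* target gone: discard */
		}
	}
	pthread_mutex_unlock(&demo_lock);
	return posted;
}

int main(void)
{
	return demo_fill(4) == 4 ? 0 : 1;
}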
2572
2573 /**
2574  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2575  * @phba: Pointer to HBA context object.
2576  * @qno: HBQ number.
2577  *
2578  * This function posts more buffers to the HBQ. This function
2579  * is called with no lock held. The function returns the number of HBQ entries
2580  * successfully allocated.
2581  **/
2582 int
2583 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2584 {
2585         if (phba->sli_rev == LPFC_SLI_REV4)
2586                 return 0;
2587         else
2588                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2589                                          lpfc_hbq_defs[qno]->add_count);
2590 }
2591
2592 /**
2593  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2594  * @phba: Pointer to HBA context object.
2595  * @qno:  HBQ queue number.
2596  *
2597  * This function is called from SLI initialization code path with
2598  * no lock held to post initial HBQ buffers to firmware. The
2599  * function returns the number of HBQ entries successfully allocated.
2600  **/
2601 static int
2602 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2603 {
2604         if (phba->sli_rev == LPFC_SLI_REV4)
2605                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2606                                         lpfc_hbq_defs[qno]->entry_count);
2607         else
2608                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2609                                          lpfc_hbq_defs[qno]->init_count);
2610 }
2611
2612 /**
2613  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
2614  * @rb_list: Pointer to the hbq buffer list.
2615  * This function removes the first hbq buffer on an hbq list and returns a
2616  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2617  **/
2618 static struct hbq_dmabuf *
2619 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2620 {
2621         struct lpfc_dmabuf *d_buf;
2622
2623         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2624         if (!d_buf)
2625                 return NULL;
2626         return container_of(d_buf, struct hbq_dmabuf, dbuf);
2627 }
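
/*
 * Illustrative, standalone userspace sketch (hypothetical demo_* names,
 * not driver code) of the container_of() recovery used above: the list
 * links an embedded member, and offsetof() arithmetic recovers the
 * enclosing buffer from a pointer to that member.
 */
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_dmabuf {
	void *virt;
};

struct demo_hbq_dmabuf {
	int tag;
	struct demo_dmabuf dbuf;	/* embedded member kept on the list */
};

int main(void)
{
	struct demo_hbq_dmabuf buf = { .tag = 5 };
	struct demo_dmabuf *d = &buf.dbuf;
	struct demo_hbq_dmabuf *h =
		demo_container_of(d, struct demo_hbq_dmabuf, dbuf);

	printf("tag=%d\n", h->tag);	/* 5 */
	return 0;
}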
2628
2629 /**
2630  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2631  * @phba: Pointer to HBA context object.
2632  * @hrq: Pointer to the header receive queue.
2633  *
2634  * This function removes the first RQ buffer on an RQ buffer list and returns a
2635  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2636  **/
2637 static struct rqb_dmabuf *
2638 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2639 {
2640         struct lpfc_dmabuf *h_buf;
2641         struct lpfc_rqb *rqbp;
2642
2643         rqbp = hrq->rqbp;
2644         list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2645                          struct lpfc_dmabuf, list);
2646         if (!h_buf)
2647                 return NULL;
2648         rqbp->buffer_count--;
2649         return container_of(h_buf, struct rqb_dmabuf, hbuf);
2650 }
2651
2652 /**
2653  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2654  * @phba: Pointer to HBA context object.
2655  * @tag: Tag of the hbq buffer.
2656  *
2657  * This function searches for the hbq buffer associated with the given tag in
2658  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2659  * otherwise it returns NULL.
2660  **/
2661 static struct hbq_dmabuf *
2662 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2663 {
2664         struct lpfc_dmabuf *d_buf;
2665         struct hbq_dmabuf *hbq_buf;
2666         uint32_t hbqno;
2667
2668         hbqno = tag >> 16;
2669         if (hbqno >= LPFC_MAX_HBQS)
2670                 return NULL;
2671
2672         spin_lock_irq(&phba->hbalock);
2673         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2674                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2675                 if (hbq_buf->tag == tag) {
2676                         spin_unlock_irq(&phba->hbalock);
2677                         return hbq_buf;
2678                 }
2679         }
2680         spin_unlock_irq(&phba->hbalock);
2681         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2682                         "1803 Bad hbq tag. Data: x%x x%x\n",
2683                         tag, phba->hbqs[tag >> 16].buffer_count);
2684         return NULL;
2685 }
2686
2687 /**
2688  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2689  * @phba: Pointer to HBA context object.
2690  * @hbq_buffer: Pointer to HBQ buffer.
2691  *
2692  * This function is called with hbalock held. This function gives back
2693  * the hbq buffer to firmware. If the HBQ does not have space to
2694  * post the buffer, it will free the buffer.
2695  **/
2696 void
2697 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2698 {
2699         uint32_t hbqno;
2700
2701         if (hbq_buffer) {
2702                 hbqno = hbq_buffer->tag >> 16;
2703                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2704                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2705         }
2706 }
2707
2708 /**
2709  * lpfc_sli_chk_mbx_command - Check if the mailbox command is legitimate
2710  * @mbxCommand: mailbox command code.
2711  *
2712  * This function is called by the mailbox event handler function to verify
2713  * that the completed mailbox command is a legitimate mailbox command. If the
2714  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2715  * and the mailbox event handler will take the HBA offline.
2716  **/
2717 static int
2718 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2719 {
2720         uint8_t ret;
2721
2722         switch (mbxCommand) {
2723         case MBX_LOAD_SM:
2724         case MBX_READ_NV:
2725         case MBX_WRITE_NV:
2726         case MBX_WRITE_VPARMS:
2727         case MBX_RUN_BIU_DIAG:
2728         case MBX_INIT_LINK:
2729         case MBX_DOWN_LINK:
2730         case MBX_CONFIG_LINK:
2731         case MBX_CONFIG_RING:
2732         case MBX_RESET_RING:
2733         case MBX_READ_CONFIG:
2734         case MBX_READ_RCONFIG:
2735         case MBX_READ_SPARM:
2736         case MBX_READ_STATUS:
2737         case MBX_READ_RPI:
2738         case MBX_READ_XRI:
2739         case MBX_READ_REV:
2740         case MBX_READ_LNK_STAT:
2741         case MBX_REG_LOGIN:
2742         case MBX_UNREG_LOGIN:
2743         case MBX_CLEAR_LA:
2744         case MBX_DUMP_MEMORY:
2745         case MBX_DUMP_CONTEXT:
2746         case MBX_RUN_DIAGS:
2747         case MBX_RESTART:
2748         case MBX_UPDATE_CFG:
2749         case MBX_DOWN_LOAD:
2750         case MBX_DEL_LD_ENTRY:
2751         case MBX_RUN_PROGRAM:
2752         case MBX_SET_MASK:
2753         case MBX_SET_VARIABLE:
2754         case MBX_UNREG_D_ID:
2755         case MBX_KILL_BOARD:
2756         case MBX_CONFIG_FARP:
2757         case MBX_BEACON:
2758         case MBX_LOAD_AREA:
2759         case MBX_RUN_BIU_DIAG64:
2760         case MBX_CONFIG_PORT:
2761         case MBX_READ_SPARM64:
2762         case MBX_READ_RPI64:
2763         case MBX_REG_LOGIN64:
2764         case MBX_READ_TOPOLOGY:
2765         case MBX_WRITE_WWN:
2766         case MBX_SET_DEBUG:
2767         case MBX_LOAD_EXP_ROM:
2768         case MBX_ASYNCEVT_ENABLE:
2769         case MBX_REG_VPI:
2770         case MBX_UNREG_VPI:
2771         case MBX_HEARTBEAT:
2772         case MBX_PORT_CAPABILITIES:
2773         case MBX_PORT_IOV_CONTROL:
2774         case MBX_SLI4_CONFIG:
2775         case MBX_SLI4_REQ_FTRS:
2776         case MBX_REG_FCFI:
2777         case MBX_UNREG_FCFI:
2778         case MBX_REG_VFI:
2779         case MBX_UNREG_VFI:
2780         case MBX_INIT_VPI:
2781         case MBX_INIT_VFI:
2782         case MBX_RESUME_RPI:
2783         case MBX_READ_EVENT_LOG_STATUS:
2784         case MBX_READ_EVENT_LOG:
2785         case MBX_SECURITY_MGMT:
2786         case MBX_AUTH_PORT:
2787         case MBX_ACCESS_VDATA:
2788                 ret = mbxCommand;
2789                 break;
2790         default:
2791                 ret = MBX_SHUTDOWN;
2792                 break;
2793         }
2794         return ret;
2795 }
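
/*
 * Illustrative use, mirroring lpfc_sli_handle_mb_event() below: an
 * unrecognized command code folds to MBX_SHUTDOWN, which the event handler
 * treats as fatal:
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		phba->work_hs = HS_FFER3;
 *		lpfc_handle_eratt(phba);
 *	}
 */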
2796
2797 /**
2798  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2799  * @phba: Pointer to HBA context object.
2800  * @pmboxq: Pointer to mailbox command.
2801  *
2802  * This is completion handler function for mailbox commands issued from
2803  * lpfc_sli_issue_mbox_wait function. This function is called by the
2804  * mailbox event handler function with no lock held. This function
2805  * will wake up thread waiting on the wait queue pointed by context1
2806  * of the mailbox.
2807  **/
2808 void
2809 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2810 {
2811         unsigned long drvr_flag;
2812         struct completion *pmbox_done;
2813
2814         /*
2815          * If pmbox_done is NULL, the driver thread gave up waiting and
2816          * continued running.
2817          */
2818         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2819         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2820         pmbox_done = (struct completion *)pmboxq->context3;
2821         if (pmbox_done)
2822                 complete(pmbox_done);
2823         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2824         return;
2825 }
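
/*
 * A minimal sketch of the waiter's side, assuming the usual completion
 * pattern (the real lpfc_sli_issue_mbox_wait() adds flag and error
 * handling; timeout below is an illustrative variable): the issuer parks
 * a struct completion in context3 and blocks until this handler fires:
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->context3 = &mbox_done;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_for_completion_timeout(&mbox_done,
 *				    msecs_to_jiffies(timeout * 1000));
 */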
2826
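/**
 * __lpfc_sli_rpi_release - Release a node's RPI once its unreg completes
 * @vport: Pointer to virtual port object.
 * @ndlp: Pointer to the node whose RPI is being released.
 *
 * If NLP_RELEASE_RPI is set on the node, the RPI is returned to the SLI4
 * free pool and nlp_rpi is reset to LPFC_RPI_ALLOC_ERROR. In all cases
 * the NLP_UNREG_INP flag is cleared.
 **/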
2827 static void
2828 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2829 {
2830         unsigned long iflags;
2831
2832         if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2833                 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2834                 spin_lock_irqsave(&ndlp->lock, iflags);
2835                 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2836                 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2837                 spin_unlock_irqrestore(&ndlp->lock, iflags);
2838         }
2839         ndlp->nlp_flag &= ~NLP_UNREG_INP;
2840 }
2841
2842 /**
2843  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2844  * @phba: Pointer to HBA context object.
2845  * @pmb: Pointer to mailbox object.
2846  *
2847  * This function is the default mailbox completion handler. It
2848  * frees the memory resources associated with the completed mailbox
2849  * command. If the completed command is a REG_LOGIN mailbox command,
2850  * this function will issue a UREG_LOGIN to re-claim the RPI.
2851  **/
2852 void
2853 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2854 {
2855         struct lpfc_vport  *vport = pmb->vport;
2856         struct lpfc_dmabuf *mp;
2857         struct lpfc_nodelist *ndlp;
2858         struct Scsi_Host *shost;
2859         uint16_t rpi, vpi;
2860         int rc;
2861
2862         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2863
2864         if (mp) {
2865                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2866                 kfree(mp);
2867         }
2868
2869         /*
2870          * If a REG_LOGIN succeeded after the node was destroyed or the
2871          * node is in re-discovery, the driver needs to clean up the RPI.
2872          */
2873         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2874             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2875             !pmb->u.mb.mbxStatus) {
2876                 rpi = pmb->u.mb.un.varWords[0];
2877                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2878                 if (phba->sli_rev == LPFC_SLI_REV4)
2879                         vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2880                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2881                 pmb->vport = vport;
2882                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2883                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2884                 if (rc != MBX_NOT_FINISHED)
2885                         return;
2886         }
2887
2888         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2889                 !(phba->pport->load_flag & FC_UNLOADING) &&
2890                 !pmb->u.mb.mbxStatus) {
2891                 shost = lpfc_shost_from_vport(vport);
2892                 spin_lock_irq(shost->host_lock);
2893                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2894                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2895                 spin_unlock_irq(shost->host_lock);
2896         }
2897
2898         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2899                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2900                 lpfc_nlp_put(ndlp);
2901                 pmb->ctx_buf = NULL;
2902                 pmb->ctx_ndlp = NULL;
2903         }
2904
2905         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2906                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2907
2908                 /* Check to see if there are any deferred events to process */
2909                 if (ndlp) {
2910                         lpfc_printf_vlog(
2911                                 vport,
2912                                 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2913                                 "1438 UNREG cmpl deferred mbox x%x "
2914                                 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2915                                 ndlp->nlp_rpi, ndlp->nlp_DID,
2916                                 ndlp->nlp_flag, ndlp->nlp_defer_did,
2917                                 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2918
2919                         if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2920                             (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2921                                 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2922                                 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2923                                 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2924                         } else {
2925                                 __lpfc_sli_rpi_release(vport, ndlp);
2926                         }
2927
2928                         /* The unreg_login mailbox is complete and had a
2929                          * reference that has to be released.  The PLOGI
2930                          * got its own ref.
2931                          */
2932                         lpfc_nlp_put(ndlp);
2933                         pmb->ctx_ndlp = NULL;
2934                 }
2935         }
2936
2937         /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2938         if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2939                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2940                 lpfc_nlp_put(ndlp);
2941         }
2942
2943         /* Check security permission status on INIT_LINK mailbox command */
2944         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2945             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2946                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2947                                 "2860 SLI authentication is required "
2948                                 "for INIT_LINK but has not been done yet\n");
2949
2950         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2951                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2952         else
2953                 mempool_free(pmb, phba->mbox_mem_pool);
2954 }
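
/*
 * Illustrative sketch: issuers that need no command-specific completion
 * processing simply point mbox_cmpl at this handler before submitting,
 * exactly as the REG_LOGIN cleanup path above does:
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 */
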
2955 /**
2956  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2957  * @phba: Pointer to HBA context object.
2958  * @pmb: Pointer to mailbox object.
2959  *
2960  * This function is the unreg rpi mailbox completion handler. It
2961  * frees the memory resources associated with the completed mailbox
2962  * command. An additional reference is put on the ndlp to prevent
2963  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2964  * the unreg mailbox command completes; this routine puts that
2965  * reference back.
2966  *
2967  **/
2968 void
2969 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2970 {
2971         struct lpfc_vport  *vport = pmb->vport;
2972         struct lpfc_nodelist *ndlp;
2973
2974         ndlp = pmb->ctx_ndlp;
2975         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2976                 if (phba->sli_rev == LPFC_SLI_REV4 &&
2977                     (bf_get(lpfc_sli_intf_if_type,
2978                      &phba->sli4_hba.sli_intf) >=
2979                      LPFC_SLI_INTF_IF_TYPE_2)) {
2980                         if (ndlp) {
2981                                 lpfc_printf_vlog(
2982                                          vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2983                                          "0010 UNREG_LOGIN vpi:%x "
2984                                          "rpi:%x DID:%x defer x%x flg x%x "
2985                                          "x%px\n",
2986                                          vport->vpi, ndlp->nlp_rpi,
2987                                          ndlp->nlp_DID, ndlp->nlp_defer_did,
2988                                          ndlp->nlp_flag,
2989                                          ndlp);
2990                                 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2991
2992                                 /* Check to see if there are any deferred
2993                                  * events to process
2994                                  */
2995                                 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2996                                     (ndlp->nlp_defer_did !=
2997                                     NLP_EVT_NOTHING_PENDING)) {
2998                                         lpfc_printf_vlog(
2999                                                 vport, KERN_INFO, LOG_DISCOVERY,
3000                                                 "4111 UNREG cmpl deferred "
3001                                                 "clr x%x on "
3002                                                 "NPort x%x Data: x%x x%px\n",
3003                                                 ndlp->nlp_rpi, ndlp->nlp_DID,
3004                                                 ndlp->nlp_defer_did, ndlp);
3005                                         ndlp->nlp_flag &= ~NLP_UNREG_INP;
3006                                         ndlp->nlp_defer_did =
3007                                                 NLP_EVT_NOTHING_PENDING;
3008                                         lpfc_issue_els_plogi(
3009                                                 vport, ndlp->nlp_DID, 0);
3010                                 } else {
3011                                         __lpfc_sli_rpi_release(vport, ndlp);
3012                                 }
3013                                 lpfc_nlp_put(ndlp);
3014                         }
3015                 }
3016         }
3017
3018         mempool_free(pmb, phba->mbox_mem_pool);
3019 }
3020
3021 /**
3022  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3023  * @phba: Pointer to HBA context object.
3024  *
3025  * This function is called with no lock held. It processes all the
3026  * completed mailbox commands and hands them to the upper layers. The
3027  * interrupt service routine processes the mailbox completion interrupt,
3028  * adds completed mailbox commands to the mboxq_cmpl queue, and signals
3029  * the worker thread. The worker thread calls lpfc_sli_handle_mb_event,
3030  * which returns the completed mailbox commands on the mboxq_cmpl queue
3031  * to the upper layers by calling the completion handler function of
3032  * each mailbox.
3033  **/
3034 int
3035 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3036 {
3037         MAILBOX_t *pmbox;
3038         LPFC_MBOXQ_t *pmb;
3039         int rc;
3040         LIST_HEAD(cmplq);
3041
3042         phba->sli.slistat.mbox_event++;
3043
3044         /* Get all completed mailbox buffers into the cmplq */
3045         spin_lock_irq(&phba->hbalock);
3046         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3047         spin_unlock_irq(&phba->hbalock);
3048
3049         /* Get a Mailbox buffer to setup mailbox commands for callback */
3050         do {
3051                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3052                 if (pmb == NULL)
3053                         break;
3054
3055                 pmbox = &pmb->u.mb;
3056
3057                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3058                         if (pmb->vport) {
3059                                 lpfc_debugfs_disc_trc(pmb->vport,
3060                                         LPFC_DISC_TRC_MBOX_VPORT,
3061                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3062                                         (uint32_t)pmbox->mbxCommand,
3063                                         pmbox->un.varWords[0],
3064                                         pmbox->un.varWords[1]);
3065                         }
3066                         else {
3067                                 lpfc_debugfs_disc_trc(phba->pport,
3068                                         LPFC_DISC_TRC_MBOX,
3069                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
3070                                         (uint32_t)pmbox->mbxCommand,
3071                                         pmbox->un.varWords[0],
3072                                         pmbox->un.varWords[1]);
3073                         }
3074                 }
3075
3076                 /*
3077                  * It is a fatal error if an unknown mbox command completes.
3078                  */
3079                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3080                     MBX_SHUTDOWN) {
3081                         /* Unknown mailbox command compl */
3082                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3083                                         "(%d):0323 Unknown Mailbox command "
3084                                         "x%x (x%x/x%x) Cmpl\n",
3085                                         pmb->vport ? pmb->vport->vpi :
3086                                         LPFC_VPORT_UNKNOWN,
3087                                         pmbox->mbxCommand,
3088                                         lpfc_sli_config_mbox_subsys_get(phba,
3089                                                                         pmb),
3090                                         lpfc_sli_config_mbox_opcode_get(phba,
3091                                                                         pmb));
3092                         phba->link_state = LPFC_HBA_ERROR;
3093                         phba->work_hs = HS_FFER3;
3094                         lpfc_handle_eratt(phba);
3095                         continue;
3096                 }
3097
3098                 if (pmbox->mbxStatus) {
3099                         phba->sli.slistat.mbox_stat_err++;
3100                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3101                                 /* Mbox cmd cmpl error - RETRYing */
3102                                 lpfc_printf_log(phba, KERN_INFO,
3103                                         LOG_MBOX | LOG_SLI,
3104                                         "(%d):0305 Mbox cmd cmpl "
3105                                         "error - RETRYing Data: x%x "
3106                                         "(x%x/x%x) x%x x%x x%x\n",
3107                                         pmb->vport ? pmb->vport->vpi :
3108                                         LPFC_VPORT_UNKNOWN,
3109                                         pmbox->mbxCommand,
3110                                         lpfc_sli_config_mbox_subsys_get(phba,
3111                                                                         pmb),
3112                                         lpfc_sli_config_mbox_opcode_get(phba,
3113                                                                         pmb),
3114                                         pmbox->mbxStatus,
3115                                         pmbox->un.varWords[0],
3116                                         pmb->vport ? pmb->vport->port_state :
3117                                         LPFC_VPORT_UNKNOWN);
3118                                 pmbox->mbxStatus = 0;
3119                                 pmbox->mbxOwner = OWN_HOST;
3120                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3121                                 if (rc != MBX_NOT_FINISHED)
3122                                         continue;
3123                         }
3124                 }
3125
3126                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3127                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3128                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3129                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3130                                 "x%x x%x x%x\n",
3131                                 pmb->vport ? pmb->vport->vpi : 0,
3132                                 pmbox->mbxCommand,
3133                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3134                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3135                                 pmb->mbox_cmpl,
3136                                 *((uint32_t *) pmbox),
3137                                 pmbox->un.varWords[0],
3138                                 pmbox->un.varWords[1],
3139                                 pmbox->un.varWords[2],
3140                                 pmbox->un.varWords[3],
3141                                 pmbox->un.varWords[4],
3142                                 pmbox->un.varWords[5],
3143                                 pmbox->un.varWords[6],
3144                                 pmbox->un.varWords[7],
3145                                 pmbox->un.varWords[8],
3146                                 pmbox->un.varWords[9],
3147                                 pmbox->un.varWords[10]);
3148
3149                 if (pmb->mbox_cmpl)
3150                         pmb->mbox_cmpl(phba, pmb);
3151         } while (1);
3152         return 0;
3153 }
3154
3155 /**
3156  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3157  * @phba: Pointer to HBA context object.
3158  * @pring: Pointer to driver SLI ring object.
3159  * @tag: buffer tag.
3160  *
3161  * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
3162  * is set in the tag, the buffer was posted for a particular exchange and
3163  * the function returns that buffer without replacing it.
3164  * If the buffer is for unsolicited ELS or CT traffic, this function
3165  * returns the buffer and also posts another buffer to the firmware.
3166  **/
3167 static struct lpfc_dmabuf *
3168 lpfc_sli_get_buff(struct lpfc_hba *phba,
3169                   struct lpfc_sli_ring *pring,
3170                   uint32_t tag)
3171 {
3172         struct hbq_dmabuf *hbq_entry;
3173
3174         if (tag & QUE_BUFTAG_BIT)
3175                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3176         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3177         if (!hbq_entry)
3178                 return NULL;
3179         return &hbq_entry->dbuf;
3180 }
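
/*
 * Editorial note: the tag test above lets two buffer schemes coexist on
 * one ring:
 *
 *	if (tag & QUE_BUFTAG_BIT)	buffer was posted for a particular
 *					exchange; lpfc_sli_ring_taggedbuf_get()
 *	else				hbq tag; lpfc_sli_hbqbuf_find()
 */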
3181
3182 /**
3183  * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3184  *                              containing an NVME LS request.
3185  * @phba: pointer to lpfc hba data structure.
3186  * @piocb: pointer to the iocbq struct representing the sequence starting
3187  *        frame.
3188  *
3189  * This routine initially validates the NVME LS, validates there is a login
3190  * with the port that sent the LS, and then calls the appropriate nvme host
3191  * or target LS request handler.
3192  **/
3193 static void
3194 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3195 {
3196         struct lpfc_nodelist *ndlp;
3197         struct lpfc_dmabuf *d_buf;
3198         struct hbq_dmabuf *nvmebuf;
3199         struct fc_frame_header *fc_hdr;
3200         struct lpfc_async_xchg_ctx *axchg = NULL;
3201         char *failwhy = NULL;
3202         uint32_t oxid, sid, did, fctl, size;
3203         int ret = 1;
3204
3205         d_buf = piocb->context2;
3206
3207         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3208         fc_hdr = nvmebuf->hbuf.virt;
3209         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3210         sid = sli4_sid_from_fc_hdr(fc_hdr);
3211         did = sli4_did_from_fc_hdr(fc_hdr);
3212         fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3213                 fc_hdr->fh_f_ctl[1] << 8 |
3214                 fc_hdr->fh_f_ctl[2]);
3215         size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3216
3217         lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
3218                          oxid, size, sid);
3219
3220         if (phba->pport->load_flag & FC_UNLOADING) {
3221                 failwhy = "Driver Unloading";
3222         } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3223                 failwhy = "NVME FC4 Disabled";
3224         } else if (!phba->nvmet_support && !phba->pport->localport) {
3225                 failwhy = "No Localport";
3226         } else if (phba->nvmet_support && !phba->targetport) {
3227                 failwhy = "No Targetport";
3228         } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3229                 failwhy = "Bad NVME LS R_CTL";
3230         } else if (unlikely((fctl & 0x00FF0000) !=
3231                         (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3232                 failwhy = "Bad NVME LS F_CTL";
3233         } else {
3234                 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3235                 if (!axchg)
3236                         failwhy = "No CTX memory";
3237         }
3238
3239         if (unlikely(failwhy)) {
3240                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3241                                 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3242                                 sid, oxid, failwhy);
3243                 goto out_fail;
3244         }
3245
3246         /* validate the source of the LS is logged in */
3247         ndlp = lpfc_findnode_did(phba->pport, sid);
3248         if (!ndlp ||
3249             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3250              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3251                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3252                                 "6216 NVME Unsol rcv: No ndlp: "
3253                                 "NPort_ID x%x oxid x%x\n",
3254                                 sid, oxid);
3255                 goto out_fail;
3256         }
3257
3258         axchg->phba = phba;
3259         axchg->ndlp = ndlp;
3260         axchg->size = size;
3261         axchg->oxid = oxid;
3262         axchg->sid = sid;
3263         axchg->wqeq = NULL;
3264         axchg->state = LPFC_NVME_STE_LS_RCV;
3265         axchg->entry_cnt = 1;
3266         axchg->rqb_buffer = (void *)nvmebuf;
3267         axchg->hdwq = &phba->sli4_hba.hdwq[0];
3268         axchg->payload = nvmebuf->dbuf.virt;
3269         INIT_LIST_HEAD(&axchg->list);
3270
3271         if (phba->nvmet_support) {
3272                 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3273                 spin_lock_irq(&ndlp->lock);
3274                 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3275                         ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3276                         spin_unlock_irq(&ndlp->lock);
3277
3278                         /* This reference is a single occurrence to hold the
3279                          * node valid until the nvmet transport calls
3280                          * host_release.
3281                          */
3282                         if (!lpfc_nlp_get(ndlp))
3283                                 goto out_fail;
3284
3285                         lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3286                                         "6206 NVMET unsol ls_req ndlp x%px "
3287                                         "DID x%x xflags x%x refcnt %d\n",
3288                                         ndlp, ndlp->nlp_DID,
3289                                         ndlp->fc4_xpt_flags,
3290                                         kref_read(&ndlp->kref));
3291                 } else {
3292                         spin_unlock_irq(&ndlp->lock);
3293                 }
3294         } else {
3295                 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3296         }
3297
3298         /* if zero, LS was successfully handled. If non-zero, LS not handled */
3299         if (!ret)
3300                 return;
3301
3302 out_fail:
3303         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3304                         "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3305                         "NVMe%s handler failed %d\n",
3306                         did, sid, oxid,
3307                         (phba->nvmet_support) ? "T" : "I", ret);
3308
3309         /* recycle receive buffer */
3310         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3311
3312         /* If start of new exchange, abort it */
3313         if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3314                 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3315
3316         if (ret)
3317                 kfree(axchg);
3318 }
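
/*
 * Editorial note on the F_CTL screen above: fctl is the 24-bit F_CTL field
 * assembled from its three header bytes, and 0x00FF0000 masks the top
 * byte. Assuming the standard fc_fs.h bit values, the accepted pattern is
 * a complete single-sequence exchange:
 *
 *	FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT
 *	   = 0x200000   |   0x080000    |    0x010000    = 0x290000
 */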
3319
3320 /**
3321  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3322  * @phba: Pointer to HBA context object.
3323  * @pring: Pointer to driver SLI ring object.
3324  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3325  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3326  * @fch_type: the type for the first frame of the sequence.
3327  *
3328  * This function is called with no lock held. This function uses the r_ctl and
3329  * type of the received sequence to find the correct callback function to call
3330  * to process the sequence.
3331  **/
3332 static int
3333 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3334                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3335                          uint32_t fch_type)
3336 {
3337         int i;
3338
3339         switch (fch_type) {
3340         case FC_TYPE_NVME:
3341                 lpfc_nvme_unsol_ls_handler(phba, saveq);
3342                 return 1;
3343         default:
3344                 break;
3345         }
3346
3347         /* Unsolicited responses */
3348         if (pring->prt[0].profile) {
3349                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3350                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3351                                                                         saveq);
3352                 return 1;
3353         }
3354         /* We must search, based on rctl / type,
3355          * for the right routine */
3356         for (i = 0; i < pring->num_mask; i++) {
3357                 if ((pring->prt[i].rctl == fch_r_ctl) &&
3358                     (pring->prt[i].type == fch_type)) {
3359                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3360                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3361                                                 (phba, pring, saveq);
3362                         return 1;
3363                 }
3364         }
3365         return 0;
3366 }
3367
3368 /**
3369  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3370  * @phba: Pointer to HBA context object.
3371  * @pring: Pointer to driver SLI ring object.
3372  * @saveq: Pointer to the unsolicited iocb.
3373  *
3374  * This function is called with no lock held by the ring event handler
3375  * when there is an unsolicited iocb posted to the response ring by the
3376  * firmware. This function gets the buffers associated with the iocbs
3377  * and calls the event handler for the ring. This function handles both
3378  * qring buffers and hbq buffers.
3379  * When the function returns 1, the caller can free the iocb object;
3380  * otherwise, upper-layer functions will free the iocb objects.
3381  **/
3382 static int
3383 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3384                             struct lpfc_iocbq *saveq)
3385 {
3386         IOCB_t           * irsp;
3387         WORD5            * w5p;
3388         uint32_t           Rctl, Type;
3389         struct lpfc_iocbq *iocbq;
3390         struct lpfc_dmabuf *dmzbuf;
3391
3392         irsp = &(saveq->iocb);
3393
3394         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3395                 if (pring->lpfc_sli_rcv_async_status)
3396                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3397                 else
3398                         lpfc_printf_log(phba,
3399                                         KERN_WARNING,
3400                                         LOG_SLI,
3401                                         "0316 Ring %d handler: unexpected "
3402                                         "ASYNC_STATUS iocb received evt_code "
3403                                         "0x%x\n",
3404                                         pring->ringno,
3405                                         irsp->un.asyncstat.evt_code);
3406                 return 1;
3407         }
3408
3409         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3410                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3411                 if (irsp->ulpBdeCount > 0) {
3412                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3413                                         irsp->un.ulpWord[3]);
3414                         lpfc_in_buf_free(phba, dmzbuf);
3415                 }
3416
3417                 if (irsp->ulpBdeCount > 1) {
3418                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3419                                         irsp->unsli3.sli3Words[3]);
3420                         lpfc_in_buf_free(phba, dmzbuf);
3421                 }
3422
3423                 if (irsp->ulpBdeCount > 2) {
3424                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3425                                 irsp->unsli3.sli3Words[7]);
3426                         lpfc_in_buf_free(phba, dmzbuf);
3427                 }
3428
3429                 return 1;
3430         }
3431
3432         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3433                 if (irsp->ulpBdeCount != 0) {
3434                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
3435                                                 irsp->un.ulpWord[3]);
3436                         if (!saveq->context2)
3437                                 lpfc_printf_log(phba,
3438                                         KERN_ERR,
3439                                         LOG_SLI,
3440                                         "0341 Ring %d Cannot find buffer for "
3441                                         "an unsolicited iocb. tag 0x%x\n",
3442                                         pring->ringno,
3443                                         irsp->un.ulpWord[3]);
3444                 }
3445                 if (irsp->ulpBdeCount == 2) {
3446                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
3447                                                 irsp->unsli3.sli3Words[7]);
3448                         if (!saveq->context3)
3449                                 lpfc_printf_log(phba,
3450                                         KERN_ERR,
3451                                         LOG_SLI,
3452                                         "0342 Ring %d Cannot find buffer for an"
3453                                         " unsolicited iocb. tag 0x%x\n",
3454                                         pring->ringno,
3455                                         irsp->unsli3.sli3Words[7]);
3456                 }
3457                 list_for_each_entry(iocbq, &saveq->list, list) {
3458                         irsp = &(iocbq->iocb);
3459                         if (irsp->ulpBdeCount != 0) {
3460                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3461                                                         irsp->un.ulpWord[3]);
3462                                 if (!iocbq->context2)
3463                                         lpfc_printf_log(phba,
3464                                                 KERN_ERR,
3465                                                 LOG_SLI,
3466                                                 "0343 Ring %d Cannot find "
3467                                                 "buffer for an unsolicited iocb"
3468                                                 ". tag 0x%x\n", pring->ringno,
3469                                                 irsp->un.ulpWord[3]);
3470                         }
3471                         if (irsp->ulpBdeCount == 2) {
3472                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3473                                                 irsp->unsli3.sli3Words[7]);
3474                                 if (!iocbq->context3)
3475                                         lpfc_printf_log(phba,
3476                                                 KERN_ERR,
3477                                                 LOG_SLI,
3478                                                 "0344 Ring %d Cannot find "
3479                                                 "buffer for an unsolicited "
3480                                                 "iocb. tag 0x%x\n",
3481                                                 pring->ringno,
3482                                                 irsp->unsli3.sli3Words[7]);
3483                         }
3484                 }
3485         }
3486         if (irsp->ulpBdeCount != 0 &&
3487             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3488              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3489                 int found = 0;
3490
3491                 /* search the iocb_continue_saveq for the same XRI */
3492                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3493                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3494                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
3495                                 list_add_tail(&saveq->list, &iocbq->list);
3496                                 found = 1;
3497                                 break;
3498                         }
3499                 }
3500                 if (!found)
3501                         list_add_tail(&saveq->clist,
3502                                       &pring->iocb_continue_saveq);
3503                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3504                         list_del_init(&iocbq->clist);
3505                         saveq = iocbq;
3506                         irsp = &(saveq->iocb);
3507                 } else
3508                         return 0;
3509         }
3510         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3511             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3512             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3513                 Rctl = FC_RCTL_ELS_REQ;
3514                 Type = FC_TYPE_ELS;
3515         } else {
3516                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3517                 Rctl = w5p->hcsw.Rctl;
3518                 Type = w5p->hcsw.Type;
3519
3520                 /* Firmware Workaround */
3521                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3522                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3523                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3524                         Rctl = FC_RCTL_ELS_REQ;
3525                         Type = FC_TYPE_ELS;
3526                         w5p->hcsw.Rctl = Rctl;
3527                         w5p->hcsw.Type = Type;
3528                 }
3529         }
3530
3531         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3532                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3533                                 "0313 Ring %d handler: unexpected Rctl x%x "
3534                                 "Type x%x received\n",
3535                                 pring->ringno, Rctl, Type);
3536
3537         return 1;
3538 }
3539
3540 /**
3541  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3542  * @phba: Pointer to HBA context object.
3543  * @pring: Pointer to driver SLI ring object.
3544  * @prspiocb: Pointer to response iocb object.
3545  *
3546  * This function looks up the iocb_lookup table to get the command iocb
3547  * corresponding to the given response iocb using the iotag of the
3548  * response iocb. The driver calls this function with the hbalock held
3549  * for SLI3 ports or the ring lock held for SLI4 ports.
3550  * This function returns the command iocb object if it finds the command
3551  * iocb; otherwise it returns NULL.
3552  **/
3553 static struct lpfc_iocbq *
3554 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3555                       struct lpfc_sli_ring *pring,
3556                       struct lpfc_iocbq *prspiocb)
3557 {
3558         struct lpfc_iocbq *cmd_iocb = NULL;
3559         u16 iotag;
3560
3561         if (phba->sli_rev == LPFC_SLI_REV4)
3562                 iotag = get_wqe_reqtag(prspiocb);
3563         else
3564                 iotag = prspiocb->iocb.ulpIoTag;
3565
3566         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3567                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3568                 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3569                         /* remove from txcmpl queue list */
3570                         list_del_init(&cmd_iocb->list);
3571                         cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3572                         pring->txcmplq_cnt--;
3573                         return cmd_iocb;
3574                 }
3575         }
3576
3577         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3578                         "0317 iotag x%x is out of "
3579                         "range: max iotag x%x\n",
3580                         iotag, phba->sli.last_iotag);
3581         return NULL;
3582 }
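
/*
 * Editorial note: the iotag lookup above is an O(1) array index. Both
 * lookup helpers rely on the same invariant:
 *
 *	0 < iotag <= phba->sli.last_iotag, and
 *	phba->sli.iocbq_lookup[iotag] is the command iocb that was given
 *	that iotag when it was submitted.
 */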
3583
3584 /**
3585  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3586  * @phba: Pointer to HBA context object.
3587  * @pring: Pointer to driver SLI ring object.
3588  * @iotag: IOCB tag.
3589  *
3590  * This function looks up the iocb_lookup table to get the command iocb
3591  * corresponding to the given iotag. The driver calls this function with
3592  * the ring lock held because this function is an SLI4 port only helper.
3593  * This function returns the command iocb object if it finds the command
3594  * iocb; otherwise it returns NULL.
3595  **/
3596 static struct lpfc_iocbq *
3597 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3598                              struct lpfc_sli_ring *pring, uint16_t iotag)
3599 {
3600         struct lpfc_iocbq *cmd_iocb = NULL;
3601
3602         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3603                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3604                 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3605                         /* remove from txcmpl queue list */
3606                         list_del_init(&cmd_iocb->list);
3607                         cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3608                         pring->txcmplq_cnt--;
3609                         return cmd_iocb;
3610                 }
3611         }
3612
3613         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3614                         "0372 iotag x%x lookup error: max iotag (x%x) "
3615                         "cmd_flag x%x\n",
3616                         iotag, phba->sli.last_iotag,
3617                         cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3618         return NULL;
3619 }
3620
3621 /**
3622  * lpfc_sli_process_sol_iocb - process solicited iocb completion
3623  * @phba: Pointer to HBA context object.
3624  * @pring: Pointer to driver SLI ring object.
3625  * @saveq: Pointer to the response iocb to be processed.
3626  *
3627  * This function is called by the ring event handler for non-fcp
3628  * rings when there is a new response iocb in the response ring.
3629  * The caller is not required to hold any locks. This function
3630  * gets the command iocb associated with the response iocb and
3631  * calls the completion handler for the command iocb. If there
3632  * is no completion handler, the function will free the resources
3633  * associated with the command iocb. If the response iocb is for
3634  * an already aborted command iocb, the status of the completion
3635  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3636  * This function always returns 1.
3637  **/
3638 static int
3639 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3640                           struct lpfc_iocbq *saveq)
3641 {
3642         struct lpfc_iocbq *cmdiocbp;
3643         int rc = 1;
3644         unsigned long iflag;
3645         u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3646
3647         if (phba->sli_rev == LPFC_SLI_REV4)
3648                 spin_lock_irqsave(&pring->ring_lock, iflag);
3649         else
3650                 spin_lock_irqsave(&phba->hbalock, iflag);
3651         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3652         if (phba->sli_rev == LPFC_SLI_REV4)
3653                 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3654         else
3655                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3656
3657         ulp_command = get_job_cmnd(phba, saveq);
3658         ulp_status = get_job_ulpstatus(phba, saveq);
3659         ulp_word4 = get_job_word4(phba, saveq);
3660         ulp_context = get_job_ulpcontext(phba, saveq);
3661         if (phba->sli_rev == LPFC_SLI_REV4)
3662                 iotag = get_wqe_reqtag(saveq);
3663         else
3664                 iotag = saveq->iocb.ulpIoTag;
3665
3666         if (cmdiocbp) {
3667                 ulp_command = get_job_cmnd(phba, cmdiocbp);
3668                 if (cmdiocbp->cmd_cmpl) {
3669                         /*
3670                          * If an ELS command failed send an event to mgmt
3671                          * application.
3672                          */
3673                         if (ulp_status &&
3674                              (pring->ringno == LPFC_ELS_RING) &&
3675                              (ulp_command == CMD_ELS_REQUEST64_CR))
3676                                 lpfc_send_els_failure_event(phba,
3677                                         cmdiocbp, saveq);
3678
3679                         /*
3680                          * Post all ELS completions to the worker thread.
3681                          * All other are passed to the completion callback.
3682                          */
3683                         if (pring->ringno == LPFC_ELS_RING) {
3684                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3685                                     (cmdiocbp->cmd_flag &
3686                                                         LPFC_DRIVER_ABORTED)) {
3687                                         spin_lock_irqsave(&phba->hbalock,
3688                                                           iflag);
3689                                         cmdiocbp->cmd_flag &=
3690                                                 ~LPFC_DRIVER_ABORTED;
3691                                         spin_unlock_irqrestore(&phba->hbalock,
3692                                                                iflag);
3693                                         saveq->iocb.ulpStatus =
3694                                                 IOSTAT_LOCAL_REJECT;
3695                                         saveq->iocb.un.ulpWord[4] =
3696                                                 IOERR_SLI_ABORTED;
3697
3698                                         /* Firmware could still be in progress
3699                                          * of DMAing payload, so don't free data
3700                                          * buffer till after a hbeat.
3701                                          */
3702                                         spin_lock_irqsave(&phba->hbalock,
3703                                                           iflag);
3704                                         saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3705                                         spin_unlock_irqrestore(&phba->hbalock,
3706                                                                iflag);
3707                                 }
3708                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3709                                         if (saveq->cmd_flag &
3710                                             LPFC_EXCHANGE_BUSY) {
3711                                                 /* Set cmdiocb flag for the
3712                                                  * exchange busy so sgl (xri)
3713                                                  * will not be released until
3714                                                  * the abort xri is received
3715                                                  * from hba.
3716                                                  */
3717                                                 spin_lock_irqsave(
3718                                                         &phba->hbalock, iflag);
3719                                                 cmdiocbp->cmd_flag |=
3720                                                         LPFC_EXCHANGE_BUSY;
3721                                                 spin_unlock_irqrestore(
3722                                                         &phba->hbalock, iflag);
3723                                         }
3724                                         if (cmdiocbp->cmd_flag &
3725                                             LPFC_DRIVER_ABORTED) {
3726                                                 /*
3727                                                  * Clear LPFC_DRIVER_ABORTED
3728                                                  * bit in case it was driver
3729                                                  * initiated abort.
3730                                                  */
3731                                                 spin_lock_irqsave(
3732                                                         &phba->hbalock, iflag);
3733                                                 cmdiocbp->cmd_flag &=
3734                                                         ~LPFC_DRIVER_ABORTED;
3735                                                 spin_unlock_irqrestore(
3736                                                         &phba->hbalock, iflag);
3737                                                 set_job_ulpstatus(cmdiocbp,
3738                                                                   IOSTAT_LOCAL_REJECT);
3739                                                 set_job_ulpword4(cmdiocbp,
3740                                                                  IOERR_ABORT_REQUESTED);
3741                                                 /*
3742                                                  * For SLI4, irspiocb contains
3743                                                  * NO_XRI in sli_xritag, it
3744                                                  * shall not affect releasing
3745                                                  * sgl (xri) process.
3746                                                  */
3747                                                 set_job_ulpstatus(saveq,
3748                                                                   IOSTAT_LOCAL_REJECT);
3749                                                 set_job_ulpword4(saveq,
3750                                                                  IOERR_SLI_ABORTED);
3751                                                 spin_lock_irqsave(
3752                                                         &phba->hbalock, iflag);
3753                                                 saveq->cmd_flag |=
3754                                                         LPFC_DELAY_MEM_FREE;
3755                                                 spin_unlock_irqrestore(
3756                                                         &phba->hbalock, iflag);
3757                                         }
3758                                 }
3759                         }
3760                         cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3761                 } else
3762                         lpfc_sli_release_iocbq(phba, cmdiocbp);
3763         } else {
3764                 /*
3765                  * Unknown initiating command based on the response iotag.
3766                  * This could be the case on the ELS ring because of
3767                  * lpfc_els_abort().
3768                  */
3769                 if (pring->ringno != LPFC_ELS_RING) {
3770                         /*
3771                          * Ring <ringno> handler: unexpected completion IoTag
3772                          * <IoTag>
3773                          */
3774                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3775                                          "0322 Ring %d handler: "
3776                                          "unexpected completion IoTag x%x "
3777                                          "Data: x%x x%x x%x x%x\n",
3778                                          pring->ringno, iotag, ulp_status,
3779                                          ulp_word4, ulp_command, ulp_context);
3780                 }
3781         }
3782
3783         return rc;
3784 }
3785
3786 /**
3787  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3788  * @phba: Pointer to HBA context object.
3789  * @pring: Pointer to driver SLI ring object.
3790  *
3791  * This function is called from the iocb ring event handlers when
3792  * the put pointer is ahead of the get pointer for a ring. This function
3793  * signals an error attention condition to the worker thread, and the
3794  * worker thread will transition the HBA to the offline state.
3795  **/
3796 static void
3797 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3798 {
3799         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3800         /*
3801          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3802          * rsp ring <portRspMax>
3803          */
3804         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3805                         "0312 Ring %d handler: portRspPut %d "
3806                         "is bigger than rsp ring %d\n",
3807                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
3808                         pring->sli.sli3.numRiocb);
3809
3810         phba->link_state = LPFC_HBA_ERROR;
3811
3812         /*
3813          * All error attention handlers are posted to
3814          * worker thread
3815          */
3816         phba->work_ha |= HA_ERATT;
3817         phba->work_hs = HS_FFER3;
3818
3819         lpfc_worker_wake_up(phba);
3820
3821         return;
3822 }
3823
3824 /**
3825  * lpfc_poll_eratt - Error attention polling timer timeout handler
3826  * @t: Context to fetch pointer to address of HBA context object from.
3827  *
3828  * This function is invoked by the Error Attention polling timer when the
3829  * timer times out. It will check the SLI Error Attention register for
3830  * possible attention events. If so, it will post an Error Attention event
3831  * and wake up worker thread to process it. Otherwise, it will set up the
3832  * Error Attention polling timer for the next poll.
3833  **/
3834 void lpfc_poll_eratt(struct timer_list *t)
3835 {
3836         struct lpfc_hba *phba;
3837         uint32_t eratt = 0;
3838         uint64_t sli_intr, cnt;
3839
3840         phba = from_timer(phba, t, eratt_poll);
3841
3842         /* Here we also keep track of the hba's interrupts per second */
3843         sli_intr = phba->sli.slistat.sli_intr;
3844
3845         if (phba->sli.slistat.sli_prev_intr > sli_intr)
3846                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3847                         sli_intr);
3848         else
3849                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3850
3851         /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3852         do_div(cnt, phba->eratt_poll_interval);
3853         phba->sli.slistat.sli_ips = cnt;
3854
3855         phba->sli.slistat.sli_prev_intr = sli_intr;
3856
3857         /* Check chip HA register for error event */
3858         eratt = lpfc_sli_check_eratt(phba);
3859
3860         if (eratt)
3861                 /* Tell the worker thread there is work to do */
3862                 lpfc_worker_wake_up(phba);
3863         else
3864                 /* Restart the timer for next eratt poll */
3865                 mod_timer(&phba->eratt_poll,
3866                           jiffies +
3867                           msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3868         return;
3869 }
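
/*
 * Worked example of the interrupts-per-second bookkeeping above: with an
 * eratt_poll_interval of 3 seconds and 30000 interrupts taken since the
 * previous poll, do_div() leaves cnt = 30000 / 3, so sli_ips = 10000.
 * The u64 wraparound branch keeps the delta sane when sli_intr rolls over.
 */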
3870
3871
3872 /**
3873  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3874  * @phba: Pointer to HBA context object.
3875  * @pring: Pointer to driver SLI ring object.
3876  * @mask: Host attention register mask for this ring.
3877  *
3878  * This function is called from the interrupt context when there is a ring
3879  * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with the LE bit set, chaining all iocbs up to the one with
 * the LE bit set. The function calls the completion handler of the command
 * iocb if the response iocb indicates a completion for a command iocb or an
 * abort completion, and calls the lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
3886  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3887  * to check it explicitly.
3888  */
3889 int
3890 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3891                                 struct lpfc_sli_ring *pring, uint32_t mask)
3892 {
3893         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3894         IOCB_t *irsp = NULL;
3895         IOCB_t *entry = NULL;
3896         struct lpfc_iocbq *cmdiocbq = NULL;
3897         struct lpfc_iocbq rspiocbq;
3898         uint32_t status;
3899         uint32_t portRspPut, portRspMax;
3900         int rc = 1;
3901         lpfc_iocb_type type;
3902         unsigned long iflag;
3903         uint32_t rsp_cmpl = 0;
3904
3905         spin_lock_irqsave(&phba->hbalock, iflag);
3906         pring->stats.iocb_event++;
3907
3908         /*
3909          * The next available response entry should never exceed the maximum
3910          * entries.  If it does, treat it as an adapter hardware error.
3911          */
3912         portRspMax = pring->sli.sli3.numRiocb;
3913         portRspPut = le32_to_cpu(pgp->rspPutInx);
3914         if (unlikely(portRspPut >= portRspMax)) {
3915                 lpfc_sli_rsp_pointers_error(phba, pring);
3916                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3917                 return 1;
3918         }
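        /*
         * Only one context processes the FCP ring at a time; if another
         * caller is already in this handler, let it do the work.
         */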
3919         if (phba->fcp_ring_in_use) {
3920                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3921                 return 1;
3922         } else
3923                 phba->fcp_ring_in_use = 1;
3924
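        /* Order the ring entry reads after the rspPutInx read above */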
3925         rmb();
3926         while (pring->sli.sli3.rspidx != portRspPut) {
3927                 /*
3928                  * Fetch an entry off the ring and copy it into a local data
3929                  * structure.  The copy involves a byte-swap since the
3930                  * network byte order and pci byte orders are different.
3931                  */
3932                 entry = lpfc_resp_iocb(phba, pring);
3933                 phba->last_completion_time = jiffies;
3934
3935                 if (++pring->sli.sli3.rspidx >= portRspMax)
3936                         pring->sli.sli3.rspidx = 0;
3937
3938                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3939                                       (uint32_t *) &rspiocbq.iocb,
3940                                       phba->iocb_rsp_size);
3941                 INIT_LIST_HEAD(&(rspiocbq.list));
3942                 irsp = &rspiocbq.iocb;
3943
3944                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3945                 pring->stats.iocb_rsp++;
3946                 rsp_cmpl++;
3947
3948                 if (unlikely(irsp->ulpStatus)) {
3949                         /*
                         * If resource errors are reported from the HBA, reduce
                         * the queue depth of the SCSI device.
3952                          */
3953                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3954                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3955                              IOERR_NO_RESOURCES)) {
3956                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3957                                 phba->lpfc_rampdown_queue_depth(phba);
3958                                 spin_lock_irqsave(&phba->hbalock, iflag);
3959                         }
3960
3961                         /* Rsp ring <ringno> error: IOCB */
3962                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3963                                         "0336 Rsp Ring %d error: IOCB Data: "
3964                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3965                                         pring->ringno,
3966                                         irsp->un.ulpWord[0],
3967                                         irsp->un.ulpWord[1],
3968                                         irsp->un.ulpWord[2],
3969                                         irsp->un.ulpWord[3],
3970                                         irsp->un.ulpWord[4],
3971                                         irsp->un.ulpWord[5],
3972                                         *(uint32_t *)&irsp->un1,
3973                                         *((uint32_t *)&irsp->un1 + 1));
3974                 }
3975
3976                 switch (type) {
3977                 case LPFC_ABORT_IOCB:
3978                 case LPFC_SOL_IOCB:
3979                         /*
3980                          * Idle exchange closed via ABTS from port.  No iocb
3981                          * resources need to be recovered.
3982                          */
3983                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3984                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3985                                                 "0333 IOCB cmd 0x%x"
3986                                                 " processed. Skipping"
3987                                                 " completion\n",
3988                                                 irsp->ulpCommand);
3989                                 break;
3990                         }
3991
3992                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3993                                                          &rspiocbq);
3994                         if (unlikely(!cmdiocbq))
3995                                 break;
3996                         if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
3997                                 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
3998                         if (cmdiocbq->cmd_cmpl) {
3999                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4000                                 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4001                                 spin_lock_irqsave(&phba->hbalock, iflag);
4002                         }
4003                         break;
4004                 case LPFC_UNSOL_IOCB:
4005                         spin_unlock_irqrestore(&phba->hbalock, iflag);
4006                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4007                         spin_lock_irqsave(&phba->hbalock, iflag);
4008                         break;
4009                 default:
4010                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4011                                 char adaptermsg[LPFC_MAX_ADPTMSG];
4012                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4013                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4014                                        MAX_MSG_DATA);
4015                                 dev_warn(&((phba->pcidev)->dev),
4016                                          "lpfc%d: %s\n",
4017                                          phba->brd_no, adaptermsg);
4018                         } else {
4019                                 /* Unknown IOCB command */
4020                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4021                                                 "0334 Unknown IOCB command "
4022                                                 "Data: x%x, x%x x%x x%x x%x\n",
4023                                                 type, irsp->ulpCommand,
4024                                                 irsp->ulpStatus,
4025                                                 irsp->ulpIoTag,
4026                                                 irsp->ulpContext);
4027                         }
4028                         break;
4029                 }
4030
4031                 /*
4032                  * The response IOCB has been processed.  Update the ring
4033                  * pointer in SLIM.  If the port response put pointer has not
4034                  * been updated, sync the pgp->rspPutInx and fetch the new port
4035                  * response put pointer.
4036                  */
4037                 writel(pring->sli.sli3.rspidx,
4038                         &phba->host_gp[pring->ringno].rspGetInx);
4039
4040                 if (pring->sli.sli3.rspidx == portRspPut)
4041                         portRspPut = le32_to_cpu(pgp->rspPutInx);
4042         }
4043
4044         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4045                 pring->stats.iocb_rsp_full++;
4046                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4047                 writel(status, phba->CAregaddr);
4048                 readl(phba->CAregaddr);
4049         }
4050         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4051                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4052                 pring->stats.iocb_cmd_empty++;
4053
4054                 /* Force update of the local copy of cmdGetInx */
4055                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4056                 lpfc_sli_resume_iocb(phba, pring);
4057
4058                 if ((pring->lpfc_sli_cmd_available))
4059                         (pring->lpfc_sli_cmd_available) (phba, pring);
4060
4061         }
4062
4063         phba->fcp_ring_in_use = 0;
4064         spin_unlock_irqrestore(&phba->hbalock, iflag);
4065         return rc;
4066 }
4067
4068 /**
4069  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4070  * @phba: Pointer to HBA context object.
4071  * @pring: Pointer to driver SLI ring object.
4072  * @rspiocbp: Pointer to driver response IOCB object.
4073  *
4074  * This function is called from the worker thread when there is a slow-path
4075  * response IOCB to process. This function chains all the response iocbs until
4076  * seeing the iocb with the LE bit set. The function will call
4077  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4078  * completion of a command iocb. The function will call the
4079  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4080  * The function frees the resources or calls the completion handler if this
4081  * iocb is an abort completion. The function returns NULL when the response
4082  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4083  * this function shall chain the iocb on to the iocb_continueq and return the
4084  * response iocb passed in.
4085  **/
4086 static struct lpfc_iocbq *
4087 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4088                         struct lpfc_iocbq *rspiocbp)
4089 {
4090         struct lpfc_iocbq *saveq;
4091         struct lpfc_iocbq *cmdiocb;
4092         struct lpfc_iocbq *next_iocb;
4093         IOCB_t *irsp;
4094         uint32_t free_saveq;
4095         u8 cmd_type;
4096         lpfc_iocb_type type;
4097         unsigned long iflag;
4098         u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4099         u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4100         u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4101         int rc;
4102
4103         spin_lock_irqsave(&phba->hbalock, iflag);
        /* First add the response iocb to the continueq list */
4105         list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4106         pring->iocb_continueq_cnt++;
4107
4108         /*
4109          * By default, the driver expects to free all resources
4110          * associated with this iocb completion.
4111          */
4112         free_saveq = 1;
4113         saveq = list_get_first(&pring->iocb_continueq,
4114                                struct lpfc_iocbq, list);
4115         list_del_init(&pring->iocb_continueq);
4116         pring->iocb_continueq_cnt = 0;
4117
4118         pring->stats.iocb_rsp++;
4119
4120         /*
         * If resource errors are reported from the HBA, reduce
         * the queue depth of the SCSI device.
4123          */
4124         if (ulp_status == IOSTAT_LOCAL_REJECT &&
4125             ((ulp_word4 & IOERR_PARAM_MASK) ==
4126              IOERR_NO_RESOURCES)) {
4127                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4128                 phba->lpfc_rampdown_queue_depth(phba);
4129                 spin_lock_irqsave(&phba->hbalock, iflag);
4130         }
4131
4132         if (ulp_status) {
4133                 /* Rsp ring <ringno> error: IOCB */
4134                 if (phba->sli_rev < LPFC_SLI_REV4) {
4135                         irsp = &rspiocbp->iocb;
4136                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4137                                         "0328 Rsp Ring %d error: ulp_status x%x "
4138                                         "IOCB Data: "
4139                                         "x%08x x%08x x%08x x%08x "
4140                                         "x%08x x%08x x%08x x%08x "
4141                                         "x%08x x%08x x%08x x%08x "
4142                                         "x%08x x%08x x%08x x%08x\n",
4143                                         pring->ringno, ulp_status,
4144                                         get_job_ulpword(rspiocbp, 0),
4145                                         get_job_ulpword(rspiocbp, 1),
4146                                         get_job_ulpword(rspiocbp, 2),
4147                                         get_job_ulpword(rspiocbp, 3),
4148                                         get_job_ulpword(rspiocbp, 4),
4149                                         get_job_ulpword(rspiocbp, 5),
4150                                         *(((uint32_t *)irsp) + 6),
4151                                         *(((uint32_t *)irsp) + 7),
4152                                         *(((uint32_t *)irsp) + 8),
4153                                         *(((uint32_t *)irsp) + 9),
4154                                         *(((uint32_t *)irsp) + 10),
4155                                         *(((uint32_t *)irsp) + 11),
4156                                         *(((uint32_t *)irsp) + 12),
4157                                         *(((uint32_t *)irsp) + 13),
4158                                         *(((uint32_t *)irsp) + 14),
4159                                         *(((uint32_t *)irsp) + 15));
4160                 } else {
4161                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4162                                         "0321 Rsp Ring %d error: "
4163                                         "IOCB Data: "
4164                                         "x%x x%x x%x x%x\n",
4165                                         pring->ringno,
4166                                         rspiocbp->wcqe_cmpl.word0,
4167                                         rspiocbp->wcqe_cmpl.total_data_placed,
4168                                         rspiocbp->wcqe_cmpl.parameter,
4169                                         rspiocbp->wcqe_cmpl.word3);
4170                 }
4171         }
4172
4173
4174         /*
4175          * Fetch the iocb command type and call the correct completion
4176          * routine. Solicited and Unsolicited IOCBs on the ELS ring
4177          * get freed back to the lpfc_iocb_list by the discovery
4178          * kernel thread.
4179          */
4180         cmd_type = ulp_command & CMD_IOCB_MASK;
4181         type = lpfc_sli_iocb_cmd_type(cmd_type);
4182         switch (type) {
4183         case LPFC_SOL_IOCB:
4184                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4185                 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4186                 spin_lock_irqsave(&phba->hbalock, iflag);
4187                 break;
4188         case LPFC_UNSOL_IOCB:
4189                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4190                 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4191                 spin_lock_irqsave(&phba->hbalock, iflag);
4192                 if (!rc)
4193                         free_saveq = 0;
4194                 break;
4195         case LPFC_ABORT_IOCB:
4196                 cmdiocb = NULL;
4197                 if (ulp_command != CMD_XRI_ABORTED_CX)
4198                         cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4199                                                         saveq);
4200                 if (cmdiocb) {
4201                         /* Call the specified completion routine */
4202                         if (cmdiocb->cmd_cmpl) {
4203                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4204                                 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4205                                 spin_lock_irqsave(&phba->hbalock, iflag);
4206                         } else {
4207                                 __lpfc_sli_release_iocbq(phba, cmdiocb);
4208                         }
4209                 }
4210                 break;
4211         case LPFC_UNKNOWN_IOCB:
4212                 if (ulp_command == CMD_ADAPTER_MSG) {
4213                         char adaptermsg[LPFC_MAX_ADPTMSG];
4214
4215                         memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4216                         memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4217                                MAX_MSG_DATA);
4218                         dev_warn(&((phba->pcidev)->dev),
4219                                  "lpfc%d: %s\n",
4220                                  phba->brd_no, adaptermsg);
4221                 } else {
4222                         /* Unknown command */
4223                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4224                                         "0335 Unknown IOCB "
4225                                         "command Data: x%x "
4226                                         "x%x x%x x%x\n",
4227                                         ulp_command,
4228                                         ulp_status,
4229                                         get_wqe_reqtag(rspiocbp),
4230                                         get_job_ulpcontext(phba, rspiocbp));
4231                 }
4232                 break;
4233         }
4234
4235         if (free_saveq) {
4236                 list_for_each_entry_safe(rspiocbp, next_iocb,
4237                                          &saveq->list, list) {
4238                         list_del_init(&rspiocbp->list);
4239                         __lpfc_sli_release_iocbq(phba, rspiocbp);
4240                 }
4241                 __lpfc_sli_release_iocbq(phba, saveq);
4242         }
4243         rspiocbp = NULL;
4244         spin_unlock_irqrestore(&phba->hbalock, iflag);
4245         return rspiocbp;
4246 }
4247
4248 /**
4249  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4250  * @phba: Pointer to HBA context object.
4251  * @pring: Pointer to driver SLI ring object.
4252  * @mask: Host attention register mask for this ring.
4253  *
4254  * This routine wraps the actual slow_ring event process routine from the
4255  * API jump table function pointer from the lpfc_hba struct.
4256  **/
4257 void
4258 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4259                                 struct lpfc_sli_ring *pring, uint32_t mask)
4260 {
4261         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4262 }
4263
4264 /**
4265  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4266  * @phba: Pointer to HBA context object.
4267  * @pring: Pointer to driver SLI ring object.
4268  * @mask: Host attention register mask for this ring.
4269  *
4270  * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function
 * removes each response iocb from the response ring and calls the handle
4273  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4274  **/
4275 static void
4276 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4277                                    struct lpfc_sli_ring *pring, uint32_t mask)
4278 {
4279         struct lpfc_pgp *pgp;
4280         IOCB_t *entry;
4281         IOCB_t *irsp = NULL;
4282         struct lpfc_iocbq *rspiocbp = NULL;
4283         uint32_t portRspPut, portRspMax;
4284         unsigned long iflag;
4285         uint32_t status;
4286
4287         pgp = &phba->port_gp[pring->ringno];
4288         spin_lock_irqsave(&phba->hbalock, iflag);
4289         pring->stats.iocb_event++;
4290
4291         /*
4292          * The next available response entry should never exceed the maximum
4293          * entries.  If it does, treat it as an adapter hardware error.
4294          */
4295         portRspMax = pring->sli.sli3.numRiocb;
4296         portRspPut = le32_to_cpu(pgp->rspPutInx);
4297         if (portRspPut >= portRspMax) {
4298                 /*
4299                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4300                  * rsp ring <portRspMax>
4301                  */
4302                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4303                                 "0303 Ring %d handler: portRspPut %d "
4304                                 "is bigger than rsp ring %d\n",
4305                                 pring->ringno, portRspPut, portRspMax);
4306
4307                 phba->link_state = LPFC_HBA_ERROR;
4308                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4309
4310                 phba->work_hs = HS_FFER3;
4311                 lpfc_handle_eratt(phba);
4312
4313                 return;
4314         }
4315
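        /* Ensure ring entry reads are not reordered before the rspPutInx read */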
4316         rmb();
4317         while (pring->sli.sli3.rspidx != portRspPut) {
4318                 /*
4319                  * Build a completion list and call the appropriate handler.
                 * The process is to get the next available response iocb, get
                 * a free iocb from the list, copy the response data into the
                 * free iocb, insert it into the continuation list, and update
                 * the next response index to slim.  This process makes response
                 * iocbs in the ring available to DMA as fast as possible but
                 * pays a penalty for a copy operation.  Since the iocb is
                 * only 32 bytes, this penalty is considered small relative to
                 * the PCI reads for register values and a slim write.  When
                 * the ulpLe field is set, the entire command has been
                 * received.
4330                  */
4331                 entry = lpfc_resp_iocb(phba, pring);
4332
4333                 phba->last_completion_time = jiffies;
4334                 rspiocbp = __lpfc_sli_get_iocbq(phba);
4335                 if (rspiocbp == NULL) {
4336                         printk(KERN_ERR "%s: out of buffers! Failing "
4337                                "completion.\n", __func__);
4338                         break;
4339                 }
4340
4341                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4342                                       phba->iocb_rsp_size);
4343                 irsp = &rspiocbp->iocb;
4344
4345                 if (++pring->sli.sli3.rspidx >= portRspMax)
4346                         pring->sli.sli3.rspidx = 0;
4347
4348                 if (pring->ringno == LPFC_ELS_RING) {
4349                         lpfc_debugfs_slow_ring_trc(phba,
4350                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
4351                                 *(((uint32_t *) irsp) + 4),
4352                                 *(((uint32_t *) irsp) + 6),
4353                                 *(((uint32_t *) irsp) + 7));
4354                 }
4355
4356                 writel(pring->sli.sli3.rspidx,
4357                         &phba->host_gp[pring->ringno].rspGetInx);
4358
4359                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4360                 /* Handle the response IOCB */
4361                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4362                 spin_lock_irqsave(&phba->hbalock, iflag);
4363
4364                 /*
4365                  * If the port response put pointer has not been updated, sync
                 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4367                  * response put pointer.
4368                  */
4369                 if (pring->sli.sli3.rspidx == portRspPut) {
4370                         portRspPut = le32_to_cpu(pgp->rspPutInx);
4371                 }
4372         } /* while (pring->sli.sli3.rspidx != portRspPut) */
4373
4374         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4375                 /* At least one response entry has been freed */
4376                 pring->stats.iocb_rsp_full++;
4377                 /* SET RxRE_RSP in Chip Att register */
4378                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4379                 writel(status, phba->CAregaddr);
4380                 readl(phba->CAregaddr); /* flush */
4381         }
4382         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4383                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4384                 pring->stats.iocb_cmd_empty++;
4385
4386                 /* Force update of the local copy of cmdGetInx */
4387                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4388                 lpfc_sli_resume_iocb(phba, pring);
4389
4390                 if ((pring->lpfc_sli_cmd_available))
4391                         (pring->lpfc_sli_cmd_available) (phba, pring);
4392
4393         }
4394
4395         spin_unlock_irqrestore(&phba->hbalock, iflag);
4396         return;
4397 }
4398
4399 /**
4400  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4401  * @phba: Pointer to HBA context object.
4402  * @pring: Pointer to driver SLI ring object.
4403  * @mask: Host attention register mask for this ring.
4404  *
4405  * This function is called from the worker thread when there is a pending
4406  * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function removes each
4408  * response iocb from the response worker queue and calls the handle
4409  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4410  **/
4411 static void
4412 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4413                                    struct lpfc_sli_ring *pring, uint32_t mask)
4414 {
4415         struct lpfc_iocbq *irspiocbq;
4416         struct hbq_dmabuf *dmabuf;
4417         struct lpfc_cq_event *cq_event;
4418         unsigned long iflag;
4419         int count = 0;
4420
4421         spin_lock_irqsave(&phba->hbalock, iflag);
4422         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4423         spin_unlock_irqrestore(&phba->hbalock, iflag);
4424         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4425                 /* Get the response iocb from the head of work queue */
4426                 spin_lock_irqsave(&phba->hbalock, iflag);
4427                 list_remove_head(&phba->sli4_hba.sp_queue_event,
4428                                  cq_event, struct lpfc_cq_event, list);
4429                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4430
4431                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4432                 case CQE_CODE_COMPL_WQE:
4433                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4434                                                  cq_event);
4435                         /* Translate ELS WCQE to response IOCBQ */
4436                         irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4437                                                                       irspiocbq);
4438                         if (irspiocbq)
4439                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
4440                                                            irspiocbq);
4441                         count++;
4442                         break;
4443                 case CQE_CODE_RECEIVE:
4444                 case CQE_CODE_RECEIVE_V1:
4445                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
4446                                               cq_event);
4447                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
4448                         count++;
4449                         break;
4450                 default:
4451                         break;
4452                 }
4453
4454                 /* Limit the number of events to 64 to avoid soft lockups */
4455                 if (count == 64)
4456                         break;
4457         }
4458 }
4459
4460 /**
4461  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4462  * @phba: Pointer to HBA context object.
4463  * @pring: Pointer to driver SLI ring object.
4464  *
4465  * This function aborts all iocbs in the given ring and frees all the iocb
4466  * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4468  * the return of this function. The caller is not required to hold any locks.
4469  **/
4470 void
4471 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4472 {
4473         LIST_HEAD(completions);
4474         struct lpfc_iocbq *iocb, *next_iocb;
4475
4476         if (pring->ringno == LPFC_ELS_RING) {
4477                 lpfc_fabric_abort_hba(phba);
4478         }
4479
4480         /* Error everything on txq and txcmplq
4481          * First do the txq.
4482          */
4483         if (phba->sli_rev >= LPFC_SLI_REV4) {
4484                 spin_lock_irq(&pring->ring_lock);
4485                 list_splice_init(&pring->txq, &completions);
4486                 pring->txq_cnt = 0;
4487                 spin_unlock_irq(&pring->ring_lock);
4488
4489                 spin_lock_irq(&phba->hbalock);
4490                 /* Next issue ABTS for everything on the txcmplq */
4491                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4492                         lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4493                 spin_unlock_irq(&phba->hbalock);
4494         } else {
4495                 spin_lock_irq(&phba->hbalock);
4496                 list_splice_init(&pring->txq, &completions);
4497                 pring->txq_cnt = 0;
4498
4499                 /* Next issue ABTS for everything on the txcmplq */
4500                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4501                         lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4502                 spin_unlock_irq(&phba->hbalock);
4503         }
4504         /* Make sure HBA is alive */
4505         lpfc_issue_hb_tmo(phba);
4506
4507         /* Cancel all the IOCBs from the completions list */
4508         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4509                               IOERR_SLI_ABORTED);
4510 }
4511
4512 /**
4513  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4514  * @phba: Pointer to HBA context object.
4515  *
4516  * This function aborts all iocbs in FCP rings and frees all the iocb
4517  * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4519  * the return of this function. The caller is not required to hold any locks.
4520  **/
4521 void
4522 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4523 {
4524         struct lpfc_sli *psli = &phba->sli;
4525         struct lpfc_sli_ring  *pring;
4526         uint32_t i;
4527
4528         /* Look on all the FCP Rings for the iotag */
4529         if (phba->sli_rev >= LPFC_SLI_REV4) {
4530                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4531                         pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4532                         lpfc_sli_abort_iocb_ring(phba, pring);
4533                 }
4534         } else {
4535                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4536                 lpfc_sli_abort_iocb_ring(phba, pring);
4537         }
4538 }
4539
4540 /**
4541  * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4542  * @phba: Pointer to HBA context object.
4543  *
4544  * This function flushes all iocbs in the IO ring and frees all the iocb
4545  * objects in txq and txcmplq. This function will not issue abort iocbs
 * for all the iocb commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4548  * slot has been permanently disabled.
4549  **/
4550 void
4551 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4552 {
4553         LIST_HEAD(txq);
4554         LIST_HEAD(txcmplq);
4555         struct lpfc_sli *psli = &phba->sli;
4556         struct lpfc_sli_ring  *pring;
4557         uint32_t i;
4558         struct lpfc_iocbq *piocb, *next_iocb;
4559
4560         spin_lock_irq(&phba->hbalock);
4561         if (phba->hba_flag & HBA_IOQ_FLUSH ||
4562             !phba->sli4_hba.hdwq) {
4563                 spin_unlock_irq(&phba->hbalock);
4564                 return;
4565         }
4566         /* Indicate the I/O queues are flushed */
4567         phba->hba_flag |= HBA_IOQ_FLUSH;
4568         spin_unlock_irq(&phba->hbalock);
4569
4570         /* Look on all the FCP Rings for the iotag */
4571         if (phba->sli_rev >= LPFC_SLI_REV4) {
4572                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4573                         pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4574
4575                         spin_lock_irq(&pring->ring_lock);
4576                         /* Retrieve everything on txq */
4577                         list_splice_init(&pring->txq, &txq);
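                        /*
                         * Clear LPFC_IO_ON_TXCMPLQ so completion handling
                         * does not try to remove these iocbs from the
                         * txcmplq a second time.
                         */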
4578                         list_for_each_entry_safe(piocb, next_iocb,
4579                                                  &pring->txcmplq, list)
4580                                 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4581                         /* Retrieve everything on the txcmplq */
4582                         list_splice_init(&pring->txcmplq, &txcmplq);
4583                         pring->txq_cnt = 0;
4584                         pring->txcmplq_cnt = 0;
4585                         spin_unlock_irq(&pring->ring_lock);
4586
4587                         /* Flush the txq */
4588                         lpfc_sli_cancel_iocbs(phba, &txq,
4589                                               IOSTAT_LOCAL_REJECT,
4590                                               IOERR_SLI_DOWN);
4591                         /* Flush the txcmplq */
4592                         lpfc_sli_cancel_iocbs(phba, &txcmplq,
4593                                               IOSTAT_LOCAL_REJECT,
4594                                               IOERR_SLI_DOWN);
4595                         if (unlikely(pci_channel_offline(phba->pcidev)))
4596                                 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4597                 }
4598         } else {
4599                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4600
4601                 spin_lock_irq(&phba->hbalock);
4602                 /* Retrieve everything on txq */
4603                 list_splice_init(&pring->txq, &txq);
4604                 list_for_each_entry_safe(piocb, next_iocb,
4605                                          &pring->txcmplq, list)
4606                         piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4607                 /* Retrieve everything on the txcmplq */
4608                 list_splice_init(&pring->txcmplq, &txcmplq);
4609                 pring->txq_cnt = 0;
4610                 pring->txcmplq_cnt = 0;
4611                 spin_unlock_irq(&phba->hbalock);
4612
4613                 /* Flush the txq */
4614                 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4615                                       IOERR_SLI_DOWN);
                /* Flush the txcmplq */
4617                 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4618                                       IOERR_SLI_DOWN);
4619         }
4620 }
4621
4622 /**
4623  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4624  * @phba: Pointer to HBA context object.
4625  * @mask: Bit mask to be checked.
4626  *
 * This function reads the host status register and compares it
 * with the provided bit mask to check if the HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete the restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise it
 * returns zero.
4634  **/
4635 static int
4636 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4637 {
4638         uint32_t status;
4639         int i = 0;
4640         int retval = 0;
4641
4642         /* Read the HBA Host Status Register */
4643         if (lpfc_readl(phba->HSregaddr, &status))
4644                 return 1;
4645
4646         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4647
4648         /*
         * Check the status register every 10ms for 5 retries, then every
         * 500ms for 5, then every 2.5 sec for 5, then reset the board and
         * check every 2.5 sec for 5 more.
         * Break out of the loop if errors occurred during init.
4653          */
4654         while (((status & mask) != mask) &&
4655                !(status & HS_FFERM) &&
4656                i++ < 20) {
4657
4658                 if (i <= 5)
4659                         msleep(10);
4660                 else if (i <= 10)
4661                         msleep(500);
4662                 else
4663                         msleep(2500);
4664
4665                 if (i == 15) {
4666                                 /* Do post */
4667                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4668                         lpfc_sli_brdrestart(phba);
4669                 }
4670                 /* Read the HBA Host Status Register */
4671                 if (lpfc_readl(phba->HSregaddr, &status)) {
4672                         retval = 1;
4673                         break;
4674                 }
4675         }
4676
4677         /* Check to see if any errors occurred during init */
4678         if ((status & HS_FFERM) || (i >= 20)) {
4679                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4680                                 "2751 Adapter failed to restart, "
4681                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4682                                 status,
4683                                 readl(phba->MBslimaddr + 0xa8),
4684                                 readl(phba->MBslimaddr + 0xac));
4685                 phba->link_state = LPFC_HBA_ERROR;
4686                 retval = 1;
4687         }
4688
4689         return retval;
4690 }
4691
4692 /**
4693  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4694  * @phba: Pointer to HBA context object.
4695  * @mask: Bit mask to be checked.
4696  *
 * This function checks the POST status register to see if the HBA is
 * ready. If the HBA is not ready, the function resets the HBA PCI
 * function and checks the status again. The function returns 1 when the
 * HBA fails to become ready, otherwise it returns zero.
4702  **/
4703 static int
4704 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4705 {
4706         uint32_t status;
4707         int retval = 0;
4708
4709         /* Read the HBA Host Status Register */
4710         status = lpfc_sli4_post_status_check(phba);
4711
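        /* Port not ready: reset the PCI function once and check again */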
4712         if (status) {
4713                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4714                 lpfc_sli_brdrestart(phba);
4715                 status = lpfc_sli4_post_status_check(phba);
4716         }
4717
4718         /* Check to see if any errors occurred during init */
4719         if (status) {
4720                 phba->link_state = LPFC_HBA_ERROR;
4721                 retval = 1;
4722         } else
4723                 phba->sli4_hba.intr_enable = 0;
4724
4725         phba->hba_flag &= ~HBA_SETUP;
4726         return retval;
4727 }
4728
4729 /**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4731  * @phba: Pointer to HBA context object.
4732  * @mask: Bit mask to be checked.
4733  *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4735  * from the API jump table function pointer from the lpfc_hba struct.
4736  **/
4737 int
4738 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4739 {
4740         return phba->lpfc_sli_brdready(phba, mask);
4741 }
4742
4743 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4744
4745 /**
4746  * lpfc_reset_barrier - Make HBA ready for HBA reset
4747  * @phba: Pointer to HBA context object.
4748  *
4749  * This function is called before resetting an HBA. This function is called
4750  * with hbalock held and requests HBA to quiesce DMAs before a reset.
4751  **/
4752 void lpfc_reset_barrier(struct lpfc_hba *phba)
4753 {
4754         uint32_t __iomem *resp_buf;
4755         uint32_t __iomem *mbox_buf;
4756         volatile uint32_t mbox;
4757         uint32_t hc_copy, ha_copy, resp_data;
4758         int  i;
4759         uint8_t hdrtype;
4760
4761         lockdep_assert_held(&phba->hbalock);
4762
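        /* This quiesce barrier only applies to multi-function (header
         * type 0x80) Helios and Thor adapters.
         */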
4763         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4764         if (hdrtype != 0x80 ||
4765             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4766              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4767                 return;
4768
4769         /*
4770          * Tell the other part of the chip to suspend temporarily all
4771          * its DMA activity.
4772          */
4773         resp_buf = phba->MBslimaddr;
4774
4775         /* Disable the error attention */
4776         if (lpfc_readl(phba->HCregaddr, &hc_copy))
4777                 return;
4778         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4779         readl(phba->HCregaddr); /* flush */
4780         phba->link_flag |= LS_IGNORE_ERATT;
4781
4782         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4783                 return;
4784         if (ha_copy & HA_ERATT) {
4785                 /* Clear Chip error bit */
4786                 writel(HA_ERATT, phba->HAregaddr);
4787                 phba->pport->stopped = 1;
4788         }
4789
4790         mbox = 0;
4791         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4792         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4793
4794         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4795         mbox_buf = phba->MBslimaddr;
4796         writel(mbox, mbox_buf);
4797
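        /*
         * Poll for up to ~50ms; the chip acknowledges the quiesce request
         * by overwriting the test pattern with its one's complement.
         */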
4798         for (i = 0; i < 50; i++) {
4799                 if (lpfc_readl((resp_buf + 1), &resp_data))
4800                         return;
4801                 if (resp_data != ~(BARRIER_TEST_PATTERN))
4802                         mdelay(1);
4803                 else
4804                         break;
4805         }
4806         resp_data = 0;
4807         if (lpfc_readl((resp_buf + 1), &resp_data))
4808                 return;
4809         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4810                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4811                     phba->pport->stopped)
4812                         goto restore_hc;
4813                 else
4814                         goto clear_errat;
4815         }
4816
4817         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4818         resp_data = 0;
4819         for (i = 0; i < 500; i++) {
4820                 if (lpfc_readl(resp_buf, &resp_data))
4821                         return;
4822                 if (resp_data != mbox)
4823                         mdelay(1);
4824                 else
4825                         break;
4826         }
4827
4828 clear_errat:
4829
4830         while (++i < 500) {
4831                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4832                         return;
4833                 if (!(ha_copy & HA_ERATT))
4834                         mdelay(1);
4835                 else
4836                         break;
4837         }
4838
4839         if (readl(phba->HAregaddr) & HA_ERATT) {
4840                 writel(HA_ERATT, phba->HAregaddr);
4841                 phba->pport->stopped = 1;
4842         }
4843
4844 restore_hc:
4845         phba->link_flag &= ~LS_IGNORE_ERATT;
4846         writel(hc_copy, phba->HCregaddr);
4847         readl(phba->HCregaddr); /* flush */
4848 }
4849
4850 /**
4851  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4852  * @phba: Pointer to HBA context object.
4853  *
4854  * This function issues a kill_board mailbox command and waits for
4855  * the error attention interrupt. This function is called for stopping
4856  * the firmware processing. The caller is not required to hold any
4857  * locks. This function calls lpfc_hba_down_post function to free
4858  * any pending commands after the kill. The function will return 1 when it
 * fails to kill the board, else it will return 0.
4860  **/
4861 int
4862 lpfc_sli_brdkill(struct lpfc_hba *phba)
4863 {
4864         struct lpfc_sli *psli;
4865         LPFC_MBOXQ_t *pmb;
4866         uint32_t status;
4867         uint32_t ha_copy;
4868         int retval;
4869         int i = 0;
4870
4871         psli = &phba->sli;
4872
4873         /* Kill HBA */
4874         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4875                         "0329 Kill HBA Data: x%x x%x\n",
4876                         phba->pport->port_state, psli->sli_flag);
4877
4878         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4879         if (!pmb)
4880                 return 1;
4881
4882         /* Disable the error attention */
4883         spin_lock_irq(&phba->hbalock);
4884         if (lpfc_readl(phba->HCregaddr, &status)) {
4885                 spin_unlock_irq(&phba->hbalock);
4886                 mempool_free(pmb, phba->mbox_mem_pool);
4887                 return 1;
4888         }
4889         status &= ~HC_ERINT_ENA;
4890         writel(status, phba->HCregaddr);
4891         readl(phba->HCregaddr); /* flush */
4892         phba->link_flag |= LS_IGNORE_ERATT;
4893         spin_unlock_irq(&phba->hbalock);
4894
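        /* Issue the KILL_BOARD mailbox command without waiting for completion */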
4895         lpfc_kill_board(phba, pmb);
4896         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4897         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4898
4899         if (retval != MBX_SUCCESS) {
4900                 if (retval != MBX_BUSY)
4901                         mempool_free(pmb, phba->mbox_mem_pool);
4902                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4903                                 "2752 KILL_BOARD command failed retval %d\n",
4904                                 retval);
4905                 spin_lock_irq(&phba->hbalock);
4906                 phba->link_flag &= ~LS_IGNORE_ERATT;
4907                 spin_unlock_irq(&phba->hbalock);
4908                 return 1;
4909         }
4910
4911         spin_lock_irq(&phba->hbalock);
4912         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4913         spin_unlock_irq(&phba->hbalock);
4914
4915         mempool_free(pmb, phba->mbox_mem_pool);
4916
4917         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4918          * attention every 100ms for 3 seconds. If we don't get ERATT after
4919          * 3 seconds we still set HBA_ERROR state because the status of the
4920          * board is now undefined.
4921          */
4922         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4923                 return 1;
4924         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4925                 mdelay(100);
4926                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4927                         return 1;
4928         }
4929
4930         del_timer_sync(&psli->mbox_tmo);
4931         if (ha_copy & HA_ERATT) {
4932                 writel(HA_ERATT, phba->HAregaddr);
4933                 phba->pport->stopped = 1;
4934         }
4935         spin_lock_irq(&phba->hbalock);
4936         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4937         psli->mbox_active = NULL;
4938         phba->link_flag &= ~LS_IGNORE_ERATT;
4939         spin_unlock_irq(&phba->hbalock);
4940
4941         lpfc_hba_down_post(phba);
4942         phba->link_state = LPFC_HBA_ERROR;
4943
4944         return ha_copy & HA_ERATT ? 0 : 1;
4945 }
4946
4947 /**
4948  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4949  * @phba: Pointer to HBA context object.
4950  *
4951  * This function resets the HBA by writing HC_INITFF to the control
4952  * register. After the HBA resets, this function resets all the iocb ring
4953  * indices. This function disables PCI layer parity checking during
4954  * the reset.
4955  * This function returns 0 always.
4956  * The caller is not required to hold any locks.
4957  **/
4958 int
4959 lpfc_sli_brdreset(struct lpfc_hba *phba)
4960 {
4961         struct lpfc_sli *psli;
4962         struct lpfc_sli_ring *pring;
4963         uint16_t cfg_value;
4964         int i;
4965
4966         psli = &phba->sli;
4967
4968         /* Reset HBA */
4969         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4970                         "0325 Reset HBA Data: x%x x%x\n",
4971                         (phba->pport) ? phba->pport->port_state : 0,
4972                         psli->sli_flag);
4973
4974         /* perform board reset */
4975         phba->fc_eventTag = 0;
4976         phba->link_events = 0;
4977         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4978         if (phba->pport) {
4979                 phba->pport->fc_myDID = 0;
4980                 phba->pport->fc_prevDID = 0;
4981         }
4982
4983         /* Turn off parity checking and serr during the physical reset */
4984         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4985                 return -EIO;
4986
4987         pci_write_config_word(phba->pcidev, PCI_COMMAND,
4988                               (cfg_value &
4989                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4990
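        /* Mark SLI inactive and stop link attention processing across the reset */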
4991         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4992
4993         /* Now toggle INITFF bit in the Host Control Register */
4994         writel(HC_INITFF, phba->HCregaddr);
4995         mdelay(1);
4996         readl(phba->HCregaddr); /* flush */
4997         writel(0, phba->HCregaddr);
4998         readl(phba->HCregaddr); /* flush */
4999
5000         /* Restore PCI cmd register */
5001         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5002
5003         /* Initialize relevant SLI info */
5004         for (i = 0; i < psli->num_rings; i++) {
5005                 pring = &psli->sli3_ring[i];
5006                 pring->flag = 0;
5007                 pring->sli.sli3.rspidx = 0;
5008                 pring->sli.sli3.next_cmdidx  = 0;
5009                 pring->sli.sli3.local_getidx = 0;
5010                 pring->sli.sli3.cmdidx = 0;
5011                 pring->missbufcnt = 0;
5012         }
5013
5014         phba->link_state = LPFC_WARM_START;
5015         return 0;
5016 }
5017
5018 /**
5019  * lpfc_sli4_brdreset - Reset a sli-4 HBA
5020  * @phba: Pointer to HBA context object.
5021  *
5022  * This function resets a SLI4 HBA. This function disables PCI layer parity
 * checking while it resets the device. The caller is not required to hold
5024  * any locks.
5025  *
5026  * This function returns 0 on success else returns negative error code.
5027  **/
5028 int
5029 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5030 {
5031         struct lpfc_sli *psli = &phba->sli;
5032         uint16_t cfg_value;
5033         int rc = 0;
5034
5035         /* Reset HBA */
5036         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5037                         "0295 Reset HBA Data: x%x x%x x%x\n",
5038                         phba->pport->port_state, psli->sli_flag,
5039                         phba->hba_flag);
5040
5041         /* perform board reset */
5042         phba->fc_eventTag = 0;
5043         phba->link_events = 0;
5044         phba->pport->fc_myDID = 0;
5045         phba->pport->fc_prevDID = 0;
5046         phba->hba_flag &= ~HBA_SETUP;
5047
5048         spin_lock_irq(&phba->hbalock);
5049         psli->sli_flag &= ~(LPFC_PROCESS_LA);
5050         phba->fcf.fcf_flag = 0;
5051         spin_unlock_irq(&phba->hbalock);
5052
5053         /* Now physically reset the device */
5054         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5055                         "0389 Performing PCI function reset!\n");
5056
5057         /* Turn off parity checking and serr during the physical reset */
5058         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5059                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5060                                 "3205 PCI read Config failed\n");
5061                 return -EIO;
5062         }
5063
5064         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5065                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5066
5067         /* Perform FCoE PCI function reset before freeing queue memory */
5068         rc = lpfc_pci_function_reset(phba);
5069
5070         /* Restore PCI cmd register */
5071         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5072
5073         return rc;
5074 }
5075
5076 /**
5077  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5078  * @phba: Pointer to HBA context object.
5079  *
5080  * This function is called in the SLI initialization code path to
5081  * restart the HBA. The caller is not required to hold any lock.
5082  * This function writes MBX_RESTART mailbox command to the SLIM and
5083  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5084  * function to free any pending commands. The function enables
5085  * POST only during the first initialization. The function returns zero.
5086  * The function does not guarantee completion of MBX_RESTART mailbox
5087  * command before the return of this function.
5088  **/
5089 static int
5090 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5091 {
5092         MAILBOX_t *mb;
5093         struct lpfc_sli *psli;
5094         volatile uint32_t word0;
5095         void __iomem *to_slim;
5096         uint32_t hba_aer_enabled;
5097
5098         spin_lock_irq(&phba->hbalock);
5099
        /* Save the PCIe device Advanced Error Reporting (AER) state */
5101         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5102
5103         psli = &phba->sli;
5104
5105         /* Restart HBA */
5106         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5107                         "0337 Restart HBA Data: x%x x%x\n",
5108                         (phba->pport) ? phba->pport->port_state : 0,
5109                         psli->sli_flag);
5110
5111         word0 = 0;
5112         mb = (MAILBOX_t *) &word0;
5113         mb->mbxCommand = MBX_RESTART;
5114         mb->mbxHc = 1;
5115
5116         lpfc_reset_barrier(phba);
5117
5118         to_slim = phba->MBslimaddr;
5119         writel(*(uint32_t *) mb, to_slim);
5120         readl(to_slim); /* flush */
5121
        /* Only skip post after fc_ffinit is completed. Note that word0 is
         * reused here to build SLIM word1, which carries the skip-post flag.
         */
        if (phba->pport && phba->pport->port_state)
                word0 = 1;      /* This is really setting up word1 */
        else
                word0 = 0;      /* This is really setting up word1 */
5127         to_slim = phba->MBslimaddr + sizeof (uint32_t);
5128         writel(*(uint32_t *) mb, to_slim);
5129         readl(to_slim); /* flush */
5130
5131         lpfc_sli_brdreset(phba);
5132         if (phba->pport)
5133                 phba->pport->stopped = 0;
5134         phba->link_state = LPFC_INIT_START;
5135         phba->hba_flag = 0;
5136         spin_unlock_irq(&phba->hbalock);
5137
5138         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5139         psli->stats_start = ktime_get_seconds();
5140
5141         /* Give the INITFF and Post time to settle. */
5142         mdelay(100);
5143
5144         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5145         if (hba_aer_enabled)
5146                 pci_disable_pcie_error_reporting(phba->pcidev);
5147
5148         lpfc_hba_down_post(phba);
5149
5150         return 0;
5151 }
5152
5153 /**
5154  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5155  * @phba: Pointer to HBA context object.
5156  *
5157  * This function is called in the SLI initialization code path to restart
5158  * a SLI4 HBA. The caller is not required to hold any lock.
5159  * At the end of the function, it calls lpfc_hba_down_post function to
5160  * free any pending commands.
5161  **/
5162 static int
5163 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5164 {
5165         struct lpfc_sli *psli = &phba->sli;
5166         uint32_t hba_aer_enabled;
5167         int rc;
5168
5169         /* Restart HBA */
5170         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5171                         "0296 Restart HBA Data: x%x x%x\n",
5172                         phba->pport->port_state, psli->sli_flag);
5173
5174         /* Save the PCIe device Advanced Error Reporting (AER) state */
5175         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5176
5177         rc = lpfc_sli4_brdreset(phba);
5178         if (rc) {
5179                 phba->link_state = LPFC_HBA_ERROR;
5180                 goto hba_down_queue;
5181         }
5182
5183         spin_lock_irq(&phba->hbalock);
5184         phba->pport->stopped = 0;
5185         phba->link_state = LPFC_INIT_START;
5186         phba->hba_flag = 0;
5187         spin_unlock_irq(&phba->hbalock);
5188
5189         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5190         psli->stats_start = ktime_get_seconds();
5191
5192         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5193         if (hba_aer_enabled)
5194                 pci_disable_pcie_error_reporting(phba->pcidev);
5195
5196 hba_down_queue:
5197         lpfc_hba_down_post(phba);
5198         lpfc_sli4_queue_destroy(phba);
5199
5200         return rc;
5201 }
5202
5203 /**
5204  * lpfc_sli_brdrestart - Wrapper func for restarting hba
5205  * @phba: Pointer to HBA context object.
5206  *
5207  * This routine wraps the actual SLI3 or SLI4 hba restart routine via the
5208  * API jump table function pointer in the lpfc_hba struct.
5209 **/
5210 int
5211 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5212 {
5213         return phba->lpfc_sli_brdrestart(phba);
5214 }
5215
5216 /**
5217  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5218  * @phba: Pointer to HBA context object.
5219  *
5220  * This function is called after an HBA restart to wait for successful
5221  * restart of the HBA. Successful restart of the HBA is indicated by
5222  * the HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after
5223  * 150 polling iterations (~60 seconds), the function restarts the HBA
5224  * once more. Returns zero if the HBA restarted, else a negative error code.
5225  **/
5226 int
5227 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5228 {
5229         uint32_t status, i = 0;
5230
5231         /* Read the HBA Host Status Register */
5232         if (lpfc_readl(phba->HSregaddr, &status))
5233                 return -EIO;
5234
5235         /* Check status register to see what current state is */
5236         i = 0;
5237         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5238
5239                 /* Check every 10ms for 10 retries, then every 100ms for 90
5240                  * retries, then every 1 sec for 50 retries, for a total of
5241                  * ~60 seconds before resetting the board again and checking
5242                  * every 1 sec for another 50 retries. The up-to-60-second
5243                  * wait before the board is ready is required for Falcon FIPS
5244                  * zeroization to complete; any board reset in between would
5245                  * restart zeroization and further delay board readiness.
5246                  */
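                /* 10 * 10ms + 90 * 100ms + 50 * 1s ~= 59s elapse before
                 * the board is restarted at i == 150 below.
                 */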
5247                 if (i++ >= 200) {
5248                         /* Adapter failed to init, timeout, status reg
5249                            <status> */
5250                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5251                                         "0436 Adapter failed to init, "
5252                                         "timeout, status reg x%x, "
5253                                         "FW Data: A8 x%x AC x%x\n", status,
5254                                         readl(phba->MBslimaddr + 0xa8),
5255                                         readl(phba->MBslimaddr + 0xac));
5256                         phba->link_state = LPFC_HBA_ERROR;
5257                         return -ETIMEDOUT;
5258                 }
5259
5260                 /* Check to see if any errors occurred during init */
5261                 if (status & HS_FFERM) {
5262                         /* ERROR: During chipset initialization */
5263                         /* Adapter failed to init, chipset, status reg
5264                            <status> */
5265                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5266                                         "0437 Adapter failed to init, "
5267                                         "chipset, status reg x%x, "
5268                                         "FW Data: A8 x%x AC x%x\n", status,
5269                                         readl(phba->MBslimaddr + 0xa8),
5270                                         readl(phba->MBslimaddr + 0xac));
5271                         phba->link_state = LPFC_HBA_ERROR;
5272                         return -EIO;
5273                 }
5274
5275                 if (i <= 10)
5276                         msleep(10);
5277                 else if (i <= 100)
5278                         msleep(100);
5279                 else
5280                         msleep(1000);
5281
5282                 if (i == 150) {
5283                         /* Do post */
5284                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5285                         lpfc_sli_brdrestart(phba);
5286                 }
5287                 /* Read the HBA Host Status Register */
5288                 if (lpfc_readl(phba->HSregaddr, &status))
5289                         return -EIO;
5290         }
5291
5292         /* Check to see if any errors occurred during init */
5293         if (status & HS_FFERM) {
5294                 /* ERROR: During chipset initialization */
5295                 /* Adapter failed to init, chipset, status reg <status> */
5296                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5297                                 "0438 Adapter failed to init, chipset, "
5298                                 "status reg x%x, "
5299                                 "FW Data: A8 x%x AC x%x\n", status,
5300                                 readl(phba->MBslimaddr + 0xa8),
5301                                 readl(phba->MBslimaddr + 0xac));
5302                 phba->link_state = LPFC_HBA_ERROR;
5303                 return -EIO;
5304         }
5305
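        /* Have lpfc_sli_hba_setup issue CONFIG_PORT after this reset. */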
5306         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5307
5308         /* Clear all interrupt enable conditions */
5309         writel(0, phba->HCregaddr);
5310         readl(phba->HCregaddr); /* flush */
5311
5312         /* setup host attn register */
5313         writel(0xffffffff, phba->HAregaddr);
5314         readl(phba->HAregaddr); /* flush */
5315         return 0;
5316 }
5317
5318 /**
5319  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5320  *
5321  * This function calculates and returns the number of HBQs required to be
5322  * configured.
5323  **/
5324 int
5325 lpfc_sli_hbq_count(void)
5326 {
5327         return ARRAY_SIZE(lpfc_hbq_defs);
5328 }
5329
5330 /**
5331  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5332  *
5333  * This function adds the number of hbq entries in every HBQ to get
5334  * the total number of hbq entries required for the HBA and returns
5335  * the total count.
5336  **/
5337 static int
5338 lpfc_sli_hbq_entry_count(void)
5339 {
5340         int  hbq_count = lpfc_sli_hbq_count();
5341         int  count = 0;
5342         int  i;
5343
5344         for (i = 0; i < hbq_count; ++i)
5345                 count += lpfc_hbq_defs[i]->entry_count;
5346         return count;
5347 }
5348
5349 /**
5350  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5351  *
5352  * This function calculates amount of memory required for all hbq entries
5353  * to be configured and returns the total memory required.
5354  **/
5355 int
5356 lpfc_sli_hbq_size(void)
5357 {
5358         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5359 }
5360
5361 /**
5362  * lpfc_sli_hbq_setup - configure and initialize HBQs
5363  * @phba: Pointer to HBA context object.
5364  *
5365  * This function is called during the SLI initialization to configure
5366  * all the HBQs and post buffers to the HBQ. The caller is not
5367  * required to hold any locks. This function will return zero if successful
5368  * else it will return negative error code.
5369  **/
5370 static int
5371 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5372 {
5373         int  hbq_count = lpfc_sli_hbq_count();
5374         LPFC_MBOXQ_t *pmb;
5375         MAILBOX_t *pmbox;
5376         uint32_t hbqno;
5377         uint32_t hbq_entry_index;
5378
5379         /* Get a Mailbox buffer to setup mailbox
5380          * commands for HBA initialization
5381          */
5382         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5383
5384         if (!pmb)
5385                 return -ENOMEM;
5386
5387         pmbox = &pmb->u.mb;
5388
5389         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5390         phba->link_state = LPFC_INIT_MBX_CMDS;
5391         phba->hbq_in_use = 1;
5392
5393         hbq_entry_index = 0;
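        /* HBQ entries for all queues occupy one contiguous region;
         * hbq_entry_index is the first entry of the queue being set up.
         */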
5394         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5395                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5396                 phba->hbqs[hbqno].hbqPutIdx      = 0;
5397                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
5398                 phba->hbqs[hbqno].entry_count =
5399                         lpfc_hbq_defs[hbqno]->entry_count;
5400                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5401                         hbq_entry_index, pmb);
5402                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5403
5404                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5405                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5406                            mbxStatus <status>, ring <num> */
5407
5408                         lpfc_printf_log(phba, KERN_ERR,
5409                                         LOG_SLI | LOG_VPORT,
5410                                         "1805 Adapter failed to init. "
5411                                         "Data: x%x x%x x%x\n",
5412                                         pmbox->mbxCommand,
5413                                         pmbox->mbxStatus, hbqno);
5414
5415                         phba->link_state = LPFC_HBA_ERROR;
5416                         mempool_free(pmb, phba->mbox_mem_pool);
5417                         return -ENXIO;
5418                 }
5419         }
5420         phba->hbq_count = hbq_count;
5421
5422         mempool_free(pmb, phba->mbox_mem_pool);
5423
5424         /* Initially populate or replenish the HBQs */
5425         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5426                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5427         return 0;
5428 }
5429
5430 /**
5431  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5432  * @phba: Pointer to HBA context object.
5433  *
5434  * This function is called during SLI4 initialization to configure the
5435  * single ELS receive buffer queue and post buffers to it. The caller
5436  * is not required to hold any locks. This function always returns
5437  * zero.
5438  **/
5439 static int
5440 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5441 {
5442         phba->hbq_in_use = 1;
5443         /*
5444          * Specific case when MDS diagnostics is enabled and supported.
5445          * The receive buffer count is halved to manage the incoming
5446          * traffic.
5447          */
5448         if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5449                 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5450                         lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5451         else
5452                 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5453                         lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5454         phba->hbq_count = 1;
5455         lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5456         /* Initially populate or replenish the HBQs */
5457         return 0;
5458 }
5459
5460 /**
5461  * lpfc_sli_config_port - Issue config port mailbox command
5462  * @phba: Pointer to HBA context object.
5463  * @sli_mode: sli mode - 2/3
5464  *
5465  * This function is called by the sli initialization code path
5466  * to issue config_port mailbox command. This function restarts the
5467  * HBA firmware and issues a config_port mailbox command to configure
5468  * the SLI interface in the sli mode specified by sli_mode
5469  * variable. The caller is not required to hold any locks.
5470  * The function returns 0 if successful, else returns negative error
5471  * code.
5472  **/
5473 int
5474 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5475 {
5476         LPFC_MBOXQ_t *pmb;
5477         uint32_t resetcount = 0, rc = 0, done = 0;
5478
5479         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5480         if (!pmb) {
5481                 phba->link_state = LPFC_HBA_ERROR;
5482                 return -ENOMEM;
5483         }
5484
5485         phba->sli_rev = sli_mode;
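        /* Attempt the restart plus CONFIG_PORT sequence at most twice. */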
5486         while (resetcount < 2 && !done) {
5487                 spin_lock_irq(&phba->hbalock);
5488                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5489                 spin_unlock_irq(&phba->hbalock);
5490                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5491                 lpfc_sli_brdrestart(phba);
5492                 rc = lpfc_sli_chipset_init(phba);
5493                 if (rc)
5494                         break;
5495
5496                 spin_lock_irq(&phba->hbalock);
5497                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5498                 spin_unlock_irq(&phba->hbalock);
5499                 resetcount++;
5500
5501                 /* Call pre CONFIG_PORT mailbox command initialization.  A
5502                  * value of 0 means the call was successful.  Any nonzero
5503                  * value is a failure, but if ERESTART is returned, the
5504                  * driver may reset the HBA and try again.
5505                  */
5506                 rc = lpfc_config_port_prep(phba);
5507                 if (rc == -ERESTART) {
5508                         phba->link_state = LPFC_LINK_UNKNOWN;
5509                         continue;
5510                 } else if (rc)
5511                         break;
5512
5513                 phba->link_state = LPFC_INIT_MBX_CMDS;
5514                 lpfc_config_port(phba, pmb);
5515                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5516                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5517                                         LPFC_SLI3_HBQ_ENABLED |
5518                                         LPFC_SLI3_CRP_ENABLED |
5519                                         LPFC_SLI3_DSS_ENABLED);
5520                 if (rc != MBX_SUCCESS) {
5521                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5522                                 "0442 Adapter failed to init, mbxCmd x%x "
5523                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5524                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5525                         spin_lock_irq(&phba->hbalock);
5526                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5527                         spin_unlock_irq(&phba->hbalock);
5528                         rc = -ENXIO;
5529                 } else {
5530                         /* Allow asynchronous mailbox command to go through */
5531                         spin_lock_irq(&phba->hbalock);
5532                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5533                         spin_unlock_irq(&phba->hbalock);
5534                         done = 1;
5535
5536                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5537                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
5538                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5539                                         "3110 Port did not grant ASABT\n");
5540                 }
5541         }
5542         if (!done) {
5543                 rc = -EINVAL;
5544                 goto do_prep_failed;
5545         }
5546         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5547                 if (!pmb->u.mb.un.varCfgPort.cMA) {
5548                         rc = -ENXIO;
5549                         goto do_prep_failed;
5550                 }
5551                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5552                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5553                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5554                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5555                                 phba->max_vpi : phba->max_vports;
5556
5557                 } else
5558                         phba->max_vpi = 0;
5559                 if (pmb->u.mb.un.varCfgPort.gerbm)
5560                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5561                 if (pmb->u.mb.un.varCfgPort.gcrp)
5562                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5563
5564                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5565                 phba->port_gp = phba->mbox->us.s3_pgp.port;
5566
5567                 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5568                         if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5569                                 phba->cfg_enable_bg = 0;
5570                                 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5571                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5572                                                 "0443 Adapter did not grant "
5573                                                 "BlockGuard\n");
5574                         }
5575                 }
5576         } else {
5577                 phba->hbq_get = NULL;
5578                 phba->port_gp = phba->mbox->us.s2.port;
5579                 phba->max_vpi = 0;
5580         }
5581 do_prep_failed:
5582         mempool_free(pmb, phba->mbox_mem_pool);
5583         return rc;
5584 }
5585
5586
5587 /**
5588  * lpfc_sli_hba_setup - SLI initialization function
5589  * @phba: Pointer to HBA context object.
5590  *
5591  * This function is the main SLI initialization function. This function
5592  * is called by the HBA initialization code, HBA reset code and HBA
5593  * error attention handler code. Caller is not required to hold any
5594  * locks. This function issues config_port mailbox command to configure
5595  * the SLI, setup iocb rings and HBQ rings. In the end the function
5596  * calls the config_port_post function to issue init_link mailbox
5597  * command and to start the discovery. The function will return zero
5598  * if successful, else it will return negative error code.
5599  **/
5600 int
5601 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5602 {
5603         uint32_t rc;
5604         int  i;
5605         int longs;
5606
5607         /* Enabling the ISR already does CONFIG_PORT because of the config_msi mbox */
5608         if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5609                 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5610                 if (rc)
5611                         return -EIO;
5612                 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5613         }
5614         phba->fcp_embed_io = 0; /* SLI4 FC support only */
5615
5616         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5617         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5618                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5619                 if (!rc) {
5620                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5621                                         "2709 This device supports "
5622                                         "Advanced Error Reporting (AER)\n");
5623                         spin_lock_irq(&phba->hbalock);
5624                         phba->hba_flag |= HBA_AER_ENABLED;
5625                         spin_unlock_irq(&phba->hbalock);
5626                 } else {
5627                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5628                                         "2708 This device does not support "
5629                                         "Advanced Error Reporting (AER): %d\n",
5630                                         rc);
5631                         phba->cfg_aer_support = 0;
5632                 }
5633         }
5634
5635         if (phba->sli_rev == 3) {
5636                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5637                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5638         } else {
5639                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5640                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5641                 phba->sli3_options = 0;
5642         }
5643
5644         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5645                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5646                         phba->sli_rev, phba->max_vpi);
5647         rc = lpfc_sli_ring_map(phba);
5648
5649         if (rc)
5650                 goto lpfc_sli_hba_setup_error;
5651
5652         /* Initialize VPIs. */
5653         if (phba->sli_rev == LPFC_SLI_REV3) {
5654                 /*
5655                  * The VPI bitmask and physical ID array are allocated
5656                  * and initialized once only - at driver load.  A port
5657                  * reset doesn't need to reinitialize this memory.
5658                  */
5659                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
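                        /* One bitmask bit per VPI (0..max_vpi), rounded
                         * up to whole unsigned longs.
                         */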
5660                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5661                         phba->vpi_bmask = kcalloc(longs,
5662                                                   sizeof(unsigned long),
5663                                                   GFP_KERNEL);
5664                         if (!phba->vpi_bmask) {
5665                                 rc = -ENOMEM;
5666                                 goto lpfc_sli_hba_setup_error;
5667                         }
5668
5669                         phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5670                                                 sizeof(uint16_t),
5671                                                 GFP_KERNEL);
5672                         if (!phba->vpi_ids) {
5673                                 kfree(phba->vpi_bmask);
5674                                 rc = -ENOMEM;
5675                                 goto lpfc_sli_hba_setup_error;
5676                         }
5677                         for (i = 0; i < phba->max_vpi; i++)
5678                                 phba->vpi_ids[i] = i;
5679                 }
5680         }
5681
5682         /* Init HBQs */
5683         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5684                 rc = lpfc_sli_hbq_setup(phba);
5685                 if (rc)
5686                         goto lpfc_sli_hba_setup_error;
5687         }
5688         spin_lock_irq(&phba->hbalock);
5689         phba->sli.sli_flag |= LPFC_PROCESS_LA;
5690         spin_unlock_irq(&phba->hbalock);
5691
5692         rc = lpfc_config_port_post(phba);
5693         if (rc)
5694                 goto lpfc_sli_hba_setup_error;
5695
5696         return rc;
5697
5698 lpfc_sli_hba_setup_error:
5699         phba->link_state = LPFC_HBA_ERROR;
5700         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5701                         "0445 Firmware initialization failed\n");
5702         return rc;
5703 }
5704
5705 /**
5706  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5707  * @phba: Pointer to HBA context object.
5708  *
5709  * This function issues a dump mailbox command to read config region
5710  * 23, parses the records in the region, and populates the driver
5711  * data structures.
5712  **/
5713 static int
5714 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5715 {
5716         LPFC_MBOXQ_t *mboxq;
5717         struct lpfc_dmabuf *mp;
5718         struct lpfc_mqe *mqe;
5719         uint32_t data_length;
5720         int rc;
5721
5722         /* Program the default value of vlan_id and fc_map */
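        /* The default FC_MAP is the well-known FCoE mapped-address prefix
         * 0x0E:0xFC:0x00.
         */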
5723         phba->valid_vlan = 0;
5724         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5725         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5726         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5727
5728         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5729         if (!mboxq)
5730                 return -ENOMEM;
5731
5732         mqe = &mboxq->u.mqe;
5733         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5734                 rc = -ENOMEM;
5735                 goto out_free_mboxq;
5736         }
5737
5738         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5739         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5740
5741         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5742                         "(%d):2571 Mailbox cmd x%x Status x%x "
5743                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5744                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5745                         "CQ: x%x x%x x%x x%x\n",
5746                         mboxq->vport ? mboxq->vport->vpi : 0,
5747                         bf_get(lpfc_mqe_command, mqe),
5748                         bf_get(lpfc_mqe_status, mqe),
5749                         mqe->un.mb_words[0], mqe->un.mb_words[1],
5750                         mqe->un.mb_words[2], mqe->un.mb_words[3],
5751                         mqe->un.mb_words[4], mqe->un.mb_words[5],
5752                         mqe->un.mb_words[6], mqe->un.mb_words[7],
5753                         mqe->un.mb_words[8], mqe->un.mb_words[9],
5754                         mqe->un.mb_words[10], mqe->un.mb_words[11],
5755                         mqe->un.mb_words[12], mqe->un.mb_words[13],
5756                         mqe->un.mb_words[14], mqe->un.mb_words[15],
5757                         mqe->un.mb_words[16], mqe->un.mb_words[50],
5758                         mboxq->mcqe.word0,
5759                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5760                         mboxq->mcqe.trailer);
5761
5762         if (rc) {
5763                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5764                 kfree(mp);
5765                 rc = -EIO;
5766                 goto out_free_mboxq;
5767         }
5768         data_length = mqe->un.mb_words[5];
5769         if (data_length > DMP_RGN23_SIZE) {
5770                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5771                 kfree(mp);
5772                 rc = -EIO;
5773                 goto out_free_mboxq;
5774         }
5775
5776         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5777         lpfc_mbuf_free(phba, mp->virt, mp->phys);
5778         kfree(mp);
5779         rc = 0;
5780
5781 out_free_mboxq:
5782         mempool_free(mboxq, phba->mbox_mem_pool);
5783         return rc;
5784 }
5785
5786 /**
5787  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5788  * @phba: pointer to lpfc hba data structure.
5789  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5790  * @vpd: pointer to the memory to hold resulting port vpd data.
5791  * @vpd_size: On input, the number of bytes allocated to @vpd.
5792  *            On output, the number of data bytes in @vpd.
5793  *
5794  * This routine executes a READ_REV SLI4 mailbox command.  In
5795  * addition, this routine gets the port vpd data.
5796  *
5797  * Return codes
5798  *      0 - successful
5799  *      -ENOMEM - could not allocate memory.
5800  **/
5801 static int
5802 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5803                     uint8_t *vpd, uint32_t *vpd_size)
5804 {
5805         int rc = 0;
5806         uint32_t dma_size;
5807         struct lpfc_dmabuf *dmabuf;
5808         struct lpfc_mqe *mqe;
5809
5810         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5811         if (!dmabuf)
5812                 return -ENOMEM;
5813
5814         /*
5815          * Get a DMA buffer for the vpd data resulting from the READ_REV
5816          * mailbox command.
5817          */
5818         dma_size = *vpd_size;
5819         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5820                                           &dmabuf->phys, GFP_KERNEL);
5821         if (!dmabuf->virt) {
5822                 kfree(dmabuf);
5823                 return -ENOMEM;
5824         }
5825
5826         /*
5827          * The SLI4 implementation of READ_REV conflicts at word1,
5828          * bits 31:16 and SLI4 adds vpd functionality not present
5829          * in SLI3.  This code corrects the conflicts.
5830          */
5831         lpfc_read_rev(phba, mboxq);
5832         mqe = &mboxq->u.mqe;
5833         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5834         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5835         mqe->un.read_rev.word1 &= 0x0000FFFF;
5836         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5837         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5838
5839         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5840         if (rc) {
5841                 dma_free_coherent(&phba->pcidev->dev, dma_size,
5842                                   dmabuf->virt, dmabuf->phys);
5843                 kfree(dmabuf);
5844                 return -EIO;
5845         }
5846
5847         /*
5848          * The available vpd length cannot be bigger than the
5849          * DMA buffer passed to the port.  Catch the less than
5850          * case and update the caller's size.
5851          */
5852         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5853                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5854
5855         memcpy(vpd, dmabuf->virt, *vpd_size);
5856
5857         dma_free_coherent(&phba->pcidev->dev, dma_size,
5858                           dmabuf->virt, dmabuf->phys);
5859         kfree(dmabuf);
5860         return 0;
5861 }
5862
5863 /**
5864  * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5865  * @phba: pointer to lpfc hba data structure.
5866  *
5867  * This routine retrieves the controller attributes of the SLI4 device
5868  * this PCI function is attached to.
5869  *
5870  * Return codes
5871  *      0 - successful
5872  *      otherwise - failed to retrieve controller attributes
5873  **/
5874 static int
5875 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5876 {
5877         LPFC_MBOXQ_t *mboxq;
5878         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5879         struct lpfc_controller_attribute *cntl_attr;
5880         void *virtaddr = NULL;
5881         uint32_t alloclen, reqlen;
5882         uint32_t shdr_status, shdr_add_status;
5883         union lpfc_sli4_cfg_shdr *shdr;
5884         int rc;
5885
5886         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5887         if (!mboxq)
5888                 return -ENOMEM;
5889
5890         /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5891         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5892         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5893                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5894                         LPFC_SLI4_MBX_NEMBED);
5895
5896         if (alloclen < reqlen) {
5897                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5898                                 "3084 Allocated DMA memory size (%d) is "
5899                                 "less than the requested DMA memory size "
5900                                 "(%d)\n", alloclen, reqlen);
5901                 rc = -ENOMEM;
5902                 goto out_free_mboxq;
5903         }
5904         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5905         virtaddr = mboxq->sge_array->addr[0];
5906         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5907         shdr = &mbx_cntl_attr->cfg_shdr;
5908         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5909         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5910         if (shdr_status || shdr_add_status || rc) {
5911                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5912                                 "3085 Mailbox x%x (x%x/x%x) failed, "
5913                                 "rc:x%x, status:x%x, add_status:x%x\n",
5914                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5915                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5916                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5917                                 rc, shdr_status, shdr_add_status);
5918                 rc = -ENXIO;
5919                 goto out_free_mboxq;
5920         }
5921
5922         cntl_attr = &mbx_cntl_attr->cntl_attr;
5923         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5924         phba->sli4_hba.lnk_info.lnk_tp =
5925                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5926         phba->sli4_hba.lnk_info.lnk_no =
5927                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5928         phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
5929         phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
5930
5931         memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5932         strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5933                 sizeof(phba->BIOSVersion));
5934
5935         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5936                         "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
5937                         "flash_id: x%02x, asic_rev: x%02x\n",
5938                         phba->sli4_hba.lnk_info.lnk_tp,
5939                         phba->sli4_hba.lnk_info.lnk_no,
5940                         phba->BIOSVersion, phba->sli4_hba.flash_id,
5941                         phba->sli4_hba.asic_rev);
5942 out_free_mboxq:
5943         if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5944                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5945         else
5946                 mempool_free(mboxq, phba->mbox_mem_pool);
5947         return rc;
5948 }
5949
5950 /**
5951  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5952  * @phba: pointer to lpfc hba data structure.
5953  *
5954  * This routine retrieves the physical port name of the SLI4 device
5955  * this PCI function is attached to.
5956  *
5957  * Return codes
5958  *      0 - successful
5959  *      otherwise - failed to retrieve physical port name
5960  **/
5961 static int
5962 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5963 {
5964         LPFC_MBOXQ_t *mboxq;
5965         struct lpfc_mbx_get_port_name *get_port_name;
5966         uint32_t shdr_status, shdr_add_status;
5967         union lpfc_sli4_cfg_shdr *shdr;
5968         char cport_name = 0;
5969         int rc;
5970
5971         /* We assume nothing at this point */
5972         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5973         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5974
5975         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5976         if (!mboxq)
5977                 return -ENOMEM;
5978         /* obtain link type and link number via READ_CONFIG */
5979         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5980         lpfc_sli4_read_config(phba);
5981         if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5982                 goto retrieve_ppname;
5983
5984         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5985         rc = lpfc_sli4_get_ctl_attr(phba);
5986         if (rc)
5987                 goto out_free_mboxq;
5988
5989 retrieve_ppname:
5990         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5991                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5992                 sizeof(struct lpfc_mbx_get_port_name) -
5993                 sizeof(struct lpfc_sli4_cfg_mhdr),
5994                 LPFC_SLI4_MBX_EMBED);
5995         get_port_name = &mboxq->u.mqe.un.get_port_name;
5996         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5997         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5998         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5999                 phba->sli4_hba.lnk_info.lnk_tp);
6000         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6001         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6002         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6003         if (shdr_status || shdr_add_status || rc) {
6004                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6005                                 "3087 Mailbox x%x (x%x/x%x) failed: "
6006                                 "rc:x%x, status:x%x, add_status:x%x\n",
6007                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6008                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6009                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6010                                 rc, shdr_status, shdr_add_status);
6011                 rc = -ENXIO;
6012                 goto out_free_mboxq;
6013         }
6014         switch (phba->sli4_hba.lnk_info.lnk_no) {
6015         case LPFC_LINK_NUMBER_0:
6016                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6017                                 &get_port_name->u.response);
6018                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6019                 break;
6020         case LPFC_LINK_NUMBER_1:
6021                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6022                                 &get_port_name->u.response);
6023                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6024                 break;
6025         case LPFC_LINK_NUMBER_2:
6026                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6027                                 &get_port_name->u.response);
6028                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6029                 break;
6030         case LPFC_LINK_NUMBER_3:
6031                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6032                                 &get_port_name->u.response);
6033                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6034                 break;
6035         default:
6036                 break;
6037         }
6038
6039         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6040                 phba->Port[0] = cport_name;
6041                 phba->Port[1] = '\0';
6042                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6043                                 "3091 SLI get port name: %s\n", phba->Port);
6044         }
6045
6046 out_free_mboxq:
6047         if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6048                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6049         else
6050                 mempool_free(mboxq, phba->mbox_mem_pool);
6051         return rc;
6052 }
6053
6054 /**
6055  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6056  * @phba: pointer to lpfc hba data structure.
6057  *
6058  * This routine is called to explicitly arm the SLI4 device's completion
6059  * and event queues.
6060  **/
6061 static void
6062 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6063 {
6064         int qidx;
6065         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6066         struct lpfc_sli4_hdw_queue *qp;
6067         struct lpfc_queue *eq;
6068
6069         sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6070         sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6071         if (sli4_hba->nvmels_cq)
6072                 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6073                                            LPFC_QUEUE_REARM);
6074
6075         if (sli4_hba->hdwq) {
6076                 /* Loop thru all Hardware Queues */
6077                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6078                         qp = &sli4_hba->hdwq[qidx];
6079                         /* ARM the corresponding CQ */
6080                         sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6081                                                 LPFC_QUEUE_REARM);
6082                 }
6083
6084                 /* Loop thru all IRQ vectors */
6085                 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6086                         eq = sli4_hba->hba_eq_hdl[qidx].eq;
6087                         /* ARM the corresponding EQ */
6088                         sli4_hba->sli4_write_eq_db(phba, eq,
6089                                                    0, LPFC_QUEUE_REARM);
6090                 }
6091         }
6092
6093         if (phba->nvmet_support) {
6094                 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6095                         sli4_hba->sli4_write_cq_db(phba,
6096                                 sli4_hba->nvmet_cqset[qidx], 0,
6097                                 LPFC_QUEUE_REARM);
6098                 }
6099         }
6100 }
6101
6102 /**
6103  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6104  * @phba: Pointer to HBA context object.
6105  * @type: The resource extent type.
6106  * @extnt_count: buffer to hold port available extent count.
6107  * @extnt_size: buffer to hold element count per extent.
6108  *
6109  * This function queries the port and retrieves the number of available
6110  * extents and their size for a particular extent type.
6111  *
6112  * Returns: 0 if successful.  Nonzero otherwise.
6113  **/
6114 int
6115 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6116                                uint16_t *extnt_count, uint16_t *extnt_size)
6117 {
6118         int rc = 0;
6119         uint32_t length;
6120         uint32_t mbox_tmo;
6121         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6122         LPFC_MBOXQ_t *mbox;
6123
6124         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6125         if (!mbox)
6126                 return -ENOMEM;
6127
6128         /* Find out how many extents are available for this resource type */
6129         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6130                   sizeof(struct lpfc_sli4_cfg_mhdr));
6131         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6132                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6133                          length, LPFC_SLI4_MBX_EMBED);
6134
6135         /* Send an extents count of 0 - the GET doesn't use it. */
6136         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6137                                         LPFC_SLI4_MBX_EMBED);
6138         if (unlikely(rc)) {
6139                 rc = -EIO;
6140                 goto err_exit;
6141         }
6142
6143         if (!phba->sli4_hba.intr_enable)
6144                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6145         else {
6146                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6147                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6148         }
6149         if (unlikely(rc)) {
6150                 rc = -EIO;
6151                 goto err_exit;
6152         }
6153
6154         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6155         if (bf_get(lpfc_mbox_hdr_status,
6156                    &rsrc_info->header.cfg_shdr.response)) {
6157                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6158                                 "2930 Failed to get resource extents "
6159                                 "Status 0x%x Add'l Status 0x%x\n",
6160                                 bf_get(lpfc_mbox_hdr_status,
6161                                        &rsrc_info->header.cfg_shdr.response),
6162                                 bf_get(lpfc_mbox_hdr_add_status,
6163                                        &rsrc_info->header.cfg_shdr.response));
6164                 rc = -EIO;
6165                 goto err_exit;
6166         }
6167
6168         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6169                               &rsrc_info->u.rsp);
6170         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6171                              &rsrc_info->u.rsp);
6172
6173         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6174                         "3162 Retrieved extents type-%d from port: count:%d, "
6175                         "size:%d\n", type, *extnt_count, *extnt_size);
6176
6177 err_exit:
6178         mempool_free(mbox, phba->mbox_mem_pool);
6179         return rc;
6180 }
6181
6182 /**
6183  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6184  * @phba: Pointer to HBA context object.
6185  * @type: The extent type to check.
6186  *
6187  * This function reads the current available extents from the port and checks
6188  * if the extent count or extent size has changed since the last access.
6189  * Callers use this routine post port reset to understand if there is an
6190  * extent reprovisioning requirement.
6191  *
6192  * Returns:
6193  *   -Error: a negative error code indicates a problem.
6194  *   1: Extent count or size has changed.
6195  *   0: No changes.
6196  **/
6197 static int
6198 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6199 {
6200         uint16_t curr_ext_cnt, rsrc_ext_cnt;
6201         uint16_t size_diff, rsrc_ext_size;
6202         int rc = 0;
6203         struct lpfc_rsrc_blks *rsrc_entry;
6204         struct list_head *rsrc_blk_list = NULL;
6205
6206         size_diff = 0;
6207         curr_ext_cnt = 0;
6208         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6209                                             &rsrc_ext_cnt,
6210                                             &rsrc_ext_size);
6211         if (unlikely(rc))
6212                 return -EIO;
6213
6214         switch (type) {
6215         case LPFC_RSC_TYPE_FCOE_RPI:
6216                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6217                 break;
6218         case LPFC_RSC_TYPE_FCOE_VPI:
6219                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6220                 break;
6221         case LPFC_RSC_TYPE_FCOE_XRI:
6222                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6223                 break;
6224         case LPFC_RSC_TYPE_FCOE_VFI:
6225                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6226                 break;
6227         default:
6228                 break;
6229         }
6230
6231         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6232                 curr_ext_cnt++;
6233                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6234                         size_diff++;
6235         }
6236
6237         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6238                 rc = 1;
6239
6240         return rc;
6241 }
6242
6243 /**
6244  * lpfc_sli4_cfg_post_extnts - Post an allocation request for resource extents
6245  * @phba: Pointer to HBA context object.
6246  * @extnt_cnt: number of extents to request.
6247  * @type: the extent type (rpi, xri, vfi, vpi).
6248  * @emb: on output, LPFC_SLI4_MBX_EMBED or LPFC_SLI4_MBX_NEMBED as used.
6249  * @mbox: pointer to the caller's allocated mailbox structure.
6250  *
6251  * This function executes the extents allocation request.  It also
6252  * sizes the mailbox (embedded or non-embedded) for the amount of
6253  * memory needed to hold the allocated extents. It is the caller's
6254  * responsibility to evaluate the response.
6255  *
6256  * Returns:
6257  *   -Error:  Error value describes the condition found.
6258  *   0: if successful
6259  **/
6260 static int
6261 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6262                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6263 {
6264         int rc = 0;
6265         uint32_t req_len;
6266         uint32_t emb_len;
6267         uint32_t alloc_len, mbox_tmo;
6268
6269         /* Calculate the total requested length of the dma memory */
6270         req_len = extnt_cnt * sizeof(uint16_t);
6271
6272         /*
6273          * Calculate the size of an embedded mailbox.  The uint32_t
6274          * accounts for the extents-specific word.
6275          */
6276         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6277                 sizeof(uint32_t);
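        /* emb_len is how many bytes of extent data fit in the embedded
         * mailbox payload.
         */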
6278
6279         /*
6280          * Presume the allocation and response will fit into an embedded
6281          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6282          */
6283         *emb = LPFC_SLI4_MBX_EMBED;
6284         if (req_len > emb_len) {
6285                 req_len = extnt_cnt * sizeof(uint16_t) +
6286                         sizeof(union lpfc_sli4_cfg_shdr) +
6287                         sizeof(uint32_t);
6288                 *emb = LPFC_SLI4_MBX_NEMBED;
6289         }
6290
6291         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6292                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6293                                      req_len, *emb);
6294         if (alloc_len < req_len) {
6295                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6296                         "2982 Allocated DMA memory size (x%x) is "
6297                         "less than the requested DMA memory "
6298                         "size (x%x)\n", alloc_len, req_len);
6299                 return -ENOMEM;
6300         }
6301         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6302         if (unlikely(rc))
6303                 return -EIO;
6304
6305         if (!phba->sli4_hba.intr_enable)
6306                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6307         else {
6308                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6309                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6310         }
6311
6312         if (unlikely(rc))
6313                 rc = -EIO;
6314         return rc;
6315 }
6316
6317 /**
6318  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6319  * @phba: Pointer to HBA context object.
6320  * @type:  The resource extent type to allocate.
6321  *
6322  * This function allocates the number of elements for the specified
6323  * resource type.
6324  **/
6325 static int
6326 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6327 {
6328         bool emb = false;
6329         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6330         uint16_t rsrc_id, rsrc_start, j, k;
6331         uint16_t *ids;
6332         int i, rc;
6333         unsigned long longs;
6334         unsigned long *bmask;
6335         struct lpfc_rsrc_blks *rsrc_blks;
6336         LPFC_MBOXQ_t *mbox;
6337         uint32_t length;
6338         struct lpfc_id_range *id_array = NULL;
6339         void *virtaddr = NULL;
6340         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6341         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6342         struct list_head *ext_blk_list;
6343
6344         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6345                                             &rsrc_cnt,
6346                                             &rsrc_size);
6347         if (unlikely(rc))
6348                 return -EIO;
6349
6350         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6351                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6352                         "3009 No available Resource Extents "
6353                         "for resource type 0x%x: Count: 0x%x, "
6354                         "Size 0x%x\n", type, rsrc_cnt,
6355                         rsrc_size);
6356                 return -ENOMEM;
6357         }
6358
6359         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6360                         "2903 Post resource extents type-0x%x: "
6361                         "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6362
6363         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6364         if (!mbox)
6365                 return -ENOMEM;
6366
6367         rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6368         if (unlikely(rc)) {
6369                 rc = -EIO;
6370                 goto err_exit;
6371         }
6372
6373         /*
6374          * Figure out where the response is located.  Then get local pointers
6375          * to the response data.  The port is not guaranteed to grant the
6376          * requested extent count, so update the local variable with the
6377          * allocated count from the port.
6378          */
6379         if (emb == LPFC_SLI4_MBX_EMBED) {
6380                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6381                 id_array = &rsrc_ext->u.rsp.id[0];
6382                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6383         } else {
6384                 virtaddr = mbox->sge_array->addr[0];
6385                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6386                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6387                 id_array = &n_rsrc->id;
6388         }
6389
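        /* One bitmask bit per resource id (rsrc_cnt * rsrc_size), rounded
         * up to whole unsigned longs.
         */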
6390         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6391         rsrc_id_cnt = rsrc_cnt * rsrc_size;
6392
6393         /*
6394          * Based on the resource size and count, correct the base and max
6395          * resource values.
6396          */
6397         length = sizeof(struct lpfc_rsrc_blks);
6398         switch (type) {
6399         case LPFC_RSC_TYPE_FCOE_RPI:
6400                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6401                                                    sizeof(unsigned long),
6402                                                    GFP_KERNEL);
6403                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6404                         rc = -ENOMEM;
6405                         goto err_exit;
6406                 }
6407                 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6408                                                  sizeof(uint16_t),
6409                                                  GFP_KERNEL);
6410                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6411                         kfree(phba->sli4_hba.rpi_bmask);
6412                         rc = -ENOMEM;
6413                         goto err_exit;
6414                 }
6415
6416                 /*
6417                  * The next_rpi was initialized with the maximum available
6418                  * count but the port may allocate a smaller number.  Catch
6419                  * that case and update the next_rpi.
6420                  */
6421                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6422
6423                 /* Initialize local ptrs for common extent processing later. */
6424                 bmask = phba->sli4_hba.rpi_bmask;
6425                 ids = phba->sli4_hba.rpi_ids;
6426                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6427                 break;
6428         case LPFC_RSC_TYPE_FCOE_VPI:
6429                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6430                                           GFP_KERNEL);
6431                 if (unlikely(!phba->vpi_bmask)) {
6432                         rc = -ENOMEM;
6433                         goto err_exit;
6434                 }
6435                 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6436                                          GFP_KERNEL);
6437                 if (unlikely(!phba->vpi_ids)) {
6438                         kfree(phba->vpi_bmask);
6439                         rc = -ENOMEM;
6440                         goto err_exit;
6441                 }
6442
6443                 /* Initialize local ptrs for common extent processing later. */
6444                 bmask = phba->vpi_bmask;
6445                 ids = phba->vpi_ids;
6446                 ext_blk_list = &phba->lpfc_vpi_blk_list;
6447                 break;
6448         case LPFC_RSC_TYPE_FCOE_XRI:
6449                 phba->sli4_hba.xri_bmask = kcalloc(longs,
6450                                                    sizeof(unsigned long),
6451                                                    GFP_KERNEL);
6452                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6453                         rc = -ENOMEM;
6454                         goto err_exit;
6455                 }
6456                 phba->sli4_hba.max_cfg_param.xri_used = 0;
6457                 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6458                                                  sizeof(uint16_t),
6459                                                  GFP_KERNEL);
6460                 if (unlikely(!phba->sli4_hba.xri_ids)) {
6461                         kfree(phba->sli4_hba.xri_bmask);
6462                         rc = -ENOMEM;
6463                         goto err_exit;
6464                 }
6465
6466                 /* Initialize local ptrs for common extent processing later. */
6467                 bmask = phba->sli4_hba.xri_bmask;
6468                 ids = phba->sli4_hba.xri_ids;
6469                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6470                 break;
6471         case LPFC_RSC_TYPE_FCOE_VFI:
6472                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6473                                                    sizeof(unsigned long),
6474                                                    GFP_KERNEL);
6475                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6476                         rc = -ENOMEM;
6477                         goto err_exit;
6478                 }
6479                 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6480                                                  sizeof(uint16_t),
6481                                                  GFP_KERNEL);
6482                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6483                         kfree(phba->sli4_hba.vfi_bmask);
6484                         rc = -ENOMEM;
6485                         goto err_exit;
6486                 }
6487
6488                 /* Initialize local ptrs for common extent processing later. */
6489                 bmask = phba->sli4_hba.vfi_bmask;
6490                 ids = phba->sli4_hba.vfi_ids;
6491                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6492                 break;
6493         default:
6494                 /* Unsupported Opcode.  Fail call. */
6495                 id_array = NULL;
6496                 bmask = NULL;
6497                 ids = NULL;
6498                 ext_blk_list = NULL;
6499                 goto err_exit;
6500         }
6501
6502         /*
6503          * Complete initializing the extent configuration with the
6504          * allocated ids assigned to this function.  The bitmask serves
6505          * as an index into the array and manages the available ids.  The
6506          * array just stores the ids communicated to the port via the wqes.
6507          */
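        /*
         * A sketch of the assumed response layout: each 32-bit word in
         * id_array[] carries two 16-bit extent base ids,
         *
         *      id_array[k]: | word4_1 (bits 31:16) | word4_0 (bits 15:0) |
         *
         * so even-numbered extents read word4_0, odd-numbered extents read
         * word4_1, and k advances only after both halves are consumed.
         */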
6508         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6509                 if ((i % 2) == 0)
6510                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6511                                          &id_array[k]);
6512                 else
6513                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6514                                          &id_array[k]);
6515
6516                 rsrc_blks = kzalloc(length, GFP_KERNEL);
6517                 if (unlikely(!rsrc_blks)) {
6518                         rc = -ENOMEM;
6519                         kfree(bmask);
6520                         kfree(ids);
6521                         goto err_exit;
6522                 }
6523                 rsrc_blks->rsrc_start = rsrc_id;
6524                 rsrc_blks->rsrc_size = rsrc_size;
6525                 list_add_tail(&rsrc_blks->list, ext_blk_list);
6526                 rsrc_start = rsrc_id;
6527                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6528                         phba->sli4_hba.io_xri_start = rsrc_start +
6529                                 lpfc_sli4_get_iocb_cnt(phba);
6530                 }
6531
6532                 while (rsrc_id < (rsrc_start + rsrc_size)) {
6533                         ids[j] = rsrc_id;
6534                         rsrc_id++;
6535                         j++;
6536                 }
6537                 /* Entire word processed.  Get next word. */
6538                 if ((i % 2) == 1)
6539                         k++;
6540         }
6541  err_exit:
6542         lpfc_sli4_mbox_cmd_free(phba, mbox);
6543         return rc;
6544 }
6545
6546
6547
6548 /**
6549  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6550  * @phba: Pointer to HBA context object.
6551  * @type: the extent's type.
6552  *
6553  * This function deallocates all extents of a particular resource type.
6554  * SLI4 does not allow for deallocating a particular extent range.  It
6555  * is the caller's responsibility to release all kernel memory resources.
6556  **/
6557 static int
6558 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6559 {
6560         int rc;
6561         uint32_t length, mbox_tmo = 0;
6562         LPFC_MBOXQ_t *mbox;
6563         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6564         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6565
6566         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6567         if (!mbox)
6568                 return -ENOMEM;
6569
6570         /*
6571          * This function sends an embedded mailbox because it only sends
6572          * the resource type.  All extents of this type are released by the
6573          * port.
6574          */
6575         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6576                   sizeof(struct lpfc_sli4_cfg_mhdr));
6577         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6578                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6579                          length, LPFC_SLI4_MBX_EMBED);
6580
6581         /* Send an extents count of 0 - the dealloc doesn't use it. */
6582         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6583                                         LPFC_SLI4_MBX_EMBED);
6584         if (unlikely(rc)) {
6585                 rc = -EIO;
6586                 goto out_free_mbox;
6587         }
6588         if (!phba->sli4_hba.intr_enable)
6589                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6590         else {
6591                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6592                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6593         }
6594         if (unlikely(rc)) {
6595                 rc = -EIO;
6596                 goto out_free_mbox;
6597         }
6598
6599         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6600         if (bf_get(lpfc_mbox_hdr_status,
6601                    &dealloc_rsrc->header.cfg_shdr.response)) {
6602                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6603                                 "2919 Failed to release resource extents "
6604                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
6605                                 "Resource memory not released.\n",
6606                                 type,
6607                                 bf_get(lpfc_mbox_hdr_status,
6608                                     &dealloc_rsrc->header.cfg_shdr.response),
6609                                 bf_get(lpfc_mbox_hdr_add_status,
6610                                     &dealloc_rsrc->header.cfg_shdr.response));
6611                 rc = -EIO;
6612                 goto out_free_mbox;
6613         }
6614
6615         /* Release kernel memory resources for the specific type. */
6616         switch (type) {
6617         case LPFC_RSC_TYPE_FCOE_VPI:
6618                 kfree(phba->vpi_bmask);
6619                 kfree(phba->vpi_ids);
6620                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6621                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6622                                     &phba->lpfc_vpi_blk_list, list) {
6623                         list_del_init(&rsrc_blk->list);
6624                         kfree(rsrc_blk);
6625                 }
6626                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6627                 break;
6628         case LPFC_RSC_TYPE_FCOE_XRI:
6629                 kfree(phba->sli4_hba.xri_bmask);
6630                 kfree(phba->sli4_hba.xri_ids);
6631                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6632                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
6633                         list_del_init(&rsrc_blk->list);
6634                         kfree(rsrc_blk);
6635                 }
6636                 break;
6637         case LPFC_RSC_TYPE_FCOE_VFI:
6638                 kfree(phba->sli4_hba.vfi_bmask);
6639                 kfree(phba->sli4_hba.vfi_ids);
6640                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6641                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6642                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6643                         list_del_init(&rsrc_blk->list);
6644                         kfree(rsrc_blk);
6645                 }
6646                 break;
6647         case LPFC_RSC_TYPE_FCOE_RPI:
6648                 /* RPI bitmask and physical id array are cleaned up earlier. */
6649                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6650                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6651                         list_del_init(&rsrc_blk->list);
6652                         kfree(rsrc_blk);
6653                 }
6654                 break;
6655         default:
6656                 break;
6657         }
6658
6659         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6660
6661  out_free_mbox:
6662         mempool_free(mbox, phba->mbox_mem_pool);
6663         return rc;
6664 }
6665
6666 static void
6667 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6668                   uint32_t feature)
6669 {
6670         uint32_t len;
6671         u32 sig_freq = 0;
6672
6673         len = sizeof(struct lpfc_mbx_set_feature) -
6674                 sizeof(struct lpfc_sli4_cfg_mhdr);
6675         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6676                          LPFC_MBOX_OPCODE_SET_FEATURES, len,
6677                          LPFC_SLI4_MBX_EMBED);
6678
6679         switch (feature) {
6680         case LPFC_SET_UE_RECOVERY:
6681                 bf_set(lpfc_mbx_set_feature_UER,
6682                        &mbox->u.mqe.un.set_feature, 1);
6683                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6684                 mbox->u.mqe.un.set_feature.param_len = 8;
6685                 break;
6686         case LPFC_SET_MDS_DIAGS:
6687                 bf_set(lpfc_mbx_set_feature_mds,
6688                        &mbox->u.mqe.un.set_feature, 1);
6689                 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6690                        &mbox->u.mqe.un.set_feature, 1);
6691                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6692                 mbox->u.mqe.un.set_feature.param_len = 8;
6693                 break;
6694         case LPFC_SET_CGN_SIGNAL:
6695                 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6696                         sig_freq = 0;
6697                 else
6698                         sig_freq = phba->cgn_sig_freq;
6699
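                /*
                 * Program the congestion signal frequencies the port should
                 * generate: warn-only registration sets just the warning
                 * frequency, warn+alarm registration sets both.
                 */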
6700                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6701                         bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6702                                &mbox->u.mqe.un.set_feature, sig_freq);
6703                         bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6704                                &mbox->u.mqe.un.set_feature, sig_freq);
6705                 }
6706
6707                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6708                         bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6709                                &mbox->u.mqe.un.set_feature, sig_freq);
6710
6711                 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6712                     phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6713                         sig_freq = 0;
6714                 else
6715                         sig_freq = lpfc_acqe_cgn_frequency;
6716
6717                 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6718                        &mbox->u.mqe.un.set_feature, sig_freq);
6719
6720                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6721                 mbox->u.mqe.un.set_feature.param_len = 12;
6722                 break;
6723         case LPFC_SET_DUAL_DUMP:
6724                 bf_set(lpfc_mbx_set_feature_dd,
6725                        &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6726                 bf_set(lpfc_mbx_set_feature_ddquery,
6727                        &mbox->u.mqe.un.set_feature, 0);
6728                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6729                 mbox->u.mqe.un.set_feature.param_len = 4;
6730                 break;
6731         case LPFC_SET_ENABLE_MI:
6732                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6733                 mbox->u.mqe.un.set_feature.param_len = 4;
6734                 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6735                        phba->pport->cfg_lun_queue_depth);
6736                 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6737                        phba->sli4_hba.pc_sli4_params.mi_ver);
6738                 break;
6739         case LPFC_SET_ENABLE_CMF:
6740                 bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
6741                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6742                 mbox->u.mqe.un.set_feature.param_len = 4;
6743                 bf_set(lpfc_mbx_set_feature_cmf,
6744                        &mbox->u.mqe.un.set_feature, 1);
6745                 break;
6746         }
6747         return;
6748 }
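
/*
 * A minimal usage sketch for lpfc_set_features() (illustrative only, not a
 * call site in this file; 'mbox' and 'rc' are hypothetical locals): the
 * caller owns the mailbox and issues it after the payload is built, e.g.:
 *
 *      mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *      if (!mbox)
 *              return -ENOMEM;
 *      lpfc_set_features(phba, mbox, LPFC_SET_DUAL_DUMP);
 *      rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *      if (rc != MBX_SUCCESS)
 *              mempool_free(mbox, phba->mbox_mem_pool);
 */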
6749
6750 /**
6751  * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6752  * @phba: Pointer to HBA context object.
6753  *
6754  * Disable FW logging into host memory on the adapter. To
6755  * be done before reading logs from the host memory.
6756  **/
6757 void
6758 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6759 {
6760         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6761
6762         spin_lock_irq(&phba->hbalock);
6763         ras_fwlog->state = INACTIVE;
6764         spin_unlock_irq(&phba->hbalock);
6765
6766         /* Disable FW logging to host memory */
6767         writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6768                phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6769
6770         /* Wait 10ms for firmware to stop using DMA buffer */
6771         usleep_range(10 * 1000, 20 * 1000);
6772 }
6773
6774 /**
6775  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6776  * @phba: Pointer to HBA context object.
6777  *
6778  * This function is called to free memory allocated for RAS FW logging
6779  * support in the driver.
6780  **/
6781 void
6782 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6783 {
6784         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6785         struct lpfc_dmabuf *dmabuf, *next;
6786
6787         if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6788                 list_for_each_entry_safe(dmabuf, next,
6789                                     &ras_fwlog->fwlog_buff_list,
6790                                     list) {
6791                         list_del(&dmabuf->list);
6792                         dma_free_coherent(&phba->pcidev->dev,
6793                                           LPFC_RAS_MAX_ENTRY_SIZE,
6794                                           dmabuf->virt, dmabuf->phys);
6795                         kfree(dmabuf);
6796                 }
6797         }
6798
6799         if (ras_fwlog->lwpd.virt) {
6800                 dma_free_coherent(&phba->pcidev->dev,
6801                                   sizeof(uint32_t) * 2,
6802                                   ras_fwlog->lwpd.virt,
6803                                   ras_fwlog->lwpd.phys);
6804                 ras_fwlog->lwpd.virt = NULL;
6805         }
6806
6807         spin_lock_irq(&phba->hbalock);
6808         ras_fwlog->state = INACTIVE;
6809         spin_unlock_irq(&phba->hbalock);
6810 }
6811
6812 /**
6813  * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
6814  * @phba: Pointer to HBA context object.
6815  * @fwlog_buff_count: Count of buffers to be created.
6816  *
6817  * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6818  * and for the buffers that the adapter posts FW log updates into.
6819  * The buffer count is derived from the module param ras_fwlog_buffsize;
6820  * the size of each buffer posted to FW is 64K.
6821  **/
6822
6823 static int
6824 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6825                         uint32_t fwlog_buff_count)
6826 {
6827         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6828         struct lpfc_dmabuf *dmabuf;
6829         int rc = 0, i = 0;
6830
6831         /* Initialize List */
6832         INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6833
6834         /* Allocate memory for the LWPD */
6835         ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6836                                             sizeof(uint32_t) * 2,
6837                                             &ras_fwlog->lwpd.phys,
6838                                             GFP_KERNEL);
6839         if (!ras_fwlog->lwpd.virt) {
6840                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6841                                 "6185 LWPD Memory Alloc Failed\n");
6842
6843                 return -ENOMEM;
6844         }
6845
6846         ras_fwlog->fw_buffcount = fwlog_buff_count;
6847         for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6848                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6849                                  GFP_KERNEL);
6850                 if (!dmabuf) {
6851                         rc = -ENOMEM;
6852                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6853                                         "6186 Memory Alloc failed FW logging");
6854                         goto free_mem;
6855                 }
6856
6857                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6858                                                   LPFC_RAS_MAX_ENTRY_SIZE,
6859                                                   &dmabuf->phys, GFP_KERNEL);
6860                 if (!dmabuf->virt) {
6861                         kfree(dmabuf);
6862                         rc = -ENOMEM;
6863                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6864                                         "6187 DMA Alloc Failed FW logging");
6865                         goto free_mem;
6866                 }
6867                 dmabuf->buffer_tag = i;
6868                 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6869         }
6870
6871 free_mem:
6872         if (rc)
6873                 lpfc_sli4_ras_dma_free(phba);
6874
6875         return rc;
6876 }
6877
6878 /**
6879  * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6880  * @phba: pointer to lpfc hba data structure.
6881  * @pmb: pointer to the driver internal queue element for mailbox command.
6882  *
6883  * Completion handler for driver's RAS MBX command to the device.
6884  **/
6885 static void
6886 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6887 {
6888         MAILBOX_t *mb;
6889         union lpfc_sli4_cfg_shdr *shdr;
6890         uint32_t shdr_status, shdr_add_status;
6891         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6892
6893         mb = &pmb->u.mb;
6894
6895         shdr = (union lpfc_sli4_cfg_shdr *)
6896                 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6897         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6898         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6899
6900         if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6901                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6902                                 "6188 FW LOG mailbox "
6903                                 "completed with status x%x add_status x%x,"
6904                                 " mbx status x%x\n",
6905                                 shdr_status, shdr_add_status, mb->mbxStatus);
6906
6907                 ras_fwlog->ras_hwsupport = false;
6908                 goto disable_ras;
6909         }
6910
6911         spin_lock_irq(&phba->hbalock);
6912         ras_fwlog->state = ACTIVE;
6913         spin_unlock_irq(&phba->hbalock);
6914         mempool_free(pmb, phba->mbox_mem_pool);
6915
6916         return;
6917
6918 disable_ras:
6919         /* Free RAS DMA memory */
6920         lpfc_sli4_ras_dma_free(phba);
6921         mempool_free(pmb, phba->mbox_mem_pool);
6922 }
6923
6924 /**
6925  * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6926  * @phba: pointer to lpfc hba data structure.
6927  * @fwlog_level: Logging verbosity level.
6928  * @fwlog_enable: Enable/Disable logging.
6929  *
6930  * Initialize memory and post mailbox command to enable FW logging in host
6931  * memory.
6932  **/
6933 int
6934 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6935                          uint32_t fwlog_level,
6936                          uint32_t fwlog_enable)
6937 {
6938         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6939         struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6940         struct lpfc_dmabuf *dmabuf;
6941         LPFC_MBOXQ_t *mbox;
6942         uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6943         int rc = 0;
6944
6945         spin_lock_irq(&phba->hbalock);
6946         ras_fwlog->state = INACTIVE;
6947         spin_unlock_irq(&phba->hbalock);
6948
6949         fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6950                           phba->cfg_ras_fwlog_buffsize);
6951         fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
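        /*
         * Worked example with assumed values: if LPFC_RAS_MIN_BUFF_POST_SIZE
         * is 256K and LPFC_RAS_MAX_ENTRY_SIZE is the 64K noted above, then
         * cfg_ras_fwlog_buffsize = 1 yields a 256K log split across four
         * 64K buffers posted to the firmware.
         */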
6952
6953         /*
6954          * If re-enabling FW logging support, use the earlier allocated
6955          * DMA buffers while posting the MBX command.
6956          */
6957         if (!ras_fwlog->lwpd.virt) {
6958                 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6959                 if (rc) {
6960                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6961                                         "6189 FW Log Memory Allocation Failed");
6962                         return rc;
6963                 }
6964         }
6965
6966         /* Setup Mailbox command */
6967         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6968         if (!mbox) {
6969                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6970                                 "6190 RAS MBX Alloc Failed");
6971                 rc = -ENOMEM;
6972                 goto mem_free;
6973         }
6974
6975         ras_fwlog->fw_loglevel = fwlog_level;
6976         len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6977                 sizeof(struct lpfc_sli4_cfg_mhdr));
6978
6979         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6980                          LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6981                          len, LPFC_SLI4_MBX_EMBED);
6982
6983         mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6984         bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6985                fwlog_enable);
6986         bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6987                ras_fwlog->fw_loglevel);
6988         bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6989                ras_fwlog->fw_buffcount);
6990         bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6991                LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6992
6993         /* Update DMA buffer address */
6994         list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6995                 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6996
6997                 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6998                         putPaddrLow(dmabuf->phys);
6999
7000                 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7001                         putPaddrHigh(dmabuf->phys);
7002         }
7003
7004         /* Update LWPD address */
7005         mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7006         mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7007
7008         spin_lock_irq(&phba->hbalock);
7009         ras_fwlog->state = REG_INPROGRESS;
7010         spin_unlock_irq(&phba->hbalock);
7011         mbox->vport = phba->pport;
7012         mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7013
7014         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7015
7016         if (rc == MBX_NOT_FINISHED) {
7017                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7018                                 "6191 FW-Log Mailbox failed. "
7019                                 "status %d mbxStatus : x%x", rc,
7020                                 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7021                 mempool_free(mbox, phba->mbox_mem_pool);
7022                 rc = -EIO;
7023                 goto mem_free;
7024         } else
7025                 rc = 0;
7026 mem_free:
7027         if (rc)
7028                 lpfc_sli4_ras_dma_free(phba);
7029
7030         return rc;
7031 }
7032
7033 /**
7034  * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7035  * @phba: Pointer to HBA context object.
7036  *
7037  * Check if RAS is supported on the adapter and initialize it.
7038  **/
7039 void
7040 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7041 {
7042         /* Check whether RAS FW logging needs to be enabled */
7043         if (lpfc_check_fwlog_support(phba))
7044                 return;
7045
7046         lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7047                                  LPFC_RAS_ENABLE_LOGGING);
7048 }
7049
7050 /**
7051  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7052  * @phba: Pointer to HBA context object.
7053  *
7054  * This function allocates all SLI4 resource identifiers.
7055  **/
7056 int
7057 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7058 {
7059         int i, rc, error = 0;
7060         uint16_t count, base;
7061         unsigned long longs;
7062
7063         if (!phba->sli4_hba.rpi_hdrs_in_use)
7064                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7065         if (phba->sli4_hba.extents_in_use) {
7066                 /*
7067                  * The port supports resource extents. The XRI, VPI, VFI, RPI
7068                  * resource extent count must be read and allocated before
7069                  * provisioning the resource id arrays.
7070                  */
7071                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7072                     LPFC_IDX_RSRC_RDY) {
7073                         /*
7074                          * Extent-based resources are set - the driver could
7075                          * be in a port reset. Figure out if any corrective
7076                          * actions need to be taken.
7077                          */
7078                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7079                                                  LPFC_RSC_TYPE_FCOE_VFI);
7080                         if (rc != 0)
7081                                 error++;
7082                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7083                                                  LPFC_RSC_TYPE_FCOE_VPI);
7084                         if (rc != 0)
7085                                 error++;
7086                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7087                                                  LPFC_RSC_TYPE_FCOE_XRI);
7088                         if (rc != 0)
7089                                 error++;
7090                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7091                                                  LPFC_RSC_TYPE_FCOE_RPI);
7092                         if (rc != 0)
7093                                 error++;
7094
7095                         /*
7096                          * It's possible that the number of resources
7097                          * provided to this port instance changed between
7098                          * resets.  Detect this condition and reallocate
7099                          * resources.  Otherwise, there is no action.
7100                          */
7101                         if (error) {
7102                                 lpfc_printf_log(phba, KERN_INFO,
7103                                                 LOG_MBOX | LOG_INIT,
7104                                                 "2931 Detected extent resource "
7105                                                 "change.  Reallocating all "
7106                                                 "extents.\n");
7107                                 rc = lpfc_sli4_dealloc_extent(phba,
7108                                                  LPFC_RSC_TYPE_FCOE_VFI);
7109                                 rc = lpfc_sli4_dealloc_extent(phba,
7110                                                  LPFC_RSC_TYPE_FCOE_VPI);
7111                                 rc = lpfc_sli4_dealloc_extent(phba,
7112                                                  LPFC_RSC_TYPE_FCOE_XRI);
7113                                 rc = lpfc_sli4_dealloc_extent(phba,
7114                                                  LPFC_RSC_TYPE_FCOE_RPI);
7115                         } else
7116                                 return 0;
7117                 }
7118
7119                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7120                 if (unlikely(rc))
7121                         goto err_exit;
7122
7123                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7124                 if (unlikely(rc))
7125                         goto err_exit;
7126
7127                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7128                 if (unlikely(rc))
7129                         goto err_exit;
7130
7131                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7132                 if (unlikely(rc))
7133                         goto err_exit;
7134                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7135                        LPFC_IDX_RSRC_RDY);
7136                 return rc;
7137         } else {
7138                 /*
7139                  * The port does not support resource extents.  The XRI, VPI,
7140                  * VFI, RPI resource ids were determined from READ_CONFIG.
7141                  * Just allocate the bitmasks and provision the resource id
7142                  * arrays.  If a port reset is active, the resources don't
7143                  * need any action - just exit.
7144                  */
7145                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7146                     LPFC_IDX_RSRC_RDY) {
7147                         lpfc_sli4_dealloc_resource_identifiers(phba);
7148                         lpfc_sli4_remove_rpis(phba);
7149                 }
7150                 /* RPIs. */
7151                 count = phba->sli4_hba.max_cfg_param.max_rpi;
7152                 if (count <= 0) {
7153                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7154                                         "3279 Invalid provisioning of "
7155                                         "rpi:%d\n", count);
7156                         rc = -EINVAL;
7157                         goto err_exit;
7158                 }
7159                 base = phba->sli4_hba.max_cfg_param.rpi_base;
7160                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7161                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7162                                                    sizeof(unsigned long),
7163                                                    GFP_KERNEL);
7164                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7165                         rc = -ENOMEM;
7166                         goto err_exit;
7167                 }
7168                 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7169                                                  GFP_KERNEL);
7170                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7171                         rc = -ENOMEM;
7172                         goto free_rpi_bmask;
7173                 }
7174
7175                 for (i = 0; i < count; i++)
7176                         phba->sli4_hba.rpi_ids[i] = base + i;
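                /*
                 * Illustration with assumed READ_CONFIG values: rpi_base =
                 * 0x40 and max_rpi = 64 give rpi_ids[] = 0x40..0x7f, with
                 * rpi_bmask tracking in-use ids by array index.
                 */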
7177
7178                 /* VPIs. */
7179                 count = phba->sli4_hba.max_cfg_param.max_vpi;
7180                 if (count <= 0) {
7181                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7182                                         "3280 Invalid provisioning of "
7183                                         "vpi:%d\n", count);
7184                         rc = -EINVAL;
7185                         goto free_rpi_ids;
7186                 }
7187                 base = phba->sli4_hba.max_cfg_param.vpi_base;
7188                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7189                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7190                                           GFP_KERNEL);
7191                 if (unlikely(!phba->vpi_bmask)) {
7192                         rc = -ENOMEM;
7193                         goto free_rpi_ids;
7194                 }
7195                 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7196                                         GFP_KERNEL);
7197                 if (unlikely(!phba->vpi_ids)) {
7198                         rc = -ENOMEM;
7199                         goto free_vpi_bmask;
7200                 }
7201
7202                 for (i = 0; i < count; i++)
7203                         phba->vpi_ids[i] = base + i;
7204
7205                 /* XRIs. */
7206                 count = phba->sli4_hba.max_cfg_param.max_xri;
7207                 if (count <= 0) {
7208                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7209                                         "3281 Invalid provisioning of "
7210                                         "xri:%d\n", count);
7211                         rc = -EINVAL;
7212                         goto free_vpi_ids;
7213                 }
7214                 base = phba->sli4_hba.max_cfg_param.xri_base;
7215                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7216                 phba->sli4_hba.xri_bmask = kcalloc(longs,
7217                                                    sizeof(unsigned long),
7218                                                    GFP_KERNEL);
7219                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7220                         rc = -ENOMEM;
7221                         goto free_vpi_ids;
7222                 }
7223                 phba->sli4_hba.max_cfg_param.xri_used = 0;
7224                 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7225                                                  GFP_KERNEL);
7226                 if (unlikely(!phba->sli4_hba.xri_ids)) {
7227                         rc = -ENOMEM;
7228                         goto free_xri_bmask;
7229                 }
7230
7231                 for (i = 0; i < count; i++)
7232                         phba->sli4_hba.xri_ids[i] = base + i;
7233
7234                 /* VFIs. */
7235                 count = phba->sli4_hba.max_cfg_param.max_vfi;
7236                 if (count <= 0) {
7237                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7238                                         "3282 Invalid provisioning of "
7239                                         "vfi:%d\n", count);
7240                         rc = -EINVAL;
7241                         goto free_xri_ids;
7242                 }
7243                 base = phba->sli4_hba.max_cfg_param.vfi_base;
7244                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7245                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7246                                                    sizeof(unsigned long),
7247                                                    GFP_KERNEL);
7248                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7249                         rc = -ENOMEM;
7250                         goto free_xri_ids;
7251                 }
7252                 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7253                                                  GFP_KERNEL);
7254                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7255                         rc = -ENOMEM;
7256                         goto free_vfi_bmask;
7257                 }
7258
7259                 for (i = 0; i < count; i++)
7260                         phba->sli4_hba.vfi_ids[i] = base + i;
7261
7262                 /*
7263                  * Mark all resources ready.  An HBA reset doesn't need
7264                  * to reset the initialization.
7265                  */
7266                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7267                        LPFC_IDX_RSRC_RDY);
7268                 return 0;
7269         }
7270
7271  free_vfi_bmask:
7272         kfree(phba->sli4_hba.vfi_bmask);
7273         phba->sli4_hba.vfi_bmask = NULL;
7274  free_xri_ids:
7275         kfree(phba->sli4_hba.xri_ids);
7276         phba->sli4_hba.xri_ids = NULL;
7277  free_xri_bmask:
7278         kfree(phba->sli4_hba.xri_bmask);
7279         phba->sli4_hba.xri_bmask = NULL;
7280  free_vpi_ids:
7281         kfree(phba->vpi_ids);
7282         phba->vpi_ids = NULL;
7283  free_vpi_bmask:
7284         kfree(phba->vpi_bmask);
7285         phba->vpi_bmask = NULL;
7286  free_rpi_ids:
7287         kfree(phba->sli4_hba.rpi_ids);
7288         phba->sli4_hba.rpi_ids = NULL;
7289  free_rpi_bmask:
7290         kfree(phba->sli4_hba.rpi_bmask);
7291         phba->sli4_hba.rpi_bmask = NULL;
7292  err_exit:
7293         return rc;
7294 }
7295
7296 /**
7297  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7298  * @phba: Pointer to HBA context object.
7299  *
7300  * This function releases all SLI4 resource identifiers and the kernel
7301  * memory used to track them, for every resource type.
7302  **/
7303 int
7304 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7305 {
7306         if (phba->sli4_hba.extents_in_use) {
7307                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7308                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7309                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7310                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7311         } else {
7312                 kfree(phba->vpi_bmask);
7313                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7314                 kfree(phba->vpi_ids);
7315                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7316                 kfree(phba->sli4_hba.xri_bmask);
7317                 kfree(phba->sli4_hba.xri_ids);
7318                 kfree(phba->sli4_hba.vfi_bmask);
7319                 kfree(phba->sli4_hba.vfi_ids);
7320                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7321                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7322         }
7323
7324         return 0;
7325 }
7326
7327 /**
7328  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7329  * @phba: Pointer to HBA context object.
7330  * @type: The resource extent type.
7331  * @extnt_cnt: buffer to hold port extent count response
7332  * @extnt_size: buffer to hold port extent size response.
7333  *
7334  * This function calls the port to read the host allocated extents
7335  * for a particular type.
7336  **/
7337 int
7338 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7339                                uint16_t *extnt_cnt, uint16_t *extnt_size)
7340 {
7341         bool emb;
7342         int rc = 0;
7343         uint16_t curr_blks = 0;
7344         uint32_t req_len, emb_len;
7345         uint32_t alloc_len, mbox_tmo;
7346         struct list_head *blk_list_head;
7347         struct lpfc_rsrc_blks *rsrc_blk;
7348         LPFC_MBOXQ_t *mbox;
7349         void *virtaddr = NULL;
7350         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7351         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7352         union  lpfc_sli4_cfg_shdr *shdr;
7353
7354         switch (type) {
7355         case LPFC_RSC_TYPE_FCOE_VPI:
7356                 blk_list_head = &phba->lpfc_vpi_blk_list;
7357                 break;
7358         case LPFC_RSC_TYPE_FCOE_XRI:
7359                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7360                 break;
7361         case LPFC_RSC_TYPE_FCOE_VFI:
7362                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7363                 break;
7364         case LPFC_RSC_TYPE_FCOE_RPI:
7365                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7366                 break;
7367         default:
7368                 return -EIO;
7369         }
7370
7371         /* Count the number of extents currently allocated for this type. */
7372         list_for_each_entry(rsrc_blk, blk_list_head, list) {
7373                 if (curr_blks == 0) {
7374                         /*
7375                          * The GET_ALLOCATED mailbox does not return the size,
7376                          * just the count.  The size should be just the size
7377                          * stored in the current allocated block and all sizes
7378                          * for an extent type are the same so set the return
7379                          * value now.
7380                          */
7381                         *extnt_size = rsrc_blk->rsrc_size;
7382                 }
7383                 curr_blks++;
7384         }
7385
7386         /*
7387          * Calculate the size of an embedded mailbox.  The uint32_t
7388          * accounts for extents-specific word.
7389          */
7390         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7391                 sizeof(uint32_t);
7392
7393         /*
7394          * Presume the allocation and response will fit into an embedded
7395          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
7396          */
7397         emb = LPFC_SLI4_MBX_EMBED;
7398         req_len = curr_blks * sizeof(uint16_t);
7399         if (req_len > emb_len) {
7400                 req_len = curr_blks * sizeof(uint16_t) +
7401                         sizeof(union lpfc_sli4_cfg_shdr) +
7402                         sizeof(uint32_t);
7403                 emb = LPFC_SLI4_MBX_NEMBED;
7404         }
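        /*
         * Sizing intuition (illustrative): each extent base id is a u16, so
         * an embedded mailbox can return roughly emb_len / sizeof(uint16_t)
         * ids; a larger curr_blks forces the non-embedded, SGE-backed form.
         */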
7405
7406         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7407         if (!mbox)
7408                 return -ENOMEM;
7409         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7410
7411         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7412                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7413                                      req_len, emb);
7414         if (alloc_len < req_len) {
7415                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7416                         "2983 Allocated DMA memory size (x%x) is "
7417                         "less than the requested DMA memory "
7418                         "size (x%x)\n", alloc_len, req_len);
7419                 rc = -ENOMEM;
7420                 goto err_exit;
7421         }
7422         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7423         if (unlikely(rc)) {
7424                 rc = -EIO;
7425                 goto err_exit;
7426         }
7427
7428         if (!phba->sli4_hba.intr_enable)
7429                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7430         else {
7431                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7432                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7433         }
7434
7435         if (unlikely(rc)) {
7436                 rc = -EIO;
7437                 goto err_exit;
7438         }
7439
7440         /*
7441          * Figure out where the response is located.  Then get local pointers
7442          * to the response data.  The port is not guaranteed to return all
7443          * of the extent counts requested, so update the local variable with
7444          * the allocated count returned by the port.
7445          */
7446         if (emb == LPFC_SLI4_MBX_EMBED) {
7447                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7448                 shdr = &rsrc_ext->header.cfg_shdr;
7449                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7450         } else {
7451                 virtaddr = mbox->sge_array->addr[0];
7452                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7453                 shdr = &n_rsrc->cfg_shdr;
7454                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7455         }
7456
7457         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7458                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7459                         "2984 Failed to read allocated resources "
7460                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7461                         type,
7462                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
7463                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7464                 rc = -EIO;
7465                 goto err_exit;
7466         }
7467  err_exit:
7468         lpfc_sli4_mbox_cmd_free(phba, mbox);
7469         return rc;
7470 }
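
/*
 * A minimal usage sketch for lpfc_sli4_get_allocated_extnts() (illustrative
 * only; 'cnt', 'size', 'rc' and 'total_xris' are hypothetical locals):
 *
 *      uint16_t cnt = 0, size = 0;
 *      rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *                                          &cnt, &size);
 *      if (!rc)
 *              total_xris = cnt * size;
 */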
7471
7472 /**
7473  * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7474  * @phba: pointer to lpfc hba data structure.
7475  * @sgl_list: linked link of sgl buffers to post
7476  * @cnt: number of linked list buffers
7477  *
7478  * This routine walks the list of buffers that have been allocated and
7479  * reposts them to the port using SGL block post. This is needed after a
7480  * pci_function_reset/warm_start or start. It attempts to construct blocks
7481  * of buffer sgls that contain contiguous xris and uses the non-embedded
7482  * SGL block post mailbox command to post them to the port. For a single
7483  * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
7484  * post mailbox command for posting.
7485  *
7486  * Returns: 0 = success, non-zero failure.
7487  **/
7488 static int
7489 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7490                           struct list_head *sgl_list, int cnt)
7491 {
7492         struct lpfc_sglq *sglq_entry = NULL;
7493         struct lpfc_sglq *sglq_entry_next = NULL;
7494         struct lpfc_sglq *sglq_entry_first = NULL;
7495         int status, total_cnt;
7496         int post_cnt = 0, num_posted = 0, block_cnt = 0;
7497         int last_xritag = NO_XRI;
7498         LIST_HEAD(prep_sgl_list);
7499         LIST_HEAD(blck_sgl_list);
7500         LIST_HEAD(allc_sgl_list);
7501         LIST_HEAD(post_sgl_list);
7502         LIST_HEAD(free_sgl_list);
7503
7504         spin_lock_irq(&phba->hbalock);
7505         spin_lock(&phba->sli4_hba.sgl_list_lock);
7506         list_splice_init(sgl_list, &allc_sgl_list);
7507         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7508         spin_unlock_irq(&phba->hbalock);
7509
7510         total_cnt = cnt;
7511         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7512                                  &allc_sgl_list, list) {
7513                 list_del_init(&sglq_entry->list);
7514                 block_cnt++;
7515                 if ((last_xritag != NO_XRI) &&
7516                     (sglq_entry->sli4_xritag != last_xritag + 1)) {
7517                         /* a hole in xri block, form a sgl posting block */
7518                         list_splice_init(&prep_sgl_list, &blck_sgl_list);
7519                         post_cnt = block_cnt - 1;
7520                         /* prepare list for next posting block */
7521                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
7522                         block_cnt = 1;
7523                 } else {
7524                         /* prepare list for next posting block */
7525                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
7526                         /* enough sgls for non-embed sgl mbox command */
7527                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7528                                 list_splice_init(&prep_sgl_list,
7529                                                  &blck_sgl_list);
7530                                 post_cnt = block_cnt;
7531                                 block_cnt = 0;
7532                         }
7533                 }
7534                 num_posted++;
7535
7536                 /* keep track of last sgl's xritag */
7537                 last_xritag = sglq_entry->sli4_xritag;
7538
7539                 /* end of repost sgl list condition for buffers */
7540                 if (num_posted == total_cnt) {
7541                         if (post_cnt == 0) {
7542                                 list_splice_init(&prep_sgl_list,
7543                                                  &blck_sgl_list);
7544                                 post_cnt = block_cnt;
7545                         } else if (block_cnt == 1) {
7546                                 status = lpfc_sli4_post_sgl(phba,
7547                                                 sglq_entry->phys, 0,
7548                                                 sglq_entry->sli4_xritag);
7549                                 if (!status) {
7550                                         /* successful, put sgl to posted list */
7551                                         list_add_tail(&sglq_entry->list,
7552                                                       &post_sgl_list);
7553                                 } else {
7554                                         /* Failure, put sgl to free list */
7555                                         lpfc_printf_log(phba, KERN_WARNING,
7556                                                 LOG_SLI,
7557                                                 "3159 Failed to post "
7558                                                 "sgl, xritag:x%x\n",
7559                                                 sglq_entry->sli4_xritag);
7560                                         list_add_tail(&sglq_entry->list,
7561                                                       &free_sgl_list);
7562                                         total_cnt--;
7563                                 }
7564                         }
7565                 }
7566
7567                 /* continue until a non-embedded page worth of sgls */
7568                 if (post_cnt == 0)
7569                         continue;
7570
7571                 /* post the buffer list sgls as a block */
7572                 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7573                                                  post_cnt);
7574
7575                 if (!status) {
7576                         /* success, put sgl list to posted sgl list */
7577                         list_splice_init(&blck_sgl_list, &post_sgl_list);
7578                 } else {
7579                         /* Failure, put sgl list to free sgl list */
7580                         sglq_entry_first = list_first_entry(&blck_sgl_list,
7581                                                             struct lpfc_sglq,
7582                                                             list);
7583                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7584                                         "3160 Failed to post sgl-list, "
7585                                         "xritag:x%x-x%x\n",
7586                                         sglq_entry_first->sli4_xritag,
7587                                         (sglq_entry_first->sli4_xritag +
7588                                          post_cnt - 1));
7589                         list_splice_init(&blck_sgl_list, &free_sgl_list);
7590                         total_cnt -= post_cnt;
7591                 }
7592
7593                 /* don't reset xritag due to hole in xri block */
7594                 if (block_cnt == 0)
7595                         last_xritag = NO_XRI;
7596
7597                 /* reset sgl post count for next round of posting */
7598                 post_cnt = 0;
7599         }
7600
7601         /* free the sgls failed to post */
7602         lpfc_free_sgl_list(phba, &free_sgl_list);
7603
7604         /* push sgls posted to the available list */
7605         if (!list_empty(&post_sgl_list)) {
7606                 spin_lock_irq(&phba->hbalock);
7607                 spin_lock(&phba->sli4_hba.sgl_list_lock);
7608                 list_splice_init(&post_sgl_list, sgl_list);
7609                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7610                 spin_unlock_irq(&phba->hbalock);
7611         } else {
7612                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7613                                 "3161 Failure to post sgl to port.\n");
7614                 return -EIO;
7615         }
7616
7617         /* return the number of XRIs actually posted */
7618         return total_cnt;
7619 }
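
/*
 * Worked example of the block-forming logic above (xri values assumed):
 * for sgls with xris 10, 11, 12, 50, 51, the hole at 50 closes the first
 * run, so {10, 11, 12} go out as one non-embedded block post; {50, 51}
 * are posted as a block when the list ends. A lone trailing xri isolated
 * by a hole is instead posted individually via lpfc_sli4_post_sgl().
 */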
7620
7621 /**
7622  * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7623  * @phba: pointer to lpfc hba data structure.
7624  *
7625  * This routine walks the list of nvme buffers that have been allocated and
7626  * reposts them to the port using SGL block post. This is needed after a
7627  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7628  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7629  * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7630  *
7631  * Returns: 0 = success, non-zero failure.
7632  **/
7633 static int
7634 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7635 {
7636         LIST_HEAD(post_nblist);
7637         int num_posted, rc = 0;
7638
7639         /* move all NVME buffers that need reposting onto a local list */
7640         lpfc_io_buf_flush(phba, &post_nblist);
7641
7642         /* post the list of nvme buffer sgls to port if available */
7643         if (!list_empty(&post_nblist)) {
7644                 num_posted = lpfc_sli4_post_io_sgl_list(
7645                         phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7646                 /* failed to post any nvme buffer, return error */
7647                 if (num_posted == 0)
7648                         rc = -EIO;
7649         }
7650         return rc;
7651 }
7652
7653 static void
7654 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7655 {
7656         uint32_t len;
7657
7658         len = sizeof(struct lpfc_mbx_set_host_data) -
7659                 sizeof(struct lpfc_sli4_cfg_mhdr);
7660         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7661                          LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7662                          LPFC_SLI4_MBX_EMBED);
7663
7664         mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7665         mbox->u.mqe.un.set_host_data.param_len =
7666                                         LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7667         snprintf(mbox->u.mqe.un.set_host_data.un.data,
7668                  LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7669                  "Linux %s v"LPFC_DRIVER_VERSION,
7670                  (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7671 }
7672
7673 int
7674 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7675                     struct lpfc_queue *drq, int count, int idx)
7676 {
7677         int rc, i;
7678         struct lpfc_rqe hrqe;
7679         struct lpfc_rqe drqe;
7680         struct lpfc_rqb *rqbp;
7681         unsigned long flags;
7682         struct rqb_dmabuf *rqb_buffer;
7683         LIST_HEAD(rqb_buf_list);
7684
7685         rqbp = hrq->rqbp;
7686         for (i = 0; i < count; i++) {
7687                 spin_lock_irqsave(&phba->hbalock, flags);
7688                 /* IF RQ is already full, don't bother */
7689                 /* If RQ is already full, don't bother */
7690                         spin_unlock_irqrestore(&phba->hbalock, flags);
7691                         break;
7692                 }
7693                 spin_unlock_irqrestore(&phba->hbalock, flags);
7694
7695                 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7696                 if (!rqb_buffer)
7697                         break;
7698                 rqb_buffer->hrq = hrq;
7699                 rqb_buffer->drq = drq;
7700                 rqb_buffer->idx = idx;
7701                 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7702         }
7703
7704         spin_lock_irqsave(&phba->hbalock, flags);
7705         while (!list_empty(&rqb_buf_list)) {
7706                 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7707                                  hbuf.list);
7708
7709                 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7710                 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7711                 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7712                 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7713                 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7714                 if (rc < 0) {
7715                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7716                                         "6421 Cannot post to HRQ %d: %x %x %x "
7717                                         "DRQ %x %x\n",
7718                                         hrq->queue_id,
7719                                         hrq->host_index,
7720                                         hrq->hba_index,
7721                                         hrq->entry_count,
7722                                         drq->host_index,
7723                                         drq->hba_index);
7724                         rqbp->rqb_free_buffer(phba, rqb_buffer);
7725                 } else {
7726                         list_add_tail(&rqb_buffer->hbuf.list,
7727                                       &rqbp->rqb_buffer_list);
7728                         rqbp->buffer_count++;
7729                 }
7730         }
7731         spin_unlock_irqrestore(&phba->hbalock, flags);
7732         return 1;
7733 }
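
/*
 * Usage note: buffers are posted per header/data RQ pair. The nvmet
 * MRQ setup in lpfc_sli4_hba_setup() below does, for each MRQ index i:
 *
 *	lpfc_post_rq_buffer(phba, phba->sli4_hba.nvmet_mrq_hdr[i],
 *			    phba->sli4_hba.nvmet_mrq_data[i],
 *			    phba->cfg_nvmet_mrq_post, i);
 */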
7734
7735 static void
7736 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7737 {
7738         struct lpfc_vport *vport = pmb->vport;
7739         union lpfc_sli4_cfg_shdr *shdr;
7740         u32 shdr_status, shdr_add_status;
7741         u32 sig, acqe;
7742
7743         /* Two outcomes: (1) Set features was successful and EDC negotiation
7744          * is done. (2) The mailbox failed, so fall back to FPIN support only.
7745          */
7746         shdr = (union lpfc_sli4_cfg_shdr *)
7747                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7748         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7749         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7750         if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7751                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7752                                 "2516 CGN SET_FEATURE mbox failed with "
7753                                 "status x%x add_status x%x, mbx status x%x "
7754                                 "Reset Congestion to FPINs only\n",
7755                                 shdr_status, shdr_add_status,
7756                                 pmb->u.mb.mbxStatus);
7757                 /* If there is a mbox error, move on to RDF */
7758                 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7759                 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7760                 goto out;
7761         }
7762
7763         /* Zero out Congestion Signal ACQE counter */
7764         phba->cgn_acqe_cnt = 0;
7765         atomic64_set(&phba->cgn_acqe_stat.warn, 0);
7766         atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
7767
7768         acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7769                       &pmb->u.mqe.un.set_feature);
7770         sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7771                      &pmb->u.mqe.un.set_feature);
7772         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7773                         "4620 SET_FEATURES Success: Freq: %ds %dms "
7774                         "Reg: x%x x%x\n", acqe, sig,
7775                         phba->cgn_reg_signal, phba->cgn_reg_fpin);
7776 out:
7777         mempool_free(pmb, phba->mbox_mem_pool);
7778
7779         /* Register for FPIN events from the fabric now that the
7780          * EDC common_set_features has completed.
7781          */
7782         lpfc_issue_els_rdf(vport, 0);
7783 }
7784
7785 int
7786 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7787 {
7788         LPFC_MBOXQ_t *mboxq;
7789         u32 rc;
7790
7791         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7792         if (!mboxq)
7793                 goto out_rdf;
7794
7795         lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7796         mboxq->vport = phba->pport;
7797         mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7798
7799         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7800                         "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7801                         "Reg: x%x x%x\n",
7802                         phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7803                         phba->cgn_reg_signal, phba->cgn_reg_fpin);
7804
7805         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7806         if (rc == MBX_NOT_FINISHED)
7807                 goto out;
7808         return 0;
7809
7810 out:
7811         mempool_free(mboxq, phba->mbox_mem_pool);
7812 out_rdf:
7813         /* If there is a mbox error, move on to RDF */
7814         phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7815         phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7816         lpfc_issue_els_rdf(phba->pport, 0);
7817         return -EIO;
7818 }
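
/*
 * Caller contract (summary): a return of 0 only means the SET_FEATURES
 * mailbox was queued; the final congestion registration state is
 * settled in lpfc_mbx_cmpl_cgn_set_ftrs(). On -EIO the port has already
 * been dropped back to FPIN-only signaling and RDF has been issued, so
 * no additional fallback is required of the caller.
 */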
7819
7820 /**
7821  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7822  * @phba: pointer to lpfc hba data structure.
7823  *
7824  * This routine initializes the per-cq idle_stat to dynamically dictate
7825  * polling decisions.
7826  *
7827  * Return codes:
7828  *   None
7829  **/
7830 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7831 {
7832         int i;
7833         struct lpfc_sli4_hdw_queue *hdwq;
7834         struct lpfc_queue *cq;
7835         struct lpfc_idle_stat *idle_stat;
7836         u64 wall;
7837
7838         for_each_present_cpu(i) {
7839                 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7840                 cq = hdwq->io_cq;
7841
7842                 /* Skip if we've already handled this cq's primary CPU */
7843                 if (cq->chann != i)
7844                         continue;
7845
7846                 idle_stat = &phba->sli4_hba.idle_stat[i];
7847
7848                 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7849                 idle_stat->prev_wall = wall;
7850
7851                 if (phba->nvmet_support ||
7852                     phba->cmf_active_mode != LPFC_CFG_OFF)
7853                         cq->poll_mode = LPFC_QUEUE_WORK;
7854                 else
7855                         cq->poll_mode = LPFC_IRQ_POLL;
7856         }
7857
7858         if (!phba->nvmet_support)
7859                 schedule_delayed_work(&phba->idle_stat_delay_work,
7860                                       msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7861 }
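
/*
 * Follow-up accounting (sketch; the exact thresholds live in the
 * idle_stat delayed work, not here): each run resamples
 * get_cpu_idle_time() and derives utilization from the deltas,
 *
 *	busy_pct = 100 - div64_u64(100 * (idle2 - idle1),
 *				   wall2 - wall1);
 *
 * which drives the choice between LPFC_IRQ_POLL and LPFC_QUEUE_WORK.
 */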
7862
7863 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7864 {
7865         uint32_t if_type;
7866
7867         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7868         if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7869             if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7870                 struct lpfc_register reg_data;
7871
7872                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7873                                &reg_data.word0))
7874                         return;
7875
7876                 if (bf_get(lpfc_sliport_status_dip, &reg_data))
7877                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7878                                         "2904 Firmware Dump Image Present"
7879                                         " on Adapter\n");
7880         }
7881 }
7882
7883 /**
7884  * lpfc_cmf_setup - Initialize CMF and MI support
7885  * @phba: Pointer to HBA context object.
7886  *
7887  * This is called from HBA setup during driver load or when the HBA
7888  * comes online. This does all the initialization to support CMF and MI.
7889  **/
7890 static int
7891 lpfc_cmf_setup(struct lpfc_hba *phba)
7892 {
7893         LPFC_MBOXQ_t *mboxq;
7894         struct lpfc_mqe *mqe;
7895         struct lpfc_dmabuf *mp;
7896         struct lpfc_pc_sli4_params *sli4_params;
7897         struct lpfc_sli4_parameters *mbx_sli4_parameters;
7898         int length;
7899         int rc, cmf, mi_ver;
7900
7901         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7902         if (!mboxq)
7903                 return -ENOMEM;
7904         mqe = &mboxq->u.mqe;
7905
7906         /* Read the port's SLI4 Config Parameters */
7907         length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
7908                   sizeof(struct lpfc_sli4_cfg_mhdr));
7909         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7910                          LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
7911                          length, LPFC_SLI4_MBX_EMBED);
7912
7913         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7914         if (unlikely(rc)) {
7915                 mempool_free(mboxq, phba->mbox_mem_pool);
7916                 return rc;
7917         }
7918
7919         /* Gather info on CMF and MI support */
7920         sli4_params = &phba->sli4_hba.pc_sli4_params;
7921         mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
7922         sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
7923         sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters);
7924
7925         /* Are we forcing MI off via module parameter? */
7926         if (!phba->cfg_enable_mi)
7927                 sli4_params->mi_ver = 0;
7928
7929         /* Always try to enable MI feature if we can */
7930         if (sli4_params->mi_ver) {
7931                 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
7932                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7933                 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
7934                                  &mboxq->u.mqe.un.set_feature);
7935
7936                 if (rc == MBX_SUCCESS) {
7937                         if (mi_ver) {
7938                                 lpfc_printf_log(phba,
7939                                                 KERN_WARNING, LOG_CGN_MGMT,
7940                                                 "6215 MI is enabled\n");
7941                                 sli4_params->mi_ver = mi_ver;
7942                         } else {
7943                                 lpfc_printf_log(phba,
7944                                                 KERN_WARNING, LOG_CGN_MGMT,
7945                                                 "6338 MI is disabled\n");
7946                                 sli4_params->mi_ver = 0;
7947                         }
7948                 } else {
7949                         /* mi_ver is already set from GET_SLI4_PARAMETERS */
7950                         lpfc_printf_log(phba, KERN_INFO,
7951                                         LOG_CGN_MGMT | LOG_INIT,
7952                                         "6245 Enable MI Mailbox x%x (x%x/x%x) "
7953                                         "failed, rc:x%x mi:x%x\n",
7954                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7955                                         lpfc_sli_config_mbox_subsys_get
7956                                                 (phba, mboxq),
7957                                         lpfc_sli_config_mbox_opcode_get
7958                                                 (phba, mboxq),
7959                                         rc, sli4_params->mi_ver);
7960                 }
7961         } else {
7962                 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
7963                                 "6217 MI is disabled\n");
7964         }
7965
7966         /* Ensure FDMI is enabled for MI if enable_mi is set */
7967         if (sli4_params->mi_ver)
7968                 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
7969
7970         /* Always try to enable CMF feature if we can */
7971         if (sli4_params->cmf) {
7972                 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
7973                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7974                 cmf = bf_get(lpfc_mbx_set_feature_cmf,
7975                              &mboxq->u.mqe.un.set_feature);
7976                 if (rc == MBX_SUCCESS && cmf) {
7977                         lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
7978                                         "6218 CMF is enabled: mode %d\n",
7979                                         phba->cmf_active_mode);
7980                 } else {
7981                         lpfc_printf_log(phba, KERN_WARNING,
7982                                         LOG_CGN_MGMT | LOG_INIT,
7983                                         "6219 Enable CMF Mailbox x%x (x%x/x%x) "
7984                                         "failed, rc:x%x dd:x%x\n",
7985                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7986                                         lpfc_sli_config_mbox_subsys_get
7987                                                 (phba, mboxq),
7988                                         lpfc_sli_config_mbox_opcode_get
7989                                                 (phba, mboxq),
7990                                         rc, cmf);
7991                         sli4_params->cmf = 0;
7992                         phba->cmf_active_mode = LPFC_CFG_OFF;
7993                         goto no_cmf;
7994                 }
7995
7996                 /* Allocate Congestion Information Buffer */
7997                 if (!phba->cgn_i) {
7998                         mp = kmalloc(sizeof(*mp), GFP_KERNEL);
7999                         if (mp)
8000                                 mp->virt = dma_alloc_coherent
8001                                                 (&phba->pcidev->dev,
8002                                                 sizeof(struct lpfc_cgn_info),
8003                                                 &mp->phys, GFP_KERNEL);
8004                         if (!mp || !mp->virt) {
8005                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8006                                                 "2640 Failed to alloc memory "
8007                                                 "for Congestion Info\n");
8008                                 kfree(mp);
8009                                 sli4_params->cmf = 0;
8010                                 phba->cmf_active_mode = LPFC_CFG_OFF;
8011                                 goto no_cmf;
8012                         }
8013                         phba->cgn_i = mp;
8014
8015                         /* initialize congestion buffer info */
8016                         lpfc_init_congestion_buf(phba);
8017                         lpfc_init_congestion_stat(phba);
8018                 }
8019
8020                 rc = lpfc_sli4_cgn_params_read(phba);
8021                 if (rc < 0) {
8022                         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8023                                         "6242 Error reading Cgn Params (%d)\n",
8024                                         rc);
8025                         /* Ensure CGN Mode is off */
8026                         sli4_params->cmf = 0;
8027                 } else if (!rc) {
8028                         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8029                                         "6243 CGN Event empty object.\n");
8030                         /* Ensure CGN Mode is off */
8031                         sli4_params->cmf = 0;
8032                 }
8033         } else {
8034 no_cmf:
8035                 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8036                                 "6220 CMF is disabled\n");
8037         }
8038
8039         /* Only register congestion buffer with firmware if BOTH
8040          * CMF and MI are enabled.
8041          */
8042         if (sli4_params->cmf && sli4_params->mi_ver) {
8043                 rc = lpfc_reg_congestion_buf(phba);
8044                 if (rc) {
8045                         dma_free_coherent(&phba->pcidev->dev,
8046                                           sizeof(struct lpfc_cgn_info),
8047                                           phba->cgn_i->virt, phba->cgn_i->phys);
8048                         kfree(phba->cgn_i);
8049                         phba->cgn_i = NULL;
8050                         /* Ensure CGN Mode is off */
8051                         phba->cmf_active_mode = LPFC_CFG_OFF;
8052                         return 0;
8053                 }
8054         }
8055         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8056                         "6470 Setup MI version %d CMF %d mode %d\n",
8057                         sli4_params->mi_ver, sli4_params->cmf,
8058                         phba->cmf_active_mode);
8059
8060         mempool_free(mboxq, phba->mbox_mem_pool);
8061
8062         /* Initialize atomic counters */
8063         atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8064         atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8065         atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8066         atomic_set(&phba->cgn_sync_warn_cnt, 0);
8067         atomic_set(&phba->cgn_driver_evt_cnt, 0);
8068         atomic_set(&phba->cgn_latency_evt_cnt, 0);
8069         atomic64_set(&phba->cgn_latency_evt, 0);
8070
8071         phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8072
8073         /* Allocate RX Monitor Buffer */
8074         if (!phba->rxtable) {
8075                 phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
8076                                               sizeof(struct rxtable_entry),
8077                                               GFP_KERNEL);
8078                 if (!phba->rxtable) {
8079                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8080                                         "2644 Failed to alloc memory "
8081                                         "for RX Monitor Buffer\n");
8082                         return -ENOMEM;
8083                 }
8084         }
8085         atomic_set(&phba->rxtable_idx_head, 0);
8086         atomic_set(&phba->rxtable_idx_tail, 0);
8087         return 0;
8088 }
8089
8090 static int
8091 lpfc_set_host_tm(struct lpfc_hba *phba)
8092 {
8093         LPFC_MBOXQ_t *mboxq;
8094         uint32_t len, rc;
8095         struct timespec64 cur_time;
8096         struct tm broken;
8097         uint32_t month, day, year;
8098         uint32_t hour, minute, second;
8099         struct lpfc_mbx_set_host_date_time *tm;
8100
8101         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8102         if (!mboxq)
8103                 return -ENOMEM;
8104
8105         len = sizeof(struct lpfc_mbx_set_host_data) -
8106                 sizeof(struct lpfc_sli4_cfg_mhdr);
8107         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8108                          LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8109                          LPFC_SLI4_MBX_EMBED);
8110
8111         mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8112         mboxq->u.mqe.un.set_host_data.param_len =
8113                         sizeof(struct lpfc_mbx_set_host_date_time);
8114         tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8115         ktime_get_real_ts64(&cur_time);
8116         time64_to_tm(cur_time.tv_sec, 0, &broken);
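        /* struct tm months are zero-based and tm_year counts from 1900;
         * the mailbox expects a one-based month and a two-digit year
         * relative to 2000, hence the +1 and -100 adjustments below.
         */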
8117         month = broken.tm_mon + 1;
8118         day = broken.tm_mday;
8119         year = broken.tm_year - 100;
8120         hour = broken.tm_hour;
8121         minute = broken.tm_min;
8122         second = broken.tm_sec;
8123         bf_set(lpfc_mbx_set_host_month, tm, month);
8124         bf_set(lpfc_mbx_set_host_day, tm, day);
8125         bf_set(lpfc_mbx_set_host_year, tm, year);
8126         bf_set(lpfc_mbx_set_host_hour, tm, hour);
8127         bf_set(lpfc_mbx_set_host_min, tm, minute);
8128         bf_set(lpfc_mbx_set_host_sec, tm, second);
8129
8130         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8131         mempool_free(mboxq, phba->mbox_mem_pool);
8132         return rc;
8133 }
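
/*
 * Worked example (illustrative): for a host clock of
 * 2021-09-15 14:03:27 UTC, time64_to_tm() yields tm_mon = 8 and
 * tm_year = 121, so the mailbox fields become month = 9, day = 15,
 * year = 21, hour = 14, minute = 3, second = 27.
 */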
8134
8135 /**
8136  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8137  * @phba: Pointer to HBA context object.
8138  *
8139  * This function is the main SLI4 device initialization PCI function. This
8140  * function is called by the HBA initialization code, HBA reset code and
8141  * HBA error attention handler code. Caller is not required to hold any
8142  * locks.
8143  **/
8144 int
8145 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8146 {
8147         int rc, i, cnt, len, dd;
8148         LPFC_MBOXQ_t *mboxq;
8149         struct lpfc_mqe *mqe;
8150         uint8_t *vpd;
8151         uint32_t vpd_size;
8152         uint32_t ftr_rsp = 0;
8153         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8154         struct lpfc_vport *vport = phba->pport;
8155         struct lpfc_dmabuf *mp;
8156         struct lpfc_rqb *rqbp;
8157         u32 flg;
8158
8159         /* Perform a PCI function reset to start from clean */
8160         rc = lpfc_pci_function_reset(phba);
8161         if (unlikely(rc))
8162                 return -ENODEV;
8163
8164         /* Check the HBA Host Status Register for readiness */
8165         rc = lpfc_sli4_post_status_check(phba);
8166         if (unlikely(rc))
8167                 return -ENODEV;
8168         else {
8169                 spin_lock_irq(&phba->hbalock);
8170                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8171                 flg = phba->sli.sli_flag;
8172                 spin_unlock_irq(&phba->hbalock);
8173                 /* Allow a little time after setting SLI_ACTIVE for any polled
8174                  * MBX commands to complete via BSG.
8175                  */
8176                 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8177                         msleep(20);
8178                         spin_lock_irq(&phba->hbalock);
8179                         flg = phba->sli.sli_flag;
8180                         spin_unlock_irq(&phba->hbalock);
8181                 }
8182         }
8183
8184         lpfc_sli4_dip(phba);
8185
8186         /*
8187          * Allocate a single mailbox container for initializing the
8188          * port.
8189          */
8190         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8191         if (!mboxq)
8192                 return -ENOMEM;
8193
8194         /* Issue READ_REV to collect vpd and FW information. */
8195         vpd_size = SLI4_PAGE_SIZE;
8196         vpd = kzalloc(vpd_size, GFP_KERNEL);
8197         if (!vpd) {
8198                 rc = -ENOMEM;
8199                 goto out_free_mbox;
8200         }
8201
8202         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8203         if (unlikely(rc)) {
8204                 kfree(vpd);
8205                 goto out_free_mbox;
8206         }
8207
8208         mqe = &mboxq->u.mqe;
8209         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8210         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8211                 phba->hba_flag |= HBA_FCOE_MODE;
8212                 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8213         } else {
8214                 phba->hba_flag &= ~HBA_FCOE_MODE;
8215         }
8216
8217         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8218                 LPFC_DCBX_CEE_MODE)
8219                 phba->hba_flag |= HBA_FIP_SUPPORT;
8220         else
8221                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8222
8223         phba->hba_flag &= ~HBA_IOQ_FLUSH;
8224
8225         if (phba->sli_rev != LPFC_SLI_REV4) {
8226                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8227                         "0376 READ_REV Error. SLI Level %d "
8228                         "FCoE enabled %d\n",
8229                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8230                 rc = -EIO;
8231                 kfree(vpd);
8232                 goto out_free_mbox;
8233         }
8234
8235         rc = lpfc_set_host_tm(phba);
8236         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8237                         "6468 Set host date / time: Status x%x:\n", rc);
8238
8239         /*
8240          * Continue initialization with default values even if driver failed
8241          * to read FCoE param config regions; parameters are only read if
8242          * the board is FCoE.
8243          */
8244         if (phba->hba_flag & HBA_FCOE_MODE &&
8245             lpfc_sli4_read_fcoe_params(phba))
8246                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8247                         "2570 Failed to read FCoE parameters\n");
8248
8249         /*
8250          * Retrieve the sli4 device physical port name; failure to do so
8251          * is considered non-fatal.
8252          */
8253         rc = lpfc_sli4_retrieve_pport_name(phba);
8254         if (!rc)
8255                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8256                                 "3080 Successful retrieving SLI4 device "
8257                                 "physical port name: %s.\n", phba->Port);
8258
8259         rc = lpfc_sli4_get_ctl_attr(phba);
8260         if (!rc)
8261                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8262                                 "8351 Successful retrieving SLI4 device "
8263                                 "CTL ATTR\n");
8264
8265         /*
8266          * Evaluate the read rev and vpd data. Populate the driver
8267          * state with the results. If this routine fails, the failure
8268          * is not fatal as the driver will use generic values.
8269          */
8270         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8271         if (unlikely(!rc)) {
8272                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8273                                 "0377 Error %d parsing vpd. "
8274                                 "Using defaults.\n", rc);
8275                 rc = 0;
8276         }
8277         kfree(vpd);
8278
8279         /* Save information as VPD data */
8280         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8281         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8282
8283         /*
8284          * The first G7 ASIC doesn't support the standard 0x5a NVME cmd
8285          * descriptor type/subtype, so disable NVME command embedding there.
8286          */
8287         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8288                         LPFC_SLI_INTF_IF_TYPE_6) &&
8289             (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8290             (phba->vpd.rev.smRev == 0) &&
8291             (phba->cfg_nvme_embed_cmd == 1))
8292                 phba->cfg_nvme_embed_cmd = 0;
8293
8294         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8295         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8296                                          &mqe->un.read_rev);
8297         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8298                                        &mqe->un.read_rev);
8299         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8300                                             &mqe->un.read_rev);
8301         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8302                                            &mqe->un.read_rev);
8303         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8304         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8305         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8306         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8307         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8308         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8309         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8310                         "(%d):0380 READ_REV Status x%x "
8311                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8312                         mboxq->vport ? mboxq->vport->vpi : 0,
8313                         bf_get(lpfc_mqe_status, mqe),
8314                         phba->vpd.rev.opFwName,
8315                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8316                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8317
8318         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8319             LPFC_SLI_INTF_IF_TYPE_0) {
8320                 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8321                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8322                 if (rc == MBX_SUCCESS) {
8323                         phba->hba_flag |= HBA_RECOVERABLE_UE;
8324                         /* Set 1Sec interval to detect UE */
8325                         phba->eratt_poll_interval = 1;
8326                         phba->sli4_hba.ue_to_sr = bf_get(
8327                                         lpfc_mbx_set_feature_UESR,
8328                                         &mboxq->u.mqe.un.set_feature);
8329                         phba->sli4_hba.ue_to_rp = bf_get(
8330                                         lpfc_mbx_set_feature_UERP,
8331                                         &mboxq->u.mqe.un.set_feature);
8332                 }
8333         }
8334
8335         if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8336                 /* Enable MDS Diagnostics only if the SLI Port supports it */
8337                 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8338                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8339                 if (rc != MBX_SUCCESS)
8340                         phba->mds_diags_support = 0;
8341         }
8342
8343         /*
8344          * Discover the port's supported feature set and match it against the
8345          * host's requests.
8346          */
8347         lpfc_request_features(phba, mboxq);
8348         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8349         if (unlikely(rc)) {
8350                 rc = -EIO;
8351                 goto out_free_mbox;
8352         }
8353
8354         /* Disable VMID if app header is not supported */
8355         if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8356                                                   &mqe->un.req_ftrs))) {
8357                 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8358                 phba->cfg_vmid_app_header = 0;
8359                 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8360                                 "1242 vmid feature not supported\n");
8361         }
8362
8363         /*
8364          * The port must support FCP initiator mode as this is the
8365          * only mode running in the host.
8366          */
8367         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8368                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8369                                 "0378 No support for fcpi mode.\n");
8370                 ftr_rsp++;
8371         }
8372
8373         /* Performance Hints are ONLY for FCoE */
8374         if (phba->hba_flag & HBA_FCOE_MODE) {
8375                 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8376                         phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8377                 else
8378                         phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8379         }
8380
8381         /*
8382          * If the port cannot support the host's requested features
8383          * then turn off the global config parameters to disable the
8384          * feature in the driver.  This is not a fatal error.
8385          */
8386         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8387                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8388                         phba->cfg_enable_bg = 0;
8389                         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8390                         ftr_rsp++;
8391                 }
8392         }
8393
8394         if (phba->max_vpi && phba->cfg_enable_npiv &&
8395             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8396                 ftr_rsp++;
8397
8398         if (ftr_rsp) {
8399                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8400                                 "0379 Feature Mismatch Data: x%08x %08x "
8401                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8402                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8403                                 phba->cfg_enable_npiv, phba->max_vpi);
8404                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8405                         phba->cfg_enable_bg = 0;
8406                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8407                         phba->cfg_enable_npiv = 0;
8408         }
8409
8410         /* These SLI3 features are assumed in SLI4 */
8411         spin_lock_irq(&phba->hbalock);
8412         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8413         spin_unlock_irq(&phba->hbalock);
8414
8415         /* Always try to enable dual dump feature if we can */
8416         lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8417         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8418         dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8419         if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8420                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8421                                 "6448 Dual Dump is enabled\n");
8422         else
8423                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8424                                 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8425                                 "rc:x%x dd:x%x\n",
8426                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8427                                 lpfc_sli_config_mbox_subsys_get(
8428                                         phba, mboxq),
8429                                 lpfc_sli_config_mbox_opcode_get(
8430                                         phba, mboxq),
8431                                 rc, dd);
8432         /*
8433          * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
8434          * calls depend on these resources to complete port setup.
8435          */
8436         rc = lpfc_sli4_alloc_resource_identifiers(phba);
8437         if (rc) {
8438                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8439                                 "2920 Failed to alloc Resource IDs "
8440                                 "rc = x%x\n", rc);
8441                 goto out_free_mbox;
8442         }
8443
8444         lpfc_set_host_data(phba, mboxq);
8445
8446         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8447         if (rc) {
8448                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8449                                 "2134 Failed to set host os driver version %x\n",
8450                                 rc);
8451         }
8452
8453         /* Read the port's service parameters. */
8454         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8455         if (rc) {
8456                 phba->link_state = LPFC_HBA_ERROR;
8457                 rc = -ENOMEM;
8458                 goto out_free_mbox;
8459         }
8460
8461         mboxq->vport = vport;
8462         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8463         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8464         if (rc == MBX_SUCCESS) {
8465                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8466                 rc = 0;
8467         }
8468
8469         /*
8470          * This memory was allocated by the lpfc_read_sparam routine. Release
8471          * it to the mbuf pool.
8472          */
8473         lpfc_mbuf_free(phba, mp->virt, mp->phys);
8474         kfree(mp);
8475         mboxq->ctx_buf = NULL;
8476         if (unlikely(rc)) {
8477                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8478                                 "0382 READ_SPARAM command failed "
8479                                 "status %d, mbxStatus x%x\n",
8480                                 rc, bf_get(lpfc_mqe_status, mqe));
8481                 phba->link_state = LPFC_HBA_ERROR;
8482                 rc = -EIO;
8483                 goto out_free_mbox;
8484         }
8485
8486         lpfc_update_vport_wwn(vport);
8487
8488         /* Update the fc_host data structures with new wwn. */
8489         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8490         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8491
8492         /* Create all the SLI4 queues */
8493         rc = lpfc_sli4_queue_create(phba);
8494         if (rc) {
8495                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8496                                 "3089 Failed to allocate queues\n");
8497                 rc = -ENODEV;
8498                 goto out_free_mbox;
8499         }
8500         /* Set up all the queues to the device */
8501         rc = lpfc_sli4_queue_setup(phba);
8502         if (unlikely(rc)) {
8503                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8504                                 "0381 Error %d during queue setup.\n", rc);
8505                 goto out_stop_timers;
8506         }
8507         /* Initialize the driver internal SLI layer lists. */
8508         lpfc_sli4_setup(phba);
8509         lpfc_sli4_queue_init(phba);
8510
8511         /* update host els xri-sgl sizes and mappings */
8512         rc = lpfc_sli4_els_sgl_update(phba);
8513         if (unlikely(rc)) {
8514                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8515                                 "1400 Failed to update xri-sgl size and "
8516                                 "mapping: %d\n", rc);
8517                 goto out_destroy_queue;
8518         }
8519
8520         /* register the els sgl pool to the port */
8521         rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8522                                        phba->sli4_hba.els_xri_cnt);
8523         if (unlikely(rc < 0)) {
8524                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8525                                 "0582 Error %d during els sgl post "
8526                                 "operation\n", rc);
8527                 rc = -ENODEV;
8528                 goto out_destroy_queue;
8529         }
8530         phba->sli4_hba.els_xri_cnt = rc;
8531
8532         if (phba->nvmet_support) {
8533                 /* update host nvmet xri-sgl sizes and mappings */
8534                 rc = lpfc_sli4_nvmet_sgl_update(phba);
8535                 if (unlikely(rc)) {
8536                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8537                                         "6308 Failed to update nvmet-sgl size "
8538                                         "and mapping: %d\n", rc);
8539                         goto out_destroy_queue;
8540                 }
8541
8542                 /* register the nvmet sgl pool to the port */
8543                 rc = lpfc_sli4_repost_sgl_list(
8544                         phba,
8545                         &phba->sli4_hba.lpfc_nvmet_sgl_list,
8546                         phba->sli4_hba.nvmet_xri_cnt);
8547                 if (unlikely(rc < 0)) {
8548                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8549                                         "3117 Error %d during nvmet "
8550                                         "sgl post\n", rc);
8551                         rc = -ENODEV;
8552                         goto out_destroy_queue;
8553                 }
8554                 phba->sli4_hba.nvmet_xri_cnt = rc;
8555
8556                 /* We allocate an iocbq for every receive context SGL.
8557                  * The additional allocation is for abort and ls handling.
8558                  */
8559                 cnt = phba->sli4_hba.nvmet_xri_cnt +
8560                         phba->sli4_hba.max_cfg_param.max_xri;
8561         } else {
8562                 /* update host common xri-sgl sizes and mappings */
8563                 rc = lpfc_sli4_io_sgl_update(phba);
8564                 if (unlikely(rc)) {
8565                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8566                                         "6082 Failed to update nvme-sgl size "
8567                                         "and mapping: %d\n", rc);
8568                         goto out_destroy_queue;
8569                 }
8570
8571                 /* register the allocated common sgl pool to the port */
8572                 rc = lpfc_sli4_repost_io_sgl_list(phba);
8573                 if (unlikely(rc)) {
8574                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8575                                         "6116 Error %d during nvme sgl post "
8576                                         "operation\n", rc);
8577                         /* Some NVME buffers were moved to abort nvme list */
8578                         /* A pci function reset will repost them */
8579                         rc = -ENODEV;
8580                         goto out_destroy_queue;
8581                 }
8582                 /* Each lpfc_io_buf job structure has an iocbq element.
8583                  * This cnt provides for abort, els, ct and ls requests.
8584                  */
8585                 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8586         }
8587
8588         if (!phba->sli.iocbq_lookup) {
8589                 /* Initialize and populate the iocb list per host */
8590                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8591                                 "2821 initialize iocb list with %d entries\n",
8592                                 cnt);
8593                 rc = lpfc_init_iocb_list(phba, cnt);
8594                 if (rc) {
8595                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8596                                         "1413 Failed to init iocb list.\n");
8597                         goto out_destroy_queue;
8598                 }
8599         }
8600
8601         if (phba->nvmet_support)
8602                 lpfc_nvmet_create_targetport(phba);
8603
8604         if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8605                 /* Post initial buffers to all RQs created */
8606                 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8607                         rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8608                         INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8609                         rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8610                         rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8611                         rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8612                         rqbp->buffer_count = 0;
8613
8614                         lpfc_post_rq_buffer(
8615                                 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8616                                 phba->sli4_hba.nvmet_mrq_data[i],
8617                                 phba->cfg_nvmet_mrq_post, i);
8618                 }
8619         }
8620
8621         /* Post the rpi header region to the device. */
8622         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8623         if (unlikely(rc)) {
8624                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8625                                 "0393 Error %d during rpi post operation\n",
8626                                 rc);
8627                 rc = -ENODEV;
8628                 goto out_free_iocblist;
8629         }
8630         lpfc_sli4_node_prep(phba);
8631
8632         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8633                 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8634                         /*
8635                          * The FC Port needs to register FCFI (index 0)
8636                          */
8637                         lpfc_reg_fcfi(phba, mboxq);
8638                         mboxq->vport = phba->pport;
8639                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8640                         if (rc != MBX_SUCCESS)
8641                                 goto out_unset_queue;
8642                         rc = 0;
8643                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8644                                                 &mboxq->u.mqe.un.reg_fcfi);
8645                 } else {
8646                         /* We are a NVME Target mode with MRQ > 1 */
8647
8648                         /* First register the FCFI */
8649                         lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8650                         mboxq->vport = phba->pport;
8651                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8652                         if (rc != MBX_SUCCESS)
8653                                 goto out_unset_queue;
8654                         rc = 0;
8655                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8656                                                 &mboxq->u.mqe.un.reg_fcfi_mrq);
8657
8658                         /* Next register the MRQs */
8659                         lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8660                         mboxq->vport = phba->pport;
8661                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8662                         if (rc != MBX_SUCCESS)
8663                                 goto out_unset_queue;
8664                         rc = 0;
8665                 }
8666                 /* Check if the port is configured to be disabled */
8667                 lpfc_sli_read_link_ste(phba);
8668         }
8669
8670         /* Don't post more new bufs if repost already recovered
8671          * the nvme sgls.
8672          */
8673         if (phba->nvmet_support == 0) {
8674                 if (phba->sli4_hba.io_xri_cnt == 0) {
8675                         len = lpfc_new_io_buf(
8676                                               phba, phba->sli4_hba.io_xri_max);
8677                         if (len == 0) {
8678                                 rc = -ENOMEM;
8679                                 goto out_unset_queue;
8680                         }
8681
8682                         if (phba->cfg_xri_rebalancing)
8683                                 lpfc_create_multixri_pools(phba);
8684                 }
8685         } else {
8686                 phba->cfg_xri_rebalancing = 0;
8687         }
8688
8689         /* Allow asynchronous mailbox commands to go through */
8690         spin_lock_irq(&phba->hbalock);
8691         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8692         spin_unlock_irq(&phba->hbalock);
8693
8694         /* Post receive buffers to the device */
8695         lpfc_sli4_rb_setup(phba);
8696
8697         /* Reset HBA FCF states after HBA reset */
8698         phba->fcf.fcf_flag = 0;
8699         phba->fcf.current_rec.flag = 0;
8700
8701         /* Start the ELS watchdog timer */
8702         mod_timer(&vport->els_tmofunc,
8703                   jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8704
8705         /* Start heart beat timer */
8706         mod_timer(&phba->hb_tmofunc,
8707                   jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8708         phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8709         phba->last_completion_time = jiffies;
8710
8711         /* start eq_delay heartbeat */
8712         if (phba->cfg_auto_imax)
8713                 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8714                                    msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8715
8716         /* start per phba idle_stat_delay heartbeat */
8717         lpfc_init_idle_stat_hb(phba);
8718
8719         /* Start error attention (ERATT) polling timer */
8720         mod_timer(&phba->eratt_poll,
8721                   jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8722
8723         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8724         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8725                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8726                 if (!rc) {
8727                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8728                                         "2829 This device supports "
8729                                         "Advanced Error Reporting (AER)\n");
8730                         spin_lock_irq(&phba->hbalock);
8731                         phba->hba_flag |= HBA_AER_ENABLED;
8732                         spin_unlock_irq(&phba->hbalock);
8733                 } else {
8734                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8735                                         "2830 This device does not support "
8736                                         "Advanced Error Reporting (AER)\n");
8737                         phba->cfg_aer_support = 0;
8738                 }
8739                 rc = 0;
8740         }
8741
8742         /*
8743          * The port is ready, set the host's link state to LINK_DOWN
8744          * in preparation for link interrupts.
8745          */
8746         spin_lock_irq(&phba->hbalock);
8747         phba->link_state = LPFC_LINK_DOWN;
8748
8749         /* Check if physical ports are trunked */
8750         if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8751                 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8752         if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8753                 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8754         if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8755                 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8756         if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8757                 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8758         spin_unlock_irq(&phba->hbalock);
8759
8760         /* Arm the CQs and then EQs on device */
8761         lpfc_sli4_arm_cqeq_intr(phba);
8762
8763         /* Indicate device interrupt mode */
8764         phba->sli4_hba.intr_enable = 1;
8765
8766         /* Setup CMF after HBA is initialized */
8767         lpfc_cmf_setup(phba);
8768
8769         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8770             (phba->hba_flag & LINK_DISABLED)) {
8771                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8772                                 "3103 Adapter Link is disabled.\n");
8773                 lpfc_down_link(phba, mboxq);
8774                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8775                 if (rc != MBX_SUCCESS) {
8776                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8777                                         "3104 Adapter failed to issue "
8778                                         "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8779                         goto out_io_buff_free;
8780                 }
8781         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8782                 /* don't perform init_link on SLI4 FC port loopback test */
8783                 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8784                         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8785                         if (rc)
8786                                 goto out_io_buff_free;
8787                 }
8788         }
8789         mempool_free(mboxq, phba->mbox_mem_pool);
8790
8791         phba->hba_flag |= HBA_SETUP;
8792         return rc;
8793
8794 out_io_buff_free:
8795         /* Free allocated IO Buffers */
8796         lpfc_io_free(phba);
8797 out_unset_queue:
8798         /* Unset all the queues set up in this routine when error out */
8799         lpfc_sli4_queue_unset(phba);
8800 out_free_iocblist:
8801         lpfc_free_iocb_list(phba);
8802 out_destroy_queue:
8803         lpfc_sli4_queue_destroy(phba);
8804 out_stop_timers:
8805         lpfc_stop_hba_timers(phba);
8806 out_free_mbox:
8807         mempool_free(mboxq, phba->mbox_mem_pool);
8808         return rc;
8809 }
8810
8811 /**
8812  * lpfc_mbox_timeout - Timeout call back function for mbox timer
8813  * @t: Context to fetch pointer to hba structure from.
8814  *
8815  * This is the callback function for mailbox timer. The mailbox
8816  * timer is armed when a new mailbox command is issued and the timer
8817  * is deleted when the mailbox complete. The function is called by
8818  * is deleted when the mailbox completes. The function is called by
8819  * the kernel timer code when a mailbox does not complete within the
8820  * expected time. This function wakes up the worker thread to
8821  * done by the worker thread function lpfc_mbox_timeout_handler.
8822  **/
8823 void
8824 lpfc_mbox_timeout(struct timer_list *t)
8825 {
8826         struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
8827         unsigned long iflag;
8828         uint32_t tmo_posted;
8829
8830         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8831         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8832         if (!tmo_posted)
8833                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8834         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8835
8836         if (!tmo_posted)
8837                 lpfc_worker_wake_up(phba);
8838         return;
8839 }
8840
8841 /**
8842  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8843  *                                    are pending
8844  * @phba: Pointer to HBA context object.
8845  *
8846  * This function checks if any mailbox completions are present on the mailbox
8847  * completion queue.
8848  **/
8849 static bool
8850 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8851 {
8852
8853         uint32_t idx;
8854         struct lpfc_queue *mcq;
8855         struct lpfc_mcqe *mcqe;
8856         bool pending_completions = false;
8857         uint8_t qe_valid;
8858
8859         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8860                 return false;
8861
8862         /* Check for completions on mailbox completion queue */
8863
8864         mcq = phba->sli4_hba.mbx_cq;
8865         idx = mcq->hba_index;
8866         qe_valid = mcq->qe_valid;
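        /* On queues using auto-valid (cqav), the port flips the CQE
         * phase ("valid") bit each time the queue wraps; an entry is
         * live only while that bit matches the qe_valid phase tracked
         * here.
         */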
8867         while (bf_get_le32(lpfc_cqe_valid,
8868                (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8869                 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8870                 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8871                     (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8872                         pending_completions = true;
8873                         break;
8874                 }
8875                 idx = (idx + 1) % mcq->entry_count;
8876                 if (mcq->hba_index == idx)
8877                         break;
8878
8879                 /* if the index wrapped around, toggle the valid bit */
8880                 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8881                         qe_valid = (qe_valid) ? 0 : 1;
8882         }
8883         return pending_completions;
8884
8885 }
8886
8887 /**
8888  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8889  *                                            that were missed.
8890  * @phba: Pointer to HBA context object.
8891  *
8892  * For sli4, it is possible to miss an interrupt. As such, mbox completions
8893  * may be missed, causing erroneous mailbox timeouts to occur. This function
8894  * checks to see if mbox completions are on the mailbox completion queue
8895  * and will process all the completions associated with the eq for the
8896  * mailbox completion queue.
8897  **/
8898 static bool
8899 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8900 {
8901         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8902         uint32_t eqidx;
8903         struct lpfc_queue *fpeq = NULL;
8904         struct lpfc_queue *eq;
8905         bool mbox_pending;
8906
8907         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8908                 return false;
8909
8910         /* Find the EQ associated with the mbox CQ */
8911         if (sli4_hba->hdwq) {
8912                 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8913                         eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8914                         if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8915                                 fpeq = eq;
8916                                 break;
8917                         }
8918                 }
8919         }
8920         if (!fpeq)
8921                 return false;
8922
8923         /* Turn off interrupts from this EQ */
8924
8925         sli4_hba->sli4_eq_clr_intr(fpeq);
8926
8927         /* Check to see if a mbox completion is pending */
8928
8929         mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8930
8931         /*
8932          * If a mbox completion is pending, process all the events on EQ
8933          * associated with the mbox completion queue (this could include
8934          * mailbox commands, async events, els commands, receive queue data
8935          * and fcp commands)
8936          */
8937
8938         if (mbox_pending)
8939                 /* process and rearm the EQ */
8940                 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8941         else
8942                 /* Always clear and re-arm the EQ */
8943                 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8944
8945         return mbox_pending;
8946
8947 }
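
/*
 * Illustrative sketch, not part of the driver: callers treat the routine
 * above as a last-chance poll before acting on an apparent timeout. If a
 * completion was merely missed, processing it makes the timeout spurious;
 * only a true timeout falls through. The helper name is hypothetical.
 */
static bool __maybe_unused
example_confirm_mbox_timeout(struct lpfc_hba *phba)
{
        /* A true return means completions were found and processed */
        return !lpfc_sli4_process_missed_mbox_completions(phba);
}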
8948
8949 /**
8950  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8951  * @phba: Pointer to HBA context object.
8952  *
8953  * This function is called from worker thread when a mailbox command times out.
8954  * The caller is not required to hold any locks. This function will reset the
8955  * HBA and recover all the pending commands.
8956  **/
8957 void
8958 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8959 {
8960         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8961         MAILBOX_t *mb = NULL;
8962
8963         struct lpfc_sli *psli = &phba->sli;
8964
8965         /* If the mailbox completed, process the completion */
8966         lpfc_sli4_process_missed_mbox_completions(phba);
8967
8968         if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
8969                 return;
8970
8971         if (pmbox != NULL)
8972                 mb = &pmbox->u.mb;
8973         /* Check the pmbox pointer first.  There is a race condition
8974          * between the mbox timeout handler getting executed in the
8975          * worklist and the mailbox actually completing. When this
8976          * race condition occurs, the mbox_active will be NULL.
8977          */
8978         spin_lock_irq(&phba->hbalock);
8979         if (pmbox == NULL) {
8980                 lpfc_printf_log(phba, KERN_WARNING,
8981                                 LOG_MBOX | LOG_SLI,
8982                                 "0353 Active Mailbox cleared - mailbox timeout "
8983                                 "exiting\n");
8984                 spin_unlock_irq(&phba->hbalock);
8985                 return;
8986         }
8987
8988         /* Mbox cmd <mbxCommand> timeout */
8989         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8990                         "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8991                         mb->mbxCommand,
8992                         phba->pport->port_state,
8993                         phba->sli.sli_flag,
8994                         phba->sli.mbox_active);
8995         spin_unlock_irq(&phba->hbalock);
8996
8997         /* Setting state unknown so lpfc_sli_abort_iocb_ring
8998          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8999          * it to fail all outstanding SCSI IO.
9000          */
9001         spin_lock_irq(&phba->pport->work_port_lock);
9002         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9003         spin_unlock_irq(&phba->pport->work_port_lock);
9004         spin_lock_irq(&phba->hbalock);
9005         phba->link_state = LPFC_LINK_UNKNOWN;
9006         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9007         spin_unlock_irq(&phba->hbalock);
9008
9009         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9010                         "0345 Resetting board due to mailbox timeout\n");
9011
9012         /* Reset the HBA device */
9013         lpfc_reset_hba(phba);
9014 }
9015
9016 /**
9017  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9018  * @phba: Pointer to HBA context object.
9019  * @pmbox: Pointer to mailbox object.
9020  * @flag: Flag indicating how the mailbox needs to be processed.
9021  *
9022  * This function is called by discovery code and HBA management code
9023  * to submit a mailbox command to firmware with SLI-3 interface spec. This
9024  * function gets the hbalock to protect the data structures.
9025  * The mailbox command can be submitted in polling mode, in which case
9026  * this function will wait in a polling loop for the completion of the
9027  * mailbox.
9028  * If the mailbox is submitted in no_wait mode (not polling), the
9029  * function will submit the command and return immediately without waiting
9030  * for the mailbox completion. The no_wait mode is supported only when the
9031  * HBA is in SLI2/SLI3 mode and interrupts are enabled.
9032  * The SLI interface allows only one mailbox pending at a time. If the
9033  * mailbox is issued in polling mode and there is already a mailbox
9034  * pending, then the function will return an error. If the mailbox is issued
9035  * in NO_WAIT mode and there is a mailbox pending already, the function
9036  * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
9037  * The SLI layer owns the mailbox object until the completion of the mailbox
9038  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9039  * return codes the caller owns the mailbox command after the return of
9040  * the function.
9041  **/
9042 static int
9043 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9044                        uint32_t flag)
9045 {
9046         MAILBOX_t *mbx;
9047         struct lpfc_sli *psli = &phba->sli;
9048         uint32_t status, evtctr;
9049         uint32_t ha_copy, hc_copy;
9050         int i;
9051         unsigned long timeout;
9052         unsigned long drvr_flag = 0;
9053         uint32_t word0, ldata;
9054         void __iomem *to_slim;
9055         int processing_queue = 0;
9056
9057         spin_lock_irqsave(&phba->hbalock, drvr_flag);
9058         if (!pmbox) {
9059                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9060                 /* processing mbox queue from intr_handler */
9061                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9062                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9063                         return MBX_SUCCESS;
9064                 }
9065                 processing_queue = 1;
9066                 pmbox = lpfc_mbox_get(phba);
9067                 if (!pmbox) {
9068                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9069                         return MBX_SUCCESS;
9070                 }
9071         }
9072
9073         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9074                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9075                 if (!pmbox->vport) {
9076                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9077                         lpfc_printf_log(phba, KERN_ERR,
9078                                         LOG_MBOX | LOG_VPORT,
9079                                         "1806 Mbox x%x failed. No vport\n",
9080                                         pmbox->u.mb.mbxCommand);
9081                         dump_stack();
9082                         goto out_not_finished;
9083                 }
9084         }
9085
9086         /* If the PCI channel is in offline state, do not post mbox. */
9087         if (unlikely(pci_channel_offline(phba->pcidev))) {
9088                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9089                 goto out_not_finished;
9090         }
9091
9092         /* If HBA has a deferred error attention, fail the iocb. */
9093         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9094                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9095                 goto out_not_finished;
9096         }
9097
9098         psli = &phba->sli;
9099
9100         mbx = &pmbox->u.mb;
9101         status = MBX_SUCCESS;
9102
9103         if (phba->link_state == LPFC_HBA_ERROR) {
9104                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9105
9106                 /* Mbox command <mbxCommand> cannot issue */
9107                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9108                                 "(%d):0311 Mailbox command x%x cannot "
9109                                 "issue Data: x%x x%x\n",
9110                                 pmbox->vport ? pmbox->vport->vpi : 0,
9111                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9112                 goto out_not_finished;
9113         }
9114
9115         if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9116                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9117                         !(hc_copy & HC_MBINT_ENA)) {
9118                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9119                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9120                                 "(%d):2528 Mailbox command x%x cannot "
9121                                 "issue Data: x%x x%x\n",
9122                                 pmbox->vport ? pmbox->vport->vpi : 0,
9123                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9124                         goto out_not_finished;
9125                 }
9126         }
9127
9128         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9129                 /* Polling for a mbox command when another one is already active
9130                  * is not allowed in SLI. Also, the driver must have established
9131                  * SLI2 mode to queue and process multiple mbox commands.
9132                  */
9133
9134                 if (flag & MBX_POLL) {
9135                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9136
9137                         /* Mbox command <mbxCommand> cannot issue */
9138                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9139                                         "(%d):2529 Mailbox command x%x "
9140                                         "cannot issue Data: x%x x%x\n",
9141                                         pmbox->vport ? pmbox->vport->vpi : 0,
9142                                         pmbox->u.mb.mbxCommand,
9143                                         psli->sli_flag, flag);
9144                         goto out_not_finished;
9145                 }
9146
9147                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9148                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9149                         /* Mbox command <mbxCommand> cannot issue */
9150                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9151                                         "(%d):2530 Mailbox command x%x "
9152                                         "cannot issue Data: x%x x%x\n",
9153                                         pmbox->vport ? pmbox->vport->vpi : 0,
9154                                         pmbox->u.mb.mbxCommand,
9155                                         psli->sli_flag, flag);
9156                         goto out_not_finished;
9157                 }
9158
9159                 /* Another mailbox command is still being processed, queue this
9160                  * command to be processed later.
9161                  */
9162                 lpfc_mbox_put(phba, pmbox);
9163
9164                 /* Mbox cmd issue - BUSY */
9165                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9166                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
9167                                 "x%x x%x x%x x%x\n",
9168                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9169                                 mbx->mbxCommand,
9170                                 phba->pport ? phba->pport->port_state : 0xff,
9171                                 psli->sli_flag, flag);
9172
9173                 psli->slistat.mbox_busy++;
9174                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9175
9176                 if (pmbox->vport) {
9177                         lpfc_debugfs_disc_trc(pmbox->vport,
9178                                 LPFC_DISC_TRC_MBOX_VPORT,
9179                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
9180                                 (uint32_t)mbx->mbxCommand,
9181                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9182                 }
9183                 else {
9184                         lpfc_debugfs_disc_trc(phba->pport,
9185                                 LPFC_DISC_TRC_MBOX,
9186                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
9187                                 (uint32_t)mbx->mbxCommand,
9188                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9189                 }
9190
9191                 return MBX_BUSY;
9192         }
9193
9194         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9195
9196         /* If we are not polling, we MUST be in SLI2 mode */
9197         if (flag != MBX_POLL) {
9198                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9199                     (mbx->mbxCommand != MBX_KILL_BOARD)) {
9200                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9201                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9202                         /* Mbox command <mbxCommand> cannot issue */
9203                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9204                                         "(%d):2531 Mailbox command x%x "
9205                                         "cannot issue Data: x%x x%x\n",
9206                                         pmbox->vport ? pmbox->vport->vpi : 0,
9207                                         pmbox->u.mb.mbxCommand,
9208                                         psli->sli_flag, flag);
9209                         goto out_not_finished;
9210                 }
9211                 /* timeout active mbox command */
9212                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9213                                            1000);
9214                 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9215         }
9216
9217         /* Mailbox cmd <cmd> issue */
9218         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9219                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9220                         "x%x\n",
9221                         pmbox->vport ? pmbox->vport->vpi : 0,
9222                         mbx->mbxCommand,
9223                         phba->pport ? phba->pport->port_state : 0xff,
9224                         psli->sli_flag, flag);
9225
9226         if (mbx->mbxCommand != MBX_HEARTBEAT) {
9227                 if (pmbox->vport) {
9228                         lpfc_debugfs_disc_trc(pmbox->vport,
9229                                 LPFC_DISC_TRC_MBOX_VPORT,
9230                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9231                                 (uint32_t)mbx->mbxCommand,
9232                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9233                 }
9234                 else {
9235                         lpfc_debugfs_disc_trc(phba->pport,
9236                                 LPFC_DISC_TRC_MBOX,
9237                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
9238                                 (uint32_t)mbx->mbxCommand,
9239                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9240                 }
9241         }
9242
9243         psli->slistat.mbox_cmd++;
9244         evtctr = psli->slistat.mbox_event;
9245
9246         /* next set own bit for the adapter and copy over command word */
9247         mbx->mbxOwner = OWN_CHIP;
9248
9249         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9250                 /* Populate mbox extension offset word. */
9251                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9252                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9253                                 = (uint8_t *)phba->mbox_ext
9254                                   - (uint8_t *)phba->mbox;
9255                 }
9256
9257                 /* Copy the mailbox extension data */
9258                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9259                         lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9260                                               (uint8_t *)phba->mbox_ext,
9261                                               pmbox->in_ext_byte_len);
9262                 }
9263                 /* Copy command data to host SLIM area */
9264                 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9265         } else {
9266                 /* Populate mbox extension offset word. */
9267                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9268                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9269                                 = MAILBOX_HBA_EXT_OFFSET;
9270
9271                 /* Copy the mailbox extension data */
9272                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9273                         lpfc_memcpy_to_slim(phba->MBslimaddr +
9274                                 MAILBOX_HBA_EXT_OFFSET,
9275                                 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9276
9277                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9278                         /* copy command data into host mbox for cmpl */
9279                         lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9280                                               MAILBOX_CMD_SIZE);
9281
9282                 /* First copy mbox command data to HBA SLIM, skip past first
9283                    word */
9284                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
9285                 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9286                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
9287
9288                 /* Next copy over first word, with mbxOwner set */
9289                 ldata = *((uint32_t *)mbx);
9290                 to_slim = phba->MBslimaddr;
9291                 writel(ldata, to_slim);
9292                 readl(to_slim); /* flush */
9293
9294                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9295                         /* switch over to host mailbox */
9296                         psli->sli_flag |= LPFC_SLI_ACTIVE;
9297         }
9298
9299         wmb();
9300
9301         switch (flag) {
9302         case MBX_NOWAIT:
9303                 /* Set up reference to mailbox command */
9304                 psli->mbox_active = pmbox;
9305                 /* Interrupt board to do it */
9306                 writel(CA_MBATT, phba->CAregaddr);
9307                 readl(phba->CAregaddr); /* flush */
9308                 /* Don't wait for it to finish, just return */
9309                 break;
9310
9311         case MBX_POLL:
9312                 /* Set up null reference to mailbox command */
9313                 psli->mbox_active = NULL;
9314                 /* Interrupt board to do it */
9315                 writel(CA_MBATT, phba->CAregaddr);
9316                 readl(phba->CAregaddr); /* flush */
9317
9318                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9319                         /* First read mbox status word */
9320                         word0 = *((uint32_t *)phba->mbox);
9321                         word0 = le32_to_cpu(word0);
9322                 } else {
9323                         /* First read mbox status word */
9324                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
9325                                 spin_unlock_irqrestore(&phba->hbalock,
9326                                                        drvr_flag);
9327                                 goto out_not_finished;
9328                         }
9329                 }
9330
9331                 /* Read the HBA Host Attention Register */
9332                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9333                         spin_unlock_irqrestore(&phba->hbalock,
9334                                                        drvr_flag);
9335                         goto out_not_finished;
9336                 }
9337                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9338                                                         1000) + jiffies;
9339                 i = 0;
9340                 /* Wait for command to complete */
9341                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9342                        (!(ha_copy & HA_MBATT) &&
9343                         (phba->link_state > LPFC_WARM_START))) {
9344                         if (time_after(jiffies, timeout)) {
9345                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9346                                 spin_unlock_irqrestore(&phba->hbalock,
9347                                                        drvr_flag);
9348                                 goto out_not_finished;
9349                         }
9350
9351                         /* Check if we took a mbox interrupt while we were
9352                            polling */
9353                         if (((word0 & OWN_CHIP) != OWN_CHIP)
9354                             && (evtctr != psli->slistat.mbox_event))
9355                                 break;
9356
9357                         if (i++ > 10) {
9358                                 spin_unlock_irqrestore(&phba->hbalock,
9359                                                        drvr_flag);
9360                                 msleep(1);
9361                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9362                         }
9363
9364                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9365                                 /* First copy command data */
9366                                 word0 = *((uint32_t *)phba->mbox);
9367                                 word0 = le32_to_cpu(word0);
9368                                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9369                                         MAILBOX_t *slimmb;
9370                                         uint32_t slimword0;
9371                                         /* Check real SLIM for any errors */
9372                                         slimword0 = readl(phba->MBslimaddr);
9373                                         slimmb = (MAILBOX_t *)&slimword0;
9374                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9375                                             && slimmb->mbxStatus) {
9376                                                 psli->sli_flag &=
9377                                                     ~LPFC_SLI_ACTIVE;
9378                                                 word0 = slimword0;
9379                                         }
9380                                 }
9381                         } else {
9382                                 /* First copy command data */
9383                                 word0 = readl(phba->MBslimaddr);
9384                         }
9385                         /* Read the HBA Host Attention Register */
9386                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9387                                 spin_unlock_irqrestore(&phba->hbalock,
9388                                                        drvr_flag);
9389                                 goto out_not_finished;
9390                         }
9391                 }
9392
9393                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9394                         /* copy results back to user */
9395                         lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9396                                                 MAILBOX_CMD_SIZE);
9397                         /* Copy the mailbox extension data */
9398                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9399                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9400                                                       pmbox->ctx_buf,
9401                                                       pmbox->out_ext_byte_len);
9402                         }
9403                 } else {
9404                         /* First copy command data */
9405                         lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9406                                                 MAILBOX_CMD_SIZE);
9407                         /* Copy the mailbox extension data */
9408                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9409                                 lpfc_memcpy_from_slim(
9410                                         pmbox->ctx_buf,
9411                                         phba->MBslimaddr +
9412                                         MAILBOX_HBA_EXT_OFFSET,
9413                                         pmbox->out_ext_byte_len);
9414                         }
9415                 }
9416
9417                 writel(HA_MBATT, phba->HAregaddr);
9418                 readl(phba->HAregaddr); /* flush */
9419
9420                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9421                 status = mbx->mbxStatus;
9422         }
9423
9424         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9425         return status;
9426
9427 out_not_finished:
9428         if (processing_queue) {
9429                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9430                 lpfc_mbox_cmpl_put(phba, pmbox);
9431         }
9432         return MBX_NOT_FINISHED;
9433 }
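
/*
 * Illustrative sketch, not part of the driver: a caller issuing a mailbox
 * command in polling mode under the ownership rules documented above. The
 * mempool allocation from phba->mbox_mem_pool is an assumption borrowed
 * from other lpfc code paths; the command setup itself is omitted.
 */
static int __maybe_unused
example_issue_mbox_poll(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;

        /* ... fill in pmb->u.mb with the desired command here ... */

        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        /*
         * In polling mode the command has completed (or failed) on return;
         * only MBX_BUSY would leave ownership with the SLI layer.
         */
        if (rc != MBX_BUSY)
                mempool_free(pmb, phba->mbox_mem_pool);
        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}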
9434
9435 /**
9436  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9437  * @phba: Pointer to HBA context object.
9438  *
9439  * The function blocks the posting of SLI4 asynchronous mailbox commands from
9440  * the driver internal pending mailbox queue. It will then try to wait out the
9441  * possible outstanding mailbox command before returning.
9442  *
9443  * Returns:
9444  *      0 - the outstanding mailbox command completed.
9445  *      1 - the wait for the outstanding mailbox command timed out.
9446  **/
9447 static int
9448 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9449 {
9450         struct lpfc_sli *psli = &phba->sli;
9451         LPFC_MBOXQ_t *mboxq;
9452         int rc = 0;
9453         unsigned long timeout = 0;
9454         u32 sli_flag;
9455         u8 cmd, subsys, opcode;
9456
9457         /* Mark the asynchronous mailbox command posting as blocked */
9458         spin_lock_irq(&phba->hbalock);
9459         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9460         /* Determine how long we might wait for the active mailbox
9461          * command to be gracefully completed by firmware.
9462          */
9463         if (phba->sli.mbox_active)
9464                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9465                                                 phba->sli.mbox_active) *
9466                                                 1000) + jiffies;
9467         spin_unlock_irq(&phba->hbalock);
9468
9469         /* Make sure the mailbox is really active */
9470         if (timeout)
9471                 lpfc_sli4_process_missed_mbox_completions(phba);
9472
9473         /* Wait for the outstanding mailbox command to complete */
9474         while (phba->sli.mbox_active) {
9475                 /* Check active mailbox complete status every 2ms */
9476                 msleep(2);
9477                 if (time_after(jiffies, timeout)) {
9478                         /* Timeout, mark the outstanding cmd not complete */
9479
9480                         /* Sanity check that sli.mbox_active has not completed or
9481                          * been cancelled from another context during the last 2ms sleep,
9482                          * so take hbalock to be sure before logging.
9483                          */
9484                         spin_lock_irq(&phba->hbalock);
9485                         if (phba->sli.mbox_active) {
9486                                 mboxq = phba->sli.mbox_active;
9487                                 cmd = mboxq->u.mb.mbxCommand;
9488                                 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9489                                                                          mboxq);
9490                                 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9491                                                                          mboxq);
9492                                 sli_flag = psli->sli_flag;
9493                                 spin_unlock_irq(&phba->hbalock);
9494                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9495                                                 "2352 Mailbox command x%x "
9496                                                 "(x%x/x%x) sli_flag x%x could "
9497                                                 "not complete\n",
9498                                                 cmd, subsys, opcode,
9499                                                 sli_flag);
9500                         } else {
9501                                 spin_unlock_irq(&phba->hbalock);
9502                         }
9503
9504                         rc = 1;
9505                         break;
9506                 }
9507         }
9508
9509         /* Could not cleanly block async mailbox commands, fail it */
9510         if (rc) {
9511                 spin_lock_irq(&phba->hbalock);
9512                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9513                 spin_unlock_irq(&phba->hbalock);
9514         }
9515         return rc;
9516 }
9517
9518 /**
9519  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9520  * @phba: Pointer to HBA context object.
9521  *
9522  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9523  * commands from the driver internal pending mailbox queue. It makes sure
9524  * that there is no outstanding mailbox command before resuming posting
9525  * asynchronous mailbox commands. If, for any reason, there is an
9526  * outstanding mailbox command, it will try to wait it out before resuming
9527  * asynchronous mailbox command posting.
9528  **/
9529 static void
9530 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9531 {
9532         struct lpfc_sli *psli = &phba->sli;
9533
9534         spin_lock_irq(&phba->hbalock);
9535         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9536                 /* Asynchronous mailbox posting is not blocked, do nothing */
9537                 spin_unlock_irq(&phba->hbalock);
9538                 return;
9539         }
9540
9541         /* The outstanding synchronous mailbox command is guaranteed to be
9542          * done, either successfully or by timeout. After timing out, the
9543          * outstanding command is always removed, so just unblock posting of
9544          * async mailbox commands and resume.
9545          */
9546         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9547         spin_unlock_irq(&phba->hbalock);
9548
9549         /* wake up worker thread to post asynchronous mailbox command */
9550         lpfc_worker_wake_up(phba);
9551 }
9552
9553 /**
9554  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9555  * @phba: Pointer to HBA context object.
9556  * @mboxq: Pointer to mailbox object.
9557  *
9558  * The function waits for the bootstrap mailbox register ready bit from
9559  * the port for up to the regular mailbox command timeout value.
9560  * Returns:
9561  *      0 - no timeout on waiting for bootstrap mailbox register ready.
9562  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
9563  **/
9564 static int
9565 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9566 {
9567         uint32_t db_ready;
9568         unsigned long timeout;
9569         struct lpfc_register bmbx_reg;
9570
9571         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9572                                    * 1000) + jiffies;
9573
9574         do {
9575                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9576                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9577                 if (!db_ready)
9578                         mdelay(2);
9579
9580                 if (time_after(jiffies, timeout))
9581                         return MBXERR_ERROR;
9582         } while (!db_ready);
9583
9584         return 0;
9585 }
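
/*
 * Illustrative sketch, not part of the driver: the same bounded-poll shape
 * as above, generalized to any ready bit. The register address and mask are
 * hypothetical; the jiffies/time_after() bounding and the small mdelay()
 * back-off mirror lpfc_sli4_wait_bmbx_ready().
 */
static int __maybe_unused
example_poll_ready_bit(void __iomem *reg, u32 ready_mask,
                       unsigned long tmo_ms)
{
        unsigned long timeout = msecs_to_jiffies(tmo_ms) + jiffies;

        do {
                if (readl(reg) & ready_mask)
                        return 0;       /* ready bit observed */
                mdelay(2);              /* brief busy-wait back-off */
        } while (!time_after(jiffies, timeout));

        return -ETIMEDOUT;
}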
9586
9587 /**
9588  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9589  * @phba: Pointer to HBA context object.
9590  * @mboxq: Pointer to mailbox object.
9591  *
9592  * The function posts a mailbox to the port.  The mailbox is expected
9593  * to be completely filled in and ready for the port to operate on it.
9594  * This routine executes a synchronous completion operation on the
9595  * mailbox by polling for its completion.
9596  *
9597  * The caller must not be holding any locks when calling this routine.
9598  *
9599  * Returns:
9600  *      MBX_SUCCESS - mailbox posted successfully
9601  *      Any of the MBX error values.
9602  **/
9603 static int
9604 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9605 {
9606         int rc = MBX_SUCCESS;
9607         unsigned long iflag;
9608         uint32_t mcqe_status;
9609         uint32_t mbx_cmnd;
9610         struct lpfc_sli *psli = &phba->sli;
9611         struct lpfc_mqe *mb = &mboxq->u.mqe;
9612         struct lpfc_bmbx_create *mbox_rgn;
9613         struct dma_address *dma_address;
9614
9615         /*
9616          * Only one mailbox can be active to the bootstrap mailbox region
9617          * at a time and there is no queueing provided.
9618          */
9619         spin_lock_irqsave(&phba->hbalock, iflag);
9620         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9621                 spin_unlock_irqrestore(&phba->hbalock, iflag);
9622                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9623                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9624                                 "cannot issue Data: x%x x%x\n",
9625                                 mboxq->vport ? mboxq->vport->vpi : 0,
9626                                 mboxq->u.mb.mbxCommand,
9627                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9628                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9629                                 psli->sli_flag, MBX_POLL);
9630                 return MBXERR_ERROR;
9631         }
9632         /* The driver grabs the token and owns it until release */
9633         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9634         phba->sli.mbox_active = mboxq;
9635         spin_unlock_irqrestore(&phba->hbalock, iflag);
9636
9637         /* wait for bootstrap mbox register readiness */
9638         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9639         if (rc)
9640                 goto exit;
9641         /*
9642          * Initialize the bootstrap memory region to avoid stale data areas
9643          * in the mailbox post.  Then copy the caller's mailbox contents to
9644          * the bmbx mailbox region.
9645          */
9646         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9647         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9648         lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9649                                sizeof(struct lpfc_mqe));
9650
9651         /* Post the high mailbox dma address to the port and wait for ready. */
9652         dma_address = &phba->sli4_hba.bmbx.dma_address;
9653         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9654
9655         /* wait for bootstrap mbox register hi-address write to complete */
9656         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9657         if (rc)
9658                 goto exit;
9659
9660         /* Post the low mailbox dma address to the port. */
9661         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9662
9663         /* wait for bootstrap mbox register low-address write to complete */
9664         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9665         if (rc)
9666                 goto exit;
9667
9668         /*
9669          * Read the CQ to ensure the mailbox has completed.
9670          * If so, update the mailbox status so that the upper layers
9671          * can complete the request normally.
9672          */
9673         lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9674                                sizeof(struct lpfc_mqe));
9675         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9676         lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9677                                sizeof(struct lpfc_mcqe));
9678         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9679         /*
9680          * When the CQE status indicates a failure and the mailbox status
9681          * indicates success then copy the CQE status into the mailbox status
9682          * (and prefix it with x4000).
9683          */
9684         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9685                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9686                         bf_set(lpfc_mqe_status, mb,
9687                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
9688                 rc = MBXERR_ERROR;
9689         } else
9690                 lpfc_sli4_swap_str(phba, mboxq);
9691
9692         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9693                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9694                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9695                         " x%x x%x CQ: x%x x%x x%x x%x\n",
9696                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9697                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9698                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9699                         bf_get(lpfc_mqe_status, mb),
9700                         mb->un.mb_words[0], mb->un.mb_words[1],
9701                         mb->un.mb_words[2], mb->un.mb_words[3],
9702                         mb->un.mb_words[4], mb->un.mb_words[5],
9703                         mb->un.mb_words[6], mb->un.mb_words[7],
9704                         mb->un.mb_words[8], mb->un.mb_words[9],
9705                         mb->un.mb_words[10], mb->un.mb_words[11],
9706                         mb->un.mb_words[12], mboxq->mcqe.word0,
9707                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
9708                         mboxq->mcqe.trailer);
9709 exit:
9710         /* We are holding the token, no lock needed to release it */
9711         spin_lock_irqsave(&phba->hbalock, iflag);
9712         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9713         phba->sli.mbox_active = NULL;
9714         spin_unlock_irqrestore(&phba->hbalock, iflag);
9715         return rc;
9716 }
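
/*
 * Illustrative sketch, not part of the driver: the block/unblock pair above
 * brackets a synchronous bootstrap-mailbox operation, the same shape
 * lpfc_sli_issue_mbox_s4() below uses for MBX_POLL while interrupts are
 * enabled. The helper name is hypothetical.
 */
static int __maybe_unused
example_sync_mbox_bracketed(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        int rc;

        /* Stop async posting and wait out any active mailbox command */
        if (lpfc_sli4_async_mbox_block(phba))
                return MBX_NOT_FINISHED;
        /* Poll the bootstrap mailbox to completion */
        rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
        /* Resume async posting and kick the worker thread */
        lpfc_sli4_async_mbox_unblock(phba);
        return rc;
}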
9717
9718 /**
9719  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9720  * @phba: Pointer to HBA context object.
9721  * @mboxq: Pointer to mailbox object.
9722  * @flag: Flag indicating how the mailbox needs to be processed.
9723  *
9724  * This function is called by discovery code and HBA management code to submit
9725  * a mailbox command to firmware with SLI-4 interface spec.
9726  *
9727  * For all return codes, the caller owns the mailbox command after the
9728  * return of the function.
9729  **/
9730 static int
9731 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9732                        uint32_t flag)
9733 {
9734         struct lpfc_sli *psli = &phba->sli;
9735         unsigned long iflags;
9736         int rc;
9737
9738         /* dump the mailbox command at issue time, if dumping is set up */
9739         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9740
9741         rc = lpfc_mbox_dev_check(phba);
9742         if (unlikely(rc)) {
9743                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9744                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9745                                 "cannot issue Data: x%x x%x\n",
9746                                 mboxq->vport ? mboxq->vport->vpi : 0,
9747                                 mboxq->u.mb.mbxCommand,
9748                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9749                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9750                                 psli->sli_flag, flag);
9751                 goto out_not_finished;
9752         }
9753
9754         /* Detect polling mode and jump to a handler */
9755         if (!phba->sli4_hba.intr_enable) {
9756                 if (flag == MBX_POLL)
9757                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9758                 else
9759                         rc = -EIO;
9760                 if (rc != MBX_SUCCESS)
9761                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9762                                         "(%d):2541 Mailbox command x%x "
9763                                         "(x%x/x%x) failure: "
9764                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
9765                                         "Data: x%x x%x\n",
9766                                         mboxq->vport ? mboxq->vport->vpi : 0,
9767                                         mboxq->u.mb.mbxCommand,
9768                                         lpfc_sli_config_mbox_subsys_get(phba,
9769                                                                         mboxq),
9770                                         lpfc_sli_config_mbox_opcode_get(phba,
9771                                                                         mboxq),
9772                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9773                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9774                                         bf_get(lpfc_mcqe_ext_status,
9775                                                &mboxq->mcqe),
9776                                         psli->sli_flag, flag);
9777                 return rc;
9778         } else if (flag == MBX_POLL) {
9779                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9780                                 "(%d):2542 Try to issue mailbox command "
9781                                 "x%x (x%x/x%x) synchronously ahead of async "
9782                                 "mailbox command queue: x%x x%x\n",
9783                                 mboxq->vport ? mboxq->vport->vpi : 0,
9784                                 mboxq->u.mb.mbxCommand,
9785                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9786                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9787                                 psli->sli_flag, flag);
9788                 /* Try to block the asynchronous mailbox posting */
9789                 rc = lpfc_sli4_async_mbox_block(phba);
9790                 if (!rc) {
9791                         /* Successfully blocked, now issue sync mbox cmd */
9792                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9793                         if (rc != MBX_SUCCESS)
9794                                 lpfc_printf_log(phba, KERN_WARNING,
9795                                         LOG_MBOX | LOG_SLI,
9796                                         "(%d):2597 Sync Mailbox command "
9797                                         "x%x (x%x/x%x) failure: "
9798                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
9799                                         "Data: x%x x%x\n",
9800                                         mboxq->vport ? mboxq->vport->vpi : 0,
9801                                         mboxq->u.mb.mbxCommand,
9802                                         lpfc_sli_config_mbox_subsys_get(phba,
9803                                                                         mboxq),
9804                                         lpfc_sli_config_mbox_opcode_get(phba,
9805                                                                         mboxq),
9806                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9807                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9808                                         bf_get(lpfc_mcqe_ext_status,
9809                                                &mboxq->mcqe),
9810                                         psli->sli_flag, flag);
9811                         /* Unblock the async mailbox posting afterward */
9812                         lpfc_sli4_async_mbox_unblock(phba);
9813                 }
9814                 return rc;
9815         }
9816
9817         /* Now, interrupt mode asynchronous mailbox command */
9818         rc = lpfc_mbox_cmd_check(phba, mboxq);
9819         if (rc) {
9820                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9821                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9822                                 "cannot issue Data: x%x x%x\n",
9823                                 mboxq->vport ? mboxq->vport->vpi : 0,
9824                                 mboxq->u.mb.mbxCommand,
9825                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9826                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9827                                 psli->sli_flag, flag);
9828                 goto out_not_finished;
9829         }
9830
9831         /* Put the mailbox command into the driver internal FIFO */
9832         psli->slistat.mbox_busy++;
9833         spin_lock_irqsave(&phba->hbalock, iflags);
9834         lpfc_mbox_put(phba, mboxq);
9835         spin_unlock_irqrestore(&phba->hbalock, iflags);
9836         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9837                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
9838                         "x%x (x%x/x%x) x%x x%x x%x\n",
9839                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9840                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9841                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9842                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9843                         phba->pport->port_state,
9844                         psli->sli_flag, MBX_NOWAIT);
9845         /* Wake up worker thread to post the mailbox command from queue head */
9846         lpfc_worker_wake_up(phba);
9847
9848         return MBX_BUSY;
9849
9850 out_not_finished:
9851         return MBX_NOT_FINISHED;
9852 }
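
/*
 * Illustrative sketch, not part of the driver: issuing an SLI4 mailbox
 * command asynchronously with a completion handler, per the enqueue
 * behavior above. The handler, its cleanup, and the mbox_mem_pool usage
 * are assumptions modeled on other lpfc code paths.
 */
static void __maybe_unused
example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        /* Runs from mailbox completion processing once firmware is done */
        mempool_free(pmb, phba->mbox_mem_pool);
}

static int __maybe_unused
example_issue_mbox_nowait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        int rc;

        pmb->mbox_cmpl = example_mbox_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        /* MBX_BUSY means enqueued; the worker thread posts it later */
        return (rc == MBX_SUCCESS || rc == MBX_BUSY) ? 0 : -EIO;
}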
9853
9854 /**
9855  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9856  * @phba: Pointer to HBA context object.
9857  *
9858  * This function is called by the worker thread to send a mailbox command to
9859  * SLI4 HBA firmware.
9860  *
9861  **/
9862 int
9863 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9864 {
9865         struct lpfc_sli *psli = &phba->sli;
9866         LPFC_MBOXQ_t *mboxq;
9867         int rc = MBX_SUCCESS;
9868         unsigned long iflags;
9869         struct lpfc_mqe *mqe;
9870         uint32_t mbx_cmnd;
9871
9872         /* Check interrupt mode before posting async mailbox command */
9873         if (unlikely(!phba->sli4_hba.intr_enable))
9874                 return MBX_NOT_FINISHED;
9875
9876         /* Check for mailbox command service token */
9877         spin_lock_irqsave(&phba->hbalock, iflags);
9878         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9879                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9880                 return MBX_NOT_FINISHED;
9881         }
9882         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9883                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9884                 return MBX_NOT_FINISHED;
9885         }
9886         if (unlikely(phba->sli.mbox_active)) {
9887                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9888                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9889                                 "0384 There is pending active mailbox cmd\n");
9890                 return MBX_NOT_FINISHED;
9891         }
9892         /* Take the mailbox command service token */
9893         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9894
9895         /* Get the next mailbox command from head of queue */
9896         mboxq = lpfc_mbox_get(phba);
9897
9898         /* If no more mailbox commands are waiting to be posted, we're done */
9899         if (!mboxq) {
9900                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9901                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9902                 return MBX_SUCCESS;
9903         }
9904         phba->sli.mbox_active = mboxq;
9905         spin_unlock_irqrestore(&phba->hbalock, iflags);
9906
9907         /* Check device readiness for posting mailbox command */
9908         rc = lpfc_mbox_dev_check(phba);
9909         if (unlikely(rc))
9910                 /* Driver clean routine will clean up pending mailbox */
9911                 goto out_not_finished;
9912
9913         /* Prepare the mbox command to be posted */
9914         mqe = &mboxq->u.mqe;
9915         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9916
9917         /* Start timer for the mbox_tmo and log some mailbox post messages */
9918         mod_timer(&psli->mbox_tmo, (jiffies +
9919                   msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9920
9921         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9922                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9923                         "x%x x%x\n",
9924                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9925                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9926                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9927                         phba->pport->port_state, psli->sli_flag);
9928
9929         if (mbx_cmnd != MBX_HEARTBEAT) {
9930                 if (mboxq->vport) {
9931                         lpfc_debugfs_disc_trc(mboxq->vport,
9932                                 LPFC_DISC_TRC_MBOX_VPORT,
9933                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9934                                 mbx_cmnd, mqe->un.mb_words[0],
9935                                 mqe->un.mb_words[1]);
9936                 } else {
9937                         lpfc_debugfs_disc_trc(phba->pport,
9938                                 LPFC_DISC_TRC_MBOX,
9939                                 "MBOX Send: cmd:x%x mb:x%x x%x",
9940                                 mbx_cmnd, mqe->un.mb_words[0],
9941                                 mqe->un.mb_words[1]);
9942                 }
9943         }
9944         psli->slistat.mbox_cmd++;
9945
9946         /* Post the mailbox command to the port */
9947         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9948         if (rc != MBX_SUCCESS) {
9949                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9950                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9951                                 "cannot issue Data: x%x x%x\n",
9952                                 mboxq->vport ? mboxq->vport->vpi : 0,
9953                                 mboxq->u.mb.mbxCommand,
9954                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9955                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9956                                 psli->sli_flag, MBX_NOWAIT);
9957                 goto out_not_finished;
9958         }
9959
9960         return rc;
9961
9962 out_not_finished:
9963         spin_lock_irqsave(&phba->hbalock, iflags);
9964         if (phba->sli.mbox_active) {
9965                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9966                 __lpfc_mbox_cmpl_put(phba, mboxq);
9967                 /* Release the token */
9968                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9969                 phba->sli.mbox_active = NULL;
9970         }
9971         spin_unlock_irqrestore(&phba->hbalock, iflags);
9972
9973         return MBX_NOT_FINISHED;
9974 }
9975
9976 /**
9977  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9978  * @phba: Pointer to HBA context object.
9979  * @pmbox: Pointer to mailbox object.
9980  * @flag: Flag indicating how the mailbox needs to be processed.
9981  *
9982  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
9983  * the API jump table function pointer from the lpfc_hba struct.
9984  *
9985  * For all return codes, the caller owns the mailbox command after
9986  * the return of the function.
9987  **/
9988 int
9989 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9990 {
9991         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9992 }
9993
9994 /**
9995  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9996  * @phba: The hba struct for which this call is being executed.
9997  * @dev_grp: The HBA PCI-Device group number.
9998  *
9999  * This routine sets up the mbox interface API function jump table in @phba
10000  * struct.
10001  * Returns: 0 - success, -ENODEV - failure.
10002  **/
10003 int
10004 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10005 {
10006
10007         switch (dev_grp) {
10008         case LPFC_PCI_DEV_LP:
10009                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10010                 phba->lpfc_sli_handle_slow_ring_event =
10011                                 lpfc_sli_handle_slow_ring_event_s3;
10012                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10013                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10014                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10015                 break;
10016         case LPFC_PCI_DEV_OC:
10017                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10018                 phba->lpfc_sli_handle_slow_ring_event =
10019                                 lpfc_sli_handle_slow_ring_event_s4;
10020                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10021                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10022                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10023                 break;
10024         default:
10025                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10026                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
10027                                 dev_grp);
10028                 return -ENODEV;
10029         }
10030         return 0;
10031 }
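
/*
 * Illustrative sketch, not part of the driver: once the jump table above is
 * populated, callers stay interface-agnostic. lpfc_sli_issue_mbox() simply
 * dispatches through the pointer installed here; for an LPFC_PCI_DEV_OC
 * (SLI4) HBA that resolves to lpfc_sli_issue_mbox_s4(). The helper name is
 * hypothetical.
 */
static int __maybe_unused
example_setup_and_issue(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
                return MBX_NOT_FINISHED;        /* unknown device group */
        return lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
}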
10032
10033 /**
10034  * __lpfc_sli_ringtx_put - Add an iocb to the txq
10035  * @phba: Pointer to HBA context object.
10036  * @pring: Pointer to driver SLI ring object.
10037  * @piocb: Pointer to address of newly added command iocb.
10038  *
10039  * This function is called with hbalock held for SLI3 ports or
10040  * the ring lock held for SLI4 ports to add a command
10041  * iocb to the txq when the SLI layer cannot submit the command iocb
10042  * to the ring.
10043  **/
10044 void
10045 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10046                     struct lpfc_iocbq *piocb)
10047 {
10048         if (phba->sli_rev == LPFC_SLI_REV4)
10049                 lockdep_assert_held(&pring->ring_lock);
10050         else
10051                 lockdep_assert_held(&phba->hbalock);
10052         /* Insert the caller's iocb in the txq tail for later processing. */
10053         list_add_tail(&piocb->list, &pring->txq);
10054 }
10055
10056 /**
10057  * lpfc_sli_next_iocb - Get the next iocb in the txq
10058  * @phba: Pointer to HBA context object.
10059  * @pring: Pointer to driver SLI ring object.
10060  * @piocb: Pointer to address of newly added command iocb.
10061  *
10062  * This function is called with hbalock held before a new
10063  * iocb is submitted to the firmware. It checks the txq so that
10064  * iocbs already queued there are flushed to the firmware before
10065  * any new iocbs are submitted.
10066  * If there are iocbs in the txq which need to be submitted
10067  * to firmware, lpfc_sli_next_iocb returns the first element
10068  * of the txq after dequeuing it from the txq.
10069  * If the txq is empty, the function returns the original
10070  * *piocb and sets *piocb to NULL. The caller checks
10071  * *piocb to find out whether there are more commands to issue.
10072  **/
10073 static struct lpfc_iocbq *
10074 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10075                    struct lpfc_iocbq **piocb)
10076 {
10077         struct lpfc_iocbq *nextiocb;
10078
10079         lockdep_assert_held(&phba->hbalock);
10080
10081         nextiocb = lpfc_sli_ringtx_get(phba, pring);
10082         if (!nextiocb) {
10083                 nextiocb = *piocb;
10084                 *piocb = NULL;
10085         }
10086
10087         return nextiocb;
10088 }
10089
10090 /**
10091  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10092  * @phba: Pointer to HBA context object.
10093  * @ring_number: SLI ring number to issue iocb on.
10094  * @piocb: Pointer to command iocb.
10095  * @flag: Flag indicating if this command can be put into txq.
10096  *
10097  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10098  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10099  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10100  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10101  * this function allows only iocbs for posting buffers. This function finds
10102  * the next available slot in the command ring, posts the command to that
10103  * slot, and writes the port attention register to request that the HBA start
10104  * processing the new iocb. If there is no slot available in the ring and
10105  * flag & SLI_IOCB_RET_IOCB is not set, the new iocb is added to the txq;
10106  * otherwise the function returns IOCB_BUSY.
10107  *
10108  * This function is called with hbalock held. The function will return success
10109  * after it successfully submits the iocb to firmware or after adding it to the
10110  * txq.
10111  **/
10112 static int
10113 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10114                     struct lpfc_iocbq *piocb, uint32_t flag)
10115 {
10116         struct lpfc_iocbq *nextiocb;
10117         IOCB_t *iocb;
10118         struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10119
10120         lockdep_assert_held(&phba->hbalock);
10121
10122         if (piocb->cmd_cmpl && (!piocb->vport) &&
10123            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10124            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10125                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10126                                 "1807 IOCB x%x failed. No vport\n",
10127                                 piocb->iocb.ulpCommand);
10128                 dump_stack();
10129                 return IOCB_ERROR;
10130         }
10131
10132
10133         /* If the PCI channel is in offline state, do not post iocbs. */
10134         if (unlikely(pci_channel_offline(phba->pcidev)))
10135                 return IOCB_ERROR;
10136
10137         /* If HBA has a deferred error attention, fail the iocb. */
10138         if (unlikely(phba->hba_flag & DEFER_ERATT))
10139                 return IOCB_ERROR;
10140
10141         /*
10142          * We should never get an IOCB if we are in a < LINK_DOWN state
10143          */
10144         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10145                 return IOCB_ERROR;
10146
10147         /*
10148          * Check to see if we are blocking IOCB processing because of an
10149          * outstanding event.
10150          */
10151         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10152                 goto iocb_busy;
10153
10154         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10155                 /*
10156                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10157                  * can be issued if the link is not up.
10158                  */
10159                 switch (piocb->iocb.ulpCommand) {
10160                 case CMD_GEN_REQUEST64_CR:
10161                 case CMD_GEN_REQUEST64_CX:
10162                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
10163                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
10164                                         FC_RCTL_DD_UNSOL_CMD) ||
10165                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
10166                                         MENLO_TRANSPORT_TYPE))
10167
10168                                 goto iocb_busy;
10169                         break;
10170                 case CMD_QUE_RING_BUF_CN:
10171                 case CMD_QUE_RING_BUF64_CN:
10172                         /*
10173                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10174                          * completion, cmd_cmpl MUST be 0.
10175                          */
10176                         if (piocb->cmd_cmpl)
10177                                 piocb->cmd_cmpl = NULL;
10178                         fallthrough;
10179                 case CMD_CREATE_XRI_CR:
10180                 case CMD_CLOSE_XRI_CN:
10181                 case CMD_CLOSE_XRI_CX:
10182                         break;
10183                 default:
10184                         goto iocb_busy;
10185                 }
10186
10187         /*
10188          * For FCP commands, we must be in a state where we can process link
10189          * attention events.
10190          */
10191         } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10192                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10193                 goto iocb_busy;
10194         }
10195
10196         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10197                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10198                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10199
10200         if (iocb)
10201                 lpfc_sli_update_ring(phba, pring);
10202         else
10203                 lpfc_sli_update_full_ring(phba, pring);
10204
10205         if (!piocb)
10206                 return IOCB_SUCCESS;
10207
10208         goto out_busy;
10209
10210  iocb_busy:
10211         pring->stats.iocb_cmd_delay++;
10212
10213  out_busy:
10214
10215         if (!(flag & SLI_IOCB_RET_IOCB)) {
10216                 __lpfc_sli_ringtx_put(phba, pring, piocb);
10217                 return IOCB_SUCCESS;
10218         }
10219
10220         return IOCB_BUSY;
10221 }
10222
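/*
 * Illustrative sketch only, not part of the driver: how a caller is
 * expected to treat the return codes of the issue path above.  With
 * SLI_IOCB_RET_IOCB the caller keeps ownership of the iocb on
 * IOCB_BUSY and may retry later; without it, a ring-full iocb is
 * parked on the txq and IOCB_SUCCESS is returned.  example_submit()
 * is hypothetical.
 */
static __maybe_unused int
example_submit(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
        int rc;

        /* No SLI_IOCB_RET_IOCB: busy iocbs are queued, not returned. */
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
        if (rc == IOCB_ERROR)
                return -EIO;    /* adapter not ready; caller cleans up */
        return 0;               /* submitted to firmware or parked on txq */
}
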
10223 /**
10224  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
10225  * @phba: Pointer to HBA context object.
10226  * @piocbq: Pointer to command iocb.
10227  * @sglq: Pointer to the scatter gather queue object.
10228  *
10229  * This routine converts the bpl or bde that is in the IOCB
10230  * to a sgl list for the sli4 hardware. The physical address
10231  * of the bpl/bde is converted back to a virtual address.
10232  * If the IOCB contains a BPL then the list of BDEs is
10233  * converted to sli4_sges. If the IOCB contains a single
10234  * BDE then it is converted to a single sli4_sge.
10235  * The IOCB is still in CPU endianness so the contents of
10236  * the bpl can be used without byte swapping.
10237  *
10238  * Returns valid XRI = Success, NO_XRI = Failure.
10239 **/
10240 static uint16_t
10241 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
10242                 struct lpfc_sglq *sglq)
10243 {
10244         uint16_t xritag = NO_XRI;
10245         struct ulp_bde64 *bpl = NULL;
10246         struct ulp_bde64 bde;
10247         struct sli4_sge *sgl  = NULL;
10248         struct lpfc_dmabuf *dmabuf;
10249         IOCB_t *icmd;
10250         int numBdes = 0;
10251         int i = 0;
10252         uint32_t offset = 0; /* accumulated offset in the sg request list */
10253         int inbound = 0; /* number of sg reply entries inbound from firmware */
10254
10255         if (!piocbq || !sglq)
10256                 return xritag;
10257
10258         sgl  = (struct sli4_sge *)sglq->sgl;
10259         icmd = &piocbq->iocb;
10260         if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
10261                 return sglq->sli4_xritag;
10262         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
10263                 numBdes = icmd->un.genreq64.bdl.bdeSize /
10264                                 sizeof(struct ulp_bde64);
10265                 /* The addrHigh and addrLow fields within the IOCB
10266                  * have not been byteswapped yet so there is no
10267                  * need to swap them back.
10268                  */
10269                 if (piocbq->context3)
10270                         dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
10271                 else
10272                         return xritag;
10273
10274                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
10275                 if (!bpl)
10276                         return xritag;
10277
10278                 for (i = 0; i < numBdes; i++) {
10279                         /* Should already be byte swapped. */
10280                         sgl->addr_hi = bpl->addrHigh;
10281                         sgl->addr_lo = bpl->addrLow;
10282
10283                         sgl->word2 = le32_to_cpu(sgl->word2);
10284                         if ((i+1) == numBdes)
10285                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
10286                         else
10287                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
10288                         /* swap the size field back to the cpu so we
10289                          * can assign it to the sgl.
10290                          */
10291                         bde.tus.w = le32_to_cpu(bpl->tus.w);
10292                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
10293                         /* The offsets in the sgl need to be accumulated
10294                          * separately for the request and reply lists.
10295                          * The request is always first, the reply follows.
10296                          */
10297                         if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
10298                                 /* add up the reply sg entries */
10299                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
10300                                         inbound++;
10301                                 /* first inbound? reset the offset */
10302                                 if (inbound == 1)
10303                                         offset = 0;
10304                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
10305                                 bf_set(lpfc_sli4_sge_type, sgl,
10306                                         LPFC_SGE_TYPE_DATA);
10307                                 offset += bde.tus.f.bdeSize;
10308                         }
10309                         sgl->word2 = cpu_to_le32(sgl->word2);
10310                         bpl++;
10311                         sgl++;
10312                 }
10313         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
10314                         /* The addrHigh and addrLow fields of the BDE have not
10315                          * been byteswapped yet so they need to be swapped
10316                          * before putting them in the sgl.
10317                          */
10318                         sgl->addr_hi =
10319                                 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
10320                         sgl->addr_lo =
10321                                 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
10322                         sgl->word2 = le32_to_cpu(sgl->word2);
10323                         bf_set(lpfc_sli4_sge_last, sgl, 1);
10324                         sgl->word2 = cpu_to_le32(sgl->word2);
10325                         sgl->sge_len =
10326                                 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
10327         }
10328         return sglq->sli4_xritag;
10329 }
10330
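/*
 * Illustrative sketch only, not part of the driver: the endian
 * handling used in the BPL-to-SGL walk above.  The BDE address words
 * are already little-endian and are copied through untouched, while
 * the combined type/size word is swapped to CPU order so the bdeSize
 * bitfield can be read, then the length is written back little-endian
 * into the SGE.  example_bde_to_sge() is hypothetical.
 */
static __maybe_unused void
example_bde_to_sge(const struct ulp_bde64 *bpl, struct sli4_sge *sgl,
                   bool last)
{
        struct ulp_bde64 bde;

        sgl->addr_hi = bpl->addrHigh;                   /* already LE */
        sgl->addr_lo = bpl->addrLow;                    /* already LE */
        bde.tus.w = le32_to_cpu(bpl->tus.w);            /* LE -> CPU */
        sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);  /* CPU -> LE */
        sgl->word2 = le32_to_cpu(sgl->word2);
        bf_set(lpfc_sli4_sge_last, sgl, last ? 1 : 0);
        sgl->word2 = cpu_to_le32(sgl->word2);
}
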
10331 /**
10332  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
10333  * @phba: Pointer to HBA context object.
10334  * @iocbq: Pointer to command iocb.
10335  * @wqe: Pointer to the work queue entry.
10336  *
10337  * This routine converts the iocb command to its Work Queue Entry
10338  * equivalent. The wqe pointer should not have any fields set when
10339  * this routine is called because it will memcpy over them.
10340  * This routine does not set the CQ_ID or the WQEC bits in the
10341  * wqe.
10342  *
10343  * Returns: 0 = Success, IOCB_ERROR = Failure.
10344  **/
10345 static int
10346 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
10347                 union lpfc_wqe128 *wqe)
10348 {
10349         uint32_t xmit_len = 0, total_len = 0;
10350         uint8_t ct = 0;
10351         uint32_t fip;
10352         uint32_t abort_tag;
10353         uint8_t command_type = ELS_COMMAND_NON_FIP;
10354         uint8_t cmnd;
10355         uint16_t xritag;
10356         uint16_t abrt_iotag;
10357         struct lpfc_iocbq *abrtiocbq;
10358         struct ulp_bde64 *bpl = NULL;
10359         uint32_t els_id = LPFC_ELS_ID_DEFAULT;
10360         int numBdes, i;
10361         struct ulp_bde64 bde;
10362         struct lpfc_nodelist *ndlp;
10363         uint32_t *pcmd;
10364         uint32_t if_type;
10365
10366         fip = phba->hba_flag & HBA_FIP_SUPPORT;
10367         /* The fcp commands will set command type */
10368         if (iocbq->cmd_flag &  LPFC_IO_FCP)
10369                 command_type = FCP_COMMAND;
10370         else if (fip && (iocbq->cmd_flag & LPFC_FIP_ELS_ID_MASK))
10371                 command_type = ELS_COMMAND_FIP;
10372         else
10373                 command_type = ELS_COMMAND_NON_FIP;
10374
10375         if (phba->fcp_embed_io)
10376                 memset(wqe, 0, sizeof(union lpfc_wqe128));
10377         /* Some of the fields are in the right position already */
10378         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
10379         /* The ct field has moved so reset */
10380         wqe->generic.wqe_com.word7 = 0;
10381         wqe->generic.wqe_com.word10 = 0;
10382
10383         abort_tag = (uint32_t) iocbq->iotag;
10384         xritag = iocbq->sli4_xritag;
10385         /* words0-2 bpl convert bde */
10386         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
10387                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10388                                 sizeof(struct ulp_bde64);
10389                 bpl  = (struct ulp_bde64 *)
10390                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
10391                 if (!bpl)
10392                         return IOCB_ERROR;
10393
10394                 /* Should already be byte swapped. */
10395                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
10396                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
10397                 /* swap the size field back to the cpu so we
10398                  * can assign it to the sgl.
10399                  */
10400                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
10401                 xmit_len = wqe->generic.bde.tus.f.bdeSize;
10402                 total_len = 0;
10403                 for (i = 0; i < numBdes; i++) {
10404                         bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
10405                         total_len += bde.tus.f.bdeSize;
10406                 }
10407         } else
10408                 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
10409
10410         iocbq->iocb.ulpIoTag = iocbq->iotag;
10411         cmnd = iocbq->iocb.ulpCommand;
10412
10413         switch (iocbq->iocb.ulpCommand) {
10414         case CMD_ELS_REQUEST64_CR:
10415                 if (iocbq->cmd_flag & LPFC_IO_LIBDFC)
10416                         ndlp = iocbq->context_un.ndlp;
10417                 else
10418                         ndlp = (struct lpfc_nodelist *)iocbq->context1;
10419                 if (!iocbq->iocb.ulpLe) {
10420                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10421                                 "2007 Only Limited Edition cmd Format"
10422                                 " supported 0x%x\n",
10423                                 iocbq->iocb.ulpCommand);
10424                         return IOCB_ERROR;
10425                 }
10426
10427                 wqe->els_req.payload_len = xmit_len;
10428                 /* ELS_REQUEST64 has a TMO */
10429                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
10430                         iocbq->iocb.ulpTimeout);
10431                 /* Need a VF for word 4; set the vf bit */
10432                 bf_set(els_req64_vf, &wqe->els_req, 0);
10433                 /* And a VFID for word 12 */
10434                 bf_set(els_req64_vfid, &wqe->els_req, 0);
10435                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10436                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10437                        iocbq->iocb.ulpContext);
10438                 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
10439                 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
10440                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
10441                 if (command_type == ELS_COMMAND_FIP)
10442                         els_id = ((iocbq->cmd_flag & LPFC_FIP_ELS_ID_MASK)
10443                                         >> LPFC_FIP_ELS_ID_SHIFT);
10444                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
10445                                         iocbq->context2)->virt);
10446                 if_type = bf_get(lpfc_sli_intf_if_type,
10447                                         &phba->sli4_hba.sli_intf);
10448                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10449                         if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
10450                                 *pcmd == ELS_CMD_SCR ||
10451                                 *pcmd == ELS_CMD_RDF ||
10452                                 *pcmd == ELS_CMD_EDC ||
10453                                 *pcmd == ELS_CMD_RSCN_XMT ||
10454                                 *pcmd == ELS_CMD_FDISC ||
10455                                 *pcmd == ELS_CMD_LOGO ||
10456                                 *pcmd == ELS_CMD_QFPA ||
10457                                 *pcmd == ELS_CMD_UVEM ||
10458                                 *pcmd == ELS_CMD_PLOGI)) {
10459                                 bf_set(els_req64_sp, &wqe->els_req, 1);
10460                                 bf_set(els_req64_sid, &wqe->els_req,
10461                                         iocbq->vport->fc_myDID);
10462                                 if ((*pcmd == ELS_CMD_FLOGI) &&
10463                                         !(phba->fc_topology ==
10464                                                 LPFC_TOPOLOGY_LOOP))
10465                                         bf_set(els_req64_sid, &wqe->els_req, 0);
10466                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
10467                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10468                                         phba->vpi_ids[iocbq->vport->vpi]);
10469                         } else if (pcmd && iocbq->context1) {
10470                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
10471                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10472                                         phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10473                         }
10474                 }
10475                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
10476                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10477                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10478                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
10479                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
10480                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
10481                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10482                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
10483                 wqe->els_req.max_response_payload_len = total_len - xmit_len;
10484                 break;
10485         case CMD_XMIT_SEQUENCE64_CX:
10486                 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
10487                        iocbq->iocb.un.ulpWord[3]);
10488                 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
10489                        iocbq->iocb.unsli3.rcvsli3.ox_id);
10490                 /* The entire sequence is transmitted for this IOCB */
10491                 xmit_len = total_len;
10492                 cmnd = CMD_XMIT_SEQUENCE64_CR;
10493                 if (phba->link_flag & LS_LOOPBACK_MODE)
10494                         bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
10495                 fallthrough;
10496         case CMD_XMIT_SEQUENCE64_CR:
10497                 /* word3 iocb=io_tag32 wqe=reserved */
10498                 wqe->xmit_sequence.rsvd3 = 0;
10499                 /* word4 relative_offset memcpy */
10500                 /* word5 r_ctl/df_ctl memcpy */
10501                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
10502                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
10503                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
10504                        LPFC_WQE_IOD_WRITE);
10505                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
10506                        LPFC_WQE_LENLOC_WORD12);
10507                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
10508                 wqe->xmit_sequence.xmit_len = xmit_len;
10509                 command_type = OTHER_COMMAND;
10510                 break;
10511         case CMD_XMIT_BCAST64_CN:
10512                 /* word3 iocb=iotag32 wqe=seq_payload_len */
10513                 wqe->xmit_bcast64.seq_payload_len = xmit_len;
10514                 /* word4 iocb=rsvd wqe=rsvd */
10515                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
10516                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
10517                 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
10518                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10519                 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
10520                 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
10521                 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
10522                        LPFC_WQE_LENLOC_WORD3);
10523                 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
10524                 break;
10525         case CMD_FCP_IWRITE64_CR:
10526                 command_type = FCP_COMMAND_DATA_OUT;
10527                 /* word3 iocb=iotag wqe=payload_offset_len */
10528                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10529                 bf_set(payload_offset_len, &wqe->fcp_iwrite,
10530                        xmit_len + sizeof(struct fcp_rsp));
10531                 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
10532                        0);
10533                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
10534                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
10535                 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
10536                        iocbq->iocb.ulpFCP2Rcvy);
10537                 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
10538                 /* Always open the exchange */
10539                 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
10540                 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
10541                        LPFC_WQE_LENLOC_WORD4);
10542                 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
10543                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
10544                 if (iocbq->cmd_flag & LPFC_IO_OAS) {
10545                         bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
10546                         bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10547                         if (iocbq->priority) {
10548                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10549                                        (iocbq->priority << 1));
10550                         } else {
10551                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10552                                        (phba->cfg_XLanePriority << 1));
10553                         }
10554                 }
10555                 /* Note, word 10 is already initialized to 0 */
10556
10557                 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
10558                 if (phba->cfg_enable_pbde)
10559                         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
10560                 else
10561                         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
10562
10563                 if (phba->fcp_embed_io) {
10564                         struct lpfc_io_buf *lpfc_cmd;
10565                         struct sli4_sge *sgl;
10566                         struct fcp_cmnd *fcp_cmnd;
10567                         uint32_t *ptr;
10568
10569                         /* 128 byte wqe support here */
10570
10571                         lpfc_cmd = iocbq->context1;
10572                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10573                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
10574
10575                         /* Word 0-2 - FCP_CMND */
10576                         wqe->generic.bde.tus.f.bdeFlags =
10577                                 BUFF_TYPE_BDE_IMMED;
10578                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10579                         wqe->generic.bde.addrHigh = 0;
10580                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
10581
10582                         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10583                         bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10584
10585                         /* Word 22-29  FCP CMND Payload */
10586                         ptr = &wqe->words[22];
10587                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10588                 }
10589                 break;
10590         case CMD_FCP_IREAD64_CR:
10591                 /* word3 iocb=iotag wqe=payload_offset_len */
10592                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10593                 bf_set(payload_offset_len, &wqe->fcp_iread,
10594                        xmit_len + sizeof(struct fcp_rsp));
10595                 bf_set(cmd_buff_len, &wqe->fcp_iread,
10596                        0);
10597                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
10598                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
10599                 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
10600                        iocbq->iocb.ulpFCP2Rcvy);
10601                 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
10602                 /* Always open the exchange */
10603                 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
10604                 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
10605                        LPFC_WQE_LENLOC_WORD4);
10606                 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
10607                 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
10608                 if (iocbq->cmd_flag & LPFC_IO_OAS) {
10609                         bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
10610                         bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
10611                         if (iocbq->priority) {
10612                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
10613                                        (iocbq->priority << 1));
10614                         } else {
10615                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
10616                                        (phba->cfg_XLanePriority << 1));
10617                         }
10618                 }
10619                 /* Note, word 10 is already initialized to 0 */
10620
10621                 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
10622                 if (phba->cfg_enable_pbde)
10623                         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
10624                 else
10625                         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
10626
10627                 if (phba->fcp_embed_io) {
10628                         struct lpfc_io_buf *lpfc_cmd;
10629                         struct sli4_sge *sgl;
10630                         struct fcp_cmnd *fcp_cmnd;
10631                         uint32_t *ptr;
10632
10633                         /* 128 byte wqe support here */
10634
10635                         lpfc_cmd = iocbq->context1;
10636                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10637                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
10638
10639                         /* Word 0-2 - FCP_CMND */
10640                         wqe->generic.bde.tus.f.bdeFlags =
10641                                 BUFF_TYPE_BDE_IMMED;
10642                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10643                         wqe->generic.bde.addrHigh = 0;
10644                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
10645
10646                         bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
10647                         bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
10648
10649                         /* Word 22-29  FCP CMND Payload */
10650                         ptr = &wqe->words[22];
10651                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10652                 }
10653                 break;
10654         case CMD_FCP_ICMND64_CR:
10655                 /* word3 iocb=iotag wqe=payload_offset_len */
10656                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10657                 bf_set(payload_offset_len, &wqe->fcp_icmd,
10658                        xmit_len + sizeof(struct fcp_rsp));
10659                 bf_set(cmd_buff_len, &wqe->fcp_icmd,
10660                        0);
10661                 /* word3 iocb=IO_TAG wqe=reserved */
10662                 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
10663                 /* Always open the exchange */
10664                 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
10665                 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
10666                 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
10667                 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
10668                        LPFC_WQE_LENLOC_NONE);
10669                 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
10670                        iocbq->iocb.ulpFCP2Rcvy);
10671                 if (iocbq->cmd_flag & LPFC_IO_OAS) {
10672                         bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
10673                         bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
10674                         if (iocbq->priority) {
10675                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
10676                                        (iocbq->priority << 1));
10677                         } else {
10678                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
10679                                        (phba->cfg_XLanePriority << 1));
10680                         }
10681                 }
10682                 /* Note, word 10 is already initialized to 0 */
10683
10684                 if (phba->fcp_embed_io) {
10685                         struct lpfc_io_buf *lpfc_cmd;
10686                         struct sli4_sge *sgl;
10687                         struct fcp_cmnd *fcp_cmnd;
10688                         uint32_t *ptr;
10689
10690                         /* 128 byte wqe support here */
10691
10692                         lpfc_cmd = iocbq->context1;
10693                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10694                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
10695
10696                         /* Word 0-2 - FCP_CMND */
10697                         wqe->generic.bde.tus.f.bdeFlags =
10698                                 BUFF_TYPE_BDE_IMMED;
10699                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10700                         wqe->generic.bde.addrHigh = 0;
10701                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
10702
10703                         bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
10704                         bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
10705
10706                         /* Word 22-29  FCP CMND Payload */
10707                         ptr = &wqe->words[22];
10708                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10709                 }
10710                 break;
10711         case CMD_GEN_REQUEST64_CR:
10712                 /* For this command calculate the xmit length of the
10713                  * request bde.
10714                  */
10715                 xmit_len = 0;
10716                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10717                         sizeof(struct ulp_bde64);
10718                 for (i = 0; i < numBdes; i++) {
10719                         bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10720                         if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
10721                                 break;
10722                         xmit_len += bde.tus.f.bdeSize;
10723                 }
10724                 /* word3 iocb=IO_TAG wqe=request_payload_len */
10725                 wqe->gen_req.request_payload_len = xmit_len;
10726                 /* word4 iocb=parameter wqe=relative_offset memcpy */
10727                 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
10728                 /* word6 context tag copied in memcpy */
10729                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
10730                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10731                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10732                                 "2015 Invalid CT %x command 0x%x\n",
10733                                 ct, iocbq->iocb.ulpCommand);
10734                         return IOCB_ERROR;
10735                 }
10736                 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
10737                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
10738                 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
10739                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
10740                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
10741                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
10742                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10743                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
10744                 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
10745                 command_type = OTHER_COMMAND;
10746                 break;
10747         case CMD_XMIT_ELS_RSP64_CX:
10748                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10749                 /* words0-2 BDE memcpy */
10750                 /* word3 iocb=iotag32 wqe=response_payload_len */
10751                 wqe->xmit_els_rsp.response_payload_len = xmit_len;
10752                 /* word4 */
10753                 wqe->xmit_els_rsp.word4 = 0;
10754                 /* word5 iocb=rsvd wqe=did */
10755                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
10756                          iocbq->iocb.un.xseq64.xmit_els_remoteID);
10757
10758                 if_type = bf_get(lpfc_sli_intf_if_type,
10759                                         &phba->sli4_hba.sli_intf);
10760                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10761                         if (iocbq->vport->fc_flag & FC_PT2PT) {
10762                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10763                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10764                                         iocbq->vport->fc_myDID);
10765                                 if (iocbq->vport->fc_myDID == Fabric_DID) {
10766                                         bf_set(wqe_els_did,
10767                                                 &wqe->xmit_els_rsp.wqe_dest, 0);
10768                                 }
10769                         }
10770                 }
10771                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
10772                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10773                 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
10774                 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
10775                        iocbq->iocb.unsli3.rcvsli3.ox_id);
10776                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
10777                         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10778                                phba->vpi_ids[iocbq->vport->vpi]);
10779                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
10780                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
10781                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
10782                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
10783                        LPFC_WQE_LENLOC_WORD3);
10784                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
10785                 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
10786                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10787                 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10788                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10789                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10790                                         iocbq->vport->fc_myDID);
10791                                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
10792                                 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10793                                         phba->vpi_ids[phba->pport->vpi]);
10794                 }
10795                 command_type = OTHER_COMMAND;
10796                 break;
10797         case CMD_CLOSE_XRI_CN:
10798         case CMD_ABORT_XRI_CN:
10799         case CMD_ABORT_XRI_CX:
10800                 /* words 0-2 memcpy should be 0 (reserved) */
10801                 /* port will send abts */
10802                 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
10803                 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
10804                         abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
10805                         fip = abrtiocbq->cmd_flag & LPFC_FIP_ELS_ID_MASK;
10806                 } else
10807                         fip = 0;
10808
10809                 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
10810                         /*
10811                          * The link is down, or the command was ELS_FIP,
10812                          * so the fw does not need to send an abts
10813                          * on the wire.
10814                          */
10815                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10816                 else
10817                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10818                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10819                 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
10820                 wqe->abort_cmd.rsrvd5 = 0;
10821                 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
10822                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10823                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
10824                 /*
10825                  * The abort handler will send us CMD_ABORT_XRI_CN or
10826                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
10827                  */
10828                 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10829                 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10830                 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10831                        LPFC_WQE_LENLOC_NONE);
10832                 cmnd = CMD_ABORT_XRI_CX;
10833                 command_type = OTHER_COMMAND;
10834                 xritag = 0;
10835                 break;
10836         case CMD_XMIT_BLS_RSP64_CX:
10837                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10838                 /* As the BLS ABTS RSP WQE is very different from other
10839                  * WQEs, we re-construct this WQE here from scratch, based
10840                  * on the information in the iocbq.
10841                  */
10842                 memset(wqe, 0, sizeof(*wqe));
10843                 /* OX_ID is invariant regardless of who sent ABTS to the CT exchange */
10844                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10845                        bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10846                 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10847                     LPFC_ABTS_UNSOL_INT) {
10848                         /* ABTS sent by initiator to CT exchange, the
10849                          * RX_ID field will be filled with the newly
10850                          * allocated responder XRI.
10851                          */
10852                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10853                                iocbq->sli4_xritag);
10854                 } else {
10855                         /* ABTS sent by responder to CT exchange, the
10856                          * RX_ID field will be filled with the responder
10857                          * RX_ID from ABTS.
10858                          */
10859                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10860                                bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10861                 }
10862                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10863                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10864
10865                 /* Use CT=VPI */
10866                 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10867                         ndlp->nlp_DID);
10868                 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10869                         iocbq->iocb.ulpContext);
10870                 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10871                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10872                         phba->vpi_ids[phba->pport->vpi]);
10873                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10874                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10875                        LPFC_WQE_LENLOC_NONE);
10876                 /* Overwrite the pre-set command type with OTHER_COMMAND */
10877                 command_type = OTHER_COMMAND;
10878                 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10879                         bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10880                                bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10881                         bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10882                                bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10883                         bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10884                                bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10885                 }
10886
10887                 break;
10888         case CMD_SEND_FRAME:
10889                 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10890                 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10891                 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10892                 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10893                 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10894                 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10895                 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10896                 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10897                 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10898                 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10899                 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10900                 return 0;
10901         case CMD_XRI_ABORTED_CX:
10902         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10903         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10904         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10905         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10906         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10907         default:
10908                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10909                                 "2014 Invalid command 0x%x\n",
10910                                 iocbq->iocb.ulpCommand);
10911                 return IOCB_ERROR;
10912         }
10913
10914         if (iocbq->cmd_flag & LPFC_IO_DIF_PASS)
10915                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10916         else if (iocbq->cmd_flag & LPFC_IO_DIF_STRIP)
10917                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10918         else if (iocbq->cmd_flag & LPFC_IO_DIF_INSERT)
10919                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10920         iocbq->cmd_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10921                               LPFC_IO_DIF_INSERT);
10922         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10923         bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10924         wqe->generic.wqe_com.abort_tag = abort_tag;
10925         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10926         bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10927         bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10928         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10929         return 0;
10930 }
10931
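/*
 * Illustrative sketch only, not part of the driver: a simplified
 * model of the bf_set()/bf_get() accessors used throughout the WQE
 * build above.  Each field name is assumed to expand, via token
 * pasting, to a _SHIFT/_MASK/_WORD triple describing where the field
 * lives inside a 32-bit word; the exact lpfc definitions live in
 * lpfc_hw4.h.  The example_* names are hypothetical.
 */
#define example_field_SHIFT     8
#define example_field_MASK      0x000000ff
#define example_field_WORD      word7

#define example_bf_set(name, ptr, value)                        \
        ((ptr)->name##_WORD = (((value) & name##_MASK)          \
                                << name##_SHIFT) |              \
         ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

#define example_bf_get(name, ptr)                               \
        (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
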
10932 /**
10933  * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10934  * @phba: Pointer to HBA context object.
10935  * @ring_number: SLI ring number to issue wqe on.
10936  * @piocb: Pointer to command iocb.
10937  * @flag: Flag indicating if this command can be put into txq.
10938  *
10939  * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10940  * function to send an iocb command to an HBA with SLI-3 interface spec.
10941  *
10942  * This function takes the hbalock before invoking the lockless version.
10943  * The function will return success after it successfully submits the iocb to
10944  * firmware or after adding it to the txq.
10945  **/
10946 static int
10947 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10948                            struct lpfc_iocbq *piocb, uint32_t flag)
10949 {
10950         unsigned long iflags;
10951         int rc;
10952
10953         spin_lock_irqsave(&phba->hbalock, iflags);
10954         rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10955         spin_unlock_irqrestore(&phba->hbalock, iflags);
10956
10957         return rc;
10958 }
10959
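/*
 * Illustrative sketch only, not part of the driver: the lock-wrapper
 * idiom above, where a lockless "__" helper is exposed through a thin
 * function that takes the required spinlock with local interrupts
 * disabled.  The example_* names are hypothetical.
 */
static __maybe_unused int
example_locked_issue(struct lpfc_hba *phba)
{
        unsigned long iflags;
        int rc;

        spin_lock_irqsave(&phba->hbalock, iflags);
        rc = 0;         /* the lockless __example_issue() would run here */
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        return rc;
}
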
10960 /**
10961  * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10962  * @phba: Pointer to HBA context object.
10963  * @ring_number: SLI ring number to issue wqe on.
10964  * @piocb: Pointer to command iocb.
10965  * @flag: Flag indicating if this command can be put into txq.
10966  *
10967  * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10968  * a wqe command to an HBA with SLI-4 interface spec.
10969  *
10970  * This function is a lockless version. The function will return success
10971  * after it successfully submits the wqe to firmware or after adding it to the
10972  * txq.
10973  **/
10974 static int
10975 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10976                            struct lpfc_iocbq *piocb, uint32_t flag)
10977 {
10978         int rc;
10979         struct lpfc_io_buf *lpfc_cmd =
10980                 (struct lpfc_io_buf *)piocb->context1;
10981
10982         lpfc_prep_embed_io(phba, lpfc_cmd);
10983         rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10984         return rc;
10985 }
10986
10987 void
10988 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10989 {
10990         struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10991         union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10992         struct sli4_sge *sgl;
10993
10994         /* 128 byte wqe support here */
10995         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10996
10997         if (phba->fcp_embed_io) {
10998                 struct fcp_cmnd *fcp_cmnd;
10999                 u32 *ptr;
11000
11001                 fcp_cmnd = lpfc_cmd->fcp_cmnd;
11002
11003                 /* Word 0-2 - FCP_CMND */
11004                 wqe->generic.bde.tus.f.bdeFlags =
11005                         BUFF_TYPE_BDE_IMMED;
11006                 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
11007                 wqe->generic.bde.addrHigh = 0;
11008                 wqe->generic.bde.addrLow =  88;  /* Word 22 */
11009
11010                 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
11011                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
11012
11013                 /* Word 22-29  FCP CMND Payload */
11014                 ptr = &wqe->words[22];
11015                 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
11016         } else {
11017                 /* Word 0-2 - Inline BDE */
11018                 wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
11019                 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
11020                 wqe->generic.bde.addrHigh = sgl->addr_hi;
11021                 wqe->generic.bde.addrLow =  sgl->addr_lo;
11022
11023                 /* Word 10 */
11024                 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
11025                 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
11026         }
11027
11028         /* add the VMID tags as per switch response */
11029         if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
11030                 if (phba->pport->vmid_priority_tagging) {
11031                         bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
11032                         bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
11033                                         (piocb->vmid_tag.cs_ctl_vmid));
11034                 } else {
11035                         bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
11036                         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
11037                         wqe->words[31] = piocb->vmid_tag.app_id;
11038                 }
11039         }
11040 }
11041
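/*
 * Illustrative sketch only, not part of the driver: why addrLow is
 * set to 88 in the embedded-FCP_CMND path above.  The payload is
 * carried in WQE words 22-29, and for an immediate BDE the "address"
 * is the byte offset of the payload from the start of the WQE:
 * 22 words * 4 bytes/word = 88 bytes.  The EXAMPLE_* names are
 * hypothetical.
 */
#define EXAMPLE_FCP_CMND_WQE_WORD       22
#define EXAMPLE_FCP_CMND_WQE_OFFSET     \
        (EXAMPLE_FCP_CMND_WQE_WORD * sizeof(uint32_t))  /* == 88 */
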
11042 /**
11043  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
11044  * @phba: Pointer to HBA context object.
11045  * @ring_number: SLI ring number to issue iocb on.
11046  * @piocb: Pointer to command iocb.
11047  * @flag: Flag indicating if this command can be put into txq.
11048  *
11049  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
11050  * an iocb command to an HBA with SLI-4 interface spec.
11051  *
11052  * This function is called with ringlock held. The function will return success
11053  * after it successfully submits the iocb to firmware or after adding it to the
11054  * txq.
11055  **/
11056 static int
11057 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
11058                          struct lpfc_iocbq *piocb, uint32_t flag)
11059 {
11060         struct lpfc_sglq *sglq;
11061         union lpfc_wqe128 *wqe;
11062         struct lpfc_queue *wq;
11063         struct lpfc_sli_ring *pring;
11064         u32 ulp_command = get_job_cmnd(phba, piocb);
11065
11066         /* Get the WQ */
11067         if ((piocb->cmd_flag & LPFC_IO_FCP) ||
11068             (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11069                 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
11070         } else {
11071                 wq = phba->sli4_hba.els_wq;
11072         }
11073
11074         /* Get corresponding ring */
11075         pring = wq->pring;
11076
11077         /*
11078          * The WQE can be either 64 or 128 bytes.
11079          */
11080
11081         lockdep_assert_held(&pring->ring_lock);
11082         wqe = &piocb->wqe;
11083         if (piocb->sli4_xritag == NO_XRI) {
11084                 if (ulp_command == CMD_ABORT_XRI_WQE)
11085                         sglq = NULL;
11086                 else {
11087                         if (!list_empty(&pring->txq)) {
11088                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
11089                                         __lpfc_sli_ringtx_put(phba,
11090                                                 pring, piocb);
11091                                         return IOCB_SUCCESS;
11092                                 } else {
11093                                         return IOCB_BUSY;
11094                                 }
11095                         } else {
11096                                 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
11097                                 if (!sglq) {
11098                                         if (!(flag & SLI_IOCB_RET_IOCB)) {
11099                                                 __lpfc_sli_ringtx_put(phba,
11100                                                                 pring,
11101                                                                 piocb);
11102                                                 return IOCB_SUCCESS;
11103                                         } else
11104                                                 return IOCB_BUSY;
11105                                 }
11106                         }
11107                 }
11108         } else if (piocb->cmd_flag & LPFC_IO_FCP) {
11109                 /* These IOs already have an XRI and a mapped sgl. */
11110                 sglq = NULL;
11111         }
11112         else {
11113                 /*
11114                  * This is a continuation of a command (CX), so this
11115                  * sglq is on the active list.
11116                  */
11117                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
11118                 if (!sglq)
11119                         return IOCB_ERROR;
11120         }
11121
11122         if (sglq) {
11123                 piocb->sli4_lxritag = sglq->sli4_lxritag;
11124                 piocb->sli4_xritag = sglq->sli4_xritag;
11125
11126                 /* ABTS sent by initiator to CT exchange, the
11127                  * RX_ID field will be filled with the newly
11128                  * allocated responder XRI.
11129                  */
11130                 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
11131                     piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
11132                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
11133                                piocb->sli4_xritag);
11134
11135                 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
11136                        piocb->sli4_xritag);
11137
11138                 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
11139                         return IOCB_ERROR;
11140         }
11141
11142         if (lpfc_sli4_wq_put(wq, wqe))
11143                 return IOCB_ERROR;
11144         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
11145
11146         return 0;
11147 }
11148
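/*
 * Illustrative sketch only, not part of the driver: the XRI/sglq
 * decision made above, reduced to a classifier.  FCP I/Os arrive with
 * an XRI and a mapped sgl; new work with no XRI must win an ELS sglq
 * (or be parked on the txq / bounced as busy per SLI_IOCB_RET_IOCB);
 * a CX continuation reuses the sglq already on the active list.  The
 * example_* names are hypothetical.
 */
enum example_xri_path { EXAMPLE_NEED_SGLQ, EXAMPLE_HAS_XRI,
                        EXAMPLE_ACTIVE_SGLQ };

static __maybe_unused enum example_xri_path
example_classify(struct lpfc_iocbq *piocb)
{
        if (piocb->sli4_xritag == NO_XRI)
                return EXAMPLE_NEED_SGLQ;       /* contend for an ELS sglq */
        if (piocb->cmd_flag & LPFC_IO_FCP)
                return EXAMPLE_HAS_XRI;         /* sgl already mapped */
        return EXAMPLE_ACTIVE_SGLQ;             /* CX continuation */
}
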
11149 /*
11150  * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
11151  *
11152  * This routine wraps the actual fcp i/o issuing function (WQE for sli-4
11153  * or IOCB for sli-3) via the function
11154  * pointer from the lpfc_hba struct.
11155  *
11156  * Return codes:
11157  * IOCB_ERROR - Error
11158  * IOCB_SUCCESS - Success
11159  * IOCB_BUSY - Busy
11160  **/
11161 int
11162 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
11163                       struct lpfc_iocbq *piocb, uint32_t flag)
11164 {
11165         return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
11166 }
11167
11168 /*
11169  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
11170  *
11171  * This routine wraps the actual lockless version of the issue IOCB function
11172  * pointer from the lpfc_hba struct.
11173  *
11174  * Return codes:
11175  * IOCB_ERROR - Error
11176  * IOCB_SUCCESS - Success
11177  * IOCB_BUSY - Busy
11178  **/
11179 int
11180 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11181                 struct lpfc_iocbq *piocb, uint32_t flag)
11182 {
11183         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11184 }
11185
11186 /**
11187  * lpfc_sli_api_table_setup - Set up sli api function jump table
11188  * @phba: The hba struct for which this call is being executed.
11189  * @dev_grp: The HBA PCI-Device group number.
11190  *
11191  * This routine sets up the SLI interface API function jump table in @phba
11192  * struct.
11193  * Returns: 0 - success, -ENODEV - failure.
11194  **/
11195 int
11196 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11197 {
11198
11199         switch (dev_grp) {
11200         case LPFC_PCI_DEV_LP:
11201                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11202                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11203                 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11204                 break;
11205         case LPFC_PCI_DEV_OC:
11206                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11207                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11208                 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11209                 break;
11210         default:
11211                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11212                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
11213                                 dev_grp);
11214                 return -ENODEV;
11215         }
11216         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
11217         return 0;
11218 }
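
/*
 * Illustrative sketch (not part of the driver): once the jump table above
 * is populated, generic code never branches on sli_rev at the call site;
 * it simply invokes the hooked routine. A hypothetical caller:
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 */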
11219
11220 /**
11221  * lpfc_sli4_calc_ring - Calculates which ring to use
11222  * @phba: Pointer to HBA context object.
11223  * @piocb: Pointer to command iocb.
11224  *
11225  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11226  * hba_wqidx, thus we need to calculate the corresponding ring.
11227  * Since ABORTS must go on the same WQ of the command they are
11228  * aborting, we use command's hba_wqidx.
11229  */
11230 struct lpfc_sli_ring *
11231 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11232 {
11233         struct lpfc_io_buf *lpfc_cmd;
11234
11235         if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11236                 if (unlikely(!phba->sli4_hba.hdwq))
11237                         return NULL;
11238                 /*
11239                  * for abort iocb hba_wqidx should already
11240                  * be setup based on what work queue we used.
11241                  */
11242                 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11243                         lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
11244                         piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11245                 }
11246                 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11247         } else {
11248                 if (unlikely(!phba->sli4_hba.els_wq))
11249                         return NULL;
11250                 piocb->hba_wqidx = 0;
11251                 return phba->sli4_hba.els_wq->pring;
11252         }
11253 }
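
/*
 * Illustrative sketch (mirrors the abort path later in this file): an
 * abort must ride the same WQ as the command it aborts, so the caller
 * copies hba_wqidx and sets LPFC_USE_FCPWQIDX before the lookup, which
 * makes lpfc_sli4_calc_ring() skip the hdwq_no recalculation above:
 *
 *	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
 *	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
 *		abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
 */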
11254
11255 /**
11256  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11257  * @phba: Pointer to HBA context object.
11258  * @ring_number: Ring number
11259  * @piocb: Pointer to command iocb.
11260  * @flag: Flag indicating if this command can be put into txq.
11261  *
11262  * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
11263  * function. This function takes the appropriate lock (the ring_lock
11264  * for SLI4, the hbalock for SLI2/3), calls __lpfc_sli_issue_iocb,
11265  * and returns the error returned by __lpfc_sli_issue_iocb. This
11266  * wrapper is used by functions which do not hold the relevant lock.
11267  **/
11268 int
11269 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11270                     struct lpfc_iocbq *piocb, uint32_t flag)
11271 {
11272         struct lpfc_sli_ring *pring;
11273         struct lpfc_queue *eq;
11274         unsigned long iflags;
11275         int rc;
11276
11277         if (phba->sli_rev == LPFC_SLI_REV4) {
11278                 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11279
11280                 pring = lpfc_sli4_calc_ring(phba, piocb);
11281                 if (unlikely(pring == NULL))
11282                         return IOCB_ERROR;
11283
11284                 spin_lock_irqsave(&pring->ring_lock, iflags);
11285                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11286                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11287
11288                 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
11289         } else {
11290                 /* For now, SLI2/3 will still use hbalock */
11291                 spin_lock_irqsave(&phba->hbalock, iflags);
11292                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11293                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11294         }
11295         return rc;
11296 }
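
/*
 * Minimal usage sketch (illustrative): a caller holding no lock issues
 * through this wrapper; with SLI_IOCB_RET_IOCB set, a full queue hands
 * the iocb back with IOCB_BUSY instead of parking it on the txq:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
 *				 SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY)
 *		lpfc_sli_release_iocbq(phba, piocb);
 */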
11297
11298 /**
11299  * lpfc_extra_ring_setup - Extra ring setup function
11300  * @phba: Pointer to HBA context object.
11301  *
11302  * This function is called while the driver attaches to the
11303  * HBA to set up the extra ring. The extra ring is used
11304  * only when the driver needs to support target mode
11305  * or IP over FC functionality.
11306  *
11307  * This function is called with no lock held. SLI3 only.
11308  **/
11309 static int
11310 lpfc_extra_ring_setup(struct lpfc_hba *phba)
11311 {
11312         struct lpfc_sli *psli;
11313         struct lpfc_sli_ring *pring;
11314
11315         psli = &phba->sli;
11316
11317         /* Adjust cmd/rsp ring iocb entries more evenly */
11318
11319         /* Take some away from the FCP ring */
11320         pring = &psli->sli3_ring[LPFC_FCP_RING];
11321         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11322         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11323         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11324         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11325
11326         /* and give them to the extra ring */
11327         pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11328
11329         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11330         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11331         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11332         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11333
11334         /* Setup default profile for this ring */
11335         pring->iotag_max = 4096;
11336         pring->num_mask = 1;
11337         pring->prt[0].profile = 0;      /* Mask 0 */
11338         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11339         pring->prt[0].type = phba->cfg_multi_ring_type;
11340         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11341         return 0;
11342 }
11343
11344 static void
11345 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11346                              struct lpfc_nodelist *ndlp)
11347 {
11348         unsigned long iflags;
11349         struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
11350
11351         spin_lock_irqsave(&phba->hbalock, iflags);
11352         if (!list_empty(&evtp->evt_listp)) {
11353                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11354                 return;
11355         }
11356
11357         /* Incrementing the reference count until the queued work is done. */
11358         evtp->evt_arg1  = lpfc_nlp_get(ndlp);
11359         if (!evtp->evt_arg1) {
11360                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11361                 return;
11362         }
11363         evtp->evt = LPFC_EVT_RECOVER_PORT;
11364         list_add_tail(&evtp->evt_listp, &phba->work_list);
11365         spin_unlock_irqrestore(&phba->hbalock, iflags);
11366
11367         lpfc_worker_wake_up(phba);
11368 }
11369
11370 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11371  * @phba: Pointer to HBA context object.
11372  * @iocbq: Pointer to iocb object.
11373  *
11374  * The async_event handler calls this routine when it receives
11375  * an ASYNC_STATUS_CN event from the port.  The port generates
11376  * this event when an Abort Sequence request to an rport fails
11377  * twice in succession.  The abort could be originated by the
11378  * driver or by the port.  The ABTS could have been for an ELS
11379  * or FCP IO.  The port only generates this event when an ABTS
11380  * fails to complete after one retry.
11381  */
11382 static void
11383 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11384                           struct lpfc_iocbq *iocbq)
11385 {
11386         struct lpfc_nodelist *ndlp = NULL;
11387         uint16_t rpi = 0, vpi = 0;
11388         struct lpfc_vport *vport = NULL;
11389
11390         /* The rpi in the ulpContext is vport-sensitive. */
11391         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11392         rpi = iocbq->iocb.ulpContext;
11393
11394         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11395                         "3092 Port generated ABTS async event "
11396                         "on vpi %d rpi %d status 0x%x\n",
11397                         vpi, rpi, iocbq->iocb.ulpStatus);
11398
11399         vport = lpfc_find_vport_by_vpid(phba, vpi);
11400         if (!vport)
11401                 goto err_exit;
11402         ndlp = lpfc_findnode_rpi(vport, rpi);
11403         if (!ndlp)
11404                 goto err_exit;
11405
11406         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11407                 lpfc_sli_abts_recover_port(vport, ndlp);
11408         return;
11409
11410  err_exit:
11411         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11412                         "3095 Event Context not found, no "
11413                         "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11414                         vpi, rpi, iocbq->iocb.ulpStatus,
11415                         iocbq->iocb.un.ulpWord[4]);
11416 }
11417
11418 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11419  * @phba: pointer to HBA context object.
11420  * @ndlp: nodelist pointer for the impacted rport.
11421  * @axri: pointer to the wcqe containing the failed exchange.
11422  *
11423  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11424  * port.  The port generates this event when an abort exchange request to an
11425  * rport fails twice in succession with no reply.  The abort could be originated
11426  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
11427  */
11428 void
11429 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11430                            struct lpfc_nodelist *ndlp,
11431                            struct sli4_wcqe_xri_aborted *axri)
11432 {
11433         uint32_t ext_status = 0;
11434
11435         if (!ndlp) {
11436                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11437                                 "3115 Node Context not found, driver "
11438                                 "ignoring abts err event\n");
11439                 return;
11440         }
11441
11442         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11443                         "3116 Port generated FCP XRI ABORT event on "
11444                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11445                         ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11446                         bf_get(lpfc_wcqe_xa_xri, axri),
11447                         bf_get(lpfc_wcqe_xa_status, axri),
11448                         axri->parameter);
11449
11450         /*
11451          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
11452          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11453          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11454          */
11455         ext_status = axri->parameter & IOERR_PARAM_MASK;
11456         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11457             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11458                 lpfc_sli_post_recovery_event(phba, ndlp);
11459 }
11460
11461 /**
11462  * lpfc_sli_async_event_handler - ASYNC iocb handler function
11463  * @phba: Pointer to HBA context object.
11464  * @pring: Pointer to driver SLI ring object.
11465  * @iocbq: Pointer to iocb object.
11466  *
11467  * This function is called by the slow ring event handler
11468  * function when there is an ASYNC event iocb in the ring.
11469  * This function is called with no lock held.
11470  * Currently this function handles only temperature related
11471  * ASYNC events. The function decodes the temperature sensor
11472  * event message and posts events for the management applications.
11473  **/
11474 static void
11475 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11476         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11477 {
11478         IOCB_t *icmd;
11479         uint16_t evt_code;
11480         struct temp_event temp_event_data;
11481         struct Scsi_Host *shost;
11482         uint32_t *iocb_w;
11483
11484         icmd = &iocbq->iocb;
11485         evt_code = icmd->un.asyncstat.evt_code;
11486
11487         switch (evt_code) {
11488         case ASYNC_TEMP_WARN:
11489         case ASYNC_TEMP_SAFE:
11490                 temp_event_data.data = (uint32_t) icmd->ulpContext;
11491                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11492                 if (evt_code == ASYNC_TEMP_WARN) {
11493                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11494                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11495                                 "0347 Adapter is very hot, please take "
11496                                 "corrective action. temperature : %d Celsius\n",
11497                                 (uint32_t) icmd->ulpContext);
11498                 } else {
11499                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
11500                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11501                                 "0340 Adapter temperature is OK now. "
11502                                 "temperature : %d Celsius\n",
11503                                 (uint32_t) icmd->ulpContext);
11504                 }
11505
11506                 /* Send temperature change event to applications */
11507                 shost = lpfc_shost_from_vport(phba->pport);
11508                 fc_host_post_vendor_event(shost, fc_get_event_number(),
11509                         sizeof(temp_event_data), (char *) &temp_event_data,
11510                         LPFC_NL_VENDOR_ID);
11511                 break;
11512         case ASYNC_STATUS_CN:
11513                 lpfc_sli_abts_err_handler(phba, iocbq);
11514                 break;
11515         default:
11516                 iocb_w = (uint32_t *) icmd;
11517                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11518                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
11519                         " evt_code 0x%x\n"
11520                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
11521                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
11522                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
11523                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11524                         pring->ringno, icmd->un.asyncstat.evt_code,
11525                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11526                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11527                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11528                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11529
11530                 break;
11531         }
11532 }
11533
11534
11535 /**
11536  * lpfc_sli4_setup - SLI ring setup function
11537  * @phba: Pointer to HBA context object.
11538  *
11539  * lpfc_sli4_setup sets up rings of the SLI interface with
11540  * the number of iocbs per ring and iotags. This function is
11541  * called while the driver attaches to the HBA and before the
11542  * interrupts are enabled, so there is no need for locking.
11543  *
11544  * This function always returns 0.
11545  **/
11546 int
11547 lpfc_sli4_setup(struct lpfc_hba *phba)
11548 {
11549         struct lpfc_sli_ring *pring;
11550
11551         pring = phba->sli4_hba.els_wq->pring;
11552         pring->num_mask = LPFC_MAX_RING_MASK;
11553         pring->prt[0].profile = 0;      /* Mask 0 */
11554         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11555         pring->prt[0].type = FC_TYPE_ELS;
11556         pring->prt[0].lpfc_sli_rcv_unsol_event =
11557             lpfc_els_unsol_event;
11558         pring->prt[1].profile = 0;      /* Mask 1 */
11559         pring->prt[1].rctl = FC_RCTL_ELS_REP;
11560         pring->prt[1].type = FC_TYPE_ELS;
11561         pring->prt[1].lpfc_sli_rcv_unsol_event =
11562             lpfc_els_unsol_event;
11563         pring->prt[2].profile = 0;      /* Mask 2 */
11564         /* NameServer Inquiry */
11565         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11566         /* NameServer */
11567         pring->prt[2].type = FC_TYPE_CT;
11568         pring->prt[2].lpfc_sli_rcv_unsol_event =
11569             lpfc_ct_unsol_event;
11570         pring->prt[3].profile = 0;      /* Mask 3 */
11571         /* NameServer response */
11572         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11573         /* NameServer */
11574         pring->prt[3].type = FC_TYPE_CT;
11575         pring->prt[3].lpfc_sli_rcv_unsol_event =
11576             lpfc_ct_unsol_event;
11577         return 0;
11578 }
11579
11580 /**
11581  * lpfc_sli_setup - SLI ring setup function
11582  * @phba: Pointer to HBA context object.
11583  *
11584  * lpfc_sli_setup sets up rings of the SLI interface with
11585  * the number of iocbs per ring and iotags. This function is
11586  * called while the driver attaches to the HBA and before the
11587  * interrupts are enabled, so there is no need for locking.
11588  *
11589  * This function always returns 0. SLI3 only.
11590  **/
11591 int
11592 lpfc_sli_setup(struct lpfc_hba *phba)
11593 {
11594         int i, totiocbsize = 0;
11595         struct lpfc_sli *psli = &phba->sli;
11596         struct lpfc_sli_ring *pring;
11597
11598         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11599         psli->sli_flag = 0;
11600
11601         psli->iocbq_lookup = NULL;
11602         psli->iocbq_lookup_len = 0;
11603         psli->last_iotag = 0;
11604
11605         for (i = 0; i < psli->num_rings; i++) {
11606                 pring = &psli->sli3_ring[i];
11607                 switch (i) {
11608                 case LPFC_FCP_RING:     /* ring 0 - FCP */
11609                         /* numCiocb and numRiocb are used in config_port */
11610                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11611                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11612                         pring->sli.sli3.numCiocb +=
11613                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11614                         pring->sli.sli3.numRiocb +=
11615                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11616                         pring->sli.sli3.numCiocb +=
11617                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11618                         pring->sli.sli3.numRiocb +=
11619                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11620                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11621                                                         SLI3_IOCB_CMD_SIZE :
11622                                                         SLI2_IOCB_CMD_SIZE;
11623                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11624                                                         SLI3_IOCB_RSP_SIZE :
11625                                                         SLI2_IOCB_RSP_SIZE;
11626                         pring->iotag_ctr = 0;
11627                         pring->iotag_max =
11628                             (phba->cfg_hba_queue_depth * 2);
11629                         pring->fast_iotag = pring->iotag_max;
11630                         pring->num_mask = 0;
11631                         break;
11632                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
11633                         /* numCiocb and numRiocb are used in config_port */
11634                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11635                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11636                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11637                                                         SLI3_IOCB_CMD_SIZE :
11638                                                         SLI2_IOCB_CMD_SIZE;
11639                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11640                                                         SLI3_IOCB_RSP_SIZE :
11641                                                         SLI2_IOCB_RSP_SIZE;
11642                         pring->iotag_max = phba->cfg_hba_queue_depth;
11643                         pring->num_mask = 0;
11644                         break;
11645                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
11646                         /* numCiocb and numRiocb are used in config_port */
11647                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11648                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11649                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11650                                                         SLI3_IOCB_CMD_SIZE :
11651                                                         SLI2_IOCB_CMD_SIZE;
11652                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11653                                                         SLI3_IOCB_RSP_SIZE :
11654                                                         SLI2_IOCB_RSP_SIZE;
11655                         pring->fast_iotag = 0;
11656                         pring->iotag_ctr = 0;
11657                         pring->iotag_max = 4096;
11658                         pring->lpfc_sli_rcv_async_status =
11659                                 lpfc_sli_async_event_handler;
11660                         pring->num_mask = LPFC_MAX_RING_MASK;
11661                         pring->prt[0].profile = 0;      /* Mask 0 */
11662                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11663                         pring->prt[0].type = FC_TYPE_ELS;
11664                         pring->prt[0].lpfc_sli_rcv_unsol_event =
11665                             lpfc_els_unsol_event;
11666                         pring->prt[1].profile = 0;      /* Mask 1 */
11667                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
11668                         pring->prt[1].type = FC_TYPE_ELS;
11669                         pring->prt[1].lpfc_sli_rcv_unsol_event =
11670                             lpfc_els_unsol_event;
11671                         pring->prt[2].profile = 0;      /* Mask 2 */
11672                         /* NameServer Inquiry */
11673                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11674                         /* NameServer */
11675                         pring->prt[2].type = FC_TYPE_CT;
11676                         pring->prt[2].lpfc_sli_rcv_unsol_event =
11677                             lpfc_ct_unsol_event;
11678                         pring->prt[3].profile = 0;      /* Mask 3 */
11679                         /* NameServer response */
11680                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11681                         /* NameServer */
11682                         pring->prt[3].type = FC_TYPE_CT;
11683                         pring->prt[3].lpfc_sli_rcv_unsol_event =
11684                             lpfc_ct_unsol_event;
11685                         break;
11686                 }
11687                 totiocbsize += (pring->sli.sli3.numCiocb *
11688                         pring->sli.sli3.sizeCiocb) +
11689                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11690         }
11691         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11692                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11693                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11694                        "SLI2 SLIM Data: x%x x%lx\n",
11695                        phba->brd_no, totiocbsize,
11696                        (unsigned long) MAX_SLIM_IOCB_SIZE);
11697         }
11698         if (phba->cfg_multi_ring_support == 2)
11699                 lpfc_extra_ring_setup(phba);
11700
11701         return 0;
11702 }
11703
11704 /**
11705  * lpfc_sli4_queue_init - Queue initialization function
11706  * @phba: Pointer to HBA context object.
11707  *
11708  * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11709  * ring. This function also initializes ring indices of each ring.
11710  * This function is called during the initialization of the SLI
11711  * interface of an HBA.
11712  * This function is called with no lock held and does not
11713  * return a value.
11714  **/
11715 void
11716 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11717 {
11718         struct lpfc_sli *psli;
11719         struct lpfc_sli_ring *pring;
11720         int i;
11721
11722         psli = &phba->sli;
11723         spin_lock_irq(&phba->hbalock);
11724         INIT_LIST_HEAD(&psli->mboxq);
11725         INIT_LIST_HEAD(&psli->mboxq_cmpl);
11726         /* Initialize list headers for txq and txcmplq as doubly linked lists */
11727         for (i = 0; i < phba->cfg_hdw_queue; i++) {
11728                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11729                 pring->flag = 0;
11730                 pring->ringno = LPFC_FCP_RING;
11731                 pring->txcmplq_cnt = 0;
11732                 INIT_LIST_HEAD(&pring->txq);
11733                 INIT_LIST_HEAD(&pring->txcmplq);
11734                 INIT_LIST_HEAD(&pring->iocb_continueq);
11735                 spin_lock_init(&pring->ring_lock);
11736         }
11737         pring = phba->sli4_hba.els_wq->pring;
11738         pring->flag = 0;
11739         pring->ringno = LPFC_ELS_RING;
11740         pring->txcmplq_cnt = 0;
11741         INIT_LIST_HEAD(&pring->txq);
11742         INIT_LIST_HEAD(&pring->txcmplq);
11743         INIT_LIST_HEAD(&pring->iocb_continueq);
11744         spin_lock_init(&pring->ring_lock);
11745
11746         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11747                 pring = phba->sli4_hba.nvmels_wq->pring;
11748                 pring->flag = 0;
11749                 pring->ringno = LPFC_ELS_RING;
11750                 pring->txcmplq_cnt = 0;
11751                 INIT_LIST_HEAD(&pring->txq);
11752                 INIT_LIST_HEAD(&pring->txcmplq);
11753                 INIT_LIST_HEAD(&pring->iocb_continueq);
11754                 spin_lock_init(&pring->ring_lock);
11755         }
11756
11757         spin_unlock_irq(&phba->hbalock);
11758 }
11759
11760 /**
11761  * lpfc_sli_queue_init - Queue initialization function
11762  * @phba: Pointer to HBA context object.
11763  *
11764  * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11765  * ring. This function also initializes ring indices of each ring.
11766  * This function is called during the initialization of the SLI
11767  * interface of an HBA.
11768  * This function is called with no lock held and does not
11769  * return a value.
11770  **/
11771 void
11772 lpfc_sli_queue_init(struct lpfc_hba *phba)
11773 {
11774         struct lpfc_sli *psli;
11775         struct lpfc_sli_ring *pring;
11776         int i;
11777
11778         psli = &phba->sli;
11779         spin_lock_irq(&phba->hbalock);
11780         INIT_LIST_HEAD(&psli->mboxq);
11781         INIT_LIST_HEAD(&psli->mboxq_cmpl);
11782         /* Initialize list headers for txq and txcmplq as doubly linked lists */
11783         for (i = 0; i < psli->num_rings; i++) {
11784                 pring = &psli->sli3_ring[i];
11785                 pring->ringno = i;
11786                 pring->sli.sli3.next_cmdidx  = 0;
11787                 pring->sli.sli3.local_getidx = 0;
11788                 pring->sli.sli3.cmdidx = 0;
11789                 INIT_LIST_HEAD(&pring->iocb_continueq);
11790                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11791                 INIT_LIST_HEAD(&pring->postbufq);
11792                 pring->flag = 0;
11793                 INIT_LIST_HEAD(&pring->txq);
11794                 INIT_LIST_HEAD(&pring->txcmplq);
11795                 spin_lock_init(&pring->ring_lock);
11796         }
11797         spin_unlock_irq(&phba->hbalock);
11798 }
11799
11800 /**
11801  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11802  * @phba: Pointer to HBA context object.
11803  *
11804  * This routine flushes the mailbox command subsystem. It will unconditionally
11805  * flush all the mailbox commands in the three possible stages in the mailbox
11806  * command sub-system: pending mailbox command queue; the outstanding mailbox
11807  * command; and completed mailbox command queue. It is the caller's responsibility
11808  * to make sure that the driver is in the proper state to flush the mailbox
11809  * command sub-system. Namely, the posting of mailbox commands into the
11810  * pending mailbox command queue from the various clients must be stopped;
11811  * either the HBA is in a state in which it will never work on the outstanding
11812  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11813  * mailbox command has been completed.
11814  **/
11815 static void
11816 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11817 {
11818         LIST_HEAD(completions);
11819         struct lpfc_sli *psli = &phba->sli;
11820         LPFC_MBOXQ_t *pmb;
11821         unsigned long iflag;
11822
11823         /* Disable softirqs, including timers from obtaining phba->hbalock */
11824         local_bh_disable();
11825
11826         /* Flush all the mailbox commands in the mbox system */
11827         spin_lock_irqsave(&phba->hbalock, iflag);
11828
11829         /* The pending mailbox command queue */
11830         list_splice_init(&phba->sli.mboxq, &completions);
11831         /* The outstanding active mailbox command */
11832         if (psli->mbox_active) {
11833                 list_add_tail(&psli->mbox_active->list, &completions);
11834                 psli->mbox_active = NULL;
11835                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11836         }
11837         /* The completed mailbox command queue */
11838         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11839         spin_unlock_irqrestore(&phba->hbalock, iflag);
11840
11841         /* Enable softirqs again, done with phba->hbalock */
11842         local_bh_enable();
11843
11844         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11845         while (!list_empty(&completions)) {
11846                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11847                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11848                 if (pmb->mbox_cmpl)
11849                         pmb->mbox_cmpl(phba, pmb);
11850         }
11851 }
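
/*
 * The flush above follows the standard "splice under lock, complete
 * outside the lock" idiom: all three mailbox stages are drained onto a
 * private list while hbalock is held, and the completion handlers then
 * run without any lock, so a handler that re-acquires hbalock cannot
 * deadlock. Skeleton of the idiom (illustrative only):
 *
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	list_splice_init(&phba->sli.mboxq, &completions);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 *
 *	while (!list_empty(&completions)) {
 *		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
 *		if (pmb->mbox_cmpl)
 *			pmb->mbox_cmpl(phba, pmb);
 *	}
 */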
11852
11853 /**
11854  * lpfc_sli_host_down - Vport cleanup function
11855  * @vport: Pointer to virtual port object.
11856  *
11857  * lpfc_sli_host_down is called to clean up the resources
11858  * associated with a vport before destroying virtual
11859  * port data structures.
11860  * This function does following operations:
11861  * - Free discovery resources associated with this virtual
11862  *   port.
11863  * - Free iocbs associated with this virtual port in
11864  *   the txq.
11865  * - Send abort for all iocb commands associated with this
11866  *   vport in txcmplq.
11867  *
11868  * This function is called with no lock held and always returns 1.
11869  **/
11870 int
11871 lpfc_sli_host_down(struct lpfc_vport *vport)
11872 {
11873         LIST_HEAD(completions);
11874         struct lpfc_hba *phba = vport->phba;
11875         struct lpfc_sli *psli = &phba->sli;
11876         struct lpfc_queue *qp = NULL;
11877         struct lpfc_sli_ring *pring;
11878         struct lpfc_iocbq *iocb, *next_iocb;
11879         int i;
11880         unsigned long flags = 0;
11881         uint16_t prev_pring_flag;
11882
11883         lpfc_cleanup_discovery_resources(vport);
11884
11885         spin_lock_irqsave(&phba->hbalock, flags);
11886
11887         /*
11888          * Error everything on the txq since these iocbs
11889          * have not been given to the FW yet.
11890          * Also issue ABTS for everything on the txcmplq
11891          */
11892         if (phba->sli_rev != LPFC_SLI_REV4) {
11893                 for (i = 0; i < psli->num_rings; i++) {
11894                         pring = &psli->sli3_ring[i];
11895                         prev_pring_flag = pring->flag;
11896                         /* Only slow rings */
11897                         if (pring->ringno == LPFC_ELS_RING) {
11898                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11899                                 /* Set the lpfc data pending flag */
11900                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
11901                         }
11902                         list_for_each_entry_safe(iocb, next_iocb,
11903                                                  &pring->txq, list) {
11904                                 if (iocb->vport != vport)
11905                                         continue;
11906                                 list_move_tail(&iocb->list, &completions);
11907                         }
11908                         list_for_each_entry_safe(iocb, next_iocb,
11909                                                  &pring->txcmplq, list) {
11910                                 if (iocb->vport != vport)
11911                                         continue;
11912                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11913                                                            NULL);
11914                         }
11915                         pring->flag = prev_pring_flag;
11916                 }
11917         } else {
11918                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11919                         pring = qp->pring;
11920                         if (!pring)
11921                                 continue;
11922                         if (pring == phba->sli4_hba.els_wq->pring) {
11923                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11924                                 /* Set the lpfc data pending flag */
11925                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
11926                         }
11927                         prev_pring_flag = pring->flag;
11928                         spin_lock(&pring->ring_lock);
11929                         list_for_each_entry_safe(iocb, next_iocb,
11930                                                  &pring->txq, list) {
11931                                 if (iocb->vport != vport)
11932                                         continue;
11933                                 list_move_tail(&iocb->list, &completions);
11934                         }
11935                         spin_unlock(&pring->ring_lock);
11936                         list_for_each_entry_safe(iocb, next_iocb,
11937                                                  &pring->txcmplq, list) {
11938                                 if (iocb->vport != vport)
11939                                         continue;
11940                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11941                                                            NULL);
11942                         }
11943                         pring->flag = prev_pring_flag;
11944                 }
11945         }
11946         spin_unlock_irqrestore(&phba->hbalock, flags);
11947
11948         /* Make sure HBA is alive */
11949         lpfc_issue_hb_tmo(phba);
11950
11951         /* Cancel all the IOCBs from the completions list */
11952         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11953                               IOERR_SLI_DOWN);
11954         return 1;
11955 }
11956
11957 /**
11958  * lpfc_sli_hba_down - Resource cleanup function for the HBA
11959  * @phba: Pointer to HBA context object.
11960  *
11961  * This function cleans up all iocb, buffers, mailbox commands
11962  * while shutting down the HBA. This function is called with no
11963  * lock held and always returns 1.
11964  * This function does the following to cleanup driver resources:
11965  * - Free discovery resources for each virtual port
11966  * - Cleanup any pending fabric iocbs
11967  * - Iterate through the iocb txq and free each entry
11968  *   in the list.
11969  * - Free up any buffer posted to the HBA
11970  * - Free mailbox commands in the mailbox queue.
11971  **/
11972 int
11973 lpfc_sli_hba_down(struct lpfc_hba *phba)
11974 {
11975         LIST_HEAD(completions);
11976         struct lpfc_sli *psli = &phba->sli;
11977         struct lpfc_queue *qp = NULL;
11978         struct lpfc_sli_ring *pring;
11979         struct lpfc_dmabuf *buf_ptr;
11980         unsigned long flags = 0;
11981         int i;
11982
11983         /* Shutdown the mailbox command sub-system */
11984         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11985
11986         lpfc_hba_down_prep(phba);
11987
11988         /* Disable softirqs, including timers from obtaining phba->hbalock */
11989         local_bh_disable();
11990
11991         lpfc_fabric_abort_hba(phba);
11992
11993         spin_lock_irqsave(&phba->hbalock, flags);
11994
11995         /*
11996          * Error everything on the txq since these iocbs
11997          * have not been given to the FW yet.
11998          */
11999         if (phba->sli_rev != LPFC_SLI_REV4) {
12000                 for (i = 0; i < psli->num_rings; i++) {
12001                         pring = &psli->sli3_ring[i];
12002                         /* Only slow rings */
12003                         if (pring->ringno == LPFC_ELS_RING) {
12004                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12005                                 /* Set the lpfc data pending flag */
12006                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
12007                         }
12008                         list_splice_init(&pring->txq, &completions);
12009                 }
12010         } else {
12011                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12012                         pring = qp->pring;
12013                         if (!pring)
12014                                 continue;
12015                         spin_lock(&pring->ring_lock);
12016                         list_splice_init(&pring->txq, &completions);
12017                         spin_unlock(&pring->ring_lock);
12018                         if (pring == phba->sli4_hba.els_wq->pring) {
12019                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12020                                 /* Set the lpfc data pending flag */
12021                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
12022                         }
12023                 }
12024         }
12025         spin_unlock_irqrestore(&phba->hbalock, flags);
12026
12027         /* Cancel all the IOCBs from the completions list */
12028         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12029                               IOERR_SLI_DOWN);
12030
12031         spin_lock_irqsave(&phba->hbalock, flags);
12032         list_splice_init(&phba->elsbuf, &completions);
12033         phba->elsbuf_cnt = 0;
12034         phba->elsbuf_prev_cnt = 0;
12035         spin_unlock_irqrestore(&phba->hbalock, flags);
12036
12037         while (!list_empty(&completions)) {
12038                 list_remove_head(&completions, buf_ptr,
12039                         struct lpfc_dmabuf, list);
12040                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12041                 kfree(buf_ptr);
12042         }
12043
12044         /* Enable softirqs again, done with phba->hbalock */
12045         local_bh_enable();
12046
12047         /* Return any active mbox cmds */
12048         del_timer_sync(&psli->mbox_tmo);
12049
12050         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12051         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12052         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12053
12054         return 1;
12055 }
12056
12057 /**
12058  * lpfc_sli_pcimem_bcopy - SLI memory copy function
12059  * @srcp: Source memory pointer.
12060  * @destp: Destination memory pointer.
12061  * @cnt: Number of bytes to copy (the copy proceeds one 32-bit word at a time).
12062  *
12063  * This function is used for copying data between driver memory
12064  * and the SLI memory. This function also changes the endianness
12065  * of each word if native endianness is different from SLI
12066  * endianness. This function can be called with or without
12067  * lock.
12068  **/
12069 void
12070 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12071 {
12072         uint32_t *src = srcp;
12073         uint32_t *dest = destp;
12074         uint32_t ldata;
12075         int i;
12076
12077         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12078                 ldata = *src;
12079                 ldata = le32_to_cpu(ldata);
12080                 *dest = ldata;
12081                 src++;
12082                 dest++;
12083         }
12084 }
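
/*
 * Usage sketch (illustrative; the call shown is an assumption, not a
 * verbatim call site): the loop above swizzles one 32-bit word per
 * iteration and @cnt is a byte count, so callers pass the full size of
 * the region, e.g. when pulling a little-endian mailbox image into a
 * native-endian driver copy:
 *
 *	lpfc_sli_pcimem_bcopy(phba->mbox, &pmb->u.mb, sizeof(MAILBOX_t));
 */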
12085
12086
12087 /**
12088  * lpfc_sli_bemem_bcopy - SLI memory copy function
12089  * @srcp: Source memory pointer.
12090  * @destp: Destination memory pointer.
12091  * @cnt: Number of bytes to copy (the copy proceeds one 32-bit word at a time).
12092  *
12093  * This function is used for copying data from a data structure
12094  * with big endian representation to local endianness.
12095  * This function can be called with or without lock.
12096  **/
12097 void
12098 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12099 {
12100         uint32_t *src = srcp;
12101         uint32_t *dest = destp;
12102         uint32_t ldata;
12103         int i;
12104
12105         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12106                 ldata = *src;
12107                 ldata = be32_to_cpu(ldata);
12108                 *dest = ldata;
12109                 src++;
12110                 dest++;
12111         }
12112 }
12113
12114 /**
12115  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12116  * @phba: Pointer to HBA context object.
12117  * @pring: Pointer to driver SLI ring object.
12118  * @mp: Pointer to driver buffer object.
12119  *
12120  * This function is called with no lock held.
12121  * It always returns zero after adding the buffer to the postbufq
12122  * buffer list.
12123  **/
12124 int
12125 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12126                          struct lpfc_dmabuf *mp)
12127 {
12128         /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
12129            later */
12130         spin_lock_irq(&phba->hbalock);
12131         list_add_tail(&mp->list, &pring->postbufq);
12132         pring->postbufq_cnt++;
12133         spin_unlock_irq(&phba->hbalock);
12134         return 0;
12135 }
12136
12137 /**
12138  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12139  * @phba: Pointer to HBA context object.
12140  *
12141  * When HBQ is enabled, buffers are searched based on tags. This function
12142  * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
12143  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
12144  * does not conflict with tags of buffers posted for unsolicited events.
12145  * The function returns the allocated tag. The function is called with
12146  * no locks held.
12147  **/
12148 uint32_t
12149 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12150 {
12151         spin_lock_irq(&phba->hbalock);
12152         phba->buffer_tag_count++;
12153         /*
12154          * Always set the QUE_BUFTAG_BIT to distinguish this tag
12155          * from a tag assigned by HBQ.
12156          */
12157         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12158         spin_unlock_irq(&phba->hbalock);
12159         return phba->buffer_tag_count;
12160 }
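
/*
 * Usage sketch (illustrative, hypothetical locals): the returned tag is
 * stored in the dmabuf before the buffer is queued on pring->postbufq,
 * and the same tag rides in the CMD_QUE_XRI64_CX iocb so the buffer can
 * be recovered later with lpfc_sli_ring_taggedbuf_get():
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 */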
12161
12162 /**
12163  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12164  * @phba: Pointer to HBA context object.
12165  * @pring: Pointer to driver SLI ring object.
12166  * @tag: Buffer tag.
12167  *
12168  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12169  * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
12170  * iocb is posted to the response ring with the tag of the buffer.
12171  * This function searches the pring->postbufq list using the tag
12172  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
12173  * iocb. If the buffer is found then the lpfc_dmabuf object of the
12174  * buffer is returned to the caller, else NULL is returned.
12175  * This function is called with no lock held.
12176  **/
12177 struct lpfc_dmabuf *
12178 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12179                         uint32_t tag)
12180 {
12181         struct lpfc_dmabuf *mp, *next_mp;
12182         struct list_head *slp = &pring->postbufq;
12183
12184         /* Search postbufq, from the beginning, looking for a match on tag */
12185         spin_lock_irq(&phba->hbalock);
12186         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12187                 if (mp->buffer_tag == tag) {
12188                         list_del_init(&mp->list);
12189                         pring->postbufq_cnt--;
12190                         spin_unlock_irq(&phba->hbalock);
12191                         return mp;
12192                 }
12193         }
12194
12195         spin_unlock_irq(&phba->hbalock);
12196         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12197                         "0402 Cannot find virtual addr for buffer tag on "
12198                         "ring %d Data x%lx x%px x%px x%x\n",
12199                         pring->ringno, (unsigned long) tag,
12200                         slp->next, slp->prev, pring->postbufq_cnt);
12201
12202         return NULL;
12203 }
12204
12205 /**
12206  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12207  * @phba: Pointer to HBA context object.
12208  * @pring: Pointer to driver SLI ring object.
12209  * @phys: DMA address of the buffer.
12210  *
12211  * This function searches the buffer list using the dma_address
12212  * of unsolicited event to find the driver's lpfc_dmabuf object
12213  * corresponding to the dma_address. The function returns the
12214  * lpfc_dmabuf object if a buffer is found else it returns NULL.
12215  * This function is called by the ct and els unsolicited event
12216  * handlers to get the buffer associated with the unsolicited
12217  * event.
12218  *
12219  * This function is called with no lock held.
12220  **/
12221 struct lpfc_dmabuf *
12222 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12223                          dma_addr_t phys)
12224 {
12225         struct lpfc_dmabuf *mp, *next_mp;
12226         struct list_head *slp = &pring->postbufq;
12227
12228         /* Search postbufq, from the beginning, looking for a match on phys */
12229         spin_lock_irq(&phba->hbalock);
12230         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12231                 if (mp->phys == phys) {
12232                         list_del_init(&mp->list);
12233                         pring->postbufq_cnt--;
12234                         spin_unlock_irq(&phba->hbalock);
12235                         return mp;
12236                 }
12237         }
12238
12239         spin_unlock_irq(&phba->hbalock);
12240         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12241                         "0410 Cannot find virtual addr for mapped buf on "
12242                         "ring %d Data x%llx x%px x%px x%x\n",
12243                         pring->ringno, (unsigned long long)phys,
12244                         slp->next, slp->prev, pring->postbufq_cnt);
12245         return NULL;
12246 }
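
/*
 * Caller sketch (illustrative; getPaddr() is the driver helper that
 * joins the two address words): an unsolicited CT/ELS handler recovers
 * the posted buffer from the DMA address carried in the response iocb:
 *
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring,
 *				      getPaddr(icmd->un.cont64[0].addrHigh,
 *					       icmd->un.cont64[0].addrLow));
 */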
12247
12248 /**
12249  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12250  * @phba: Pointer to HBA context object.
12251  * @cmdiocb: Pointer to driver command iocb object.
12252  * @rspiocb: Pointer to driver response iocb object.
12253  *
12254  * This function is the completion handler for the abort iocbs for
12255  * ELS commands. This function is called from the ELS ring event
12256  * handler with no lock held. This function frees memory resources
12257  * associated with the abort iocb.
12258  **/
12259 static void
12260 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12261                         struct lpfc_iocbq *rspiocb)
12262 {
12263         IOCB_t *irsp = &rspiocb->iocb;
12264         uint16_t abort_iotag, abort_context;
12265         struct lpfc_iocbq *abort_iocb = NULL;
12266
12267         if (irsp->ulpStatus) {
12268
12269                 /*
12270                  * Assume that the port already completed and returned, or
12271                  * will return the iocb. Just log the message.
12272                  */
12273                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
12274                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
12275
12276                 spin_lock_irq(&phba->hbalock);
12277                 if (phba->sli_rev < LPFC_SLI_REV4) {
12278                         if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
12279                             irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
12280                             irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
12281                                 spin_unlock_irq(&phba->hbalock);
12282                                 goto release_iocb;
12283                         }
12284                         if (abort_iotag != 0 &&
12285                                 abort_iotag <= phba->sli.last_iotag)
12286                                 abort_iocb =
12287                                         phba->sli.iocbq_lookup[abort_iotag];
12288                 } else
12289                         /* For sli4 the abort_tag is the XRI,
12290                          * so the abort routine puts the iotag of the iocb
12291                          * being aborted in the context field of the abort
12292                          * IOCB.
12293                          */
12294                         abort_iocb = phba->sli.iocbq_lookup[abort_context];
12295
12296                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12297                                 "0327 Cannot abort els iocb x%px "
12298                                 "with tag %x context %x, abort status %x, "
12299                                 "abort code %x\n",
12300                                 abort_iocb, abort_iotag, abort_context,
12301                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
12302
12303                 spin_unlock_irq(&phba->hbalock);
12304         }
12305 release_iocb:
12306         lpfc_sli_release_iocbq(phba, cmdiocb);
12307         return;
12308 }
12309
12310 /**
12311  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12312  * @phba: Pointer to HBA context object.
12313  * @cmdiocb: Pointer to driver command iocb object.
12314  * @rspiocb: Pointer to driver response iocb object.
12315  *
12316  * The function is called from SLI ring event handler with no
12317  * lock held. This function is the completion handler for ELS commands
12318  * which are aborted. The function frees memory resources used for
12319  * the aborted ELS commands.
12320  **/
12321 void
12322 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12323                      struct lpfc_iocbq *rspiocb)
12324 {
12325         struct lpfc_nodelist *ndlp = NULL;
12326         IOCB_t *irsp = &rspiocb->iocb;
12327
12328         /* ELS cmd tag <ulpIoTag> completes */
12329         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12330                         "0139 Ignoring ELS cmd code x%x completion Data: "
12331                         "x%x x%x x%x\n",
12332                         irsp->ulpIoTag, irsp->ulpStatus,
12333                         irsp->un.ulpWord[4], irsp->ulpTimeout);
12334         /*
12335          * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12336          * if exchange is busy.
12337          */
12338         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
12339                 ndlp = cmdiocb->context_un.ndlp;
12340                 lpfc_ct_free_iocb(phba, cmdiocb);
12341         } else {
12342                 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
12343                 lpfc_els_free_iocb(phba, cmdiocb);
12344         }
12345
12346         lpfc_nlp_put(ndlp);
12347 }
12348
12349 /**
12350  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12351  * @phba: Pointer to HBA context object.
12352  * @pring: Pointer to driver SLI ring object.
12353  * @cmdiocb: Pointer to driver command iocb object.
12354  * @cmpl: completion function.
12355  *
12356  * This function issues an abort iocb for the provided command iocb. In case
12357  * of unloading, the abort iocb will not be issued to commands on the ELS
12358  * ring. Instead, the completion callback of those commands is changed so
12359  * that nothing happens when they finish. This function is called with
12360  * hbalock held and no ring_lock held (SLI4). The function returns
12361  * IOCB_ABORTING when the command iocb is an abort request.
12362  *
12363  **/
12364 int
12365 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12366                            struct lpfc_iocbq *cmdiocb, void *cmpl)
12367 {
12368         struct lpfc_vport *vport = cmdiocb->vport;
12369         struct lpfc_iocbq *abtsiocbp;
12370         IOCB_t *icmd = NULL;
12371         IOCB_t *iabt = NULL;
12372         int retval = IOCB_ERROR;
12373         unsigned long iflags;
12374         struct lpfc_nodelist *ndlp;
12375
12376         /*
12377          * There are certain command types we don't want to abort.  And we
12378          * don't want to abort commands that are already in the process of
12379          * being aborted.
12380          */
12381         icmd = &cmdiocb->iocb;
12382         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
12383             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
12384             cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12385                 return IOCB_ABORTING;
12386
12387         if (!pring) {
12388                 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12389                         cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12390                 else
12391                         cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12392                 return retval;
12393         }
12394
12395         /*
12396          * If we're unloading, don't abort iocb on the ELS ring, but change
12397          * the callback so that nothing happens when it finishes.
12398          */
12399         if ((vport->load_flag & FC_UNLOADING) &&
12400             pring->ringno == LPFC_ELS_RING) {
12401                 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12402                         cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12403                 else
12404                         cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12405                 return retval;
12406         }
12407
12408         /* issue ABTS for this IOCB based on iotag */
12409         abtsiocbp = __lpfc_sli_get_iocbq(phba);
12410         if (abtsiocbp == NULL)
12411                 return IOCB_NORESOURCE;
12412
12413         /* This flag signals the response path to set the correct status
12414          * before calling the completion handler
12415          */
12416         cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12417
12418         iabt = &abtsiocbp->iocb;
12419         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
12420         iabt->un.acxri.abortContextTag = icmd->ulpContext;
12421         if (phba->sli_rev == LPFC_SLI_REV4) {
12422                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
12423                 if (pring->ringno == LPFC_ELS_RING)
12424                         iabt->un.acxri.abortContextTag = cmdiocb->iotag;
12425         } else {
12426                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
12427                 if (pring->ringno == LPFC_ELS_RING) {
12428                         ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
12429                         iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
12430                 }
12431         }
12432         iabt->ulpLe = 1;
12433         iabt->ulpClass = icmd->ulpClass;
12434
12435         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12436         abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12437         if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12438                 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12439         if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12440                 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12441
12442         if (phba->link_state < LPFC_LINK_UP ||
12443             (phba->sli_rev == LPFC_SLI_REV4 &&
12444              phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
12445                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
12446         else
12447                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
12448
12449         if (cmpl)
12450                 abtsiocbp->cmd_cmpl = cmpl;
12451         else
12452                 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12453         abtsiocbp->vport = vport;
12454
12455         if (phba->sli_rev == LPFC_SLI_REV4) {
12456                 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12457                 if (unlikely(pring == NULL))
12458                         goto abort_iotag_exit;
12459                 /* Note: both hbalock and ring_lock need to be set here */
12460                 spin_lock_irqsave(&pring->ring_lock, iflags);
12461                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12462                         abtsiocbp, 0);
12463                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12464         } else {
12465                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12466                         abtsiocbp, 0);
12467         }
12468
12469 abort_iotag_exit:
12470
12471         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12472                          "0339 Abort xri x%x, original iotag x%x, "
12473                          "abort cmd iotag x%x retval x%x\n",
12474                          iabt->un.acxri.abortIoTag,
12475                          iabt->un.acxri.abortContextTag,
12476                          abtsiocbp->iotag, retval);
12477
12478         if (retval) {
12479                 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12480                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12481         }
12482
12483         /*
12484          * Caller to this routine should check for IOCB_ERROR
12485          * and handle it properly.  This routine no longer removes
12486          * iocb off txcmplq and call compl in case of IOCB_ERROR.
12487          */
12488         return retval;
12489 }
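
/*
 * Usage sketch (editorial note, modeled on the FCP abort loop in
 * lpfc_sli_abort_iocb below): the routine is called with the hbalock
 * held and is given a completion handler for the ABTS:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
 *					 lpfc_sli_abort_fcp_cmpl);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * Anything other than IOCB_SUCCESS or IOCB_ABORTING means the abort was
 * not issued and the original iocb is left untouched.
 */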
12490
12491 /**
12492  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12493  * @phba: pointer to lpfc HBA data structure.
12494  *
12495  * This routine will abort all pending and outstanding iocbs to an HBA.
12496  **/
12497 void
12498 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12499 {
12500         struct lpfc_sli *psli = &phba->sli;
12501         struct lpfc_sli_ring *pring;
12502         struct lpfc_queue *qp = NULL;
12503         int i;
12504
12505         if (phba->sli_rev != LPFC_SLI_REV4) {
12506                 for (i = 0; i < psli->num_rings; i++) {
12507                         pring = &psli->sli3_ring[i];
12508                         lpfc_sli_abort_iocb_ring(phba, pring);
12509                 }
12510                 return;
12511         }
12512         list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12513                 pring = qp->pring;
12514                 if (!pring)
12515                         continue;
12516                 lpfc_sli_abort_iocb_ring(phba, pring);
12517         }
12518 }
12519
12520 /**
12521  * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12522  * @iocbq: Pointer to iocb object.
12523  * @vport: Pointer to driver virtual port object.
12524  *
12525  * This function acts as an iocb filter for functions which abort FCP iocbs.
12526  *
12527  * Return values
12528  * -ENODEV, if a null iocb or vport ptr is encountered
12529  * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12530  *          already marked as driver-aborted, or is an abort iocb itself
12531  * 0, passes criteria for aborting the FCP I/O iocb
12532  **/
12533 static int
12534 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12535                                      struct lpfc_vport *vport)
12536 {
12537         IOCB_t *icmd = NULL;
12538
12539         /* Reject null iocbs and iocbs owned by another vport */
12540         if (!iocbq || iocbq->vport != vport)
12541                 return -ENODEV;
12542
12543         /* The iocb must be an FCP I/O already on the TX cmpl queue, must not
12544          * be marked as driver-aborted, and must not be an ABORT iocb itself
12545          */
12546         icmd = &iocbq->iocb;
12547         if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12548             !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12549             (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12550             (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
12551              icmd->ulpCommand == CMD_CLOSE_XRI_CN))
12552                 return -EINVAL;
12553
12554         return 0;
12555 }
12556
12557 /**
12558  * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12559  * @iocbq: Pointer to driver iocb object.
12560  * @vport: Pointer to driver virtual port object.
12561  * @tgt_id: SCSI ID of the target.
12562  * @lun_id: LUN ID of the scsi device.
12563  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12564  *
12565  * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12566  * host.
12567  *
12568  * It will return
12569  * 0 if the filtering criteria are met for the given iocb and will return
12570  * 1 if the filtering criteria are not met.
12571  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12572  * given iocb is for the SCSI device specified by vport, tgt_id and
12573  * lun_id parameter.
12574  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
12575  * given iocb is for the SCSI target specified by vport and tgt_id
12576  * parameters.
12577  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12578  * given iocb is for the SCSI host associated with the given vport.
12579  * This function is called with no locks held.
12580  **/
12581 static int
12582 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12583                            uint16_t tgt_id, uint64_t lun_id,
12584                            lpfc_ctx_cmd ctx_cmd)
12585 {
12586         struct lpfc_io_buf *lpfc_cmd;
12587         int rc = 1;
12588
12589         lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12590
12591         if (lpfc_cmd->pCmd == NULL)
12592                 return rc;
12593
12594         switch (ctx_cmd) {
12595         case LPFC_CTX_LUN:
12596                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12597                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12598                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12599                         rc = 0;
12600                 break;
12601         case LPFC_CTX_TGT:
12602                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12603                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12604                         rc = 0;
12605                 break;
12606         case LPFC_CTX_HOST:
12607                 rc = 0;
12608                 break;
12609         default:
12610                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12611                         __func__, ctx_cmd);
12612                 break;
12613         }
12614
12615         return rc;
12616 }
12617
12618 /**
12619  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12620  * @vport: Pointer to virtual port.
12621  * @tgt_id: SCSI ID of the target.
12622  * @lun_id: LUN ID of the scsi device.
12623  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12624  *
12625  * This function returns the number of FCP commands pending for the vport.
12626  * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
12627  * commands pending on the vport associated with the SCSI device specified
12628  * by the tgt_id and lun_id parameters.
12629  * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
12630  * commands pending on the vport associated with the SCSI target specified
12631  * by the tgt_id parameter.
12632  * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
12633  * commands pending on the vport.
12634  * This function returns the number of iocbs which satisfy the filter.
12635  * This function is called without any lock held.
12636  **/
12637 int
12638 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12639                   lpfc_ctx_cmd ctx_cmd)
12640 {
12641         struct lpfc_hba *phba = vport->phba;
12642         struct lpfc_iocbq *iocbq;
12643         IOCB_t *icmd = NULL;
12644         int sum, i;
12645         unsigned long iflags;
12646
12647         spin_lock_irqsave(&phba->hbalock, iflags);
12648         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12649                 iocbq = phba->sli.iocbq_lookup[i];
12650
12651                 if (!iocbq || iocbq->vport != vport)
12652                         continue;
12653                 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12654                     !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12655                         continue;
12656
12657                 /* Include counting outstanding aborts */
12658                 icmd = &iocbq->iocb;
12659                 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
12660                     icmd->ulpCommand == CMD_CLOSE_XRI_CN) {
12661                         sum++;
12662                         continue;
12663                 }
12664
12665                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12666                                                ctx_cmd) == 0)
12667                         sum++;
12668         }
12669         spin_unlock_irqrestore(&phba->hbalock, iflags);
12670
12671         return sum;
12672 }
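
/*
 * Usage sketch (editorial, assumed caller outside this file): a flush
 * or reset path can poll this count to see whether any FCP I/O is
 * still outstanding for a given context, e.g. for a whole SCSI host:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_HOST);
 *
 * (tgt_id and lun_id are not consulted for LPFC_CTX_HOST.)
 */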
12673
12674 /**
12675  * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12676  * @phba: Pointer to HBA context object
12677  * @cmdiocb: Pointer to command iocb object.
12678  * @wcqe: pointer to the complete wcqe
12679  *
12680  * This function is called when an aborted FCP iocb completes. This
12681  * function is called by the ring event handler with no lock held.
12682  * This function frees the iocb. It is called for sli-4 adapters.
12683  **/
12684 void
12685 lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12686                          struct lpfc_wcqe_complete *wcqe)
12687 {
12688         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12689                         "3017 ABORT_XRI_CN completing on rpi x%x "
12690                         "original iotag x%x, abort cmd iotag x%x "
12691                         "status 0x%x, reason 0x%x\n",
12692                         cmdiocb->iocb.un.acxri.abortContextTag,
12693                         cmdiocb->iocb.un.acxri.abortIoTag,
12694                         cmdiocb->iotag,
12695                         (bf_get(lpfc_wcqe_c_status, wcqe)
12696                         & LPFC_IOCB_STATUS_MASK),
12697                         wcqe->parameter);
12698         lpfc_sli_release_iocbq(phba, cmdiocb);
12699 }
12700
12701 /**
12702  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12703  * @phba: Pointer to HBA context object
12704  * @cmdiocb: Pointer to command iocb object.
12705  * @rspiocb: Pointer to response iocb object.
12706  *
12707  * This function is called when an aborted FCP iocb completes. This
12708  * function is called by the ring event handler with no lock held.
12709  * This function frees the iocb.
12710  **/
12711 void
12712 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12713                         struct lpfc_iocbq *rspiocb)
12714 {
12715         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12716                         "3096 ABORT_XRI_CN completing on rpi x%x "
12717                         "original iotag x%x, abort cmd iotag x%x "
12718                         "status 0x%x, reason 0x%x\n",
12719                         cmdiocb->iocb.un.acxri.abortContextTag,
12720                         cmdiocb->iocb.un.acxri.abortIoTag,
12721                         cmdiocb->iotag, rspiocb->iocb.ulpStatus,
12722                         rspiocb->iocb.un.ulpWord[4]);
12723         lpfc_sli_release_iocbq(phba, cmdiocb);
12724         return;
12725 }
12726
12727 /**
12728  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12729  * @vport: Pointer to virtual port.
12730  * @tgt_id: SCSI ID of the target.
12731  * @lun_id: LUN ID of the scsi device.
12732  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12733  *
12734  * This function sends an abort command for every SCSI command
12735  * associated with the given virtual port pending on the ring that
12736  * passes the lpfc_sli_validate_fcp_iocb_for_abort and
12737  * lpfc_sli_validate_fcp_iocb filters.  The validation before submitting
12738  * abort iocbs must apply lpfc_sli_validate_fcp_iocb_for_abort first,
12739  * followed by lpfc_sli_validate_fcp_iocb.
12740  *
12741  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12742  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12743  * parameters.
12744  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12745  * FCP iocbs associated with the SCSI target specified by tgt_id.
12746  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12747  * FCP iocbs associated with the virtual port.
12748  * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12749  * lpfc_sli4_calc_ring is used.
12750  * This function returns number of iocbs it failed to abort.
12751  * This function is called with no locks held.
12752  **/
12753 int
12754 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12755                     lpfc_ctx_cmd abort_cmd)
12756 {
12757         struct lpfc_hba *phba = vport->phba;
12758         struct lpfc_sli_ring *pring = NULL;
12759         struct lpfc_iocbq *iocbq;
12760         int errcnt = 0, ret_val = 0;
12761         unsigned long iflags;
12762         int i;
12763         void *fcp_cmpl = NULL;
12764
12765         /* all I/Os are in the process of being flushed */
12766         if (phba->hba_flag & HBA_IOQ_FLUSH)
12767                 return errcnt;
12768
12769         for (i = 1; i <= phba->sli.last_iotag; i++) {
12770                 iocbq = phba->sli.iocbq_lookup[i];
12771
12772                 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12773                         continue;
12774
12775                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12776                                                abort_cmd) != 0)
12777                         continue;
12778
12779                 spin_lock_irqsave(&phba->hbalock, iflags);
12780                 if (phba->sli_rev == LPFC_SLI_REV3) {
12781                         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12782                         fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
12783                 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12784                         pring = lpfc_sli4_calc_ring(phba, iocbq);
12785                         fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
12786                 }
12787                 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12788                                                      fcp_cmpl);
12789                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12790                 if (ret_val != IOCB_SUCCESS)
12791                         errcnt++;
12792         }
12793
12794         return errcnt;
12795 }
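
/*
 * Usage sketch (editorial, hypothetical caller): a SCSI target reset
 * path could abort everything outstanding to one target and treat the
 * return value as the number of aborts it failed to issue:
 *
 *	failed = lpfc_sli_abort_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
 *	if (failed)
 *		...escalate, some I/Os could not be aborted...
 */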
12796
12797 /**
12798  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12799  * @vport: Pointer to virtual port.
12800  * @pring: Pointer to driver SLI ring object.
12801  * @tgt_id: SCSI ID of the target.
12802  * @lun_id: LUN ID of the scsi device.
12803  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12804  *
12805  * This function sends an abort command for every SCSI command
12806  * associated with the given virtual port pending on the ring that
12807  * passes the lpfc_sli_validate_fcp_iocb_for_abort and
12808  * lpfc_sli_validate_fcp_iocb filters.  The validation before submitting
12809  * abort iocbs must apply lpfc_sli_validate_fcp_iocb_for_abort first,
12810  * followed by lpfc_sli_validate_fcp_iocb.
12811  *
12812  * When cmd == LPFC_CTX_LUN, the function sends abort only to the
12813  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12814  * parameters.
12815  * When cmd == LPFC_CTX_TGT, the function sends abort only to the
12816  * FCP iocbs associated with the SCSI target specified by tgt_id.
12817  * When cmd == LPFC_CTX_HOST, the function sends abort to all
12818  * FCP iocbs associated with the virtual port.
12819  * This function returns the number of iocbs it aborted.
12820  * This function is called with no locks held right after a taskmgmt
12821  * command is sent.
12822  **/
12823 int
12824 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12825                         uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12826 {
12827         struct lpfc_hba *phba = vport->phba;
12828         struct lpfc_io_buf *lpfc_cmd;
12829         struct lpfc_iocbq *abtsiocbq;
12830         struct lpfc_nodelist *ndlp;
12831         struct lpfc_iocbq *iocbq;
12832         IOCB_t *icmd;
12833         int sum, i, ret_val;
12834         unsigned long iflags;
12835         struct lpfc_sli_ring *pring_s4 = NULL;
12836
12837         spin_lock_irqsave(&phba->hbalock, iflags);
12838
12839         /* all I/Os are in the process of being flushed */
12840         if (phba->hba_flag & HBA_IOQ_FLUSH) {
12841                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12842                 return 0;
12843         }
12844         sum = 0;
12845
12846         for (i = 1; i <= phba->sli.last_iotag; i++) {
12847                 iocbq = phba->sli.iocbq_lookup[i];
12848
12849                 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12850                         continue;
12851
12852                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12853                                                cmd) != 0)
12854                         continue;
12855
12856                 /* Guard against IO completion being called at the same time */
12857                 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12858                 spin_lock(&lpfc_cmd->buf_lock);
12859
12860                 if (!lpfc_cmd->pCmd) {
12861                         spin_unlock(&lpfc_cmd->buf_lock);
12862                         continue;
12863                 }
12864
12865                 if (phba->sli_rev == LPFC_SLI_REV4) {
12866                         pring_s4 =
12867                             phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12868                         if (!pring_s4) {
12869                                 spin_unlock(&lpfc_cmd->buf_lock);
12870                                 continue;
12871                         }
12872                         /* Note: both hbalock and ring_lock must be set here */
12873                         spin_lock(&pring_s4->ring_lock);
12874                 }
12875
12876                 /*
12877                  * If the iocbq is already being aborted, don't take a second
12878                  * action; just skip it.
12879                  */
12880                 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12881                     !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12882                         if (phba->sli_rev == LPFC_SLI_REV4)
12883                                 spin_unlock(&pring_s4->ring_lock);
12884                         spin_unlock(&lpfc_cmd->buf_lock);
12885                         continue;
12886                 }
12887
12888                 /* issue ABTS for this IOCB based on iotag */
12889                 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12890                 if (!abtsiocbq) {
12891                         if (phba->sli_rev == LPFC_SLI_REV4)
12892                                 spin_unlock(&pring_s4->ring_lock);
12893                         spin_unlock(&lpfc_cmd->buf_lock);
12894                         continue;
12895                 }
12896
12897                 icmd = &iocbq->iocb;
12898                 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
12899                 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
12900                 if (phba->sli_rev == LPFC_SLI_REV4)
12901                         abtsiocbq->iocb.un.acxri.abortIoTag =
12902                                                          iocbq->sli4_xritag;
12903                 else
12904                         abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
12905                 abtsiocbq->iocb.ulpLe = 1;
12906                 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
12907                 abtsiocbq->vport = vport;
12908
12909                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12910                 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12911                 if (iocbq->cmd_flag & LPFC_IO_FCP)
12912                         abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12913                 if (iocbq->cmd_flag & LPFC_IO_FOF)
12914                         abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12915
12916                 ndlp = lpfc_cmd->rdata->pnode;
12917
12918                 if (lpfc_is_link_up(phba) &&
12919                     (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
12920                         abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
12921                 else
12922                         abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
12923
12924                 /* Setup callback routine and issue the command. */
12925                 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12926
12927                 /*
12928                  * Indicate the IO is being aborted by the driver and set
12929                  * the caller's flag into the aborted IO.
12930                  */
12931                 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12932
12933                 if (phba->sli_rev == LPFC_SLI_REV4) {
12934                         ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12935                                                         abtsiocbq, 0);
12936                         spin_unlock(&pring_s4->ring_lock);
12937                 } else {
12938                         ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12939                                                         abtsiocbq, 0);
12940                 }
12941
12942                 spin_unlock(&lpfc_cmd->buf_lock);
12943
12944                 if (ret_val == IOCB_ERROR)
12945                         __lpfc_sli_release_iocbq(phba, abtsiocbq);
12946                 else
12947                         sum++;
12948         }
12949         spin_unlock_irqrestore(&phba->hbalock, iflags);
12950         return sum;
12951 }
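
/*
 * Usage sketch (editorial, assumed reset-handler pattern): issue the
 * task management command first, fire the aborts, then poll the
 * outstanding count until it drains or a deadline passes:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	if (cnt)
 *		lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *					LPFC_CTX_LUN);
 *	while (cnt && time_before(jiffies, deadline)) {
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 *		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	}
 */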
12952
12953 /**
12954  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12955  * @phba: Pointer to HBA context object.
12956  * @cmdiocbq: Pointer to command iocb.
12957  * @rspiocbq: Pointer to response iocb.
12958  *
12959  * This function is the completion handler for iocbs issued using
12960  * lpfc_sli_issue_iocb_wait function. This function is called by the
12961  * ring event handler function without any lock held. This function
12962  * can be called from both worker thread context and interrupt
12963  * context. This function can also be called from another thread which
12964  * cleans up the SLI layer objects.
12965  * This function copies the contents of the response iocb to the
12966  * response iocb memory object provided by the caller of
12967  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12968  * sleeps for the iocb completion.
12969  **/
12970 static void
12971 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12972                         struct lpfc_iocbq *cmdiocbq,
12973                         struct lpfc_iocbq *rspiocbq)
12974 {
12975         wait_queue_head_t *pdone_q;
12976         unsigned long iflags;
12977         struct lpfc_io_buf *lpfc_cmd;
12978         size_t offset = offsetof(struct lpfc_iocbq, wqe);
12979
12980         spin_lock_irqsave(&phba->hbalock, iflags);
12981         if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
12982
12983                 /*
12984                  * A time out has occurred for the iocb.  If a time out
12985                  * completion handler has been supplied, call it.  Otherwise,
12986                  * just free the iocbq.
12987                  */
12988
12989                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12990                 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
12991                 cmdiocbq->wait_cmd_cmpl = NULL;
12992                 if (cmdiocbq->cmd_cmpl)
12993                         cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
12994                 else
12995                         lpfc_sli_release_iocbq(phba, cmdiocbq);
12996                 return;
12997         }
12998
12999         /* Copy the contents of the local rspiocb into the caller's buffer. */
13000         cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13001         if (cmdiocbq->context2 && rspiocbq)
13002                 memcpy((char *)cmdiocbq->context2 + offset,
13003                        (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13004
13005         /* Set the exchange busy flag for task management commands */
13006         if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13007             !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13008                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13009                                         cur_iocbq);
13010                 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13011                         lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13012                 else
13013                         lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13014         }
13015
13016         pdone_q = cmdiocbq->context_un.wait_queue;
13017         if (pdone_q)
13018                 wake_up(pdone_q);
13019         spin_unlock_irqrestore(&phba->hbalock, iflags);
13020         return;
13021 }
13022
13023 /**
13024  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
13025  * @phba: Pointer to HBA context object.
13026  * @piocbq: Pointer to command iocb.
13027  * @flag: Flag to test.
13028  *
13029  * This routine grabs the hbalock and then tests the cmd_flag to
13030  * see if the passed in flag is set.
13031  * Returns:
13032  * 1 if flag is set.
13033  * 0 if flag is not set.
13034  **/
13035 static int
13036 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13037                  struct lpfc_iocbq *piocbq, uint32_t flag)
13038 {
13039         unsigned long iflags;
13040         int ret;
13041
13042         spin_lock_irqsave(&phba->hbalock, iflags);
13043         ret = piocbq->cmd_flag & flag;
13044         spin_unlock_irqrestore(&phba->hbalock, iflags);
13045         return ret;
13046
13047 }
13048
13049 /**
13050  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13051  * @phba: Pointer to HBA context object.
13052  * @ring_number: Ring number
13053  * @piocb: Pointer to command iocb.
13054  * @prspiocbq: Pointer to response iocb.
13055  * @timeout: Timeout in number of seconds.
13056  *
13057  * This function issues the iocb to firmware and waits for the
13058  * iocb to complete. The cmd_cmpl field of the iocb shall be used
13059  * to handle iocbs which time out. If the field is NULL, the
13060  * function shall free the iocbq structure.  If more clean up is
13061  * needed, the caller is expected to provide a completion function
13062  * that will provide the needed clean up.  If the iocb command is
13063  * not completed within timeout seconds, the function will either
13064  * free the iocbq structure (if cmd_cmpl == NULL) or execute the
13065  * completion function set in the cmd_cmpl field and then return
13066  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
13067  * resources if this function returns IOCB_TIMEDOUT.
13068  * The function waits for the iocb completion using a
13069  * non-interruptible wait.
13070  * This function will sleep while waiting for iocb completion.
13071  * So, this function should not be called from any context which
13072  * does not allow sleeping. Due to the same reason, this function
13073  * cannot be called with interrupt disabled.
13074  * This function assumes that the iocb completions occur while
13075  * this function sleeps. So, this function cannot be called from
13076  * the thread which processes iocb completion for this ring.
13077  * This function clears the cmd_flag of the iocb object before
13078  * issuing the iocb and the iocb completion handler sets this
13079  * flag and wakes this thread when the iocb completes.
13080  * The contents of the response iocb will be copied to prspiocbq
13081  * by the completion handler when the command completes.
13082  * This function returns IOCB_SUCCESS when success.
13083  * This function is called with no lock held.
13084  **/
13085 int
13086 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13087                          uint32_t ring_number,
13088                          struct lpfc_iocbq *piocb,
13089                          struct lpfc_iocbq *prspiocbq,
13090                          uint32_t timeout)
13091 {
13092         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13093         long timeleft, timeout_req = 0;
13094         int retval = IOCB_SUCCESS;
13095         uint32_t creg_val;
13096         struct lpfc_iocbq *iocb;
13097         int txq_cnt = 0;
13098         int txcmplq_cnt = 0;
13099         struct lpfc_sli_ring *pring;
13100         unsigned long iflags;
13101         bool iocb_completed = true;
13102
13103         if (phba->sli_rev >= LPFC_SLI_REV4)
13104                 pring = lpfc_sli4_calc_ring(phba, piocb);
13105         else
13106                 pring = &phba->sli.sli3_ring[ring_number];
13107         /*
13108          * If the caller has provided a response iocbq buffer, then context2
13109          * must be NULL; otherwise it is an error.
13110          */
13111         if (prspiocbq) {
13112                 if (piocb->context2)
13113                         return IOCB_ERROR;
13114                 piocb->context2 = prspiocbq;
13115         }
13116
13117         piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13118         piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13119         piocb->context_un.wait_queue = &done_q;
13120         piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13121
13122         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13123                 if (lpfc_readl(phba->HCregaddr, &creg_val))
13124                         return IOCB_ERROR;
13125                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13126                 writel(creg_val, phba->HCregaddr);
13127                 readl(phba->HCregaddr); /* flush */
13128         }
13129
13130         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13131                                      SLI_IOCB_RET_IOCB);
13132         if (retval == IOCB_SUCCESS) {
13133                 timeout_req = msecs_to_jiffies(timeout * 1000);
13134                 timeleft = wait_event_timeout(done_q,
13135                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13136                                 timeout_req);
13137                 spin_lock_irqsave(&phba->hbalock, iflags);
13138                 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13139
13140                         /*
13141                          * IOCB timed out.  Inform the wake iocb wait
13142                          * completion function and set local status
13143                          */
13144
13145                         iocb_completed = false;
13146                         piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13147                 }
13148                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13149                 if (iocb_completed) {
13150                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13151                                         "0331 IOCB wake signaled\n");
13152                         /* Note: we are not indicating if the IOCB has a success
13153                          * status or not - that's for the caller to check.
13154                          * IOCB_SUCCESS means just that the command was sent and
13155                          * completed. Not that it completed successfully.
13156                          */
13157                 } else if (timeleft == 0) {
13158                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13159                                         "0338 IOCB wait timeout error - no "
13160                                         "wake response Data x%x\n", timeout);
13161                         retval = IOCB_TIMEDOUT;
13162                 } else {
13163                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13164                                         "0330 IOCB wake NOT set, "
13165                                         "Data x%x x%lx\n",
13166                                         timeout, (timeleft / jiffies));
13167                         retval = IOCB_TIMEDOUT;
13168                 }
13169         } else if (retval == IOCB_BUSY) {
13170                 if (phba->cfg_log_verbose & LOG_SLI) {
13171                         list_for_each_entry(iocb, &pring->txq, list) {
13172                                 txq_cnt++;
13173                         }
13174                         list_for_each_entry(iocb, &pring->txcmplq, list) {
13175                                 txcmplq_cnt++;
13176                         }
13177                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13178                                 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13179                                 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13180                 }
13181                 return retval;
13182         } else {
13183                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13184                                 "0332 IOCB wait issue failed, Data x%x\n",
13185                                 retval);
13186                 retval = IOCB_ERROR;
13187         }
13188
13189         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13190                 if (lpfc_readl(phba->HCregaddr, &creg_val))
13191                         return IOCB_ERROR;
13192                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13193                 writel(creg_val, phba->HCregaddr);
13194                 readl(phba->HCregaddr); /* flush */
13195         }
13196
13197         if (prspiocbq)
13198                 piocb->context2 = NULL;
13199
13200         piocb->context_un.wait_queue = NULL;
13201         piocb->cmd_cmpl = NULL;
13202         return retval;
13203 }
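
/*
 * Usage sketch (editorial, hypothetical caller): issuing an ELS iocb
 * synchronously with a separate response iocbq and a 30 second cap.
 * On IOCB_TIMEDOUT the iocb is owned by the timeout completion path
 * and must not be freed by the caller:
 *
 *	rspiocbq = lpfc_sli_get_iocbq(phba);
 *	...build cmdiocbq, leaving cmdiocbq->context2 NULL...
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc == IOCB_SUCCESS)
 *		...check rspiocbq->iocb.ulpStatus for the real result...
 *	if (rc != IOCB_TIMEDOUT)
 *		lpfc_sli_release_iocbq(phba, cmdiocbq);
 *	lpfc_sli_release_iocbq(phba, rspiocbq);
 */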
13204
13205 /**
13206  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13207  * @phba: Pointer to HBA context object.
13208  * @pmboxq: Pointer to driver mailbox object.
13209  * @timeout: Timeout in number of seconds.
13210  *
13211  * This function issues the mailbox to firmware and waits for the
13212  * mailbox command to complete. If the mailbox command is not
13213  * completed within timeout seconds, it returns MBX_TIMEOUT.
13214  * The function waits for the mailbox completion using an
13215  * interruptible wait. If the thread is woken up due to a
13216  * signal, MBX_TIMEOUT error is returned to the caller. Caller
13217  * should not free the mailbox resources, if this function returns
13218  * MBX_TIMEOUT.
13219  * This function will sleep while waiting for mailbox completion.
13220  * So, this function should not be called from any context which
13221  * does not allow sleeping. Due to the same reason, this function
13222  * cannot be called with interrupt disabled.
13223  * This function assumes that the mailbox completion occurs while
13224  * this function sleeps. So, this function cannot be called from
13225  * the worker thread which processes mailbox completion.
13226  * This function is called in the context of HBA management
13227  * applications.
13228  * This function returns MBX_SUCCESS when successful.
13229  * This function is called with no lock held.
13230  **/
13231 int
13232 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13233                          uint32_t timeout)
13234 {
13235         struct completion mbox_done;
13236         int retval;
13237         unsigned long flag;
13238
13239         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13240         /* setup wake call as mailbox completion callback */
13241         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13242
13243         /* setup context3 field to pass completion pointer to wake function */
13244         init_completion(&mbox_done);
13245         pmboxq->context3 = &mbox_done;
13246         /* now issue the command */
13247         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13248         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13249                 wait_for_completion_timeout(&mbox_done,
13250                                             msecs_to_jiffies(timeout * 1000));
13251
13252                 spin_lock_irqsave(&phba->hbalock, flag);
13253                 pmboxq->context3 = NULL;
13254                 /*
13255                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
13256                  * else do not free the resources.
13257                  */
13258                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13259                         retval = MBX_SUCCESS;
13260                 } else {
13261                         retval = MBX_TIMEOUT;
13262                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13263                 }
13264                 spin_unlock_irqrestore(&phba->hbalock, flag);
13265         }
13266         return retval;
13267 }
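
/*
 * Usage sketch (editorial, hypothetical management-path caller). On
 * MBX_TIMEOUT the mailbox stays with the completion handler and must
 * not be freed by the caller:
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	...set up the MAILBOX_t payload in mbox->u.mb...
 *	rc = lpfc_sli_issue_mbox_wait(phba, mbox,
 *				      lpfc_mbox_tmo_val(phba, mbox));
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */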
13268
13269 /**
13270  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13271  * @phba: Pointer to HBA context.
13272  * @mbx_action: Mailbox shutdown options.
13273  *
13274  * This function is called to shutdown the driver's mailbox sub-system.
13275  * It first marks the mailbox sub-system as in a blocked state to prevent
13276  * asynchronous mailbox commands from being issued off the pending mailbox
13277  * command queue. If the mailbox command sub-system shutdown is due to
13278  * HBA error conditions such as EEH or ERATT, this routine shall invoke
13279  * the mailbox sub-system flush routine to forcefully bring down the
13280  * mailbox sub-system. Otherwise, if it is due to normal condition (such
13281  * as with offline or HBA function reset), this routine will wait for the
13282  * outstanding mailbox command to complete before invoking the mailbox
13283  * sub-system flush routine to gracefully bring down mailbox sub-system.
13284  **/
13285 void
13286 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13287 {
13288         struct lpfc_sli *psli = &phba->sli;
13289         unsigned long timeout;
13290
13291         if (mbx_action == LPFC_MBX_NO_WAIT) {
13292                 /* delay 100ms for port state */
13293                 msleep(100);
13294                 lpfc_sli_mbox_sys_flush(phba);
13295                 return;
13296         }
13297         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13298
13299         /* Disable softirqs, including timers from obtaining phba->hbalock */
13300         local_bh_disable();
13301
13302         spin_lock_irq(&phba->hbalock);
13303         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13304
13305         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13306                 /* Determine how long we might wait for the active mailbox
13307                  * command to be gracefully completed by firmware.
13308                  */
13309                 if (phba->sli.mbox_active)
13310                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13311                                                 phba->sli.mbox_active) *
13312                                                 1000) + jiffies;
13313                 spin_unlock_irq(&phba->hbalock);
13314
13315                 /* Enable softirqs again, done with phba->hbalock */
13316                 local_bh_enable();
13317
13318                 while (phba->sli.mbox_active) {
13319                         /* Check active mailbox complete status every 2ms */
13320                         msleep(2);
13321                         if (time_after(jiffies, timeout))
13322                                 /* Timeout, let the mailbox flush routine
13323                                  * forcefully release the active mailbox command
13324                                  */
13325                                 break;
13326                 }
13327         } else {
13328                 spin_unlock_irq(&phba->hbalock);
13329
13330                 /* Enable softirqs again, done with phba->hbalock */
13331                 local_bh_enable();
13332         }
13333
13334         lpfc_sli_mbox_sys_flush(phba);
13335 }
13336
13337 /**
13338  * lpfc_sli_eratt_read - read sli-3 error attention events
13339  * @phba: Pointer to HBA context.
13340  *
13341  * This function is called to read the SLI3 device error attention registers
13342  * for possible error attention events. The caller must hold the hostlock
13343  * with spin_lock_irq().
13344  *
13345  * This function returns 1 when there is Error Attention in the Host Attention
13346  * Register and returns 0 otherwise.
13347  **/
13348 static int
13349 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13350 {
13351         uint32_t ha_copy;
13352
13353         /* Read chip Host Attention (HA) register */
13354         if (lpfc_readl(phba->HAregaddr, &ha_copy))
13355                 goto unplug_err;
13356
13357         if (ha_copy & HA_ERATT) {
13358                 /* Read host status register to retrieve error event */
13359                 if (lpfc_sli_read_hs(phba))
13360                         goto unplug_err;
13361
13362                 /* Check if a deferred error condition is active */
13363                 if ((HS_FFER1 & phba->work_hs) &&
13364                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13365                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13366                         phba->hba_flag |= DEFER_ERATT;
13367                         /* Clear all interrupt enable conditions */
13368                         writel(0, phba->HCregaddr);
13369                         readl(phba->HCregaddr);
13370                 }
13371
13372                 /* Set the driver HA work bitmap */
13373                 phba->work_ha |= HA_ERATT;
13374                 /* Indicate polling handles this ERATT */
13375                 phba->hba_flag |= HBA_ERATT_HANDLED;
13376                 return 1;
13377         }
13378         return 0;
13379
13380 unplug_err:
13381         /* Set the driver HS work bitmap */
13382         phba->work_hs |= UNPLUG_ERR;
13383         /* Set the driver HA work bitmap */
13384         phba->work_ha |= HA_ERATT;
13385         /* Indicate polling handles this ERATT */
13386         phba->hba_flag |= HBA_ERATT_HANDLED;
13387         return 1;
13388 }
13389
13390 /**
13391  * lpfc_sli4_eratt_read - read sli-4 error attention events
13392  * @phba: Pointer to HBA context.
13393  *
13394  * This function is called to read the SLI4 device error attention registers
13395  * for possible error attention events. The caller must hold the hostlock
13396  * with spin_lock_irq().
13397  *
13398  * This function returns 1 when there is Error Attention in the Host Attention
13399  * Register and returns 0 otherwise.
13400  **/
13401 static int
13402 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13403 {
13404         uint32_t uerr_sta_hi, uerr_sta_lo;
13405         uint32_t if_type, portsmphr;
13406         struct lpfc_register portstat_reg;
13407         u32 logmask;
13408
13409         /*
13410          * For now, use the SLI4 device internal unrecoverable error
13411          * registers for error attention. This can be changed later.
13412          */
13413         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13414         switch (if_type) {
13415         case LPFC_SLI_INTF_IF_TYPE_0:
13416                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13417                         &uerr_sta_lo) ||
13418                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13419                         &uerr_sta_hi)) {
13420                         phba->work_hs |= UNPLUG_ERR;
13421                         phba->work_ha |= HA_ERATT;
13422                         phba->hba_flag |= HBA_ERATT_HANDLED;
13423                         return 1;
13424                 }
13425                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13426                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13427                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13428                                         "1423 HBA Unrecoverable error: "
13429                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13430                                         "ue_mask_lo_reg=0x%x, "
13431                                         "ue_mask_hi_reg=0x%x\n",
13432                                         uerr_sta_lo, uerr_sta_hi,
13433                                         phba->sli4_hba.ue_mask_lo,
13434                                         phba->sli4_hba.ue_mask_hi);
13435                         phba->work_status[0] = uerr_sta_lo;
13436                         phba->work_status[1] = uerr_sta_hi;
13437                         phba->work_ha |= HA_ERATT;
13438                         phba->hba_flag |= HBA_ERATT_HANDLED;
13439                         return 1;
13440                 }
13441                 break;
13442         case LPFC_SLI_INTF_IF_TYPE_2:
13443         case LPFC_SLI_INTF_IF_TYPE_6:
13444                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13445                         &portstat_reg.word0) ||
13446                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13447                         &portsmphr)) {
13448                         phba->work_hs |= UNPLUG_ERR;
13449                         phba->work_ha |= HA_ERATT;
13450                         phba->hba_flag |= HBA_ERATT_HANDLED;
13451                         return 1;
13452                 }
13453                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13454                         phba->work_status[0] =
13455                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13456                         phba->work_status[1] =
13457                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13458                         logmask = LOG_TRACE_EVENT;
13459                         if (phba->work_status[0] ==
13460                                 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13461                             phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13462                                 logmask = LOG_SLI;
13463                         lpfc_printf_log(phba, KERN_ERR, logmask,
13464                                         "2885 Port Status Event: "
13465                                         "port status reg 0x%x, "
13466                                         "port smphr reg 0x%x, "
13467                                         "error 1=0x%x, error 2=0x%x\n",
13468                                         portstat_reg.word0,
13469                                         portsmphr,
13470                                         phba->work_status[0],
13471                                         phba->work_status[1]);
13472                         phba->work_ha |= HA_ERATT;
13473                         phba->hba_flag |= HBA_ERATT_HANDLED;
13474                         return 1;
13475                 }
13476                 break;
13477         case LPFC_SLI_INTF_IF_TYPE_1:
13478         default:
13479                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13480                                 "2886 HBA Error Attention on unsupported "
13481                                 "if type %d.", if_type);
13482                 return 1;
13483         }
13484
13485         return 0;
13486 }
13487
13488 /**
13489  * lpfc_sli_check_eratt - check error attention events
13490  * @phba: Pointer to HBA context.
13491  *
13492  * This function is called from timer soft interrupt context to check HBA's
13493  * error attention register bit for error attention events.
13494  *
13495  * This function returns 1 when there is Error Attention in the Host Attention
13496  * Register and returns 0 otherwise.
13497  **/
13498 int
13499 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13500 {
13501         uint32_t ha_copy;
13502
13503         /* If somebody is waiting to handle an eratt, don't process it
13504          * here. The brdkill function will do this.
13505          */
13506         if (phba->link_flag & LS_IGNORE_ERATT)
13507                 return 0;
13508
13509         /* Check if interrupt handler handles this ERATT */
13510         spin_lock_irq(&phba->hbalock);
13511         if (phba->hba_flag & HBA_ERATT_HANDLED) {
13512                 /* Interrupt handler has handled ERATT */
13513                 spin_unlock_irq(&phba->hbalock);
13514                 return 0;
13515         }
13516
13517         /*
13518          * If there is deferred error attention, do not check for error
13519          * attention
13520          */
13521         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13522                 spin_unlock_irq(&phba->hbalock);
13523                 return 0;
13524         }
13525
13526         /* If PCI channel is offline, don't process it */
13527         if (unlikely(pci_channel_offline(phba->pcidev))) {
13528                 spin_unlock_irq(&phba->hbalock);
13529                 return 0;
13530         }
13531
13532         switch (phba->sli_rev) {
13533         case LPFC_SLI_REV2:
13534         case LPFC_SLI_REV3:
13535                 /* Read chip Host Attention (HA) register */
13536                 ha_copy = lpfc_sli_eratt_read(phba);
13537                 break;
13538         case LPFC_SLI_REV4:
13539                 /* Read device Unrecoverable Error (UERR) registers */
13540                 ha_copy = lpfc_sli4_eratt_read(phba);
13541                 break;
13542         default:
13543                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13544                                 "0299 Invalid SLI revision (%d)\n",
13545                                 phba->sli_rev);
13546                 ha_copy = 0;
13547                 break;
13548         }
13549         spin_unlock_irq(&phba->hbalock);
13550
13551         return ha_copy;
13552 }
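
/*
 * Usage sketch (editorial, assumed timer-poll caller): a periodic
 * error-attention poll can use this check and, on a hit, wake the
 * worker thread to process the event:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */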
13553
13554 /**
13555  * lpfc_intr_state_check - Check device state for interrupt handling
13556  * @phba: Pointer to HBA context.
13557  *
13558  * This inline routine checks whether a device or its PCI slot is in a state
13559  * in which the interrupt should be handled.
13560  *
13561  * This function returns 0 if the device or the PCI slot is in a state in
13562  * which the interrupt should be handled, otherwise -EIO.
13563  */
13564 static inline int
13565 lpfc_intr_state_check(struct lpfc_hba *phba)
13566 {
13567         /* If the pci channel is offline, ignore all the interrupts */
13568         if (unlikely(pci_channel_offline(phba->pcidev)))
13569                 return -EIO;
13570
13571         /* Update device level interrupt statistics */
13572         phba->sli.slistat.sli_intr++;
13573
13574         /* Ignore all interrupts during initialization. */
13575         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13576                 return -EIO;
13577
13578         return 0;
13579 }
13580
13581 /**
13582  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13583  * @irq: Interrupt number.
13584  * @dev_id: The device context pointer.
13585  *
13586  * This function is directly called from the PCI layer as an interrupt
13587  * service routine when device with SLI-3 interface spec is enabled with
13588  * MSI-X multi-message interrupt mode and there are slow-path events in
13589  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13590  * interrupt mode, this function is called as part of the device-level
13591  * interrupt handler. When the PCI slot is in error recovery or the HBA
13592  * is undergoing initialization, the interrupt handler will not process
13593  * the interrupt. The link attention and ELS ring attention events are
13594  * handled by the worker thread. The interrupt handler signals the worker
13595  * thread and returns for these events. This function is called without
13596  * any lock held. It gets the hbalock to access and update SLI data
13597  * structures.
13598  *
13599  * This function returns IRQ_HANDLED when interrupt is handled else it
13600  * returns IRQ_NONE.
13601  **/
13602 irqreturn_t
13603 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13604 {
13605         struct lpfc_hba  *phba;
13606         uint32_t ha_copy, hc_copy;
13607         uint32_t work_ha_copy;
13608         unsigned long status;
13609         unsigned long iflag;
13610         uint32_t control;
13611
13612         MAILBOX_t *mbox, *pmbox;
13613         struct lpfc_vport *vport;
13614         struct lpfc_nodelist *ndlp;
13615         struct lpfc_dmabuf *mp;
13616         LPFC_MBOXQ_t *pmb;
13617         int rc;
13618
13619         /*
13620          * Get the driver's phba structure from the dev_id and
13621          * assume the HBA is not interrupting.
13622          */
13623         phba = (struct lpfc_hba *)dev_id;
13624
13625         if (unlikely(!phba))
13626                 return IRQ_NONE;
13627
13628         /*
13629          * Things need to be attended to when this function is invoked as an
13630          * individual interrupt handler in MSI-X multi-message interrupt mode
13631          */
13632         if (phba->intr_type == MSIX) {
13633                 /* Check device state for handling interrupt */
13634                 if (lpfc_intr_state_check(phba))
13635                         return IRQ_NONE;
13636                 /* Need to read HA REG for slow-path events */
13637                 spin_lock_irqsave(&phba->hbalock, iflag);
13638                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13639                         goto unplug_error;
13640                 /* If somebody is waiting to handle an eratt don't process it
13641                  * here. The brdkill function will do this.
13642                  */
13643                 if (phba->link_flag & LS_IGNORE_ERATT)
13644                         ha_copy &= ~HA_ERATT;
13645                 /* Check the need for handling ERATT in interrupt handler */
13646                 if (ha_copy & HA_ERATT) {
13647                         if (phba->hba_flag & HBA_ERATT_HANDLED)
13648                                 /* ERATT polling has handled ERATT */
13649                                 ha_copy &= ~HA_ERATT;
13650                         else
13651                                 /* Indicate interrupt handler handles ERATT */
13652                                 phba->hba_flag |= HBA_ERATT_HANDLED;
13653                 }
13654
13655                 /*
13656                  * If there is deferred error attention, do not check for any
13657                  * interrupt.
13658                  */
13659                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13660                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13661                         return IRQ_NONE;
13662                 }
13663
13664                 /* Clear only the attention sources related to the slow path */
13665                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13666                         goto unplug_error;
13667
13668                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13669                         HC_LAINT_ENA | HC_ERINT_ENA),
13670                         phba->HCregaddr);
13671                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13672                         phba->HAregaddr);
13673                 writel(hc_copy, phba->HCregaddr);
13674                 readl(phba->HAregaddr); /* flush */
13675                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13676         } else
13677                 ha_copy = phba->ha_copy;
13678
13679         work_ha_copy = ha_copy & phba->work_ha_mask;
13680
13681         if (work_ha_copy) {
13682                 if (work_ha_copy & HA_LATT) {
13683                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13684                                 /*
13685                                  * Turn off Link Attention interrupts
13686                                  * until CLEAR_LA done
13687                                  */
13688                                 spin_lock_irqsave(&phba->hbalock, iflag);
13689                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13690                                 if (lpfc_readl(phba->HCregaddr, &control))
13691                                         goto unplug_error;
13692                                 control &= ~HC_LAINT_ENA;
13693                                 writel(control, phba->HCregaddr);
13694                                 readl(phba->HCregaddr); /* flush */
13695                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13696                         }
13697                         else
13698                                 work_ha_copy &= ~HA_LATT;
13699                 }
13700
13701                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13702                         /*
13703                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13704                          * the only slow ring.
13705                          */
13706                         status = (work_ha_copy &
13707                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
13708                         status >>= (4*LPFC_ELS_RING);
13709                         if (status & HA_RXMASK) {
13710                                 spin_lock_irqsave(&phba->hbalock, iflag);
13711                                 if (lpfc_readl(phba->HCregaddr, &control))
13712                                         goto unplug_error;
13713
13714                                 lpfc_debugfs_slow_ring_trc(phba,
13715                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
13716                                 control, status,
13717                                 (uint32_t)phba->sli.slistat.sli_intr);
13718
13719                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13720                                         lpfc_debugfs_slow_ring_trc(phba,
13721                                                 "ISR Disable ring:"
13722                                                 "pwork:x%x hawork:x%x wait:x%x",
13723                                                 phba->work_ha, work_ha_copy,
13724                                                 (uint32_t)((unsigned long)
13725                                                 &phba->work_waitq));
13726
13727                                         control &=
13728                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
13729                                         writel(control, phba->HCregaddr);
13730                                         readl(phba->HCregaddr); /* flush */
13731                                 }
13732                                 else {
13733                                         lpfc_debugfs_slow_ring_trc(phba,
13734                                                 "ISR slow ring:   pwork:"
13735                                                 "x%x hawork:x%x wait:x%x",
13736                                                 phba->work_ha, work_ha_copy,
13737                                                 (uint32_t)((unsigned long)
13738                                                 &phba->work_waitq));
13739                                 }
13740                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13741                         }
13742                 }
13743                 spin_lock_irqsave(&phba->hbalock, iflag);
13744                 if (work_ha_copy & HA_ERATT) {
13745                         if (lpfc_sli_read_hs(phba))
13746                                 goto unplug_error;
13747                         /*
13748                          * Check if a deferred error condition
13749                          * is active
13750                          */
13751                         if ((HS_FFER1 & phba->work_hs) &&
13752                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13753                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
13754                                   phba->work_hs)) {
13755                                 phba->hba_flag |= DEFER_ERATT;
13756                                 /* Clear all interrupt enable conditions */
13757                                 writel(0, phba->HCregaddr);
13758                                 readl(phba->HCregaddr);
13759                         }
13760                 }
13761
13762                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13763                         pmb = phba->sli.mbox_active;
13764                         pmbox = &pmb->u.mb;
13765                         mbox = phba->mbox;
13766                         vport = pmb->vport;
13767
13768                         /* First check out the status word */
13769                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13770                         if (pmbox->mbxOwner != OWN_HOST) {
13771                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13772                                 /*
13773                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
13774                                  * mbxStatus <status>
13775                                  */
13776                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13777                                                 "(%d):0304 Stray Mailbox "
13778                                                 "Interrupt mbxCommand x%x "
13779                                                 "mbxStatus x%x\n",
13780                                                 (vport ? vport->vpi : 0),
13781                                                 pmbox->mbxCommand,
13782                                                 pmbox->mbxStatus);
13783                                 /* clear mailbox attention bit */
13784                                 work_ha_copy &= ~HA_MBATT;
13785                         } else {
13786                                 phba->sli.mbox_active = NULL;
13787                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13788                                 phba->last_completion_time = jiffies;
13789                                 del_timer(&phba->sli.mbox_tmo);
13790                                 if (pmb->mbox_cmpl) {
13791                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
13792                                                         MAILBOX_CMD_SIZE);
13793                                         if (pmb->out_ext_byte_len &&
13794                                                 pmb->ctx_buf)
13795                                                 lpfc_sli_pcimem_bcopy(
13796                                                 phba->mbox_ext,
13797                                                 pmb->ctx_buf,
13798                                                 pmb->out_ext_byte_len);
13799                                 }
13800                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13801                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13802
13803                                         lpfc_debugfs_disc_trc(vport,
13804                                                 LPFC_DISC_TRC_MBOX_VPORT,
13805                                                 "MBOX dflt rpi: "
13806                                                 "status:x%x rpi:x%x",
13807                                                 (uint32_t)pmbox->mbxStatus,
13808                                                 pmbox->un.varWords[0], 0);
13809
13810                                         if (!pmbox->mbxStatus) {
13811                                                 mp = (struct lpfc_dmabuf *)
13812                                                         (pmb->ctx_buf);
13813                                                 ndlp = (struct lpfc_nodelist *)
13814                                                         pmb->ctx_ndlp;
13815
13816                                                 /* Reg_LOGIN of dflt RPI was
13817                                                  * successful. Now let's get
13818                                                  * rid of the RPI using the
13819                                                  * same mbox buffer.
13820                                                  */
13821                                                 lpfc_unreg_login(phba,
13822                                                         vport->vpi,
13823                                                         pmbox->un.varWords[0],
13824                                                         pmb);
13825                                                 pmb->mbox_cmpl =
13826                                                         lpfc_mbx_cmpl_dflt_rpi;
13827                                                 pmb->ctx_buf = mp;
13828                                                 pmb->ctx_ndlp = ndlp;
13829                                                 pmb->vport = vport;
13830                                                 rc = lpfc_sli_issue_mbox(phba,
13831                                                                 pmb,
13832                                                                 MBX_NOWAIT);
13833                                                 if (rc != MBX_BUSY)
13834                                                         lpfc_printf_log(phba,
13835                                                         KERN_ERR,
13836                                                         LOG_TRACE_EVENT,
13837                                                         "0350 rc should have "
13838                                                         "been MBX_BUSY\n");
13839                                                 if (rc != MBX_NOT_FINISHED)
13840                                                         goto send_current_mbox;
13841                                         }
13842                                 }
13843                                 spin_lock_irqsave(
13844                                                 &phba->pport->work_port_lock,
13845                                                 iflag);
13846                                 phba->pport->work_port_events &=
13847                                         ~WORKER_MBOX_TMO;
13848                                 spin_unlock_irqrestore(
13849                                                 &phba->pport->work_port_lock,
13850                                                 iflag);
13851
13852                                 /* Do NOT queue MBX_HEARTBEAT to the worker
13853                                  * thread for processing.
13854                                  */
13855                                 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13856                                         /* Process mbox now */
13857                                         phba->sli.mbox_active = NULL;
13858                                         phba->sli.sli_flag &=
13859                                                 ~LPFC_SLI_MBOX_ACTIVE;
13860                                         if (pmb->mbox_cmpl)
13861                                                 pmb->mbox_cmpl(phba, pmb);
13862                                 } else {
13863                                         /* Queue to worker thread to process */
13864                                         lpfc_mbox_cmpl_put(phba, pmb);
13865                                 }
13866                         }
13867                 } else
13868                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13869
13870                 if ((work_ha_copy & HA_MBATT) &&
13871                     (phba->sli.mbox_active == NULL)) {
13872 send_current_mbox:
13873                         /* Process next mailbox command if there is one */
13874                         do {
13875                                 rc = lpfc_sli_issue_mbox(phba, NULL,
13876                                                          MBX_NOWAIT);
13877                         } while (rc == MBX_NOT_FINISHED);
13878                         if (rc != MBX_SUCCESS)
13879                                 lpfc_printf_log(phba, KERN_ERR,
13880                                                 LOG_TRACE_EVENT,
13881                                                 "0349 rc should be "
13882                                                 "MBX_SUCCESS\n");
13883                 }
13884
13885                 spin_lock_irqsave(&phba->hbalock, iflag);
13886                 phba->work_ha |= work_ha_copy;
13887                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13888                 lpfc_worker_wake_up(phba);
13889         }
13890         return IRQ_HANDLED;
13891 unplug_error:
13892         spin_unlock_irqrestore(&phba->hbalock, iflag);
13893         return IRQ_HANDLED;
13894
13895 } /* lpfc_sli_sp_intr_handler */
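/*
 * Registration sketch (editorial illustration): in MSI-X mode the driver's
 * setup code elsewhere attaches this slow-path handler and the fast-path
 * handler below to separate vectors, roughly as follows.  The function
 * name, irq names, and vector parameters are assumptions for illustration
 * only; <linux/interrupt.h> is already included by this file.
 */
#if 0
static int example_enable_msix(struct lpfc_hba *phba,
                               unsigned int sp_vec, unsigned int fp_vec)
{
        int rc;

        /* One vector for slow-path events: mailbox, link, ELS ring */
        rc = request_irq(sp_vec, lpfc_sli_sp_intr_handler, 0,
                         "example-sp", phba);
        if (rc)
                return rc;

        /* One vector for fast-path FCP ring events */
        rc = request_irq(fp_vec, lpfc_sli_fp_intr_handler, 0,
                         "example-fp", phba);
        if (rc)
                free_irq(sp_vec, phba);
        return rc;
}
#endif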
13896
13897 /**
13898  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13899  * @irq: Interrupt number.
13900  * @dev_id: The device context pointer.
13901  *
13902  * This function is directly called from the PCI layer as an interrupt
13903  * service routine when a device with the SLI-3 interface spec is enabled with
13904  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13905  * ring event in the HBA. However, when the device is enabled with either
13906  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13907  * device-level interrupt handler. When the PCI slot is in error recovery
13908  * or the HBA is undergoing initialization, the interrupt handler will not
13909  * process the interrupt. The SCSI FCP fast-path ring events are handled in
13910  * the interrupt context. This function is called without any lock held.
13911  * It gets the hbalock to access and update SLI data structures.
13912  *
13913  * This function returns IRQ_HANDLED when the interrupt is handled, else it
13914  * returns IRQ_NONE.
13915  **/
13916 irqreturn_t
13917 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13918 {
13919         struct lpfc_hba  *phba;
13920         uint32_t ha_copy;
13921         unsigned long status;
13922         unsigned long iflag;
13923         struct lpfc_sli_ring *pring;
13924
13925         /* Get the driver's phba structure from the dev_id and
13926          * assume the HBA is not interrupting.
13927          */
13928         phba = (struct lpfc_hba *) dev_id;
13929
13930         if (unlikely(!phba))
13931                 return IRQ_NONE;
13932
13933         /*
13934          * Stuff needs to be attended to when this function is invoked as an
13935          * individual interrupt handler in MSI-X multi-message interrupt mode.
13936          */
13937         if (phba->intr_type == MSIX) {
13938                 /* Check device state for handling interrupt */
13939                 if (lpfc_intr_state_check(phba))
13940                         return IRQ_NONE;
13941                 /* Need to read HA REG for FCP ring and other ring events */
13942                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13943                         return IRQ_HANDLED;
13944                 /* Clear only the attention sources related to the fast path */
13945                 spin_lock_irqsave(&phba->hbalock, iflag);
13946                 /*
13947                  * If there is deferred error attention, do not check for
13948                  * any interrupt.
13949                  */
13950                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13951                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13952                         return IRQ_NONE;
13953                 }
13954                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13955                         phba->HAregaddr);
13956                 readl(phba->HAregaddr); /* flush */
13957                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13958         } else
13959                 ha_copy = phba->ha_copy;
13960
13961         /*
13962          * Process all events on FCP ring. Take the optimized path for FCP IO.
13963          */
13964         ha_copy &= ~(phba->work_ha_mask);
13965
13966         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13967         status >>= (4*LPFC_FCP_RING);
13968         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13969         if (status & HA_RXMASK)
13970                 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13971
13972         if (phba->cfg_multi_ring_support == 2) {
13973                 /*
13974                  * Process all events on extra ring. Take the optimized path
13975                  * for extra ring IO.
13976                  */
13977                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13978                 status >>= (4*LPFC_EXTRA_RING);
13979                 if (status & HA_RXMASK) {
13980                         lpfc_sli_handle_fast_ring_event(phba,
13981                                         &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13982                                         status);
13983                 }
13984         }
13985         return IRQ_HANDLED;
13986 }  /* lpfc_sli_fp_intr_handler */
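/*
 * Worked example (informational): the Host Attention register reserves a
 * 4-bit nibble of attention bits per ring, so ring N's receive attention
 * bits are isolated with:
 *
 *      status = ha_copy & (HA_RXMASK << (4 * N));
 *      status >>= (4 * N);
 *      if (status & HA_RXMASK)
 *              ;       (ring N has receive work pending)
 *
 * which is exactly the pattern used above for LPFC_FCP_RING and
 * LPFC_EXTRA_RING, and in the slow-path handler for LPFC_ELS_RING.
 */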
13987
13988 /**
13989  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13990  * @irq: Interrupt number.
13991  * @dev_id: The device context pointer.
13992  *
13993  * This function is the HBA device-level interrupt handler to device with
13994  * SLI-3 interface spec, called from the PCI layer when either MSI or
13995  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
13996  * requires driver attention. This function invokes the slow-path interrupt
13997  * attention handling function and fast-path interrupt attention handling
13998  * function in turn to process the relevant HBA attention events. This
13999  * function is called without any lock held. It gets the hbalock to access
14000  * and update SLI data structures.
14001  *
14002  * This function returns IRQ_HANDLED when the interrupt is handled, else it
14003  * returns IRQ_NONE.
14004  **/
14005 irqreturn_t
14006 lpfc_sli_intr_handler(int irq, void *dev_id)
14007 {
14008         struct lpfc_hba  *phba;
14009         irqreturn_t sp_irq_rc, fp_irq_rc;
14010         unsigned long status1, status2;
14011         uint32_t hc_copy;
14012
14013         /*
14014          * Get the driver's phba structure from the dev_id and
14015          * assume the HBA is not interrupting.
14016          */
14017         phba = (struct lpfc_hba *) dev_id;
14018
14019         if (unlikely(!phba))
14020                 return IRQ_NONE;
14021
14022         /* Check device state for handling interrupt */
14023         if (lpfc_intr_state_check(phba))
14024                 return IRQ_NONE;
14025
14026         spin_lock(&phba->hbalock);
14027         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14028                 spin_unlock(&phba->hbalock);
14029                 return IRQ_HANDLED;
14030         }
14031
14032         if (unlikely(!phba->ha_copy)) {
14033                 spin_unlock(&phba->hbalock);
14034                 return IRQ_NONE;
14035         } else if (phba->ha_copy & HA_ERATT) {
14036                 if (phba->hba_flag & HBA_ERATT_HANDLED)
14037                         /* ERATT polling has handled ERATT */
14038                         phba->ha_copy &= ~HA_ERATT;
14039                 else
14040                         /* Indicate interrupt handler handles ERATT */
14041                         phba->hba_flag |= HBA_ERATT_HANDLED;
14042         }
14043
14044         /*
14045          * If there is deferred error attention, do not check for any interrupt.
14046          */
14047         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
14048                 spin_unlock(&phba->hbalock);
14049                 return IRQ_NONE;
14050         }
14051
14052         /* Clear attention sources except link and error attentions */
14053         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14054                 spin_unlock(&phba->hbalock);
14055                 return IRQ_HANDLED;
14056         }
14057         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14058                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14059                 phba->HCregaddr);
14060         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14061         writel(hc_copy, phba->HCregaddr);
14062         readl(phba->HAregaddr); /* flush */
14063         spin_unlock(&phba->hbalock);
14064
14065         /*
14066          * Invokes slow-path host attention interrupt handling as appropriate.
14067          */
14068
14069         /* status of events with mailbox and link attention */
14070         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14071
14072         /* status of events with ELS ring */
14073         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
14074         status2 >>= (4*LPFC_ELS_RING);
14075
14076         if (status1 || (status2 & HA_RXMASK))
14077                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14078         else
14079                 sp_irq_rc = IRQ_NONE;
14080
14081         /*
14082          * Invoke fast-path host attention interrupt handling as appropriate.
14083          */
14084
14085         /* status of events with FCP ring */
14086         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14087         status1 >>= (4*LPFC_FCP_RING);
14088
14089         /* status of events with extra ring */
14090         if (phba->cfg_multi_ring_support == 2) {
14091                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14092                 status2 >>= (4*LPFC_EXTRA_RING);
14093         } else
14094                 status2 = 0;
14095
14096         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14097                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14098         else
14099                 fp_irq_rc = IRQ_NONE;
14100
14101         /* Return device-level interrupt handling status */
14102         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14103 }  /* lpfc_sli_intr_handler */
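/*
 * Informational note: the combined return above is equivalent to reporting
 * IRQ_HANDLED when either sub-handler consumed work, i.e.:
 *
 *      return (sp_irq_rc == IRQ_HANDLED || fp_irq_rc == IRQ_HANDLED) ?
 *              IRQ_HANDLED : IRQ_NONE;
 */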
14104
14105 /**
14106  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14107  * @phba: pointer to lpfc hba data structure.
14108  *
14109  * This routine is invoked by the worker thread to process all the pending
14110  * SLI4 els abort xri events.
14111  **/
14112 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14113 {
14114         struct lpfc_cq_event *cq_event;
14115         unsigned long iflags;
14116
14117         /* First, declare the els xri abort event has been handled */
14118         spin_lock_irqsave(&phba->hbalock, iflags);
14119         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
14120         spin_unlock_irqrestore(&phba->hbalock, iflags);
14121
14122         /* Now, handle all the els xri abort events */
14123         spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14124         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14125                 /* Get the first event from the head of the event queue */
14126                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14127                                  cq_event, struct lpfc_cq_event, list);
14128                 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14129                                        iflags);
14130                 /* Notify aborted XRI for ELS work queue */
14131                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14132
14133                 /* Free the event processed back to the free pool */
14134                 lpfc_sli4_cq_event_release(phba, cq_event);
14135                 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14136                                   iflags);
14137         }
14138         spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14139 }
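/*
 * Pattern sketch (editorial illustration): the drain loop above uses the
 * common "pop head under lock, process unlocked" idiom so the per-event
 * handler runs without holding the list lock.  Generic shape; the lock
 * and queue parameters and the example_handle() helper are hypothetical.
 */
#if 0
static void example_drain(spinlock_t *lock, struct list_head *queue)
{
        struct lpfc_cq_event *cq_event;
        unsigned long iflags;

        spin_lock_irqsave(lock, iflags);
        while (!list_empty(queue)) {
                list_remove_head(queue, cq_event, struct lpfc_cq_event, list);
                spin_unlock_irqrestore(lock, iflags);
                example_handle(cq_event);       /* runs without the lock */
                spin_lock_irqsave(lock, iflags);
        }
        spin_unlock_irqrestore(lock, iflags);
}
#endif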
14140
14141 /**
14142  * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14143  * @phba: Pointer to HBA context object.
14144  * @irspiocbq: Pointer to work-queue completion queue entry.
14145  *
14146  * This routine handles an ELS work-queue completion event and constructs
14147  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14148  * discovery engine to handle.
14149  *
14150  * Return: Pointer to the receive IOCBQ, NULL otherwise.
14151  **/
14152 static struct lpfc_iocbq *
14153 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14154                                   struct lpfc_iocbq *irspiocbq)
14155 {
14156         struct lpfc_sli_ring *pring;
14157         struct lpfc_iocbq *cmdiocbq;
14158         struct lpfc_wcqe_complete *wcqe;
14159         unsigned long iflags;
14160
14161         pring = lpfc_phba_elsring(phba);
14162         if (unlikely(!pring))
14163                 return NULL;
14164
14165         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14166         spin_lock_irqsave(&pring->ring_lock, iflags);
14167         pring->stats.iocb_event++;
14168         /* Look up the ELS command IOCB and create pseudo response IOCB */
14169         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14170                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14171         if (unlikely(!cmdiocbq)) {
14172                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14173                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14174                                 "0386 ELS complete with no corresponding "
14175                                 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14176                                 wcqe->word0, wcqe->total_data_placed,
14177                                 wcqe->parameter, wcqe->word3);
14178                 lpfc_sli_release_iocbq(phba, irspiocbq);
14179                 return NULL;
14180         }
14181
14182         memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14183         memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14184
14185         /* Put the iocb back on the txcmplq */
14186         lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14187         spin_unlock_irqrestore(&pring->ring_lock, iflags);
14188
14189         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14190                 spin_lock_irqsave(&phba->hbalock, iflags);
14191                 irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14192                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14193         }
14194
14195         return irspiocbq;
14196 }
14197
14198 inline struct lpfc_cq_event *
14199 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14200 {
14201         struct lpfc_cq_event *cq_event;
14202
14203         /* Allocate a new internal CQ_EVENT entry */
14204         cq_event = lpfc_sli4_cq_event_alloc(phba);
14205         if (!cq_event) {
14206                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14207                                 "0602 Failed to alloc CQ_EVENT entry\n");
14208                 return NULL;
14209         }
14210
14211         /* Move the CQE into the event */
14212         memcpy(&cq_event->cqe, entry, size);
14213         return cq_event;
14214 }
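/*
 * Usage sketch (informational, locking elided for brevity): callers hand
 * in the raw CQE and its size, queue the returned event for the worker
 * thread, and later release it with lpfc_sli4_cq_event_release().  The
 * async-event path below does essentially:
 *
 *      cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
 *      if (!cq_event)
 *              return false;
 *      list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
 */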
14215
14216 /**
14217  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14218  * @phba: Pointer to HBA context object.
14219  * @mcqe: Pointer to mailbox completion queue entry.
14220  *
14221  * This routine processes a mailbox completion queue entry with an asynchronous
14222  * event.
14223  *
14224  * Return: true if work posted to worker thread, otherwise false.
14225  **/
14226 static bool
14227 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14228 {
14229         struct lpfc_cq_event *cq_event;
14230         unsigned long iflags;
14231
14232         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14233                         "0392 Async Event: word0:x%x, word1:x%x, "
14234                         "word2:x%x, word3:x%x\n", mcqe->word0,
14235                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14236
14237         cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14238         if (!cq_event)
14239                 return false;
14240
14241         spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14242         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14243         spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14244
14245         /* Set the async event flag */
14246         spin_lock_irqsave(&phba->hbalock, iflags);
14247         phba->hba_flag |= ASYNC_EVENT;
14248         spin_unlock_irqrestore(&phba->hbalock, iflags);
14249
14250         return true;
14251 }
14252
14253 /**
14254  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14255  * @phba: Pointer to HBA context object.
14256  * @mcqe: Pointer to mailbox completion queue entry.
14257  *
14258  * This routine processes a mailbox completion queue entry with a mailbox
14259  * completion event.
14260  *
14261  * Return: true if work posted to worker thread, otherwise false.
14262  **/
14263 static bool
14264 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14265 {
14266         uint32_t mcqe_status;
14267         MAILBOX_t *mbox, *pmbox;
14268         struct lpfc_mqe *mqe;
14269         struct lpfc_vport *vport;
14270         struct lpfc_nodelist *ndlp;
14271         struct lpfc_dmabuf *mp;
14272         unsigned long iflags;
14273         LPFC_MBOXQ_t *pmb;
14274         bool workposted = false;
14275         int rc;
14276
14277         /* Not a mailbox-complete MCQE; just ack any consumed entry and exit */
14278         if (!bf_get(lpfc_trailer_completed, mcqe))
14279                 goto out_no_mqe_complete;
14280
14281         /* Get the reference to the active mbox command */
14282         spin_lock_irqsave(&phba->hbalock, iflags);
14283         pmb = phba->sli.mbox_active;
14284         if (unlikely(!pmb)) {
14285                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14286                                 "1832 No pending MBOX command to handle\n");
14287                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14288                 goto out_no_mqe_complete;
14289         }
14290         spin_unlock_irqrestore(&phba->hbalock, iflags);
14291         mqe = &pmb->u.mqe;
14292         pmbox = (MAILBOX_t *)&pmb->u.mqe;
14293         mbox = phba->mbox;
14294         vport = pmb->vport;
14295
14296         /* Reset heartbeat timer */
14297         phba->last_completion_time = jiffies;
14298         del_timer(&phba->sli.mbox_tmo);
14299
14300         /* Move mbox data to caller's mailbox region, do endian swapping */
14301         if (pmb->mbox_cmpl && mbox)
14302                 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14303
14304         /*
14305          * For mcqe errors, conditionally move a modified error code to
14306          * the mbox so that the error will not be missed.
14307          */
14308         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14309         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14310                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14311                         bf_set(lpfc_mqe_status, mqe,
14312                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
14313         }
14314         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14315                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14316                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14317                                       "MBOX dflt rpi: status:x%x rpi:x%x",
14318                                       mcqe_status,
14319                                       pmbox->un.varWords[0], 0);
14320                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14321                         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14322                         ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14323
14324                         /* Reg_LOGIN of dflt RPI was successful. Mark the
14325                          * node as having an UNREG_LOGIN in progress to stop
14326                          * an unsolicited PLOGI from the same NPortId from
14327                          * starting another mailbox transaction.
14328                          */
14329                         spin_lock_irqsave(&ndlp->lock, iflags);
14330                         ndlp->nlp_flag |= NLP_UNREG_INP;
14331                         spin_unlock_irqrestore(&ndlp->lock, iflags);
14332                         lpfc_unreg_login(phba, vport->vpi,
14333                                          pmbox->un.varWords[0], pmb);
14334                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14335                         pmb->ctx_buf = mp;
14336
14337                         /* No reference taken here.  This is a default
14338                          * RPI reg/immediate unreg cycle. The reference was
14339                          * taken in the reg rpi path and is released when
14340                          * this mailbox completes.
14341                          */
14342                         pmb->ctx_ndlp = ndlp;
14343                         pmb->vport = vport;
14344                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14345                         if (rc != MBX_BUSY)
14346                                 lpfc_printf_log(phba, KERN_ERR,
14347                                                 LOG_TRACE_EVENT,
14348                                                 "0385 rc should "
14349                                                 "have been MBX_BUSY\n");
14350                         if (rc != MBX_NOT_FINISHED)
14351                                 goto send_current_mbox;
14352                 }
14353         }
14354         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14355         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14356         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14357
14358         /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14359         if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14360                 spin_lock_irqsave(&phba->hbalock, iflags);
14361                 /* Release the mailbox command posting token */
14362                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14363                 phba->sli.mbox_active = NULL;
14364                 if (bf_get(lpfc_trailer_consumed, mcqe))
14365                         lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14366                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14367
14368                 /* Post the next mbox command, if there is one */
14369                 lpfc_sli4_post_async_mbox(phba);
14370
14371                 /* Process cmpl now */
14372                 if (pmb->mbox_cmpl)
14373                         pmb->mbox_cmpl(phba, pmb);
14374                 return false;
14375         }
14376
14377         /* There is mailbox completion work to queue to the worker thread */
14378         spin_lock_irqsave(&phba->hbalock, iflags);
14379         __lpfc_mbox_cmpl_put(phba, pmb);
14380         phba->work_ha |= HA_MBATT;
14381         spin_unlock_irqrestore(&phba->hbalock, iflags);
14382         workposted = true;
14383
14384 send_current_mbox:
14385         spin_lock_irqsave(&phba->hbalock, iflags);
14386         /* Release the mailbox command posting token */
14387         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14388         /* Clearing the active mailbox pointer must be in sync with flag clear */
14389         phba->sli.mbox_active = NULL;
14390         if (bf_get(lpfc_trailer_consumed, mcqe))
14391                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14392         spin_unlock_irqrestore(&phba->hbalock, iflags);
14393         /* Wake up worker thread to post the next pending mailbox command */
14394         lpfc_worker_wake_up(phba);
14395         return workposted;
14396
14397 out_no_mqe_complete:
14398         spin_lock_irqsave(&phba->hbalock, iflags);
14399         if (bf_get(lpfc_trailer_consumed, mcqe))
14400                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14401         spin_unlock_irqrestore(&phba->hbalock, iflags);
14402         return false;
14403 }
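/*
 * Informational sketch of the completion flow above, using the names from
 * this file:
 *
 *      MCQE arrives
 *       |- trailer not "completed" .... ack any consumed entry and exit
 *       |- LPFC_MBX_IMED_UNREG ........ reissue the mailbox as UNREG_LOGIN
 *       |                               for the default RPI, then release
 *       |                               the posting token
 *       |- MBX_HEARTBEAT .............. complete inline and post the next
 *       |                               mailbox command
 *       `- otherwise .................. queue to the worker thread via
 *                                       __lpfc_mbox_cmpl_put()
 */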
14404
14405 /**
14406  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14407  * @phba: Pointer to HBA context object.
14408  * @cq: Pointer to associated CQ
14409  * @cqe: Pointer to mailbox completion queue entry.
14410  *
14411  * This routine processes a mailbox completion queue entry. It invokes the
14412  * proper mailbox complete handling or asynchronous event handling routine
14413  * according to the MCQE's async bit.
14414  *
14415  * Return: true if work posted to worker thread, otherwise false.
14416  **/
14417 static bool
14418 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14419                          struct lpfc_cqe *cqe)
14420 {
14421         struct lpfc_mcqe mcqe;
14422         bool workposted;
14423
14424         cq->CQ_mbox++;
14425
14426         /* Copy the mailbox MCQE and convert endian order as needed */
14427         lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14428
14429         /* Invoke the proper event handling routine */
14430         if (!bf_get(lpfc_trailer_async, &mcqe))
14431                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14432         else
14433                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14434         return workposted;
14435 }
14436
14437 /**
14438  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14439  * @phba: Pointer to HBA context object.
14440  * @cq: Pointer to associated CQ
14441  * @wcqe: Pointer to work-queue completion queue entry.
14442  *
14443  * This routine handles an ELS work-queue completion event.
14444  *
14445  * Return: true if work posted to worker thread, otherwise false.
14446  **/
14447 static bool
14448 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14449                              struct lpfc_wcqe_complete *wcqe)
14450 {
14451         struct lpfc_iocbq *irspiocbq;
14452         unsigned long iflags;
14453         struct lpfc_sli_ring *pring = cq->pring;
14454         int txq_cnt = 0;
14455         int txcmplq_cnt = 0;
14456
14457         /* Check for response status */
14458         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14459                 /* Log the error status */
14460                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14461                                 "0357 ELS CQE error: status=x%x: "
14462                                 "CQE: %08x %08x %08x %08x\n",
14463                                 bf_get(lpfc_wcqe_c_status, wcqe),
14464                                 wcqe->word0, wcqe->total_data_placed,
14465                                 wcqe->parameter, wcqe->word3);
14466         }
14467
14468         /* Get an irspiocbq for later ELS response processing use */
14469         irspiocbq = lpfc_sli_get_iocbq(phba);
14470         if (!irspiocbq) {
14471                 if (!list_empty(&pring->txq))
14472                         txq_cnt++;
14473                 if (!list_empty(&pring->txcmplq))
14474                         txcmplq_cnt++;
14475                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14476                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14477                         "els_txcmplq_cnt=%d\n",
14478                         txq_cnt, phba->iocb_cnt,
14479                         txcmplq_cnt);
14480                 return false;
14481         }
14482
14483         /* Save off the slow-path queue event for work thread to process */
14484         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14485         spin_lock_irqsave(&phba->hbalock, iflags);
14486         list_add_tail(&irspiocbq->cq_event.list,
14487                       &phba->sli4_hba.sp_queue_event);
14488         phba->hba_flag |= HBA_SP_QUEUE_EVT;
14489         spin_unlock_irqrestore(&phba->hbalock, iflags);
14490
14491         return true;
14492 }
14493
14494 /**
14495  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14496  * @phba: Pointer to HBA context object.
14497  * @wcqe: Pointer to work-queue completion queue entry.
14498  *
14499  * This routine handles slow-path WQ entry consumed event by invoking the
14500  * proper WQ release routine to the slow-path WQ.
14501  **/
14502 static void
14503 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14504                              struct lpfc_wcqe_release *wcqe)
14505 {
14506         /* sanity check on queue memory */
14507         if (unlikely(!phba->sli4_hba.els_wq))
14508                 return;
14509         /* Check for the slow-path ELS work queue */
14510         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14511                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14512                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14513         else
14514                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14515                                 "2579 Slow-path wqe consume event carries "
14516                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14517                                 bf_get(lpfc_wcqe_r_wq_id, wcqe),
14518                                 phba->sli4_hba.els_wq->queue_id);
14519 }
14520
14521 /**
14522  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14523  * @phba: Pointer to HBA context object.
14524  * @cq: Pointer to a WQ completion queue.
14525  * @wcqe: Pointer to work-queue completion queue entry.
14526  *
14527  * This routine handles an XRI abort event.
14528  *
14529  * Return: true if work posted to worker thread, otherwise false.
14530  **/
14531 static bool
14532 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14533                                    struct lpfc_queue *cq,
14534                                    struct sli4_wcqe_xri_aborted *wcqe)
14535 {
14536         bool workposted = false;
14537         struct lpfc_cq_event *cq_event;
14538         unsigned long iflags;
14539
14540         switch (cq->subtype) {
14541         case LPFC_IO:
14542                 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14543                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14544                         /* Notify aborted XRI for NVME work queue */
14545                         if (phba->nvmet_support)
14546                                 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14547                 }
14548                 workposted = false;
14549                 break;
14550         case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14551         case LPFC_ELS:
14552                 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14553                 if (!cq_event) {
14554                         workposted = false;
14555                         break;
14556                 }
14557                 cq_event->hdwq = cq->hdwq;
14558                 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14559                                   iflags);
14560                 list_add_tail(&cq_event->list,
14561                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14562                 /* Set the els xri abort event flag */
14563                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14564                 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14565                                        iflags);
14566                 workposted = true;
14567                 break;
14568         default:
14569                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14570                                 "0603 Invalid CQ subtype %d: "
14571                                 "%08x %08x %08x %08x\n",
14572                                 cq->subtype, wcqe->word0, wcqe->parameter,
14573                                 wcqe->word2, wcqe->word3);
14574                 workposted = false;
14575                 break;
14576         }
14577         return workposted;
14578 }
14579
14580 #define FC_RCTL_MDS_DIAGS       0xF4
14581
14582 /**
14583  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14584  * @phba: Pointer to HBA context object.
14585  * @rcqe: Pointer to receive-queue completion queue entry.
14586  *
14587  * This routine processes a receive-queue completion queue entry.
14588  *
14589  * Return: true if work posted to worker thread, otherwise false.
14590  **/
14591 static bool
14592 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14593 {
14594         bool workposted = false;
14595         struct fc_frame_header *fc_hdr;
14596         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14597         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14598         struct lpfc_nvmet_tgtport *tgtp;
14599         struct hbq_dmabuf *dma_buf;
14600         uint32_t status, rq_id;
14601         unsigned long iflags;
14602
14603         /* sanity check on queue memory */
14604         if (unlikely(!hrq) || unlikely(!drq))
14605                 return workposted;
14606
14607         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14608                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14609         else
14610                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14611         if (rq_id != hrq->queue_id)
14612                 goto out;
14613
14614         status = bf_get(lpfc_rcqe_status, rcqe);
14615         switch (status) {
14616         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14617                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14618                                 "2537 Receive Frame Truncated!!\n");
14619                 fallthrough;
14620         case FC_STATUS_RQ_SUCCESS:
14621                 spin_lock_irqsave(&phba->hbalock, iflags);
14622                 lpfc_sli4_rq_release(hrq, drq);
14623                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14624                 if (!dma_buf) {
14625                         hrq->RQ_no_buf_found++;
14626                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14627                         goto out;
14628                 }
14629                 hrq->RQ_rcv_buf++;
14630                 hrq->RQ_buf_posted--;
14631                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14632
14633                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14634
14635                 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14636                     fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14637                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14638                         /* Handle MDS Loopback frames */
14639                         if  (!(phba->pport->load_flag & FC_UNLOADING))
14640                                 lpfc_sli4_handle_mds_loopback(phba->pport,
14641                                                               dma_buf);
14642                         else
14643                                 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14644                         break;
14645                 }
14646
14647                 /* save off the frame for the work thread to process */
14648                 list_add_tail(&dma_buf->cq_event.list,
14649                               &phba->sli4_hba.sp_queue_event);
14650                 /* Frame received */
14651                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14652                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14653                 workposted = true;
14654                 break;
14655         case FC_STATUS_INSUFF_BUF_FRM_DISC:
14656                 if (phba->nvmet_support) {
14657                         tgtp = phba->targetport->private;
14658                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14659                                         "6402 RQE Error x%x, posted %d err_cnt "
14660                                         "%d: %x %x %x\n",
14661                                         status, hrq->RQ_buf_posted,
14662                                         hrq->RQ_no_posted_buf,
14663                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
14664                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
14665                                         atomic_read(&tgtp->xmt_fcp_release));
14666                 }
14667                 fallthrough;
14668
14669         case FC_STATUS_INSUFF_BUF_NEED_BUF:
14670                 hrq->RQ_no_posted_buf++;
14671                 /* Post more buffers if possible */
14672                 spin_lock_irqsave(&phba->hbalock, iflags);
14673                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14674                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14675                 workposted = true;
14676                 break;
14677         }
14678 out:
14679         return workposted;
14680 }
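/*
 * Informational sketch: SLI4 receive queues come in header/data pairs
 * (hrq/drq), and a successful RCQE consumes one posted buffer from each:
 *
 *      lpfc_sli4_rq_release(hrq, drq);         (return both RQEs)
 *      dma_buf = lpfc_sli_hbqbuf_get(...);     (claim the DMA buffer)
 *      (then: handle MDS loopback frames inline, or queue the frame on
 *       sp_queue_event and let the worker thread process the sequence)
 */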
14681
14682 /**
14683  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14684  * @phba: Pointer to HBA context object.
14685  * @cq: Pointer to the completion queue.
14686  * @cqe: Pointer to a completion queue entry.
14687  *
14688  * This routine processes a slow-path work-queue or receive-queue completion queue
14689  * entry.
14690  *
14691  * Return: true if work posted to worker thread, otherwise false.
14692  **/
14693 static bool
14694 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14695                          struct lpfc_cqe *cqe)
14696 {
14697         struct lpfc_cqe cqevt;
14698         bool workposted = false;
14699
14700         /* Copy the work queue CQE and convert endian order if needed */
14701         lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14702
14703         /* Check and process for different type of WCQE and dispatch */
14704         switch (bf_get(lpfc_cqe_code, &cqevt)) {
14705         case CQE_CODE_COMPL_WQE:
14706                 /* Process the WQ/RQ complete event */
14707                 phba->last_completion_time = jiffies;
14708                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14709                                 (struct lpfc_wcqe_complete *)&cqevt);
14710                 break;
14711         case CQE_CODE_RELEASE_WQE:
14712                 /* Process the WQ release event */
14713                 lpfc_sli4_sp_handle_rel_wcqe(phba,
14714                                 (struct lpfc_wcqe_release *)&cqevt);
14715                 break;
14716         case CQE_CODE_XRI_ABORTED:
14717                 /* Process the WQ XRI abort event */
14718                 phba->last_completion_time = jiffies;
14719                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14720                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
14721                 break;
14722         case CQE_CODE_RECEIVE:
14723         case CQE_CODE_RECEIVE_V1:
14724                 /* Process the RQ event */
14725                 phba->last_completion_time = jiffies;
14726                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14727                                 (struct lpfc_rcqe *)&cqevt);
14728                 break;
14729         default:
14730                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14731                                 "0388 Not a valid WCQE code: x%x\n",
14732                                 bf_get(lpfc_cqe_code, &cqevt));
14733                 break;
14734         }
14735         return workposted;
14736 }
14737
14738 /**
14739  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14740  * @phba: Pointer to HBA context object.
14741  * @eqe: Pointer to fast-path event queue entry.
14742  * @speq: Pointer to slow-path event queue.
14743  *
14744  * This routine processes an event queue entry from the slow-path event queue.
14745  * It checks the MajorCode and MinorCode to determine whether this is a
14746  * completion event on a completion queue; if not, an error is logged and
14747  * the routine returns. Otherwise, it finds the corresponding completion
14748  * queue and schedules processing of all the entries on that completion
14749  * queue, rearming it when done.
14750  *
14751  **/
14752 static void
14753 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14754         struct lpfc_queue *speq)
14755 {
14756         struct lpfc_queue *cq = NULL, *childq;
14757         uint16_t cqid;
14758         int ret = 0;
14759
14760         /* Get the reference to the corresponding CQ */
14761         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14762
14763         list_for_each_entry(childq, &speq->child_list, list) {
14764                 if (childq->queue_id == cqid) {
14765                         cq = childq;
14766                         break;
14767                 }
14768         }
14769         if (unlikely(!cq)) {
14770                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14771                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14772                                         "0365 Slow-path CQ identifier "
14773                                         "(%d) does not exist\n", cqid);
14774                 return;
14775         }
14776
14777         /* Save EQ associated with this CQ */
14778         cq->assoc_qp = speq;
14779
14780         if (is_kdump_kernel())
14781                 ret = queue_work(phba->wq, &cq->spwork);
14782         else
14783                 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14784
14785         if (!ret)
14786                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14787                                 "0390 Cannot schedule queue work "
14788                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14789                                 cqid, cq->queue_id, raw_smp_processor_id());
14790 }
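/*
 * Informational note: kdump kernels boot with a single CPU, so the work is
 * queued without a CPU hint there; otherwise queue_work_on() steers the
 * slow-path CQ work to the CQ's assigned channel (cq->chann), keeping the
 * processing local to the EQ that delivered the event.
 */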
14791
14792 /**
14793  * __lpfc_sli4_process_cq - Process elements of a CQ
14794  * @phba: Pointer to HBA context object.
14795  * @cq: Pointer to CQ to be processed
14796  * @handler: Routine to process each cqe
14797  * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14798  * @poll_mode: Polling mode we were called from
14799  *
14800  * This routine processes completion queue entries in a CQ. While valid
14801  * queue entries are found, the handler is called for each one. During
14802  * processing, periodic doorbell writes let the hardware know how many
14803  * entries have been consumed.
14804  *
14805  * If the max limit on CQEs to process is hit, or there are no more valid
14806  * entries, the loop stops. If we processed a sufficient number of
14807  * entries, meaning there is sufficient load, a CQ rescheduling delay is
14808  * set rather than rearming and generating another interrupt. A delay of
14809  * 0 indicates no rescheduling.
14810  *
14811  * Returns true if work was posted to the worker thread, false otherwise.
14812  **/
14813 static bool
14814 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14815         bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14816                         struct lpfc_cqe *), unsigned long *delay,
14817                         enum lpfc_poll_mode poll_mode)
14818 {
14819         struct lpfc_cqe *cqe;
14820         bool workposted = false;
14821         int count = 0, consumed = 0;
14822         bool arm = true;
14823
14824         /* default - no reschedule */
14825         *delay = 0;
14826
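        /* Atomically claim the CQ; if another context already owns it,
         * skip processing and just ring the doorbell on the way out.
         */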
14827         if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14828                 goto rearm_and_exit;
14829
14830         /* Process all the entries to the CQ */
14831         cq->q_flag = 0;
14832         cqe = lpfc_sli4_cq_get(cq);
14833         while (cqe) {
14834                 workposted |= handler(phba, cq, cqe);
14835                 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14836
14837                 consumed++;
14838                 if (!(++count % cq->max_proc_limit))
14839                         break;
14840
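                /* Every notify_interval entries, ring the CQ doorbell
                 * without rearming so the hardware can reclaim the entries
                 * consumed so far.
                 */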
14841                 if (!(count % cq->notify_interval)) {
14842                         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14843                                                 LPFC_QUEUE_NOARM);
14844                         consumed = 0;
14845                         cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14846                 }
14847
14848                 if (count == LPFC_NVMET_CQ_NOTIFY)
14849                         cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14850
14851                 cqe = lpfc_sli4_cq_get(cq);
14852         }
14853         if (count >= phba->cfg_cq_poll_threshold) {
14854                 *delay = 1;
14855                 arm = false;
14856         }
14857
14858         /* Note: complete the irq_poll softirq before rearming CQ */
14859         if (poll_mode == LPFC_IRQ_POLL)
14860                 irq_poll_complete(&cq->iop);
14861
14862         /* Track the max number of CQEs processed in 1 EQ */
14863         if (count > cq->CQ_max_cqe)
14864                 cq->CQ_max_cqe = count;
14865
14866         cq->assoc_qp->EQ_cqe_cnt += count;
14867
14868         /* Catch the no cq entry condition */
14869         if (unlikely(count == 0))
14870                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14871                                 "0369 No entry from completion queue "
14872                                 "qid=%d\n", cq->queue_id);
14873
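        /* Drop the claim; xchg implies a full barrier, so the CQE updates
         * above are visible before another context can claim the CQ.
         */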
14874         xchg(&cq->queue_claimed, 0);
14875
14876 rearm_and_exit:
14877         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14878                         arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14879
14880         return workposted;
14881 }
14882
14883 /**
14884  * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
14885  * @cq: pointer to CQ to process
14886  *
14887  * This routine calls the cq processing routine with a handler specific
14888  * to the type of queue bound to it.
14889  *
14890  * The CQ routine returns two values: the first is the calling status,
14891  * which indicates whether work was queued to the background discovery
14892  * thread. If true, the routine should wake up the discovery thread;
14893  * the second is the delay parameter. If non-zero, rather than rearming
14894  * the CQ and taking yet another interrupt, the CQ handler should be
14895  * queued so that it is processed in a subsequent polling action. The
14896  * value of the delay indicates when to reschedule it.
14897  **/
14898 static void
14899 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14900 {
14901         struct lpfc_hba *phba = cq->phba;
14902         unsigned long delay;
14903         bool workposted = false;
14904         int ret = 0;
14905
14906         /* Process and rearm the CQ */
14907         switch (cq->type) {
14908         case LPFC_MCQ:
14909                 workposted |= __lpfc_sli4_process_cq(phba, cq,
14910                                                 lpfc_sli4_sp_handle_mcqe,
14911                                                 &delay, LPFC_QUEUE_WORK);
14912                 break;
14913         case LPFC_WCQ:
14914                 if (cq->subtype == LPFC_IO)
14915                         workposted |= __lpfc_sli4_process_cq(phba, cq,
14916                                                 lpfc_sli4_fp_handle_cqe,
14917                                                 &delay, LPFC_QUEUE_WORK);
14918                 else
14919                         workposted |= __lpfc_sli4_process_cq(phba, cq,
14920                                                 lpfc_sli4_sp_handle_cqe,
14921                                                 &delay, LPFC_QUEUE_WORK);
14922                 break;
14923         default:
14924                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14925                                 "0370 Invalid completion queue type (%d)\n",
14926                                 cq->type);
14927                 return;
14928         }
14929
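        /* The handler asked for a reschedule: queue the CQ handler as
         * delayed work rather than rearming now (in a kdump kernel, do not
         * pin the work to a possibly offline CPU).
         */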
14930         if (delay) {
14931                 if (is_kdump_kernel())
14932                         ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14933                                                 delay);
14934                 else
14935                         ret = queue_delayed_work_on(cq->chann, phba->wq,
14936                                                 &cq->sched_spwork, delay);
14937                 if (!ret)
14938                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14939                                 "0394 Cannot schedule queue work "
14940                                 "for cqid=%d on CPU %d\n",
14941                                 cq->queue_id, cq->chann);
14942         }
14943
14944         /* wake up worker thread if there are works to be done */
14945         if (workposted)
14946                 lpfc_worker_wake_up(phba);
14947 }
14948
14949 /**
14950  * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14951  *   interrupt
14952  * @work: pointer to work element
14953  *
14954  * Resolves the work element to its queue and calls the slow-path handler.
14955  **/
14956 static void
14957 lpfc_sli4_sp_process_cq(struct work_struct *work)
14958 {
14959         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14960
14961         __lpfc_sli4_sp_process_cq(cq);
14962 }
14963
14964 /**
14965  * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14966  * @work: pointer to work element
14967  *
14968  * Resolves the delayed work element to its queue and calls the slow-path handler.
14969  **/
14970 static void
14971 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14972 {
14973         struct lpfc_queue *cq = container_of(to_delayed_work(work),
14974                                         struct lpfc_queue, sched_spwork);
14975
14976         __lpfc_sli4_sp_process_cq(cq);
14977 }
14978
14979 /**
14980  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14981  * @phba: Pointer to HBA context object.
14982  * @cq: Pointer to associated CQ
14983  * @wcqe: Pointer to work-queue completion queue entry.
14984  *
14985  * This routine processes a fast-path work queue completion entry from the
14986  * fast-path event queue for an FCP command response completion.
14987  **/
14988 static void
14989 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14990                              struct lpfc_wcqe_complete *wcqe)
14991 {
14992         struct lpfc_sli_ring *pring = cq->pring;
14993         struct lpfc_iocbq *cmdiocbq;
14994         unsigned long iflags;
14995
14996         /* Check for response status */
14997         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14998                 /* If resource errors reported from HBA, reduce queue
14999                  * depth of the SCSI device.
15000                  */
15001                 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15002                      IOSTAT_LOCAL_REJECT)) &&
15003                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
15004                      IOERR_NO_RESOURCES))
15005                         phba->lpfc_rampdown_queue_depth(phba);
15006
15007                 /* Log the cmpl status */
15008                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15009                                 "0373 FCP CQE cmpl: status=x%x: "
15010                                 "CQE: %08x %08x %08x %08x\n",
15011                                 bf_get(lpfc_wcqe_c_status, wcqe),
15012                                 wcqe->word0, wcqe->total_data_placed,
15013                                 wcqe->parameter, wcqe->word3);
15014         }
15015
15016         /* Look up the FCP command IOCB and create pseudo response IOCB */
15017         spin_lock_irqsave(&pring->ring_lock, iflags);
15018         pring->stats.iocb_event++;
15019         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15020                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15021         spin_unlock_irqrestore(&pring->ring_lock, iflags);
15022         if (unlikely(!cmdiocbq)) {
15023                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15024                                 "0374 FCP complete with no corresponding "
15025                                 "cmdiocb: iotag (%d)\n",
15026                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15027                 return;
15028         }
15029 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15030         cmdiocbq->isr_timestamp = cq->isr_timestamp;
15031 #endif
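        /* The XB bit in the WCQE indicates the exchange is still busy on
         * the HBA; flag the command so its XRI is not reused until the
         * XRI aborted event arrives.
         */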
15032         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15033                 spin_lock_irqsave(&phba->hbalock, iflags);
15034                 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15035                 spin_unlock_irqrestore(&phba->hbalock, iflags);
15036         }
15037
15038         if (cmdiocbq->cmd_cmpl) {
15039                 /* For FCP the flag is cleared in cmd_cmpl */
15040                 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15041                     cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15042                         spin_lock_irqsave(&phba->hbalock, iflags);
15043                         cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15044                         spin_unlock_irqrestore(&phba->hbalock, iflags);
15045                 }
15046
15047                 /* Pass the cmd_iocb and the wcqe to the upper layer */
15048                 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15049                        sizeof(struct lpfc_wcqe_complete));
15050                 cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15051         } else {
15052                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15053                                 "0375 FCP cmdiocb not callback function "
15054                                 "iotag: (%d)\n",
15055                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15056         }
15057 }
15058
15059 /**
15060  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15061  * @phba: Pointer to HBA context object.
15062  * @cq: Pointer to completion queue.
15063  * @wcqe: Pointer to work-queue completion queue entry.
15064  *
15065  * This routine handles a fast-path WQ entry consumed event by invoking the
15066  * proper WQ release routine on the work queue that consumed the entry.
15067  **/
15068 static void
15069 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15070                              struct lpfc_wcqe_release *wcqe)
15071 {
15072         struct lpfc_queue *childwq;
15073         bool wqid_matched = false;
15074         uint16_t hba_wqid;
15075
15076         /* Check for fast-path FCP work queue release */
15077         hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15078         list_for_each_entry(childwq, &cq->child_list, list) {
15079                 if (childwq->queue_id == hba_wqid) {
15080                         lpfc_sli4_wq_release(childwq,
15081                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15082                         if (childwq->q_flag & HBA_NVMET_WQFULL)
15083                                 lpfc_nvmet_wqfull_process(phba, childwq);
15084                         wqid_matched = true;
15085                         break;
15086                 }
15087         }
15088         /* Report warning log message if no match found */
15089         if (!wqid_matched)
15090                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15091                                 "2580 Fast-path wqe consume event carries "
15092                                 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
15093 }
15094
15095 /**
15096  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15097  * @phba: Pointer to HBA context object.
15098  * @cq: Pointer to completion queue.
15099  * @rcqe: Pointer to receive-queue completion queue entry.
15100  *
15101  * This routine processes a receive-queue completion queue entry.
15102  *
15103  * Return: true if work posted to worker thread, otherwise false.
15104  **/
15105 static bool
15106 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15107                             struct lpfc_rcqe *rcqe)
15108 {
15109         bool workposted = false;
15110         struct lpfc_queue *hrq;
15111         struct lpfc_queue *drq;
15112         struct rqb_dmabuf *dma_buf;
15113         struct fc_frame_header *fc_hdr;
15114         struct lpfc_nvmet_tgtport *tgtp;
15115         uint32_t status, rq_id;
15116         unsigned long iflags;
15117         uint32_t fctl, idx;
15118
15119         if ((phba->nvmet_support == 0) ||
15120             (phba->sli4_hba.nvmet_cqset == NULL))
15121                 return workposted;
15122
15123         idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15124         hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15125         drq = phba->sli4_hba.nvmet_mrq_data[idx];
15126
15127         /* sanity check on queue memory */
15128         if (unlikely(!hrq) || unlikely(!drq))
15129                 return workposted;
15130
15131         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15132                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15133         else
15134                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15135
15136         if ((phba->nvmet_support == 0) ||
15137             (rq_id != hrq->queue_id))
15138                 return workposted;
15139
15140         status = bf_get(lpfc_rcqe_status, rcqe);
15141         switch (status) {
15142         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15143                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15144                                 "6126 Receive Frame Truncated!!\n");
15145                 fallthrough;
15146         case FC_STATUS_RQ_SUCCESS:
15147                 spin_lock_irqsave(&phba->hbalock, iflags);
15148                 lpfc_sli4_rq_release(hrq, drq);
15149                 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15150                 if (!dma_buf) {
15151                         hrq->RQ_no_buf_found++;
15152                         spin_unlock_irqrestore(&phba->hbalock, iflags);
15153                         goto out;
15154                 }
15155                 spin_unlock_irqrestore(&phba->hbalock, iflags);
15156                 hrq->RQ_rcv_buf++;
15157                 hrq->RQ_buf_posted--;
15158                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15159
15160                 /* Just some basic sanity checks on FCP Command frame */
15161                 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15162                         fc_hdr->fh_f_ctl[1] << 8 |
15163                         fc_hdr->fh_f_ctl[2]);
15164                 if (((fctl &
15165                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15166                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15167                     (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15168                         goto drop;
15169
15170                 if (fc_hdr->fh_type == FC_TYPE_FCP) {
15171                         dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15172                         lpfc_nvmet_unsol_fcp_event(
15173                                 phba, idx, dma_buf, cq->isr_timestamp,
15174                                 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15175                         return false;
15176                 }
15177 drop:
15178                 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15179                 break;
15180         case FC_STATUS_INSUFF_BUF_FRM_DISC:
15181                 if (phba->nvmet_support) {
15182                         tgtp = phba->targetport->private;
15183                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15184                                         "6401 RQE Error x%x, posted %d err_cnt "
15185                                         "%d: %x %x %x\n",
15186                                         status, hrq->RQ_buf_posted,
15187                                         hrq->RQ_no_posted_buf,
15188                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
15189                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
15190                                         atomic_read(&tgtp->xmt_fcp_release));
15191                 }
15192                 fallthrough;
15193
15194         case FC_STATUS_INSUFF_BUF_NEED_BUF:
15195                 hrq->RQ_no_posted_buf++;
15196                 /* Post more buffers if possible */
15197                 break;
15198         }
15199 out:
15200         return workposted;
15201 }
15202
15203 /**
15204  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15205  * @phba: adapter with cq
15206  * @cq: Pointer to the completion queue.
15207  * @cqe: Pointer to fast-path completion queue entry.
15208  *
15209  * This routine processes a fast-path completion queue entry from the
15210  * fast-path event queue, dispatching on the CQE code to the proper handler.
15211  *
15212  * Return: true if work posted to worker thread, otherwise false.
15213  **/
15214 static bool
15215 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15216                          struct lpfc_cqe *cqe)
15217 {
15218         struct lpfc_wcqe_release wcqe;
15219         bool workposted = false;
15220
15221         /* Copy the work queue CQE and convert endian order if needed */
15222         lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15223
15224         /* Check and process for different type of WCQE and dispatch */
15225         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15226         case CQE_CODE_COMPL_WQE:
15227         case CQE_CODE_NVME_ERSP:
15228                 cq->CQ_wq++;
15229                 /* Process the WQ complete event */
15230                 phba->last_completion_time = jiffies;
15231                 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15232                         lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15233                                 (struct lpfc_wcqe_complete *)&wcqe);
15234                 break;
15235         case CQE_CODE_RELEASE_WQE:
15236                 cq->CQ_release_wqe++;
15237                 /* Process the WQ release event */
15238                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15239                                 (struct lpfc_wcqe_release *)&wcqe);
15240                 break;
15241         case CQE_CODE_XRI_ABORTED:
15242                 cq->CQ_xri_aborted++;
15243                 /* Process the WQ XRI abort event */
15244                 phba->last_completion_time = jiffies;
15245                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15246                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
15247                 break;
15248         case CQE_CODE_RECEIVE_V1:
15249         case CQE_CODE_RECEIVE:
15250                 phba->last_completion_time = jiffies;
15251                 if (cq->subtype == LPFC_NVMET) {
15252                         workposted = lpfc_sli4_nvmet_handle_rcqe(
15253                                 phba, cq, (struct lpfc_rcqe *)&wcqe);
15254                 }
15255                 break;
15256         default:
15257                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15258                                 "0144 Not a valid CQE code: x%x\n",
15259                                 bf_get(lpfc_wcqe_c_code, &wcqe));
15260                 break;
15261         }
15262         return workposted;
15263 }
15264
15265 /**
15266  * lpfc_sli4_sched_cq_work - Schedules cq work
15267  * @phba: Pointer to HBA context object.
15268  * @cq: Pointer to CQ
15269  * @cqid: CQ ID
15270  *
15271  * This routine checks the poll mode of the CQ corresponding to
15272  * cq->chann, then either schedules the CQ work via irq_poll (softirq)
15273  * or queues it to the driver workqueue.
15274  *
15275  * The softirq path is taken only when poll_mode is LPFC_IRQ_POLL and
15276  * congestion management is off; otherwise the queue_work path is taken.
15277  *
15278  **/
15279 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
15280                                     struct lpfc_queue *cq, uint16_t cqid)
15281 {
15282         int ret = 0;
15283
15284         switch (cq->poll_mode) {
15285         case LPFC_IRQ_POLL:
15286                 /* CGN mgmt is mutually exclusive from softirq processing */
15287                 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
15288                         irq_poll_sched(&cq->iop);
15289                         break;
15290                 }
15291                 fallthrough;
15292         case LPFC_QUEUE_WORK:
15293         default:
15294                 if (is_kdump_kernel())
15295                         ret = queue_work(phba->wq, &cq->irqwork);
15296                 else
15297                         ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15298                 if (!ret)
15299                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15300                                         "0383 Cannot schedule queue work "
15301                                         "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15302                                         cqid, cq->queue_id,
15303                                         raw_smp_processor_id());
15304         }
15305 }
15306
15307 /**
15308  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15309  * @phba: Pointer to HBA context object.
15310  * @eq: Pointer to the queue structure.
15311  * @eqe: Pointer to fast-path event queue entry.
15312  *
15313  * This routine processes an event queue entry from the fast-path event
15314  * queue. It checks the MajorCode and MinorCode to determine whether this
15315  * is a completion event on a completion queue; if not, an error is logged
15316  * and the routine returns. Otherwise, it locates the corresponding
15317  * completion queue and schedules processing of all the entries on that
15318  * completion queue, which is then rearmed.
15319  **/
15320 static void
15321 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15322                          struct lpfc_eqe *eqe)
15323 {
15324         struct lpfc_queue *cq = NULL;
15325         uint32_t qidx = eq->hdwq;
15326         uint16_t cqid, id;
15327
15328         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15329                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15330                                 "0366 Not a valid completion "
15331                                 "event: majorcode=x%x, minorcode=x%x\n",
15332                                 bf_get_le32(lpfc_eqe_major_code, eqe),
15333                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
15334                 return;
15335         }
15336
15337         /* Get the reference to the corresponding CQ */
15338         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15339
15340         /* Use the fast lookup method first */
15341         if (cqid <= phba->sli4_hba.cq_max) {
15342                 cq = phba->sli4_hba.cq_lookup[cqid];
15343                 if (cq)
15344                         goto  work_cq;
15345         }
15346
15347         /* Next check for NVMET completion */
15348         if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15349                 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15350                 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15351                         /* Process NVMET unsol rcv */
15352                         cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15353                         goto  process_cq;
15354                 }
15355         }
15356
15357         if (phba->sli4_hba.nvmels_cq &&
15358             (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15359                 /* Process NVME unsol rcv */
15360                 cq = phba->sli4_hba.nvmels_cq;
15361         }
15362
15363         /* Otherwise this is a Slow path event */
15364         if (!cq) {
15365                 lpfc_sli4_sp_handle_eqe(phba, eqe,
15366                                         phba->sli4_hba.hdwq[qidx].hba_eq);
15367                 return;
15368         }
15369
15370 process_cq:
15371         if (unlikely(cqid != cq->queue_id)) {
15372                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15373                                 "0368 Miss-matched fast-path completion "
15374                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15375                                 cqid, cq->queue_id);
15376                 return;
15377         }
15378
15379 work_cq:
15380 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15381         if (phba->ktime_on)
15382                 cq->isr_timestamp = ktime_get_ns();
15383         else
15384                 cq->isr_timestamp = 0;
15385 #endif
15386         lpfc_sli4_sched_cq_work(phba, cq, cqid);
15387 }
15388
15389 /**
15390  * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
15391  * @cq: Pointer to CQ to be processed
15392  * @poll_mode: Enum lpfc_poll_mode to determine the poll mode
15393  *
15394  * This routine calls the cq processing routine with the handler for
15395  * fast path CQEs.
15396  *
15397  * The CQ routine returns two values: the first is the calling status,
15398  * which indicates whether work was queued to the background discovery
15399  * thread. If true, the routine should wake up the discovery thread;
15400  * the second is the delay parameter. If non-zero, rather than rearming
15401  * the CQ and taking yet another interrupt, the CQ handler should be
15402  * queued so that it is processed in a subsequent polling action. The
15403  * value of the delay indicates when to reschedule it.
15404  **/
15405 static void
15406 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
15407                            enum lpfc_poll_mode poll_mode)
15408 {
15409         struct lpfc_hba *phba = cq->phba;
15410         unsigned long delay;
15411         bool workposted = false;
15412         int ret = 0;
15413
15414         /* process and rearm the CQ */
15415         workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15416                                              &delay, poll_mode);
15417
15418         if (delay) {
15419                 if (is_kdump_kernel())
15420                         ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15421                                                 delay);
15422                 else
15423                         ret = queue_delayed_work_on(cq->chann, phba->wq,
15424                                                 &cq->sched_irqwork, delay);
15425                 if (!ret)
15426                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15427                                         "0367 Cannot schedule queue work "
15428                                         "for cqid=%d on CPU %d\n",
15429                                         cq->queue_id, cq->chann);
15430         }
15431
15432         /* wake up worker thread if there are works to be done */
15433         if (workposted)
15434                 lpfc_worker_wake_up(phba);
15435 }
15436
15437 /**
15438  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15439  *   interrupt
15440  * @work: pointer to work element
15441  *
15442  * Resolves the work element to its queue and calls the fast-path handler.
15443  **/
15444 static void
15445 lpfc_sli4_hba_process_cq(struct work_struct *work)
15446 {
15447         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15448
15449         __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15450 }
15451
15452 /**
15453  * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15454  * @work: pointer to work element
15455  *
15456  * Resolves the delayed work element to its queue and calls the fast-path handler.
15457  **/
15458 static void
15459 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15460 {
15461         struct lpfc_queue *cq = container_of(to_delayed_work(work),
15462                                         struct lpfc_queue, sched_irqwork);
15463
15464         __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15465 }
15466
15467 /**
15468  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15469  * @irq: Interrupt number.
15470  * @dev_id: The device context pointer.
15471  *
15472  * This function is directly called from the PCI layer as an interrupt
15473  * service routine when a device with the SLI-4 interface spec is enabled
15474  * with MSI-X multi-message interrupt mode and there is a fast-path FCP
15475  * IOCB ring event in the HBA. However, when the device is enabled with
15476  * either MSI or Pin-IRQ interrupt mode, this function is called as part
15477  * of the device-level interrupt handler. When the PCI slot is in error
15478  * recovery or the HBA is undergoing initialization, the interrupt handler
15479  * will not process the interrupt. The SCSI FCP fast-path ring events are
15480  * handled in interrupt context. This function is called without any lock
15481  * held. It gets the hbalock to access and update SLI data structures.
15482  * Note that the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ
15483  * index is equal to the FCP CQ index.
15484  *
15485  * The link attention and ELS ring attention events are handled
15486  * by the worker thread. The interrupt handler signals the worker thread
15487  * and returns for these events. This function is called without any lock
15488  * held. It gets the hbalock to access and update SLI data structures.
15489  *
15490  * This function returns IRQ_HANDLED when the interrupt is handled;
15491  * otherwise it returns IRQ_NONE.
15492  **/
15493 irqreturn_t
15494 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15495 {
15496         struct lpfc_hba *phba;
15497         struct lpfc_hba_eq_hdl *hba_eq_hdl;
15498         struct lpfc_queue *fpeq;
15499         unsigned long iflag;
15500         int ecount = 0;
15501         int hba_eqidx;
15502         struct lpfc_eq_intr_info *eqi;
15503
15504         /* Get the driver's phba structure from the dev_id */
15505         hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15506         phba = hba_eq_hdl->phba;
15507         hba_eqidx = hba_eq_hdl->idx;
15508
15509         if (unlikely(!phba))
15510                 return IRQ_NONE;
15511         if (unlikely(!phba->sli4_hba.hdwq))
15512                 return IRQ_NONE;
15513
15514         /* Get to the EQ struct associated with this vector */
15515         fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15516         if (unlikely(!fpeq))
15517                 return IRQ_NONE;
15518
15519         /* Check device state for handling interrupt */
15520         if (unlikely(lpfc_intr_state_check(phba))) {
15521                 /* Check again for link_state with lock held */
15522                 spin_lock_irqsave(&phba->hbalock, iflag);
15523                 if (phba->link_state < LPFC_LINK_DOWN)
15524                         /* Flush, clear interrupt, and rearm the EQ */
15525                         lpfc_sli4_eqcq_flush(phba, fpeq);
15526                 spin_unlock_irqrestore(&phba->hbalock, iflag);
15527                 return IRQ_NONE;
15528         }
15529
15530         eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15531         eqi->icnt++;
15532
15533         fpeq->last_cpu = raw_smp_processor_id();
15534
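        /* Under sustained interrupt load (per-CPU ISR count above the
         * trigger), back off by programming the maximum EQ delay so that
         * interrupts are coalesced.
         */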
15535         if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15536             fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15537             phba->cfg_auto_imax &&
15538             fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15539             phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15540                 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
15541
15542         /* process and rearm the EQ */
15543         ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
15544
15545         if (unlikely(ecount == 0)) {
15546                 fpeq->EQ_no_entry++;
15547                 if (phba->intr_type == MSIX)
15548                         /* MSI-X treated interrupt served as no EQ share INT */
15549                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15550                                         "0358 MSI-X interrupt with no EQE\n");
15551                 else
15552                         /* Non MSI-X treated on interrupt as EQ share INT */
15553                         return IRQ_NONE;
15554         }
15555
15556         return IRQ_HANDLED;
15557 } /* lpfc_sli4_hba_intr_handler */
15558
15559 /**
15560  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15561  * @irq: Interrupt number.
15562  * @dev_id: The device context pointer.
15563  *
15564  * This function is the device-level interrupt handler for a device with
15565  * the SLI-4 interface spec, called from the PCI layer when either MSI or
15566  * Pin-IRQ interrupt mode is enabled and there is an HBA event requiring
15567  * driver attention. This function invokes the slow-path interrupt attention
15568  * handling function and fast-path interrupt attention handling function in
15569  * turn to process the relevant HBA attention events. This function is called
15570  * without any lock held. It gets the hbalock to access and update SLI data
15571  * structures.
15572  *
15573  * This function returns IRQ_HANDLED when the interrupt is handled;
15574  * otherwise it returns IRQ_NONE.
15575  **/
15576 irqreturn_t
15577 lpfc_sli4_intr_handler(int irq, void *dev_id)
15578 {
15579         struct lpfc_hba  *phba;
15580         irqreturn_t hba_irq_rc;
15581         bool hba_handled = false;
15582         int qidx;
15583
15584         /* Get the driver's phba structure from the dev_id */
15585         phba = (struct lpfc_hba *)dev_id;
15586
15587         if (unlikely(!phba))
15588                 return IRQ_NONE;
15589
15590         /*
15591          * Invoke fast-path host attention interrupt handling as appropriate.
15592          */
15593         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15594                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15595                                         &phba->sli4_hba.hba_eq_hdl[qidx]);
15596                 if (hba_irq_rc == IRQ_HANDLED)
15597                         hba_handled = true;
15598         }
15599
15600         return hba_handled ? IRQ_HANDLED : IRQ_NONE;
15601 } /* lpfc_sli4_intr_handler */
15602
15603 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15604 {
15605         struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15606         struct lpfc_queue *eq;
15607         int i = 0;
15608
15609         rcu_read_lock();
15610
15611         list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15612                 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
15613         if (!list_empty(&phba->poll_list))
15614                 mod_timer(&phba->cpuhp_poll_timer,
15615                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15616
15617         rcu_read_unlock();
15618 }
15619
15620 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
15621 {
15622         struct lpfc_hba *phba = eq->phba;
15623         int i = 0;
15624
15625         /*
15626          * Unlocking an irq is one of the entry points to check
15627          * for re-scheduling, but we are safe on the io submission
15628          * path because the midlayer pins us to a CPU via get_cpu.
15629          * Issue a read barrier so we observe the most recent
15630          * value of eq->mode.
15631          */
15632         smp_rmb();
15633
15634         if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
15635                 /* We will not likely get the completion for the caller
15636                  * during this iteration, but that is acceptable.
15637                  * Future I/Os arriving on this eq should be able to
15638                  * pick it up.  Single I/Os are handled through a
15639                  * reschedule from the polling timer function, which
15640                  * currently fires every 1 msec.
15641                  */
15642                 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
15643
15644         return i;
15645 }
15646
15647 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15648 {
15649         struct lpfc_hba *phba = eq->phba;
15650
15651         /* kickstart slowpath processing if needed */
15652         if (list_empty(&phba->poll_list))
15653                 mod_timer(&phba->cpuhp_poll_timer,
15654                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15655
15656         list_add_rcu(&eq->_poll_list, &phba->poll_list);
15657         synchronize_rcu();
15658 }
15659
15660 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15661 {
15662         struct lpfc_hba *phba = eq->phba;
15663
15664         /* Disable slowpath processing for this eq.  The eq is
15665          * kick-started by re-arming it as soon as possible.
15666          */
15667         list_del_rcu(&eq->_poll_list);
15668         synchronize_rcu();
15669
15670         if (list_empty(&phba->poll_list))
15671                 del_timer_sync(&phba->cpuhp_poll_timer);
15672 }
15673
15674 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15675 {
15676         struct lpfc_queue *eq, *next;
15677
15678         list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15679                 list_del(&eq->_poll_list);
15680
15681         INIT_LIST_HEAD(&phba->poll_list);
15682         synchronize_rcu();
15683 }
15684
15685 static inline void
15686 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15687 {
15688         if (mode == eq->mode)
15689                 return;
15690         /*
15691          * Currently this function is only called during a hotplug
15692          * event and the cpu on which this function is executing
15693          * is going offline.  By now the hotplug code has instructed
15694          * the scheduler to remove this cpu from the cpu active mask,
15695          * so we don't need to worry about being put aside by the
15696          * scheduler for a high-priority process.  Interrupts could
15697          * still come in, but they are known to retire ASAP.
15698          */
15699
15700         /* Disable polling in the fastpath */
15701         WRITE_ONCE(eq->mode, mode);
15702         /* flush out the store buffer */
15703         smp_wmb();
15704
15705         /*
15706          * Add this eq to the polling list and start polling. For
15707          * a grace period both the interrupt handler and the poller
15708          * will try to process the eq _but_ that's fine.  We have a
15709          * synchronization mechanism in place (queue_claimed) to
15710          * deal with it.  This is just a draining phase for the
15711          * interrupt handler (not the eq's), as we have guaranteed
15712          * through the barrier that all CPUs have seen the new
15713          * polled state, which effectively disables the rearming of
15714          * the EQ.  The whole idea is that the eq's die off
15715          * eventually, since we are not rearming them anymore.
15716          */
15717         mode ? lpfc_sli4_add_to_poll_list(eq) :
15718                lpfc_sli4_remove_from_poll_list(eq);
15719 }
15720
15721 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15722 {
15723         __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15724 }
15725
15726 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15727 {
15728         struct lpfc_hba *phba = eq->phba;
15729
15730         __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15731
15732         /* Kick-start the pending io's in h/w.
15733          * Once we switch back to interrupt processing on an eq,
15734          * the io completion path only rearms eq's when it
15735          * receives a completion.  But since the eq's are in a
15736          * disarmed state, no completion is received, which
15737          * creates a deadlock scenario.  Rearm here to break it.
15738          */
15739         phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15740 }
15741
15742 /**
15743  * lpfc_sli4_queue_free - free a queue structure and associated memory
15744  * @queue: The queue structure to free.
15745  *
15746  * This function frees a queue structure and the DMAable memory used for
15747  * the host resident queue. This function must be called after destroying the
15748  * queue on the HBA.
15749  **/
15750 void
15751 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15752 {
15753         struct lpfc_dmabuf *dmabuf;
15754
15755         if (!queue)
15756                 return;
15757
15758         if (!list_empty(&queue->wq_list))
15759                 list_del(&queue->wq_list);
15760
15761         while (!list_empty(&queue->page_list)) {
15762                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15763                                  list);
15764                 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15765                                   dmabuf->virt, dmabuf->phys);
15766                 kfree(dmabuf);
15767         }
15768         if (queue->rqbp) {
15769                 lpfc_free_rq_buffer(queue->phba, queue);
15770                 kfree(queue->rqbp);
15771         }
15772
15773         if (!list_empty(&queue->cpu_list))
15774                 list_del(&queue->cpu_list);
15775
15776         kfree(queue);
15777         return;
15778 }
15779
15780 /**
15781  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15782  * @phba: The HBA that this queue is being created on.
15783  * @page_size: The size of a queue page
15784  * @entry_size: The size of each queue entry for this queue.
15785  * @entry_count: The number of entries that this queue will handle.
15786  * @cpu: The cpu that will primarily utilize this queue.
15787  *
15788  * This function allocates a queue structure and the DMAable memory used for
15789  * the host resident queue. This function must be called before creating the
15790  * queue on the HBA.
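 *
 * Usage sketch (illustrative, modeled on the EQ allocations done at init
 * time; the sizing fields and cpu are the caller's):
 *
 *	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				      phba->sli4_hba.eq_esize,
 *				      phba->sli4_hba.eq_ecount, cpu);
 *	if (!qdesc)
 *		return -ENOMEM;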
15791  **/
15792 struct lpfc_queue *
15793 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15794                       uint32_t entry_size, uint32_t entry_count, int cpu)
15795 {
15796         struct lpfc_queue *queue;
15797         struct lpfc_dmabuf *dmabuf;
15798         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15799         uint16_t x, pgcnt;
15800
15801         if (!phba->sli4_hba.pc_sli4_params.supported)
15802                 hw_page_size = page_size;
15803
15804         pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15805
15806         /* If needed, adjust the page count to the max the adapter supports */
15807         if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15808                 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15809
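        /* Allocate the queue structure and its trailing array of per-page
         * pointers in a single node-local allocation; q_pgs is set to point
         * just past the structure below.
         */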
15810         queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15811                              GFP_KERNEL, cpu_to_node(cpu));
15812         if (!queue)
15813                 return NULL;
15814
15815         INIT_LIST_HEAD(&queue->list);
15816         INIT_LIST_HEAD(&queue->_poll_list);
15817         INIT_LIST_HEAD(&queue->wq_list);
15818         INIT_LIST_HEAD(&queue->wqfull_list);
15819         INIT_LIST_HEAD(&queue->page_list);
15820         INIT_LIST_HEAD(&queue->child_list);
15821         INIT_LIST_HEAD(&queue->cpu_list);
15822
15823         /* Set queue parameters now.  If the system cannot provide memory
15824          * resources, the free routine needs to know what was allocated.
15825          */
15826         queue->page_count = pgcnt;
15827         queue->q_pgs = (void **)&queue[1];
15828         queue->entry_cnt_per_pg = hw_page_size / entry_size;
15829         queue->entry_size = entry_size;
15830         queue->entry_count = entry_count;
15831         queue->page_size = hw_page_size;
15832         queue->phba = phba;
15833
15834         for (x = 0; x < queue->page_count; x++) {
15835                 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15836                                       dev_to_node(&phba->pcidev->dev));
15837                 if (!dmabuf)
15838                         goto out_fail;
15839                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15840                                                   hw_page_size, &dmabuf->phys,
15841                                                   GFP_KERNEL);
15842                 if (!dmabuf->virt) {
15843                         kfree(dmabuf);
15844                         goto out_fail;
15845                 }
15846                 dmabuf->buffer_tag = x;
15847                 list_add_tail(&dmabuf->list, &queue->page_list);
15848                 /* use lpfc_sli4_qe to index a particular entry in this page */
15849                 queue->q_pgs[x] = dmabuf->virt;
15850         }
15851         INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15852         INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15853         INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15854         INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15855
15856         /* notify_interval will be set during q creation */
15857
15858         return queue;
15859 out_fail:
15860         lpfc_sli4_queue_free(queue);
15861         return NULL;
15862 }
15863
15864 /**
15865  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15866  * @phba: HBA structure that indicates port to create a queue on.
15867  * @pci_barset: PCI BAR set flag.
15868  *
15869  * This function returns the host memory address to which the specified
15870  * PCI BAR set has been iomapped. The returned host memory address can
15871  * be NULL.
15872  */
15873 static void __iomem *
15874 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15875 {
15876         if (!phba->pcidev)
15877                 return NULL;
15878
15879         switch (pci_barset) {
15880         case WQ_PCI_BAR_0_AND_1:
15881                 return phba->pci_bar0_memmap_p;
15882         case WQ_PCI_BAR_2_AND_3:
15883                 return phba->pci_bar2_memmap_p;
15884         case WQ_PCI_BAR_4_AND_5:
15885                 return phba->pci_bar4_memmap_p;
15886         default:
15887                 break;
15888         }
15889         return NULL;
15890 }
15891
15892 /**
15893  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15894  * @phba: HBA structure that EQs are on.
15895  * @startq: The starting EQ index to modify
15896  * @numq: The number of EQs (consecutive indexes) to modify
15897  * @usdelay: amount of delay
15898  *
15899  * This function revises the EQ delay on 1 or more EQs. The EQ delay
15900  * is set either by writing to a register (if supported by the SLI Port)
15901  * or by mailbox command. The mailbox command allows several EQs to be
15902  * updated at once.
15903  *
15904  * The @phba struct is used to send a mailbox command to the HBA. The
15905  * @startq value is the starting EQ index to change. The @numq value
15906  * specifies how many consecutive EQ indexes, starting at @startq, are
15907  * to be changed. The mailbox command is issued in polled mode, so this
15908  * function waits for it to finish before returning.
15909  *
15910  * This function does not return a value. If the mailbox cannot be
15911  * allocated or the mailbox command fails, an error is logged; note that
15912  * on a mailbox failure some EQs may already have had their delay
15913  * multiplier changed.
15914  **/
15915 void
15916 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15917                          uint32_t numq, uint32_t usdelay)
15918 {
15919         struct lpfc_mbx_modify_eq_delay *eq_delay;
15920         LPFC_MBOXQ_t *mbox;
15921         struct lpfc_queue *eq;
15922         int cnt = 0, rc, length;
15923         uint32_t shdr_status, shdr_add_status;
15924         uint32_t dmult;
15925         int qidx;
15926         union lpfc_sli4_cfg_shdr *shdr;
15927
15928         if (startq >= phba->cfg_irq_chann)
15929                 return;
15930
15931         if (usdelay > 0xFFFF) {
15932                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15933                                 "6429 usdelay %d too large. Scaled down to "
15934                                 "0xFFFF.\n", usdelay);
15935                 usdelay = 0xFFFF;
15936         }
15937
15938         /* set values by EQ_DELAY register if supported */
15939         if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15940                 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15941                         eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15942                         if (!eq)
15943                                 continue;
15944
15945                         lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15946
15947                         if (++cnt >= numq)
15948                                 break;
15949                 }
15950                 return;
15951         }
15952
15953         /* Otherwise, set values by mailbox cmd */
15954
15955         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15956         if (!mbox) {
15957                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15958                                 "6428 Failed allocating mailbox cmd buffer."
15959                                 " EQ delay was not set.\n");
15960                 return;
15961         }
15962         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15963                   sizeof(struct lpfc_sli4_cfg_mhdr));
15964         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15965                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15966                          length, LPFC_SLI4_MBX_EMBED);
15967         eq_delay = &mbox->u.mqe.un.eq_delay;
15968
15969         /* Calculate delay multiplier from the maximum interrupts per second */
15970         dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15971         if (dmult)
15972                 dmult--;
15973         if (dmult > LPFC_DMULT_MAX)
15974                 dmult = LPFC_DMULT_MAX;
15975
15976         for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15977                 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15978                 if (!eq)
15979                         continue;
15980                 eq->q_mode = usdelay;
15981                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15982                 eq_delay->u.request.eq[cnt].phase = 0;
15983                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15984
15985                 if (++cnt >= numq)
15986                         break;
15987         }
15988         eq_delay->u.request.num_eq = cnt;
15989
15990         mbox->vport = phba->pport;
15991         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15992         mbox->ctx_buf = NULL;
15993         mbox->ctx_ndlp = NULL;
15994         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15995         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15996         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15997         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15998         if (shdr_status || shdr_add_status || rc) {
15999                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16000                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
16001                                 "status x%x add_status x%x, mbx status x%x\n",
16002                                 shdr_status, shdr_add_status, rc);
16003         }
16004         mempool_free(mbox, phba->mbox_mem_pool);
16005         return;
16006 }
16007
16008 /**
16009  * lpfc_eq_create - Create an Event Queue on the HBA
16010  * @phba: HBA structure that indicates port to create a queue on.
16011  * @eq: The queue structure to use to create the event queue.
16012  * @imax: The maximum interrupt per second limit.
16013  *
16014  * This function creates an event queue, as detailed in @eq, on a port,
16015  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16016  *
16017  * The @phba struct is used to send the mailbox command to the HBA. The
16018  * @eq struct is used to get the entry count and entry size that are
16019  * necessary to determine the number of pages to allocate and use for
16020  * this queue. This function sends the EQ_CREATE mailbox command to the
16021  * HBA to set up the event queue; the command is issued in polled mode,
16022  * so the function waits for it to finish before continuing.
16023  *
16024  * On success this function will return a zero. If unable to allocate enough
16025  * memory this function will return -ENOMEM. If the queue create mailbox command
16026  * fails this function will return -ENXIO.
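 *
 * Usage sketch (illustrative): the EQ memory is allocated first with
 * lpfc_sli4_queue_alloc() and the queue is then created on the port,
 * for example:
 *
 *	rc = lpfc_eq_create(phba, qdesc, phba->cfg_fcp_imax);
 *
 * where qdesc came from lpfc_sli4_queue_alloc() and cfg_fcp_imax is the
 * configured interrupts-per-second cap.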
16027  **/
16028 int
16029 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16030 {
16031         struct lpfc_mbx_eq_create *eq_create;
16032         LPFC_MBOXQ_t *mbox;
16033         int rc, length, status = 0;
16034         struct lpfc_dmabuf *dmabuf;
16035         uint32_t shdr_status, shdr_add_status;
16036         union lpfc_sli4_cfg_shdr *shdr;
16037         uint16_t dmult;
16038         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16039
16040         /* sanity check on queue memory */
16041         if (!eq)
16042                 return -ENODEV;
16043         if (!phba->sli4_hba.pc_sli4_params.supported)
16044                 hw_page_size = SLI4_PAGE_SIZE;
16045
16046         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16047         if (!mbox)
16048                 return -ENOMEM;
16049         length = (sizeof(struct lpfc_mbx_eq_create) -
16050                   sizeof(struct lpfc_sli4_cfg_mhdr));
16051         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16052                          LPFC_MBOX_OPCODE_EQ_CREATE,
16053                          length, LPFC_SLI4_MBX_EMBED);
16054         eq_create = &mbox->u.mqe.un.eq_create;
16055         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16056         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16057                eq->page_count);
16058         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16059                LPFC_EQE_SIZE);
16060         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16061
16062         /* Use version 2 of CREATE_EQ if eqav is set */
16063         if (phba->sli4_hba.pc_sli4_params.eqav) {
16064                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16065                        LPFC_Q_CREATE_VERSION_2);
16066                 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16067                        phba->sli4_hba.pc_sli4_params.eqav);
16068         }
16069
16070         /* don't setup delay multiplier using EQ_CREATE */
16071         dmult = 0;
16072         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16073                dmult);
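        /*
         * Map the entry count onto the discrete EQ_CNT encodings.  Counts
         * below 256 are rejected; unsupported counts of 256 or more fall
         * through to the smallest (256) encoding.
         */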
16074         switch (eq->entry_count) {
16075         default:
16076                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16077                                 "0360 Unsupported EQ count. (%d)\n",
16078                                 eq->entry_count);
16079                 if (eq->entry_count < 256) {
16080                         status = -EINVAL;
16081                         goto out;
16082                 }
16083                 fallthrough;    /* otherwise default to smallest count */
16084         case 256:
16085                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16086                        LPFC_EQ_CNT_256);
16087                 break;
16088         case 512:
16089                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16090                        LPFC_EQ_CNT_512);
16091                 break;
16092         case 1024:
16093                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16094                        LPFC_EQ_CNT_1024);
16095                 break;
16096         case 2048:
16097                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16098                        LPFC_EQ_CNT_2048);
16099                 break;
16100         case 4096:
16101                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16102                        LPFC_EQ_CNT_4096);
16103                 break;
16104         }
16105         list_for_each_entry(dmabuf, &eq->page_list, list) {
16106                 memset(dmabuf->virt, 0, hw_page_size);
16107                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16108                                         putPaddrLow(dmabuf->phys);
16109                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16110                                         putPaddrHigh(dmabuf->phys);
16111         }
16112         mbox->vport = phba->pport;
16113         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16114         mbox->ctx_buf = NULL;
16115         mbox->ctx_ndlp = NULL;
16116         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
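        /* The IOCTL status is embedded in the mailbox subheader. */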
16117         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16118         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16119         if (shdr_status || shdr_add_status || rc) {
16120                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16121                                 "2500 EQ_CREATE mailbox failed with "
16122                                 "status x%x add_status x%x, mbx status x%x\n",
16123                                 shdr_status, shdr_add_status, rc);
16124                 status = -ENXIO;
16125         }
16126         eq->type = LPFC_EQ;
16127         eq->subtype = LPFC_NONE;
16128         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16129         if (eq->queue_id == 0xFFFF)
16130                 status = -ENXIO;
16131         eq->host_index = 0;
16132         eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16133         eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16134 out:
16135         mempool_free(mbox, phba->mbox_mem_pool);
16136         return status;
16137 }
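
/*
 * Editor's sketch (hypothetical, not part of the driver): the expected
 * calling pattern for lpfc_eq_create().  Assumes @eq was already allocated
 * via lpfc_sli4_queue_alloc() during queue setup and that the configured
 * interrupts-per-second cap lives in phba->cfg_fcp_imax.
 */
#if 0
static int lpfc_example_bring_up_eq(struct lpfc_hba *phba,
                                    struct lpfc_queue *eq)
{
        int rc;

        rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
        if (rc)         /* -ENODEV, -ENOMEM, -EINVAL or -ENXIO */
                return rc;

        /* eq->queue_id is now valid and the EQ is live on the port */
        return 0;
}
#endif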
16138
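/*
 * irq_poll callback for completion queue processing; runs the CQ in
 * LPFC_IRQ_POLL context and reports one unit of work consumed per pass.
 */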
16139 static int lpfc_cq_poll_handler(struct irq_poll *iop, int budget)
16140 {
16141         struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
16142
16143         __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
16144
16145         return 1;
16146 }
16147
16148 /**
16149  * lpfc_cq_create - Create a Completion Queue on the HBA
16150  * @phba: HBA structure that indicates port to create a queue on.
16151  * @cq: The queue structure to use to create the completion queue.
16152  * @eq: The event queue to bind this completion queue to.
16153  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16154  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16155  *
16156  * This function creates a completion queue, as detailed in @cq, on a port,
16157  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16158  *
16159  * The @phba struct is used to send the mailbox command to the HBA. The @cq
16160  * struct is used to get the entry count and entry size that are necessary to
16161  * determine the number of pages to allocate and use for this queue. The @eq
16162  * is used to indicate which event queue to bind this completion queue to. This
16163  * function will send the CQ_CREATE mailbox command to the HBA to set up the
16164  * completion queue. This function is synchronous; it polls the mailbox
16165  * command to completion before returning.
16166  *
16167  * On success this function will return zero. If unable to allocate enough
16168  * memory this function will return -ENOMEM. If the queue create mailbox command
16169  * fails this function will return -ENXIO.
16170  **/
16171 int
16172 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16173                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16174 {
16175         struct lpfc_mbx_cq_create *cq_create;
16176         struct lpfc_dmabuf *dmabuf;
16177         LPFC_MBOXQ_t *mbox;
16178         int rc, length, status = 0;
16179         uint32_t shdr_status, shdr_add_status;
16180         union lpfc_sli4_cfg_shdr *shdr;
16181
16182         /* sanity check on queue memory */
16183         if (!cq || !eq)
16184                 return -ENODEV;
16185
16186         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16187         if (!mbox)
16188                 return -ENOMEM;
16189         length = (sizeof(struct lpfc_mbx_cq_create) -
16190                   sizeof(struct lpfc_sli4_cfg_mhdr));
16191         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16192                          LPFC_MBOX_OPCODE_CQ_CREATE,
16193                          length, LPFC_SLI4_MBX_EMBED);
16194         cq_create = &mbox->u.mqe.un.cq_create;
16195         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16196         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16197                     cq->page_count);
16198         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16199         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16200         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16201                phba->sli4_hba.pc_sli4_params.cqv);
16202         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16203                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16204                        (cq->page_size / SLI4_PAGE_SIZE));
16205                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16206                        eq->queue_id);
16207                 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16208                        phba->sli4_hba.pc_sli4_params.cqav);
16209         } else {
16210                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16211                        eq->queue_id);
16212         }
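        /*
         * Entry counts of 2048 and 4096 can only be expressed through the
         * WORD7 count field of the version 2 CQ_CREATE mailbox; older ports
         * are limited to the discrete 256/512/1024 encodings.
         */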
16213         switch (cq->entry_count) {
16214         case 2048:
16215         case 4096:
16216                 if (phba->sli4_hba.pc_sli4_params.cqv ==
16217                     LPFC_Q_CREATE_VERSION_2) {
16218                         cq_create->u.request.context.lpfc_cq_context_count =
16219                                 cq->entry_count;
16220                         bf_set(lpfc_cq_context_count,
16221                                &cq_create->u.request.context,
16222                                LPFC_CQ_CNT_WORD7);
16223                         break;
16224                 }
16225                 fallthrough;
16226         default:
16227                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16228                                 "0361 Unsupported CQ count: "
16229                                 "entry cnt %d sz %d pg cnt %d\n",
16230                                 cq->entry_count, cq->entry_size,
16231                                 cq->page_count);
16232                 if (cq->entry_count < 256) {
16233                         status = -EINVAL;
16234                         goto out;
16235                 }
16236                 fallthrough;    /* otherwise default to smallest count */
16237         case 256:
16238                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16239                        LPFC_CQ_CNT_256);
16240                 break;
16241         case 512:
16242                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16243                        LPFC_CQ_CNT_512);
16244                 break;
16245         case 1024:
16246                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16247                        LPFC_CQ_CNT_1024);
16248                 break;
16249         }
16250         list_for_each_entry(dmabuf, &cq->page_list, list) {
16251                 memset(dmabuf->virt, 0, cq->page_size);
16252                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16253                                         putPaddrLow(dmabuf->phys);
16254                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16255                                         putPaddrHigh(dmabuf->phys);
16256         }
16257         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16258
16259         /* The IOCTL status is embedded in the mailbox subheader. */
16260         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16261         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16262         if (shdr_status || shdr_add_status || rc) {
16263                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16264                                 "2501 CQ_CREATE mailbox failed with "
16265                                 "status x%x add_status x%x, mbx status x%x\n",
16266                                 shdr_status, shdr_add_status, rc);
16267                 status = -ENXIO;
16268                 goto out;
16269         }
16270         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16271         if (cq->queue_id == 0xFFFF) {
16272                 status = -ENXIO;
16273                 goto out;
16274         }
16275         /* link the cq onto the parent eq child list */
16276         list_add_tail(&cq->list, &eq->child_list);
16277         /* Set up completion queue's type and subtype */
16278         cq->type = type;
16279         cq->subtype = subtype;
16281         cq->assoc_qid = eq->queue_id;
16282         cq->assoc_qp = eq;
16283         cq->host_index = 0;
16284         cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16285         cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16286
16287         if (cq->queue_id > phba->sli4_hba.cq_max)
16288                 phba->sli4_hba.cq_max = cq->queue_id;
16289
16290         irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_handler);
16291 out:
16292         mempool_free(mbox, phba->mbox_mem_pool);
16293         return status;
16294 }
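
/*
 * Editor's sketch (hypothetical, not part of the driver): pairing a
 * fast-path completion queue with hardware queue 0's EQ.  LPFC_WCQ and
 * LPFC_IO are the type/subtype used for I/O completion queues; assumes
 * @cq and the hdwq array were allocated during queue setup.
 */
#if 0
static int lpfc_example_create_io_cq(struct lpfc_hba *phba,
                                     struct lpfc_queue *cq)
{
        return lpfc_cq_create(phba, cq, phba->sli4_hba.hdwq[0].hba_eq,
                              LPFC_WCQ, LPFC_IO);
}
#endif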
16295
16296 /**
16297  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16298  * @phba: HBA structure that indicates port to create a queue on.
16299  * @cqp: The queue structure array to use to create the completion queues.
16300  * @hdwq: The hardware queue array with the EQs to bind completion queues to.
16301  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16302  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16303  *
16304  * This function creates a set of completion queues to support MRQ, as
16305  * detailed in @cqp, on a port described by @phba, by sending a
16306  * CREATE_CQ_SET mailbox command to the HBA.
16307  *
16308  * The @phba struct is used to send the mailbox command to the HBA. Each @cqp
16309  * entry is used to get the entry count and entry size that are necessary to
16310  * determine the number of pages to allocate and use for that queue. The EQ
16311  * in each @hdwq entry indicates which event queue the corresponding
16312  * completion queue is bound to. This function will send the CREATE_CQ_SET
16313  * mailbox command to the HBA to set up the completion queues. This function
16314  * is synchronous; it polls the mailbox command to completion before returning.
16315  *
16316  * On success this function will return zero. If unable to allocate enough
16317  * memory this function will return -ENOMEM. If the queue create mailbox command
16318  * fails this function will return -ENXIO.
16319  **/
16320 int
16321 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16322                    struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16323                    uint32_t subtype)
16324 {
16325         struct lpfc_queue *cq;
16326         struct lpfc_queue *eq;
16327         struct lpfc_mbx_cq_create_set *cq_set;
16328         struct lpfc_dmabuf *dmabuf;
16329         LPFC_MBOXQ_t *mbox;
16330         int rc, length, alloclen, status = 0;
16331         int cnt, idx, numcq, page_idx = 0;
16332         uint32_t shdr_status, shdr_add_status;
16333         union lpfc_sli4_cfg_shdr *shdr;
16334         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16335
16336         /* sanity check on queue memory */
16337         numcq = phba->cfg_nvmet_mrq;
16338         if (!cqp || !hdwq || !numcq)
16339                 return -ENODEV;
16340
16341         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16342         if (!mbox)
16343                 return -ENOMEM;
16344
16345         length = sizeof(struct lpfc_mbx_cq_create_set);
16346         length += ((numcq * cqp[0]->page_count) *
16347                    sizeof(struct dma_address));
16348         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16349                         LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16350                         LPFC_SLI4_MBX_NEMBED);
16351         if (alloclen < length) {
16352                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16353                                 "3098 Allocated DMA memory size (%d) is "
16354                                 "less than the requested DMA memory size "
16355                                 "(%d)\n", alloclen, length);
16356                 status = -ENOMEM;
16357                 goto out;
16358         }
16359         cq_set = mbox->sge_array->addr[0];
16360         shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16361         bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16362
16363         for (idx = 0; idx < numcq; idx++) {
16364                 cq = cqp[idx];
16365                 eq = hdwq[idx].hba_eq;
16366                 if (!cq || !eq) {
16367                         status = -ENOMEM;
16368                         goto out;
16369                 }
16370                 if (!phba->sli4_hba.pc_sli4_params.supported)
16371                         hw_page_size = cq->page_size;
16372
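                /*
                 * The CREATE_CQ_SET request carries one discrete EQ ID field
                 * per member CQ (eq_id0..eq_id15); the shared parameters are
                 * programmed along with member 0.
                 */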
16373                 switch (idx) {
16374                 case 0:
16375                         bf_set(lpfc_mbx_cq_create_set_page_size,
16376                                &cq_set->u.request,
16377                                (hw_page_size / SLI4_PAGE_SIZE));
16378                         bf_set(lpfc_mbx_cq_create_set_num_pages,
16379                                &cq_set->u.request, cq->page_count);
16380                         bf_set(lpfc_mbx_cq_create_set_evt,
16381                                &cq_set->u.request, 1);
16382                         bf_set(lpfc_mbx_cq_create_set_valid,
16383                                &cq_set->u.request, 1);
16384                         bf_set(lpfc_mbx_cq_create_set_cqe_size,
16385                                &cq_set->u.request, 0);
16386                         bf_set(lpfc_mbx_cq_create_set_num_cq,
16387                                &cq_set->u.request, numcq);
16388                         bf_set(lpfc_mbx_cq_create_set_autovalid,
16389                                &cq_set->u.request,
16390                                phba->sli4_hba.pc_sli4_params.cqav);
16391                         switch (cq->entry_count) {
16392                         case 2048:
16393                         case 4096:
16394                                 if (phba->sli4_hba.pc_sli4_params.cqv ==
16395                                     LPFC_Q_CREATE_VERSION_2) {
16396                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16397                                                &cq_set->u.request,
16398                                                 cq->entry_count);
16399                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16400                                                &cq_set->u.request,
16401                                                LPFC_CQ_CNT_WORD7);
16402                                         break;
16403                                 }
16404                                 fallthrough;
16405                         default:
16406                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16407                                                 "3118 Bad CQ count. (%d)\n",
16408                                                 cq->entry_count);
16409                                 if (cq->entry_count < 256) {
16410                                         status = -EINVAL;
16411                                         goto out;
16412                                 }
16413                                 fallthrough;    /* otherwise default to smallest */
16414                         case 256:
16415                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16416                                        &cq_set->u.request, LPFC_CQ_CNT_256);
16417                                 break;
16418                         case 512:
16419                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16420                                        &cq_set->u.request, LPFC_CQ_CNT_512);
16421                                 break;
16422                         case 1024:
16423                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16424                                        &cq_set->u.request, LPFC_CQ_CNT_1024);
16425                                 break;
16426                         }
16427                         bf_set(lpfc_mbx_cq_create_set_eq_id0,
16428                                &cq_set->u.request, eq->queue_id);
16429                         break;
16430                 case 1:
16431                         bf_set(lpfc_mbx_cq_create_set_eq_id1,
16432                                &cq_set->u.request, eq->queue_id);
16433                         break;
16434                 case 2:
16435                         bf_set(lpfc_mbx_cq_create_set_eq_id2,
16436                                &cq_set->u.request, eq->queue_id);
16437                         break;
16438                 case 3:
16439                         bf_set(lpfc_mbx_cq_create_set_eq_id3,
16440                                &cq_set->u.request, eq->queue_id);
16441                         break;
16442                 case 4:
16443                         bf_set(lpfc_mbx_cq_create_set_eq_id4,
16444                                &cq_set->u.request, eq->queue_id);
16445                         break;
16446                 case 5:
16447                         bf_set(lpfc_mbx_cq_create_set_eq_id5,
16448                                &cq_set->u.request, eq->queue_id);
16449                         break;
16450                 case 6:
16451                         bf_set(lpfc_mbx_cq_create_set_eq_id6,
16452                                &cq_set->u.request, eq->queue_id);
16453                         break;
16454                 case 7:
16455                         bf_set(lpfc_mbx_cq_create_set_eq_id7,
16456                                &cq_set->u.request, eq->queue_id);
16457                         break;
16458                 case 8:
16459                         bf_set(lpfc_mbx_cq_create_set_eq_id8,
16460                                &cq_set->u.request, eq->queue_id);
16461                         break;
16462                 case 9:
16463                         bf_set(lpfc_mbx_cq_create_set_eq_id9,
16464                                &cq_set->u.request, eq->queue_id);
16465                         break;
16466                 case 10:
16467                         bf_set(lpfc_mbx_cq_create_set_eq_id10,
16468                                &cq_set->u.request, eq->queue_id);
16469                         break;
16470                 case 11:
16471                         bf_set(lpfc_mbx_cq_create_set_eq_id11,
16472                                &cq_set->u.request, eq->queue_id);
16473                         break;
16474                 case 12:
16475                         bf_set(lpfc_mbx_cq_create_set_eq_id12,
16476                                &cq_set->u.request, eq->queue_id);
16477                         break;
16478                 case 13:
16479                         bf_set(lpfc_mbx_cq_create_set_eq_id13,
16480                                &cq_set->u.request, eq->queue_id);
16481                         break;
16482                 case 14:
16483                         bf_set(lpfc_mbx_cq_create_set_eq_id14,
16484                                &cq_set->u.request, eq->queue_id);
16485                         break;
16486                 case 15:
16487                         bf_set(lpfc_mbx_cq_create_set_eq_id15,
16488                                &cq_set->u.request, eq->queue_id);
16489                         break;
16490                 }
16491
16492                 /* link the cq onto the parent eq child list */
16493                 list_add_tail(&cq->list, &eq->child_list);
16494                 /* Set up completion queue's type and subtype */
16495                 cq->type = type;
16496                 cq->subtype = subtype;
16497                 cq->assoc_qid = eq->queue_id;
16498                 cq->assoc_qp = eq;
16499                 cq->host_index = 0;
16500                 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16501                 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16502                                          cq->entry_count);
16503                 cq->chann = idx;
16504
16505                 rc = 0;
16506                 list_for_each_entry(dmabuf, &cq->page_list, list) {
16507                         memset(dmabuf->virt, 0, hw_page_size);
16508                         cnt = page_idx + dmabuf->buffer_tag;
16509                         cq_set->u.request.page[cnt].addr_lo =
16510                                         putPaddrLow(dmabuf->phys);
16511                         cq_set->u.request.page[cnt].addr_hi =
16512                                         putPaddrHigh(dmabuf->phys);
16513                         rc++;
16514                 }
16515                 page_idx += rc;
16516         }
16517
16518         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16519
16520         /* The IOCTL status is embedded in the mailbox subheader. */
16521         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16522         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16523         if (shdr_status || shdr_add_status || rc) {
16524                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16525                                 "3119 CQ_CREATE_SET mailbox failed with "
16526                                 "status x%x add_status x%x, mbx status x%x\n",
16527                                 shdr_status, shdr_add_status, rc);
16528                 status = -ENXIO;
16529                 goto out;
16530         }
16531         rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16532         if (rc == 0xFFFF) {
16533                 status = -ENXIO;
16534                 goto out;
16535         }
16536
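        /*
         * CREATE_CQ_SET returns a single base queue ID; the member CQs are
         * numbered contiguously from it.
         */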
16537         for (idx = 0; idx < numcq; idx++) {
16538                 cq = cqp[idx];
16539                 cq->queue_id = rc + idx;
16540                 if (cq->queue_id > phba->sli4_hba.cq_max)
16541                         phba->sli4_hba.cq_max = cq->queue_id;
16542         }
16543
16544 out:
16545         lpfc_sli4_mbox_cmd_free(phba, mbox);
16546         return status;
16547 }
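
/*
 * Editor's sketch (hypothetical, not part of the driver): creating the
 * NVMET MRQ completion-queue set in a single mailbox round trip.  Assumes
 * phba->sli4_hba.nvmet_cqset holds phba->cfg_nvmet_mrq preallocated
 * queues, one per hardware queue EQ.
 */
#if 0
static int lpfc_example_create_nvmet_cq_set(struct lpfc_hba *phba)
{
        return lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
                                  phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);
}
#endif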
16548
16549 /**
16550  * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
16551  * @phba: HBA structure that indicates port to create a queue on.
16552  * @mq: The queue structure to use to create the mailbox queue.
16553  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16554  * @cq: The completion queue to associate with this mq.
16555  *
16556  * This function provides fallback (fb) functionality when
16557  * mq_create_ext fails on older FW generations.  Its purpose is identical
16558  * to mq_create_ext otherwise.
16559  *
16560  * This routine cannot fail as all attributes were previously accessed and
16561  * initialized in mq_create_ext.
16562  **/
16563 static void
16564 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16565                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16566 {
16567         struct lpfc_mbx_mq_create *mq_create;
16568         struct lpfc_dmabuf *dmabuf;
16569         int length;
16570
16571         length = (sizeof(struct lpfc_mbx_mq_create) -
16572                   sizeof(struct lpfc_sli4_cfg_mhdr));
16573         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16574                          LPFC_MBOX_OPCODE_MQ_CREATE,
16575                          length, LPFC_SLI4_MBX_EMBED);
16576         mq_create = &mbox->u.mqe.un.mq_create;
16577         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16578                mq->page_count);
16579         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16580                cq->queue_id);
16581         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16582         switch (mq->entry_count) {
16583         case 16:
16584                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16585                        LPFC_MQ_RING_SIZE_16);
16586                 break;
16587         case 32:
16588                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16589                        LPFC_MQ_RING_SIZE_32);
16590                 break;
16591         case 64:
16592                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16593                        LPFC_MQ_RING_SIZE_64);
16594                 break;
16595         case 128:
16596                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16597                        LPFC_MQ_RING_SIZE_128);
16598                 break;
16599         }
16600         list_for_each_entry(dmabuf, &mq->page_list, list) {
16601                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16602                         putPaddrLow(dmabuf->phys);
16603                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16604                         putPaddrHigh(dmabuf->phys);
16605         }
16606 }
16607
16608 /**
16609  * lpfc_mq_create - Create a mailbox Queue on the HBA
16610  * @phba: HBA structure that indicates port to create a queue on.
16611  * @mq: The queue structure to use to create the mailbox queue.
16612  * @cq: The completion queue to associate with this mq.
16613  * @subtype: The queue's subtype.
16614  *
16615  * This function creates a mailbox queue, as detailed in @mq, on a port,
16616  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16617  *
16618  * The @phba struct is used to send the mailbox command to the HBA. The @mq
16619  * struct is used to get the entry count and entry size that are necessary to
16620  * determine the number of pages to allocate and use for this queue. This
16621  * function will send the MQ_CREATE_EXT mailbox command (falling back to
16622  * MQ_CREATE on older firmware) to set up the mailbox queue. This function is
16623  * synchronous; it polls the mailbox command to completion before returning.
16624  *
16625  * On success this function will return zero. If unable to allocate enough
16626  * memory this function will return -ENOMEM. If the queue create mailbox command
16627  * fails this function will return -ENXIO.
16628  **/
16629 int32_t
16630 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16631                struct lpfc_queue *cq, uint32_t subtype)
16632 {
16633         struct lpfc_mbx_mq_create *mq_create;
16634         struct lpfc_mbx_mq_create_ext *mq_create_ext;
16635         struct lpfc_dmabuf *dmabuf;
16636         LPFC_MBOXQ_t *mbox;
16637         int rc, length, status = 0;
16638         uint32_t shdr_status, shdr_add_status;
16639         union lpfc_sli4_cfg_shdr *shdr;
16640         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16641
16642         /* sanity check on queue memory */
16643         if (!mq || !cq)
16644                 return -ENODEV;
16645         if (!phba->sli4_hba.pc_sli4_params.supported)
16646                 hw_page_size = SLI4_PAGE_SIZE;
16647
16648         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16649         if (!mbox)
16650                 return -ENOMEM;
16651         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16652                   sizeof(struct lpfc_sli4_cfg_mhdr));
16653         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16654                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16655                          length, LPFC_SLI4_MBX_EMBED);
16656
16657         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16658         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16659         bf_set(lpfc_mbx_mq_create_ext_num_pages,
16660                &mq_create_ext->u.request, mq->page_count);
16661         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16662                &mq_create_ext->u.request, 1);
16663         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16664                &mq_create_ext->u.request, 1);
16665         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16666                &mq_create_ext->u.request, 1);
16667         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16668                &mq_create_ext->u.request, 1);
16669         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16670                &mq_create_ext->u.request, 1);
16671         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16672         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16673                phba->sli4_hba.pc_sli4_params.mqv);
16674         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16675                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16676                        cq->queue_id);
16677         else
16678                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16679                        cq->queue_id);
16680         switch (mq->entry_count) {
16681         default:
16682                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16683                                 "0362 Unsupported MQ count. (%d)\n",
16684                                 mq->entry_count);
16685                 if (mq->entry_count < 16) {
16686                         status = -EINVAL;
16687                         goto out;
16688                 }
16689                 fallthrough;    /* otherwise default to smallest count */
16690         case 16:
16691                 bf_set(lpfc_mq_context_ring_size,
16692                        &mq_create_ext->u.request.context,
16693                        LPFC_MQ_RING_SIZE_16);
16694                 break;
16695         case 32:
16696                 bf_set(lpfc_mq_context_ring_size,
16697                        &mq_create_ext->u.request.context,
16698                        LPFC_MQ_RING_SIZE_32);
16699                 break;
16700         case 64:
16701                 bf_set(lpfc_mq_context_ring_size,
16702                        &mq_create_ext->u.request.context,
16703                        LPFC_MQ_RING_SIZE_64);
16704                 break;
16705         case 128:
16706                 bf_set(lpfc_mq_context_ring_size,
16707                        &mq_create_ext->u.request.context,
16708                        LPFC_MQ_RING_SIZE_128);
16709                 break;
16710         }
16711         list_for_each_entry(dmabuf, &mq->page_list, list) {
16712                 memset(dmabuf->virt, 0, hw_page_size);
16713                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16714                                         putPaddrLow(dmabuf->phys);
16715                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16716                                         putPaddrHigh(dmabuf->phys);
16717         }
16718         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16719         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16720                               &mq_create_ext->u.response);
16721         if (rc != MBX_SUCCESS) {
16722                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16723                                 "2795 MQ_CREATE_EXT failed with "
16724                                 "status x%x. Falling back to MQ_CREATE.\n",
16725                                 rc);
16726                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16727                 mq_create = &mbox->u.mqe.un.mq_create;
16728                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16729                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16730                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16731                                       &mq_create->u.response);
16732         }
16733
16734         /* The IOCTL status is embedded in the mailbox subheader. */
16735         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16736         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16737         if (shdr_status || shdr_add_status || rc) {
16738                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16739                                 "2502 MQ_CREATE mailbox failed with "
16740                                 "status x%x add_status x%x, mbx status x%x\n",
16741                                 shdr_status, shdr_add_status, rc);
16742                 status = -ENXIO;
16743                 goto out;
16744         }
16745         if (mq->queue_id == 0xFFFF) {
16746                 status = -ENXIO;
16747                 goto out;
16748         }
16749         mq->type = LPFC_MQ;
16750         mq->assoc_qid = cq->queue_id;
16751         mq->subtype = subtype;
16752         mq->host_index = 0;
16753         mq->hba_index = 0;
16754
16755         /* link the mq onto the parent cq child list */
16756         list_add_tail(&mq->list, &cq->child_list);
16757 out:
16758         mempool_free(mbox, phba->mbox_mem_pool);
16759         return status;
16760 }
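
/*
 * Editor's sketch (hypothetical, not part of the driver): the single
 * mailbox queue is bound to the slow-path mailbox CQ.  Assumes the
 * sli4_hba mbx_wq/mbx_cq pair was allocated during queue setup.
 */
#if 0
static int lpfc_example_create_mq(struct lpfc_hba *phba)
{
        return lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
                              phba->sli4_hba.mbx_cq, LPFC_MBOX);
}
#endif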
16761
16762 /**
16763  * lpfc_wq_create - Create a Work Queue on the HBA
16764  * @phba: HBA structure that indicates port to create a queue on.
16765  * @wq: The queue structure to use to create the work queue.
16766  * @cq: The completion queue to bind this work queue to.
16767  * @subtype: The subtype of the work queue indicating its functionality.
16768  *
16769  * This function creates a work queue, as detailed in @wq, on a port, described
16770  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16771  *
16772  * The @phba struct is used to send the mailbox command to the HBA. The @wq
16773  * struct is used to get the entry count and entry size that are necessary to
16774  * determine the number of pages to allocate and use for this queue. The @cq
16775  * is used to indicate which completion queue to bind this work queue to. This
16776  * function will send the WQ_CREATE mailbox command to the HBA to set up the
16777  * work queue. This function is synchronous; it polls the mailbox command
16778  * to completion before returning.
16779  *
16780  * On success this function will return zero. If unable to allocate enough
16781  * memory this function will return -ENOMEM. If the queue create mailbox command
16782  * fails this function will return -ENXIO.
16783  **/
16784 int
16785 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16786                struct lpfc_queue *cq, uint32_t subtype)
16787 {
16788         struct lpfc_mbx_wq_create *wq_create;
16789         struct lpfc_dmabuf *dmabuf;
16790         LPFC_MBOXQ_t *mbox;
16791         int rc, length, status = 0;
16792         uint32_t shdr_status, shdr_add_status;
16793         union lpfc_sli4_cfg_shdr *shdr;
16794         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16795         struct dma_address *page;
16796         void __iomem *bar_memmap_p;
16797         uint32_t db_offset;
16798         uint16_t pci_barset;
16799         uint8_t dpp_barset;
16800         uint32_t dpp_offset;
16801         uint8_t wq_create_version;
16802 #ifdef CONFIG_X86
16803         unsigned long pg_addr;
16804 #endif
16805
16806         /* sanity check on queue memory */
16807         if (!wq || !cq)
16808                 return -ENODEV;
16809         if (!phba->sli4_hba.pc_sli4_params.supported)
16810                 hw_page_size = wq->page_size;
16811
16812         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16813         if (!mbox)
16814                 return -ENOMEM;
16815         length = (sizeof(struct lpfc_mbx_wq_create) -
16816                   sizeof(struct lpfc_sli4_cfg_mhdr));
16817         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16818                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16819                          length, LPFC_SLI4_MBX_EMBED);
16820         wq_create = &mbox->u.mqe.un.wq_create;
16821         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16822         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16823                     wq->page_count);
16824         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16825                     cq->queue_id);
16826
16827         /* wqv is the earliest version supported, NOT the latest */
16828         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16829                phba->sli4_hba.pc_sli4_params.wqv);
16830
16831         if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16832             (wq->page_size > SLI4_PAGE_SIZE))
16833                 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16834         else
16835                 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16836
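        /*
         * Version 1 of WQ_CREATE is required to express 128-byte WQEs and
         * page sizes larger than SLI4_PAGE_SIZE; version 0 only supports
         * the 64-byte WQE, 4KB-page defaults.
         */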
16837         switch (wq_create_version) {
16838         case LPFC_Q_CREATE_VERSION_1:
16839                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16840                        wq->entry_count);
16841                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16842                        LPFC_Q_CREATE_VERSION_1);
16843
16844                 switch (wq->entry_size) {
16845                 default:
16846                 case 64:
16847                         bf_set(lpfc_mbx_wq_create_wqe_size,
16848                                &wq_create->u.request_1,
16849                                LPFC_WQ_WQE_SIZE_64);
16850                         break;
16851                 case 128:
16852                         bf_set(lpfc_mbx_wq_create_wqe_size,
16853                                &wq_create->u.request_1,
16854                                LPFC_WQ_WQE_SIZE_128);
16855                         break;
16856                 }
16857                 /* Request DPP by default */
16858                 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16859                 bf_set(lpfc_mbx_wq_create_page_size,
16860                        &wq_create->u.request_1,
16861                        (wq->page_size / SLI4_PAGE_SIZE));
16862                 page = wq_create->u.request_1.page;
16863                 break;
16864         default:
16865                 page = wq_create->u.request.page;
16866                 break;
16867         }
16868
16869         list_for_each_entry(dmabuf, &wq->page_list, list) {
16870                 memset(dmabuf->virt, 0, hw_page_size);
16871                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16872                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16873         }
16874
16875         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16876                 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16877
16878         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16879         /* The IOCTL status is embedded in the mailbox subheader. */
16880         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16881         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16882         if (shdr_status || shdr_add_status || rc) {
16883                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16884                                 "2503 WQ_CREATE mailbox failed with "
16885                                 "status x%x add_status x%x, mbx status x%x\n",
16886                                 shdr_status, shdr_add_status, rc);
16887                 status = -ENXIO;
16888                 goto out;
16889         }
16890
16891         if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16892                 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16893                                         &wq_create->u.response);
16894         else
16895                 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16896                                         &wq_create->u.response_1);
16897
16898         if (wq->queue_id == 0xFFFF) {
16899                 status = -ENXIO;
16900                 goto out;
16901         }
16902
16903         wq->db_format = LPFC_DB_LIST_FORMAT;
16904         if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16905                 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16906                         wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16907                                                &wq_create->u.response);
16908                         if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16909                             (wq->db_format != LPFC_DB_RING_FORMAT)) {
16910                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16911                                                 "3265 WQ[%d] doorbell format "
16912                                                 "not supported: x%x\n",
16913                                                 wq->queue_id, wq->db_format);
16914                                 status = -EINVAL;
16915                                 goto out;
16916                         }
16917                         pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16918                                             &wq_create->u.response);
16919                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16920                                                                    pci_barset);
16921                         if (!bar_memmap_p) {
16922                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16923                                                 "3263 WQ[%d] failed to memmap "
16924                                                 "pci barset:x%x\n",
16925                                                 wq->queue_id, pci_barset);
16926                                 status = -ENOMEM;
16927                                 goto out;
16928                         }
16929                         db_offset = wq_create->u.response.doorbell_offset;
16930                         if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16931                             (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16932                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16933                                                 "3252 WQ[%d] doorbell offset "
16934                                                 "not supported: x%x\n",
16935                                                 wq->queue_id, db_offset);
16936                                 status = -EINVAL;
16937                                 goto out;
16938                         }
16939                         wq->db_regaddr = bar_memmap_p + db_offset;
16940                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16941                                         "3264 WQ[%d]: barset:x%x, offset:x%x, "
16942                                         "format:x%x\n", wq->queue_id,
16943                                         pci_barset, db_offset, wq->db_format);
16944                 } else
16945                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16946         } else {
16947                 /* Check if DPP was honored by the firmware */
16948                 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16949                                     &wq_create->u.response_1);
16950                 if (wq->dpp_enable) {
16951                         pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16952                                             &wq_create->u.response_1);
16953                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16954                                                                    pci_barset);
16955                         if (!bar_memmap_p) {
16956                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16957                                                 "3267 WQ[%d] failed to memmap "
16958                                                 "pci barset:x%x\n",
16959                                                 wq->queue_id, pci_barset);
16960                                 status = -ENOMEM;
16961                                 goto out;
16962                         }
16963                         db_offset = wq_create->u.response_1.doorbell_offset;
16964                         wq->db_regaddr = bar_memmap_p + db_offset;
16965                         wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16966                                             &wq_create->u.response_1);
16967                         dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16968                                             &wq_create->u.response_1);
16969                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16970                                                                    dpp_barset);
16971                         if (!bar_memmap_p) {
16972                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16973                                                 "3268 WQ[%d] failed to memmap "
16974                                                 "pci barset:x%x\n",
16975                                                 wq->queue_id, dpp_barset);
16976                                 status = -ENOMEM;
16977                                 goto out;
16978                         }
16979                         dpp_offset = wq_create->u.response_1.dpp_offset;
16980                         wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16981                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16982                                         "3271 WQ[%d]: barset:x%x, offset:x%x, "
16983                                         "dpp_id:x%x dpp_barset:x%x "
16984                                         "dpp_offset:x%x\n",
16985                                         wq->queue_id, pci_barset, db_offset,
16986                                         wq->dpp_id, dpp_barset, dpp_offset);
16987
16988 #ifdef CONFIG_X86
16989                         /* Enable combined writes for DPP aperture */
16990                         pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16991                         rc = set_memory_wc(pg_addr, 1);
16992                         if (rc) {
16993                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16994                                         "3272 Cannot setup Combined "
16995                                         "Write on WQ[%d] - disable DPP\n",
16996                                         wq->queue_id);
16997                                 phba->cfg_enable_dpp = 0;
16998                         }
16999 #else
17000                         phba->cfg_enable_dpp = 0;
17001 #endif
17002                 } else
17003                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17004         }
17005         wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
17006         if (wq->pring == NULL) {
17007                 status = -ENOMEM;
17008                 goto out;
17009         }
17010         wq->type = LPFC_WQ;
17011         wq->assoc_qid = cq->queue_id;
17012         wq->subtype = subtype;
17013         wq->host_index = 0;
17014         wq->hba_index = 0;
17015         wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17016
17017         /* link the wq onto the parent cq child list */
17018         list_add_tail(&wq->list, &cq->child_list);
17019 out:
17020         mempool_free(mbox, phba->mbox_mem_pool);
17021         return status;
17022 }
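
/*
 * Editor's sketch (hypothetical, not part of the driver): binding a
 * fast-path I/O work queue to the matching hardware queue's I/O CQ.
 * Assumes hdwq[idx].io_wq/io_cq were allocated during queue setup.
 */
#if 0
static int lpfc_example_create_io_wq(struct lpfc_hba *phba, int idx)
{
        return lpfc_wq_create(phba, phba->sli4_hba.hdwq[idx].io_wq,
                              phba->sli4_hba.hdwq[idx].io_cq, LPFC_IO);
}
#endif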
17023
17024 /**
17025  * lpfc_rq_create - Create a Receive Queue on the HBA
17026  * @phba: HBA structure that indicates port to create a queue on.
17027  * @hrq: The queue structure to use to create the header receive queue.
17028  * @drq: The queue structure to use to create the data receive queue.
17029  * @cq: The completion queue to bind these receive queues to.
17030  * @subtype: The subtype of the receive queues indicating their functionality.
17031  *
17032  * This function creates a receive buffer queue pair, as detailed in @hrq and
17033  * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
17034  * to the HBA.
17035  *
17036  * The @phba struct is used to send the mailbox command to the HBA. The @hrq
17037  * and @drq structs are used to get the entry count that is necessary to
17038  * determine the number of pages to use for each queue. The @cq is used to
17039  * indicate which completion queue the buffers posted to these queues are
17040  * bound to. This function will send the RQ_CREATE mailbox command to the HBA
17041  * to set up the receive queue pair. This function is synchronous; it polls
17042  * the mailbox command to completion before returning.
17043  *
17044  * On success this function will return zero. If unable to allocate enough
17045  * memory this function will return -ENOMEM. If the queue create mailbox command
17046  * fails this function will return -ENXIO.
17047  **/
17048 int
17049 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17050                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17051 {
17052         struct lpfc_mbx_rq_create *rq_create;
17053         struct lpfc_dmabuf *dmabuf;
17054         LPFC_MBOXQ_t *mbox;
17055         int rc, length, status = 0;
17056         uint32_t shdr_status, shdr_add_status;
17057         union lpfc_sli4_cfg_shdr *shdr;
17058         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17059         void __iomem *bar_memmap_p;
17060         uint32_t db_offset;
17061         uint16_t pci_barset;
17062
17063         /* sanity check on queue memory */
17064         if (!hrq || !drq || !cq)
17065                 return -ENODEV;
17066         if (!phba->sli4_hba.pc_sli4_params.supported)
17067                 hw_page_size = SLI4_PAGE_SIZE;
17068
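        /*
         * Header and data RQEs are consumed in lockstep, one of each per
         * received frame, so the two queues must be the same depth.
         */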
17069         if (hrq->entry_count != drq->entry_count)
17070                 return -EINVAL;
17071         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17072         if (!mbox)
17073                 return -ENOMEM;
17074         length = (sizeof(struct lpfc_mbx_rq_create) -
17075                   sizeof(struct lpfc_sli4_cfg_mhdr));
17076         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17077                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17078                          length, LPFC_SLI4_MBX_EMBED);
17079         rq_create = &mbox->u.mqe.un.rq_create;
17080         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17081         bf_set(lpfc_mbox_hdr_version, &shdr->request,
17082                phba->sli4_hba.pc_sli4_params.rqv);
17083         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17084                 bf_set(lpfc_rq_context_rqe_count_1,
17085                        &rq_create->u.request.context,
17086                        hrq->entry_count);
17087                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17088                 bf_set(lpfc_rq_context_rqe_size,
17089                        &rq_create->u.request.context,
17090                        LPFC_RQE_SIZE_8);
17091                 bf_set(lpfc_rq_context_page_size,
17092                        &rq_create->u.request.context,
17093                        LPFC_RQ_PAGE_SIZE_4096);
17094         } else {
17095                 switch (hrq->entry_count) {
17096                 default:
17097                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17098                                         "2535 Unsupported RQ count. (%d)\n",
17099                                         hrq->entry_count);
17100                         if (hrq->entry_count < 512) {
17101                                 status = -EINVAL;
17102                                 goto out;
17103                         }
17104                         fallthrough;    /* otherwise default to smallest count */
17105                 case 512:
17106                         bf_set(lpfc_rq_context_rqe_count,
17107                                &rq_create->u.request.context,
17108                                LPFC_RQ_RING_SIZE_512);
17109                         break;
17110                 case 1024:
17111                         bf_set(lpfc_rq_context_rqe_count,
17112                                &rq_create->u.request.context,
17113                                LPFC_RQ_RING_SIZE_1024);
17114                         break;
17115                 case 2048:
17116                         bf_set(lpfc_rq_context_rqe_count,
17117                                &rq_create->u.request.context,
17118                                LPFC_RQ_RING_SIZE_2048);
17119                         break;
17120                 case 4096:
17121                         bf_set(lpfc_rq_context_rqe_count,
17122                                &rq_create->u.request.context,
17123                                LPFC_RQ_RING_SIZE_4096);
17124                         break;
17125                 }
17126                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17127                        LPFC_HDR_BUF_SIZE);
17128         }
17129         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17130                cq->queue_id);
17131         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17132                hrq->page_count);
17133         list_for_each_entry(dmabuf, &hrq->page_list, list) {
17134                 memset(dmabuf->virt, 0, hw_page_size);
17135                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17136                                         putPaddrLow(dmabuf->phys);
17137                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17138                                         putPaddrHigh(dmabuf->phys);
17139         }
17140         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17141                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17142
17143         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17144         /* The IOCTL status is embedded in the mailbox subheader. */
17145         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17146         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17147         if (shdr_status || shdr_add_status || rc) {
17148                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17149                                 "2504 RQ_CREATE mailbox failed with "
17150                                 "status x%x add_status x%x, mbx status x%x\n",
17151                                 shdr_status, shdr_add_status, rc);
17152                 status = -ENXIO;
17153                 goto out;
17154         }
17155         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17156         if (hrq->queue_id == 0xFFFF) {
17157                 status = -ENXIO;
17158                 goto out;
17159         }
17160
17161         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17162                 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17163                                         &rq_create->u.response);
17164                 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17165                     (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17166                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17167                                         "3262 RQ [%d] doorbell format not "
17168                                         "supported: x%x\n", hrq->queue_id,
17169                                         hrq->db_format);
17170                         status = -EINVAL;
17171                         goto out;
17172                 }
17173
17174                 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17175                                     &rq_create->u.response);
17176                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17177                 if (!bar_memmap_p) {
17178                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17179                                         "3269 RQ[%d] failed to memmap pci "
17180                                         "barset:x%x\n", hrq->queue_id,
17181                                         pci_barset);
17182                         status = -ENOMEM;
17183                         goto out;
17184                 }
17185
17186                 db_offset = rq_create->u.response.doorbell_offset;
17187                 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17188                     (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17189                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17190                                         "3270 RQ[%d] doorbell offset not "
17191                                         "supported: x%x\n", hrq->queue_id,
17192                                         db_offset);
17193                         status = -EINVAL;
17194                         goto out;
17195                 }
17196                 hrq->db_regaddr = bar_memmap_p + db_offset;
17197                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17198                                 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17199                                 "format:x%x\n", hrq->queue_id, pci_barset,
17200                                 db_offset, hrq->db_format);
17201         } else {
17202                 hrq->db_format = LPFC_DB_RING_FORMAT;
17203                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17204         }
17205         hrq->type = LPFC_HRQ;
17206         hrq->assoc_qid = cq->queue_id;
17207         hrq->subtype = subtype;
17208         hrq->host_index = 0;
17209         hrq->hba_index = 0;
17210         hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17211
17212         /* now create the data queue */
17213         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17214                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17215                          length, LPFC_SLI4_MBX_EMBED);
17216         bf_set(lpfc_mbox_hdr_version, &shdr->request,
17217                phba->sli4_hba.pc_sli4_params.rqv);
17218         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17219                 bf_set(lpfc_rq_context_rqe_count_1,
17220                        &rq_create->u.request.context, hrq->entry_count);
17221                 if (subtype == LPFC_NVMET)
17222                         rq_create->u.request.context.buffer_size =
17223                                 LPFC_NVMET_DATA_BUF_SIZE;
17224                 else
17225                         rq_create->u.request.context.buffer_size =
17226                                 LPFC_DATA_BUF_SIZE;
17227                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17228                        LPFC_RQE_SIZE_8);
17229                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17230                        (PAGE_SIZE/SLI4_PAGE_SIZE));
17231         } else {
17232                 switch (drq->entry_count) {
17233                 default:
17234                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17235                                         "2536 Unsupported RQ count. (%d)\n",
17236                                         drq->entry_count);
17237                         if (drq->entry_count < 512) {
17238                                 status = -EINVAL;
17239                                 goto out;
17240                         }
17241                         fallthrough;    /* otherwise default to smallest count */
17242                 case 512:
17243                         bf_set(lpfc_rq_context_rqe_count,
17244                                &rq_create->u.request.context,
17245                                LPFC_RQ_RING_SIZE_512);
17246                         break;
17247                 case 1024:
17248                         bf_set(lpfc_rq_context_rqe_count,
17249                                &rq_create->u.request.context,
17250                                LPFC_RQ_RING_SIZE_1024);
17251                         break;
17252                 case 2048:
17253                         bf_set(lpfc_rq_context_rqe_count,
17254                                &rq_create->u.request.context,
17255                                LPFC_RQ_RING_SIZE_2048);
17256                         break;
17257                 case 4096:
17258                         bf_set(lpfc_rq_context_rqe_count,
17259                                &rq_create->u.request.context,
17260                                LPFC_RQ_RING_SIZE_4096);
17261                         break;
17262                 }
17263                 if (subtype == LPFC_NVMET)
17264                         bf_set(lpfc_rq_context_buf_size,
17265                                &rq_create->u.request.context,
17266                                LPFC_NVMET_DATA_BUF_SIZE);
17267                 else
17268                         bf_set(lpfc_rq_context_buf_size,
17269                                &rq_create->u.request.context,
17270                                LPFC_DATA_BUF_SIZE);
17271         }
17272         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17273                cq->queue_id);
17274         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17275                drq->page_count);
17276         list_for_each_entry(dmabuf, &drq->page_list, list) {
17277                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17278                                         putPaddrLow(dmabuf->phys);
17279                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17280                                         putPaddrHigh(dmabuf->phys);
17281         }
17282         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17283                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17284         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17285         /* The IOCTL status is embedded in the mailbox subheader. */
17286         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17287         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17288         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17289         if (shdr_status || shdr_add_status || rc) {
17290                 status = -ENXIO;
17291                 goto out;
17292         }
17293         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17294         if (drq->queue_id == 0xFFFF) {
17295                 status = -ENXIO;
17296                 goto out;
17297         }
17298         drq->type = LPFC_DRQ;
17299         drq->assoc_qid = cq->queue_id;
17300         drq->subtype = subtype;
17301         drq->host_index = 0;
17302         drq->hba_index = 0;
17303         drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17304
17305         /* link the header and data RQs onto the parent cq child list */
17306         list_add_tail(&hrq->list, &cq->child_list);
17307         list_add_tail(&drq->list, &cq->child_list);
17308
17309 out:
17310         mempool_free(mbox, phba->mbox_mem_pool);
17311         return status;
17312 }
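
/*
 * A minimal usage sketch for lpfc_rq_create() (illustrative only: the
 * variable names are assumptions, the queues are presumed to have been
 * allocated beforehand by the driver's queue-allocation path, and the
 * subtype value is chosen for illustration):
 *
 *	struct lpfc_queue *hrq, *drq;	// pre-allocated header/data RQs
 *	struct lpfc_queue *cq;		// previously created completion queue
 *	int rc;
 *
 *	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_ELS);
 *	if (rc)
 *		return rc;	// -ENOMEM, -ENXIO or -EINVAL as documented
 *	// On success hrq->queue_id and drq->queue_id are valid and both
 *	// queues are linked on cq->child_list for later teardown.
 */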
17313
17314 /**
17315  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17316  * @phba: HBA structure that indicates port to create a queue on.
17317  * @hrqp: The queue structure array to use to create the header receive queues.
17318  * @drqp: The queue structure array to use to create the data receive queues.
17319  * @cqp: The completion queue array to bind these receive queues to.
17320  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17321  *
17322  * This function creates the receive buffer queue pairs described by the
17323  * @hrqp and @drqp arrays on the port described by @phba, by sending an
17324  * RQ_CREATE mailbox command to the HBA.
17325  *
17326  * The @phba struct is used to send the mailbox command to the HBA. The @drqp
17327  * and @hrqp entries are used to get the entry counts that determine the
17328  * number of pages to use for each queue. Each @cqp entry indicates which
17329  * completion queue the buffers posted to the corresponding receive queues
17330  * are bound to. This function sends the RQ_CREATE mailbox command to the HBA
17331  * and polls for its completion, so it will not return until the mailbox
17332  * command has finished.
17333  *
17334  * On success this function will return a zero. If unable to allocate enough
17335  * memory this function will return -ENOMEM. If the queue create mailbox command
17336  * fails this function will return -ENXIO.
17337  **/
17338 int
17339 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17340                 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17341                 uint32_t subtype)
17342 {
17343         struct lpfc_queue *hrq, *drq, *cq;
17344         struct lpfc_mbx_rq_create_v2 *rq_create;
17345         struct lpfc_dmabuf *dmabuf;
17346         LPFC_MBOXQ_t *mbox;
17347         int rc, length, alloclen, status = 0;
17348         int cnt, idx, numrq, page_idx = 0;
17349         uint32_t shdr_status, shdr_add_status;
17350         union lpfc_sli4_cfg_shdr *shdr;
17351         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17352
17353         numrq = phba->cfg_nvmet_mrq;
17354         /* sanity check on array memory */
17355         if (!hrqp || !drqp || !cqp || !numrq)
17356                 return -ENODEV;
17357         if (!phba->sli4_hba.pc_sli4_params.supported)
17358                 hw_page_size = SLI4_PAGE_SIZE;
17359
17360         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17361         if (!mbox)
17362                 return -ENOMEM;
17363
17364         length = sizeof(struct lpfc_mbx_rq_create_v2);
17365         length += ((2 * numrq * hrqp[0]->page_count) *
17366                    sizeof(struct dma_address));
17367
17368         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17369                                     LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17370                                     LPFC_SLI4_MBX_NEMBED);
17371         if (alloclen < length) {
17372                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17373                                 "3099 Allocated DMA memory size (%d) is "
17374                                 "less than the requested DMA memory size "
17375                                 "(%d)\n", alloclen, length);
17376                 status = -ENOMEM;
17377                 goto out;
17378         }
17379
17382         rq_create = mbox->sge_array->addr[0];
17383         shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17384
17385         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17386         cnt = 0;
17387
17388         for (idx = 0; idx < numrq; idx++) {
17389                 hrq = hrqp[idx];
17390                 drq = drqp[idx];
17391                 cq  = cqp[idx];
17392
17393                 /* sanity check on queue memory */
17394                 if (!hrq || !drq || !cq) {
17395                         status = -ENODEV;
17396                         goto out;
17397                 }
17398
17399                 if (hrq->entry_count != drq->entry_count) {
17400                         status = -EINVAL;
17401                         goto out;
17402                 }
17403
17404                 if (idx == 0) {
17405                         bf_set(lpfc_mbx_rq_create_num_pages,
17406                                &rq_create->u.request,
17407                                hrq->page_count);
17408                         bf_set(lpfc_mbx_rq_create_rq_cnt,
17409                                &rq_create->u.request, (numrq * 2));
17410                         bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17411                                1);
17412                         bf_set(lpfc_rq_context_base_cq,
17413                                &rq_create->u.request.context,
17414                                cq->queue_id);
17415                         bf_set(lpfc_rq_context_data_size,
17416                                &rq_create->u.request.context,
17417                                LPFC_NVMET_DATA_BUF_SIZE);
17418                         bf_set(lpfc_rq_context_hdr_size,
17419                                &rq_create->u.request.context,
17420                                LPFC_HDR_BUF_SIZE);
17421                         bf_set(lpfc_rq_context_rqe_count_1,
17422                                &rq_create->u.request.context,
17423                                hrq->entry_count);
17424                         bf_set(lpfc_rq_context_rqe_size,
17425                                &rq_create->u.request.context,
17426                                LPFC_RQE_SIZE_8);
17427                         bf_set(lpfc_rq_context_page_size,
17428                                &rq_create->u.request.context,
17429                                (PAGE_SIZE/SLI4_PAGE_SIZE));
17430                 }
17431                 rc = 0;
17432                 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17433                         memset(dmabuf->virt, 0, hw_page_size);
17434                         cnt = page_idx + dmabuf->buffer_tag;
17435                         rq_create->u.request.page[cnt].addr_lo =
17436                                         putPaddrLow(dmabuf->phys);
17437                         rq_create->u.request.page[cnt].addr_hi =
17438                                         putPaddrHigh(dmabuf->phys);
17439                         rc++;
17440                 }
17441                 page_idx += rc;
17442
17443                 rc = 0;
17444                 list_for_each_entry(dmabuf, &drq->page_list, list) {
17445                         memset(dmabuf->virt, 0, hw_page_size);
17446                         cnt = page_idx + dmabuf->buffer_tag;
17447                         rq_create->u.request.page[cnt].addr_lo =
17448                                         putPaddrLow(dmabuf->phys);
17449                         rq_create->u.request.page[cnt].addr_hi =
17450                                         putPaddrHigh(dmabuf->phys);
17451                         rc++;
17452                 }
17453                 page_idx += rc;
17454
17455                 hrq->db_format = LPFC_DB_RING_FORMAT;
17456                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17457                 hrq->type = LPFC_HRQ;
17458                 hrq->assoc_qid = cq->queue_id;
17459                 hrq->subtype = subtype;
17460                 hrq->host_index = 0;
17461                 hrq->hba_index = 0;
17462                 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17463
17464                 drq->db_format = LPFC_DB_RING_FORMAT;
17465                 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17466                 drq->type = LPFC_DRQ;
17467                 drq->assoc_qid = cq->queue_id;
17468                 drq->subtype = subtype;
17469                 drq->host_index = 0;
17470                 drq->hba_index = 0;
17471                 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17472
17473                 list_add_tail(&hrq->list, &cq->child_list);
17474                 list_add_tail(&drq->list, &cq->child_list);
17475         }
17476
17477         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17478         /* The IOCTL status is embedded in the mailbox subheader. */
17479         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17480         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17481         if (shdr_status || shdr_add_status || rc) {
17482                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17483                                 "3120 RQ_CREATE mailbox failed with "
17484                                 "status x%x add_status x%x, mbx status x%x\n",
17485                                 shdr_status, shdr_add_status, rc);
17486                 status = -ENXIO;
17487                 goto out;
17488         }
17489         rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17490         if (rc == 0xFFFF) {
17491                 status = -ENXIO;
17492                 goto out;
17493         }
17494
17495         /* Initialize all RQs with associated queue id */
17496         for (idx = 0; idx < numrq; idx++) {
17497                 hrq = hrqp[idx];
17498                 hrq->queue_id = rc + (2 * idx);
17499                 drq = drqp[idx];
17500                 drq->queue_id = rc + (2 * idx) + 1;
17501         }
17502
17503 out:
17504         lpfc_sli4_mbox_cmd_free(phba, mbox);
17505         return status;
17506 }
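
/*
 * The single RQ_CREATE (v2) command above returns one base queue id for
 * all numrq pairs; a sketch of the resulting id layout (with `base` being
 * the id returned in the mailbox response):
 *
 *	hrqp[idx]->queue_id == base + (2 * idx);	// header RQ
 *	drqp[idx]->queue_id == base + (2 * idx) + 1;	// its data RQ
 *
 * so each header queue and its data queue always occupy adjacent ids.
 */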
17507
17508 /**
17509  * lpfc_eq_destroy - Destroy an Event Queue on the HBA
17510  * @phba: HBA structure that indicates port to destroy a queue on.
17511  * @eq: The queue structure associated with the queue to destroy.
17512  *
17513  * This function destroys a queue, as detailed in @eq, by sending a mailbox
17514  * command, specific to the type of queue, to the HBA.
17515  *
17516  * The @eq struct is used to get the queue ID of the queue to destroy.
17517  *
17518  * On success this function will return a zero. If the queue destroy mailbox
17519  * command fails this function will return -ENXIO.
17520  **/
17521 int
17522 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17523 {
17524         LPFC_MBOXQ_t *mbox;
17525         int rc, length, status = 0;
17526         uint32_t shdr_status, shdr_add_status;
17527         union lpfc_sli4_cfg_shdr *shdr;
17528
17529         /* sanity check on queue memory */
17530         if (!eq)
17531                 return -ENODEV;
17532
17533         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17534         if (!mbox)
17535                 return -ENOMEM;
17536         length = (sizeof(struct lpfc_mbx_eq_destroy) -
17537                   sizeof(struct lpfc_sli4_cfg_mhdr));
17538         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17539                          LPFC_MBOX_OPCODE_EQ_DESTROY,
17540                          length, LPFC_SLI4_MBX_EMBED);
17541         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17542                eq->queue_id);
17543         mbox->vport = eq->phba->pport;
17544         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17545
17546         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17547         /* The IOCTL status is embedded in the mailbox subheader. */
17548         shdr = (union lpfc_sli4_cfg_shdr *)
17549                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17550         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17551         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17552         if (shdr_status || shdr_add_status || rc) {
17553                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17554                                 "2505 EQ_DESTROY mailbox failed with "
17555                                 "status x%x add_status x%x, mbx status x%x\n",
17556                                 shdr_status, shdr_add_status, rc);
17557                 status = -ENXIO;
17558         }
17559
17560         /* Remove eq from any list */
17561         list_del_init(&eq->list);
17562         mempool_free(mbox, eq->phba->mbox_mem_pool);
17563         return status;
17564 }
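
/*
 * lpfc_eq_destroy() above and lpfc_cq_destroy(), lpfc_mq_destroy() and
 * lpfc_wq_destroy() below all follow one pattern; a condensed sketch of
 * that shared shape (placeholders in angle brackets vary by queue type):
 *
 *	mbox = mempool_alloc(q->phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_sli4_config(phba, mbox, <subsystem>, <destroy opcode>,
 *			 length, LPFC_SLI4_MBX_EMBED);
 *	bf_set(<q_id field>, &mbox->u.mqe.un.<q>_destroy.u.request,
 *	       q->queue_id);
 *	rc = lpfc_sli_issue_mbox(q->phba, mbox, MBX_POLL);
 *	// on shdr_status/shdr_add_status/rc error: status = -ENXIO
 *	list_del_init(&q->list);	// unlink from the parent's child_list
 *	mempool_free(mbox, q->phba->mbox_mem_pool);
 */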
17565
17566 /**
17567  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17568  * @phba: HBA structure that indicates port to destroy a queue on.
17569  * @cq: The queue structure associated with the queue to destroy.
17570  *
17571  * This function destroys a queue, as detailed in @cq, by sending a mailbox
17572  * command, specific to the type of queue, to the HBA.
17573  *
17574  * The @cq struct is used to get the queue ID of the queue to destroy.
17575  *
17576  * On success this function will return a zero. If the queue destroy mailbox
17577  * command fails this function will return -ENXIO.
17578  **/
17579 int
17580 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17581 {
17582         LPFC_MBOXQ_t *mbox;
17583         int rc, length, status = 0;
17584         uint32_t shdr_status, shdr_add_status;
17585         union lpfc_sli4_cfg_shdr *shdr;
17586
17587         /* sanity check on queue memory */
17588         if (!cq)
17589                 return -ENODEV;
17590         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17591         if (!mbox)
17592                 return -ENOMEM;
17593         length = (sizeof(struct lpfc_mbx_cq_destroy) -
17594                   sizeof(struct lpfc_sli4_cfg_mhdr));
17595         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17596                          LPFC_MBOX_OPCODE_CQ_DESTROY,
17597                          length, LPFC_SLI4_MBX_EMBED);
17598         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17599                cq->queue_id);
17600         mbox->vport = cq->phba->pport;
17601         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17602         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17603         /* The IOCTL status is embedded in the mailbox subheader. */
17604         shdr = (union lpfc_sli4_cfg_shdr *)
17605                 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
17606         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17607         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17608         if (shdr_status || shdr_add_status || rc) {
17609                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17610                                 "2506 CQ_DESTROY mailbox failed with "
17611                                 "status x%x add_status x%x, mbx status x%x\n",
17612                                 shdr_status, shdr_add_status, rc);
17613                 status = -ENXIO;
17614         }
17615         /* Remove cq from any list */
17616         list_del_init(&cq->list);
17617         mempool_free(mbox, cq->phba->mbox_mem_pool);
17618         return status;
17619 }
17620
17621 /**
17622  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17623  * @phba: HBA structure that indicates port to destroy a queue on.
17624  * @mq: The queue structure associated with the queue to destroy.
17625  *
17626  * This function destroys a queue, as detailed in @mq, by sending a mailbox
17627  * command, specific to the type of queue, to the HBA.
17628  *
17629  * The @mq struct is used to get the queue ID of the queue to destroy.
17630  *
17631  * On success this function will return a zero. If the queue destroy mailbox
17632  * command fails this function will return -ENXIO.
17633  **/
17634 int
17635 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17636 {
17637         LPFC_MBOXQ_t *mbox;
17638         int rc, length, status = 0;
17639         uint32_t shdr_status, shdr_add_status;
17640         union lpfc_sli4_cfg_shdr *shdr;
17641
17642         /* sanity check on queue memory */
17643         if (!mq)
17644                 return -ENODEV;
17645         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17646         if (!mbox)
17647                 return -ENOMEM;
17648         length = (sizeof(struct lpfc_mbx_mq_destroy) -
17649                   sizeof(struct lpfc_sli4_cfg_mhdr));
17650         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17651                          LPFC_MBOX_OPCODE_MQ_DESTROY,
17652                          length, LPFC_SLI4_MBX_EMBED);
17653         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17654                mq->queue_id);
17655         mbox->vport = mq->phba->pport;
17656         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17657         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17658         /* The IOCTL status is embedded in the mailbox subheader. */
17659         shdr = (union lpfc_sli4_cfg_shdr *)
17660                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17661         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17662         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17663         if (shdr_status || shdr_add_status || rc) {
17664                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17665                                 "2507 MQ_DESTROY mailbox failed with "
17666                                 "status x%x add_status x%x, mbx status x%x\n",
17667                                 shdr_status, shdr_add_status, rc);
17668                 status = -ENXIO;
17669         }
17670         /* Remove mq from any list */
17671         list_del_init(&mq->list);
17672         mempool_free(mbox, mq->phba->mbox_mem_pool);
17673         return status;
17674 }
17675
17676 /**
17677  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17678  * @phba: HBA structure that indicates port to destroy a queue on.
17679  * @wq: The queue structure associated with the queue to destroy.
17680  *
17681  * This function destroys a queue, as detailed in @wq, by sending a mailbox
17682  * command, specific to the type of queue, to the HBA.
17683  *
17684  * The @wq struct is used to get the queue ID of the queue to destroy.
17685  *
17686  * On success this function will return a zero. If the queue destroy mailbox
17687  * command fails this function will return -ENXIO.
17688  **/
17689 int
17690 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17691 {
17692         LPFC_MBOXQ_t *mbox;
17693         int rc, length, status = 0;
17694         uint32_t shdr_status, shdr_add_status;
17695         union lpfc_sli4_cfg_shdr *shdr;
17696
17697         /* sanity check on queue memory */
17698         if (!wq)
17699                 return -ENODEV;
17700         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17701         if (!mbox)
17702                 return -ENOMEM;
17703         length = (sizeof(struct lpfc_mbx_wq_destroy) -
17704                   sizeof(struct lpfc_sli4_cfg_mhdr));
17705         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17706                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17707                          length, LPFC_SLI4_MBX_EMBED);
17708         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17709                wq->queue_id);
17710         mbox->vport = wq->phba->pport;
17711         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17712         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17713         shdr = (union lpfc_sli4_cfg_shdr *)
17714                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17715         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17716         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17717         if (shdr_status || shdr_add_status || rc) {
17718                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17719                                 "2508 WQ_DESTROY mailbox failed with "
17720                                 "status x%x add_status x%x, mbx status x%x\n",
17721                                 shdr_status, shdr_add_status, rc);
17722                 status = -ENXIO;
17723         }
17724         /* Remove wq from any list */
17725         list_del_init(&wq->list);
17726         kfree(wq->pring);
17727         wq->pring = NULL;
17728         mempool_free(mbox, wq->phba->mbox_mem_pool);
17729         return status;
17730 }
17731
17732 /**
17733  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17734  * @phba: HBA structure that indicates port to destroy a queue on.
17735  * @hrq: The queue structure associated with the queue to destroy.
17736  * @drq: The queue structure associated with the queue to destroy.
17737  *
17738  * This function destroys the receive queue pair, as detailed in @hrq and
17739  * @drq, by sending mailbox commands, specific to the type of queue, to the HBA.
17740  *
17741  * The @hrq and @drq structs are used to get the queue IDs to destroy.
17742  *
17743  * On success this function will return a zero. If the queue destroy mailbox
17744  * command fails this function will return -ENXIO.
17745  **/
17746 int
17747 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17748                 struct lpfc_queue *drq)
17749 {
17750         LPFC_MBOXQ_t *mbox;
17751         int rc, length, status = 0;
17752         uint32_t shdr_status, shdr_add_status;
17753         union lpfc_sli4_cfg_shdr *shdr;
17754
17755         /* sanity check on queue memory */
17756         if (!hrq || !drq)
17757                 return -ENODEV;
17758         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17759         if (!mbox)
17760                 return -ENOMEM;
17761         length = (sizeof(struct lpfc_mbx_rq_destroy) -
17762                   sizeof(struct lpfc_sli4_cfg_mhdr));
17763         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17764                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17765                          length, LPFC_SLI4_MBX_EMBED);
17766         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17767                hrq->queue_id);
17768         mbox->vport = hrq->phba->pport;
17769         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17770         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17771         /* The IOCTL status is embedded in the mailbox subheader. */
17772         shdr = (union lpfc_sli4_cfg_shdr *)
17773                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17774         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17775         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17776         if (shdr_status || shdr_add_status || rc) {
17777                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17778                                 "2509 RQ_DESTROY mailbox failed with "
17779                                 "status x%x add_status x%x, mbx status x%x\n",
17780                                 shdr_status, shdr_add_status, rc);
17781                 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17782                 return -ENXIO;
17783         }
17784         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17785                drq->queue_id);
17786         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17787         shdr = (union lpfc_sli4_cfg_shdr *)
17788                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17789         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17790         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17791         if (shdr_status || shdr_add_status || rc) {
17792                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17793                                 "2510 RQ_DESTROY mailbox failed with "
17794                                 "status x%x add_status x%x, mbx status x%x\n",
17795                                 shdr_status, shdr_add_status, rc);
17796                 status = -ENXIO;
17797         }
17798         list_del_init(&hrq->list);
17799         list_del_init(&drq->list);
17800         mempool_free(mbox, hrq->phba->mbox_mem_pool);
17801         return status;
17802 }
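
/*
 * Since queue creation linked the RQ pair onto cq->child_list, teardown
 * runs child-first; a sketch of the expected ordering (variable names
 * are assumptions):
 *
 *	lpfc_rq_destroy(phba, hdr_rq, dat_rq);	// children of the CQ first
 *	lpfc_cq_destroy(phba, cq);		// then the parent CQ
 *	lpfc_eq_destroy(phba, eq);		// finally the parent EQ
 */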
17803
17804 /**
17805  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17806  * @phba: pointer to lpfc hba data structure.
17807  * @pdma_phys_addr0: Physical address of the 1st SGL page.
17808  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17809  * @xritag: the xritag that ties this io to the SGL pages.
17810  *
17811  * This routine will post the sgl pages for the IO that has the xritag
17812  * that is in the iocbq structure. The xritag is assigned during iocbq
17813  * creation and persists for as long as the driver is loaded.
17814  * If the caller has fewer than 256 scatter gather segments to map, then
17815  * pdma_phys_addr1 should be 0.
17816  * If the caller needs to map more than 256 scatter gather segments, then
17817  * pdma_phys_addr1 should be a valid physical address.
17818  * Physical addresses for SGLs must be 64 byte aligned.
17819  * If two SGLs are mapped, the first one must have 256 entries and the
17820  * second can have between 1 and 256 entries.
17821  *
17822  * Return codes:
17823  *      0 - Success
17824  *      -ENXIO, -ENOMEM - Failure
17825  **/
17826 int
17827 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17828                 dma_addr_t pdma_phys_addr0,
17829                 dma_addr_t pdma_phys_addr1,
17830                 uint16_t xritag)
17831 {
17832         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17833         LPFC_MBOXQ_t *mbox;
17834         int rc;
17835         uint32_t shdr_status, shdr_add_status;
17836         uint32_t mbox_tmo;
17837         union lpfc_sli4_cfg_shdr *shdr;
17838
17839         if (xritag == NO_XRI) {
17840                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17841                                 "0364 Invalid param:\n");
17842                 return -EINVAL;
17843         }
17844
17845         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17846         if (!mbox)
17847                 return -ENOMEM;
17848
17849         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17850                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17851                         sizeof(struct lpfc_mbx_post_sgl_pages) -
17852                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17853
17854         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17855                                 &mbox->u.mqe.un.post_sgl_pages;
17856         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17857         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17858
17859         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17860                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17861         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17862                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17863
17864         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17865                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17866         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17867                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17868         if (!phba->sli4_hba.intr_enable)
17869                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17870         else {
17871                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17872                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17873         }
17874         /* The IOCTL status is embedded in the mailbox subheader. */
17875         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17876         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17877         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17878         if (!phba->sli4_hba.intr_enable)
17879                 mempool_free(mbox, phba->mbox_mem_pool);
17880         else if (rc != MBX_TIMEOUT)
17881                 mempool_free(mbox, phba->mbox_mem_pool);
17882         if (shdr_status || shdr_add_status || rc) {
17883                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17884                                 "2511 POST_SGL mailbox failed with "
17885                                 "status x%x add_status x%x, mbx status x%x\n",
17886                                 shdr_status, shdr_add_status, rc);
17887                 return -ENXIO;
17887         }
17888         return 0;
17889 }
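
/*
 * A minimal sketch of posting a two-page SGL for one xri (the address and
 * xri variables are assumptions; per the kernel-doc above, both physical
 * addresses must be 64 byte aligned):
 *
 *	rc = lpfc_sli4_post_sgl(phba, page0_phys, page1_phys, xritag);
 *
 * Pass 0 for the second address when 256 or fewer scatter gather
 * segments are being mapped.
 */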
17890
17891 /**
17892  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17893  * @phba: pointer to lpfc hba data structure.
17894  *
17895  * This routine is invoked to allocate the next available xri from the
17896  * driver's xri bitmask, consistent with the SLI-4 interface spec. The
17897  * xri is a logical index, so the driver searches from 0 each time, and
17898  * the allocated xri stays in use until it is released.
17899  *
17900  * Returns
17901  *      An available xri in the range 0 <= xri < max_xri if successful,
17902  *      NO_XRI if no xris are available.
17903  **/
17904 static uint16_t
17905 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17906 {
17907         unsigned long xri;
17908
17909         /*
17910          * Fetch the next logical xri.  Because this index is logical,
17911          * the driver starts at 0 each time.
17912          */
17913         spin_lock_irq(&phba->hbalock);
17914         xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
17915                                  phba->sli4_hba.max_cfg_param.max_xri, 0);
17916         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17917                 spin_unlock_irq(&phba->hbalock);
17918                 return NO_XRI;
17919         } else {
17920                 set_bit(xri, phba->sli4_hba.xri_bmask);
17921                 phba->sli4_hba.max_cfg_param.xri_used++;
17922         }
17923         spin_unlock_irq(&phba->hbalock);
17924         return xri;
17925 }
17926
17927 /**
17928  * __lpfc_sli4_free_xri - Release an xri for reuse.
17929  * @phba: pointer to lpfc hba data structure.
17930  * @xri: xri to release.
17931  *
17932  * This routine is invoked to release an xri to the pool of
17933  * available xris maintained by the driver.
17934  **/
17935 static void
17936 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17937 {
17938         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17939                 phba->sli4_hba.max_cfg_param.xri_used--;
17940         }
17941 }
17942
17943 /**
17944  * lpfc_sli4_free_xri - Release an xri for reuse.
17945  * @phba: pointer to lpfc hba data structure.
17946  * @xri: xri to release.
17947  *
17948  * This routine is invoked to release an xri to the pool of
17949  * available xris maintained by the driver.
17950  **/
17951 void
17952 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17953 {
17954         spin_lock_irq(&phba->hbalock);
17955         __lpfc_sli4_free_xri(phba, xri);
17956         spin_unlock_irq(&phba->hbalock);
17957 }
17958
17959 /**
17960  * lpfc_sli4_next_xritag - Get an xritag for the io
17961  * @phba: Pointer to HBA context object.
17962  *
17963  * This function gets an xritag for the iocb. If there is no unused xritag
17964  * it will return NO_XRI (0xffff).
17965  * The function returns the allocated xritag if successful, else returns
17966  * NO_XRI; NO_XRI is not a valid xritag.
17967  * The caller is not required to hold any lock.
17968  **/
17969 uint16_t
17970 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17971 {
17972         uint16_t xri_index;
17973
17974         xri_index = lpfc_sli4_alloc_xri(phba);
17975         if (xri_index == NO_XRI)
17976                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17977                                 "2004 Failed to allocate XRI. Last XRITAG is %d"
17978                                 " Max XRI is %d, Used XRI is %d\n",
17979                                 xri_index,
17980                                 phba->sli4_hba.max_cfg_param.max_xri,
17981                                 phba->sli4_hba.max_cfg_param.xri_used);
17982         return xri_index;
17983 }
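
/*
 * Allocation and release of xris are expected to pair up; a sketch
 * (illustrative only):
 *
 *	uint16_t xri = lpfc_sli4_next_xritag(phba);
 *
 *	if (xri == NO_XRI)
 *		return -ENOMEM;		// bitmask exhausted
 *	...				// tie the xri to an io and its SGL
 *	lpfc_sli4_free_xri(phba, xri);	// clears the bit under hbalock
 */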
17984
17985 /**
17986  * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17987  * @phba: pointer to lpfc hba data structure.
17988  * @post_sgl_list: pointer to els sgl entry list.
17989  * @post_cnt: number of els sgl entries on the list.
17990  *
17991  * This routine is invoked to post a block of driver's sgl pages to the
17992  * HBA using non-embedded mailbox command. No Lock is held. This routine
17993  * is only called when the driver is loading and after all IO has been
17994  * stopped.
17995  **/
17996 static int
17997 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17998                             struct list_head *post_sgl_list,
17999                             int post_cnt)
18000 {
18001         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
18002         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18003         struct sgl_page_pairs *sgl_pg_pairs;
18004         void *viraddr;
18005         LPFC_MBOXQ_t *mbox;
18006         uint32_t reqlen, alloclen, pg_pairs;
18007         uint32_t mbox_tmo;
18008         uint16_t xritag_start = 0;
18009         int rc = 0;
18010         uint32_t shdr_status, shdr_add_status;
18011         union lpfc_sli4_cfg_shdr *shdr;
18012
18013         reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
18014                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18015         if (reqlen > SLI4_PAGE_SIZE) {
18016                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18017                                 "2559 Block sgl registration required DMA "
18018                                 "size (%d) greater than a page\n", reqlen);
18019                 return -ENOMEM;
18020         }
18021
18022         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18023         if (!mbox)
18024                 return -ENOMEM;
18025
18026         /* Allocate DMA memory and set up the non-embedded mailbox command */
18027         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18028                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
18029                          LPFC_SLI4_MBX_NEMBED);
18030
18031         if (alloclen < reqlen) {
18032                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18033                                 "0285 Allocated DMA memory size (%d) is "
18034                                 "less than the requested DMA memory "
18035                                 "size (%d)\n", alloclen, reqlen);
18036                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18037                 return -ENOMEM;
18038         }
18039         /* Set up the SGL pages in the non-embedded DMA pages */
18040         viraddr = mbox->sge_array->addr[0];
18041         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18042         sgl_pg_pairs = &sgl->sgl_pg_pairs;
18043
18044         pg_pairs = 0;
18045         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
18046                 /* Set up the sge entry */
18047                 sgl_pg_pairs->sgl_pg0_addr_lo =
18048                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
18049                 sgl_pg_pairs->sgl_pg0_addr_hi =
18050                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
18051                 sgl_pg_pairs->sgl_pg1_addr_lo =
18052                                 cpu_to_le32(putPaddrLow(0));
18053                 sgl_pg_pairs->sgl_pg1_addr_hi =
18054                                 cpu_to_le32(putPaddrHigh(0));
18055
18056                 /* Keep the first xritag on the list */
18057                 if (pg_pairs == 0)
18058                         xritag_start = sglq_entry->sli4_xritag;
18059                 sgl_pg_pairs++;
18060                 pg_pairs++;
18061         }
18062
18063         /* Complete initialization and perform endian conversion. */
18064         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18065         bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
18066         sgl->word0 = cpu_to_le32(sgl->word0);
18067
18068         if (!phba->sli4_hba.intr_enable)
18069                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18070         else {
18071                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18072                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18073         }
18074         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
18075         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18076         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18077         if (!phba->sli4_hba.intr_enable)
18078                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18079         else if (rc != MBX_TIMEOUT)
18080                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18081         if (shdr_status || shdr_add_status || rc) {
18082                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18083                                 "2513 POST_SGL_BLOCK mailbox command failed "
18084                                 "status x%x add_status x%x mbx status x%x\n",
18085                                 shdr_status, shdr_add_status, rc);
18086                 rc = -ENXIO;
18087         }
18088         return rc;
18089 }
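
/*
 * The SLI4_PAGE_SIZE check above bounds how many sgl page pairs fit in
 * one non-embedded command; a worked example (assuming a 4096 byte
 * SLI4_PAGE_SIZE and the 16 byte struct sgl_page_pairs):
 *
 *	reqlen = post_cnt * 16 +
 *		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 *
 * so post_cnt can be at most roughly (4096 - header) / 16, i.e. a bit
 * under 256 entries per mailbox command.
 */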
18090
18091 /**
18092  * lpfc_sli4_post_io_sgl_block - post a block of IO buffer sgls to firmware
18093  * @phba: pointer to lpfc hba data structure.
18094  * @nblist: pointer to the IO buffer list.
18095  * @count: number of IO buffers on the list.
18096  *
18097  * This routine is invoked to post a block of @count IO buffer sgl pages from
18098  * the buffer list @nblist to the HBA using a non-embedded mailbox command.
18099  * No lock is held.
18100  *
18101  **/
18102 static int
18103 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
18104                             int count)
18105 {
18106         struct lpfc_io_buf *lpfc_ncmd;
18107         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18108         struct sgl_page_pairs *sgl_pg_pairs;
18109         void *viraddr;
18110         LPFC_MBOXQ_t *mbox;
18111         uint32_t reqlen, alloclen, pg_pairs;
18112         uint32_t mbox_tmo;
18113         uint16_t xritag_start = 0;
18114         int rc = 0;
18115         uint32_t shdr_status, shdr_add_status;
18116         dma_addr_t pdma_phys_bpl1;
18117         union lpfc_sli4_cfg_shdr *shdr;
18118
18119         /* Calculate the requested length of the dma memory */
18120         reqlen = count * sizeof(struct sgl_page_pairs) +
18121                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18122         if (reqlen > SLI4_PAGE_SIZE) {
18123                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
18124                                 "6118 Block sgl registration required DMA "
18125                                 "size (%d) greater than a page\n", reqlen);
18126                 return -ENOMEM;
18127         }
18128         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18129         if (!mbox) {
18130                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18131                                 "6119 Failed to allocate mbox cmd memory\n");
18132                 return -ENOMEM;
18133         }
18134
18135         /* Allocate DMA memory and set up the non-embedded mailbox command */
18136         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18137                                     LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18138                                     reqlen, LPFC_SLI4_MBX_NEMBED);
18139
18140         if (alloclen < reqlen) {
18141                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18142                                 "6120 Allocated DMA memory size (%d) is "
18143                                 "less than the requested DMA memory "
18144                                 "size (%d)\n", alloclen, reqlen);
18145                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18146                 return -ENOMEM;
18147         }
18148
18149         /* Get the first SGE entry from the non-embedded DMA memory */
18150         viraddr = mbox->sge_array->addr[0];
18151
18152         /* Set up the SGL pages in the non-embedded DMA pages */
18153         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18154         sgl_pg_pairs = &sgl->sgl_pg_pairs;
18155
18156         pg_pairs = 0;
18157         list_for_each_entry(lpfc_ncmd, nblist, list) {
18158                 /* Set up the sge entry */
18159                 sgl_pg_pairs->sgl_pg0_addr_lo =
18160                         cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
18161                 sgl_pg_pairs->sgl_pg0_addr_hi =
18162                         cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
18163                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
18164                         pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
18165                                                 SGL_PAGE_SIZE;
18166                 else
18167                         pdma_phys_bpl1 = 0;
18168                 sgl_pg_pairs->sgl_pg1_addr_lo =
18169                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
18170                 sgl_pg_pairs->sgl_pg1_addr_hi =
18171                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
18172                 /* Keep the first xritag on the list */
18173                 if (pg_pairs == 0)
18174                         xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
18175                 sgl_pg_pairs++;
18176                 pg_pairs++;
18177         }
18178         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18179         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
18180         /* Perform endian conversion if necessary */
18181         sgl->word0 = cpu_to_le32(sgl->word0);
18182
18183         if (!phba->sli4_hba.intr_enable) {
18184                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18185         } else {
18186                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18187                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18188         }
18189         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
18190         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18191         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18192         if (!phba->sli4_hba.intr_enable)
18193                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18194         else if (rc != MBX_TIMEOUT)
18195                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18196         if (shdr_status || shdr_add_status || rc) {
18197                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18198                                 "6125 POST_SGL_BLOCK mailbox command failed "
18199                                 "status x%x add_status x%x mbx status x%x\n",
18200                                 shdr_status, shdr_add_status, rc);
18201                 rc = -ENXIO;
18202         }
18203         return rc;
18204 }
18205
18206 /**
18207  * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
18208  * @phba: pointer to lpfc hba data structure.
18209  * @post_nblist: pointer to the nvme buffer list.
18210  * @sb_count: number of nvme buffers.
18211  *
18212  * This routine walks the list of nvme buffers that was passed in. It attempts
18213  * to construct blocks of nvme buffer sgls which contain contiguous xris and
18214  * uses the non-embedded SGL block post mailbox command to post them to the
18215  * port. Any single NVME buffer sgl with a non-contiguous xri is posted using
18216  * the embedded SGL post mailbox command instead. The @post_nblist passed in
18217  * must be a local list, so no lock is needed while manipulating it.
18218  *
18219  * Returns: 0 on failure; otherwise, the number of successfully posted buffers.
18220  **/
18221 int
18222 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
18223                            struct list_head *post_nblist, int sb_count)
18224 {
18225         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
18226         int status, sgl_size;
18227         int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
18228         dma_addr_t pdma_phys_sgl1;
18229         int last_xritag = NO_XRI;
18230         int cur_xritag;
18231         LIST_HEAD(prep_nblist);
18232         LIST_HEAD(blck_nblist);
18233         LIST_HEAD(nvme_nblist);
18234
18235         /* sanity check */
18236         if (sb_count <= 0)
18237                 return -EINVAL;
18238
18239         sgl_size = phba->cfg_sg_dma_buf_size;
18240         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
18241                 list_del_init(&lpfc_ncmd->list);
18242                 block_cnt++;
18243                 if ((last_xritag != NO_XRI) &&
18244                     (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
18245                         /* a hole in xri block, form a sgl posting block */
18246                         list_splice_init(&prep_nblist, &blck_nblist);
18247                         post_cnt = block_cnt - 1;
18248                         /* prepare list for next posting block */
18249                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18250                         block_cnt = 1;
18251                 } else {
18252                         /* prepare list for next posting block */
18253                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18254                         /* enough sgls for non-embed sgl mbox command */
18255                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18256                                 list_splice_init(&prep_nblist, &blck_nblist);
18257                                 post_cnt = block_cnt;
18258                                 block_cnt = 0;
18259                         }
18260                 }
18261                 num_posting++;
18262                 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18263
18264                 /* end of repost sgl list condition for NVME buffers */
18265                 if (num_posting == sb_count) {
18266                         if (post_cnt == 0) {
18267                                 /* last sgl posting block */
18268                                 list_splice_init(&prep_nblist, &blck_nblist);
18269                                 post_cnt = block_cnt;
18270                         } else if (block_cnt == 1) {
18271                                 /* last single sgl with non-contiguous xri */
18272                                 if (sgl_size > SGL_PAGE_SIZE)
18273                                         pdma_phys_sgl1 =
18274                                                 lpfc_ncmd->dma_phys_sgl +
18275                                                 SGL_PAGE_SIZE;
18276                                 else
18277                                         pdma_phys_sgl1 = 0;
18278                                 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18279                                 status = lpfc_sli4_post_sgl(
18280                                                 phba, lpfc_ncmd->dma_phys_sgl,
18281                                                 pdma_phys_sgl1, cur_xritag);
18282                                 if (status) {
18283                                         /* Post error.  Buffer unavailable. */
18284                                         lpfc_ncmd->flags |=
18285                                                 LPFC_SBUF_NOT_POSTED;
18286                                 } else {
18287                                         /* Post success. Buffer available. */
18288                                         lpfc_ncmd->flags &=
18289                                                 ~LPFC_SBUF_NOT_POSTED;
18290                                         lpfc_ncmd->status = IOSTAT_SUCCESS;
18291                                         num_posted++;
18292                                 }
18293                                 /* success or failure, put on NVME buffer sgl list */
18294                                 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18295                         }
18296                 }
18297
18298                 /* continue until a nembed page worth of sgls */
18299                 if (post_cnt == 0)
18300                         continue;
18301
18302                 /* post block of NVME buffer list sgls */
18303                 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18304                                                      post_cnt);
18305
18306                 /* don't reset xritag due to hole in xri block */
18307                 if (block_cnt == 0)
18308                         last_xritag = NO_XRI;
18309
18310                 /* reset NVME buffer post count for next round of posting */
18311                 post_cnt = 0;
18312
18313         /* put NVME buffers with posted sgls on the NVME buffer sgl list */
18314                 while (!list_empty(&blck_nblist)) {
18315                         list_remove_head(&blck_nblist, lpfc_ncmd,
18316                                          struct lpfc_io_buf, list);
18317                         if (status) {
18318                                 /* Post error.  Mark buffer unavailable. */
18319                                 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18320                         } else {
18321                                 /* Post success, Mark buffer available. */
18322                                 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18323                                 lpfc_ncmd->status = IOSTAT_SUCCESS;
18324                                 num_posted++;
18325                         }
18326                         list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18327                 }
18328         }
18329         /* Push NVME buffers with sgl posted to the available list */
18330         lpfc_io_buf_replenish(phba, &nvme_nblist);
18331
18332         return num_posted;
18333 }
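/*
 * A minimal, standalone sketch (illustrative only, not driver code) of the
 * batching pattern used in the repost loop above: buffers accumulate into
 * fixed-size blocks for the non-embedded SGL mailbox command, and any
 * remainder is flushed as a final, smaller block. BLOCK_SZ stands in for
 * LPFC_NEMBED_MBOX_SGL_CNT and post_block() for
 * lpfc_sli4_post_io_sgl_block(); the real loop additionally starts a new
 * block early when XRIs are not contiguous.
 */
#include <stdio.h>

#define BLOCK_SZ 8      /* stand-in for LPFC_NEMBED_MBOX_SGL_CNT */

static void post_block(int start, int count)
{
        printf("posting %d sgls starting at buffer %d\n", count, start);
}

static void post_all(int total)
{
        int i, block_cnt = 0, block_start = 0;

        for (i = 0; i < total; i++) {
                if (block_cnt == 0)
                        block_start = i;
                if (++block_cnt == BLOCK_SZ) {
                        post_block(block_start, block_cnt);     /* full block */
                        block_cnt = 0;
                }
        }
        if (block_cnt)
                post_block(block_start, block_cnt);             /* remainder */
}

int main(void)
{
        post_all(19);   /* two full blocks of 8 plus a remainder of 3 */
        return 0;
}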
18334
18335 /**
18336  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18337  * @phba: pointer to lpfc_hba struct that the frame was received on
18338  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18339  *
18340  * This function checks the fields in the @fc_hdr to see if the FC frame is a
18341  * valid type of frame that the LPFC driver will handle. This function will
18342  * return zero if the frame is a valid frame or a non-zero value when the
18343  * frame does not pass the check.
18344  **/
18345 static int
18346 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18347 {
18349         struct fc_vft_header *fc_vft_hdr;
18350         uint32_t *header = (uint32_t *) fc_hdr;
18351
18352 #define FC_RCTL_MDS_DIAGS       0xF4
18353
18354         switch (fc_hdr->fh_r_ctl) {
18355         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
18356         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
18357         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
18358         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
18359         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
18360         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
18361         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
18362         case FC_RCTL_DD_CMD_STATUS:     /* command status */
18363         case FC_RCTL_ELS_REQ:   /* extended link services request */
18364         case FC_RCTL_ELS_REP:   /* extended link services reply */
18365         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
18366         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
18367         case FC_RCTL_BA_ABTS:   /* basic link service abort */
18368         case FC_RCTL_BA_RMC:    /* remove connection */
18369         case FC_RCTL_BA_ACC:    /* basic accept */
18370         case FC_RCTL_BA_RJT:    /* basic reject */
18371         case FC_RCTL_BA_PRMT:
18372         case FC_RCTL_ACK_1:     /* acknowledge_1 */
18373         case FC_RCTL_ACK_0:     /* acknowledge_0 */
18374         case FC_RCTL_P_RJT:     /* port reject */
18375         case FC_RCTL_F_RJT:     /* fabric reject */
18376         case FC_RCTL_P_BSY:     /* port busy */
18377         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
18378         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
18379         case FC_RCTL_LCR:       /* link credit reset */
18380         case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18381         case FC_RCTL_END:       /* end */
18382                 break;
18383         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
18384                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18385                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18386                 return lpfc_fc_frame_check(phba, fc_hdr);
18387         case FC_RCTL_BA_NOP:    /* basic link service NOP */
18388         default:
18389                 goto drop;
18390         }
18391
18392         switch (fc_hdr->fh_type) {
18393         case FC_TYPE_BLS:
18394         case FC_TYPE_ELS:
18395         case FC_TYPE_FCP:
18396         case FC_TYPE_CT:
18397         case FC_TYPE_NVME:
18398                 break;
18399         case FC_TYPE_IP:
18400         case FC_TYPE_ILS:
18401         default:
18402                 goto drop;
18403         }
18404
18405         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18406                         "2538 Received frame rctl:x%x, type:x%x, "
18407                         "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18408                         fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18409                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18410                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18411                         be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18412                         be32_to_cpu(header[6]));
18413         return 0;
18414 drop:
18415         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18416                         "2539 Dropped frame rctl:x%x type:x%x\n",
18417                         fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18418         return 1;
18419 }
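/*
 * Standalone sketch of the accept/drop filter above: a frame is handled only
 * if both its R_CTL and TYPE values fall in the accepted sets. The constant
 * values below follow FC-FS (an abbreviated subset); this is an illustrative
 * analogue, not the driver routine itself.
 */
#include <stdint.h>
#include <stdio.h>

static int frame_ok(uint8_t r_ctl, uint8_t type)
{
        switch (r_ctl) {
        case 0x00:      /* FC_RCTL_DD_UNCAT */
        case 0x22:      /* FC_RCTL_ELS_REQ */
        case 0x23:      /* FC_RCTL_ELS_REP */
        case 0x81:      /* FC_RCTL_BA_ABTS */
                break;                  /* accepted R_CTL (subset) */
        default:
                return 0;               /* drop */
        }

        switch (type) {
        case 0x01:      /* FC_TYPE_ELS */
        case 0x08:      /* FC_TYPE_FCP */
        case 0x20:      /* FC_TYPE_CT */
        case 0x28:      /* FC_TYPE_NVME */
                return 1;               /* accepted TYPE (subset) */
        default:
                return 0;               /* drop, e.g. FC_TYPE_IP */
        }
}

int main(void)
{
        printf("ELS_REQ/ELS -> %d\n", frame_ok(0x22, 0x01));    /* 1: handle */
        printf("ELS_REQ/IP  -> %d\n", frame_ok(0x22, 0x05));    /* 0: drop */
        return 0;
}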
18420
18421 /**
18422  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18423  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18424  *
18425  * This function processes the FC header to retrieve the VFI from the VF
18426  * header, if one exists. This function will return the VFI if one exists
18427  * or 0 if no VFT header exists.
18428  **/
18429 static uint32_t
18430 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18431 {
18432         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18433
18434         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18435                 return 0;
18436         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18437 }
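/*
 * bf_get() above is a driver macro that pulls a bit-field out of a structure
 * word using a per-field SHIFT and MASK. A minimal standalone analogue is
 * below; the shift and mask values are made up for illustration and are not
 * the real fc_vft_hdr_vf_id layout.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_VF_ID_SHIFT  1
#define EX_VF_ID_MASK   0xfff

static uint32_t ex_bf_get(uint32_t word, unsigned int shift, uint32_t mask)
{
        return (word >> shift) & mask;
}

int main(void)
{
        /* pack a VF_ID of 0x2a into the example field ... */
        uint32_t word = (0x2a & EX_VF_ID_MASK) << EX_VF_ID_SHIFT;

        /* ... and extract it again, as lpfc_fc_hdr_get_vfi() does */
        printf("vf_id = 0x%x\n",
               ex_bf_get(word, EX_VF_ID_SHIFT, EX_VF_ID_MASK));
        return 0;
}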
18438
18439 /**
18440  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18441  * @phba: Pointer to the HBA structure to search for the vport on
18442  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18443  * @fcfi: The FC Fabric ID that the frame came from
18444  * @did: Destination ID to match against
18445  *
18446  * This function searches the @phba for a vport that matches the content of the
18447  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18448  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18449  * returns the matching vport pointer or NULL if unable to match frame to a
18450  * vport.
18451  **/
18452 static struct lpfc_vport *
18453 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18454                        uint16_t fcfi, uint32_t did)
18455 {
18456         struct lpfc_vport **vports;
18457         struct lpfc_vport *vport = NULL;
18458         int i;
18459
18460         if (did == Fabric_DID)
18461                 return phba->pport;
18462         if ((phba->pport->fc_flag & FC_PT2PT) &&
18463                 !(phba->link_state == LPFC_HBA_READY))
18464                 return phba->pport;
18465
18466         vports = lpfc_create_vport_work_array(phba);
18467         if (vports != NULL) {
18468                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18469                         if (phba->fcf.fcfi == fcfi &&
18470                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18471                             vports[i]->fc_myDID == did) {
18472                                 vport = vports[i];
18473                                 break;
18474                         }
18475                 }
18476         }
18477         lpfc_destroy_vport_work_array(phba, vports);
18478         return vport;
18479 }
18480
18481 /**
18482  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18483  * @vport: The vport to work on.
18484  *
18485  * This function updates the receive sequence time stamp for this vport. The
18486  * receive sequence time stamp indicates the time that the last frame of
18487  * the sequence that has been idle for the longest amount of time was received.
18488  * The driver uses this time stamp to indicate if any received sequences have
18489  * timed out.
18490  **/
18491 static void
18492 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18493 {
18494         struct lpfc_dmabuf *h_buf;
18495         struct hbq_dmabuf *dmabuf = NULL;
18496
18497         /* get the oldest sequence on the rcv list */
18498         h_buf = list_get_first(&vport->rcv_buffer_list,
18499                                struct lpfc_dmabuf, list);
18500         if (!h_buf)
18501                 return;
18502         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18503         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18504 }
18505
18506 /**
18507  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18508  * @vport: The vport that the received sequences were sent to.
18509  *
18510  * This function cleans up all outstanding received sequences. This is called
18511  * by the driver when a link event or user action invalidates all the received
18512  * sequences.
18513  **/
18514 void
18515 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18516 {
18517         struct lpfc_dmabuf *h_buf, *hnext;
18518         struct lpfc_dmabuf *d_buf, *dnext;
18519         struct hbq_dmabuf *dmabuf = NULL;
18520
18521         /* start with the oldest sequence on the rcv list */
18522         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18523                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18524                 list_del_init(&dmabuf->hbuf.list);
18525                 list_for_each_entry_safe(d_buf, dnext,
18526                                          &dmabuf->dbuf.list, list) {
18527                         list_del_init(&d_buf->list);
18528                         lpfc_in_buf_free(vport->phba, d_buf);
18529                 }
18530                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18531         }
18532 }
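/*
 * The cleanup above relies on list_for_each_entry_safe(), which caches the
 * next node before the current one is freed so deletion during traversal is
 * safe. A minimal standalone analogue of that pattern on a singly linked
 * list (illustrative only):
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int id;
};

static void free_all(struct node **head)
{
        struct node *cur = *head, *next;

        while (cur) {
                next = cur->next;       /* saved before cur is freed */
                printf("freeing node %d\n", cur->id);
                free(cur);
                cur = next;
        }
        *head = NULL;
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->id = i;
                n->next = head;
                head = n;
        }
        free_all(&head);
        return 0;
}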
18533
18534 /**
18535  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18536  * @vport: The vport that the received sequences were sent to.
18537  *
18538  * This function determines whether any received sequences have timed out by
18539  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18540  * indicates that there is at least one timed out sequence this routine will
18541  * go through the received sequences one at a time from most inactive to most
18542  * active to determine which ones need to be cleaned up. Once it has determined
18543  * that a sequence needs to be cleaned up it will simply free up the resources
18544  * without sending an abort.
18545  **/
18546 void
18547 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18548 {
18549         struct lpfc_dmabuf *h_buf, *hnext;
18550         struct lpfc_dmabuf *d_buf, *dnext;
18551         struct hbq_dmabuf *dmabuf = NULL;
18552         unsigned long timeout;
18553         int abort_count = 0;
18554
18555         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18556                    vport->rcv_buffer_time_stamp);
18557         if (list_empty(&vport->rcv_buffer_list) ||
18558             time_before(jiffies, timeout))
18559                 return;
18560         /* start with the oldest sequence on the rcv list */
18561         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18562                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18563                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18564                            dmabuf->time_stamp);
18565                 if (time_before(jiffies, timeout))
18566                         break;
18567                 abort_count++;
18568                 list_del_init(&dmabuf->hbuf.list);
18569                 list_for_each_entry_safe(d_buf, dnext,
18570                                          &dmabuf->dbuf.list, list) {
18571                         list_del_init(&d_buf->list);
18572                         lpfc_in_buf_free(vport->phba, d_buf);
18573                 }
18574                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18575         }
18576         if (abort_count)
18577                 lpfc_update_rcv_time_stamp(vport);
18578 }
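/*
 * The timeout tests above use time_before(), which stays correct when the
 * jiffies counter wraps because it compares via signed subtraction. A
 * standalone sketch of the same idiom:
 */
#include <stdio.h>

typedef unsigned long jiffies_t;

/* same idiom as the kernel's time_before(a, b) */
#define ex_time_before(a, b)    ((long)((a) - (b)) < 0)

int main(void)
{
        jiffies_t now = (jiffies_t)-5;  /* counter just before wrapping */
        jiffies_t timeout = now + 10;   /* deadline lands past the wrap */

        /* still correctly reports "not yet expired" despite the wrap */
        printf("expired: %s\n", ex_time_before(now, timeout) ? "no" : "yes");
        return 0;
}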
18579
18580 /**
18581  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18582  * @vport: pointer to a virtual port
18583  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18584  *
18585  * This function searches through the existing incomplete sequences that have
18586  * been sent to this @vport. If the frame matches one of the incomplete
18587  * sequences then the dbuf in the @dmabuf is added to the list of frames that
18588  * make up that sequence. If no sequence is found that matches this frame then
18589  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
18590  * This function returns a pointer to the first dmabuf in the sequence list that
18591  * the frame was linked to.
18592  **/
18593 static struct hbq_dmabuf *
18594 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18595 {
18596         struct fc_frame_header *new_hdr;
18597         struct fc_frame_header *temp_hdr;
18598         struct lpfc_dmabuf *d_buf;
18599         struct lpfc_dmabuf *h_buf;
18600         struct hbq_dmabuf *seq_dmabuf = NULL;
18601         struct hbq_dmabuf *temp_dmabuf = NULL;
18602         uint8_t found = 0;
18603
18604         INIT_LIST_HEAD(&dmabuf->dbuf.list);
18605         dmabuf->time_stamp = jiffies;
18606         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18607
18608         /* Use the hdr_buf to find the sequence that this frame belongs to */
18609         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18610                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18611                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18612                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18613                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18614                         continue;
18615                 /* found a pending sequence that matches this frame */
18616                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18617                 break;
18618         }
18619         if (!seq_dmabuf) {
18620                 /*
18621                  * This indicates first frame received for this sequence.
18622                  * Queue the buffer on the vport's rcv_buffer_list.
18623                  */
18624                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18625                 lpfc_update_rcv_time_stamp(vport);
18626                 return dmabuf;
18627         }
18628         temp_hdr = seq_dmabuf->hbuf.virt;
18629         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18630                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18631                 list_del_init(&seq_dmabuf->hbuf.list);
18632                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18633                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18634                 lpfc_update_rcv_time_stamp(vport);
18635                 return dmabuf;
18636         }
18637         /* move this sequence to the tail to indicate a young sequence */
18638         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18639         seq_dmabuf->time_stamp = jiffies;
18640         lpfc_update_rcv_time_stamp(vport);
18641         if (list_empty(&seq_dmabuf->dbuf.list)) {
18642                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18643                 return seq_dmabuf;
18644         }
18645         /* find the correct place in the sequence to insert this frame */
18646         d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18647         while (!found) {
18648                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18649                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18650                 /*
18651                  * If the frame's sequence count is greater than the frame on
18652                  * the list then insert the frame right after this frame
18653                  */
18654                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18655                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18656                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18657                         found = 1;
18658                         break;
18659                 }
18660
18661                 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18662                         break;
18663                 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18664         }
18665
18666         if (found)
18667                 return seq_dmabuf;
18668         return NULL;
18669 }
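/*
 * Standalone sketch of the insertion policy above: frames of a sequence are
 * kept sorted by SEQ_CNT, and a new frame is placed by walking from the tail
 * (highest SEQ_CNT) toward the head until a smaller count is found. An
 * array-backed analogue of that backwards walk (illustrative only):
 */
#include <stdio.h>

static void insert_sorted(int *cnts, int *n, int val)
{
        int i = *n - 1;

        /* walk backwards past every entry larger than the new count */
        while (i >= 0 && cnts[i] > val)
                i--;
        /* shift the tail right and place the new count after cnts[i] */
        for (int j = *n; j > i + 1; j--)
                cnts[j] = cnts[j - 1];
        cnts[i + 1] = val;
        (*n)++;
}

int main(void)
{
        int cnts[8] = { 0, 1, 3, 4 };
        int n = 4;

        insert_sorted(cnts, &n, 2);     /* fills the hole left by a late frame */
        for (int i = 0; i < n; i++)
                printf("%d ", cnts[i]); /* prints: 0 1 2 3 4 */
        printf("\n");
        return 0;
}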
18670
18671 /**
18672  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18673  * @vport: pointer to a virtual port
18674  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18675  *
18676  * This function tries to abort the partially assembled sequence described
18677  * by the information from the basic abort @dmabuf. It checks whether such a
18678  * partially assembled sequence is held by the driver. If so, it frees up all
18679  * the frames from the partially assembled sequence.
18680  *
18681  * Return
18682  * true  -- if there is matching partially assembled sequence present and all
18683  *          the frames freed with the sequence;
18684  * false -- if there is no matching partially assembled sequence present so
18685  *          nothing got aborted in the lower layer driver
18686  **/
18687 static bool
18688 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18689                             struct hbq_dmabuf *dmabuf)
18690 {
18691         struct fc_frame_header *new_hdr;
18692         struct fc_frame_header *temp_hdr;
18693         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18694         struct hbq_dmabuf *seq_dmabuf = NULL;
18695
18696         /* Use the hdr_buf to find the sequence that matches this frame */
18697         INIT_LIST_HEAD(&dmabuf->dbuf.list);
18698         INIT_LIST_HEAD(&dmabuf->hbuf.list);
18699         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18700         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18701                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18702                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18703                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18704                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18705                         continue;
18706                 /* found a pending sequence that matches this frame */
18707                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18708                 break;
18709         }
18710
18711         /* Free up all the frames from the partially assembled sequence */
18712         if (seq_dmabuf) {
18713                 list_for_each_entry_safe(d_buf, n_buf,
18714                                          &seq_dmabuf->dbuf.list, list) {
18715                         list_del_init(&d_buf->list);
18716                         lpfc_in_buf_free(vport->phba, d_buf);
18717                 }
18718                 return true;
18719         }
18720         return false;
18721 }
18722
18723 /**
18724  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18725  * @vport: pointer to a virtual port
18726  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18727  *
18728  * This function tries to abort the assembled sequence at the upper level
18729  * protocol, described by the information from the basic abort @dmabuf. It
18730  * checks whether such a pending context exists at the upper level protocol.
18731  * If so, it shall clean up the pending context.
18732  *
18733  * Return
18734  * true  -- if there is matching pending context of the sequence cleaned
18735  *          at ulp;
18736  * false -- if there is no matching pending context of the sequence present
18737  *          at ulp.
18738  **/
18739 static bool
18740 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18741 {
18742         struct lpfc_hba *phba = vport->phba;
18743         int handled;
18744
18745         /* Accepting abort at ulp with SLI4 only */
18746         if (phba->sli_rev < LPFC_SLI_REV4)
18747                 return false;
18748
18749         /* Give all interested upper level protocols a chance to handle it */
18750         handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18751         if (handled)
18752                 return true;
18753
18754         return false;
18755 }
18756
18757 /**
18758  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18759  * @phba: Pointer to HBA context object.
18760  * @cmd_iocbq: pointer to the command iocbq structure.
18761  * @rsp_iocbq: pointer to the response iocbq structure.
18762  *
18763  * This function handles the sequence abort response iocb command complete
18764  * event. It properly releases the memory allocated to the sequence abort
18765  * accept iocb.
18766  **/
18767 static void
18768 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18769                              struct lpfc_iocbq *cmd_iocbq,
18770                              struct lpfc_iocbq *rsp_iocbq)
18771 {
18772         struct lpfc_nodelist *ndlp;
18773
18774         if (cmd_iocbq) {
18775                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18776                 lpfc_nlp_put(ndlp);
18777                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18778         }
18779
18780         /* Failure means BLS ABORT RSP did not get delivered to remote node */
18781         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18782                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18783                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
18784                         rsp_iocbq->iocb.ulpStatus,
18785                         rsp_iocbq->iocb.un.ulpWord[4]);
18786 }
18787
18788 /**
18789  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18790  * @phba: Pointer to HBA context object.
18791  * @xri: xri id in transaction.
18792  *
18793  * This function validates that the xri maps to the known range of XRIs
18794  * allocated and used by the driver.
18795  **/
18796 uint16_t
18797 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18798                       uint16_t xri)
18799 {
18800         uint16_t i;
18801
18802         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18803                 if (xri == phba->sli4_hba.xri_ids[i])
18804                         return i;
18805         }
18806         return NO_XRI;
18807 }
18808
18809 /**
18810  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18811  * @vport: pointer to a virtual port.
18812  * @fc_hdr: pointer to a FC frame header.
18813  * @aborted: was the partially assembled receive sequence successfully aborted
18814  *
18815  * This function sends a basic response to a previous unsol sequence abort
18816  * event after aborting the sequence handling.
18817  **/
18818 void
18819 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18820                         struct fc_frame_header *fc_hdr, bool aborted)
18821 {
18822         struct lpfc_hba *phba = vport->phba;
18823         struct lpfc_iocbq *ctiocb = NULL;
18824         struct lpfc_nodelist *ndlp;
18825         uint16_t oxid, rxid, xri, lxri;
18826         uint32_t sid, fctl;
18827         IOCB_t *icmd;
18828         int rc;
18829
18830         if (!lpfc_is_link_up(phba))
18831                 return;
18832
18833         sid = sli4_sid_from_fc_hdr(fc_hdr);
18834         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18835         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18836
18837         ndlp = lpfc_findnode_did(vport, sid);
18838         if (!ndlp) {
18839                 ndlp = lpfc_nlp_init(vport, sid);
18840                 if (!ndlp) {
18841                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18842                                          "1268 Failed to allocate ndlp for "
18843                                          "oxid:x%x SID:x%x\n", oxid, sid);
18844                         return;
18845                 }
18846                 /* Put ndlp onto pport node list */
18847                 lpfc_enqueue_node(vport, ndlp);
18848         }
18849
18850         /* Allocate buffer for rsp iocb */
18851         ctiocb = lpfc_sli_get_iocbq(phba);
18852         if (!ctiocb)
18853                 return;
18854
18855         /* Extract the F_CTL field from FC_HDR */
18856         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18857
18858         icmd = &ctiocb->iocb;
18859         icmd->un.xseq64.bdl.bdeSize = 0;
18860         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
18861         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
18862         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
18863         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
18864
18865         /* Fill in the rest of iocb fields */
18866         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
18867         icmd->ulpBdeCount = 0;
18868         icmd->ulpLe = 1;
18869         icmd->ulpClass = CLASS3;
18870         icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
18871         ctiocb->context1 = lpfc_nlp_get(ndlp);
18872         if (!ctiocb->context1) {
18873                 lpfc_sli_release_iocbq(phba, ctiocb);
18874                 return;
18875         }
18876
18877         ctiocb->vport = phba->pport;
18878         ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18879         ctiocb->sli4_lxritag = NO_XRI;
18880         ctiocb->sli4_xritag = NO_XRI;
18881
18882         if (fctl & FC_FC_EX_CTX) {
18883                 /* Exchange responder sent the abort so we
18884                  * own the oxid.
18885                  */
18886                 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18887                 xri = oxid;
18888         } else {
18889                 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18890                 xri = rxid;
18891         }
18892         lxri = lpfc_sli4_xri_inrange(phba, xri);
18893         if (lxri != NO_XRI)
18894                 lpfc_set_rrq_active(phba, ndlp, lxri,
18895                         (xri == oxid) ? rxid : oxid, 0);
18896         /* For BA_ABTS from exchange responder, if the logical xri with
18897          * the oxid maps to the FCP XRI range, the port no longer has
18898          * that exchange context, send a BLS_RJT. Override the IOCB for
18899          * a BA_RJT.
18900          */
18901         if ((fctl & FC_FC_EX_CTX) &&
18902             (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18903                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18904                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18905                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18906                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18907         }
18908
18909         /* If BA_ABTS failed to abort a partially assembled receive sequence,
18910          * the driver no longer has that exchange, send a BLS_RJT. Override
18911          * the IOCB for a BA_RJT.
18912          */
18913         if (aborted == false) {
18914                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18915                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18916                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18917                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18918         }
18919
18920         if (fctl & FC_FC_EX_CTX) {
18921                 /* ABTS sent by responder to CT exchange, construction
18922                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18923                  * field and RX_ID from ABTS for RX_ID field.
18924                  */
18925                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
18926         } else {
18927                 /* ABTS sent by initiator to CT exchange, construction
18928                  * of BA_ACC will need to allocate a new XRI as for the
18929                  * XRI_TAG field.
18930                  */
18931                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
18932         }
18933         bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
18934         bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
18935
18936         /* Xmit CT abts response on exchange <xid> */
18937         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18938                          "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18939                          icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
18940
18941         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18942         if (rc == IOCB_ERROR) {
18943                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18944                                  "2925 Failed to issue CT ABTS RSP x%x on "
18945                                  "xri x%x, Data x%x\n",
18946                                  icmd->un.xseq64.w5.hcsw.Rctl, oxid,
18947                                  phba->link_state);
18948                 lpfc_nlp_put(ndlp);
18949                 ctiocb->context1 = NULL;
18950                 lpfc_sli_release_iocbq(phba, ctiocb);
18951         }
18952 }
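/*
 * Condensed sketch of the response selection above: the driver answers an
 * unsolicited ABTS with BA_ACC when the abort can be honored and downgrades
 * the same IOCB to BA_RJT when the exchange is unknown or the partial
 * sequence could not be aborted. A simplified decision helper (the real
 * checks also involve FC_FC_EX_CTX and the XRI range; illustrative only):
 */
#include <stdio.h>

#define EX_BA_ACC 0x84          /* FC_RCTL_BA_ACC */
#define EX_BA_RJT 0x85          /* FC_RCTL_BA_RJT */

static int abts_rsp_rctl(int xri_known, int aborted)
{
        if (!xri_known || !aborted)
                return EX_BA_RJT;       /* no such exchange / abort failed */
        return EX_BA_ACC;               /* abort honored */
}

int main(void)
{
        printf("known + aborted  -> 0x%x\n", abts_rsp_rctl(1, 1)); /* BA_ACC */
        printf("unknown exchange -> 0x%x\n", abts_rsp_rctl(0, 1)); /* BA_RJT */
        return 0;
}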
18953
18954 /**
18955  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18956  * @vport: Pointer to the vport on which this sequence was received
18957  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18958  *
18959  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18960  * receive sequence is only partially assembled by the driver, it shall abort
18961  * the partially assembled frames for the sequence. Otherwise, if the
18962  * unsolicited receive sequence has been completely assembled and passed to
18963  * the Upper Layer Protocol (ULP), it then marks the per-oxid status to note
18964  * that the unsolicited sequence has been aborted. After that, it will issue
18965  * a basic accept to accept the abort.
18966  **/
18967 static void
18968 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18969                              struct hbq_dmabuf *dmabuf)
18970 {
18971         struct lpfc_hba *phba = vport->phba;
18972         struct fc_frame_header fc_hdr;
18973         uint32_t fctl;
18974         bool aborted;
18975
18976         /* Make a copy of fc_hdr before the dmabuf is released */
18977         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18978         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18979
18980         if (fctl & FC_FC_EX_CTX) {
18981                 /* ABTS by responder to exchange, no cleanup needed */
18982                 aborted = true;
18983         } else {
18984                 /* ABTS by initiator to exchange, need to do cleanup */
18985                 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18986                 if (aborted == false)
18987                         aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18988         }
18989         lpfc_in_buf_free(phba, &dmabuf->dbuf);
18990
18991         if (phba->nvmet_support) {
18992                 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18993                 return;
18994         }
18995
18996         /* Respond with BA_ACC or BA_RJT accordingly */
18997         lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18998 }
18999
19000 /**
19001  * lpfc_seq_complete - Indicates if a sequence is complete
19002  * @dmabuf: pointer to a dmabuf that describes the FC sequence
19003  *
19004  * This function checks the sequence, starting with the frame described by
19005  * @dmabuf, to see if all the frames associated with this sequence are present.
19006  * The frames associated with this sequence are linked to the @dmabuf using the
19007  * dbuf list. This function looks for three major things. 1) That the first
19008  * frame has a sequence count of zero. 2) There is a frame with the last frame
19009  * of sequence bit set. 3) That there are no holes in the sequence count. The
19010  * function will return 1 when the sequence is complete, otherwise it returns 0.
19011  **/
19012 static int
19013 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
19014 {
19015         struct fc_frame_header *hdr;
19016         struct lpfc_dmabuf *d_buf;
19017         struct hbq_dmabuf *seq_dmabuf;
19018         uint32_t fctl;
19019         int seq_count = 0;
19020
19021         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19022         /* make sure first frame of sequence has a sequence count of zero */
19023         if (hdr->fh_seq_cnt != seq_count)
19024                 return 0;
19025         fctl = (hdr->fh_f_ctl[0] << 16 |
19026                 hdr->fh_f_ctl[1] << 8 |
19027                 hdr->fh_f_ctl[2]);
19028         /* If last frame of sequence we can return success. */
19029         if (fctl & FC_FC_END_SEQ)
19030                 return 1;
19031         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
19032                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19033                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19034                 /* If there is a hole in the sequence count then fail. */
19035                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
19036                         return 0;
19037                 fctl = (hdr->fh_f_ctl[0] << 16 |
19038                         hdr->fh_f_ctl[1] << 8 |
19039                         hdr->fh_f_ctl[2]);
19040                 /* If last frame of sequence we can return success. */
19041                 if (fctl & FC_FC_END_SEQ)
19042                         return 1;
19043         }
19044         return 0;
19045 }
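/*
 * Standalone sketch of the completeness test above: SEQ_CNT must start at
 * zero and increase by exactly one per frame, and some frame must carry the
 * end-of-sequence bit (FC_FC_END_SEQ in F_CTL). Illustrative analogue:
 */
#include <stdio.h>

#define EX_END_SEQ 0x100000     /* FC_FC_END_SEQ bit within F_CTL */

static int seq_complete(const int *cnts, const unsigned int *fctl, int n)
{
        for (int i = 0; i < n; i++) {
                if (cnts[i] != i)
                        return 0;       /* hole in SEQ_CNT: incomplete */
                if (fctl[i] & EX_END_SEQ)
                        return 1;       /* last frame of sequence seen */
        }
        return 0;                       /* no END_SEQ frame yet */
}

int main(void)
{
        int cnts[] = { 0, 1, 2 };
        unsigned int fctl[] = { 0, 0, EX_END_SEQ };

        printf("complete: %d\n", seq_complete(cnts, fctl, 3));  /* 1 */
        return 0;
}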
19046
19047 /**
19048  * lpfc_prep_seq - Prep sequence for ULP processing
19049  * @vport: Pointer to the vport on which this sequence was received
19050  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
19051  *
19052  * This function takes a sequence, described by a list of frames, and creates
19053  * a list of iocbq structures to describe the sequence. This iocbq list will be
19054  * used to issue to the generic unsolicited sequence handler. This routine
19055  * returns a pointer to the first iocbq in the list. If the function is unable
19056  * to allocate an iocbq then it throw out the received frames that were not
19057  * able to be described and return a pointer to the first iocbq. If unable to
19058  * to allocate an iocbq then it throws out the received frames that were not
19059  * able to be described and returns a pointer to the first iocbq. If unable to
19060 static struct lpfc_iocbq *
19061 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
19062 {
19063         struct hbq_dmabuf *hbq_buf;
19064         struct lpfc_dmabuf *d_buf, *n_buf;
19065         struct lpfc_iocbq *first_iocbq, *iocbq;
19066         struct fc_frame_header *fc_hdr;
19067         uint32_t sid;
19068         uint32_t len, tot_len;
19069         struct ulp_bde64 *pbde;
19070
19071         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19072         /* remove from receive buffer list */
19073         list_del_init(&seq_dmabuf->hbuf.list);
19074         lpfc_update_rcv_time_stamp(vport);
19075         /* get the Remote Port's SID */
19076         sid = sli4_sid_from_fc_hdr(fc_hdr);
19077         tot_len = 0;
19078         /* Get an iocbq struct to fill in. */
19079         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
19080         if (first_iocbq) {
19081                 /* Initialize the first IOCB. */
19082                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
19083                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
19084                 first_iocbq->vport = vport;
19085
19086                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
19087                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
19088                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
19089                         first_iocbq->iocb.un.rcvels.parmRo =
19090                                 sli4_did_from_fc_hdr(fc_hdr);
19091                         first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
19092                 } else
19093                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
19094                 first_iocbq->iocb.ulpContext = NO_XRI;
19095                 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
19096                         be16_to_cpu(fc_hdr->fh_ox_id);
19097                 /* iocbq is prepped for internal consumption.  Physical vpi. */
19098                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
19099                         vport->phba->vpi_ids[vport->vpi];
19100                 /* put the first buffer into the first IOCBq */
19101                 tot_len = bf_get(lpfc_rcqe_length,
19102                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
19103
19104                 first_iocbq->context2 = &seq_dmabuf->dbuf;
19105                 first_iocbq->context3 = NULL;
19106                 first_iocbq->iocb.ulpBdeCount = 1;
19107                 if (tot_len > LPFC_DATA_BUF_SIZE)
19108                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
19109                                                         LPFC_DATA_BUF_SIZE;
19110                 else
19111                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
19112
19113                 first_iocbq->iocb.un.rcvels.remoteID = sid;
19114
19115                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
19116         }
19117         iocbq = first_iocbq;
19118         /*
19119          * Each IOCBq can have two Buffers assigned, so go through the list
19120          * of buffers for this sequence and save two buffers in each IOCBq
19121          */
19122         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
19123                 if (!iocbq) {
19124                         lpfc_in_buf_free(vport->phba, d_buf);
19125                         continue;
19126                 }
19127                 if (!iocbq->context3) {
19128                         iocbq->context3 = d_buf;
19129                         iocbq->iocb.ulpBdeCount++;
19130                         /* We need to get the size out of the right CQE */
19131                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19132                         len = bf_get(lpfc_rcqe_length,
19133                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
19134                         pbde = (struct ulp_bde64 *)
19135                                         &iocbq->iocb.unsli3.sli3Words[4];
19136                         if (len > LPFC_DATA_BUF_SIZE)
19137                                 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
19138                         else
19139                                 pbde->tus.f.bdeSize = len;
19140
19141                         iocbq->iocb.unsli3.rcvsli3.acc_len += len;
19142                         tot_len += len;
19143                 } else {
19144                         iocbq = lpfc_sli_get_iocbq(vport->phba);
19145                         if (!iocbq) {
19146                                 if (first_iocbq) {
19147                                         first_iocbq->iocb.ulpStatus =
19148                                                         IOSTAT_FCP_RSP_ERROR;
19149                                         first_iocbq->iocb.un.ulpWord[4] =
19150                                                         IOERR_NO_RESOURCES;
19151                                 }
19152                                 lpfc_in_buf_free(vport->phba, d_buf);
19153                                 continue;
19154                         }
19155                         /* We need to get the size out of the right CQE */
19156                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19157                         len = bf_get(lpfc_rcqe_length,
19158                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
19159                         iocbq->context2 = d_buf;
19160                         iocbq->context3 = NULL;
19161                         iocbq->iocb.ulpBdeCount = 1;
19162                         if (len > LPFC_DATA_BUF_SIZE)
19163                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
19164                                                         LPFC_DATA_BUF_SIZE;
19165                         else
19166                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
19167
19168                         tot_len += len;
19169                         iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
19170
19171                         iocbq->iocb.un.rcvels.remoteID = sid;
19172                         list_add_tail(&iocbq->list, &first_iocbq->list);
19173                 }
19174         }
19175         /* Free the sequence's header buffer */
19176         if (!first_iocbq)
19177                 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
19178
19179         return first_iocbq;
19180 }
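/*
 * Standalone sketch of the packing rule above: each iocbq carries at most two
 * buffers (context2 and context3), so a sequence of N frames consumes
 * ceil(N / 2) iocbqs. Illustrative pairing loop:
 */
#include <stdio.h>

int main(void)
{
        int nframes = 5, iocbqs = 0;

        for (int i = 0; i < nframes; i += 2) {
                iocbqs++;
                printf("iocbq %d: context2 = frame %d", iocbqs, i);
                if (i + 1 < nframes)
                        printf(", context3 = frame %d", i + 1);
                printf("\n");
        }
        printf("total iocbqs: %d (= ceil(%d / 2))\n", iocbqs, nframes);
        return 0;
}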
19181
19182 static void
19183 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
19184                           struct hbq_dmabuf *seq_dmabuf)
19185 {
19186         struct fc_frame_header *fc_hdr;
19187         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
19188         struct lpfc_hba *phba = vport->phba;
19189
19190         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19191         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
19192         if (!iocbq) {
19193                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19194                                 "2707 Ring %d handler: Failed to allocate "
19195                                 "iocb Rctl x%x Type x%x received\n",
19196                                 LPFC_ELS_RING,
19197                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19198                 return;
19199         }
19200         if (!lpfc_complete_unsol_iocb(phba,
19201                                       phba->sli4_hba.els_wq->pring,
19202                                       iocbq, fc_hdr->fh_r_ctl,
19203                                       fc_hdr->fh_type)) {
19204                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19205                                 "2540 Ring %d handler: unexpected Rctl "
19206                                 "x%x Type x%x received\n",
19207                                 LPFC_ELS_RING,
19208                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19209                 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
19210         }
19211
19212         /* Free iocb created in lpfc_prep_seq */
19213         list_for_each_entry_safe(curr_iocb, next_iocb,
19214                                  &iocbq->list, list) {
19215                 list_del_init(&curr_iocb->list);
19216                 lpfc_sli_release_iocbq(phba, curr_iocb);
19217         }
19218         lpfc_sli_release_iocbq(phba, iocbq);
19219 }
19220
19221 static void
19222 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19223                             struct lpfc_iocbq *rspiocb)
19224 {
19225         struct lpfc_dmabuf *pcmd = cmdiocb->context2;
19226
19227         if (pcmd && pcmd->virt)
19228                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19229         kfree(pcmd);
19230         lpfc_sli_release_iocbq(phba, cmdiocb);
19231         lpfc_drain_txq(phba);
19232 }
19233
19234 static void
19235 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19236                               struct hbq_dmabuf *dmabuf)
19237 {
19238         struct fc_frame_header *fc_hdr;
19239         struct lpfc_hba *phba = vport->phba;
19240         struct lpfc_iocbq *iocbq = NULL;
19241         union  lpfc_wqe *wqe;
19242         struct lpfc_dmabuf *pcmd = NULL;
19243         uint32_t frame_len;
19244         int rc;
19245         unsigned long iflags;
19246
19247         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19248         frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19249
19250         /* Send the received frame back */
19251         iocbq = lpfc_sli_get_iocbq(phba);
19252         if (!iocbq) {
19253                 /* Queue cq event and wakeup worker thread to process it */
19254                 spin_lock_irqsave(&phba->hbalock, iflags);
19255                 list_add_tail(&dmabuf->cq_event.list,
19256                               &phba->sli4_hba.sp_queue_event);
19257                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19258                 spin_unlock_irqrestore(&phba->hbalock, iflags);
19259                 lpfc_worker_wake_up(phba);
19260                 return;
19261         }
19262
19263         /* Allocate buffer for command payload */
19264         pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19265         if (pcmd)
19266                 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19267                                             &pcmd->phys);
19268         if (!pcmd || !pcmd->virt)
19269                 goto exit;
19270
19271         INIT_LIST_HEAD(&pcmd->list);
19272
19273         /* copyin the payload */
19274         memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19275
19276         /* fill in BDE's for command */
19277         iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
19278         iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
19279         iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
19280         iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
19281
19282         iocbq->context2 = pcmd;
19283         iocbq->vport = vport;
19284         iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19285         iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19286
19287         /*
19288          * Setup rest of the iocb as though it were a WQE
19289          * Build the SEND_FRAME WQE
19290          */
19291         wqe = (union lpfc_wqe *)&iocbq->iocb;
19292
19293         wqe->send_frame.frame_len = frame_len;
19294         wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
19295         wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
19296         wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
19297         wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
19298         wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
19299         wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
19300
19301         iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
19302         iocbq->iocb.ulpLe = 1;
19303         iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19304         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19305         if (rc == IOCB_ERROR)
19306                 goto exit;
19307
19308         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19309         return;
19310
19311 exit:
19312         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19313                         "2023 Unable to process MDS loopback frame\n");
19314         if (pcmd && pcmd->virt)
19315                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19316         kfree(pcmd);
19317         if (iocbq)
19318                 lpfc_sli_release_iocbq(phba, iocbq);
19319         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19320 }
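/*
 * Standalone sketch of the header copy above: the 24-byte FC header reaches
 * the SEND_FRAME WQE as six 32-bit words converted from big-endian wire
 * order to host order, mirroring the be32_to_cpu() walk in the function.
 * Userspace analogue using ntohl():
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t hdr[24] = { 0x22, 0xff, 0xff, 0xfd };   /* word 0 of a header */
        uint32_t wd[6];

        for (int i = 0; i < 6; i++) {
                uint32_t be;

                memcpy(&be, hdr + 4 * i, sizeof(be));   /* wire (big-endian) */
                wd[i] = ntohl(be);                      /* host order */
        }
        printf("fc_hdr_wd0 = 0x%08x\n", wd[0]);         /* 0x22fffffd */
        return 0;
}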
19321
19322 /**
19323  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19324  * @phba: Pointer to HBA context object.
19325  * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19326  *
19327  * This function is called with no lock held. This function processes all
19328  * the received buffers and gives it to upper layers when a received buffer
19329  * indicates that it is the final frame in the sequence. The interrupt
19330  * service routine processes received buffers at interrupt contexts.
19331  * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
19332  * appropriate receive function when the final frame in a sequence is received.
19333  **/
19334 void
19335 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19336                                  struct hbq_dmabuf *dmabuf)
19337 {
19338         struct hbq_dmabuf *seq_dmabuf;
19339         struct fc_frame_header *fc_hdr;
19340         struct lpfc_vport *vport;
19341         uint32_t fcfi;
19342         uint32_t did;
19343
19344         /* Process each received buffer */
19345         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19346
19347         if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19348             fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19349                 vport = phba->pport;
19350                 /* Handle MDS Loopback frames */
19351                 if  (!(phba->pport->load_flag & FC_UNLOADING))
19352                         lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19353                 else
19354                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19355                 return;
19356         }
19357
19358         /* check to see if this a valid type of frame */
19359         if (lpfc_fc_frame_check(phba, fc_hdr)) {
19360                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19361                 return;
19362         }
19363
19364         if ((bf_get(lpfc_cqe_code,
19365                     &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19366                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19367                               &dmabuf->cq_event.cqe.rcqe_cmpl);
19368         else
19369                 fcfi = bf_get(lpfc_rcqe_fcf_id,
19370                               &dmabuf->cq_event.cqe.rcqe_cmpl);
19371
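        /* R_CTL 0xF4 with TYPE 0xFF identifies an MDS diagnostic loopback
         * frame (see FC_RCTL_MDS_DIAGS above); echo it back on the wire.
         */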
19372         if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19373                 vport = phba->pport;
19374                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19375                                 "2023 MDS Loopback %d bytes\n",
19376                                 bf_get(lpfc_rcqe_length,
19377                                        &dmabuf->cq_event.cqe.rcqe_cmpl));
19378                 /* Handle MDS Loopback frames */
19379                 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19380                 return;
19381         }
19382
19383         /* d_id this frame is directed to */
19384         did = sli4_did_from_fc_hdr(fc_hdr);
19385
19386         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19387         if (!vport) {
19388                 /* throw out the frame */
19389                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19390                 return;
19391         }
19392
19393         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19394         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19395                 (did != Fabric_DID)) {
19396                 /*
19397                  * Throw out the frame if we are not pt2pt.
19398                  * The pt2pt protocol allows for discovery frames
19399                  * to be received without a registered VPI.
19400                  */
19401                 if (!(vport->fc_flag & FC_PT2PT) ||
19402                         (phba->link_state == LPFC_HBA_READY)) {
19403                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19404                         return;
19405                 }
19406         }
19407
19408         /* Handle the basic abort sequence (BA_ABTS) event */
19409         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19410                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19411                 return;
19412         }
19413
19414         /* Link this frame */
19415         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19416         if (!seq_dmabuf) {
19417                 /* unable to add frame to vport - throw it out */
19418                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19419                 return;
19420         }
19421         /* If not last frame in sequence continue processing frames. */
19422         if (!lpfc_seq_complete(seq_dmabuf))
19423                 return;
19424
19425         /* Send the complete sequence to the upper layer protocol */
19426         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19427 }
19428
19429 /**
19430  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19431  * @phba: pointer to lpfc hba data structure.
19432  *
19433  * This routine is invoked to post rpi header templates to the
19434  * HBA consistent with the SLI-4 interface spec.  This routine
19435  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19436  * SLI4_PAGE_SIZE / 64 rpi context headers.
19437  *
19438  * This routine does not require any locks.  Its usage is expected
19439  * to be driver load or reset recovery, when driver execution is
19440  * sequential.
19441  *
19442  * Return codes
19443  *      0 - successful
19444  *      -EIO - The mailbox failed to complete successfully.
19445  *      When this error occurs, the driver is not guaranteed
19446  *      to have any rpi regions posted to the device and
19447  *      must either attempt to repost the regions or take a
19448  *      fatal error.
19449  **/
19450 int
19451 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19452 {
19453         struct lpfc_rpi_hdr *rpi_page;
19454         uint32_t rc = 0;
19455         uint16_t lrpi = 0;
19456
19457         /* SLI4 ports that support extents do not require RPI headers. */
19458         if (!phba->sli4_hba.rpi_hdrs_in_use)
19459                 goto exit;
19460         if (phba->sli4_hba.extents_in_use)
19461                 return -EIO;
19462
19463         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19464                 /*
19465                  * Assign the rpi headers a physical rpi only if the driver
19466                  * has not initialized those resources.  A port reset only
19467                  * needs the headers posted.
19468                  */
19469                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19470                     LPFC_RPI_RSRC_RDY)
19471                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19472
19473                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19474                 if (rc != MBX_SUCCESS) {
19475                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19476                                         "2008 Error %d posting all rpi "
19477                                         "headers\n", rc);
19478                         rc = -EIO;
19479                         break;
19480                 }
19481         }
19482
19483  exit:
19484         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19485                LPFC_RPI_RSRC_RDY);
19486         return rc;
19487 }
19488
19489 /**
19490  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19491  * @phba: pointer to lpfc hba data structure.
19492  * @rpi_page:  pointer to the rpi memory region.
19493  *
19494  * This routine is invoked to post a single rpi header to the
19495  * HBA consistent with the SLI-4 interface spec.  This memory region
19496  * maps up to 64 rpi context regions.
19497  *
19498  * Return codes
19499  *      0 - successful
19500  *      -ENOMEM - No available memory
19501  *      -EIO - The mailbox failed to complete successfully.
19502  **/
19503 int
19504 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19505 {
19506         LPFC_MBOXQ_t *mboxq;
19507         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19508         uint32_t rc = 0;
19509         uint32_t shdr_status, shdr_add_status;
19510         union lpfc_sli4_cfg_shdr *shdr;
19511
19512         /* SLI4 ports that support extents do not require RPI headers. */
19513         if (!phba->sli4_hba.rpi_hdrs_in_use)
19514                 return rc;
19515         if (phba->sli4_hba.extents_in_use)
19516                 return -EIO;
19517
19518         /* The port is notified of the header region via a mailbox command. */
19519         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19520         if (!mboxq) {
19521                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19522                                 "2001 Unable to allocate memory for issuing "
19523                                 "SLI_CONFIG_SPECIAL mailbox command\n");
19524                 return -ENOMEM;
19525         }
19526
19527         /* Post this rpi memory region to the port. */
19528         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19529         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19530                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19531                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19532                          sizeof(struct lpfc_sli4_cfg_mhdr),
19533                          LPFC_SLI4_MBX_EMBED);
19534
19535
19537         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19538                rpi_page->start_rpi);
19539         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19540                hdr_tmpl, rpi_page->page_count);
19541
19542         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19543         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19544         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19545         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19546         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19547         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19548         mempool_free(mboxq, phba->mbox_mem_pool);
19549         if (shdr_status || shdr_add_status || rc) {
19550                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19551                                 "2514 POST_RPI_HDR mailbox failed with "
19552                                 "status x%x add_status x%x, mbx status x%x\n",
19553                                 shdr_status, shdr_add_status, rc);
19554                 rc = -ENXIO;
19555         } else {
19556                 /*
19557                  * The next_rpi stores the next logical modulo-64 rpi value used
19558                  * to post physical rpis in subsequent rpi postings.
19559                  */
19560                 spin_lock_irq(&phba->hbalock);
19561                 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19562                 spin_unlock_irq(&phba->hbalock);
19563         }
19564         return rc;
19565 }
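
/*
 * Illustrative pairing (not driver code): growing the rpi pool combines
 * lpfc_sli4_create_rpi_hdr() with this routine, mirroring the low-water
 * mark path in lpfc_sli4_alloc_rpi() below:
 *
 *	struct lpfc_rpi_hdr *rpi_hdr;
 *
 *	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
 *	if (rpi_hdr)
 *		lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
 */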
19566
19567 /**
19568  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19569  * @phba: pointer to lpfc hba data structure.
19570  *
19571  * This routine is invoked to allocate the next available rpi from the
19572  * driver's rpi bitmask.  If the pool of posted rpi headers is running
19573  * low, this routine also grows the pool by creating and posting
19574  * another rpi header page.
19575  *
19576  * Returns
19577  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19578  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
19579  **/
19580 int
19581 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19582 {
19583         unsigned long rpi;
19584         uint16_t max_rpi, rpi_limit;
19585         uint16_t rpi_remaining, lrpi = 0;
19586         struct lpfc_rpi_hdr *rpi_hdr;
19587         unsigned long iflag;
19588
19589         /*
19590          * Fetch the next logical rpi.  Because this index is logical,
19591          * the driver starts at 0 each time.
19592          */
19593         spin_lock_irqsave(&phba->hbalock, iflag);
19594         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19595         rpi_limit = phba->sli4_hba.next_rpi;
19596
19597         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
19598         if (rpi >= rpi_limit)
19599                 rpi = LPFC_RPI_ALLOC_ERROR;
19600         else {
19601                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19602                 phba->sli4_hba.max_cfg_param.rpi_used++;
19603                 phba->sli4_hba.rpi_count++;
19604         }
19605         lpfc_printf_log(phba, KERN_INFO,
19606                         LOG_NODE | LOG_DISCOVERY,
19607                         "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19608                         (int) rpi, max_rpi, rpi_limit);
19609
19610         /*
19611          * Don't try to allocate more rpi header regions if the device limit
19612          * has been exhausted.
19613          */
19614         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19615             (phba->sli4_hba.rpi_count >= max_rpi)) {
19616                 spin_unlock_irqrestore(&phba->hbalock, iflag);
19617                 return rpi;
19618         }
19619
19620         /*
19621          * RPI header postings are not required for SLI4 ports capable of
19622          * extents.
19623          */
19624         if (!phba->sli4_hba.rpi_hdrs_in_use) {
19625                 spin_unlock_irqrestore(&phba->hbalock, iflag);
19626                 return rpi;
19627         }
19628
19629         /*
19630          * If the driver is running low on rpi resources, allocate another
19631          * page now.  Note that the next_rpi value is used because
19632          * it represents how many are actually in use whereas max_rpi notes
19633          * how many are supported max by the device.
19634          */
19635         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19636         spin_unlock_irqrestore(&phba->hbalock, iflag);
19637         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19638                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19639                 if (!rpi_hdr) {
19640                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19641                                         "2002 Error Could not grow rpi "
19642                                         "count\n");
19643                 } else {
19644                         lrpi = rpi_hdr->start_rpi;
19645                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19646                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19647                 }
19648         }
19649
19650         return rpi;
19651 }
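
/*
 * Illustrative alloc/free pairing (not driver code): a caller checks for
 * LPFC_RPI_ALLOC_ERROR before using the rpi, and releases it with
 * lpfc_sli4_free_rpi() when the login is torn down:
 *
 *	int rpi = lpfc_sli4_alloc_rpi(phba);
 *
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOSPC;	// hypothetical caller error path
 *	// ... register and use the rpi ...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */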
19652
19653 /**
19654  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19655  * @phba: pointer to lpfc hba data structure.
19656  * @rpi: rpi to free
19657  *
19658  * This routine is invoked to release an rpi to the pool of
19659  * available rpis maintained by the driver.
19660  **/
19661 static void
19662 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19663 {
19664         /*
19665          * if the rpi value indicates a prior unreg has already
19666          * been done, skip the unreg.
19667          */
19668         if (rpi == LPFC_RPI_ALLOC_ERROR)
19669                 return;
19670
19671         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19672                 phba->sli4_hba.rpi_count--;
19673                 phba->sli4_hba.max_cfg_param.rpi_used--;
19674         } else {
19675                 lpfc_printf_log(phba, KERN_INFO,
19676                                 LOG_NODE | LOG_DISCOVERY,
19677                                 "2016 rpi %x not inuse\n",
19678                                 rpi);
19679         }
19680 }
19681
19682 /**
19683  * lpfc_sli4_free_rpi - Release an rpi for reuse.
19684  * @phba: pointer to lpfc hba data structure.
19685  * @rpi: rpi to free
19686  *
19687  * This routine is invoked to release an rpi to the pool of
19688  * available rpis maintained by the driver.
19689  **/
19690 void
19691 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19692 {
19693         spin_lock_irq(&phba->hbalock);
19694         __lpfc_sli4_free_rpi(phba, rpi);
19695         spin_unlock_irq(&phba->hbalock);
19696 }
19697
19698 /**
19699  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19700  * @phba: pointer to lpfc hba data structure.
19701  *
19702  * This routine is invoked to free the memory regions that track
19703  * rpi allocation via a bitmask.
19704  **/
19705 void
19706 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19707 {
19708         kfree(phba->sli4_hba.rpi_bmask);
19709         kfree(phba->sli4_hba.rpi_ids);
19710         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19711 }
19712
19713 /**
19714  * lpfc_sli4_resume_rpi - Resume an rpi with the port
19715  * @ndlp: pointer to lpfc nodelist data structure.
19716  * @cmpl: completion call-back.
19717  * @arg: data to load as MBox 'caller buffer information'
19718  *
19719  * This routine is invoked to issue a RESUME_RPI mailbox command to the
19720  * port for the rpi associated with @ndlp.
19721  **/
19722 int
19723 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19724         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19725 {
19726         LPFC_MBOXQ_t *mboxq;
19727         struct lpfc_hba *phba = ndlp->phba;
19728         int rc;
19729
19730         /* Allocate a mailbox for the RESUME_RPI command. */
19731         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19732         if (!mboxq)
19733                 return -ENOMEM;
19734
19735         /* If cmpl assigned, then this nlp_get pairs with
19736          * lpfc_mbx_cmpl_resume_rpi.
19737          *
19738          * Else cmpl is NULL, then this nlp_get pairs with
19739          * lpfc_sli_def_mbox_cmpl.
19740          */
19741         if (!lpfc_nlp_get(ndlp)) {
19742                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19743                                 "2122 %s: Failed to get nlp ref\n",
19744                                 __func__);
19745                 mempool_free(mboxq, phba->mbox_mem_pool);
19746                 return -EIO;
19747         }
19748
19749         /* Construct the resume rpi mailbox command for this node. */
19750         lpfc_resume_rpi(mboxq, ndlp);
19751         if (cmpl) {
19752                 mboxq->mbox_cmpl = cmpl;
19753                 mboxq->ctx_buf = arg;
19754         } else
19755                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19756         mboxq->ctx_ndlp = ndlp;
19757         mboxq->vport = ndlp->vport;
19758         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19759         if (rc == MBX_NOT_FINISHED) {
19760                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19761                                 "2010 Resume RPI Mailbox failed "
19762                                 "status %d, mbxStatus x%x\n", rc,
19763                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19764                 lpfc_nlp_put(ndlp);
19765                 mempool_free(mboxq, phba->mbox_mem_pool);
19766                 return -EIO;
19767         }
19768         return 0;
19769 }
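
/*
 * Usage sketch (illustrative only): passing a NULL @cmpl makes the mailbox
 * complete through the default lpfc_sli_def_mbox_cmpl handler, so a
 * minimal caller needs only the node:
 *
 *	if (lpfc_sli4_resume_rpi(ndlp, NULL, NULL))
 *		return -EIO;	// hypothetical caller error path
 */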
19770
19771 /**
19772  * lpfc_sli4_init_vpi - Initialize a vpi with the port
19773  * @vport: Pointer to the vport for which the vpi is being initialized
19774  *
19775  * This routine is invoked to activate a vpi with the port.
19776  *
19777  * Returns:
19778  *    0 success
19779  *    negative errno value otherwise
19780  **/
19781 int
19782 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19783 {
19784         LPFC_MBOXQ_t *mboxq;
19785         int rc = 0;
19786         int retval = MBX_SUCCESS;
19787         uint32_t mbox_tmo;
19788         struct lpfc_hba *phba = vport->phba;
19789         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19790         if (!mboxq)
19791                 return -ENOMEM;
19792         lpfc_init_vpi(phba, mboxq, vport->vpi);
19793         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19794         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19795         if (rc != MBX_SUCCESS) {
19796                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19797                                 "2022 INIT VPI Mailbox failed "
19798                                 "status %d, mbxStatus x%x\n", rc,
19799                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19800                 retval = -EIO;
19801         }
19802         if (rc != MBX_TIMEOUT)
19803                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19804
19805         return retval;
19806 }
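
/*
 * Usage sketch (illustrative only, not driver code): a vport bring-up
 * path activates its vpi with this routine before continuing discovery:
 *
 *	if (lpfc_sli4_init_vpi(vport))
 *		return -EIO;	// hypothetical caller error path
 */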
19807
19808 /**
19809  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19810  * @phba: pointer to lpfc hba data structure.
19811  * @mboxq: Pointer to mailbox object.
19812  *
19813  * This routine is the completion handler for the ADD_FCF_RECORD
19814  * nonembedded mailbox command.  It checks the mailbox subheader status
19815  * and frees the nonembedded mailbox resources.
19816  **/
19817 static void
19818 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19819 {
19820         void *virt_addr;
19821         union lpfc_sli4_cfg_shdr *shdr;
19822         uint32_t shdr_status, shdr_add_status;
19823
19824         virt_addr = mboxq->sge_array->addr[0];
19825         /* The IOCTL status is embedded in the mailbox subheader. */
19826         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19827         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19828         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19829
19830         if ((shdr_status || shdr_add_status) &&
19831                 (shdr_status != STATUS_FCF_IN_USE))
19832                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19833                         "2558 ADD_FCF_RECORD mailbox failed with "
19834                         "status x%x add_status x%x\n",
19835                         shdr_status, shdr_add_status);
19836
19837         lpfc_sli4_mbox_cmd_free(phba, mboxq);
19838 }
19839
19840 /**
19841  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19842  * @phba: pointer to lpfc hba data structure.
19843  * @fcf_record:  pointer to the initialized fcf record to add.
19844  *
19845  * This routine is invoked to manually add a single FCF record. The caller
19846  * must pass a completely initialized FCF_Record.  This routine takes
19847  * care of the nonembedded mailbox operations.
19848  **/
19849 int
19850 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19851 {
19852         int rc = 0;
19853         LPFC_MBOXQ_t *mboxq;
19854         uint8_t *bytep;
19855         void *virt_addr;
19856         struct lpfc_mbx_sge sge;
19857         uint32_t alloc_len, req_len;
19858         uint32_t fcfindex;
19859
19860         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19861         if (!mboxq) {
19862                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19863                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19864                 return -ENOMEM;
19865         }
19866
19867         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19868                   sizeof(uint32_t);
19869
19870         /* Allocate DMA memory and set up the non-embedded mailbox command */
19871         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19872                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19873                                      req_len, LPFC_SLI4_MBX_NEMBED);
19874         if (alloc_len < req_len) {
19875                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19876                         "2523 Allocated DMA memory size (x%x) is "
19877                         "less than the requested DMA memory "
19878                         "size (x%x)\n", alloc_len, req_len);
19879                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19880                 return -ENOMEM;
19881         }
19882
19883         /*
19884          * Get the first SGE entry from the non-embedded DMA memory.  This
19885          * routine only uses a single SGE.
19886          */
19887         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19888         virt_addr = mboxq->sge_array->addr[0];
19889         /*
19890          * Configure the FCF record for FCFI 0.  This is the driver's
19891          * hardcoded default and gets used in nonFIP mode.
19892          * hardcoded default and gets used in non-FIP mode.
19893         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19894         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19895         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19896
19897         /*
19898          * Copy the fcf_index and the FCF Record Data. The data starts after
19899          * the FCoE header plus word10. The data copy needs to be endian
19900          * correct.
19901          */
19902         bytep += sizeof(uint32_t);
19903         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19904         mboxq->vport = phba->pport;
19905         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19906         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19907         if (rc == MBX_NOT_FINISHED) {
19908                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19909                         "2515 ADD_FCF_RECORD mailbox failed with "
19910                         "status 0x%x\n", rc);
19911                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19912                 rc = -EIO;
19913         } else
19914                 rc = 0;
19915
19916         return rc;
19917 }
19918
19919 /**
19920  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19921  * @phba: pointer to lpfc hba data structure.
19922  * @fcf_record:  pointer to the fcf record to write the default data.
19923  * @fcf_index: FCF table entry index.
19924  *
19925  * This routine is invoked to build the driver's default FCF record.  The
19926  * values used are hardcoded.  This routine handles memory initialization.
19927  *
19928  **/
19929 void
19930 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19931                                 struct fcf_record *fcf_record,
19932                                 uint16_t fcf_index)
19933 {
19934         memset(fcf_record, 0, sizeof(struct fcf_record));
19935         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19936         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19937         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19938         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19939         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19940         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19941         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19942         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19943         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19944         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19945         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19946         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19947         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19948         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19949         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19950         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19951                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19952         /* Set the VLAN bit map */
19953         if (phba->valid_vlan) {
19954                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19955                         = 1 << (phba->vlan_id % 8);
19956         }
19957 }
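
/*
 * Illustrative pairing (not driver code): the default record built above
 * is normally handed straight to lpfc_sli4_add_fcf_record() with the
 * driver's hardcoded FCF index 0 for non-FIP operation:
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *	if (lpfc_sli4_add_fcf_record(phba, &fcf_record))
 *		return -EIO;	// hypothetical caller error path
 */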
19958
19959 /**
19960  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19961  * @phba: pointer to lpfc hba data structure.
19962  * @fcf_index: FCF table entry offset.
19963  *
19964  * This routine is invoked to scan the entire FCF table by reading FCF
19965  * record and processing it one at a time starting from the @fcf_index
19966  * for initial FCF discovery or fast FCF failover rediscovery.
19967  *
19968  * Return 0 if the mailbox command is submitted successfully, non-zero
19969  * otherwise.
19970  **/
19971 int
19972 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19973 {
19974         int rc = 0, error;
19975         LPFC_MBOXQ_t *mboxq;
19976
19977         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19978         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19979         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19980         if (!mboxq) {
19981                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19982                                 "2000 Failed to allocate mbox for "
19983                                 "READ_FCF cmd\n");
19984                 error = -ENOMEM;
19985                 goto fail_fcf_scan;
19986         }
19987         /* Construct the read FCF record mailbox command */
19988         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19989         if (rc) {
19990                 error = -EINVAL;
19991                 goto fail_fcf_scan;
19992         }
19993         /* Issue the mailbox command asynchronously */
19994         mboxq->vport = phba->pport;
19995         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19996
19997         spin_lock_irq(&phba->hbalock);
19998         phba->hba_flag |= FCF_TS_INPROG;
19999         spin_unlock_irq(&phba->hbalock);
20000
20001         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20002         if (rc == MBX_NOT_FINISHED)
20003                 error = -EIO;
20004         else {
20005                 /* Reset eligible FCF count for new scan */
20006                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20007                         phba->fcf.eligible_fcf_cnt = 0;
20008                 error = 0;
20009         }
20010 fail_fcf_scan:
20011         if (error) {
20012                 if (mboxq)
20013                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
20014                 /* FCF scan failed, clear FCF_TS_INPROG flag */
20015                 spin_lock_irq(&phba->hbalock);
20016                 phba->hba_flag &= ~FCF_TS_INPROG;
20017                 spin_unlock_irq(&phba->hbalock);
20018         }
20019         return error;
20020 }
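
/*
 * Usage sketch (illustrative only): an initial FCF discovery scan starts
 * from LPFC_FCOE_FCF_GET_FIRST, which also resets the eligible FCF count
 * as noted above:
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		return rc;	// scan not started; FCF_TS_INPROG was cleared
 */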
20021
20022 /**
20023  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20024  * @phba: pointer to lpfc hba data structure.
20025  * @fcf_index: FCF table entry offset.
20026  *
20027  * This routine is invoked to read an FCF record indicated by @fcf_index
20028  * and to use it for FLOGI roundrobin FCF failover.
20029  *
20030  * Return 0 if the mailbox command is submitted successfully, non-zero
20031  * otherwise.
20032  **/
20033 int
20034 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20035 {
20036         int rc = 0, error;
20037         LPFC_MBOXQ_t *mboxq;
20038
20039         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20040         if (!mboxq) {
20041                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20042                                 "2763 Failed to allocate mbox for "
20043                                 "READ_FCF cmd\n");
20044                 error = -ENOMEM;
20045                 goto fail_fcf_read;
20046         }
20047         /* Construct the read FCF record mailbox command */
20048         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20049         if (rc) {
20050                 error = -EINVAL;
20051                 goto fail_fcf_read;
20052         }
20053         /* Issue the mailbox command asynchronously */
20054         mboxq->vport = phba->pport;
20055         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20056         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20057         if (rc == MBX_NOT_FINISHED)
20058                 error = -EIO;
20059         else
20060                 error = 0;
20061
20062 fail_fcf_read:
20063         if (error && mboxq)
20064                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20065         return error;
20066 }
20067
20068 /**
20069  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20070  * @phba: pointer to lpfc hba data structure.
20071  * @fcf_index: FCF table entry offset.
20072  *
20073  * This routine is invoked to read an FCF record indicated by @fcf_index to
20074  * determine whether it's eligible for FLOGI roundrobin failover list.
20075  *
20076  * Return 0 if the mailbox command is submitted successfully, non-zero
20077  * otherwise.
20078  **/
20079 int
20080 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20081 {
20082         int rc = 0, error;
20083         LPFC_MBOXQ_t *mboxq;
20084
20085         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20086         if (!mboxq) {
20087                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20088                                 "2758 Failed to allocate mbox for "
20089                                 "READ_FCF cmd\n");
20090                 error = -ENOMEM;
20091                 goto fail_fcf_read;
20092         }
20093         /* Construct the read FCF record mailbox command */
20094         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20095         if (rc) {
20096                 error = -EINVAL;
20097                 goto fail_fcf_read;
20098         }
20099         /* Issue the mailbox command asynchronously */
20100         mboxq->vport = phba->pport;
20101         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20102         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20103         if (rc == MBX_NOT_FINISHED)
20104                 error = -EIO;
20105         else
20106                 error = 0;
20107
20108 fail_fcf_read:
20109         if (error && mboxq)
20110                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20111         return error;
20112 }
20113
20114 /**
20115  * lpfc_check_next_fcf_pri_level - Populate rr_bmask from next priority level
20116  * @phba: pointer to the lpfc_hba struct for this port.
20117  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
20118  * routine when the rr_bmask is empty. The FCF indices are put into the
20119  * rr_bmask based on their priority level, starting from the highest
20120  * priority down to the lowest; the most likely FCF candidate will be in
20121  * the highest priority group. When this routine is called it searches the
20122  * fcf_pri list for the next lowest priority group and repopulates the
20123  * rr_bmask with only those fcf_indexes.
20124  * returns:
20125  * 1=success 0=failure
20126  **/
20127 static int
20128 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20129 {
20130         uint16_t next_fcf_pri;
20131         uint16_t last_index;
20132         struct lpfc_fcf_pri *fcf_pri;
20133         int rc;
20134         int ret = 0;
20135
20136         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20137                         LPFC_SLI4_FCF_TBL_INDX_MAX);
20138         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20139                         "3060 Last IDX %d\n", last_index);
20140
20141         /* Verify the priority list has 2 or more entries */
20142         spin_lock_irq(&phba->hbalock);
20143         if (list_empty(&phba->fcf.fcf_pri_list) ||
20144             list_is_singular(&phba->fcf.fcf_pri_list)) {
20145                 spin_unlock_irq(&phba->hbalock);
20146                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20147                         "3061 Last IDX %d\n", last_index);
20148                 return 0; /* Empty rr list */
20149         }
20150         spin_unlock_irq(&phba->hbalock);
20151
20152         next_fcf_pri = 0;
20153         /*
20154          * Clear the rr_bmask and set all of the bits that are at this
20155          * priority.
20156          */
20157         memset(phba->fcf.fcf_rr_bmask, 0,
20158                         sizeof(*phba->fcf.fcf_rr_bmask));
20159         spin_lock_irq(&phba->hbalock);
20160         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20161                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20162                         continue;
20163                 /*
20164                  * The first priority that has not had a FLOGI failure
20165                  * will be the highest.
20166                  */
20167                 if (!next_fcf_pri)
20168                         next_fcf_pri = fcf_pri->fcf_rec.priority;
20169                 spin_unlock_irq(&phba->hbalock);
20170                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20171                         rc = lpfc_sli4_fcf_rr_index_set(phba,
20172                                                 fcf_pri->fcf_rec.fcf_index);
20173                         if (rc)
20174                                 return 0;
20175                 }
20176                 spin_lock_irq(&phba->hbalock);
20177         }
20178         /*
20179          * If next_fcf_pri was not set above and the list is not empty, then
20180          * FLOGI has failed on all of them.  So reset the FLOGI-failed flags
20181          * and start over from the beginning.
20182          */
20183         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20184                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20185                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20186                         /*
20187                          * The first priority that has not had a FLOGI
20188                          * failure will be the highest.
20189                          */
20190                         if (!next_fcf_pri)
20191                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
20192                         spin_unlock_irq(&phba->hbalock);
20193                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20194                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
20195                                                 fcf_pri->fcf_rec.fcf_index);
20196                                 if (rc)
20197                                         return 0;
20198                         }
20199                         spin_lock_irq(&phba->hbalock);
20200                 }
20201         } else
20202                 ret = 1;
20203         spin_unlock_irq(&phba->hbalock);
20204
20205         return ret;
20206 }
20207 /**
20208  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20209  * @phba: pointer to lpfc hba data structure.
20210  *
20211  * This routine is to get the next eligible FCF record index in a round
20212  * robin fashion. If the next eligible FCF record index equals to the
20213  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20214  * shall be returned, otherwise, the next eligible FCF record's index
20215  * shall be returned.
20216  **/
20217 uint16_t
20218 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20219 {
20220         uint16_t next_fcf_index;
20221
20222 initial_priority:
20223         /* Search start from next bit of currently registered FCF index */
20224         next_fcf_index = phba->fcf.current_rec.fcf_indx;
20225
20226 next_priority:
20227         /* Determine the next fcf index to check */
20228         next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20229         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20230                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
20231                                        next_fcf_index);
20232
20233         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
20234         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20235                 /*
20236                  * If we have wrapped then we need to clear the bits that
20237                  * have been tested so that we can detect when we should
20238                  * change the priority level.
20239                  */
20240                 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20241                                                LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
20242         }
20243
20245         /* Check roundrobin failover list empty condition */
20246         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20247                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20248                 /*
20249                  * If the next fcf index is not found, check if there are
20250                  * lower priority level fcf's in the fcf_priority list.
20251                  * Set up the rr_bmask with all of the available fcf bits
20252                  * at that level and continue the selection process.
20253                  */
20254                 if (lpfc_check_next_fcf_pri_level(phba))
20255                         goto initial_priority;
20256                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20257                                 "2844 No roundrobin failover FCF available\n");
20258
20259                 return LPFC_FCOE_FCF_NEXT_NONE;
20260         }
20261
20262         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20263                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20264                 LPFC_FCF_FLOGI_FAILED) {
20265                 if (list_is_singular(&phba->fcf.fcf_pri_list))
20266                         return LPFC_FCOE_FCF_NEXT_NONE;
20267
20268                 goto next_priority;
20269         }
20270
20271         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20272                         "2845 Get next roundrobin failover FCF (x%x)\n",
20273                         next_fcf_index);
20274
20275         return next_fcf_index;
20276 }
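
/*
 * Illustrative roundrobin walk (not driver code): a failover caller asks
 * for the next eligible index and stops when LPFC_FCOE_FCF_NEXT_NONE
 * signals that the list is exhausted:
 *
 *	uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		return -ENOENT;	// hypothetical: no failover candidate
 *	rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 */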
20277
20278 /**
20279  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20280  * @phba: pointer to lpfc hba data structure.
20281  * @fcf_index: index into the FCF table to 'set'
20282  *
20283  * This routine sets the FCF record index in to the eligible bmask for
20284  * roundrobin failover search. It checks to make sure that the index
20285  * does not go beyond the range of the driver allocated bmask dimension
20286  * before setting the bit.
20287  *
20288  * Returns 0 if the index bit successfully set, otherwise, it returns
20289  * -EINVAL.
20290  **/
20291 int
20292 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20293 {
20294         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20295                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20296                                 "2610 FCF (x%x) reached driver's book "
20297                                 "keeping dimension:x%x\n",
20298                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20299                 return -EINVAL;
20300         }
20301         /* Set the eligible FCF record index bmask */
20302         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20303
20304         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20305                         "2790 Set FCF (x%x) to roundrobin FCF failover "
20306                         "bmask\n", fcf_index);
20307
20308         return 0;
20309 }
20310
20311 /**
20312  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20313  * @phba: pointer to lpfc hba data structure.
20314  * @fcf_index: index into the FCF table to 'clear'
20315  *
20316  * This routine clears the FCF record index from the eligible bmask for
20317  * roundrobin failover search. It checks to make sure that the index
20318  * does not go beyond the range of the driver allocated bmask dimension
20319  * before clearing the bit.
20320  **/
20321 void
20322 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20323 {
20324         struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20325         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20326                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20327                                 "2762 FCF (x%x) reached driver's book "
20328                                 "keeping dimension:x%x\n",
20329                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20330                 return;
20331         }
20332         /* Clear the eligible FCF record index bmask */
20333         spin_lock_irq(&phba->hbalock);
20334         list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20335                                  list) {
20336                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20337                         list_del_init(&fcf_pri->list);
20338                         break;
20339                 }
20340         }
20341         spin_unlock_irq(&phba->hbalock);
20342         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20343
20344         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20345                         "2791 Clear FCF (x%x) from roundrobin failover "
20346                         "bmask\n", fcf_index);
20347 }
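
/*
 * Illustrative set/clear pairing (not driver code): an FCF record found
 * eligible during a table read is added to the roundrobin bmask, and
 * removed again once it fails FLOGI or is deregistered:
 *
 *	if (!lpfc_sli4_fcf_rr_index_set(phba, fcf_index)) {
 *		// ... attempt FLOGI against this FCF ...
 *		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
 *	}
 */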
20348
20349 /**
20350  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20351  * @phba: pointer to lpfc hba data structure.
20352  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20353  *
20354  * This routine is the completion routine for the rediscover FCF table mailbox
20355  * command. If the mailbox command returned failure, it will try to stop the
20356  * FCF rediscover wait timer.
20357  **/
20358 static void
20359 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20360 {
20361         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20362         uint32_t shdr_status, shdr_add_status;
20363
20364         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20365
20366         shdr_status = bf_get(lpfc_mbox_hdr_status,
20367                              &redisc_fcf->header.cfg_shdr.response);
20368         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20369                              &redisc_fcf->header.cfg_shdr.response);
20370         if (shdr_status || shdr_add_status) {
20371                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20372                                 "2746 Requesting for FCF rediscovery failed "
20373                                 "status x%x add_status x%x\n",
20374                                 shdr_status, shdr_add_status);
20375                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20376                         spin_lock_irq(&phba->hbalock);
20377                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20378                         spin_unlock_irq(&phba->hbalock);
20379                         /*
20380                          * CVL event triggered FCF rediscover request failed,
20381                          * last resort to re-try current registered FCF entry.
20382                          */
20383                         lpfc_retry_pport_discovery(phba);
20384                 } else {
20385                         spin_lock_irq(&phba->hbalock);
20386                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20387                         spin_unlock_irq(&phba->hbalock);
20388                         /*
20389                          * DEAD FCF event triggered FCF rediscover request
20390                          * failed, last resort to fail over as a link down
20391                          * to FCF registration.
20392                          */
20393                         lpfc_sli4_fcf_dead_failthrough(phba);
20394                 }
20395         } else {
20396                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20397                                 "2775 Start FCF rediscover quiescent timer\n");
20398                 /*
20399                  * Start FCF rediscovery wait timer for pending FCF
20400                  * before rescan FCF record table.
20401                  */
20402                 lpfc_fcf_redisc_wait_start_timer(phba);
20403         }
20404
20405         mempool_free(mbox, phba->mbox_mem_pool);
20406 }
20407
20408 /**
20409  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20410  * @phba: pointer to lpfc hba data structure.
20411  *
20412  * This routine is invoked to request for rediscovery of the entire FCF table
20413  * by the port.
20414  **/
20415 int
20416 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20417 {
20418         LPFC_MBOXQ_t *mbox;
20419         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20420         int rc, length;
20421
20422         /* Cancel retry delay timers to all vports before FCF rediscover */
20423         lpfc_cancel_all_vport_retry_delay_timer(phba);
20424
20425         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20426         if (!mbox) {
20427                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20428                                 "2745 Failed to allocate mbox for "
20429                                 "requesting FCF rediscover.\n");
20430                 return -ENOMEM;
20431         }
20432
20433         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20434                   sizeof(struct lpfc_sli4_cfg_mhdr));
20435         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20436                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20437                          length, LPFC_SLI4_MBX_EMBED);
20438
20439         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20440         /* Set count to 0 for invalidating the entire FCF database */
20441         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20442
20443         /* Issue the mailbox command asynchronously */
20444         mbox->vport = phba->pport;
20445         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20446         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20447
20448         if (rc == MBX_NOT_FINISHED) {
20449                 mempool_free(mbox, phba->mbox_mem_pool);
20450                 return -EIO;
20451         }
20452         return 0;
20453 }
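
/*
 * Usage sketch (illustrative only): the rediscover request is issued
 * asynchronously; completion handling, including starting the quiescent
 * wait timer, is done in lpfc_mbx_cmpl_redisc_fcf_table() above:
 *
 *	if (lpfc_sli4_redisc_fcf_table(phba))
 *		lpfc_sli4_fcf_dead_failthrough(phba);	// hypothetical fallback
 */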
20454
20455 /**
20456  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20457  * @phba: pointer to lpfc hba data structure.
20458  *
20459  * This function is the failover routine as a last resort to the FCF DEAD
20460  * event when driver failed to perform fast FCF failover.
20461  **/
20462 void
20463 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20464 {
20465         uint32_t link_state;
20466
20467         /*
20468          * Last resort as FCF DEAD event failover will treat this as
20469          * a link down, but save the link state because we don't want
20470          * it to be changed to Link Down unless it is already down.
20471          */
20472         link_state = phba->link_state;
20473         lpfc_linkdown(phba);
20474         phba->link_state = link_state;
20475
20476         /* Unregister FCF if no devices connected to it */
20477         lpfc_unregister_unused_fcf(phba);
20478 }
20479
20480 /**
20481  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20482  * @phba: pointer to lpfc hba data structure.
20483  * @rgn23_data: pointer to configure region 23 data.
20484  *
20485  * This function gets SLI3 port configure region 23 data through memory dump
20486  * mailbox command. When it successfully retrieves data, the size of the data
20487  * will be returned, otherwise, 0 will be returned.
20488  **/
20489 static uint32_t
20490 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20491 {
20492         LPFC_MBOXQ_t *pmb = NULL;
20493         MAILBOX_t *mb;
20494         uint32_t offset = 0;
20495         int rc;
20496
20497         if (!rgn23_data)
20498                 return 0;
20499
20500         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20501         if (!pmb) {
20502                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20503                                 "2600 failed to allocate mailbox memory\n");
20504                 return 0;
20505         }
20506         mb = &pmb->u.mb;
20507
20508         do {
20509                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20510                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20511
20512                 if (rc != MBX_SUCCESS) {
20513                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20514                                         "2601 failed to read config "
20515                                         "region 23, rc 0x%x Status 0x%x\n",
20516                                         rc, mb->mbxStatus);
20517                         mb->un.varDmp.word_cnt = 0;
20518                 }
20519                 /*
20520                  * The memory dump may return zero when finished, or we hit
20521                  * a mailbox error; either way we are done.
20522                  */
20523                 if (mb->un.varDmp.word_cnt == 0)
20524                         break;
20525
20526                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20527                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20528
20529                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20530                                        rgn23_data + offset,
20531                                        mb->un.varDmp.word_cnt);
20532                 offset += mb->un.varDmp.word_cnt;
20533         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20534
20535         mempool_free(pmb, phba->mbox_mem_pool);
20536         return offset;
20537 }
20538
20539 /**
20540  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20541  * @phba: pointer to lpfc hba data structure.
20542  * @rgn23_data: pointer to configure region 23 data.
20543  *
20544  * This function gets SLI4 port configure region 23 data through memory dump
20545  * mailbox command. When it successfully retrieves data, the size of the data
20546  * will be returned, otherwise, 0 will be returned.
20547  **/
20548 static uint32_t
20549 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20550 {
20551         LPFC_MBOXQ_t *mboxq = NULL;
20552         struct lpfc_dmabuf *mp = NULL;
20553         struct lpfc_mqe *mqe;
20554         uint32_t data_length = 0;
20555         int rc;
20556
20557         if (!rgn23_data)
20558                 return 0;
20559
20560         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20561         if (!mboxq) {
20562                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20563                                 "3105 failed to allocate mailbox memory\n");
20564                 return 0;
20565         }
20566
20567         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20568                 goto out;
20569         mqe = &mboxq->u.mqe;
20570         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20571         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20572         if (rc)
20573                 goto out;
20574         data_length = mqe->un.mb_words[5];
20575         if (data_length == 0)
20576                 goto out;
20577         if (data_length > DMP_RGN23_SIZE) {
20578                 data_length = 0;
20579                 goto out;
20580         }
20581         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20582 out:
20583         mempool_free(mboxq, phba->mbox_mem_pool);
20584         if (mp) {
20585                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
20586                 kfree(mp);
20587         }
20588         return data_length;
20589 }
20590
20591 /**
20592  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20593  * @phba: pointer to lpfc hba data structure.
20594  *
20595  * This function read region 23 and parse TLV for port status to
20596  * decide if the user disaled the port. If the TLV indicates the
20597  * This function reads region 23 and parses the TLV for port status to
20598  * decide if the user disabled the port. If the TLV indicates the
20599 void
20600 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20601 {
20602         uint8_t *rgn23_data = NULL;
20603         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20604         uint32_t offset = 0;
20605
20606         /* Get adapter Region 23 data */
20607         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20608         if (!rgn23_data)
20609                 goto out;
20610
20611         if (phba->sli_rev < LPFC_SLI_REV4)
20612                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20613         else {
20614                 if_type = bf_get(lpfc_sli_intf_if_type,
20615                                  &phba->sli4_hba.sli_intf);
20616                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20617                         goto out;
20618                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20619         }
20620
20621         if (!data_size)
20622                 goto out;
20623
20624         /* Check the region signature first */
20625         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20626                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20627                         "2619 Config region 23 has bad signature\n");
20628                 goto out;
20629         }
20630         offset += 4;
20631
20632         /* Check the data structure version */
20633         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20634                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20635                         "2620 Config region 23 has bad version\n");
20636                 goto out;
20637         }
20638         offset += 4;
20639
20640         /* Parse TLV entries in the region */
20641         while (offset < data_size) {
20642                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20643                         break;
20644                 /*
20645                  * If the TLV is not driver specific TLV or driver id is
20646                  * not linux driver id, skip the record.
20647                  */
20648                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20649                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20650                     (rgn23_data[offset + 3] != 0)) {
20651                         offset += rgn23_data[offset + 1] * 4 + 4;
20652                         continue;
20653                 }
20654
20655                 /* Driver found a driver specific TLV in the config region */
20656                 sub_tlv_len = rgn23_data[offset + 1] * 4;
20657                 offset += 4;
20658                 tlv_offset = 0;
20659
20660                 /*
20661                  * Search for configured port state sub-TLV.
20662                  */
20663                 while ((offset < data_size) &&
20664                         (tlv_offset < sub_tlv_len)) {
20665                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20666                                 offset += 4;
20667                                 tlv_offset += 4;
20668                                 break;
20669                         }
20670                         if (rgn23_data[offset] != PORT_STE_TYPE) {
20671                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20672                                 offset += rgn23_data[offset + 1] * 4 + 4;
20673                                 continue;
20674                         }
20675
20676                         /* This HBA contains PORT_STE configured */
20677                         if (!rgn23_data[offset + 2])
20678                                 phba->hba_flag |= LINK_DISABLED;
20679
20680                         goto out;
20681                 }
20682         }
20683
20684 out:
20685         kfree(rgn23_data);
20686         return;
20687 }
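
/*
 * Region 23 layout assumed by the parser above (informational sketch,
 * derived from the checks in this routine):
 *
 *	bytes 0-3:  signature, compared against LPFC_REGION23_SIGNATURE
 *	bytes 4-7:  version word; byte 4 must equal LPFC_REGION23_VERSION
 *	then TLV records until an LPFC_REGION23_LAST_REC type byte:
 *	  byte 0:  type (e.g. DRIVER_SPECIFIC_TYPE, PORT_STE_TYPE)
 *	  byte 1:  length in words; a record spans length * 4 + 4 bytes
 *	  byte 2+: data (for the driver TLV, byte 2 is LINUX_DRIVER_ID;
 *	           for PORT_STE, byte 2 == 0 means the link is disabled)
 */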
20688
20689 /**
20690  * lpfc_log_fw_write_cmpl - logs firmware write completion status
20691  * @phba: pointer to lpfc hba data structure
20692  * @shdr_status: wr_object rsp's status field
20693  * @shdr_add_status: wr_object rsp's add_status field
20694  * @shdr_add_status_2: wr_object rsp's add_status_2 field
20695  * @shdr_change_status: wr_object rsp's change_status field
20696  * @shdr_csf: wr_object rsp's csf bit
20697  *
20698  * This routine is intended to be called after a firmware write completes.
20699  * It will log the next action items to be performed by the user to
20700  * instantiate the newly downloaded firmware, or the reason for incompatibility.
20701  **/
20702 static void
20703 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20704                        u32 shdr_add_status, u32 shdr_add_status_2,
20705                        u32 shdr_change_status, u32 shdr_csf)
20706 {
20707         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20708                         "4198 %s: flash_id x%02x, asic_rev x%02x, "
20709                         "status x%02x, add_status x%02x, add_status_2 x%02x, "
20710                         "change_status x%02x, csf %01x\n", __func__,
20711                         phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20712                         shdr_status, shdr_add_status, shdr_add_status_2,
20713                         shdr_change_status, shdr_csf);
20714
20715         if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20716                 switch (shdr_add_status_2) {
20717                 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20718                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20719                                         "4199 Firmware write failed: "
20720                                         "image incompatible with flash x%02x\n",
20721                                         phba->sli4_hba.flash_id);
20722                         break;
20723                 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20724                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20725                                         "4200 Firmware write failed: "
20726                                         "image incompatible with ASIC "
20727                                         "architecture x%02x\n",
20728                                         phba->sli4_hba.asic_rev);
20729                         break;
20730                 default:
20731                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20732                                         "4210 Firmware write failed: "
20733                                         "add_status_2 x%02x\n",
20734                                         shdr_add_status_2);
20735                         break;
20736                 }
20737         } else if (!shdr_status && !shdr_add_status) {
20738                 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20739                     shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20740                         if (shdr_csf)
20741                                 shdr_change_status =
20742                                                    LPFC_CHANGE_STATUS_PCI_RESET;
20743                 }
20744
20745                 switch (shdr_change_status) {
20746                 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20747                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20748                                         "3198 Firmware write complete: System "
20749                                         "reboot required to instantiate\n");
20750                         break;
20751                 case (LPFC_CHANGE_STATUS_FW_RESET):
20752                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20753                                         "3199 Firmware write complete: "
20754                                         "Firmware reset required to "
20755                                         "instantiate\n");
20756                         break;
20757                 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20758                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20759                                         "3200 Firmware write complete: Port "
20760                                         "Migration or PCI Reset required to "
20761                                         "instantiate\n");
20762                         break;
20763                 case (LPFC_CHANGE_STATUS_PCI_RESET):
20764                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20765                                         "3201 Firmware write complete: PCI "
20766                                         "Reset required to instantiate\n");
20767                         break;
20768                 default:
20769                         break;
20770                 }
20771         }
20772 }
20773
20774 /**
20775  * lpfc_wr_object - write an object to the firmware
20776  * @phba: HBA structure that indicates port to create a queue on.
20777  * @dmabuf_list: list of dmabufs to write to the port.
20778  * @size: the total byte value of the objects to write to the port.
20779  * @offset: the current offset to be used to start the transfer.
20780  *
20781  * This routine will create a wr_object mailbox command to send to the port.
20782  * The mailbox command will be constructed using the dma buffers described
20783  * in @dmabuf_list to create a list of BDEs. This routine will fill in as
20784  * many BDEs as the embedded mailbox can support. The @offset variable will be
20785  * used to indicate the starting offset of the transfer and will also return
20786  * the offset after the write object mailbox has completed. @size is used to
20787  * determine the end of the object and whether the eof bit should be set.
20788  *
20789  * Return 0 is successful and offset will contain the the new offset to use
20790  * for the next write.
20791  * Return negative value for error cases.
20792  **/
20793 int
20794 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20795                uint32_t size, uint32_t *offset)
20796 {
20797         struct lpfc_mbx_wr_object *wr_object;
20798         LPFC_MBOXQ_t *mbox;
20799         int rc = 0, i = 0;
20800         uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20801         uint32_t shdr_change_status = 0, shdr_csf = 0;
20802         uint32_t mbox_tmo;
20803         struct lpfc_dmabuf *dmabuf;
20804         uint32_t written = 0;
20805         bool check_change_status = false;
20806
20807         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20808         if (!mbox)
20809                 return -ENOMEM;
20810
20811         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20812                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
20813                         sizeof(struct lpfc_mbx_wr_object) -
20814                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20815
20816         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20817         wr_object->u.request.write_offset = *offset;
20818         sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20819         wr_object->u.request.object_name[0] =
20820                 cpu_to_le32(wr_object->u.request.object_name[0]);
20821         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20822         list_for_each_entry(dmabuf, dmabuf_list, list) {
20823                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20824                         break;
20825                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20826                 wr_object->u.request.bde[i].addrHigh =
20827                         putPaddrHigh(dmabuf->phys);
20828                 if (written + SLI4_PAGE_SIZE >= size) {
20829                         wr_object->u.request.bde[i].tus.f.bdeSize =
20830                                 (size - written);
20831                         written += (size - written);
20832                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20833                         bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20834                         check_change_status = true;
20835                 } else {
20836                         wr_object->u.request.bde[i].tus.f.bdeSize =
20837                                 SLI4_PAGE_SIZE;
20838                         written += SLI4_PAGE_SIZE;
20839                 }
20840                 i++;
20841         }
20842         wr_object->u.request.bde_count = i;
20843         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20844         if (!phba->sli4_hba.intr_enable)
20845                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20846         else {
20847                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20848                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20849         }
20850         /* The IOCTL status is embedded in the mailbox subheader. */
20851         shdr_status = bf_get(lpfc_mbox_hdr_status,
20852                              &wr_object->header.cfg_shdr.response);
20853         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20854                                  &wr_object->header.cfg_shdr.response);
20855         shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20856                                    &wr_object->header.cfg_shdr.response);
20857         if (check_change_status) {
20858                 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20859                                             &wr_object->u.response);
20860                 shdr_csf = bf_get(lpfc_wr_object_csf,
20861                                   &wr_object->u.response);
20862         }
20863
20864         if (!phba->sli4_hba.intr_enable)
20865                 mempool_free(mbox, phba->mbox_mem_pool);
20866         else if (rc != MBX_TIMEOUT)
20867                 mempool_free(mbox, phba->mbox_mem_pool);
20868         if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20869                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20870                                 "3025 Write Object mailbox failed with "
20871                                 "status x%x add_status x%x, add_status_2 x%x, "
20872                                 "mbx status x%x\n",
20873                                 shdr_status, shdr_add_status, shdr_add_status_2,
20874                                 rc);
20875                 rc = -ENXIO;
20876                 *offset = shdr_add_status;
20877         } else {
20878                 *offset += wr_object->u.response.actual_write_length;
20879         }
20880
20881         if (rc || check_change_status)
20882                 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20883                                        shdr_add_status_2, shdr_change_status,
20884                                        shdr_csf);
20885         return rc;
20886 }
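
/* Usage sketch (hypothetical, not from this driver): a firmware download
 * path would call lpfc_wr_object() repeatedly, letting @offset advance
 * until the whole image is transferred. fw_size and chunk_list are
 * illustrative names; chunk_list would be refilled with SLI4_PAGE_SIZE
 * dmabufs on each pass.
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &chunk_list,
 *				    fw_size - offset, &offset);
 *
 * A negative rc means the write failed; on success @offset has advanced
 * by the actual_write_length reported by the port, so the eof bit is set
 * on the pass that covers the final bytes.
 */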
20887
20888 /**
20889  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20890  * @vport: pointer to vport data structure.
20891  *
20892  * This function iterates through the mailboxq and cleans up all REG_LOGIN
20893  * and REG_VPI mailbox commands associated with the vport. It is called
20894  * when the driver wants to restart discovery of the vport due to
20895  * a Clear Virtual Link event.
20896  **/
20897 void
20898 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20899 {
20900         struct lpfc_hba *phba = vport->phba;
20901         LPFC_MBOXQ_t *mb, *nextmb;
20902         struct lpfc_dmabuf *mp;
20903         struct lpfc_nodelist *ndlp;
20904         struct lpfc_nodelist *act_mbx_ndlp = NULL;
20905         LIST_HEAD(mbox_cmd_list);
20906         uint8_t restart_loop;
20907
20908         /* Clean up internally queued mailbox commands with the vport */
20909         spin_lock_irq(&phba->hbalock);
20910         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20911                 if (mb->vport != vport)
20912                         continue;
20913
20914                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20915                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
20916                         continue;
20917
20918                 list_move_tail(&mb->list, &mbox_cmd_list);
20919         }
20920         /* Clean up active mailbox command with the vport */
20921         mb = phba->sli.mbox_active;
20922         if (mb && (mb->vport == vport)) {
20923                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20924                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
20925                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20926                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20927                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20928                         /* Take a reference for delayed processing */
20929                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20930                         /* Unregister the RPI when the mailbox completes */
20931                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20932                 }
20933         }
20934         /* Cleanup any mailbox completions which are not yet processed */
20935         do {
20936                 restart_loop = 0;
20937                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20938                         /*
20939                          * If this mailbox is already processed or it is
20940                          * for another vport, ignore it.
20941                          */
20942                         if ((mb->vport != vport) ||
20943                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20944                                 continue;
20945
20946                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20947                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20948                                 continue;
20949
20950                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20951                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20952                                 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20953                                 /* Unregister the RPI when the mailbox completes */
20954                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20955                                 restart_loop = 1;
20956                                 spin_unlock_irq(&phba->hbalock);
20957                                 spin_lock(&ndlp->lock);
20958                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20959                                 spin_unlock(&ndlp->lock);
20960                                 spin_lock_irq(&phba->hbalock);
20961                                 break;
20962                         }
20963                 }
20964         } while (restart_loop);
20965
20966         spin_unlock_irq(&phba->hbalock);
20967
20968         /* Release the cleaned-up mailbox commands */
20969         while (!list_empty(&mbox_cmd_list)) {
20970                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20971                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20972                         mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
20973                         if (mp) {
20974                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
20975                                 kfree(mp);
20976                         }
20977                         mb->ctx_buf = NULL;
20978                         ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20979                         mb->ctx_ndlp = NULL;
20980                         if (ndlp) {
20981                                 spin_lock(&ndlp->lock);
20982                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20983                                 spin_unlock(&ndlp->lock);
20984                                 lpfc_nlp_put(ndlp);
20985                         }
20986                 }
20987                 mempool_free(mb, phba->mbox_mem_pool);
20988         }
20989
20990         /* Release the ndlp with the cleaned-up active mailbox command */
20991         if (act_mbx_ndlp) {
20992                 spin_lock(&act_mbx_ndlp->lock);
20993                 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20994                 spin_unlock(&act_mbx_ndlp->lock);
20995                 lpfc_nlp_put(act_mbx_ndlp);
20996         }
20997 }
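
/* Context sketch (hedged): CVL recovery conceptually pairs this cleanup
 * with unregistering the login state before restarting discovery, along
 * the lines of:
 *
 *	lpfc_cleanup_pending_mbox(vport);
 *	lpfc_sli4_unreg_all_rpis(vport);
 *	lpfc_mbx_unreg_vpi(vport);
 *
 * so that no stale REG_LOGIN/REG_VPI completion can race with the
 * restarted discovery.
 */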
20998
20999 /**
21000  * lpfc_drain_txq - Drain the txq
21001  * @phba: Pointer to HBA context object.
21002  *
21003  * This function attempts to submit IOCBs on the txq
21004  * to the adapter.  For SLI4 adapters, the txq contains
21005  * ELS IOCBs that have been deferred because there
21006  * are no free SGLs.  This congestion can occur with large
21007  * vport counts during node discovery.
21008  **/
21009
21010 uint32_t
21011 lpfc_drain_txq(struct lpfc_hba *phba)
21012 {
21013         LIST_HEAD(completions);
21014         struct lpfc_sli_ring *pring;
21015         struct lpfc_iocbq *piocbq = NULL;
21016         unsigned long iflags = 0;
21017         char *fail_msg = NULL;
21018         struct lpfc_sglq *sglq;
21019         union lpfc_wqe128 wqe;
21020         uint32_t txq_cnt = 0;
21021         struct lpfc_queue *wq;
21022
21023         if (phba->link_flag & LS_MDS_LOOPBACK) {
21024                 /* MDS WQEs are posted only to the first WQ */
21025                 wq = phba->sli4_hba.hdwq[0].io_wq;
21026                 if (unlikely(!wq))
21027                         return 0;
21028                 pring = wq->pring;
21029         } else {
21030                 wq = phba->sli4_hba.els_wq;
21031                 if (unlikely(!wq))
21032                         return 0;
21033                 pring = lpfc_phba_elsring(phba);
21034         }
21035
21036         if (unlikely(!pring) || list_empty(&pring->txq))
21037                 return 0;
21038
21039         spin_lock_irqsave(&pring->ring_lock, iflags);
21040         list_for_each_entry(piocbq, &pring->txq, list) {
21041                 txq_cnt++;
21042         }
21043
21044         if (txq_cnt > pring->txq_max)
21045                 pring->txq_max = txq_cnt;
21046
21047         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21048
21049         while (!list_empty(&pring->txq)) {
21050                 spin_lock_irqsave(&pring->ring_lock, iflags);
21051
21052                 piocbq = lpfc_sli_ringtx_get(phba, pring);
21053                 if (!piocbq) {
21054                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21055                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21056                                 "2823 txq empty and txq_cnt is %d\n",
21057                                 txq_cnt);
21058                         break;
21059                 }
21060                 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
21061                 if (!sglq) {
21062                         __lpfc_sli_ringtx_put(phba, pring, piocbq);
21063                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21064                         break;
21065                 }
21066                 txq_cnt--;
21067
21068                 /* The xri and iocb resources are secured,
21069                  * attempt to issue the request
21070                  */
21071                 piocbq->sli4_lxritag = sglq->sli4_lxritag;
21072                 piocbq->sli4_xritag = sglq->sli4_xritag;
21073                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
21074                         fail_msg = "to convert bpl to sgl";
21075                 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
21076                         fail_msg = "to convert iocb to wqe";
21077                 else if (lpfc_sli4_wq_put(wq, &wqe))
21078                         fail_msg = " - Wq is full";
21079                 else
21080                         lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
21081
21082                 if (fail_msg) {
21083                         /* Failed means we can't issue and need to cancel */
21084                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21085                                         "2822 IOCB failed %s iotag 0x%x "
21086                                         "xri 0x%x\n",
21087                                         fail_msg,
21088                                         piocbq->iotag, piocbq->sli4_xritag);
21089                         list_add_tail(&piocbq->list, &completions);
21090                         fail_msg = NULL;
21091                 }
21092                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21093         }
21094
21095         /* Cancel all the IOCBs that cannot be issued */
21096         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21097                                 IOERR_SLI_ABORTED);
21098
21099         return txq_cnt;
21100 }
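
/* Hedged note: lpfc_drain_txq() is a retry path, not a fast path. ELS
 * WQEs parked on the txq by an earlier failed __lpfc_sli_get_els_sglq()
 * get another attempt here once completions have freed SGLs. A sketch of
 * a caller (illustrative, e.g. from a worker-thread context):
 *
 *	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
 *
 *	if (pring && !list_empty(&pring->txq))
 *		lpfc_drain_txq(phba);
 *
 * The return value is roughly the number of entries still queued when the
 * drain stopped, which a caller can use to decide on another pass.
 */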
21101
21102 /**
21103  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21104  * @phba: Pointer to HBA context object.
21105  * @pwqeq: Pointer to command WQE.
21106  * @sglq: Pointer to the scatter gather queue object.
21107  *
21108  * This routine converts the bpl or bde that is in the WQE
21109  * to a sgl list for the sli4 hardware. The physical address
21110  * of the bpl/bde is converted back to a virtual address.
21111  * If the WQE contains a BPL then the list of BDEs is
21112  * converted to sli4_sges. If the WQE contains a single
21113  * BDE then it is converted to a single sli4_sge.
21114  * The WQE is still in cpu endianness so the contents of
21115  * the bpl can be used without byte swapping.
21116  *
21117  * Returns valid XRI = Success, NO_XRI = Failure.
21118  */
21119 static uint16_t
21120 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21121                  struct lpfc_sglq *sglq)
21122 {
21123         uint16_t xritag = NO_XRI;
21124         struct ulp_bde64 *bpl = NULL;
21125         struct ulp_bde64 bde;
21126         struct sli4_sge *sgl  = NULL;
21127         struct lpfc_dmabuf *dmabuf;
21128         union lpfc_wqe128 *wqe;
21129         int numBdes = 0;
21130         int i = 0;
21131         uint32_t offset = 0; /* accumulated offset in the sg request list */
21132         int inbound = 0; /* number of sg reply entries inbound from firmware */
21133         uint32_t cmd;
21134
21135         if (!pwqeq || !sglq)
21136                 return xritag;
21137
21138         sgl  = (struct sli4_sge *)sglq->sgl;
21139         wqe = &pwqeq->wqe;
21140         pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21141
21142         cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21143         if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21144                 return sglq->sli4_xritag;
21145         numBdes = pwqeq->num_bdes;
21146         if (numBdes) {
21147                 /* The addrHigh and addrLow fields within the WQE
21148                  * have not been byteswapped yet so there is no
21149                  * need to swap them back.
21150                  */
21151                 if (pwqeq->context3)
21152                         dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
21153                 else
21154                         return xritag;
21155
21156                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
21157                 if (!bpl)
21158                         return xritag;
21159
21160                 for (i = 0; i < numBdes; i++) {
21161                         /* Should already be byte swapped. */
21162                         sgl->addr_hi = bpl->addrHigh;
21163                         sgl->addr_lo = bpl->addrLow;
21164
21165                         sgl->word2 = le32_to_cpu(sgl->word2);
21166                         if ((i+1) == numBdes)
21167                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
21168                         else
21169                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
21170                         /* swap the size field back to the cpu so we
21171                          * can assign it to the sgl.
21172                          */
21173                         bde.tus.w = le32_to_cpu(bpl->tus.w);
21174                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21175                         /* The offsets in the sgl need to be accumulated
21176                          * separately for the request and reply lists.
21177                          * The request is always first, the reply follows.
21178                          */
21179                         switch (cmd) {
21180                         case CMD_GEN_REQUEST64_WQE:
21181                                 /* add up the reply sg entries */
21182                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21183                                         inbound++;
21184                                 /* first inbound? reset the offset */
21185                                 if (inbound == 1)
21186                                         offset = 0;
21187                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21188                                 bf_set(lpfc_sli4_sge_type, sgl,
21189                                         LPFC_SGE_TYPE_DATA);
21190                                 offset += bde.tus.f.bdeSize;
21191                                 break;
21192                         case CMD_FCP_TRSP64_WQE:
21193                                 bf_set(lpfc_sli4_sge_offset, sgl, 0);
21194                                 bf_set(lpfc_sli4_sge_type, sgl,
21195                                         LPFC_SGE_TYPE_DATA);
21196                                 break;
21197                         case CMD_FCP_TSEND64_WQE:
21198                         case CMD_FCP_TRECEIVE64_WQE:
21199                                 bf_set(lpfc_sli4_sge_type, sgl,
21200                                         bpl->tus.f.bdeFlags);
21201                                 if (i < 3)
21202                                         offset = 0;
21203                                 else
21204                                         offset += bde.tus.f.bdeSize;
21205                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21206                                 break;
21207                         }
21208                         sgl->word2 = cpu_to_le32(sgl->word2);
21209                         bpl++;
21210                         sgl++;
21211                 }
21212         } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21213                 /* The addrHigh and addrLow fields of the BDE have not
21214                  * been byteswapped yet so they need to be swapped
21215                  * before putting them in the sgl.
21216                  */
21217                 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21218                 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21219                 sgl->word2 = le32_to_cpu(sgl->word2);
21220                 bf_set(lpfc_sli4_sge_last, sgl, 1);
21221                 sgl->word2 = cpu_to_le32(sgl->word2);
21222                 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21223         }
21224         return sglq->sli4_xritag;
21225 }
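
/* Per-BDE field mapping performed above (summary of the loop, not new
 * behavior):
 *
 *	sli4_sge.addr_hi / addr_lo  <-  ulp_bde64.addrHigh / addrLow
 *	sli4_sge.sge_len            <-  ulp_bde64.tus.f.bdeSize (byteswapped)
 *	lpfc_sli4_sge_last          <-  1 on the final SGE only
 *	lpfc_sli4_sge_offset        <-  accumulated separately for the
 *	                                request and reply portions
 */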
21226
21227 /**
21228  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21229  * @phba: Pointer to HBA context object.
21230  * @qp: Pointer to HDW queue.
21231  * @pwqe: Pointer to command WQE.
21232  **/
21233 int
21234 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21235                     struct lpfc_iocbq *pwqe)
21236 {
21237         union lpfc_wqe128 *wqe = &pwqe->wqe;
21238         struct lpfc_async_xchg_ctx *ctxp;
21239         struct lpfc_queue *wq;
21240         struct lpfc_sglq *sglq;
21241         struct lpfc_sli_ring *pring;
21242         unsigned long iflags;
21243         uint32_t ret = 0;
21244
21245         /* NVME_LS and NVME_LS ABTS requests. */
21246         if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21247                 pring =  phba->sli4_hba.nvmels_wq->pring;
21248                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21249                                           qp, wq_access);
21250                 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21251                 if (!sglq) {
21252                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21253                         return WQE_BUSY;
21254                 }
21255                 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21256                 pwqe->sli4_xritag = sglq->sli4_xritag;
21257                 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21258                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21259                         return WQE_ERROR;
21260                 }
21261                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21262                        pwqe->sli4_xritag);
21263                 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21264                 if (ret) {
21265                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21266                         return ret;
21267                 }
21268
21269                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21270                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21271
21272                 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21273                 return 0;
21274         }
21275
21276         /* NVME_FCREQ and NVME_ABTS requests */
21277         if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21278                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21279                 wq = qp->io_wq;
21280                 pring = wq->pring;
21281
21282                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21283
21284                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21285                                           qp, wq_access);
21286                 ret = lpfc_sli4_wq_put(wq, wqe);
21287                 if (ret) {
21288                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21289                         return ret;
21290                 }
21291                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21292                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21293
21294                 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21295                 return 0;
21296         }
21297
21298         /* NVMET requests */
21299         if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21300                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21301                 wq = qp->io_wq;
21302                 pring = wq->pring;
21303
21304                 ctxp = pwqe->context2;
21305                 sglq = ctxp->ctxbuf->sglq;
21306                 if (pwqe->sli4_xritag ==  NO_XRI) {
21307                         pwqe->sli4_lxritag = sglq->sli4_lxritag;
21308                         pwqe->sli4_xritag = sglq->sli4_xritag;
21309                 }
21310                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21311                        pwqe->sli4_xritag);
21312                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21313
21314                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21315                                           qp, wq_access);
21316                 ret = lpfc_sli4_wq_put(wq, wqe);
21317                 if (ret) {
21318                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21319                         return ret;
21320                 }
21321                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21322                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21323
21324                 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21325                 return 0;
21326         }
21327         return WQE_ERROR;
21328 }
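
/* Usage sketch (hedged): fast-path callers submit on the hardware queue
 * already stamped into the IO buffer, e.g. the NVME/FCP initiator paths
 * do essentially:
 *
 *	ret = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq,
 *				  &lpfc_cmd->cur_iocbq);
 *	if (ret)
 *		handle WQE_BUSY/WQE_ERROR; the IO was not queued
 *
 * Only cmd_flag decides which of the three branches above runs; a WQE
 * with none of the NVME_LS/NVME/FCP/CMF/NVMET flags set is rejected with
 * WQE_ERROR.
 */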
21329
21330 /**
21331  * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21332  * @phba: Pointer to HBA context object.
21333  * @cmdiocb: Pointer to driver command iocb object.
21334  * @cmpl: completion function.
21335  *
21336  * Fill in the appropriate fields for the abort WQE and call the
21337  * internal routine lpfc_sli4_issue_wqe to send the WQE.
21338  * This function is called with hbalock held and no ring_lock held.
21339  *
21340  * RETURNS 0 - SUCCESS
21341  **/
21342
21343 int
21344 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21345                             void *cmpl)
21346 {
21347         struct lpfc_vport *vport = cmdiocb->vport;
21348         struct lpfc_iocbq *abtsiocb = NULL;
21349         union lpfc_wqe128 *abtswqe;
21350         struct lpfc_io_buf *lpfc_cmd;
21351         int retval = IOCB_ERROR;
21352         u16 xritag = cmdiocb->sli4_xritag;
21353
21354         /*
21355          * The SCSI command cannot be in the txq, and it is in flight,
21356          * because pCmd still points at the SCSI command to abort. There
21357          * is no need to search the txcmplq; just send an abort to the FW.
21358          */
21359
21360         abtsiocb = __lpfc_sli_get_iocbq(phba);
21361         if (!abtsiocb)
21362                 return WQE_NORESOURCE;
21363
21364         /* Indicate the IO is being aborted by the driver. */
21365         cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21366
21367         abtswqe = &abtsiocb->wqe;
21368         memset(abtswqe, 0, sizeof(*abtswqe));
21369
21370         if (!lpfc_is_link_up(phba))
21371                 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21372         bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21373         abtswqe->abort_cmd.rsrvd5 = 0;
21374         abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21375         bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21376         bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21377         bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21378         bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21379         bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21380         bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21381
21382         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21383         abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21384         abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21385         if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21386                 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21387         if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21388                 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21389         if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21390                 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21391         abtsiocb->vport = vport;
21392         abtsiocb->cmd_cmpl = cmpl;
21393
21394         lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21395         retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21396
21397         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21398                          "0359 Abort xri x%x, original iotag x%x, "
21399                          "abort cmd iotag x%x retval x%x\n",
21400                          xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21401
21402         if (retval) {
21403                 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21404                 __lpfc_sli_release_iocbq(phba, abtsiocb);
21405         }
21406
21407         return retval;
21408 }
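
/* Hedged usage note: a zero return only means the ABTS WQE was queued on
 * the same WQ as the original IO; the aborted IO still finishes through
 * its own completion (typically IOSTAT_LOCAL_REJECT). A sketch of an
 * FCP-side caller, with a handler such as lpfc_sli_abort_fcp_cmpl:
 *
 *	ret = lpfc_sli4_issue_abort_iotag(phba, &lpfc_cmd->cur_iocbq,
 *					  lpfc_sli_abort_fcp_cmpl);
 */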
21409
21410 #ifdef LPFC_MXP_STAT
21411 /**
21412  * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21413  * @phba: pointer to lpfc hba data structure.
21414  * @hwqid: index of the HWQ this snapshot belongs to.
21415  *
21416  * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21417  * 15 seconds after a test case starts running.
21418  *
21419  * The user should call lpfc_debugfs_multixripools_write before running a test
21420  * case to clear stat_snapshot_taken. Then the user starts a test case. While
21421  * the test case is running, stat_snapshot_taken is incremented by 1 every time
21422  * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21423  * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21424  **/
21425 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21426 {
21427         struct lpfc_sli4_hdw_queue *qp;
21428         struct lpfc_multixri_pool *multixri_pool;
21429         struct lpfc_pvt_pool *pvt_pool;
21430         struct lpfc_pbl_pool *pbl_pool;
21431         u32 txcmplq_cnt;
21432
21433         qp = &phba->sli4_hba.hdwq[hwqid];
21434         multixri_pool = qp->p_multixri_pool;
21435         if (!multixri_pool)
21436                 return;
21437
21438         if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21439                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21440                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21441                 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21442
21443                 multixri_pool->stat_pbl_count = pbl_pool->count;
21444                 multixri_pool->stat_pvt_count = pvt_pool->count;
21445                 multixri_pool->stat_busy_count = txcmplq_cnt;
21446         }
21447
21448         multixri_pool->stat_snapshot_taken++;
21449 }
21450 #endif
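
/* Hedged timing note: with the heartbeat as the caller, the snapshot above
 * fires exactly once per test run (on the tick where stat_snapshot_taken
 * equals LPFC_MXP_SNAPSHOT_TAKEN); later ticks only increment the counter,
 * so the captured pbl/pvt/busy counts stay frozen until the next reset
 * via lpfc_debugfs_multixripools_write.
 */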
21451
21452 /**
21453  * lpfc_adjust_pvt_pool_count - Adjust private pool count
21454  * @phba: pointer to lpfc hba data structure.
21455  * @hwqid: index of the HWQ the pools belong to.
21456  *
21457  * This routine moves some XRIs from private to public pool when private pool
21458  * is not busy.
21459  **/
21460 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21461 {
21462         struct lpfc_multixri_pool *multixri_pool;
21463         u32 io_req_count;
21464         u32 prev_io_req_count;
21465
21466         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21467         if (!multixri_pool)
21468                 return;
21469         io_req_count = multixri_pool->io_req_count;
21470         prev_io_req_count = multixri_pool->prev_io_req_count;
21471
21472         if (prev_io_req_count != io_req_count) {
21473                 /* Private pool is busy */
21474                 multixri_pool->prev_io_req_count = io_req_count;
21475         } else {
21476                 /* Private pool is not busy.
21477                  * Move XRIs from private to public pool.
21478                  */
21479                 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21480         }
21481 }
21482
21483 /**
21484  * lpfc_adjust_high_watermark - Adjust high watermark
21485  * @phba: pointer to lpfc hba data structure.
21486  * @hwqid: index of the HWQ the watermark belongs to.
21487  *
21488  * This routine sets the high watermark to the number of outstanding XRIs,
21489  * but makes sure the new value is between xri_limit/2 and xri_limit.
21490  **/
21491 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21492 {
21493         u32 new_watermark;
21494         u32 watermark_max;
21495         u32 watermark_min;
21496         u32 xri_limit;
21497         u32 txcmplq_cnt;
21498         u32 abts_io_bufs;
21499         struct lpfc_multixri_pool *multixri_pool;
21500         struct lpfc_sli4_hdw_queue *qp;
21501
21502         qp = &phba->sli4_hba.hdwq[hwqid];
21503         multixri_pool = qp->p_multixri_pool;
21504         if (!multixri_pool)
21505                 return;
21506         xri_limit = multixri_pool->xri_limit;
21507
21508         watermark_max = xri_limit;
21509         watermark_min = xri_limit / 2;
21510
21511         txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21512         abts_io_bufs = qp->abts_scsi_io_bufs;
21513         abts_io_bufs += qp->abts_nvme_io_bufs;
21514
21515         new_watermark = txcmplq_cnt + abts_io_bufs;
21516         new_watermark = min(watermark_max, new_watermark);
21517         new_watermark = max(watermark_min, new_watermark);
21518         multixri_pool->pvt_pool.high_watermark = new_watermark;
21519
21520 #ifdef LPFC_MXP_STAT
21521         multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21522                                           new_watermark);
21523 #endif
21524 }
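
/* Worked example of the clamping above (illustrative numbers): with
 * xri_limit = 512 the watermark is confined to [256, 512], so
 *
 *	txcmplq_cnt + abts_io_bufs = 100  ->  high_watermark = 256
 *	txcmplq_cnt + abts_io_bufs = 300  ->  high_watermark = 300
 *	txcmplq_cnt + abts_io_bufs = 700  ->  high_watermark = 512
 */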
21525
21526 /**
21527  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21528  * @phba: pointer to lpfc hba data structure.
21529  * @hwqid: index of the HWQ the pools belong to.
21530  *
21531  * This routine is called from the heartbeat timer when pvt_pool is idle.
21532  * All free XRIs are moved from the private to the public pool on hwqid in
21533  * two steps. The first step moves (all - low_watermark) XRIs.
21534  * The second step moves the rest of the XRIs.
21535  **/
21536 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21537 {
21538         struct lpfc_pbl_pool *pbl_pool;
21539         struct lpfc_pvt_pool *pvt_pool;
21540         struct lpfc_sli4_hdw_queue *qp;
21541         struct lpfc_io_buf *lpfc_ncmd;
21542         struct lpfc_io_buf *lpfc_ncmd_next;
21543         unsigned long iflag;
21544         struct list_head tmp_list;
21545         u32 tmp_count;
21546
21547         qp = &phba->sli4_hba.hdwq[hwqid];
21548         pbl_pool = &qp->p_multixri_pool->pbl_pool;
21549         pvt_pool = &qp->p_multixri_pool->pvt_pool;
21550         tmp_count = 0;
21551
21552         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21553         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21554
21555         if (pvt_pool->count > pvt_pool->low_watermark) {
21556                 /* Step 1: move (all - low_watermark) from pvt_pool
21557                  * to pbl_pool
21558                  */
21559
21560                 /* Move low watermark of bufs from pvt_pool to tmp_list */
21561                 INIT_LIST_HEAD(&tmp_list);
21562                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21563                                          &pvt_pool->list, list) {
21564                         list_move_tail(&lpfc_ncmd->list, &tmp_list);
21565                         tmp_count++;
21566                         if (tmp_count >= pvt_pool->low_watermark)
21567                                 break;
21568                 }
21569
21570                 /* Move all bufs from pvt_pool to pbl_pool */
21571                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21572
21573                 /* Move all bufs from tmp_list to pvt_pool */
21574                 list_splice(&tmp_list, &pvt_pool->list);
21575
21576                 pbl_pool->count += (pvt_pool->count - tmp_count);
21577                 pvt_pool->count = tmp_count;
21578         } else {
21579                 /* Step 2: move the rest from pvt_pool to pbl_pool */
21580                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21581                 pbl_pool->count += pvt_pool->count;
21582                 pvt_pool->count = 0;
21583         }
21584
21585         spin_unlock(&pvt_pool->lock);
21586         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21587 }
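
/* Worked example (illustrative numbers): with pvt_pool->count = 100 and
 * low_watermark = 20, step 1 keeps the first 20 bufs in pvt_pool and moves
 * the other 80 to pbl_pool. A later idle pass then finds the count (20)
 * not above the low watermark, and step 2 drains the remaining 20, leaving
 * pvt_pool empty.
 */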
21588
21589 /**
21590  * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21591  * @phba: pointer to lpfc hba data structure
21592  * @qp: pointer to HDW queue
21593  * @pbl_pool: specified public free XRI pool
21594  * @pvt_pool: specified private free XRI pool
21595  * @count: number of XRIs to move
21596  *
21597  * This routine tries to move some free common bufs from the specified pbl_pool
21598  * to the specified pvt_pool. It might move fewer than @count XRIs if there are
21599  * not enough in the public pool.
21600  *
21601  * Return:
21602  *   true - if XRIs are successfully moved from the specified pbl_pool to the
21603  *          specified pvt_pool
21604  *   false - if the specified pbl_pool is empty or locked by someone else
21605  **/
21606 static bool
21607 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21608                           struct lpfc_pbl_pool *pbl_pool,
21609                           struct lpfc_pvt_pool *pvt_pool, u32 count)
21610 {
21611         struct lpfc_io_buf *lpfc_ncmd;
21612         struct lpfc_io_buf *lpfc_ncmd_next;
21613         unsigned long iflag;
21614         int ret;
21615
21616         ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21617         if (ret) {
21618                 if (pbl_pool->count) {
21619                         /* Move a batch of XRIs from public to private pool */
21620                         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21621                         list_for_each_entry_safe(lpfc_ncmd,
21622                                                  lpfc_ncmd_next,
21623                                                  &pbl_pool->list,
21624                                                  list) {
21625                                 list_move_tail(&lpfc_ncmd->list,
21626                                                &pvt_pool->list);
21627                                 pvt_pool->count++;
21628                                 pbl_pool->count--;
21629                                 count--;
21630                                 if (count == 0)
21631                                         break;
21632                         }
21633
21634                         spin_unlock(&pvt_pool->lock);
21635                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21636                         return true;
21637                 }
21638                 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21639         }
21640
21641         return false;
21642 }
21643
21644 /**
21645  * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21646  * @phba: pointer to lpfc hba data structure.
21647  * @hwqid: index of the HWQ the private pool belongs to.
21648  * @count: number of XRIs to move
21649  *
21650  * This routine tries to find some free common bufs in one of the public pools
21651  * using a round-robin method. The search starts from the local hwqid, then
21652  * continues from the HWQ found last time (rrb_next_hwqid). Once a public pool
21653  * is found, a batch of free common bufs is moved to the private pool on hwqid.
21654  * It might move fewer than @count XRIs if there are not enough in the public pool.
21655  **/
21656 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21657 {
21658         struct lpfc_multixri_pool *multixri_pool;
21659         struct lpfc_multixri_pool *next_multixri_pool;
21660         struct lpfc_pvt_pool *pvt_pool;
21661         struct lpfc_pbl_pool *pbl_pool;
21662         struct lpfc_sli4_hdw_queue *qp;
21663         u32 next_hwqid;
21664         u32 hwq_count;
21665         int ret;
21666
21667         qp = &phba->sli4_hba.hdwq[hwqid];
21668         multixri_pool = qp->p_multixri_pool;
21669         pvt_pool = &multixri_pool->pvt_pool;
21670         pbl_pool = &multixri_pool->pbl_pool;
21671
21672         /* Check if local pbl_pool is available */
21673         ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21674         if (ret) {
21675 #ifdef LPFC_MXP_STAT
21676                 multixri_pool->local_pbl_hit_count++;
21677 #endif
21678                 return;
21679         }
21680
21681         hwq_count = phba->cfg_hdw_queue;
21682
21683         /* Get the next hwqid which was found last time */
21684         next_hwqid = multixri_pool->rrb_next_hwqid;
21685
21686         do {
21687                 /* Go to next hwq */
21688                 next_hwqid = (next_hwqid + 1) % hwq_count;
21689
21690                 next_multixri_pool =
21691                         phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21692                 pbl_pool = &next_multixri_pool->pbl_pool;
21693
21694                 /* Check if the public free xri pool is available */
21695                 ret = _lpfc_move_xri_pbl_to_pvt(
21696                         phba, qp, pbl_pool, pvt_pool, count);
21697
21698                 /* Exit the while-loop on success or when all hwqids are checked */
21699         } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21700
21701         /* Starting point for the next time */
21702         multixri_pool->rrb_next_hwqid = next_hwqid;
21703
21704         if (!ret) {
21705                 /* stats: all public pools are empty */
21706                 multixri_pool->pbl_empty_count++;
21707         }
21708
21709 #ifdef LPFC_MXP_STAT
21710         if (ret) {
21711                 if (next_hwqid == hwqid)
21712                         multixri_pool->local_pbl_hit_count++;
21713                 else
21714                         multixri_pool->other_pbl_hit_count++;
21715         }
21716 #endif
21717 }
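
/* Worked example of the round-robin walk (illustrative numbers): with
 * cfg_hdw_queue = 4, hwqid = 1 and rrb_next_hwqid = 2, a miss on the local
 * pool is followed by probes of the public pools of HWQs 3, 0, 1 and 2, in
 * that order, stopping early on the first pool that yields XRIs. The HWQ
 * that satisfied (or ended) the search becomes the new rrb_next_hwqid.
 */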
21718
21719 /**
21720  * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21721  * @phba: pointer to lpfc hba data structure.
21722  * @hwqid: index of the HWQ the pools belong to.
21723  *
21724  * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count
21725  * is below the low watermark.
21726  **/
21727 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21728 {
21729         struct lpfc_multixri_pool *multixri_pool;
21730         struct lpfc_pvt_pool *pvt_pool;
21731
21732         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21733         pvt_pool = &multixri_pool->pvt_pool;
21734
21735         if (pvt_pool->count < pvt_pool->low_watermark)
21736                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21737 }
21738
21739 /**
21740  * lpfc_release_io_buf - Return one IO buf back to free pool
21741  * @phba: pointer to lpfc hba data structure.
21742  * @lpfc_ncmd: IO buf to be returned.
21743  * @qp: the HWQ this IO buf belongs to.
21744  *
21745  * This routine returns one IO buf back to the free pool. If this is an urgent
21746  * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
21747  * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21748  * xri_limit.  If cfg_xri_rebalancing==0, the IO buf is returned to
21749  * lpfc_io_buf_list_put.
21750  **/
21751 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21752                          struct lpfc_sli4_hdw_queue *qp)
21753 {
21754         unsigned long iflag;
21755         struct lpfc_pbl_pool *pbl_pool;
21756         struct lpfc_pvt_pool *pvt_pool;
21757         struct lpfc_epd_pool *epd_pool;
21758         u32 txcmplq_cnt;
21759         u32 xri_owned;
21760         u32 xri_limit;
21761         u32 abts_io_bufs;
21762
21763         /* MUST zero fields if buffer is reused by another protocol */
21764         lpfc_ncmd->nvmeCmd = NULL;
21765         lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21766
21767         if (phba->cfg_xpsgl && !phba->nvmet_support &&
21768             !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21769                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21770
21771         if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21772                 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21773
21774         if (phba->cfg_xri_rebalancing) {
21775                 if (lpfc_ncmd->expedite) {
21776                         /* Return to expedite pool */
21777                         epd_pool = &phba->epd_pool;
21778                         spin_lock_irqsave(&epd_pool->lock, iflag);
21779                         list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21780                         epd_pool->count++;
21781                         spin_unlock_irqrestore(&epd_pool->lock, iflag);
21782                         return;
21783                 }
21784
21785                 /* Avoid invalid access if an IO sneaks in and is being rejected
21786                  * just _after_ xri pools are destroyed in lpfc_offline.
21787                  * Nothing much can be done at this point.
21788                  */
21789                 if (!qp->p_multixri_pool)
21790                         return;
21791
21792                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21793                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21794
21795                 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21796                 abts_io_bufs = qp->abts_scsi_io_bufs;
21797                 abts_io_bufs += qp->abts_nvme_io_bufs;
21798
21799                 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21800                 xri_limit = qp->p_multixri_pool->xri_limit;
21801
21802 #ifdef LPFC_MXP_STAT
21803                 if (xri_owned <= xri_limit)
21804                         qp->p_multixri_pool->below_limit_count++;
21805                 else
21806                         qp->p_multixri_pool->above_limit_count++;
21807 #endif
21808
21809                 /* XRI goes to either public or private free xri pool
21810                  *     based on watermark and xri_limit
21811                  */
21812                 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21813                     (xri_owned < xri_limit &&
21814                      pvt_pool->count < pvt_pool->high_watermark)) {
21815                         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21816                                                   qp, free_pvt_pool);
21817                         list_add_tail(&lpfc_ncmd->list,
21818                                       &pvt_pool->list);
21819                         pvt_pool->count++;
21820                         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21821                 } else {
21822                         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21823                                                   qp, free_pub_pool);
21824                         list_add_tail(&lpfc_ncmd->list,
21825                                       &pbl_pool->list);
21826                         pbl_pool->count++;
21827                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21828                 }
21829         } else {
21830                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21831                                           qp, free_xri);
21832                 list_add_tail(&lpfc_ncmd->list,
21833                               &qp->lpfc_io_buf_list_put);
21834                 qp->put_io_bufs++;
21835                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21836                                        iflag);
21837         }
21838 }
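
/* Worked example of the pvt/pbl routing above (illustrative numbers):
 * with low_watermark = 16, high_watermark = 64 and xri_limit = 128:
 *
 *	pvt count 10, any load            -> pvt_pool (below low watermark)
 *	pvt count 40, xri_owned 100 < 128 -> pvt_pool (below high watermark)
 *	pvt count 70, xri_owned 100       -> pbl_pool (high watermark hit)
 *	pvt count 40, xri_owned 130 > 128 -> pbl_pool (over xri_limit)
 */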
21839
21840 /**
21841  * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21842  * @phba: pointer to lpfc hba data structure.
21843  * @qp: pointer to HDW queue
21844  * @pvt_pool: pointer to private pool data structure.
21845  * @ndlp: pointer to lpfc nodelist data structure.
21846  *
21847  * This routine tries to get one free IO buf from private pool.
21848  *
21849  * Return:
21850  *   pointer to one free IO buf - if private pool is not empty
21851  *   NULL - if private pool is empty
21852  **/
21853 static struct lpfc_io_buf *
21854 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21855                                   struct lpfc_sli4_hdw_queue *qp,
21856                                   struct lpfc_pvt_pool *pvt_pool,
21857                                   struct lpfc_nodelist *ndlp)
21858 {
21859         struct lpfc_io_buf *lpfc_ncmd;
21860         struct lpfc_io_buf *lpfc_ncmd_next;
21861         unsigned long iflag;
21862
21863         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21864         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21865                                  &pvt_pool->list, list) {
21866                 if (lpfc_test_rrq_active(
21867                         phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21868                         continue;
21869                 list_del(&lpfc_ncmd->list);
21870                 pvt_pool->count--;
21871                 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21872                 return lpfc_ncmd;
21873         }
21874         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21875
21876         return NULL;
21877 }
21878
21879 /**
21880  * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21881  * @phba: pointer to lpfc hba data structure.
21882  *
21883  * This routine tries to get one free IO buf from expedite pool.
21884  *
21885  * Return:
21886  *   pointer to one free IO buf - if expedite pool is not empty
21887  *   NULL - if expedite pool is empty
21888  **/
21889 static struct lpfc_io_buf *
21890 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21891 {
21892         struct lpfc_io_buf *lpfc_ncmd;
21893         struct lpfc_io_buf *lpfc_ncmd_next;
21894         unsigned long iflag;
21895         struct lpfc_epd_pool *epd_pool;
21896
21897         epd_pool = &phba->epd_pool;
21898         lpfc_ncmd = NULL;
21899
21900         spin_lock_irqsave(&epd_pool->lock, iflag);
21901         if (epd_pool->count > 0) {
21902                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21903                                          &epd_pool->list, list) {
21904                         list_del(&lpfc_ncmd->list);
21905                         epd_pool->count--;
21906                         break;
21907                 }
21908         }
21909         spin_unlock_irqrestore(&epd_pool->lock, iflag);
21910
21911         return lpfc_ncmd;
21912 }
21913
21914 /**
21915  * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
21916  * @phba: pointer to lpfc hba data structure.
21917  * @ndlp: pointer to lpfc nodelist data structure.
21918  * @hwqid: index of the HWQ to allocate from.
21919  * @expedite: 1 means this request is urgent.
21920  *
21921  * This routine will do the following actions and then return a pointer to
21922  * one free IO buf.
21923  *
21924  * 1. If private free xri count is empty, move some XRIs from public to
21925  *    private pool.
21926  * 2. Get one XRI from private free xri pool.
21927  * 3. If we fail to get one from pvt_pool and this is an expedite request,
21928  *    get one free xri from expedite pool.
21929  *
21930  * Note: ndlp is only used on SCSI side for RRQ testing.
21931  *       The caller should pass NULL for ndlp on NVME side.
21932  *
21933  * Return:
21934  *   pointer to one free IO buf - if private pool is not empty
21935  *   NULL - if private pool is empty
21936  **/
21937 static struct lpfc_io_buf *
21938 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21939                                     struct lpfc_nodelist *ndlp,
21940                                     int hwqid, int expedite)
21941 {
21942         struct lpfc_sli4_hdw_queue *qp;
21943         struct lpfc_multixri_pool *multixri_pool;
21944         struct lpfc_pvt_pool *pvt_pool;
21945         struct lpfc_io_buf *lpfc_ncmd;
21946
21947         qp = &phba->sli4_hba.hdwq[hwqid];
21948         lpfc_ncmd = NULL;
21949         if (!qp) {
21950                 lpfc_printf_log(phba, KERN_INFO,
21951                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21952                                 "5556 NULL qp for hwqid  x%x\n", hwqid);
21953                 return lpfc_ncmd;
21954         }
21955         multixri_pool = qp->p_multixri_pool;
21956         if (!multixri_pool) {
21957                 lpfc_printf_log(phba, KERN_INFO,
21958                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21959                                 "5557 NULL multixri for hwqid  x%x\n", hwqid);
21960                 return lpfc_ncmd;
21961         }
21962         pvt_pool = &multixri_pool->pvt_pool;
21963         if (!pvt_pool) {
21964                 lpfc_printf_log(phba, KERN_INFO,
21965                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21966                                 "5558 NULL pvt_pool for hwqid  x%x\n", hwqid);
21967                 return lpfc_ncmd;
21968         }
21969         multixri_pool->io_req_count++;
21970
21971         /* If pvt_pool is empty, move some XRIs from public to private pool */
21972         if (pvt_pool->count == 0)
21973                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21974
21975         /* Get one XRI from private free xri pool */
21976         lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21977
21978         if (lpfc_ncmd) {
21979                 lpfc_ncmd->hdwq = qp;
21980                 lpfc_ncmd->hdwq_no = hwqid;
21981         } else if (expedite) {
21982                 /* If we fail to get one from pvt_pool and this is an expedite
21983                  * request, get one free xri from expedite pool.
21984                  */
21985                 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21986         }
21987
21988         return lpfc_ncmd;
21989 }
21990
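/* Helper for the non-rebalancing path below: pull the first usable buffer
 * off this HWQ's get list, skipping XRIs that are still RRQ-active for
 * @ndlp and buffers flagged LPFC_SBUF_NOT_POSTED. Callers are expected to
 * hold the io_buf_list_get_lock, as lpfc_get_io_buf() does.
 */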
21991 static inline struct lpfc_io_buf *
21992 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21993 {
21994         struct lpfc_sli4_hdw_queue *qp;
21995         struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21996
21997         qp = &phba->sli4_hba.hdwq[idx];
21998         list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21999                                  &qp->lpfc_io_buf_list_get, list) {
22000                 if (lpfc_test_rrq_active(phba, ndlp,
22001                                          lpfc_cmd->cur_iocbq.sli4_lxritag))
22002                         continue;
22003
22004                 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
22005                         continue;
22006
22007                 list_del_init(&lpfc_cmd->list);
22008                 qp->get_io_bufs--;
22009                 lpfc_cmd->hdwq = qp;
22010                 lpfc_cmd->hdwq_no = idx;
22011                 return lpfc_cmd;
22012         }
22013         return NULL;
22014 }
22015
22016 /**
22017  * lpfc_get_io_buf - Get one IO buffer from free pool
22018  * @phba: The HBA for which this call is being executed.
22019  * @ndlp: pointer to lpfc nodelist data structure.
22020  * @hwqid: index of the HWQ to allocate from
22021  * @expedite: 1 means this request is urgent.
22022  *
22023  * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
22024  * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it
22025  * removes an IO buffer from the head of the @hdwq io_buf_list and returns it to the caller.
22026  *
22027  * Note: ndlp is only used on SCSI side for RRQ testing.
22028  *       The caller should pass NULL for ndlp on NVME side.
22029  *
22030  * Return codes:
22031  *   NULL - Error
22032  *   Pointer to lpfc_io_buf - Success
22033  **/
22034 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
22035                                     struct lpfc_nodelist *ndlp,
22036                                     u32 hwqid, int expedite)
22037 {
22038         struct lpfc_sli4_hdw_queue *qp;
22039         unsigned long iflag;
22040         struct lpfc_io_buf *lpfc_cmd;
22041
22042         qp = &phba->sli4_hba.hdwq[hwqid];
22043         lpfc_cmd = NULL;
22044         if (!qp) {
22045                 lpfc_printf_log(phba, KERN_WARNING,
22046                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22047                                 "5555 NULL qp for hwqid x%x\n", hwqid);
22048                 return lpfc_cmd;
22049         }
22050
22051         if (phba->cfg_xri_rebalancing)
22052                 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22053                         phba, ndlp, hwqid, expedite);
22054         else {
22055                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22056                                           qp, alloc_xri_get);
22057                 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22058                         lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22059                 if (!lpfc_cmd) {
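			/*
			 * The get-list is empty: refill it by splicing over
			 * the put-list (completions are queued on the
			 * put-list so the two paths contend on different
			 * locks), then retry the dequeue.
			 */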
22060                         lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22061                                           qp, alloc_xri_put);
22062                         list_splice(&qp->lpfc_io_buf_list_put,
22063                                     &qp->lpfc_io_buf_list_get);
22064                         qp->get_io_bufs += qp->put_io_bufs;
22065                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22066                         qp->put_io_bufs = 0;
22067                         spin_unlock(&qp->io_buf_list_put_lock);
22068                         if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22069                             expedite)
22070                                 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22071                 }
22072                 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
22073         }
22074
22075         return lpfc_cmd;
22076 }
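/*
 * Illustrative usage sketch only (not part of the driver flow): a typical
 * caller pairs lpfc_get_io_buf() with lpfc_release_io_buf(), passing a
 * non-NULL ndlp only on the SCSI side. Everything besides those two calls
 * is a placeholder.
 *
 *	struct lpfc_io_buf *iobuf;
 *
 *	iobuf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!iobuf)
 *		return NULL;	(pool exhausted; caller backs off)
 *	(... build the WQE around iobuf->cur_iocbq and issue it ...)
 *	(on completion or error:)
 *	lpfc_release_io_buf(phba, iobuf, iobuf->hdwq);
 */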
22077
22078 /**
22079  * lpfc_read_object - Retrieve object data from HBA
22080  * @phba: The HBA for which this call is being executed.
22081  * @rdobject: Pathname of object data we want to read.
22082  * @datap: Pointer to where data will be copied to.
22083  * @datasz: size of data area
22084  *
22085  * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22086  * The data will be truncated if datasz is not large enough.
22087  * Version 1 is not supported with the embedded mbox cmd, so version 0 must be used.
22088  * Returns the number of bytes read on success, or a negative errno on failure.
22089  */
22090 int
22091 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
22092                  uint32_t datasz)
22093 {
22094         struct lpfc_mbx_read_object *read_object;
22095         LPFC_MBOXQ_t *mbox;
22096         int rc, length, eof, j, byte_cnt = 0;
22097         uint32_t shdr_status, shdr_add_status;
22098         union lpfc_sli4_cfg_shdr *shdr;
22099         struct lpfc_dmabuf *pcmd;
22100         u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22101
22102         /* sanity check on the caller-supplied data pointer */
22103         if (!datap)
22104                 return -ENODEV;
22105
22106         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22107         if (!mbox)
22108                 return -ENOMEM;
22109         length = (sizeof(struct lpfc_mbx_read_object) -
22110                   sizeof(struct lpfc_sli4_cfg_mhdr));
22111         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22112                          LPFC_MBOX_OPCODE_READ_OBJECT,
22113                          length, LPFC_SLI4_MBX_EMBED);
22114         read_object = &mbox->u.mqe.un.read_object;
22115         shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22116
22117         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22118         bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22119         read_object->u.request.rd_object_offset = 0;
22120         read_object->u.request.rd_object_cnt = 1;
22121
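	/*
	 * Copy the object pathname into the request, zero-padded to
	 * LPFC_OBJ_NAME_SZ and stored as little-endian words.
	 */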
22122         memset((void *)read_object->u.request.rd_object_name, 0,
22123                LPFC_OBJ_NAME_SZ);
22124         scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
22125         for (j = 0; j < strlen(rdobject); j++)
22126                 read_object->u.request.rd_object_name[j] =
22127                         cpu_to_le32(rd_object_name[j]);
22128
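	/*
	 * Allocate a DMA-able buffer to receive the object data; its bus
	 * address is handed to the port in the rd_object_hbuf BDE below.
	 */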
22129         pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
22130         if (pcmd)
22131                 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22132         if (!pcmd || !pcmd->virt) {
22133                 kfree(pcmd);
22134                 mempool_free(mbox, phba->mbox_mem_pool);
22135                 return -ENOMEM;
22136         }
22137         memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22138         read_object->u.request.rd_object_hbuf[0].pa_lo =
22139                 putPaddrLow(pcmd->phys);
22140         read_object->u.request.rd_object_hbuf[0].pa_hi =
22141                 putPaddrHigh(pcmd->phys);
22142         read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22143
22144         mbox->vport = phba->pport;
22145         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22146         mbox->ctx_buf = NULL;
22147         mbox->ctx_ndlp = NULL;
22148
22149         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22150         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22151         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22152
22153         if (shdr_status == STATUS_FAILED &&
22154             shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22155                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22156                                 "4674 No port cfg file in FW.\n");
22157                 byte_cnt = -ENOENT;
22158         } else if (shdr_status || shdr_add_status || rc) {
22159                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22160                                 "2625 READ_OBJECT mailbox failed with "
22161                                 "status x%x add_status x%x, mbx status x%x\n",
22162                                 shdr_status, shdr_add_status, rc);
22163                 byte_cnt = -ENXIO;
22164         } else {
22165                 /* Success */
22166                 length = read_object->u.response.rd_object_actual_rlen;
22167                 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22168                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22169                                 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22170                                 length, datasz, eof);
22171
22172                 /* Detect the port config file exists but is empty */
22173                 if (!length && eof) {
22174                         byte_cnt = 0;
22175                         goto exit;
22176                 }
22177
22178                 byte_cnt = length;
22179                 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22180         }
22181
22182  exit:
22183         lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22184         kfree(pcmd);
22185         mempool_free(mbox, phba->mbox_mem_pool);
22186         return byte_cnt;
22187 }
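/*
 * Illustrative usage sketch only: reading a small object into a local
 * buffer. The object name below is a placeholder, not a path guaranteed
 * to exist in any firmware.
 *
 *	u32 *datap;
 *	int len;
 *
 *	datap = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
 *	if (!datap)
 *		return -ENOMEM;
 *	len = lpfc_read_object(phba, "/driver/example.cfg", datap,
 *			       LPFC_BPL_SIZE);
 *	if (len < 0)
 *		(-ENOENT: object absent, -ENXIO: mailbox failure)
 *	kfree(datap);
 */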
22188
22189 /**
22190  * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22191  * @phba: The HBA for which this call is being executed.
22192  * @lpfc_buf: IO buf structure to append the SGL chunk
22193  *
22194  * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22195  * and will allocate an SGL chunk if the pool is empty.
22196  *
22197  * Return codes:
22198  *   NULL - Error
22199  *   Pointer to sli4_hybrid_sgl - Success
22200  **/
22201 struct sli4_hybrid_sgl *
22202 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22203 {
22204         struct sli4_hybrid_sgl *list_entry = NULL;
22205         struct sli4_hybrid_sgl *tmp = NULL;
22206         struct sli4_hybrid_sgl *allocated_sgl = NULL;
22207         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22208         struct list_head *buf_list = &hdwq->sgl_list;
22209         unsigned long iflags;
22210
22211         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22212
22213         if (likely(!list_empty(buf_list))) {
22214                 /* break off 1 chunk from the sgl_list */
22215                 list_for_each_entry_safe(list_entry, tmp,
22216                                          buf_list, list_node) {
22217                         list_move_tail(&list_entry->list_node,
22218                                        &lpfc_buf->dma_sgl_xtra_list);
22219                         break;
22220                 }
22221         } else {
22222                 /* allocate more */
22223                 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22224                 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22225                                    cpu_to_node(hdwq->io_wq->chann));
22226                 if (!tmp) {
22227                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22228                                         "8353 error kmalloc memory for HDWQ "
22229                                         "%d %s\n",
22230                                         lpfc_buf->hdwq_no, __func__);
22231                         return NULL;
22232                 }
22233
22234                 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22235                                               GFP_ATOMIC, &tmp->dma_phys_sgl);
22236                 if (!tmp->dma_sgl) {
22237                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22238                                         "8354 error pool_alloc memory for HDWQ "
22239                                         "%d %s\n",
22240                                         lpfc_buf->hdwq_no, __func__);
22241                         kfree(tmp);
22242                         return NULL;
22243                 }
22244
22245                 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22246                 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22247         }
22248
22249         allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22250                                         struct sli4_hybrid_sgl,
22251                                         list_node);
22252
22253         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22254
22255         return allocated_sgl;
22256 }
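/*
 * Illustrative pairing sketch only: an IO needing more SGEs than its
 * inline SGL provides borrows chunks here and returns all of them via
 * lpfc_put_sgl_per_hdwq() before the IO buffer itself is released.
 *
 *	struct sli4_hybrid_sgl *sgl;
 *
 *	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
 *	if (!sgl)
 *		(fail or fall back to a smaller transfer)
 *	(... chain sgl->dma_sgl / sgl->dma_phys_sgl into the SGL ...)
 *	lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
 */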
22257
22258 /**
22259  * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22260  * @phba: The HBA for which this call is being executed.
22261  * @lpfc_buf: IO buf structure with the SGL chunk
22262  *
22263  * This routine returns the SGL chunks attached to @lpfc_buf to hdwq's SGL chunk pool.
22264  *
22265  * Return codes:
22266  *   0 - Success
22267  *   -EINVAL - Error
22268  **/
22269 int
22270 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22271 {
22272         int rc = 0;
22273         struct sli4_hybrid_sgl *list_entry = NULL;
22274         struct sli4_hybrid_sgl *tmp = NULL;
22275         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22276         struct list_head *buf_list = &hdwq->sgl_list;
22277         unsigned long iflags;
22278
22279         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22280
22281         if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22282                 list_for_each_entry_safe(list_entry, tmp,
22283                                          &lpfc_buf->dma_sgl_xtra_list,
22284                                          list_node) {
22285                         list_move_tail(&list_entry->list_node,
22286                                        buf_list);
22287                 }
22288         } else {
22289                 rc = -EINVAL;
22290         }
22291
22292         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22293         return rc;
22294 }
22295
22296 /**
22297  * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22298  * @phba: phba object
22299  * @hdwq: hdwq to cleanup sgl buff resources on
22300  *
22301  * This routine frees all SGL chunks of hdwq SGL chunk pool.
22302  *
22303  * Return codes:
22304  *   None
22305  **/
22306 void
22307 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22308                        struct lpfc_sli4_hdw_queue *hdwq)
22309 {
22310         struct list_head *buf_list = &hdwq->sgl_list;
22311         struct sli4_hybrid_sgl *list_entry = NULL;
22312         struct sli4_hybrid_sgl *tmp = NULL;
22313         unsigned long iflags;
22314
22315         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22316
22317         /* Free sgl pool */
22318         list_for_each_entry_safe(list_entry, tmp,
22319                                  buf_list, list_node) {
22320                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22321                               list_entry->dma_sgl,
22322                               list_entry->dma_phys_sgl);
22323                 list_del(&list_entry->list_node);
22324                 kfree(list_entry);
22325         }
22326
22327         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22328 }
22329
22330 /**
22331  * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22332  * @phba: The HBA for which this call is being executed.
22333  * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22334  *
22335  * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22336  * and will allocate a CMD/RSP buffer if the pool is empty.
22337  *
22338  * Return codes:
22339  *   NULL - Error
22340  *   Pointer to fcp_cmd_rsp_buf - Success
22341  **/
22342 struct fcp_cmd_rsp_buf *
22343 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22344                               struct lpfc_io_buf *lpfc_buf)
22345 {
22346         struct fcp_cmd_rsp_buf *list_entry = NULL;
22347         struct fcp_cmd_rsp_buf *tmp = NULL;
22348         struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22349         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22350         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22351         unsigned long iflags;
22352
22353         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22354
22355         if (likely(!list_empty(buf_list))) {
22356                 /* break off 1 chunk from the list */
22357                 list_for_each_entry_safe(list_entry, tmp,
22358                                          buf_list,
22359                                          list_node) {
22360                         list_move_tail(&list_entry->list_node,
22361                                        &lpfc_buf->dma_cmd_rsp_list);
22362                         break;
22363                 }
22364         } else {
22365                 /* allocate more */
22366                 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22367                 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22368                                    cpu_to_node(hdwq->io_wq->chann));
22369                 if (!tmp) {
22370                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22371                                         "8355 error kmalloc memory for HDWQ "
22372                                         "%d %s\n",
22373                                         lpfc_buf->hdwq_no, __func__);
22374                         return NULL;
22375                 }
22376
22377                 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
22378                                                 GFP_ATOMIC,
22379                                                 &tmp->fcp_cmd_rsp_dma_handle);
22380
22381                 if (!tmp->fcp_cmnd) {
22382                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22383                                         "8356 error pool_alloc memory for HDWQ "
22384                                         "%d %s\n",
22385                                         lpfc_buf->hdwq_no, __func__);
22386                         kfree(tmp);
22387                         return NULL;
22388                 }
22389
22390                 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22391                                 sizeof(struct fcp_cmnd));
22392
22393                 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22394                 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22395         }
22396
22397         allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22398                                         struct fcp_cmd_rsp_buf,
22399                                         list_node);
22400
22401         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22402
22403         return allocated_buf;
22404 }
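/*
 * Layout note: fcp_cmnd and fcp_rsp share a single allocation from
 * lpfc_cmd_rsp_buf_pool; fcp_rsp begins immediately after the FCP_CMND
 * IU, so one bus address (fcp_cmd_rsp_dma_handle) maps both frames.
 */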
22405
22406 /**
22407  * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22408  * @phba: The HBA for which this call is being executed.
22409  * @lpfc_buf: IO buf structure with the CMD/RSP buf
22410  *
22411  * This routine returns the CMD/RSP buffers attached to @lpfc_buf to hdwq's CMD/RSP pool.
22412  *
22413  * Return codes:
22414  *   0 - Success
22415  *   -EINVAL - Error
22416  **/
22417 int
22418 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22419                               struct lpfc_io_buf *lpfc_buf)
22420 {
22421         int rc = 0;
22422         struct fcp_cmd_rsp_buf *list_entry = NULL;
22423         struct fcp_cmd_rsp_buf *tmp = NULL;
22424         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22425         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22426         unsigned long iflags;
22427
22428         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22429
22430         if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22431                 list_for_each_entry_safe(list_entry, tmp,
22432                                          &lpfc_buf->dma_cmd_rsp_list,
22433                                          list_node) {
22434                         list_move_tail(&list_entry->list_node,
22435                                        buf_list);
22436                 }
22437         } else {
22438                 rc = -EINVAL;
22439         }
22440
22441         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22442         return rc;
22443 }
22444
22445 /**
22446  * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22447  * @phba: phba object
22448  * @hdwq: hdwq to cleanup cmd rsp buff resources on
22449  *
22450  * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22451  *
22452  * Return codes:
22453  *   None
22454  **/
22455 void
22456 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22457                                struct lpfc_sli4_hdw_queue *hdwq)
22458 {
22459         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22460         struct fcp_cmd_rsp_buf *list_entry = NULL;
22461         struct fcp_cmd_rsp_buf *tmp = NULL;
22462         unsigned long iflags;
22463
22464         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22465
22466         /* Free cmd_rsp buf pool */
22467         list_for_each_entry_safe(list_entry, tmp,
22468                                  buf_list,
22469                                  list_node) {
22470                 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22471                               list_entry->fcp_cmnd,
22472                               list_entry->fcp_cmd_rsp_dma_handle);
22473                 list_del(&list_entry->list_node);
22474                 kfree(list_entry);
22475         }
22476
22477         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22478 }
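/*
 * Illustrative teardown ordering sketch only: by the time a hdwq is torn
 * down, in-flight IOs should have returned their chunks through the put
 * routines; the free routines then drain whatever remains pooled.
 *
 *	lpfc_free_sgl_per_hdwq(phba, hdwq);
 *	lpfc_free_cmd_rsp_buf_per_hdwq(phba, hdwq);
 */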