[SCSI] scsi: fix lpfc build when wmb() is defined as mb()
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / scsi / lpfc / lpfc_sli.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 #include <linux/slab.h>
27
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/fc/fc_fs.h>
34 #include <linux/aer.h>
35
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_compat.h"
47 #include "lpfc_debugfs.h"
48 #include "lpfc_vport.h"
49
50 /* There are only four IOCB completion types. */
51 typedef enum _lpfc_iocb_type {
52         LPFC_UNKNOWN_IOCB,
53         LPFC_UNSOL_IOCB,
54         LPFC_SOL_IOCB,
55         LPFC_ABORT_IOCB
56 } lpfc_iocb_type;
57
58
59 /* Provide function prototypes local to this module. */
60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
61                                   uint32_t);
62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
63                               uint8_t *, uint32_t *);
64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65                                                          struct lpfc_iocbq *);
66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67                                       struct hbq_dmabuf *);
68 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69                                     struct lpfc_cqe *);
70 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
71                                        int);
72 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
73                         uint32_t);
74
75 static IOCB_t *
76 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
77 {
78         return &iocbq->iocb;
79 }
80
81 /**
82  * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
83  * @q: The Work Queue to operate on.
84  * @wqe: The work Queue Entry to put on the Work queue.
85  *
86  * This routine will copy the contents of @wqe to the next available entry on
87  * the @q. This function will then ring the Work Queue Doorbell to signal the
88  * HBA to start processing the Work Queue Entry. This function returns 0 if
89  * successful. If no entries are available on @q then this function will return
90  * -ENOMEM.
91  * The caller is expected to hold the hbalock when calling this routine.
92  **/
93 static uint32_t
94 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
95 {
96         union lpfc_wqe *temp_wqe;
97         struct lpfc_register doorbell;
98         uint32_t host_index;
99         uint32_t idx;
100
101         /* sanity check on queue memory */
102         if (unlikely(!q))
103                 return -ENOMEM;
104         temp_wqe = q->qe[q->host_index].wqe;
105
106         /* If the host has not yet processed the next entry then we are done */
107         idx = ((q->host_index + 1) % q->entry_count);
108         if (idx == q->hba_index) {
109                 q->WQ_overflow++;
110                 return -ENOMEM;
111         }
112         q->WQ_posted++;
113         /* set consumption flag every once in a while */
114         if (!((q->host_index + 1) % q->entry_repost))
115                 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
116         if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
117                 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
118         lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
119
120         /* Update the host index before invoking device */
121         host_index = q->host_index;
122
123         q->host_index = idx;
124
125         /* Ring Doorbell */
126         doorbell.word0 = 0;
127         if (q->db_format == LPFC_DB_LIST_FORMAT) {
128                 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
129                 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
130                 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
131         } else if (q->db_format == LPFC_DB_RING_FORMAT) {
132                 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
133                 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
134         } else {
135                 return -EINVAL;
136         }
137         writel(doorbell.word0, q->db_regaddr);
138
139         return 0;
140 }
141
142 /**
143  * lpfc_sli4_wq_release - Updates internal hba index for WQ
144  * @q: The Work Queue to operate on.
145  * @index: The index to advance the hba index to.
146  *
147  * This routine will update the HBA index of a queue to reflect consumption of
148  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
149  * an entry the host calls this function to update the queue's internal
150  * pointers. This routine returns the number of entries that were consumed by
151  * the HBA.
152  **/
153 static uint32_t
154 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
155 {
156         uint32_t released = 0;
157
158         /* sanity check on queue memory */
159         if (unlikely(!q))
160                 return 0;
161
162         if (q->hba_index == index)
163                 return 0;
164         do {
165                 q->hba_index = ((q->hba_index + 1) % q->entry_count);
166                 released++;
167         } while (q->hba_index != index);
168         return released;
169 }
170
171 /**
172  * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
173  * @q: The Mailbox Queue to operate on.
174  * @wqe: The Mailbox Queue Entry to put on the Work queue.
175  *
176  * This routine will copy the contents of @mqe to the next available entry on
177  * the @q. This function will then ring the Work Queue Doorbell to signal the
178  * HBA to start processing the Work Queue Entry. This function returns 0 if
179  * successful. If no entries are available on @q then this function will return
180  * -ENOMEM.
181  * The caller is expected to hold the hbalock when calling this routine.
182  **/
183 static uint32_t
184 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
185 {
186         struct lpfc_mqe *temp_mqe;
187         struct lpfc_register doorbell;
188         uint32_t host_index;
189
190         /* sanity check on queue memory */
191         if (unlikely(!q))
192                 return -ENOMEM;
193         temp_mqe = q->qe[q->host_index].mqe;
194
195         /* If the host has not yet processed the next entry then we are done */
196         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
197                 return -ENOMEM;
198         lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
199         /* Save off the mailbox pointer for completion */
200         q->phba->mbox = (MAILBOX_t *)temp_mqe;
201
202         /* Update the host index before invoking device */
203         host_index = q->host_index;
204         q->host_index = ((q->host_index + 1) % q->entry_count);
205
206         /* Ring Doorbell */
207         doorbell.word0 = 0;
208         bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
209         bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
210         writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
211         return 0;
212 }
213
214 /**
215  * lpfc_sli4_mq_release - Updates internal hba index for MQ
216  * @q: The Mailbox Queue to operate on.
217  *
218  * This routine will update the HBA index of a queue to reflect consumption of
219  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
220  * an entry the host calls this function to update the queue's internal
221  * pointers. This routine returns the number of entries that were consumed by
222  * the HBA.
223  **/
224 static uint32_t
225 lpfc_sli4_mq_release(struct lpfc_queue *q)
226 {
227         /* sanity check on queue memory */
228         if (unlikely(!q))
229                 return 0;
230
231         /* Clear the mailbox pointer for completion */
232         q->phba->mbox = NULL;
233         q->hba_index = ((q->hba_index + 1) % q->entry_count);
234         return 1;
235 }
236
237 /**
238  * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
239  * @q: The Event Queue to get the first valid EQE from
240  *
241  * This routine will get the first valid Event Queue Entry from @q, update
242  * the queue's internal hba index, and return the EQE. If no valid EQEs are in
243  * the Queue (no more work to do), or the Queue is full of EQEs that have been
244  * processed, but not popped back to the HBA then this routine will return NULL.
245  **/
246 static struct lpfc_eqe *
247 lpfc_sli4_eq_get(struct lpfc_queue *q)
248 {
249         struct lpfc_eqe *eqe;
250         uint32_t idx;
251
252         /* sanity check on queue memory */
253         if (unlikely(!q))
254                 return NULL;
255         eqe = q->qe[q->hba_index].eqe;
256
257         /* If the next EQE is not valid then we are done */
258         if (!bf_get_le32(lpfc_eqe_valid, eqe))
259                 return NULL;
260         /* If the host has not yet processed the next entry then we are done */
261         idx = ((q->hba_index + 1) % q->entry_count);
262         if (idx == q->host_index)
263                 return NULL;
264
265         q->hba_index = idx;
266         return eqe;
267 }
268
269 /**
270  * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
271  * @q: The Event Queue to disable interrupts
272  *
273  **/
274 static inline void
275 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
276 {
277         struct lpfc_register doorbell;
278
279         doorbell.word0 = 0;
280         bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
281         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
282         bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
283                 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
284         bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
285         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
286 }
287
288 /**
289  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
290  * @q: The Event Queue that the host has completed processing for.
291  * @arm: Indicates whether the host wants to arms this CQ.
292  *
293  * This routine will mark all Event Queue Entries on @q, from the last
294  * known completed entry to the last entry that was processed, as completed
295  * by clearing the valid bit for each completion queue entry. Then it will
296  * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
297  * The internal host index in the @q will be updated by this routine to indicate
298  * that the host has finished processing the entries. The @arm parameter
299  * indicates that the queue should be rearmed when ringing the doorbell.
300  *
301  * This function will return the number of EQEs that were popped.
302  **/
303 uint32_t
304 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
305 {
306         uint32_t released = 0;
307         struct lpfc_eqe *temp_eqe;
308         struct lpfc_register doorbell;
309
310         /* sanity check on queue memory */
311         if (unlikely(!q))
312                 return 0;
313
314         /* while there are valid entries */
315         while (q->hba_index != q->host_index) {
316                 temp_eqe = q->qe[q->host_index].eqe;
317                 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
318                 released++;
319                 q->host_index = ((q->host_index + 1) % q->entry_count);
320         }
321         if (unlikely(released == 0 && !arm))
322                 return 0;
323
324         /* ring doorbell for number popped */
325         doorbell.word0 = 0;
326         if (arm) {
327                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
328                 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
329         }
330         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
331         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
332         bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
333                         (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
334         bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
335         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
336         /* PCI read to flush PCI pipeline on re-arming for INTx mode */
337         if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
338                 readl(q->phba->sli4_hba.EQCQDBregaddr);
339         return released;
340 }
341
342 /**
343  * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
344  * @q: The Completion Queue to get the first valid CQE from
345  *
346  * This routine will get the first valid Completion Queue Entry from @q, update
347  * the queue's internal hba index, and return the CQE. If no valid CQEs are in
348  * the Queue (no more work to do), or the Queue is full of CQEs that have been
349  * processed, but not popped back to the HBA then this routine will return NULL.
350  **/
351 static struct lpfc_cqe *
352 lpfc_sli4_cq_get(struct lpfc_queue *q)
353 {
354         struct lpfc_cqe *cqe;
355         uint32_t idx;
356
357         /* sanity check on queue memory */
358         if (unlikely(!q))
359                 return NULL;
360
361         /* If the next CQE is not valid then we are done */
362         if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
363                 return NULL;
364         /* If the host has not yet processed the next entry then we are done */
365         idx = ((q->hba_index + 1) % q->entry_count);
366         if (idx == q->host_index)
367                 return NULL;
368
369         cqe = q->qe[q->hba_index].cqe;
370         q->hba_index = idx;
371         return cqe;
372 }
373
374 /**
375  * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
376  * @q: The Completion Queue that the host has completed processing for.
377  * @arm: Indicates whether the host wants to arms this CQ.
378  *
379  * This routine will mark all Completion queue entries on @q, from the last
380  * known completed entry to the last entry that was processed, as completed
381  * by clearing the valid bit for each completion queue entry. Then it will
382  * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
383  * The internal host index in the @q will be updated by this routine to indicate
384  * that the host has finished processing the entries. The @arm parameter
385  * indicates that the queue should be rearmed when ringing the doorbell.
386  *
387  * This function will return the number of CQEs that were released.
388  **/
389 uint32_t
390 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
391 {
392         uint32_t released = 0;
393         struct lpfc_cqe *temp_qe;
394         struct lpfc_register doorbell;
395
396         /* sanity check on queue memory */
397         if (unlikely(!q))
398                 return 0;
399         /* while there are valid entries */
400         while (q->hba_index != q->host_index) {
401                 temp_qe = q->qe[q->host_index].cqe;
402                 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
403                 released++;
404                 q->host_index = ((q->host_index + 1) % q->entry_count);
405         }
406         if (unlikely(released == 0 && !arm))
407                 return 0;
408
409         /* ring doorbell for number popped */
410         doorbell.word0 = 0;
411         if (arm)
412                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
413         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
414         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
415         bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
416                         (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
417         bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
418         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
419         return released;
420 }
421
422 /**
423  * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
424  * @q: The Header Receive Queue to operate on.
425  * @wqe: The Receive Queue Entry to put on the Receive queue.
426  *
427  * This routine will copy the contents of @wqe to the next available entry on
428  * the @q. This function will then ring the Receive Queue Doorbell to signal the
429  * HBA to start processing the Receive Queue Entry. This function returns the
430  * index that the rqe was copied to if successful. If no entries are available
431  * on @q then this function will return -ENOMEM.
432  * The caller is expected to hold the hbalock when calling this routine.
433  **/
434 static int
435 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
436                  struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
437 {
438         struct lpfc_rqe *temp_hrqe;
439         struct lpfc_rqe *temp_drqe;
440         struct lpfc_register doorbell;
441         int put_index = hq->host_index;
442
443         /* sanity check on queue memory */
444         if (unlikely(!hq) || unlikely(!dq))
445                 return -ENOMEM;
446         temp_hrqe = hq->qe[hq->host_index].rqe;
447         temp_drqe = dq->qe[dq->host_index].rqe;
448
449         if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
450                 return -EINVAL;
451         if (hq->host_index != dq->host_index)
452                 return -EINVAL;
453         /* If the host has not yet processed the next entry then we are done */
454         if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
455                 return -EBUSY;
456         lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
457         lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
458
459         /* Update the host index to point to the next slot */
460         hq->host_index = ((hq->host_index + 1) % hq->entry_count);
461         dq->host_index = ((dq->host_index + 1) % dq->entry_count);
462
463         /* Ring The Header Receive Queue Doorbell */
464         if (!(hq->host_index % hq->entry_repost)) {
465                 doorbell.word0 = 0;
466                 if (hq->db_format == LPFC_DB_RING_FORMAT) {
467                         bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
468                                hq->entry_repost);
469                         bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
470                 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
471                         bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
472                                hq->entry_repost);
473                         bf_set(lpfc_rq_db_list_fm_index, &doorbell,
474                                hq->host_index);
475                         bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
476                 } else {
477                         return -EINVAL;
478                 }
479                 writel(doorbell.word0, hq->db_regaddr);
480         }
481         return put_index;
482 }
483
484 /**
485  * lpfc_sli4_rq_release - Updates internal hba index for RQ
486  * @q: The Header Receive Queue to operate on.
487  *
488  * This routine will update the HBA index of a queue to reflect consumption of
489  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
490  * consumed an entry the host calls this function to update the queue's
491  * internal pointers. This routine returns the number of entries that were
492  * consumed by the HBA.
493  **/
494 static uint32_t
495 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
496 {
497         /* sanity check on queue memory */
498         if (unlikely(!hq) || unlikely(!dq))
499                 return 0;
500
501         if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
502                 return 0;
503         hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
504         dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
505         return 1;
506 }
507
508 /**
509  * lpfc_cmd_iocb - Get next command iocb entry in the ring
510  * @phba: Pointer to HBA context object.
511  * @pring: Pointer to driver SLI ring object.
512  *
513  * This function returns pointer to next command iocb entry
514  * in the command ring. The caller must hold hbalock to prevent
515  * other threads consume the next command iocb.
516  * SLI-2/SLI-3 provide different sized iocbs.
517  **/
518 static inline IOCB_t *
519 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
520 {
521         return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
522                            pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
523 }
524
525 /**
526  * lpfc_resp_iocb - Get next response iocb entry in the ring
527  * @phba: Pointer to HBA context object.
528  * @pring: Pointer to driver SLI ring object.
529  *
530  * This function returns pointer to next response iocb entry
531  * in the response ring. The caller must hold hbalock to make sure
532  * that no other thread consume the next response iocb.
533  * SLI-2/SLI-3 provide different sized iocbs.
534  **/
535 static inline IOCB_t *
536 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
537 {
538         return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
539                            pring->sli.sli3.rspidx * phba->iocb_rsp_size);
540 }
541
542 /**
543  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
544  * @phba: Pointer to HBA context object.
545  *
546  * This function is called with hbalock held. This function
547  * allocates a new driver iocb object from the iocb pool. If the
548  * allocation is successful, it returns pointer to the newly
549  * allocated iocb object else it returns NULL.
550  **/
551 struct lpfc_iocbq *
552 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
553 {
554         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
555         struct lpfc_iocbq * iocbq = NULL;
556
557         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
558         if (iocbq)
559                 phba->iocb_cnt++;
560         if (phba->iocb_cnt > phba->iocb_max)
561                 phba->iocb_max = phba->iocb_cnt;
562         return iocbq;
563 }
564
565 /**
566  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
567  * @phba: Pointer to HBA context object.
568  * @xritag: XRI value.
569  *
570  * This function clears the sglq pointer from the array of acive
571  * sglq's. The xritag that is passed in is used to index into the
572  * array. Before the xritag can be used it needs to be adjusted
573  * by subtracting the xribase.
574  *
575  * Returns sglq ponter = success, NULL = Failure.
576  **/
577 static struct lpfc_sglq *
578 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
579 {
580         struct lpfc_sglq *sglq;
581
582         sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
583         phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
584         return sglq;
585 }
586
587 /**
588  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
589  * @phba: Pointer to HBA context object.
590  * @xritag: XRI value.
591  *
592  * This function returns the sglq pointer from the array of acive
593  * sglq's. The xritag that is passed in is used to index into the
594  * array. Before the xritag can be used it needs to be adjusted
595  * by subtracting the xribase.
596  *
597  * Returns sglq ponter = success, NULL = Failure.
598  **/
599 struct lpfc_sglq *
600 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
601 {
602         struct lpfc_sglq *sglq;
603
604         sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
605         return sglq;
606 }
607
608 /**
609  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
610  * @phba: Pointer to HBA context object.
611  * @xritag: xri used in this exchange.
612  * @rrq: The RRQ to be cleared.
613  *
614  **/
615 void
616 lpfc_clr_rrq_active(struct lpfc_hba *phba,
617                     uint16_t xritag,
618                     struct lpfc_node_rrq *rrq)
619 {
620         struct lpfc_nodelist *ndlp = NULL;
621
622         if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
623                 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
624
625         /* The target DID could have been swapped (cable swap)
626          * we should use the ndlp from the findnode if it is
627          * available.
628          */
629         if ((!ndlp) && rrq->ndlp)
630                 ndlp = rrq->ndlp;
631
632         if (!ndlp)
633                 goto out;
634
635         if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
636                 rrq->send_rrq = 0;
637                 rrq->xritag = 0;
638                 rrq->rrq_stop_time = 0;
639         }
640 out:
641         mempool_free(rrq, phba->rrq_pool);
642 }
643
644 /**
645  * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
646  * @phba: Pointer to HBA context object.
647  *
648  * This function is called with hbalock held. This function
649  * Checks if stop_time (ratov from setting rrq active) has
650  * been reached, if it has and the send_rrq flag is set then
651  * it will call lpfc_send_rrq. If the send_rrq flag is not set
652  * then it will just call the routine to clear the rrq and
653  * free the rrq resource.
654  * The timer is set to the next rrq that is going to expire before
655  * leaving the routine.
656  *
657  **/
658 void
659 lpfc_handle_rrq_active(struct lpfc_hba *phba)
660 {
661         struct lpfc_node_rrq *rrq;
662         struct lpfc_node_rrq *nextrrq;
663         unsigned long next_time;
664         unsigned long iflags;
665         LIST_HEAD(send_rrq);
666
667         spin_lock_irqsave(&phba->hbalock, iflags);
668         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
669         next_time = jiffies + HZ * (phba->fc_ratov + 1);
670         list_for_each_entry_safe(rrq, nextrrq,
671                                  &phba->active_rrq_list, list) {
672                 if (time_after(jiffies, rrq->rrq_stop_time))
673                         list_move(&rrq->list, &send_rrq);
674                 else if (time_before(rrq->rrq_stop_time, next_time))
675                         next_time = rrq->rrq_stop_time;
676         }
677         spin_unlock_irqrestore(&phba->hbalock, iflags);
678         if (!list_empty(&phba->active_rrq_list))
679                 mod_timer(&phba->rrq_tmr, next_time);
680         list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
681                 list_del(&rrq->list);
682                 if (!rrq->send_rrq)
683                         /* this call will free the rrq */
684                 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
685                 else if (lpfc_send_rrq(phba, rrq)) {
686                         /* if we send the rrq then the completion handler
687                         *  will clear the bit in the xribitmap.
688                         */
689                         lpfc_clr_rrq_active(phba, rrq->xritag,
690                                             rrq);
691                 }
692         }
693 }
694
695 /**
696  * lpfc_get_active_rrq - Get the active RRQ for this exchange.
697  * @vport: Pointer to vport context object.
698  * @xri: The xri used in the exchange.
699  * @did: The targets DID for this exchange.
700  *
701  * returns NULL = rrq not found in the phba->active_rrq_list.
702  *         rrq = rrq for this xri and target.
703  **/
704 struct lpfc_node_rrq *
705 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
706 {
707         struct lpfc_hba *phba = vport->phba;
708         struct lpfc_node_rrq *rrq;
709         struct lpfc_node_rrq *nextrrq;
710         unsigned long iflags;
711
712         if (phba->sli_rev != LPFC_SLI_REV4)
713                 return NULL;
714         spin_lock_irqsave(&phba->hbalock, iflags);
715         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
716                 if (rrq->vport == vport && rrq->xritag == xri &&
717                                 rrq->nlp_DID == did){
718                         list_del(&rrq->list);
719                         spin_unlock_irqrestore(&phba->hbalock, iflags);
720                         return rrq;
721                 }
722         }
723         spin_unlock_irqrestore(&phba->hbalock, iflags);
724         return NULL;
725 }
726
727 /**
728  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
729  * @vport: Pointer to vport context object.
730  * @ndlp: Pointer to the lpfc_node_list structure.
731  * If ndlp is NULL Remove all active RRQs for this vport from the
732  * phba->active_rrq_list and clear the rrq.
733  * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
734  **/
735 void
736 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
737
738 {
739         struct lpfc_hba *phba = vport->phba;
740         struct lpfc_node_rrq *rrq;
741         struct lpfc_node_rrq *nextrrq;
742         unsigned long iflags;
743         LIST_HEAD(rrq_list);
744
745         if (phba->sli_rev != LPFC_SLI_REV4)
746                 return;
747         if (!ndlp) {
748                 lpfc_sli4_vport_delete_els_xri_aborted(vport);
749                 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
750         }
751         spin_lock_irqsave(&phba->hbalock, iflags);
752         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
753                 if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
754                         list_move(&rrq->list, &rrq_list);
755         spin_unlock_irqrestore(&phba->hbalock, iflags);
756
757         list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
758                 list_del(&rrq->list);
759                 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
760         }
761 }
762
763 /**
764  * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
765  * @phba: Pointer to HBA context object.
766  *
767  * Remove all rrqs from the phba->active_rrq_list and free them by
768  * calling __lpfc_clr_active_rrq
769  *
770  **/
771 void
772 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
773 {
774         struct lpfc_node_rrq *rrq;
775         struct lpfc_node_rrq *nextrrq;
776         unsigned long next_time;
777         unsigned long iflags;
778         LIST_HEAD(rrq_list);
779
780         if (phba->sli_rev != LPFC_SLI_REV4)
781                 return;
782         spin_lock_irqsave(&phba->hbalock, iflags);
783         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
784         next_time = jiffies + HZ * (phba->fc_ratov * 2);
785         list_splice_init(&phba->active_rrq_list, &rrq_list);
786         spin_unlock_irqrestore(&phba->hbalock, iflags);
787
788         list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
789                 list_del(&rrq->list);
790                 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
791         }
792         if (!list_empty(&phba->active_rrq_list))
793                 mod_timer(&phba->rrq_tmr, next_time);
794 }
795
796
797 /**
798  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
799  * @phba: Pointer to HBA context object.
800  * @ndlp: Targets nodelist pointer for this exchange.
801  * @xritag the xri in the bitmap to test.
802  *
803  * This function is called with hbalock held. This function
804  * returns 0 = rrq not active for this xri
805  *         1 = rrq is valid for this xri.
806  **/
807 int
808 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
809                         uint16_t  xritag)
810 {
811         if (!ndlp)
812                 return 0;
813         if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
814                         return 1;
815         else
816                 return 0;
817 }
818
819 /**
820  * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
821  * @phba: Pointer to HBA context object.
822  * @ndlp: nodelist pointer for this target.
823  * @xritag: xri used in this exchange.
824  * @rxid: Remote Exchange ID.
825  * @send_rrq: Flag used to determine if we should send rrq els cmd.
826  *
827  * This function takes the hbalock.
828  * The active bit is always set in the active rrq xri_bitmap even
829  * if there is no slot avaiable for the other rrq information.
830  *
831  * returns 0 rrq actived for this xri
832  *         < 0 No memory or invalid ndlp.
833  **/
834 int
835 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
836                     uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
837 {
838         unsigned long iflags;
839         struct lpfc_node_rrq *rrq;
840         int empty;
841
842         if (!ndlp)
843                 return -EINVAL;
844
845         if (!phba->cfg_enable_rrq)
846                 return -EINVAL;
847
848         spin_lock_irqsave(&phba->hbalock, iflags);
849         if (phba->pport->load_flag & FC_UNLOADING) {
850                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
851                 goto out;
852         }
853
854         /*
855          * set the active bit even if there is no mem available.
856          */
857         if (NLP_CHK_FREE_REQ(ndlp))
858                 goto out;
859
860         if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
861                 goto out;
862
863         if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
864                 goto out;
865
866         spin_unlock_irqrestore(&phba->hbalock, iflags);
867         rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
868         if (!rrq) {
869                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
870                                 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
871                                 " DID:0x%x Send:%d\n",
872                                 xritag, rxid, ndlp->nlp_DID, send_rrq);
873                 return -EINVAL;
874         }
875         rrq->send_rrq = send_rrq;
876         rrq->xritag = xritag;
877         rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
878         rrq->ndlp = ndlp;
879         rrq->nlp_DID = ndlp->nlp_DID;
880         rrq->vport = ndlp->vport;
881         rrq->rxid = rxid;
882         rrq->send_rrq = send_rrq;
883         spin_lock_irqsave(&phba->hbalock, iflags);
884         empty = list_empty(&phba->active_rrq_list);
885         list_add_tail(&rrq->list, &phba->active_rrq_list);
886         phba->hba_flag |= HBA_RRQ_ACTIVE;
887         if (empty)
888                 lpfc_worker_wake_up(phba);
889         spin_unlock_irqrestore(&phba->hbalock, iflags);
890         return 0;
891 out:
892         spin_unlock_irqrestore(&phba->hbalock, iflags);
893         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
894                         "2921 Can't set rrq active xri:0x%x rxid:0x%x"
895                         " DID:0x%x Send:%d\n",
896                         xritag, rxid, ndlp->nlp_DID, send_rrq);
897         return -EINVAL;
898 }
899
900 /**
901  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
902  * @phba: Pointer to HBA context object.
903  * @piocb: Pointer to the iocbq.
904  *
905  * This function is called with hbalock held. This function
906  * gets a new driver sglq object from the sglq list. If the
907  * list is not empty then it is successful, it returns pointer to the newly
908  * allocated sglq object else it returns NULL.
909  **/
910 static struct lpfc_sglq *
911 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
912 {
913         struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
914         struct lpfc_sglq *sglq = NULL;
915         struct lpfc_sglq *start_sglq = NULL;
916         struct lpfc_scsi_buf *lpfc_cmd;
917         struct lpfc_nodelist *ndlp;
918         int found = 0;
919
920         if (piocbq->iocb_flag &  LPFC_IO_FCP) {
921                 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
922                 ndlp = lpfc_cmd->rdata->pnode;
923         } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
924                         !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
925                 ndlp = piocbq->context_un.ndlp;
926         else  if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
927                         (piocbq->iocb_flag & LPFC_IO_LIBDFC))
928                 ndlp = piocbq->context_un.ndlp;
929         else
930                 ndlp = piocbq->context1;
931
932         list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
933         start_sglq = sglq;
934         while (!found) {
935                 if (!sglq)
936                         return NULL;
937                 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
938                         /* This xri has an rrq outstanding for this DID.
939                          * put it back in the list and get another xri.
940                          */
941                         list_add_tail(&sglq->list, lpfc_sgl_list);
942                         sglq = NULL;
943                         list_remove_head(lpfc_sgl_list, sglq,
944                                                 struct lpfc_sglq, list);
945                         if (sglq == start_sglq) {
946                                 sglq = NULL;
947                                 break;
948                         } else
949                                 continue;
950                 }
951                 sglq->ndlp = ndlp;
952                 found = 1;
953                 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
954                 sglq->state = SGL_ALLOCATED;
955         }
956         return sglq;
957 }
958
959 /**
960  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
961  * @phba: Pointer to HBA context object.
962  *
963  * This function is called with no lock held. This function
964  * allocates a new driver iocb object from the iocb pool. If the
965  * allocation is successful, it returns pointer to the newly
966  * allocated iocb object else it returns NULL.
967  **/
968 struct lpfc_iocbq *
969 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
970 {
971         struct lpfc_iocbq * iocbq = NULL;
972         unsigned long iflags;
973
974         spin_lock_irqsave(&phba->hbalock, iflags);
975         iocbq = __lpfc_sli_get_iocbq(phba);
976         spin_unlock_irqrestore(&phba->hbalock, iflags);
977         return iocbq;
978 }
979
980 /**
981  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
982  * @phba: Pointer to HBA context object.
983  * @iocbq: Pointer to driver iocb object.
984  *
985  * This function is called with hbalock held to release driver
986  * iocb object to the iocb pool. The iotag in the iocb object
987  * does not change for each use of the iocb object. This function
988  * clears all other fields of the iocb object when it is freed.
989  * The sqlq structure that holds the xritag and phys and virtual
990  * mappings for the scatter gather list is retrieved from the
991  * active array of sglq. The get of the sglq pointer also clears
992  * the entry in the array. If the status of the IO indiactes that
993  * this IO was aborted then the sglq entry it put on the
994  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
995  * IO has good status or fails for any other reason then the sglq
996  * entry is added to the free list (lpfc_sgl_list).
997  **/
998 static void
999 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1000 {
1001         struct lpfc_sglq *sglq;
1002         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1003         unsigned long iflag = 0;
1004         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
1005
1006         if (iocbq->sli4_xritag == NO_XRI)
1007                 sglq = NULL;
1008         else
1009                 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1010
1011         if (sglq)  {
1012                 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1013                         (sglq->state != SGL_XRI_ABORTED)) {
1014                         spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
1015                                         iflag);
1016                         list_add(&sglq->list,
1017                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1018                         spin_unlock_irqrestore(
1019                                 &phba->sli4_hba.abts_sgl_list_lock, iflag);
1020                 } else {
1021                         sglq->state = SGL_FREED;
1022                         sglq->ndlp = NULL;
1023                         list_add_tail(&sglq->list,
1024                                 &phba->sli4_hba.lpfc_sgl_list);
1025
1026                         /* Check if TXQ queue needs to be serviced */
1027                         if (pring->txq_cnt)
1028                                 lpfc_worker_wake_up(phba);
1029                 }
1030         }
1031
1032
1033         /*
1034          * Clean all volatile data fields, preserve iotag and node struct.
1035          */
1036         memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1037         iocbq->sli4_lxritag = NO_XRI;
1038         iocbq->sli4_xritag = NO_XRI;
1039         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1040 }
1041
1042
1043 /**
1044  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1045  * @phba: Pointer to HBA context object.
1046  * @iocbq: Pointer to driver iocb object.
1047  *
1048  * This function is called with hbalock held to release driver
1049  * iocb object to the iocb pool. The iotag in the iocb object
1050  * does not change for each use of the iocb object. This function
1051  * clears all other fields of the iocb object when it is freed.
1052  **/
1053 static void
1054 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1055 {
1056         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1057
1058         /*
1059          * Clean all volatile data fields, preserve iotag and node struct.
1060          */
1061         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1062         iocbq->sli4_xritag = NO_XRI;
1063         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1064 }
1065
1066 /**
1067  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1068  * @phba: Pointer to HBA context object.
1069  * @iocbq: Pointer to driver iocb object.
1070  *
1071  * This function is called with hbalock held to release driver
1072  * iocb object to the iocb pool. The iotag in the iocb object
1073  * does not change for each use of the iocb object. This function
1074  * clears all other fields of the iocb object when it is freed.
1075  **/
1076 static void
1077 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1078 {
1079         phba->__lpfc_sli_release_iocbq(phba, iocbq);
1080         phba->iocb_cnt--;
1081 }
1082
1083 /**
1084  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1085  * @phba: Pointer to HBA context object.
1086  * @iocbq: Pointer to driver iocb object.
1087  *
1088  * This function is called with no lock held to release the iocb to
1089  * iocb pool.
1090  **/
1091 void
1092 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1093 {
1094         unsigned long iflags;
1095
1096         /*
1097          * Clean all volatile data fields, preserve iotag and node struct.
1098          */
1099         spin_lock_irqsave(&phba->hbalock, iflags);
1100         __lpfc_sli_release_iocbq(phba, iocbq);
1101         spin_unlock_irqrestore(&phba->hbalock, iflags);
1102 }
1103
1104 /**
1105  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1106  * @phba: Pointer to HBA context object.
1107  * @iocblist: List of IOCBs.
1108  * @ulpstatus: ULP status in IOCB command field.
1109  * @ulpWord4: ULP word-4 in IOCB command field.
1110  *
1111  * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1112  * on the list by invoking the complete callback function associated with the
1113  * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1114  * fields.
1115  **/
1116 void
1117 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1118                       uint32_t ulpstatus, uint32_t ulpWord4)
1119 {
1120         struct lpfc_iocbq *piocb;
1121
1122         while (!list_empty(iocblist)) {
1123                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1124
1125                 if (!piocb->iocb_cmpl)
1126                         lpfc_sli_release_iocbq(phba, piocb);
1127                 else {
1128                         piocb->iocb.ulpStatus = ulpstatus;
1129                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1130                         (piocb->iocb_cmpl) (phba, piocb, piocb);
1131                 }
1132         }
1133         return;
1134 }
1135
1136 /**
1137  * lpfc_sli_iocb_cmd_type - Get the iocb type
1138  * @iocb_cmnd: iocb command code.
1139  *
1140  * This function is called by ring event handler function to get the iocb type.
1141  * This function translates the iocb command to an iocb command type used to
1142  * decide the final disposition of each completed IOCB.
1143  * The function returns
1144  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1145  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1146  * LPFC_ABORT_IOCB   if it is an abort iocb
1147  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1148  *
1149  * The caller is not required to hold any lock.
1150  **/
1151 static lpfc_iocb_type
1152 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1153 {
1154         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1155
1156         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1157                 return 0;
1158
1159         switch (iocb_cmnd) {
1160         case CMD_XMIT_SEQUENCE_CR:
1161         case CMD_XMIT_SEQUENCE_CX:
1162         case CMD_XMIT_BCAST_CN:
1163         case CMD_XMIT_BCAST_CX:
1164         case CMD_ELS_REQUEST_CR:
1165         case CMD_ELS_REQUEST_CX:
1166         case CMD_CREATE_XRI_CR:
1167         case CMD_CREATE_XRI_CX:
1168         case CMD_GET_RPI_CN:
1169         case CMD_XMIT_ELS_RSP_CX:
1170         case CMD_GET_RPI_CR:
1171         case CMD_FCP_IWRITE_CR:
1172         case CMD_FCP_IWRITE_CX:
1173         case CMD_FCP_IREAD_CR:
1174         case CMD_FCP_IREAD_CX:
1175         case CMD_FCP_ICMND_CR:
1176         case CMD_FCP_ICMND_CX:
1177         case CMD_FCP_TSEND_CX:
1178         case CMD_FCP_TRSP_CX:
1179         case CMD_FCP_TRECEIVE_CX:
1180         case CMD_FCP_AUTO_TRSP_CX:
1181         case CMD_ADAPTER_MSG:
1182         case CMD_ADAPTER_DUMP:
1183         case CMD_XMIT_SEQUENCE64_CR:
1184         case CMD_XMIT_SEQUENCE64_CX:
1185         case CMD_XMIT_BCAST64_CN:
1186         case CMD_XMIT_BCAST64_CX:
1187         case CMD_ELS_REQUEST64_CR:
1188         case CMD_ELS_REQUEST64_CX:
1189         case CMD_FCP_IWRITE64_CR:
1190         case CMD_FCP_IWRITE64_CX:
1191         case CMD_FCP_IREAD64_CR:
1192         case CMD_FCP_IREAD64_CX:
1193         case CMD_FCP_ICMND64_CR:
1194         case CMD_FCP_ICMND64_CX:
1195         case CMD_FCP_TSEND64_CX:
1196         case CMD_FCP_TRSP64_CX:
1197         case CMD_FCP_TRECEIVE64_CX:
1198         case CMD_GEN_REQUEST64_CR:
1199         case CMD_GEN_REQUEST64_CX:
1200         case CMD_XMIT_ELS_RSP64_CX:
1201         case DSSCMD_IWRITE64_CR:
1202         case DSSCMD_IWRITE64_CX:
1203         case DSSCMD_IREAD64_CR:
1204         case DSSCMD_IREAD64_CX:
1205                 type = LPFC_SOL_IOCB;
1206                 break;
1207         case CMD_ABORT_XRI_CN:
1208         case CMD_ABORT_XRI_CX:
1209         case CMD_CLOSE_XRI_CN:
1210         case CMD_CLOSE_XRI_CX:
1211         case CMD_XRI_ABORTED_CX:
1212         case CMD_ABORT_MXRI64_CN:
1213         case CMD_XMIT_BLS_RSP64_CX:
1214                 type = LPFC_ABORT_IOCB;
1215                 break;
1216         case CMD_RCV_SEQUENCE_CX:
1217         case CMD_RCV_ELS_REQ_CX:
1218         case CMD_RCV_SEQUENCE64_CX:
1219         case CMD_RCV_ELS_REQ64_CX:
1220         case CMD_ASYNC_STATUS:
1221         case CMD_IOCB_RCV_SEQ64_CX:
1222         case CMD_IOCB_RCV_ELS64_CX:
1223         case CMD_IOCB_RCV_CONT64_CX:
1224         case CMD_IOCB_RET_XRI64_CX:
1225                 type = LPFC_UNSOL_IOCB;
1226                 break;
1227         case CMD_IOCB_XMIT_MSEQ64_CR:
1228         case CMD_IOCB_XMIT_MSEQ64_CX:
1229         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1230         case CMD_IOCB_RCV_ELS_LIST64_CX:
1231         case CMD_IOCB_CLOSE_EXTENDED_CN:
1232         case CMD_IOCB_ABORT_EXTENDED_CN:
1233         case CMD_IOCB_RET_HBQE64_CN:
1234         case CMD_IOCB_FCP_IBIDIR64_CR:
1235         case CMD_IOCB_FCP_IBIDIR64_CX:
1236         case CMD_IOCB_FCP_ITASKMGT64_CX:
1237         case CMD_IOCB_LOGENTRY_CN:
1238         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1239                 printk("%s - Unhandled SLI-3 Command x%x\n",
1240                                 __func__, iocb_cmnd);
1241                 type = LPFC_UNKNOWN_IOCB;
1242                 break;
1243         default:
1244                 type = LPFC_UNKNOWN_IOCB;
1245                 break;
1246         }
1247
1248         return type;
1249 }
1250
1251 /**
1252  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1253  * @phba: Pointer to HBA context object.
1254  *
1255  * This function is called from SLI initialization code
1256  * to configure every ring of the HBA's SLI interface. The
1257  * caller is not required to hold any lock. This function issues
1258  * a config_ring mailbox command for each ring.
1259  * This function returns zero if successful else returns a negative
1260  * error code.
1261  **/
1262 static int
1263 lpfc_sli_ring_map(struct lpfc_hba *phba)
1264 {
1265         struct lpfc_sli *psli = &phba->sli;
1266         LPFC_MBOXQ_t *pmb;
1267         MAILBOX_t *pmbox;
1268         int i, rc, ret = 0;
1269
1270         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1271         if (!pmb)
1272                 return -ENOMEM;
1273         pmbox = &pmb->u.mb;
1274         phba->link_state = LPFC_INIT_MBX_CMDS;
1275         for (i = 0; i < psli->num_rings; i++) {
1276                 lpfc_config_ring(phba, i, pmb);
1277                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1278                 if (rc != MBX_SUCCESS) {
1279                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1280                                         "0446 Adapter failed to init (%d), "
1281                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1282                                         "ring %d\n",
1283                                         rc, pmbox->mbxCommand,
1284                                         pmbox->mbxStatus, i);
1285                         phba->link_state = LPFC_HBA_ERROR;
1286                         ret = -ENXIO;
1287                         break;
1288                 }
1289         }
1290         mempool_free(pmb, phba->mbox_mem_pool);
1291         return ret;
1292 }
1293
1294 /**
1295  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1296  * @phba: Pointer to HBA context object.
1297  * @pring: Pointer to driver SLI ring object.
1298  * @piocb: Pointer to the driver iocb object.
1299  *
1300  * This function is called with hbalock held. The function adds the
1301  * new iocb to txcmplq of the given ring. This function always returns
1302  * 0. If this function is called for ELS ring, this function checks if
1303  * there is a vport associated with the ELS command. This function also
1304  * starts els_tmofunc timer if this is an ELS command.
1305  **/
1306 static int
1307 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1308                         struct lpfc_iocbq *piocb)
1309 {
1310         list_add_tail(&piocb->list, &pring->txcmplq);
1311         piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1312         pring->txcmplq_cnt++;
1313         if (pring->txcmplq_cnt > pring->txcmplq_max)
1314                 pring->txcmplq_max = pring->txcmplq_cnt;
1315
1316         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1317            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1318            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1319                 if (!piocb->vport)
1320                         BUG();
1321                 else
1322                         mod_timer(&piocb->vport->els_tmofunc,
1323                                   jiffies + HZ * (phba->fc_ratov << 1));
1324         }
1325
1326
1327         return 0;
1328 }
1329
1330 /**
1331  * lpfc_sli_ringtx_get - Get first element of the txq
1332  * @phba: Pointer to HBA context object.
1333  * @pring: Pointer to driver SLI ring object.
1334  *
1335  * This function is called with hbalock held to get next
1336  * iocb in txq of the given ring. If there is any iocb in
1337  * the txq, the function returns first iocb in the list after
1338  * removing the iocb from the list, else it returns NULL.
1339  **/
1340 struct lpfc_iocbq *
1341 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1342 {
1343         struct lpfc_iocbq *cmd_iocb;
1344
1345         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1346         if (cmd_iocb != NULL)
1347                 pring->txq_cnt--;
1348         return cmd_iocb;
1349 }
1350
1351 /**
1352  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1353  * @phba: Pointer to HBA context object.
1354  * @pring: Pointer to driver SLI ring object.
1355  *
1356  * This function is called with hbalock held and the caller must post the
1357  * iocb without releasing the lock. If the caller releases the lock,
1358  * iocb slot returned by the function is not guaranteed to be available.
1359  * The function returns pointer to the next available iocb slot if there
1360  * is available slot in the ring, else it returns NULL.
1361  * If the get index of the ring is ahead of the put index, the function
1362  * will post an error attention event to the worker thread to take the
1363  * HBA to offline state.
1364  **/
1365 static IOCB_t *
1366 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1367 {
1368         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1369         uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1370         if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1371            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1372                 pring->sli.sli3.next_cmdidx = 0;
1373
1374         if (unlikely(pring->sli.sli3.local_getidx ==
1375                 pring->sli.sli3.next_cmdidx)) {
1376
1377                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1378
1379                 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1380                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1381                                         "0315 Ring %d issue: portCmdGet %d "
1382                                         "is bigger than cmd ring %d\n",
1383                                         pring->ringno,
1384                                         pring->sli.sli3.local_getidx,
1385                                         max_cmd_idx);
1386
1387                         phba->link_state = LPFC_HBA_ERROR;
1388                         /*
1389                          * All error attention handlers are posted to
1390                          * worker thread
1391                          */
1392                         phba->work_ha |= HA_ERATT;
1393                         phba->work_hs = HS_FFER3;
1394
1395                         lpfc_worker_wake_up(phba);
1396
1397                         return NULL;
1398                 }
1399
1400                 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1401                         return NULL;
1402         }
1403
1404         return lpfc_cmd_iocb(phba, pring);
1405 }
1406
1407 /**
1408  * lpfc_sli_next_iotag - Get an iotag for the iocb
1409  * @phba: Pointer to HBA context object.
1410  * @iocbq: Pointer to driver iocb object.
1411  *
1412  * This function gets an iotag for the iocb. If there is no unused iotag and
1413  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1414  * array and assigns a new iotag.
1415  * The function returns the allocated iotag if successful, else returns zero.
1416  * Zero is not a valid iotag.
1417  * The caller is not required to hold any lock.
1418  **/
1419 uint16_t
1420 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1421 {
1422         struct lpfc_iocbq **new_arr;
1423         struct lpfc_iocbq **old_arr;
1424         size_t new_len;
1425         struct lpfc_sli *psli = &phba->sli;
1426         uint16_t iotag;
1427
1428         spin_lock_irq(&phba->hbalock);
1429         iotag = psli->last_iotag;
1430         if(++iotag < psli->iocbq_lookup_len) {
1431                 psli->last_iotag = iotag;
1432                 psli->iocbq_lookup[iotag] = iocbq;
1433                 spin_unlock_irq(&phba->hbalock);
1434                 iocbq->iotag = iotag;
1435                 return iotag;
1436         } else if (psli->iocbq_lookup_len < (0xffff
1437                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1438                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1439                 spin_unlock_irq(&phba->hbalock);
1440                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1441                                   GFP_KERNEL);
1442                 if (new_arr) {
1443                         spin_lock_irq(&phba->hbalock);
1444                         old_arr = psli->iocbq_lookup;
1445                         if (new_len <= psli->iocbq_lookup_len) {
1446                                 /* highly unprobable case */
1447                                 kfree(new_arr);
1448                                 iotag = psli->last_iotag;
1449                                 if(++iotag < psli->iocbq_lookup_len) {
1450                                         psli->last_iotag = iotag;
1451                                         psli->iocbq_lookup[iotag] = iocbq;
1452                                         spin_unlock_irq(&phba->hbalock);
1453                                         iocbq->iotag = iotag;
1454                                         return iotag;
1455                                 }
1456                                 spin_unlock_irq(&phba->hbalock);
1457                                 return 0;
1458                         }
1459                         if (psli->iocbq_lookup)
1460                                 memcpy(new_arr, old_arr,
1461                                        ((psli->last_iotag  + 1) *
1462                                         sizeof (struct lpfc_iocbq *)));
1463                         psli->iocbq_lookup = new_arr;
1464                         psli->iocbq_lookup_len = new_len;
1465                         psli->last_iotag = iotag;
1466                         psli->iocbq_lookup[iotag] = iocbq;
1467                         spin_unlock_irq(&phba->hbalock);
1468                         iocbq->iotag = iotag;
1469                         kfree(old_arr);
1470                         return iotag;
1471                 }
1472         } else
1473                 spin_unlock_irq(&phba->hbalock);
1474
1475         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1476                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1477                         psli->last_iotag);
1478
1479         return 0;
1480 }
1481
1482 /**
1483  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1484  * @phba: Pointer to HBA context object.
1485  * @pring: Pointer to driver SLI ring object.
1486  * @iocb: Pointer to iocb slot in the ring.
1487  * @nextiocb: Pointer to driver iocb object which need to be
1488  *            posted to firmware.
1489  *
1490  * This function is called with hbalock held to post a new iocb to
1491  * the firmware. This function copies the new iocb to ring iocb slot and
1492  * updates the ring pointers. It adds the new iocb to txcmplq if there is
1493  * a completion call back for this iocb else the function will free the
1494  * iocb object.
1495  **/
1496 static void
1497 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1498                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1499 {
1500         /*
1501          * Set up an iotag
1502          */
1503         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1504
1505
1506         if (pring->ringno == LPFC_ELS_RING) {
1507                 lpfc_debugfs_slow_ring_trc(phba,
1508                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1509                         *(((uint32_t *) &nextiocb->iocb) + 4),
1510                         *(((uint32_t *) &nextiocb->iocb) + 6),
1511                         *(((uint32_t *) &nextiocb->iocb) + 7));
1512         }
1513
1514         /*
1515          * Issue iocb command to adapter
1516          */
1517         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1518         wmb();
1519         pring->stats.iocb_cmd++;
1520
1521         /*
1522          * If there is no completion routine to call, we can release the
1523          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1524          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1525          */
1526         if (nextiocb->iocb_cmpl)
1527                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1528         else
1529                 __lpfc_sli_release_iocbq(phba, nextiocb);
1530
1531         /*
1532          * Let the HBA know what IOCB slot will be the next one the
1533          * driver will put a command into.
1534          */
1535         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1536         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1537 }
1538
1539 /**
1540  * lpfc_sli_update_full_ring - Update the chip attention register
1541  * @phba: Pointer to HBA context object.
1542  * @pring: Pointer to driver SLI ring object.
1543  *
1544  * The caller is not required to hold any lock for calling this function.
1545  * This function updates the chip attention bits for the ring to inform firmware
1546  * that there are pending work to be done for this ring and requests an
1547  * interrupt when there is space available in the ring. This function is
1548  * called when the driver is unable to post more iocbs to the ring due
1549  * to unavailability of space in the ring.
1550  **/
1551 static void
1552 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1553 {
1554         int ringno = pring->ringno;
1555
1556         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1557
1558         wmb();
1559
1560         /*
1561          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1562          * The HBA will tell us when an IOCB entry is available.
1563          */
1564         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1565         readl(phba->CAregaddr); /* flush */
1566
1567         pring->stats.iocb_cmd_full++;
1568 }
1569
1570 /**
1571  * lpfc_sli_update_ring - Update chip attention register
1572  * @phba: Pointer to HBA context object.
1573  * @pring: Pointer to driver SLI ring object.
1574  *
1575  * This function updates the chip attention register bit for the
1576  * given ring to inform HBA that there is more work to be done
1577  * in this ring. The caller is not required to hold any lock.
1578  **/
1579 static void
1580 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1581 {
1582         int ringno = pring->ringno;
1583
1584         /*
1585          * Tell the HBA that there is work to do in this ring.
1586          */
1587         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1588                 wmb();
1589                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1590                 readl(phba->CAregaddr); /* flush */
1591         }
1592 }
1593
1594 /**
1595  * lpfc_sli_resume_iocb - Process iocbs in the txq
1596  * @phba: Pointer to HBA context object.
1597  * @pring: Pointer to driver SLI ring object.
1598  *
1599  * This function is called with hbalock held to post pending iocbs
1600  * in the txq to the firmware. This function is called when driver
1601  * detects space available in the ring.
1602  **/
1603 static void
1604 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1605 {
1606         IOCB_t *iocb;
1607         struct lpfc_iocbq *nextiocb;
1608
1609         /*
1610          * Check to see if:
1611          *  (a) there is anything on the txq to send
1612          *  (b) link is up
1613          *  (c) link attention events can be processed (fcp ring only)
1614          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1615          */
1616         if (pring->txq_cnt &&
1617             lpfc_is_link_up(phba) &&
1618             (pring->ringno != phba->sli.fcp_ring ||
1619              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1620
1621                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1622                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1623                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1624
1625                 if (iocb)
1626                         lpfc_sli_update_ring(phba, pring);
1627                 else
1628                         lpfc_sli_update_full_ring(phba, pring);
1629         }
1630
1631         return;
1632 }
1633
1634 /**
1635  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1636  * @phba: Pointer to HBA context object.
1637  * @hbqno: HBQ number.
1638  *
1639  * This function is called with hbalock held to get the next
1640  * available slot for the given HBQ. If there is free slot
1641  * available for the HBQ it will return pointer to the next available
1642  * HBQ entry else it will return NULL.
1643  **/
1644 static struct lpfc_hbq_entry *
1645 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1646 {
1647         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1648
1649         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1650             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1651                 hbqp->next_hbqPutIdx = 0;
1652
1653         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1654                 uint32_t raw_index = phba->hbq_get[hbqno];
1655                 uint32_t getidx = le32_to_cpu(raw_index);
1656
1657                 hbqp->local_hbqGetIdx = getidx;
1658
1659                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1660                         lpfc_printf_log(phba, KERN_ERR,
1661                                         LOG_SLI | LOG_VPORT,
1662                                         "1802 HBQ %d: local_hbqGetIdx "
1663                                         "%u is > than hbqp->entry_count %u\n",
1664                                         hbqno, hbqp->local_hbqGetIdx,
1665                                         hbqp->entry_count);
1666
1667                         phba->link_state = LPFC_HBA_ERROR;
1668                         return NULL;
1669                 }
1670
1671                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1672                         return NULL;
1673         }
1674
1675         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1676                         hbqp->hbqPutIdx;
1677 }
1678
1679 /**
1680  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1681  * @phba: Pointer to HBA context object.
1682  *
1683  * This function is called with no lock held to free all the
1684  * hbq buffers while uninitializing the SLI interface. It also
1685  * frees the HBQ buffers returned by the firmware but not yet
1686  * processed by the upper layers.
1687  **/
1688 void
1689 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1690 {
1691         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1692         struct hbq_dmabuf *hbq_buf;
1693         unsigned long flags;
1694         int i, hbq_count;
1695         uint32_t hbqno;
1696
1697         hbq_count = lpfc_sli_hbq_count();
1698         /* Return all memory used by all HBQs */
1699         spin_lock_irqsave(&phba->hbalock, flags);
1700         for (i = 0; i < hbq_count; ++i) {
1701                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1702                                 &phba->hbqs[i].hbq_buffer_list, list) {
1703                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1704                         list_del(&hbq_buf->dbuf.list);
1705                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1706                 }
1707                 phba->hbqs[i].buffer_count = 0;
1708         }
1709         /* Return all HBQ buffer that are in-fly */
1710         list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1711                                  list) {
1712                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1713                 list_del(&hbq_buf->dbuf.list);
1714                 if (hbq_buf->tag == -1) {
1715                         (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1716                                 (phba, hbq_buf);
1717                 } else {
1718                         hbqno = hbq_buf->tag >> 16;
1719                         if (hbqno >= LPFC_MAX_HBQS)
1720                                 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1721                                         (phba, hbq_buf);
1722                         else
1723                                 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1724                                         hbq_buf);
1725                 }
1726         }
1727
1728         /* Mark the HBQs not in use */
1729         phba->hbq_in_use = 0;
1730         spin_unlock_irqrestore(&phba->hbalock, flags);
1731 }
1732
1733 /**
1734  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1735  * @phba: Pointer to HBA context object.
1736  * @hbqno: HBQ number.
1737  * @hbq_buf: Pointer to HBQ buffer.
1738  *
1739  * This function is called with the hbalock held to post a
1740  * hbq buffer to the firmware. If the function finds an empty
1741  * slot in the HBQ, it will post the buffer. The function will return
1742  * pointer to the hbq entry if it successfully post the buffer
1743  * else it will return NULL.
1744  **/
1745 static int
1746 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1747                          struct hbq_dmabuf *hbq_buf)
1748 {
1749         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1750 }
1751
1752 /**
1753  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1754  * @phba: Pointer to HBA context object.
1755  * @hbqno: HBQ number.
1756  * @hbq_buf: Pointer to HBQ buffer.
1757  *
1758  * This function is called with the hbalock held to post a hbq buffer to the
1759  * firmware. If the function finds an empty slot in the HBQ, it will post the
1760  * buffer and place it on the hbq_buffer_list. The function will return zero if
1761  * it successfully post the buffer else it will return an error.
1762  **/
1763 static int
1764 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1765                             struct hbq_dmabuf *hbq_buf)
1766 {
1767         struct lpfc_hbq_entry *hbqe;
1768         dma_addr_t physaddr = hbq_buf->dbuf.phys;
1769
1770         /* Get next HBQ entry slot to use */
1771         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1772         if (hbqe) {
1773                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1774
1775                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1776                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1777                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1778                 hbqe->bde.tus.f.bdeFlags = 0;
1779                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1780                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1781                                 /* Sync SLIM */
1782                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1783                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1784                                 /* flush */
1785                 readl(phba->hbq_put + hbqno);
1786                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1787                 return 0;
1788         } else
1789                 return -ENOMEM;
1790 }
1791
1792 /**
1793  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1794  * @phba: Pointer to HBA context object.
1795  * @hbqno: HBQ number.
1796  * @hbq_buf: Pointer to HBQ buffer.
1797  *
1798  * This function is called with the hbalock held to post an RQE to the SLI4
1799  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1800  * the hbq_buffer_list and return zero, otherwise it will return an error.
1801  **/
1802 static int
1803 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1804                             struct hbq_dmabuf *hbq_buf)
1805 {
1806         int rc;
1807         struct lpfc_rqe hrqe;
1808         struct lpfc_rqe drqe;
1809
1810         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1811         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1812         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1813         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1814         rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1815                               &hrqe, &drqe);
1816         if (rc < 0)
1817                 return rc;
1818         hbq_buf->tag = rc;
1819         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1820         return 0;
1821 }
1822
1823 /* HBQ for ELS and CT traffic. */
1824 static struct lpfc_hbq_init lpfc_els_hbq = {
1825         .rn = 1,
1826         .entry_count = 256,
1827         .mask_count = 0,
1828         .profile = 0,
1829         .ring_mask = (1 << LPFC_ELS_RING),
1830         .buffer_count = 0,
1831         .init_count = 40,
1832         .add_count = 40,
1833 };
1834
1835 /* HBQ for the extra ring if needed */
1836 static struct lpfc_hbq_init lpfc_extra_hbq = {
1837         .rn = 1,
1838         .entry_count = 200,
1839         .mask_count = 0,
1840         .profile = 0,
1841         .ring_mask = (1 << LPFC_EXTRA_RING),
1842         .buffer_count = 0,
1843         .init_count = 0,
1844         .add_count = 5,
1845 };
1846
1847 /* Array of HBQs */
1848 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1849         &lpfc_els_hbq,
1850         &lpfc_extra_hbq,
1851 };
1852
1853 /**
1854  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1855  * @phba: Pointer to HBA context object.
1856  * @hbqno: HBQ number.
1857  * @count: Number of HBQ buffers to be posted.
1858  *
1859  * This function is called with no lock held to post more hbq buffers to the
1860  * given HBQ. The function returns the number of HBQ buffers successfully
1861  * posted.
1862  **/
1863 static int
1864 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1865 {
1866         uint32_t i, posted = 0;
1867         unsigned long flags;
1868         struct hbq_dmabuf *hbq_buffer;
1869         LIST_HEAD(hbq_buf_list);
1870         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1871                 return 0;
1872
1873         if ((phba->hbqs[hbqno].buffer_count + count) >
1874             lpfc_hbq_defs[hbqno]->entry_count)
1875                 count = lpfc_hbq_defs[hbqno]->entry_count -
1876                                         phba->hbqs[hbqno].buffer_count;
1877         if (!count)
1878                 return 0;
1879         /* Allocate HBQ entries */
1880         for (i = 0; i < count; i++) {
1881                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1882                 if (!hbq_buffer)
1883                         break;
1884                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1885         }
1886         /* Check whether HBQ is still in use */
1887         spin_lock_irqsave(&phba->hbalock, flags);
1888         if (!phba->hbq_in_use)
1889                 goto err;
1890         while (!list_empty(&hbq_buf_list)) {
1891                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1892                                  dbuf.list);
1893                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1894                                       (hbqno << 16));
1895                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1896                         phba->hbqs[hbqno].buffer_count++;
1897                         posted++;
1898                 } else
1899                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1900         }
1901         spin_unlock_irqrestore(&phba->hbalock, flags);
1902         return posted;
1903 err:
1904         spin_unlock_irqrestore(&phba->hbalock, flags);
1905         while (!list_empty(&hbq_buf_list)) {
1906                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1907                                  dbuf.list);
1908                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1909         }
1910         return 0;
1911 }
1912
1913 /**
1914  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1915  * @phba: Pointer to HBA context object.
1916  * @qno: HBQ number.
1917  *
1918  * This function posts more buffers to the HBQ. This function
1919  * is called with no lock held. The function returns the number of HBQ entries
1920  * successfully allocated.
1921  **/
1922 int
1923 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1924 {
1925         if (phba->sli_rev == LPFC_SLI_REV4)
1926                 return 0;
1927         else
1928                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1929                                          lpfc_hbq_defs[qno]->add_count);
1930 }
1931
1932 /**
1933  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1934  * @phba: Pointer to HBA context object.
1935  * @qno:  HBQ queue number.
1936  *
1937  * This function is called from SLI initialization code path with
1938  * no lock held to post initial HBQ buffers to firmware. The
1939  * function returns the number of HBQ entries successfully allocated.
1940  **/
1941 static int
1942 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1943 {
1944         if (phba->sli_rev == LPFC_SLI_REV4)
1945                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1946                                         lpfc_hbq_defs[qno]->entry_count);
1947         else
1948                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1949                                          lpfc_hbq_defs[qno]->init_count);
1950 }
1951
1952 /**
1953  * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1954  * @phba: Pointer to HBA context object.
1955  * @hbqno: HBQ number.
1956  *
1957  * This function removes the first hbq buffer on an hbq list and returns a
1958  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1959  **/
1960 static struct hbq_dmabuf *
1961 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1962 {
1963         struct lpfc_dmabuf *d_buf;
1964
1965         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1966         if (!d_buf)
1967                 return NULL;
1968         return container_of(d_buf, struct hbq_dmabuf, dbuf);
1969 }
1970
1971 /**
1972  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1973  * @phba: Pointer to HBA context object.
1974  * @tag: Tag of the hbq buffer.
1975  *
1976  * This function is called with hbalock held. This function searches
1977  * for the hbq buffer associated with the given tag in the hbq buffer
1978  * list. If it finds the hbq buffer, it returns the hbq_buffer other wise
1979  * it returns NULL.
1980  **/
1981 static struct hbq_dmabuf *
1982 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1983 {
1984         struct lpfc_dmabuf *d_buf;
1985         struct hbq_dmabuf *hbq_buf;
1986         uint32_t hbqno;
1987
1988         hbqno = tag >> 16;
1989         if (hbqno >= LPFC_MAX_HBQS)
1990                 return NULL;
1991
1992         spin_lock_irq(&phba->hbalock);
1993         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
1994                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1995                 if (hbq_buf->tag == tag) {
1996                         spin_unlock_irq(&phba->hbalock);
1997                         return hbq_buf;
1998                 }
1999         }
2000         spin_unlock_irq(&phba->hbalock);
2001         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2002                         "1803 Bad hbq tag. Data: x%x x%x\n",
2003                         tag, phba->hbqs[tag >> 16].buffer_count);
2004         return NULL;
2005 }
2006
2007 /**
2008  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2009  * @phba: Pointer to HBA context object.
2010  * @hbq_buffer: Pointer to HBQ buffer.
2011  *
2012  * This function is called with hbalock. This function gives back
2013  * the hbq buffer to firmware. If the HBQ does not have space to
2014  * post the buffer, it will free the buffer.
2015  **/
2016 void
2017 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2018 {
2019         uint32_t hbqno;
2020
2021         if (hbq_buffer) {
2022                 hbqno = hbq_buffer->tag >> 16;
2023                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2024                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2025         }
2026 }
2027
2028 /**
2029  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2030  * @mbxCommand: mailbox command code.
2031  *
2032  * This function is called by the mailbox event handler function to verify
2033  * that the completed mailbox command is a legitimate mailbox command. If the
2034  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2035  * and the mailbox event handler will take the HBA offline.
2036  **/
2037 static int
2038 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2039 {
2040         uint8_t ret;
2041
2042         switch (mbxCommand) {
2043         case MBX_LOAD_SM:
2044         case MBX_READ_NV:
2045         case MBX_WRITE_NV:
2046         case MBX_WRITE_VPARMS:
2047         case MBX_RUN_BIU_DIAG:
2048         case MBX_INIT_LINK:
2049         case MBX_DOWN_LINK:
2050         case MBX_CONFIG_LINK:
2051         case MBX_CONFIG_RING:
2052         case MBX_RESET_RING:
2053         case MBX_READ_CONFIG:
2054         case MBX_READ_RCONFIG:
2055         case MBX_READ_SPARM:
2056         case MBX_READ_STATUS:
2057         case MBX_READ_RPI:
2058         case MBX_READ_XRI:
2059         case MBX_READ_REV:
2060         case MBX_READ_LNK_STAT:
2061         case MBX_REG_LOGIN:
2062         case MBX_UNREG_LOGIN:
2063         case MBX_CLEAR_LA:
2064         case MBX_DUMP_MEMORY:
2065         case MBX_DUMP_CONTEXT:
2066         case MBX_RUN_DIAGS:
2067         case MBX_RESTART:
2068         case MBX_UPDATE_CFG:
2069         case MBX_DOWN_LOAD:
2070         case MBX_DEL_LD_ENTRY:
2071         case MBX_RUN_PROGRAM:
2072         case MBX_SET_MASK:
2073         case MBX_SET_VARIABLE:
2074         case MBX_UNREG_D_ID:
2075         case MBX_KILL_BOARD:
2076         case MBX_CONFIG_FARP:
2077         case MBX_BEACON:
2078         case MBX_LOAD_AREA:
2079         case MBX_RUN_BIU_DIAG64:
2080         case MBX_CONFIG_PORT:
2081         case MBX_READ_SPARM64:
2082         case MBX_READ_RPI64:
2083         case MBX_REG_LOGIN64:
2084         case MBX_READ_TOPOLOGY:
2085         case MBX_WRITE_WWN:
2086         case MBX_SET_DEBUG:
2087         case MBX_LOAD_EXP_ROM:
2088         case MBX_ASYNCEVT_ENABLE:
2089         case MBX_REG_VPI:
2090         case MBX_UNREG_VPI:
2091         case MBX_HEARTBEAT:
2092         case MBX_PORT_CAPABILITIES:
2093         case MBX_PORT_IOV_CONTROL:
2094         case MBX_SLI4_CONFIG:
2095         case MBX_SLI4_REQ_FTRS:
2096         case MBX_REG_FCFI:
2097         case MBX_UNREG_FCFI:
2098         case MBX_REG_VFI:
2099         case MBX_UNREG_VFI:
2100         case MBX_INIT_VPI:
2101         case MBX_INIT_VFI:
2102         case MBX_RESUME_RPI:
2103         case MBX_READ_EVENT_LOG_STATUS:
2104         case MBX_READ_EVENT_LOG:
2105         case MBX_SECURITY_MGMT:
2106         case MBX_AUTH_PORT:
2107         case MBX_ACCESS_VDATA:
2108                 ret = mbxCommand;
2109                 break;
2110         default:
2111                 ret = MBX_SHUTDOWN;
2112                 break;
2113         }
2114         return ret;
2115 }
2116
2117 /**
2118  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2119  * @phba: Pointer to HBA context object.
2120  * @pmboxq: Pointer to mailbox command.
2121  *
2122  * This is completion handler function for mailbox commands issued from
2123  * lpfc_sli_issue_mbox_wait function. This function is called by the
2124  * mailbox event handler function with no lock held. This function
2125  * will wake up thread waiting on the wait queue pointed by context1
2126  * of the mailbox.
2127  **/
2128 void
2129 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2130 {
2131         wait_queue_head_t *pdone_q;
2132         unsigned long drvr_flag;
2133
2134         /*
2135          * If pdone_q is empty, the driver thread gave up waiting and
2136          * continued running.
2137          */
2138         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2139         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2140         pdone_q = (wait_queue_head_t *) pmboxq->context1;
2141         if (pdone_q)
2142                 wake_up_interruptible(pdone_q);
2143         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2144         return;
2145 }
2146
2147
2148 /**
2149  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2150  * @phba: Pointer to HBA context object.
2151  * @pmb: Pointer to mailbox object.
2152  *
2153  * This function is the default mailbox completion handler. It
2154  * frees the memory resources associated with the completed mailbox
2155  * command. If the completed command is a REG_LOGIN mailbox command,
2156  * this function will issue a UREG_LOGIN to re-claim the RPI.
2157  **/
2158 void
2159 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2160 {
2161         struct lpfc_vport  *vport = pmb->vport;
2162         struct lpfc_dmabuf *mp;
2163         struct lpfc_nodelist *ndlp;
2164         struct Scsi_Host *shost;
2165         uint16_t rpi, vpi;
2166         int rc;
2167
2168         mp = (struct lpfc_dmabuf *) (pmb->context1);
2169
2170         if (mp) {
2171                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2172                 kfree(mp);
2173         }
2174
2175         /*
2176          * If a REG_LOGIN succeeded  after node is destroyed or node
2177          * is in re-discovery driver need to cleanup the RPI.
2178          */
2179         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2180             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2181             !pmb->u.mb.mbxStatus) {
2182                 rpi = pmb->u.mb.un.varWords[0];
2183                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2184                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2185                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2186                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2187                 if (rc != MBX_NOT_FINISHED)
2188                         return;
2189         }
2190
2191         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2192                 !(phba->pport->load_flag & FC_UNLOADING) &&
2193                 !pmb->u.mb.mbxStatus) {
2194                 shost = lpfc_shost_from_vport(vport);
2195                 spin_lock_irq(shost->host_lock);
2196                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2197                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2198                 spin_unlock_irq(shost->host_lock);
2199         }
2200
2201         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2202                 ndlp = (struct lpfc_nodelist *)pmb->context2;
2203                 lpfc_nlp_put(ndlp);
2204                 pmb->context2 = NULL;
2205         }
2206
2207         /* Check security permission status on INIT_LINK mailbox command */
2208         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2209             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2210                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2211                                 "2860 SLI authentication is required "
2212                                 "for INIT_LINK but has not done yet\n");
2213
2214         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2215                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2216         else
2217                 mempool_free(pmb, phba->mbox_mem_pool);
2218 }
2219
2220 /**
2221  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2222  * @phba: Pointer to HBA context object.
2223  *
2224  * This function is called with no lock held. This function processes all
2225  * the completed mailbox commands and gives it to upper layers. The interrupt
2226  * service routine processes mailbox completion interrupt and adds completed
2227  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2228  * Worker thread call lpfc_sli_handle_mb_event, which will return the
2229  * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2230  * function returns the mailbox commands to the upper layer by calling the
2231  * completion handler function of each mailbox.
2232  **/
2233 int
2234 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2235 {
2236         MAILBOX_t *pmbox;
2237         LPFC_MBOXQ_t *pmb;
2238         int rc;
2239         LIST_HEAD(cmplq);
2240
2241         phba->sli.slistat.mbox_event++;
2242
2243         /* Get all completed mailboxe buffers into the cmplq */
2244         spin_lock_irq(&phba->hbalock);
2245         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2246         spin_unlock_irq(&phba->hbalock);
2247
2248         /* Get a Mailbox buffer to setup mailbox commands for callback */
2249         do {
2250                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2251                 if (pmb == NULL)
2252                         break;
2253
2254                 pmbox = &pmb->u.mb;
2255
2256                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2257                         if (pmb->vport) {
2258                                 lpfc_debugfs_disc_trc(pmb->vport,
2259                                         LPFC_DISC_TRC_MBOX_VPORT,
2260                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2261                                         (uint32_t)pmbox->mbxCommand,
2262                                         pmbox->un.varWords[0],
2263                                         pmbox->un.varWords[1]);
2264                         }
2265                         else {
2266                                 lpfc_debugfs_disc_trc(phba->pport,
2267                                         LPFC_DISC_TRC_MBOX,
2268                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2269                                         (uint32_t)pmbox->mbxCommand,
2270                                         pmbox->un.varWords[0],
2271                                         pmbox->un.varWords[1]);
2272                         }
2273                 }
2274
2275                 /*
2276                  * It is a fatal error if unknown mbox command completion.
2277                  */
2278                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2279                     MBX_SHUTDOWN) {
2280                         /* Unknown mailbox command compl */
2281                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2282                                         "(%d):0323 Unknown Mailbox command "
2283                                         "x%x (x%x/x%x) Cmpl\n",
2284                                         pmb->vport ? pmb->vport->vpi : 0,
2285                                         pmbox->mbxCommand,
2286                                         lpfc_sli_config_mbox_subsys_get(phba,
2287                                                                         pmb),
2288                                         lpfc_sli_config_mbox_opcode_get(phba,
2289                                                                         pmb));
2290                         phba->link_state = LPFC_HBA_ERROR;
2291                         phba->work_hs = HS_FFER3;
2292                         lpfc_handle_eratt(phba);
2293                         continue;
2294                 }
2295
2296                 if (pmbox->mbxStatus) {
2297                         phba->sli.slistat.mbox_stat_err++;
2298                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2299                                 /* Mbox cmd cmpl error - RETRYing */
2300                                 lpfc_printf_log(phba, KERN_INFO,
2301                                         LOG_MBOX | LOG_SLI,
2302                                         "(%d):0305 Mbox cmd cmpl "
2303                                         "error - RETRYing Data: x%x "
2304                                         "(x%x/x%x) x%x x%x x%x\n",
2305                                         pmb->vport ? pmb->vport->vpi : 0,
2306                                         pmbox->mbxCommand,
2307                                         lpfc_sli_config_mbox_subsys_get(phba,
2308                                                                         pmb),
2309                                         lpfc_sli_config_mbox_opcode_get(phba,
2310                                                                         pmb),
2311                                         pmbox->mbxStatus,
2312                                         pmbox->un.varWords[0],
2313                                         pmb->vport->port_state);
2314                                 pmbox->mbxStatus = 0;
2315                                 pmbox->mbxOwner = OWN_HOST;
2316                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2317                                 if (rc != MBX_NOT_FINISHED)
2318                                         continue;
2319                         }
2320                 }
2321
2322                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2323                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2324                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2325                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
2326                                 pmb->vport ? pmb->vport->vpi : 0,
2327                                 pmbox->mbxCommand,
2328                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2329                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2330                                 pmb->mbox_cmpl,
2331                                 *((uint32_t *) pmbox),
2332                                 pmbox->un.varWords[0],
2333                                 pmbox->un.varWords[1],
2334                                 pmbox->un.varWords[2],
2335                                 pmbox->un.varWords[3],
2336                                 pmbox->un.varWords[4],
2337                                 pmbox->un.varWords[5],
2338                                 pmbox->un.varWords[6],
2339                                 pmbox->un.varWords[7]);
2340
2341                 if (pmb->mbox_cmpl)
2342                         pmb->mbox_cmpl(phba,pmb);
2343         } while (1);
2344         return 0;
2345 }
2346
2347 /**
2348  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2349  * @phba: Pointer to HBA context object.
2350  * @pring: Pointer to driver SLI ring object.
2351  * @tag: buffer tag.
2352  *
2353  * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2354  * is set in the tag the buffer is posted for a particular exchange,
2355  * the function will return the buffer without replacing the buffer.
2356  * If the buffer is for unsolicited ELS or CT traffic, this function
2357  * returns the buffer and also posts another buffer to the firmware.
2358  **/
2359 static struct lpfc_dmabuf *
2360 lpfc_sli_get_buff(struct lpfc_hba *phba,
2361                   struct lpfc_sli_ring *pring,
2362                   uint32_t tag)
2363 {
2364         struct hbq_dmabuf *hbq_entry;
2365
2366         if (tag & QUE_BUFTAG_BIT)
2367                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2368         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2369         if (!hbq_entry)
2370                 return NULL;
2371         return &hbq_entry->dbuf;
2372 }
2373
2374 /**
2375  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2376  * @phba: Pointer to HBA context object.
2377  * @pring: Pointer to driver SLI ring object.
2378  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2379  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2380  * @fch_type: the type for the first frame of the sequence.
2381  *
2382  * This function is called with no lock held. This function uses the r_ctl and
2383  * type of the received sequence to find the correct callback function to call
2384  * to process the sequence.
2385  **/
2386 static int
2387 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2388                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2389                          uint32_t fch_type)
2390 {
2391         int i;
2392
2393         /* unSolicited Responses */
2394         if (pring->prt[0].profile) {
2395                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2396                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2397                                                                         saveq);
2398                 return 1;
2399         }
2400         /* We must search, based on rctl / type
2401            for the right routine */
2402         for (i = 0; i < pring->num_mask; i++) {
2403                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2404                     (pring->prt[i].type == fch_type)) {
2405                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2406                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2407                                                 (phba, pring, saveq);
2408                         return 1;
2409                 }
2410         }
2411         return 0;
2412 }
2413
2414 /**
2415  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2416  * @phba: Pointer to HBA context object.
2417  * @pring: Pointer to driver SLI ring object.
2418  * @saveq: Pointer to the unsolicited iocb.
2419  *
2420  * This function is called with no lock held by the ring event handler
2421  * when there is an unsolicited iocb posted to the response ring by the
2422  * firmware. This function gets the buffer associated with the iocbs
2423  * and calls the event handler for the ring. This function handles both
2424  * qring buffers and hbq buffers.
2425  * When the function returns 1 the caller can free the iocb object otherwise
2426  * upper layer functions will free the iocb objects.
2427  **/
2428 static int
2429 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2430                             struct lpfc_iocbq *saveq)
2431 {
2432         IOCB_t           * irsp;
2433         WORD5            * w5p;
2434         uint32_t           Rctl, Type;
2435         uint32_t           match;
2436         struct lpfc_iocbq *iocbq;
2437         struct lpfc_dmabuf *dmzbuf;
2438
2439         match = 0;
2440         irsp = &(saveq->iocb);
2441
2442         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2443                 if (pring->lpfc_sli_rcv_async_status)
2444                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2445                 else
2446                         lpfc_printf_log(phba,
2447                                         KERN_WARNING,
2448                                         LOG_SLI,
2449                                         "0316 Ring %d handler: unexpected "
2450                                         "ASYNC_STATUS iocb received evt_code "
2451                                         "0x%x\n",
2452                                         pring->ringno,
2453                                         irsp->un.asyncstat.evt_code);
2454                 return 1;
2455         }
2456
2457         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2458                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2459                 if (irsp->ulpBdeCount > 0) {
2460                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2461                                         irsp->un.ulpWord[3]);
2462                         lpfc_in_buf_free(phba, dmzbuf);
2463                 }
2464
2465                 if (irsp->ulpBdeCount > 1) {
2466                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2467                                         irsp->unsli3.sli3Words[3]);
2468                         lpfc_in_buf_free(phba, dmzbuf);
2469                 }
2470
2471                 if (irsp->ulpBdeCount > 2) {
2472                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2473                                 irsp->unsli3.sli3Words[7]);
2474                         lpfc_in_buf_free(phba, dmzbuf);
2475                 }
2476
2477                 return 1;
2478         }
2479
2480         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2481                 if (irsp->ulpBdeCount != 0) {
2482                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2483                                                 irsp->un.ulpWord[3]);
2484                         if (!saveq->context2)
2485                                 lpfc_printf_log(phba,
2486                                         KERN_ERR,
2487                                         LOG_SLI,
2488                                         "0341 Ring %d Cannot find buffer for "
2489                                         "an unsolicited iocb. tag 0x%x\n",
2490                                         pring->ringno,
2491                                         irsp->un.ulpWord[3]);
2492                 }
2493                 if (irsp->ulpBdeCount == 2) {
2494                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2495                                                 irsp->unsli3.sli3Words[7]);
2496                         if (!saveq->context3)
2497                                 lpfc_printf_log(phba,
2498                                         KERN_ERR,
2499                                         LOG_SLI,
2500                                         "0342 Ring %d Cannot find buffer for an"
2501                                         " unsolicited iocb. tag 0x%x\n",
2502                                         pring->ringno,
2503                                         irsp->unsli3.sli3Words[7]);
2504                 }
2505                 list_for_each_entry(iocbq, &saveq->list, list) {
2506                         irsp = &(iocbq->iocb);
2507                         if (irsp->ulpBdeCount != 0) {
2508                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2509                                                         irsp->un.ulpWord[3]);
2510                                 if (!iocbq->context2)
2511                                         lpfc_printf_log(phba,
2512                                                 KERN_ERR,
2513                                                 LOG_SLI,
2514                                                 "0343 Ring %d Cannot find "
2515                                                 "buffer for an unsolicited iocb"
2516                                                 ". tag 0x%x\n", pring->ringno,
2517                                                 irsp->un.ulpWord[3]);
2518                         }
2519                         if (irsp->ulpBdeCount == 2) {
2520                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2521                                                 irsp->unsli3.sli3Words[7]);
2522                                 if (!iocbq->context3)
2523                                         lpfc_printf_log(phba,
2524                                                 KERN_ERR,
2525                                                 LOG_SLI,
2526                                                 "0344 Ring %d Cannot find "
2527                                                 "buffer for an unsolicited "
2528                                                 "iocb. tag 0x%x\n",
2529                                                 pring->ringno,
2530                                                 irsp->unsli3.sli3Words[7]);
2531                         }
2532                 }
2533         }
2534         if (irsp->ulpBdeCount != 0 &&
2535             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2536              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2537                 int found = 0;
2538
2539                 /* search continue save q for same XRI */
2540                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2541                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2542                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
2543                                 list_add_tail(&saveq->list, &iocbq->list);
2544                                 found = 1;
2545                                 break;
2546                         }
2547                 }
2548                 if (!found)
2549                         list_add_tail(&saveq->clist,
2550                                       &pring->iocb_continue_saveq);
2551                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2552                         list_del_init(&iocbq->clist);
2553                         saveq = iocbq;
2554                         irsp = &(saveq->iocb);
2555                 } else
2556                         return 0;
2557         }
2558         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2559             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2560             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2561                 Rctl = FC_RCTL_ELS_REQ;
2562                 Type = FC_TYPE_ELS;
2563         } else {
2564                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2565                 Rctl = w5p->hcsw.Rctl;
2566                 Type = w5p->hcsw.Type;
2567
2568                 /* Firmware Workaround */
2569                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2570                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2571                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2572                         Rctl = FC_RCTL_ELS_REQ;
2573                         Type = FC_TYPE_ELS;
2574                         w5p->hcsw.Rctl = Rctl;
2575                         w5p->hcsw.Type = Type;
2576                 }
2577         }
2578
2579         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2580                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2581                                 "0313 Ring %d handler: unexpected Rctl x%x "
2582                                 "Type x%x received\n",
2583                                 pring->ringno, Rctl, Type);
2584
2585         return 1;
2586 }
2587
2588 /**
2589  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2590  * @phba: Pointer to HBA context object.
2591  * @pring: Pointer to driver SLI ring object.
2592  * @prspiocb: Pointer to response iocb object.
2593  *
2594  * This function looks up the iocb_lookup table to get the command iocb
2595  * corresponding to the given response iocb using the iotag of the
2596  * response iocb. This function is called with the hbalock held.
2597  * This function returns the command iocb object if it finds the command
2598  * iocb else returns NULL.
2599  **/
2600 static struct lpfc_iocbq *
2601 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2602                       struct lpfc_sli_ring *pring,
2603                       struct lpfc_iocbq *prspiocb)
2604 {
2605         struct lpfc_iocbq *cmd_iocb = NULL;
2606         uint16_t iotag;
2607
2608         iotag = prspiocb->iocb.ulpIoTag;
2609
2610         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2611                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2612                 list_del_init(&cmd_iocb->list);
2613                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2614                         pring->txcmplq_cnt--;
2615                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2616                 }
2617                 return cmd_iocb;
2618         }
2619
2620         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2621                         "0317 iotag x%x is out off "
2622                         "range: max iotag x%x wd0 x%x\n",
2623                         iotag, phba->sli.last_iotag,
2624                         *(((uint32_t *) &prspiocb->iocb) + 7));
2625         return NULL;
2626 }
2627
2628 /**
2629  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2630  * @phba: Pointer to HBA context object.
2631  * @pring: Pointer to driver SLI ring object.
2632  * @iotag: IOCB tag.
2633  *
2634  * This function looks up the iocb_lookup table to get the command iocb
2635  * corresponding to the given iotag. This function is called with the
2636  * hbalock held.
2637  * This function returns the command iocb object if it finds the command
2638  * iocb else returns NULL.
2639  **/
2640 static struct lpfc_iocbq *
2641 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2642                              struct lpfc_sli_ring *pring, uint16_t iotag)
2643 {
2644         struct lpfc_iocbq *cmd_iocb;
2645
2646         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2647                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2648                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2649                         /* remove from txcmpl queue list */
2650                         list_del_init(&cmd_iocb->list);
2651                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2652                         pring->txcmplq_cnt--;
2653                         return cmd_iocb;
2654                 }
2655         }
2656         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2657                         "0372 iotag x%x is out off range: max iotag (x%x)\n",
2658                         iotag, phba->sli.last_iotag);
2659         return NULL;
2660 }
2661
2662 /**
2663  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2664  * @phba: Pointer to HBA context object.
2665  * @pring: Pointer to driver SLI ring object.
2666  * @saveq: Pointer to the response iocb to be processed.
2667  *
2668  * This function is called by the ring event handler for non-fcp
2669  * rings when there is a new response iocb in the response ring.
2670  * The caller is not required to hold any locks. This function
2671  * gets the command iocb associated with the response iocb and
2672  * calls the completion handler for the command iocb. If there
2673  * is no completion handler, the function will free the resources
2674  * associated with command iocb. If the response iocb is for
2675  * an already aborted command iocb, the status of the completion
2676  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2677  * This function always returns 1.
2678  **/
2679 static int
2680 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2681                           struct lpfc_iocbq *saveq)
2682 {
2683         struct lpfc_iocbq *cmdiocbp;
2684         int rc = 1;
2685         unsigned long iflag;
2686
2687         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2688         spin_lock_irqsave(&phba->hbalock, iflag);
2689         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2690         spin_unlock_irqrestore(&phba->hbalock, iflag);
2691
2692         if (cmdiocbp) {
2693                 if (cmdiocbp->iocb_cmpl) {
2694                         /*
2695                          * If an ELS command failed send an event to mgmt
2696                          * application.
2697                          */
2698                         if (saveq->iocb.ulpStatus &&
2699                              (pring->ringno == LPFC_ELS_RING) &&
2700                              (cmdiocbp->iocb.ulpCommand ==
2701                                 CMD_ELS_REQUEST64_CR))
2702                                 lpfc_send_els_failure_event(phba,
2703                                         cmdiocbp, saveq);
2704
2705                         /*
2706                          * Post all ELS completions to the worker thread.
2707                          * All other are passed to the completion callback.
2708                          */
2709                         if (pring->ringno == LPFC_ELS_RING) {
2710                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2711                                     (cmdiocbp->iocb_flag &
2712                                                         LPFC_DRIVER_ABORTED)) {
2713                                         spin_lock_irqsave(&phba->hbalock,
2714                                                           iflag);
2715                                         cmdiocbp->iocb_flag &=
2716                                                 ~LPFC_DRIVER_ABORTED;
2717                                         spin_unlock_irqrestore(&phba->hbalock,
2718                                                                iflag);
2719                                         saveq->iocb.ulpStatus =
2720                                                 IOSTAT_LOCAL_REJECT;
2721                                         saveq->iocb.un.ulpWord[4] =
2722                                                 IOERR_SLI_ABORTED;
2723
2724                                         /* Firmware could still be in progress
2725                                          * of DMAing payload, so don't free data
2726                                          * buffer till after a hbeat.
2727                                          */
2728                                         spin_lock_irqsave(&phba->hbalock,
2729                                                           iflag);
2730                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2731                                         spin_unlock_irqrestore(&phba->hbalock,
2732                                                                iflag);
2733                                 }
2734                                 if (phba->sli_rev == LPFC_SLI_REV4) {
2735                                         if (saveq->iocb_flag &
2736                                             LPFC_EXCHANGE_BUSY) {
2737                                                 /* Set cmdiocb flag for the
2738                                                  * exchange busy so sgl (xri)
2739                                                  * will not be released until
2740                                                  * the abort xri is received
2741                                                  * from hba.
2742                                                  */
2743                                                 spin_lock_irqsave(
2744                                                         &phba->hbalock, iflag);
2745                                                 cmdiocbp->iocb_flag |=
2746                                                         LPFC_EXCHANGE_BUSY;
2747                                                 spin_unlock_irqrestore(
2748                                                         &phba->hbalock, iflag);
2749                                         }
2750                                         if (cmdiocbp->iocb_flag &
2751                                             LPFC_DRIVER_ABORTED) {
2752                                                 /*
2753                                                  * Clear LPFC_DRIVER_ABORTED
2754                                                  * bit in case it was driver
2755                                                  * initiated abort.
2756                                                  */
2757                                                 spin_lock_irqsave(
2758                                                         &phba->hbalock, iflag);
2759                                                 cmdiocbp->iocb_flag &=
2760                                                         ~LPFC_DRIVER_ABORTED;
2761                                                 spin_unlock_irqrestore(
2762                                                         &phba->hbalock, iflag);
2763                                                 cmdiocbp->iocb.ulpStatus =
2764                                                         IOSTAT_LOCAL_REJECT;
2765                                                 cmdiocbp->iocb.un.ulpWord[4] =
2766                                                         IOERR_ABORT_REQUESTED;
2767                                                 /*
2768                                                  * For SLI4, irsiocb contains
2769                                                  * NO_XRI in sli_xritag, it
2770                                                  * shall not affect releasing
2771                                                  * sgl (xri) process.
2772                                                  */
2773                                                 saveq->iocb.ulpStatus =
2774                                                         IOSTAT_LOCAL_REJECT;
2775                                                 saveq->iocb.un.ulpWord[4] =
2776                                                         IOERR_SLI_ABORTED;
2777                                                 spin_lock_irqsave(
2778                                                         &phba->hbalock, iflag);
2779                                                 saveq->iocb_flag |=
2780                                                         LPFC_DELAY_MEM_FREE;
2781                                                 spin_unlock_irqrestore(
2782                                                         &phba->hbalock, iflag);
2783                                         }
2784                                 }
2785                         }
2786                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2787                 } else
2788                         lpfc_sli_release_iocbq(phba, cmdiocbp);
2789         } else {
2790                 /*
2791                  * Unknown initiating command based on the response iotag.
2792                  * This could be the case on the ELS ring because of
2793                  * lpfc_els_abort().
2794                  */
2795                 if (pring->ringno != LPFC_ELS_RING) {
2796                         /*
2797                          * Ring <ringno> handler: unexpected completion IoTag
2798                          * <IoTag>
2799                          */
2800                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2801                                          "0322 Ring %d handler: "
2802                                          "unexpected completion IoTag x%x "
2803                                          "Data: x%x x%x x%x x%x\n",
2804                                          pring->ringno,
2805                                          saveq->iocb.ulpIoTag,
2806                                          saveq->iocb.ulpStatus,
2807                                          saveq->iocb.un.ulpWord[4],
2808                                          saveq->iocb.ulpCommand,
2809                                          saveq->iocb.ulpContext);
2810                 }
2811         }
2812
2813         return rc;
2814 }
2815
2816 /**
2817  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2818  * @phba: Pointer to HBA context object.
2819  * @pring: Pointer to driver SLI ring object.
2820  *
2821  * This function is called from the iocb ring event handlers when
2822  * put pointer is ahead of the get pointer for a ring. This function signal
2823  * an error attention condition to the worker thread and the worker
2824  * thread will transition the HBA to offline state.
2825  **/
2826 static void
2827 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2828 {
2829         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2830         /*
2831          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2832          * rsp ring <portRspMax>
2833          */
2834         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2835                         "0312 Ring %d handler: portRspPut %d "
2836                         "is bigger than rsp ring %d\n",
2837                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2838                         pring->sli.sli3.numRiocb);
2839
2840         phba->link_state = LPFC_HBA_ERROR;
2841
2842         /*
2843          * All error attention handlers are posted to
2844          * worker thread
2845          */
2846         phba->work_ha |= HA_ERATT;
2847         phba->work_hs = HS_FFER3;
2848
2849         lpfc_worker_wake_up(phba);
2850
2851         return;
2852 }
2853
2854 /**
2855  * lpfc_poll_eratt - Error attention polling timer timeout handler
2856  * @ptr: Pointer to address of HBA context object.
2857  *
2858  * This function is invoked by the Error Attention polling timer when the
2859  * timer times out. It will check the SLI Error Attention register for
2860  * possible attention events. If so, it will post an Error Attention event
2861  * and wake up worker thread to process it. Otherwise, it will set up the
2862  * Error Attention polling timer for the next poll.
2863  **/
2864 void lpfc_poll_eratt(unsigned long ptr)
2865 {
2866         struct lpfc_hba *phba;
2867         uint32_t eratt = 0, rem;
2868         uint64_t sli_intr, cnt;
2869
2870         phba = (struct lpfc_hba *)ptr;
2871
2872         /* Here we will also keep track of interrupts per sec of the hba */
2873         sli_intr = phba->sli.slistat.sli_intr;
2874
2875         if (phba->sli.slistat.sli_prev_intr > sli_intr)
2876                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2877                         sli_intr);
2878         else
2879                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2880
2881         /* 64-bit integer division not supporte on 32-bit x86 - use do_div */
2882         rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
2883         phba->sli.slistat.sli_ips = cnt;
2884
2885         phba->sli.slistat.sli_prev_intr = sli_intr;
2886
2887         /* Check chip HA register for error event */
2888         eratt = lpfc_sli_check_eratt(phba);
2889
2890         if (eratt)
2891                 /* Tell the worker thread there is work to do */
2892                 lpfc_worker_wake_up(phba);
2893         else
2894                 /* Restart the timer for next eratt poll */
2895                 mod_timer(&phba->eratt_poll, jiffies +
2896                                         HZ * LPFC_ERATT_POLL_INTERVAL);
2897         return;
2898 }
2899
2900
2901 /**
2902  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2903  * @phba: Pointer to HBA context object.
2904  * @pring: Pointer to driver SLI ring object.
2905  * @mask: Host attention register mask for this ring.
2906  *
2907  * This function is called from the interrupt context when there is a ring
2908  * event for the fcp ring. The caller does not hold any lock.
2909  * The function processes each response iocb in the response ring until it
2910  * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
2911  * LE bit set. The function will call the completion handler of the command iocb
2912  * if the response iocb indicates a completion for a command iocb or it is
2913  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2914  * function if this is an unsolicited iocb.
2915  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2916  * to check it explicitly.
2917  */
2918 int
2919 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2920                                 struct lpfc_sli_ring *pring, uint32_t mask)
2921 {
2922         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2923         IOCB_t *irsp = NULL;
2924         IOCB_t *entry = NULL;
2925         struct lpfc_iocbq *cmdiocbq = NULL;
2926         struct lpfc_iocbq rspiocbq;
2927         uint32_t status;
2928         uint32_t portRspPut, portRspMax;
2929         int rc = 1;
2930         lpfc_iocb_type type;
2931         unsigned long iflag;
2932         uint32_t rsp_cmpl = 0;
2933
2934         spin_lock_irqsave(&phba->hbalock, iflag);
2935         pring->stats.iocb_event++;
2936
2937         /*
2938          * The next available response entry should never exceed the maximum
2939          * entries.  If it does, treat it as an adapter hardware error.
2940          */
2941         portRspMax = pring->sli.sli3.numRiocb;
2942         portRspPut = le32_to_cpu(pgp->rspPutInx);
2943         if (unlikely(portRspPut >= portRspMax)) {
2944                 lpfc_sli_rsp_pointers_error(phba, pring);
2945                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2946                 return 1;
2947         }
2948         if (phba->fcp_ring_in_use) {
2949                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2950                 return 1;
2951         } else
2952                 phba->fcp_ring_in_use = 1;
2953
2954         rmb();
2955         while (pring->sli.sli3.rspidx != portRspPut) {
2956                 /*
2957                  * Fetch an entry off the ring and copy it into a local data
2958                  * structure.  The copy involves a byte-swap since the
2959                  * network byte order and pci byte orders are different.
2960                  */
2961                 entry = lpfc_resp_iocb(phba, pring);
2962                 phba->last_completion_time = jiffies;
2963
2964                 if (++pring->sli.sli3.rspidx >= portRspMax)
2965                         pring->sli.sli3.rspidx = 0;
2966
2967                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2968                                       (uint32_t *) &rspiocbq.iocb,
2969                                       phba->iocb_rsp_size);
2970                 INIT_LIST_HEAD(&(rspiocbq.list));
2971                 irsp = &rspiocbq.iocb;
2972
2973                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2974                 pring->stats.iocb_rsp++;
2975                 rsp_cmpl++;
2976
2977                 if (unlikely(irsp->ulpStatus)) {
2978                         /*
2979                          * If resource errors reported from HBA, reduce
2980                          * queuedepths of the SCSI device.
2981                          */
2982                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2983                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
2984                              IOERR_NO_RESOURCES)) {
2985                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2986                                 phba->lpfc_rampdown_queue_depth(phba);
2987                                 spin_lock_irqsave(&phba->hbalock, iflag);
2988                         }
2989
2990                         /* Rsp ring <ringno> error: IOCB */
2991                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2992                                         "0336 Rsp Ring %d error: IOCB Data: "
2993                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2994                                         pring->ringno,
2995                                         irsp->un.ulpWord[0],
2996                                         irsp->un.ulpWord[1],
2997                                         irsp->un.ulpWord[2],
2998                                         irsp->un.ulpWord[3],
2999                                         irsp->un.ulpWord[4],
3000                                         irsp->un.ulpWord[5],
3001                                         *(uint32_t *)&irsp->un1,
3002                                         *((uint32_t *)&irsp->un1 + 1));
3003                 }
3004
3005                 switch (type) {
3006                 case LPFC_ABORT_IOCB:
3007                 case LPFC_SOL_IOCB:
3008                         /*
3009                          * Idle exchange closed via ABTS from port.  No iocb
3010                          * resources need to be recovered.
3011                          */
3012                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3013                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3014                                                 "0333 IOCB cmd 0x%x"
3015                                                 " processed. Skipping"
3016                                                 " completion\n",
3017                                                 irsp->ulpCommand);
3018                                 break;
3019                         }
3020
3021                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3022                                                          &rspiocbq);
3023                         if (unlikely(!cmdiocbq))
3024                                 break;
3025                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3026                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3027                         if (cmdiocbq->iocb_cmpl) {
3028                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3030                                                       &rspiocbq);
3031                                 spin_lock_irqsave(&phba->hbalock, iflag);
3032                         }
3033                         break;
3034                 case LPFC_UNSOL_IOCB:
3035                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3036                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3037                         spin_lock_irqsave(&phba->hbalock, iflag);
3038                         break;
3039                 default:
3040                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3041                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3042                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3043                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3044                                        MAX_MSG_DATA);
3045                                 dev_warn(&((phba->pcidev)->dev),
3046                                          "lpfc%d: %s\n",
3047                                          phba->brd_no, adaptermsg);
3048                         } else {
3049                                 /* Unknown IOCB command */
3050                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3051                                                 "0334 Unknown IOCB command "
3052                                                 "Data: x%x, x%x x%x x%x x%x\n",
3053                                                 type, irsp->ulpCommand,
3054                                                 irsp->ulpStatus,
3055                                                 irsp->ulpIoTag,
3056                                                 irsp->ulpContext);
3057                         }
3058                         break;
3059                 }
3060
3061                 /*
3062                  * The response IOCB has been processed.  Update the ring
3063                  * pointer in SLIM.  If the port response put pointer has not
3064                  * been updated, sync the pgp->rspPutInx and fetch the new port
3065                  * response put pointer.
3066                  */
3067                 writel(pring->sli.sli3.rspidx,
3068                         &phba->host_gp[pring->ringno].rspGetInx);
3069
3070                 if (pring->sli.sli3.rspidx == portRspPut)
3071                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3072         }
3073
3074         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3075                 pring->stats.iocb_rsp_full++;
3076                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3077                 writel(status, phba->CAregaddr);
3078                 readl(phba->CAregaddr);
3079         }
3080         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3081                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3082                 pring->stats.iocb_cmd_empty++;
3083
3084                 /* Force update of the local copy of cmdGetInx */
3085                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3086                 lpfc_sli_resume_iocb(phba, pring);
3087
3088                 if ((pring->lpfc_sli_cmd_available))
3089                         (pring->lpfc_sli_cmd_available) (phba, pring);
3090
3091         }
3092
3093         phba->fcp_ring_in_use = 0;
3094         spin_unlock_irqrestore(&phba->hbalock, iflag);
3095         return rc;
3096 }
3097
3098 /**
3099  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3100  * @phba: Pointer to HBA context object.
3101  * @pring: Pointer to driver SLI ring object.
3102  * @rspiocbp: Pointer to driver response IOCB object.
3103  *
3104  * This function is called from the worker thread when there is a slow-path
3105  * response IOCB to process. This function chains all the response iocbs until
3106  * seeing the iocb with the LE bit set. The function will call
3107  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3108  * completion of a command iocb. The function will call the
3109  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3110  * The function frees the resources or calls the completion handler if this
3111  * iocb is an abort completion. The function returns NULL when the response
3112  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3113  * this function shall chain the iocb on to the iocb_continueq and return the
3114  * response iocb passed in.
3115  **/
3116 static struct lpfc_iocbq *
3117 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3118                         struct lpfc_iocbq *rspiocbp)
3119 {
3120         struct lpfc_iocbq *saveq;
3121         struct lpfc_iocbq *cmdiocbp;
3122         struct lpfc_iocbq *next_iocb;
3123         IOCB_t *irsp = NULL;
3124         uint32_t free_saveq;
3125         uint8_t iocb_cmd_type;
3126         lpfc_iocb_type type;
3127         unsigned long iflag;
3128         int rc;
3129
3130         spin_lock_irqsave(&phba->hbalock, iflag);
3131         /* First add the response iocb to the countinueq list */
3132         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3133         pring->iocb_continueq_cnt++;
3134
3135         /* Now, determine whether the list is completed for processing */
3136         irsp = &rspiocbp->iocb;
3137         if (irsp->ulpLe) {
3138                 /*
3139                  * By default, the driver expects to free all resources
3140                  * associated with this iocb completion.
3141                  */
3142                 free_saveq = 1;
3143                 saveq = list_get_first(&pring->iocb_continueq,
3144                                        struct lpfc_iocbq, list);
3145                 irsp = &(saveq->iocb);
3146                 list_del_init(&pring->iocb_continueq);
3147                 pring->iocb_continueq_cnt = 0;
3148
3149                 pring->stats.iocb_rsp++;
3150
3151                 /*
3152                  * If resource errors reported from HBA, reduce
3153                  * queuedepths of the SCSI device.
3154                  */
3155                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3156                     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3157                      IOERR_NO_RESOURCES)) {
3158                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3159                         phba->lpfc_rampdown_queue_depth(phba);
3160                         spin_lock_irqsave(&phba->hbalock, iflag);
3161                 }
3162
3163                 if (irsp->ulpStatus) {
3164                         /* Rsp ring <ringno> error: IOCB */
3165                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3166                                         "0328 Rsp Ring %d error: "
3167                                         "IOCB Data: "
3168                                         "x%x x%x x%x x%x "
3169                                         "x%x x%x x%x x%x "
3170                                         "x%x x%x x%x x%x "
3171                                         "x%x x%x x%x x%x\n",
3172                                         pring->ringno,
3173                                         irsp->un.ulpWord[0],
3174                                         irsp->un.ulpWord[1],
3175                                         irsp->un.ulpWord[2],
3176                                         irsp->un.ulpWord[3],
3177                                         irsp->un.ulpWord[4],
3178                                         irsp->un.ulpWord[5],
3179                                         *(((uint32_t *) irsp) + 6),
3180                                         *(((uint32_t *) irsp) + 7),
3181                                         *(((uint32_t *) irsp) + 8),
3182                                         *(((uint32_t *) irsp) + 9),
3183                                         *(((uint32_t *) irsp) + 10),
3184                                         *(((uint32_t *) irsp) + 11),
3185                                         *(((uint32_t *) irsp) + 12),
3186                                         *(((uint32_t *) irsp) + 13),
3187                                         *(((uint32_t *) irsp) + 14),
3188                                         *(((uint32_t *) irsp) + 15));
3189                 }
3190
3191                 /*
3192                  * Fetch the IOCB command type and call the correct completion
3193                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
3194                  * get freed back to the lpfc_iocb_list by the discovery
3195                  * kernel thread.
3196                  */
3197                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3198                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3199                 switch (type) {
3200                 case LPFC_SOL_IOCB:
3201                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3202                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3203                         spin_lock_irqsave(&phba->hbalock, iflag);
3204                         break;
3205
3206                 case LPFC_UNSOL_IOCB:
3207                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3208                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3209                         spin_lock_irqsave(&phba->hbalock, iflag);
3210                         if (!rc)
3211                                 free_saveq = 0;
3212                         break;
3213
3214                 case LPFC_ABORT_IOCB:
3215                         cmdiocbp = NULL;
3216                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3217                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3218                                                                  saveq);
3219                         if (cmdiocbp) {
3220                                 /* Call the specified completion routine */
3221                                 if (cmdiocbp->iocb_cmpl) {
3222                                         spin_unlock_irqrestore(&phba->hbalock,
3223                                                                iflag);
3224                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3225                                                               saveq);
3226                                         spin_lock_irqsave(&phba->hbalock,
3227                                                           iflag);
3228                                 } else
3229                                         __lpfc_sli_release_iocbq(phba,
3230                                                                  cmdiocbp);
3231                         }
3232                         break;
3233
3234                 case LPFC_UNKNOWN_IOCB:
3235                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3236                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3237                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3238                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3239                                        MAX_MSG_DATA);
3240                                 dev_warn(&((phba->pcidev)->dev),
3241                                          "lpfc%d: %s\n",
3242                                          phba->brd_no, adaptermsg);
3243                         } else {
3244                                 /* Unknown IOCB command */
3245                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3246                                                 "0335 Unknown IOCB "
3247                                                 "command Data: x%x "
3248                                                 "x%x x%x x%x\n",
3249                                                 irsp->ulpCommand,
3250                                                 irsp->ulpStatus,
3251                                                 irsp->ulpIoTag,
3252                                                 irsp->ulpContext);
3253                         }
3254                         break;
3255                 }
3256
3257                 if (free_saveq) {
3258                         list_for_each_entry_safe(rspiocbp, next_iocb,
3259                                                  &saveq->list, list) {
3260                                 list_del(&rspiocbp->list);
3261                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
3262                         }
3263                         __lpfc_sli_release_iocbq(phba, saveq);
3264                 }
3265                 rspiocbp = NULL;
3266         }
3267         spin_unlock_irqrestore(&phba->hbalock, iflag);
3268         return rspiocbp;
3269 }
3270
3271 /**
3272  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3273  * @phba: Pointer to HBA context object.
3274  * @pring: Pointer to driver SLI ring object.
3275  * @mask: Host attention register mask for this ring.
3276  *
3277  * This routine wraps the actual slow_ring event process routine from the
3278  * API jump table function pointer from the lpfc_hba struct.
3279  **/
3280 void
3281 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3282                                 struct lpfc_sli_ring *pring, uint32_t mask)
3283 {
3284         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3285 }
3286
3287 /**
3288  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3289  * @phba: Pointer to HBA context object.
3290  * @pring: Pointer to driver SLI ring object.
3291  * @mask: Host attention register mask for this ring.
3292  *
3293  * This function is called from the worker thread when there is a ring event
3294  * for non-fcp rings. The caller does not hold any lock. The function will
3295  * remove each response iocb in the response ring and calls the handle
3296  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3297  **/
3298 static void
3299 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3300                                    struct lpfc_sli_ring *pring, uint32_t mask)
3301 {
3302         struct lpfc_pgp *pgp;
3303         IOCB_t *entry;
3304         IOCB_t *irsp = NULL;
3305         struct lpfc_iocbq *rspiocbp = NULL;
3306         uint32_t portRspPut, portRspMax;
3307         unsigned long iflag;
3308         uint32_t status;
3309
3310         pgp = &phba->port_gp[pring->ringno];
3311         spin_lock_irqsave(&phba->hbalock, iflag);
3312         pring->stats.iocb_event++;
3313
3314         /*
3315          * The next available response entry should never exceed the maximum
3316          * entries.  If it does, treat it as an adapter hardware error.
3317          */
3318         portRspMax = pring->sli.sli3.numRiocb;
3319         portRspPut = le32_to_cpu(pgp->rspPutInx);
3320         if (portRspPut >= portRspMax) {
3321                 /*
3322                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3323                  * rsp ring <portRspMax>
3324                  */
3325                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3326                                 "0303 Ring %d handler: portRspPut %d "
3327                                 "is bigger than rsp ring %d\n",
3328                                 pring->ringno, portRspPut, portRspMax);
3329
3330                 phba->link_state = LPFC_HBA_ERROR;
3331                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3332
3333                 phba->work_hs = HS_FFER3;
3334                 lpfc_handle_eratt(phba);
3335
3336                 return;
3337         }
3338
3339         rmb();
3340         while (pring->sli.sli3.rspidx != portRspPut) {
3341                 /*
3342                  * Build a completion list and call the appropriate handler.
3343                  * The process is to get the next available response iocb, get
3344                  * a free iocb from the list, copy the response data into the
3345                  * free iocb, insert to the continuation list, and update the
3346                  * next response index to slim.  This process makes response
3347                  * iocb's in the ring available to DMA as fast as possible but
3348                  * pays a penalty for a copy operation.  Since the iocb is
3349                  * only 32 bytes, this penalty is considered small relative to
3350                  * the PCI reads for register values and a slim write.  When
3351                  * the ulpLe field is set, the entire Command has been
3352                  * received.
3353                  */
3354                 entry = lpfc_resp_iocb(phba, pring);
3355
3356                 phba->last_completion_time = jiffies;
3357                 rspiocbp = __lpfc_sli_get_iocbq(phba);
3358                 if (rspiocbp == NULL) {
3359                         printk(KERN_ERR "%s: out of buffers! Failing "
3360                                "completion.\n", __func__);
3361                         break;
3362                 }
3363
3364                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3365                                       phba->iocb_rsp_size);
3366                 irsp = &rspiocbp->iocb;
3367
3368                 if (++pring->sli.sli3.rspidx >= portRspMax)
3369                         pring->sli.sli3.rspidx = 0;
3370
3371                 if (pring->ringno == LPFC_ELS_RING) {
3372                         lpfc_debugfs_slow_ring_trc(phba,
3373                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3374                                 *(((uint32_t *) irsp) + 4),
3375                                 *(((uint32_t *) irsp) + 6),
3376                                 *(((uint32_t *) irsp) + 7));
3377                 }
3378
3379                 writel(pring->sli.sli3.rspidx,
3380                         &phba->host_gp[pring->ringno].rspGetInx);
3381
3382                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3383                 /* Handle the response IOCB */
3384                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3385                 spin_lock_irqsave(&phba->hbalock, iflag);
3386
3387                 /*
3388                  * If the port response put pointer has not been updated, sync
3389                  * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
3390                  * response put pointer.
3391                  */
3392                 if (pring->sli.sli3.rspidx == portRspPut) {
3393                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3394                 }
3395         } /* while (pring->sli.sli3.rspidx != portRspPut) */
3396
3397         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3398                 /* At least one response entry has been freed */
3399                 pring->stats.iocb_rsp_full++;
3400                 /* SET RxRE_RSP in Chip Att register */
3401                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3402                 writel(status, phba->CAregaddr);
3403                 readl(phba->CAregaddr); /* flush */
3404         }
3405         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3406                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3407                 pring->stats.iocb_cmd_empty++;
3408
3409                 /* Force update of the local copy of cmdGetInx */
3410                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3411                 lpfc_sli_resume_iocb(phba, pring);
3412
3413                 if ((pring->lpfc_sli_cmd_available))
3414                         (pring->lpfc_sli_cmd_available) (phba, pring);
3415
3416         }
3417
3418         spin_unlock_irqrestore(&phba->hbalock, iflag);
3419         return;
3420 }
3421
3422 /**
3423  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3424  * @phba: Pointer to HBA context object.
3425  * @pring: Pointer to driver SLI ring object.
3426  * @mask: Host attention register mask for this ring.
3427  *
3428  * This function is called from the worker thread when there is a pending
3429  * ELS response iocb on the driver internal slow-path response iocb worker
3430  * queue. The caller does not hold any lock. The function will remove each
3431  * response iocb from the response worker queue and calls the handle
3432  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3433  **/
3434 static void
3435 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3436                                    struct lpfc_sli_ring *pring, uint32_t mask)
3437 {
3438         struct lpfc_iocbq *irspiocbq;
3439         struct hbq_dmabuf *dmabuf;
3440         struct lpfc_cq_event *cq_event;
3441         unsigned long iflag;
3442
3443         spin_lock_irqsave(&phba->hbalock, iflag);
3444         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3445         spin_unlock_irqrestore(&phba->hbalock, iflag);
3446         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3447                 /* Get the response iocb from the head of work queue */
3448                 spin_lock_irqsave(&phba->hbalock, iflag);
3449                 list_remove_head(&phba->sli4_hba.sp_queue_event,
3450                                  cq_event, struct lpfc_cq_event, list);
3451                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3452
3453                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3454                 case CQE_CODE_COMPL_WQE:
3455                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3456                                                  cq_event);
3457                         /* Translate ELS WCQE to response IOCBQ */
3458                         irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3459                                                                    irspiocbq);
3460                         if (irspiocbq)
3461                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
3462                                                            irspiocbq);
3463                         break;
3464                 case CQE_CODE_RECEIVE:
3465                 case CQE_CODE_RECEIVE_V1:
3466                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
3467                                               cq_event);
3468                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
3469                         break;
3470                 default:
3471                         break;
3472                 }
3473         }
3474 }
3475
3476 /**
3477  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3478  * @phba: Pointer to HBA context object.
3479  * @pring: Pointer to driver SLI ring object.
3480  *
3481  * This function aborts all iocbs in the given ring and frees all the iocb
3482  * objects in txq. This function issues an abort iocb for all the iocb commands
3483  * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3484  * the return of this function. The caller is not required to hold any locks.
3485  **/
3486 void
3487 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3488 {
3489         LIST_HEAD(completions);
3490         struct lpfc_iocbq *iocb, *next_iocb;
3491
3492         if (pring->ringno == LPFC_ELS_RING) {
3493                 lpfc_fabric_abort_hba(phba);
3494         }
3495
3496         /* Error everything on txq and txcmplq
3497          * First do the txq.
3498          */
3499         spin_lock_irq(&phba->hbalock);
3500         list_splice_init(&pring->txq, &completions);
3501         pring->txq_cnt = 0;
3502
3503         /* Next issue ABTS for everything on the txcmplq */
3504         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3505                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3506
3507         spin_unlock_irq(&phba->hbalock);
3508
3509         /* Cancel all the IOCBs from the completions list */
3510         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3511                               IOERR_SLI_ABORTED);
3512 }
3513
3514 /**
3515  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3516  * @phba: Pointer to HBA context object.
3517  *
3518  * This function flushes all iocbs in the fcp ring and frees all the iocb
3519  * objects in txq and txcmplq. This function will not issue abort iocbs
3520  * for all the iocb commands in txcmplq, they will just be returned with
3521  * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3522  * slot has been permanently disabled.
3523  **/
3524 void
3525 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3526 {
3527         LIST_HEAD(txq);
3528         LIST_HEAD(txcmplq);
3529         struct lpfc_sli *psli = &phba->sli;
3530         struct lpfc_sli_ring  *pring;
3531
3532         /* Currently, only one fcp ring */
3533         pring = &psli->ring[psli->fcp_ring];
3534
3535         spin_lock_irq(&phba->hbalock);
3536         /* Retrieve everything on txq */
3537         list_splice_init(&pring->txq, &txq);
3538         pring->txq_cnt = 0;
3539
3540         /* Retrieve everything on the txcmplq */
3541         list_splice_init(&pring->txcmplq, &txcmplq);
3542         pring->txcmplq_cnt = 0;
3543
3544         /* Indicate the I/O queues are flushed */
3545         phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3546         spin_unlock_irq(&phba->hbalock);
3547
3548         /* Flush the txq */
3549         lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3550                               IOERR_SLI_DOWN);
3551
3552         /* Flush the txcmpq */
3553         lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3554                               IOERR_SLI_DOWN);
3555 }
3556
3557 /**
3558  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3559  * @phba: Pointer to HBA context object.
3560  * @mask: Bit mask to be checked.
3561  *
3562  * This function reads the host status register and compares
3563  * with the provided bit mask to check if HBA completed
3564  * the restart. This function will wait in a loop for the
3565  * HBA to complete restart. If the HBA does not restart within
3566  * 15 iterations, the function will reset the HBA again. The
3567  * function returns 1 when HBA fail to restart otherwise returns
3568  * zero.
3569  **/
3570 static int
3571 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3572 {
3573         uint32_t status;
3574         int i = 0;
3575         int retval = 0;
3576
3577         /* Read the HBA Host Status Register */
3578         if (lpfc_readl(phba->HSregaddr, &status))
3579                 return 1;
3580
3581         /*
3582          * Check status register every 100ms for 5 retries, then every
3583          * 500ms for 5, then every 2.5 sec for 5, then reset board and
3584          * every 2.5 sec for 4.
3585          * Break our of the loop if errors occurred during init.
3586          */
3587         while (((status & mask) != mask) &&
3588                !(status & HS_FFERM) &&
3589                i++ < 20) {
3590
3591                 if (i <= 5)
3592                         msleep(10);
3593                 else if (i <= 10)
3594                         msleep(500);
3595                 else
3596                         msleep(2500);
3597
3598                 if (i == 15) {
3599                                 /* Do post */
3600                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3601                         lpfc_sli_brdrestart(phba);
3602                 }
3603                 /* Read the HBA Host Status Register */
3604                 if (lpfc_readl(phba->HSregaddr, &status)) {
3605                         retval = 1;
3606                         break;
3607                 }
3608         }
3609
3610         /* Check to see if any errors occurred during init */
3611         if ((status & HS_FFERM) || (i >= 20)) {
3612                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3613                                 "2751 Adapter failed to restart, "
3614                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3615                                 status,
3616                                 readl(phba->MBslimaddr + 0xa8),
3617                                 readl(phba->MBslimaddr + 0xac));
3618                 phba->link_state = LPFC_HBA_ERROR;
3619                 retval = 1;
3620         }
3621
3622         return retval;
3623 }
3624
3625 /**
3626  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3627  * @phba: Pointer to HBA context object.
3628  * @mask: Bit mask to be checked.
3629  *
3630  * This function checks the host status register to check if HBA is
3631  * ready. This function will wait in a loop for the HBA to be ready
3632  * If the HBA is not ready , the function will will reset the HBA PCI
3633  * function again. The function returns 1 when HBA fail to be ready
3634  * otherwise returns zero.
3635  **/
3636 static int
3637 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3638 {
3639         uint32_t status;
3640         int retval = 0;
3641
3642         /* Read the HBA Host Status Register */
3643         status = lpfc_sli4_post_status_check(phba);
3644
3645         if (status) {
3646                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3647                 lpfc_sli_brdrestart(phba);
3648                 status = lpfc_sli4_post_status_check(phba);
3649         }
3650
3651         /* Check to see if any errors occurred during init */
3652         if (status) {
3653                 phba->link_state = LPFC_HBA_ERROR;
3654                 retval = 1;
3655         } else
3656                 phba->sli4_hba.intr_enable = 0;
3657
3658         return retval;
3659 }
3660
3661 /**
3662  * lpfc_sli_brdready - Wrapper func for checking the hba readyness
3663  * @phba: Pointer to HBA context object.
3664  * @mask: Bit mask to be checked.
3665  *
3666  * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
3667  * from the API jump table function pointer from the lpfc_hba struct.
3668  **/
3669 int
3670 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3671 {
3672         return phba->lpfc_sli_brdready(phba, mask);
3673 }
3674
3675 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3676
3677 /**
3678  * lpfc_reset_barrier - Make HBA ready for HBA reset
3679  * @phba: Pointer to HBA context object.
3680  *
3681  * This function is called before resetting an HBA. This function is called
3682  * with hbalock held and requests HBA to quiesce DMAs before a reset.
3683  **/
3684 void lpfc_reset_barrier(struct lpfc_hba *phba)
3685 {
3686         uint32_t __iomem *resp_buf;
3687         uint32_t __iomem *mbox_buf;
3688         volatile uint32_t mbox;
3689         uint32_t hc_copy, ha_copy, resp_data;
3690         int  i;
3691         uint8_t hdrtype;
3692
3693         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3694         if (hdrtype != 0x80 ||
3695             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3696              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3697                 return;
3698
3699         /*
3700          * Tell the other part of the chip to suspend temporarily all
3701          * its DMA activity.
3702          */
3703         resp_buf = phba->MBslimaddr;
3704
3705         /* Disable the error attention */
3706         if (lpfc_readl(phba->HCregaddr, &hc_copy))
3707                 return;
3708         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3709         readl(phba->HCregaddr); /* flush */
3710         phba->link_flag |= LS_IGNORE_ERATT;
3711
3712         if (lpfc_readl(phba->HAregaddr, &ha_copy))
3713                 return;
3714         if (ha_copy & HA_ERATT) {
3715                 /* Clear Chip error bit */
3716                 writel(HA_ERATT, phba->HAregaddr);
3717                 phba->pport->stopped = 1;
3718         }
3719
3720         mbox = 0;
3721         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3722         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3723
3724         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3725         mbox_buf = phba->MBslimaddr;
3726         writel(mbox, mbox_buf);
3727
3728         for (i = 0; i < 50; i++) {
3729                 if (lpfc_readl((resp_buf + 1), &resp_data))
3730                         return;
3731                 if (resp_data != ~(BARRIER_TEST_PATTERN))
3732                         mdelay(1);
3733                 else
3734                         break;
3735         }
3736         resp_data = 0;
3737         if (lpfc_readl((resp_buf + 1), &resp_data))
3738                 return;
3739         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
3740                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3741                     phba->pport->stopped)
3742                         goto restore_hc;
3743                 else
3744                         goto clear_errat;
3745         }
3746
3747         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3748         resp_data = 0;
3749         for (i = 0; i < 500; i++) {
3750                 if (lpfc_readl(resp_buf, &resp_data))
3751                         return;
3752                 if (resp_data != mbox)
3753                         mdelay(1);
3754                 else
3755                         break;
3756         }
3757
3758 clear_errat:
3759
3760         while (++i < 500) {
3761                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3762                         return;
3763                 if (!(ha_copy & HA_ERATT))
3764                         mdelay(1);
3765                 else
3766                         break;
3767         }
3768
3769         if (readl(phba->HAregaddr) & HA_ERATT) {
3770                 writel(HA_ERATT, phba->HAregaddr);
3771                 phba->pport->stopped = 1;
3772         }
3773
3774 restore_hc:
3775         phba->link_flag &= ~LS_IGNORE_ERATT;
3776         writel(hc_copy, phba->HCregaddr);
3777         readl(phba->HCregaddr); /* flush */
3778 }
3779
3780 /**
3781  * lpfc_sli_brdkill - Issue a kill_board mailbox command
3782  * @phba: Pointer to HBA context object.
3783  *
3784  * This function issues a kill_board mailbox command and waits for
3785  * the error attention interrupt. This function is called for stopping
3786  * the firmware processing. The caller is not required to hold any
3787  * locks. This function calls lpfc_hba_down_post function to free
3788  * any pending commands after the kill. The function will return 1 when it
3789  * fails to kill the board else will return 0.
3790  **/
3791 int
3792 lpfc_sli_brdkill(struct lpfc_hba *phba)
3793 {
3794         struct lpfc_sli *psli;
3795         LPFC_MBOXQ_t *pmb;
3796         uint32_t status;
3797         uint32_t ha_copy;
3798         int retval;
3799         int i = 0;
3800
3801         psli = &phba->sli;
3802
3803         /* Kill HBA */
3804         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3805                         "0329 Kill HBA Data: x%x x%x\n",
3806                         phba->pport->port_state, psli->sli_flag);
3807
3808         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3809         if (!pmb)
3810                 return 1;
3811
3812         /* Disable the error attention */
3813         spin_lock_irq(&phba->hbalock);
3814         if (lpfc_readl(phba->HCregaddr, &status)) {
3815                 spin_unlock_irq(&phba->hbalock);
3816                 mempool_free(pmb, phba->mbox_mem_pool);
3817                 return 1;
3818         }
3819         status &= ~HC_ERINT_ENA;
3820         writel(status, phba->HCregaddr);
3821         readl(phba->HCregaddr); /* flush */
3822         phba->link_flag |= LS_IGNORE_ERATT;
3823         spin_unlock_irq(&phba->hbalock);
3824
3825         lpfc_kill_board(phba, pmb);
3826         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3827         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3828
3829         if (retval != MBX_SUCCESS) {
3830                 if (retval != MBX_BUSY)
3831                         mempool_free(pmb, phba->mbox_mem_pool);
3832                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3833                                 "2752 KILL_BOARD command failed retval %d\n",
3834                                 retval);
3835                 spin_lock_irq(&phba->hbalock);
3836                 phba->link_flag &= ~LS_IGNORE_ERATT;
3837                 spin_unlock_irq(&phba->hbalock);
3838                 return 1;
3839         }
3840
3841         spin_lock_irq(&phba->hbalock);
3842         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3843         spin_unlock_irq(&phba->hbalock);
3844
3845         mempool_free(pmb, phba->mbox_mem_pool);
3846
3847         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3848          * attention every 100ms for 3 seconds. If we don't get ERATT after
3849          * 3 seconds we still set HBA_ERROR state because the status of the
3850          * board is now undefined.
3851          */
3852         if (lpfc_readl(phba->HAregaddr, &ha_copy))
3853                 return 1;
3854         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3855                 mdelay(100);
3856                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3857                         return 1;
3858         }
3859
3860         del_timer_sync(&psli->mbox_tmo);
3861         if (ha_copy & HA_ERATT) {
3862                 writel(HA_ERATT, phba->HAregaddr);
3863                 phba->pport->stopped = 1;
3864         }
3865         spin_lock_irq(&phba->hbalock);
3866         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3867         psli->mbox_active = NULL;
3868         phba->link_flag &= ~LS_IGNORE_ERATT;
3869         spin_unlock_irq(&phba->hbalock);
3870
3871         lpfc_hba_down_post(phba);
3872         phba->link_state = LPFC_HBA_ERROR;
3873
3874         return ha_copy & HA_ERATT ? 0 : 1;
3875 }
3876
3877 /**
3878  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
3879  * @phba: Pointer to HBA context object.
3880  *
3881  * This function resets the HBA by writing HC_INITFF to the control
3882  * register. After the HBA resets, this function resets all the iocb ring
3883  * indices. This function disables PCI layer parity checking during
3884  * the reset.
3885  * This function returns 0 always.
3886  * The caller is not required to hold any locks.
3887  **/
3888 int
3889 lpfc_sli_brdreset(struct lpfc_hba *phba)
3890 {
3891         struct lpfc_sli *psli;
3892         struct lpfc_sli_ring *pring;
3893         uint16_t cfg_value;
3894         int i;
3895
3896         psli = &phba->sli;
3897
3898         /* Reset HBA */
3899         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3900                         "0325 Reset HBA Data: x%x x%x\n",
3901                         phba->pport->port_state, psli->sli_flag);
3902
3903         /* perform board reset */
3904         phba->fc_eventTag = 0;
3905         phba->link_events = 0;
3906         phba->pport->fc_myDID = 0;
3907         phba->pport->fc_prevDID = 0;
3908
3909         /* Turn off parity checking and serr during the physical reset */
3910         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3911         pci_write_config_word(phba->pcidev, PCI_COMMAND,
3912                               (cfg_value &
3913                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3914
3915         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3916
3917         /* Now toggle INITFF bit in the Host Control Register */
3918         writel(HC_INITFF, phba->HCregaddr);
3919         mdelay(1);
3920         readl(phba->HCregaddr); /* flush */
3921         writel(0, phba->HCregaddr);
3922         readl(phba->HCregaddr); /* flush */
3923
3924         /* Restore PCI cmd register */
3925         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3926
3927         /* Initialize relevant SLI info */
3928         for (i = 0; i < psli->num_rings; i++) {
3929                 pring = &psli->ring[i];
3930                 pring->flag = 0;
3931                 pring->sli.sli3.rspidx = 0;
3932                 pring->sli.sli3.next_cmdidx  = 0;
3933                 pring->sli.sli3.local_getidx = 0;
3934                 pring->sli.sli3.cmdidx = 0;
3935                 pring->missbufcnt = 0;
3936         }
3937
3938         phba->link_state = LPFC_WARM_START;
3939         return 0;
3940 }
3941
3942 /**
3943  * lpfc_sli4_brdreset - Reset a sli-4 HBA
3944  * @phba: Pointer to HBA context object.
3945  *
3946  * This function resets a SLI4 HBA. This function disables PCI layer parity
3947  * checking during resets the device. The caller is not required to hold
3948  * any locks.
3949  *
3950  * This function returns 0 always.
3951  **/
3952 int
3953 lpfc_sli4_brdreset(struct lpfc_hba *phba)
3954 {
3955         struct lpfc_sli *psli = &phba->sli;
3956         uint16_t cfg_value;
3957         int rc;
3958
3959         /* Reset HBA */
3960         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3961                         "0295 Reset HBA Data: x%x x%x\n",
3962                         phba->pport->port_state, psli->sli_flag);
3963
3964         /* perform board reset */
3965         phba->fc_eventTag = 0;
3966         phba->link_events = 0;
3967         phba->pport->fc_myDID = 0;
3968         phba->pport->fc_prevDID = 0;
3969
3970         spin_lock_irq(&phba->hbalock);
3971         psli->sli_flag &= ~(LPFC_PROCESS_LA);
3972         phba->fcf.fcf_flag = 0;
3973         spin_unlock_irq(&phba->hbalock);
3974
3975         /* Now physically reset the device */
3976         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3977                         "0389 Performing PCI function reset!\n");
3978
3979         /* Turn off parity checking and serr during the physical reset */
3980         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3981         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3982                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3983
3984         /* Perform FCoE PCI function reset before freeing queue memory */
3985         rc = lpfc_pci_function_reset(phba);
3986         lpfc_sli4_queue_destroy(phba);
3987
3988         /* Restore PCI cmd register */
3989         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3990
3991         return rc;
3992 }
3993
3994 /**
3995  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
3996  * @phba: Pointer to HBA context object.
3997  *
3998  * This function is called in the SLI initialization code path to
3999  * restart the HBA. The caller is not required to hold any lock.
4000  * This function writes MBX_RESTART mailbox command to the SLIM and
4001  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4002  * function to free any pending commands. The function enables
4003  * POST only during the first initialization. The function returns zero.
4004  * The function does not guarantee completion of MBX_RESTART mailbox
4005  * command before the return of this function.
4006  **/
4007 static int
4008 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4009 {
4010         MAILBOX_t *mb;
4011         struct lpfc_sli *psli;
4012         volatile uint32_t word0;
4013         void __iomem *to_slim;
4014         uint32_t hba_aer_enabled;
4015
4016         spin_lock_irq(&phba->hbalock);
4017
4018         /* Take PCIe device Advanced Error Reporting (AER) state */
4019         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4020
4021         psli = &phba->sli;
4022
4023         /* Restart HBA */
4024         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4025                         "0337 Restart HBA Data: x%x x%x\n",
4026                         phba->pport->port_state, psli->sli_flag);
4027
4028         word0 = 0;
4029         mb = (MAILBOX_t *) &word0;
4030         mb->mbxCommand = MBX_RESTART;
4031         mb->mbxHc = 1;
4032
4033         lpfc_reset_barrier(phba);
4034
4035         to_slim = phba->MBslimaddr;
4036         writel(*(uint32_t *) mb, to_slim);
4037         readl(to_slim); /* flush */
4038
4039         /* Only skip post after fc_ffinit is completed */
4040         if (phba->pport->port_state)
4041                 word0 = 1;      /* This is really setting up word1 */
4042         else
4043                 word0 = 0;      /* This is really setting up word1 */
4044         to_slim = phba->MBslimaddr + sizeof (uint32_t);
4045         writel(*(uint32_t *) mb, to_slim);
4046         readl(to_slim); /* flush */
4047
4048         lpfc_sli_brdreset(phba);
4049         phba->pport->stopped = 0;
4050         phba->link_state = LPFC_INIT_START;
4051         phba->hba_flag = 0;
4052         spin_unlock_irq(&phba->hbalock);
4053
4054         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4055         psli->stats_start = get_seconds();
4056
4057         /* Give the INITFF and Post time to settle. */
4058         mdelay(100);
4059
4060         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4061         if (hba_aer_enabled)
4062                 pci_disable_pcie_error_reporting(phba->pcidev);
4063
4064         lpfc_hba_down_post(phba);
4065
4066         return 0;
4067 }
4068
4069 /**
4070  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4071  * @phba: Pointer to HBA context object.
4072  *
4073  * This function is called in the SLI initialization code path to restart
4074  * a SLI4 HBA. The caller is not required to hold any lock.
4075  * At the end of the function, it calls lpfc_hba_down_post function to
4076  * free any pending commands.
4077  **/
4078 static int
4079 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4080 {
4081         struct lpfc_sli *psli = &phba->sli;
4082         uint32_t hba_aer_enabled;
4083         int rc;
4084
4085         /* Restart HBA */
4086         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4087                         "0296 Restart HBA Data: x%x x%x\n",
4088                         phba->pport->port_state, psli->sli_flag);
4089
4090         /* Take PCIe device Advanced Error Reporting (AER) state */
4091         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4092
4093         rc = lpfc_sli4_brdreset(phba);
4094
4095         spin_lock_irq(&phba->hbalock);
4096         phba->pport->stopped = 0;
4097         phba->link_state = LPFC_INIT_START;
4098         phba->hba_flag = 0;
4099         spin_unlock_irq(&phba->hbalock);
4100
4101         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4102         psli->stats_start = get_seconds();
4103
4104         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4105         if (hba_aer_enabled)
4106                 pci_disable_pcie_error_reporting(phba->pcidev);
4107
4108         lpfc_hba_down_post(phba);
4109
4110         return rc;
4111 }
4112
4113 /**
4114  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4115  * @phba: Pointer to HBA context object.
4116  *
4117  * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4118  * API jump table function pointer from the lpfc_hba struct.
4119 **/
4120 int
4121 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4122 {
4123         return phba->lpfc_sli_brdrestart(phba);
4124 }
4125
4126 /**
4127  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4128  * @phba: Pointer to HBA context object.
4129  *
4130  * This function is called after a HBA restart to wait for successful
4131  * restart of the HBA. Successful restart of the HBA is indicated by
4132  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4133  * iteration, the function will restart the HBA again. The function returns
4134  * zero if HBA successfully restarted else returns negative error code.
4135  **/
4136 static int
4137 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4138 {
4139         uint32_t status, i = 0;
4140
4141         /* Read the HBA Host Status Register */
4142         if (lpfc_readl(phba->HSregaddr, &status))
4143                 return -EIO;
4144
4145         /* Check status register to see what current state is */
4146         i = 0;
4147         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4148
4149                 /* Check every 10ms for 10 retries, then every 100ms for 90
4150                  * retries, then every 1 sec for 50 retires for a total of
4151                  * ~60 seconds before reset the board again and check every
4152                  * 1 sec for 50 retries. The up to 60 seconds before the
4153                  * board ready is required by the Falcon FIPS zeroization
4154                  * complete, and any reset the board in between shall cause
4155                  * restart of zeroization, further delay the board ready.
4156                  */
4157                 if (i++ >= 200) {
4158                         /* Adapter failed to init, timeout, status reg
4159                            <status> */
4160                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4161                                         "0436 Adapter failed to init, "
4162                                         "timeout, status reg x%x, "
4163                                         "FW Data: A8 x%x AC x%x\n", status,
4164                                         readl(phba->MBslimaddr + 0xa8),
4165                                         readl(phba->MBslimaddr + 0xac));
4166                         phba->link_state = LPFC_HBA_ERROR;
4167                         return -ETIMEDOUT;
4168                 }
4169
4170                 /* Check to see if any errors occurred during init */
4171                 if (status & HS_FFERM) {
4172                         /* ERROR: During chipset initialization */
4173                         /* Adapter failed to init, chipset, status reg
4174                            <status> */
4175                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4176                                         "0437 Adapter failed to init, "
4177                                         "chipset, status reg x%x, "
4178                                         "FW Data: A8 x%x AC x%x\n", status,
4179                                         readl(phba->MBslimaddr + 0xa8),
4180                                         readl(phba->MBslimaddr + 0xac));
4181                         phba->link_state = LPFC_HBA_ERROR;
4182                         return -EIO;
4183                 }
4184
4185                 if (i <= 10)
4186                         msleep(10);
4187                 else if (i <= 100)
4188                         msleep(100);
4189                 else
4190                         msleep(1000);
4191
4192                 if (i == 150) {
4193                         /* Do post */
4194                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4195                         lpfc_sli_brdrestart(phba);
4196                 }
4197                 /* Read the HBA Host Status Register */
4198                 if (lpfc_readl(phba->HSregaddr, &status))
4199                         return -EIO;
4200         }
4201
4202         /* Check to see if any errors occurred during init */
4203         if (status & HS_FFERM) {
4204                 /* ERROR: During chipset initialization */
4205                 /* Adapter failed to init, chipset, status reg <status> */
4206                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4207                                 "0438 Adapter failed to init, chipset, "
4208                                 "status reg x%x, "
4209                                 "FW Data: A8 x%x AC x%x\n", status,
4210                                 readl(phba->MBslimaddr + 0xa8),
4211                                 readl(phba->MBslimaddr + 0xac));
4212                 phba->link_state = LPFC_HBA_ERROR;
4213                 return -EIO;
4214         }
4215
4216         /* Clear all interrupt enable conditions */
4217         writel(0, phba->HCregaddr);
4218         readl(phba->HCregaddr); /* flush */
4219
4220         /* setup host attn register */
4221         writel(0xffffffff, phba->HAregaddr);
4222         readl(phba->HAregaddr); /* flush */
4223         return 0;
4224 }
4225
4226 /**
4227  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4228  *
4229  * This function calculates and returns the number of HBQs required to be
4230  * configured.
4231  **/
4232 int
4233 lpfc_sli_hbq_count(void)
4234 {
4235         return ARRAY_SIZE(lpfc_hbq_defs);
4236 }
4237
4238 /**
4239  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4240  *
4241  * This function adds the number of hbq entries in every HBQ to get
4242  * the total number of hbq entries required for the HBA and returns
4243  * the total count.
4244  **/
4245 static int
4246 lpfc_sli_hbq_entry_count(void)
4247 {
4248         int  hbq_count = lpfc_sli_hbq_count();
4249         int  count = 0;
4250         int  i;
4251
4252         for (i = 0; i < hbq_count; ++i)
4253                 count += lpfc_hbq_defs[i]->entry_count;
4254         return count;
4255 }
4256
4257 /**
4258  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4259  *
4260  * This function calculates amount of memory required for all hbq entries
4261  * to be configured and returns the total memory required.
4262  **/
4263 int
4264 lpfc_sli_hbq_size(void)
4265 {
4266         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4267 }
4268
4269 /**
4270  * lpfc_sli_hbq_setup - configure and initialize HBQs
4271  * @phba: Pointer to HBA context object.
4272  *
4273  * This function is called during the SLI initialization to configure
4274  * all the HBQs and post buffers to the HBQ. The caller is not
4275  * required to hold any locks. This function will return zero if successful
4276  * else it will return negative error code.
4277  **/
4278 static int
4279 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4280 {
4281         int  hbq_count = lpfc_sli_hbq_count();
4282         LPFC_MBOXQ_t *pmb;
4283         MAILBOX_t *pmbox;
4284         uint32_t hbqno;
4285         uint32_t hbq_entry_index;
4286
4287                                 /* Get a Mailbox buffer to setup mailbox
4288                                  * commands for HBA initialization
4289                                  */
4290         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4291
4292         if (!pmb)
4293                 return -ENOMEM;
4294
4295         pmbox = &pmb->u.mb;
4296
4297         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4298         phba->link_state = LPFC_INIT_MBX_CMDS;
4299         phba->hbq_in_use = 1;
4300
4301         hbq_entry_index = 0;
4302         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4303                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4304                 phba->hbqs[hbqno].hbqPutIdx      = 0;
4305                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4306                 phba->hbqs[hbqno].entry_count =
4307                         lpfc_hbq_defs[hbqno]->entry_count;
4308                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4309                         hbq_entry_index, pmb);
4310                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4311
4312                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4313                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4314                            mbxStatus <status>, ring <num> */
4315
4316                         lpfc_printf_log(phba, KERN_ERR,
4317                                         LOG_SLI | LOG_VPORT,
4318                                         "1805 Adapter failed to init. "
4319                                         "Data: x%x x%x x%x\n",
4320                                         pmbox->mbxCommand,
4321                                         pmbox->mbxStatus, hbqno);
4322
4323                         phba->link_state = LPFC_HBA_ERROR;
4324                         mempool_free(pmb, phba->mbox_mem_pool);
4325                         return -ENXIO;
4326                 }
4327         }
4328         phba->hbq_count = hbq_count;
4329
4330         mempool_free(pmb, phba->mbox_mem_pool);
4331
4332         /* Initially populate or replenish the HBQs */
4333         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4334                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4335         return 0;
4336 }
4337
4338 /**
4339  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4340  * @phba: Pointer to HBA context object.
4341  *
4342  * This function is called during the SLI initialization to configure
4343  * all the HBQs and post buffers to the HBQ. The caller is not
4344  * required to hold any locks. This function will return zero if successful
4345  * else it will return negative error code.
4346  **/
4347 static int
4348 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4349 {
4350         phba->hbq_in_use = 1;
4351         phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4352         phba->hbq_count = 1;
4353         /* Initially populate or replenish the HBQs */
4354         lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4355         return 0;
4356 }
4357
4358 /**
4359  * lpfc_sli_config_port - Issue config port mailbox command
4360  * @phba: Pointer to HBA context object.
4361  * @sli_mode: sli mode - 2/3
4362  *
4363  * This function is called by the sli intialization code path
4364  * to issue config_port mailbox command. This function restarts the
4365  * HBA firmware and issues a config_port mailbox command to configure
4366  * the SLI interface in the sli mode specified by sli_mode
4367  * variable. The caller is not required to hold any locks.
4368  * The function returns 0 if successful, else returns negative error
4369  * code.
4370  **/
4371 int
4372 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4373 {
4374         LPFC_MBOXQ_t *pmb;
4375         uint32_t resetcount = 0, rc = 0, done = 0;
4376
4377         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4378         if (!pmb) {
4379                 phba->link_state = LPFC_HBA_ERROR;
4380                 return -ENOMEM;
4381         }
4382
4383         phba->sli_rev = sli_mode;
4384         while (resetcount < 2 && !done) {
4385                 spin_lock_irq(&phba->hbalock);
4386                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4387                 spin_unlock_irq(&phba->hbalock);
4388                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4389                 lpfc_sli_brdrestart(phba);
4390                 rc = lpfc_sli_chipset_init(phba);
4391                 if (rc)
4392                         break;
4393
4394                 spin_lock_irq(&phba->hbalock);
4395                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4396                 spin_unlock_irq(&phba->hbalock);
4397                 resetcount++;
4398
4399                 /* Call pre CONFIG_PORT mailbox command initialization.  A
4400                  * value of 0 means the call was successful.  Any other
4401                  * nonzero value is a failure, but if ERESTART is returned,
4402                  * the driver may reset the HBA and try again.
4403                  */
4404                 rc = lpfc_config_port_prep(phba);
4405                 if (rc == -ERESTART) {
4406                         phba->link_state = LPFC_LINK_UNKNOWN;
4407                         continue;
4408                 } else if (rc)
4409                         break;
4410
4411                 phba->link_state = LPFC_INIT_MBX_CMDS;
4412                 lpfc_config_port(phba, pmb);
4413                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4414                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4415                                         LPFC_SLI3_HBQ_ENABLED |
4416                                         LPFC_SLI3_CRP_ENABLED |
4417                                         LPFC_SLI3_BG_ENABLED |
4418                                         LPFC_SLI3_DSS_ENABLED);
4419                 if (rc != MBX_SUCCESS) {
4420                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4421                                 "0442 Adapter failed to init, mbxCmd x%x "
4422                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4423                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4424                         spin_lock_irq(&phba->hbalock);
4425                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4426                         spin_unlock_irq(&phba->hbalock);
4427                         rc = -ENXIO;
4428                 } else {
4429                         /* Allow asynchronous mailbox command to go through */
4430                         spin_lock_irq(&phba->hbalock);
4431                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4432                         spin_unlock_irq(&phba->hbalock);
4433                         done = 1;
4434
4435                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4436                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
4437                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4438                                         "3110 Port did not grant ASABT\n");
4439                 }
4440         }
4441         if (!done) {
4442                 rc = -EINVAL;
4443                 goto do_prep_failed;
4444         }
4445         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4446                 if (!pmb->u.mb.un.varCfgPort.cMA) {
4447                         rc = -ENXIO;
4448                         goto do_prep_failed;
4449                 }
4450                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4451                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4452                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4453                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4454                                 phba->max_vpi : phba->max_vports;
4455
4456                 } else
4457                         phba->max_vpi = 0;
4458                 phba->fips_level = 0;
4459                 phba->fips_spec_rev = 0;
4460                 if (pmb->u.mb.un.varCfgPort.gdss) {
4461                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4462                         phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4463                         phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4464                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4465                                         "2850 Security Crypto Active. FIPS x%d "
4466                                         "(Spec Rev: x%d)",
4467                                         phba->fips_level, phba->fips_spec_rev);
4468                 }
4469                 if (pmb->u.mb.un.varCfgPort.sec_err) {
4470                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4471                                         "2856 Config Port Security Crypto "
4472                                         "Error: x%x ",
4473                                         pmb->u.mb.un.varCfgPort.sec_err);
4474                 }
4475                 if (pmb->u.mb.un.varCfgPort.gerbm)
4476                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4477                 if (pmb->u.mb.un.varCfgPort.gcrp)
4478                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4479
4480                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4481                 phba->port_gp = phba->mbox->us.s3_pgp.port;
4482
4483                 if (phba->cfg_enable_bg) {
4484                         if (pmb->u.mb.un.varCfgPort.gbg)
4485                                 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4486                         else
4487                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4488                                                 "0443 Adapter did not grant "
4489                                                 "BlockGuard\n");
4490                 }
4491         } else {
4492                 phba->hbq_get = NULL;
4493                 phba->port_gp = phba->mbox->us.s2.port;
4494                 phba->max_vpi = 0;
4495         }
4496 do_prep_failed:
4497         mempool_free(pmb, phba->mbox_mem_pool);
4498         return rc;
4499 }
4500
4501
4502 /**
4503  * lpfc_sli_hba_setup - SLI intialization function
4504  * @phba: Pointer to HBA context object.
4505  *
4506  * This function is the main SLI intialization function. This function
4507  * is called by the HBA intialization code, HBA reset code and HBA
4508  * error attention handler code. Caller is not required to hold any
4509  * locks. This function issues config_port mailbox command to configure
4510  * the SLI, setup iocb rings and HBQ rings. In the end the function
4511  * calls the config_port_post function to issue init_link mailbox
4512  * command and to start the discovery. The function will return zero
4513  * if successful, else it will return negative error code.
4514  **/
4515 int
4516 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4517 {
4518         uint32_t rc;
4519         int  mode = 3, i;
4520         int longs;
4521
4522         switch (lpfc_sli_mode) {
4523         case 2:
4524                 if (phba->cfg_enable_npiv) {
4525                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4526                                 "1824 NPIV enabled: Override lpfc_sli_mode "
4527                                 "parameter (%d) to auto (0).\n",
4528                                 lpfc_sli_mode);
4529                         break;
4530                 }
4531                 mode = 2;
4532                 break;
4533         case 0:
4534         case 3:
4535                 break;
4536         default:
4537                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4538                                 "1819 Unrecognized lpfc_sli_mode "
4539                                 "parameter: %d.\n", lpfc_sli_mode);
4540
4541                 break;
4542         }
4543
4544         rc = lpfc_sli_config_port(phba, mode);
4545
4546         if (rc && lpfc_sli_mode == 3)
4547                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4548                                 "1820 Unable to select SLI-3.  "
4549                                 "Not supported by adapter.\n");
4550         if (rc && mode != 2)
4551                 rc = lpfc_sli_config_port(phba, 2);
4552         if (rc)
4553                 goto lpfc_sli_hba_setup_error;
4554
4555         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4556         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4557                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4558                 if (!rc) {
4559                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4560                                         "2709 This device supports "
4561                                         "Advanced Error Reporting (AER)\n");
4562                         spin_lock_irq(&phba->hbalock);
4563                         phba->hba_flag |= HBA_AER_ENABLED;
4564                         spin_unlock_irq(&phba->hbalock);
4565                 } else {
4566                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4567                                         "2708 This device does not support "
4568                                         "Advanced Error Reporting (AER)\n");
4569                         phba->cfg_aer_support = 0;
4570                 }
4571         }
4572
4573         if (phba->sli_rev == 3) {
4574                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4575                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4576         } else {
4577                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4578                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4579                 phba->sli3_options = 0;
4580         }
4581
4582         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4583                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4584                         phba->sli_rev, phba->max_vpi);
4585         rc = lpfc_sli_ring_map(phba);
4586
4587         if (rc)
4588                 goto lpfc_sli_hba_setup_error;
4589
4590         /* Initialize VPIs. */
4591         if (phba->sli_rev == LPFC_SLI_REV3) {
4592                 /*
4593                  * The VPI bitmask and physical ID array are allocated
4594                  * and initialized once only - at driver load.  A port
4595                  * reset doesn't need to reinitialize this memory.
4596                  */
4597                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4598                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4599                         phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4600                                                   GFP_KERNEL);
4601                         if (!phba->vpi_bmask) {
4602                                 rc = -ENOMEM;
4603                                 goto lpfc_sli_hba_setup_error;
4604                         }
4605
4606                         phba->vpi_ids = kzalloc(
4607                                         (phba->max_vpi+1) * sizeof(uint16_t),
4608                                         GFP_KERNEL);
4609                         if (!phba->vpi_ids) {
4610                                 kfree(phba->vpi_bmask);
4611                                 rc = -ENOMEM;
4612                                 goto lpfc_sli_hba_setup_error;
4613                         }
4614                         for (i = 0; i < phba->max_vpi; i++)
4615                                 phba->vpi_ids[i] = i;
4616                 }
4617         }
4618
4619         /* Init HBQs */
4620         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4621                 rc = lpfc_sli_hbq_setup(phba);
4622                 if (rc)
4623                         goto lpfc_sli_hba_setup_error;
4624         }
4625         spin_lock_irq(&phba->hbalock);
4626         phba->sli.sli_flag |= LPFC_PROCESS_LA;
4627         spin_unlock_irq(&phba->hbalock);
4628
4629         rc = lpfc_config_port_post(phba);
4630         if (rc)
4631                 goto lpfc_sli_hba_setup_error;
4632
4633         return rc;
4634
4635 lpfc_sli_hba_setup_error:
4636         phba->link_state = LPFC_HBA_ERROR;
4637         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4638                         "0445 Firmware initialization failed\n");
4639         return rc;
4640 }
4641
4642 /**
4643  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4644  * @phba: Pointer to HBA context object.
4645  * @mboxq: mailbox pointer.
4646  * This function issue a dump mailbox command to read config region
4647  * 23 and parse the records in the region and populate driver
4648  * data structure.
4649  **/
4650 static int
4651 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4652 {
4653         LPFC_MBOXQ_t *mboxq;
4654         struct lpfc_dmabuf *mp;
4655         struct lpfc_mqe *mqe;
4656         uint32_t data_length;
4657         int rc;
4658
4659         /* Program the default value of vlan_id and fc_map */
4660         phba->valid_vlan = 0;
4661         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4662         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4663         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4664
4665         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4666         if (!mboxq)
4667                 return -ENOMEM;
4668
4669         mqe = &mboxq->u.mqe;
4670         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4671                 rc = -ENOMEM;
4672                 goto out_free_mboxq;
4673         }
4674
4675         mp = (struct lpfc_dmabuf *) mboxq->context1;
4676         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4677
4678         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4679                         "(%d):2571 Mailbox cmd x%x Status x%x "
4680                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4681                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4682                         "CQ: x%x x%x x%x x%x\n",
4683                         mboxq->vport ? mboxq->vport->vpi : 0,
4684                         bf_get(lpfc_mqe_command, mqe),
4685                         bf_get(lpfc_mqe_status, mqe),
4686                         mqe->un.mb_words[0], mqe->un.mb_words[1],
4687                         mqe->un.mb_words[2], mqe->un.mb_words[3],
4688                         mqe->un.mb_words[4], mqe->un.mb_words[5],
4689                         mqe->un.mb_words[6], mqe->un.mb_words[7],
4690                         mqe->un.mb_words[8], mqe->un.mb_words[9],
4691                         mqe->un.mb_words[10], mqe->un.mb_words[11],
4692                         mqe->un.mb_words[12], mqe->un.mb_words[13],
4693                         mqe->un.mb_words[14], mqe->un.mb_words[15],
4694                         mqe->un.mb_words[16], mqe->un.mb_words[50],
4695                         mboxq->mcqe.word0,
4696                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
4697                         mboxq->mcqe.trailer);
4698
4699         if (rc) {
4700                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4701                 kfree(mp);
4702                 rc = -EIO;
4703                 goto out_free_mboxq;
4704         }
4705         data_length = mqe->un.mb_words[5];
4706         if (data_length > DMP_RGN23_SIZE) {
4707                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4708                 kfree(mp);
4709                 rc = -EIO;
4710                 goto out_free_mboxq;
4711         }
4712
4713         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4714         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4715         kfree(mp);
4716         rc = 0;
4717
4718 out_free_mboxq:
4719         mempool_free(mboxq, phba->mbox_mem_pool);
4720         return rc;
4721 }
4722
4723 /**
4724  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4725  * @phba: pointer to lpfc hba data structure.
4726  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4727  * @vpd: pointer to the memory to hold resulting port vpd data.
4728  * @vpd_size: On input, the number of bytes allocated to @vpd.
4729  *            On output, the number of data bytes in @vpd.
4730  *
4731  * This routine executes a READ_REV SLI4 mailbox command.  In
4732  * addition, this routine gets the port vpd data.
4733  *
4734  * Return codes
4735  *      0 - successful
4736  *      -ENOMEM - could not allocated memory.
4737  **/
4738 static int
4739 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4740                     uint8_t *vpd, uint32_t *vpd_size)
4741 {
4742         int rc = 0;
4743         uint32_t dma_size;
4744         struct lpfc_dmabuf *dmabuf;
4745         struct lpfc_mqe *mqe;
4746
4747         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4748         if (!dmabuf)
4749                 return -ENOMEM;
4750
4751         /*
4752          * Get a DMA buffer for the vpd data resulting from the READ_REV
4753          * mailbox command.
4754          */
4755         dma_size = *vpd_size;
4756         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4757                                           dma_size,
4758                                           &dmabuf->phys,
4759                                           GFP_KERNEL);
4760         if (!dmabuf->virt) {
4761                 kfree(dmabuf);
4762                 return -ENOMEM;
4763         }
4764         memset(dmabuf->virt, 0, dma_size);
4765
4766         /*
4767          * The SLI4 implementation of READ_REV conflicts at word1,
4768          * bits 31:16 and SLI4 adds vpd functionality not present
4769          * in SLI3.  This code corrects the conflicts.
4770          */
4771         lpfc_read_rev(phba, mboxq);
4772         mqe = &mboxq->u.mqe;
4773         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4774         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4775         mqe->un.read_rev.word1 &= 0x0000FFFF;
4776         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4777         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4778
4779         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4780         if (rc) {
4781                 dma_free_coherent(&phba->pcidev->dev, dma_size,
4782                                   dmabuf->virt, dmabuf->phys);
4783                 kfree(dmabuf);
4784                 return -EIO;
4785         }
4786
4787         /*
4788          * The available vpd length cannot be bigger than the
4789          * DMA buffer passed to the port.  Catch the less than
4790          * case and update the caller's size.
4791          */
4792         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4793                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4794
4795         memcpy(vpd, dmabuf->virt, *vpd_size);
4796
4797         dma_free_coherent(&phba->pcidev->dev, dma_size,
4798                           dmabuf->virt, dmabuf->phys);
4799         kfree(dmabuf);
4800         return 0;
4801 }
4802
4803 /**
4804  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4805  * @phba: pointer to lpfc hba data structure.
4806  *
4807  * This routine retrieves SLI4 device physical port name this PCI function
4808  * is attached to.
4809  *
4810  * Return codes
4811  *      0 - successful
4812  *      otherwise - failed to retrieve physical port name
4813  **/
4814 static int
4815 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4816 {
4817         LPFC_MBOXQ_t *mboxq;
4818         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4819         struct lpfc_controller_attribute *cntl_attr;
4820         struct lpfc_mbx_get_port_name *get_port_name;
4821         void *virtaddr = NULL;
4822         uint32_t alloclen, reqlen;
4823         uint32_t shdr_status, shdr_add_status;
4824         union lpfc_sli4_cfg_shdr *shdr;
4825         char cport_name = 0;
4826         int rc;
4827
4828         /* We assume nothing at this point */
4829         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4830         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4831
4832         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4833         if (!mboxq)
4834                 return -ENOMEM;
4835         /* obtain link type and link number via READ_CONFIG */
4836         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4837         lpfc_sli4_read_config(phba);
4838         if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4839                 goto retrieve_ppname;
4840
4841         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4842         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4843         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4844                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4845                         LPFC_SLI4_MBX_NEMBED);
4846         if (alloclen < reqlen) {
4847                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4848                                 "3084 Allocated DMA memory size (%d) is "
4849                                 "less than the requested DMA memory size "
4850                                 "(%d)\n", alloclen, reqlen);
4851                 rc = -ENOMEM;
4852                 goto out_free_mboxq;
4853         }
4854         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4855         virtaddr = mboxq->sge_array->addr[0];
4856         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4857         shdr = &mbx_cntl_attr->cfg_shdr;
4858         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4859         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4860         if (shdr_status || shdr_add_status || rc) {
4861                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4862                                 "3085 Mailbox x%x (x%x/x%x) failed, "
4863                                 "rc:x%x, status:x%x, add_status:x%x\n",
4864                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4865                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4866                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4867                                 rc, shdr_status, shdr_add_status);
4868                 rc = -ENXIO;
4869                 goto out_free_mboxq;
4870         }
4871         cntl_attr = &mbx_cntl_attr->cntl_attr;
4872         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4873         phba->sli4_hba.lnk_info.lnk_tp =
4874                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4875         phba->sli4_hba.lnk_info.lnk_no =
4876                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4877         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4878                         "3086 lnk_type:%d, lnk_numb:%d\n",
4879                         phba->sli4_hba.lnk_info.lnk_tp,
4880                         phba->sli4_hba.lnk_info.lnk_no);
4881
4882 retrieve_ppname:
4883         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4884                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4885                 sizeof(struct lpfc_mbx_get_port_name) -
4886                 sizeof(struct lpfc_sli4_cfg_mhdr),
4887                 LPFC_SLI4_MBX_EMBED);
4888         get_port_name = &mboxq->u.mqe.un.get_port_name;
4889         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4890         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4891         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4892                 phba->sli4_hba.lnk_info.lnk_tp);
4893         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4894         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4895         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4896         if (shdr_status || shdr_add_status || rc) {
4897                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4898                                 "3087 Mailbox x%x (x%x/x%x) failed: "
4899                                 "rc:x%x, status:x%x, add_status:x%x\n",
4900                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4901                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4902                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4903                                 rc, shdr_status, shdr_add_status);
4904                 rc = -ENXIO;
4905                 goto out_free_mboxq;
4906         }
4907         switch (phba->sli4_hba.lnk_info.lnk_no) {
4908         case LPFC_LINK_NUMBER_0:
4909                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4910                                 &get_port_name->u.response);
4911                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4912                 break;
4913         case LPFC_LINK_NUMBER_1:
4914                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4915                                 &get_port_name->u.response);
4916                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4917                 break;
4918         case LPFC_LINK_NUMBER_2:
4919                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4920                                 &get_port_name->u.response);
4921                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4922                 break;
4923         case LPFC_LINK_NUMBER_3:
4924                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4925                                 &get_port_name->u.response);
4926                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4927                 break;
4928         default:
4929                 break;
4930         }
4931
4932         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4933                 phba->Port[0] = cport_name;
4934                 phba->Port[1] = '\0';
4935                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4936                                 "3091 SLI get port name: %s\n", phba->Port);
4937         }
4938
4939 out_free_mboxq:
4940         if (rc != MBX_TIMEOUT) {
4941                 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4942                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
4943                 else
4944                         mempool_free(mboxq, phba->mbox_mem_pool);
4945         }
4946         return rc;
4947 }
4948
4949 /**
4950  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4951  * @phba: pointer to lpfc hba data structure.
4952  *
4953  * This routine is called to explicitly arm the SLI4 device's completion and
4954  * event queues
4955  **/
4956 static void
4957 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4958 {
4959         int fcp_eqidx;
4960
4961         lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4962         lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4963         fcp_eqidx = 0;
4964         if (phba->sli4_hba.fcp_cq) {
4965                 do {
4966                         lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4967                                              LPFC_QUEUE_REARM);
4968                 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4969         }
4970         if (phba->sli4_hba.hba_eq) {
4971                 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4972                      fcp_eqidx++)
4973                         lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4974                                              LPFC_QUEUE_REARM);
4975         }
4976 }
4977
4978 /**
4979  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4980  * @phba: Pointer to HBA context object.
4981  * @type: The resource extent type.
4982  * @extnt_count: buffer to hold port available extent count.
4983  * @extnt_size: buffer to hold element count per extent.
4984  *
4985  * This function calls the port and retrievs the number of available
4986  * extents and their size for a particular extent type.
4987  *
4988  * Returns: 0 if successful.  Nonzero otherwise.
4989  **/
4990 int
4991 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4992                                uint16_t *extnt_count, uint16_t *extnt_size)
4993 {
4994         int rc = 0;
4995         uint32_t length;
4996         uint32_t mbox_tmo;
4997         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4998         LPFC_MBOXQ_t *mbox;
4999
5000         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5001         if (!mbox)
5002                 return -ENOMEM;
5003
5004         /* Find out how many extents are available for this resource type */
5005         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5006                   sizeof(struct lpfc_sli4_cfg_mhdr));
5007         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5008                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5009                          length, LPFC_SLI4_MBX_EMBED);
5010
5011         /* Send an extents count of 0 - the GET doesn't use it. */
5012         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5013                                         LPFC_SLI4_MBX_EMBED);
5014         if (unlikely(rc)) {
5015                 rc = -EIO;
5016                 goto err_exit;
5017         }
5018
5019         if (!phba->sli4_hba.intr_enable)
5020                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5021         else {
5022                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5023                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5024         }
5025         if (unlikely(rc)) {
5026                 rc = -EIO;
5027                 goto err_exit;
5028         }
5029
5030         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5031         if (bf_get(lpfc_mbox_hdr_status,
5032                    &rsrc_info->header.cfg_shdr.response)) {
5033                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5034                                 "2930 Failed to get resource extents "
5035                                 "Status 0x%x Add'l Status 0x%x\n",
5036                                 bf_get(lpfc_mbox_hdr_status,
5037                                        &rsrc_info->header.cfg_shdr.response),
5038                                 bf_get(lpfc_mbox_hdr_add_status,
5039                                        &rsrc_info->header.cfg_shdr.response));
5040                 rc = -EIO;
5041                 goto err_exit;
5042         }
5043
5044         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5045                               &rsrc_info->u.rsp);
5046         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5047                              &rsrc_info->u.rsp);
5048
5049         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5050                         "3162 Retrieved extents type-%d from port: count:%d, "
5051                         "size:%d\n", type, *extnt_count, *extnt_size);
5052
5053 err_exit:
5054         mempool_free(mbox, phba->mbox_mem_pool);
5055         return rc;
5056 }
5057
5058 /**
5059  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5060  * @phba: Pointer to HBA context object.
5061  * @type: The extent type to check.
5062  *
5063  * This function reads the current available extents from the port and checks
5064  * if the extent count or extent size has changed since the last access.
5065  * Callers use this routine post port reset to understand if there is a
5066  * extent reprovisioning requirement.
5067  *
5068  * Returns:
5069  *   -Error: error indicates problem.
5070  *   1: Extent count or size has changed.
5071  *   0: No changes.
5072  **/
5073 static int
5074 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5075 {
5076         uint16_t curr_ext_cnt, rsrc_ext_cnt;
5077         uint16_t size_diff, rsrc_ext_size;
5078         int rc = 0;
5079         struct lpfc_rsrc_blks *rsrc_entry;
5080         struct list_head *rsrc_blk_list = NULL;
5081
5082         size_diff = 0;
5083         curr_ext_cnt = 0;
5084         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5085                                             &rsrc_ext_cnt,
5086                                             &rsrc_ext_size);
5087         if (unlikely(rc))
5088                 return -EIO;
5089
5090         switch (type) {
5091         case LPFC_RSC_TYPE_FCOE_RPI:
5092                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5093                 break;
5094         case LPFC_RSC_TYPE_FCOE_VPI:
5095                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5096                 break;
5097         case LPFC_RSC_TYPE_FCOE_XRI:
5098                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5099                 break;
5100         case LPFC_RSC_TYPE_FCOE_VFI:
5101                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5102                 break;
5103         default:
5104                 break;
5105         }
5106
5107         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5108                 curr_ext_cnt++;
5109                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5110                         size_diff++;
5111         }
5112
5113         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5114                 rc = 1;
5115
5116         return rc;
5117 }
5118
5119 /**
5120  * lpfc_sli4_cfg_post_extnts -
5121  * @phba: Pointer to HBA context object.
5122  * @extnt_cnt - number of available extents.
5123  * @type - the extent type (rpi, xri, vfi, vpi).
5124  * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5125  * @mbox - pointer to the caller's allocated mailbox structure.
5126  *
5127  * This function executes the extents allocation request.  It also
5128  * takes care of the amount of memory needed to allocate or get the
5129  * allocated extents. It is the caller's responsibility to evaluate
5130  * the response.
5131  *
5132  * Returns:
5133  *   -Error:  Error value describes the condition found.
5134  *   0: if successful
5135  **/
5136 static int
5137 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5138                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5139 {
5140         int rc = 0;
5141         uint32_t req_len;
5142         uint32_t emb_len;
5143         uint32_t alloc_len, mbox_tmo;
5144
5145         /* Calculate the total requested length of the dma memory */
5146         req_len = extnt_cnt * sizeof(uint16_t);
5147
5148         /*
5149          * Calculate the size of an embedded mailbox.  The uint32_t
5150          * accounts for extents-specific word.
5151          */
5152         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5153                 sizeof(uint32_t);
5154
5155         /*
5156          * Presume the allocation and response will fit into an embedded
5157          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5158          */
5159         *emb = LPFC_SLI4_MBX_EMBED;
5160         if (req_len > emb_len) {
5161                 req_len = extnt_cnt * sizeof(uint16_t) +
5162                         sizeof(union lpfc_sli4_cfg_shdr) +
5163                         sizeof(uint32_t);
5164                 *emb = LPFC_SLI4_MBX_NEMBED;
5165         }
5166
5167         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5168                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5169                                      req_len, *emb);
5170         if (alloc_len < req_len) {
5171                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5172                         "2982 Allocated DMA memory size (x%x) is "
5173                         "less than the requested DMA memory "
5174                         "size (x%x)\n", alloc_len, req_len);
5175                 return -ENOMEM;
5176         }
5177         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5178         if (unlikely(rc))
5179                 return -EIO;
5180
5181         if (!phba->sli4_hba.intr_enable)
5182                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5183         else {
5184                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5185                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5186         }
5187
5188         if (unlikely(rc))
5189                 rc = -EIO;
5190         return rc;
5191 }
5192
5193 /**
5194  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5195  * @phba: Pointer to HBA context object.
5196  * @type:  The resource extent type to allocate.
5197  *
5198  * This function allocates the number of elements for the specified
5199  * resource type.
5200  **/
5201 static int
5202 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5203 {
5204         bool emb = false;
5205         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5206         uint16_t rsrc_id, rsrc_start, j, k;
5207         uint16_t *ids;
5208         int i, rc;
5209         unsigned long longs;
5210         unsigned long *bmask;
5211         struct lpfc_rsrc_blks *rsrc_blks;
5212         LPFC_MBOXQ_t *mbox;
5213         uint32_t length;
5214         struct lpfc_id_range *id_array = NULL;
5215         void *virtaddr = NULL;
5216         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5217         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5218         struct list_head *ext_blk_list;
5219
5220         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5221                                             &rsrc_cnt,
5222                                             &rsrc_size);
5223         if (unlikely(rc))
5224                 return -EIO;
5225
5226         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5227                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5228                         "3009 No available Resource Extents "
5229                         "for resource type 0x%x: Count: 0x%x, "
5230                         "Size 0x%x\n", type, rsrc_cnt,
5231                         rsrc_size);
5232                 return -ENOMEM;
5233         }
5234
5235         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5236                         "2903 Post resource extents type-0x%x: "
5237                         "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5238
5239         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5240         if (!mbox)
5241                 return -ENOMEM;
5242
5243         rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5244         if (unlikely(rc)) {
5245                 rc = -EIO;
5246                 goto err_exit;
5247         }
5248
5249         /*
5250          * Figure out where the response is located.  Then get local pointers
5251          * to the response data.  The port does not guarantee to respond to
5252          * all extents counts request so update the local variable with the
5253          * allocated count from the port.
5254          */
5255         if (emb == LPFC_SLI4_MBX_EMBED) {
5256                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5257                 id_array = &rsrc_ext->u.rsp.id[0];
5258                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5259         } else {
5260                 virtaddr = mbox->sge_array->addr[0];
5261                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5262                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5263                 id_array = &n_rsrc->id;
5264         }
5265
5266         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5267         rsrc_id_cnt = rsrc_cnt * rsrc_size;
5268
5269         /*
5270          * Based on the resource size and count, correct the base and max
5271          * resource values.
5272          */
5273         length = sizeof(struct lpfc_rsrc_blks);
5274         switch (type) {
5275         case LPFC_RSC_TYPE_FCOE_RPI:
5276                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5277                                                    sizeof(unsigned long),
5278                                                    GFP_KERNEL);
5279                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5280                         rc = -ENOMEM;
5281                         goto err_exit;
5282                 }
5283                 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5284                                                  sizeof(uint16_t),
5285                                                  GFP_KERNEL);
5286                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5287                         kfree(phba->sli4_hba.rpi_bmask);
5288                         rc = -ENOMEM;
5289                         goto err_exit;
5290                 }
5291
5292                 /*
5293                  * The next_rpi was initialized with the maximum available
5294                  * count but the port may allocate a smaller number.  Catch
5295                  * that case and update the next_rpi.
5296                  */
5297                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5298
5299                 /* Initialize local ptrs for common extent processing later. */
5300                 bmask = phba->sli4_hba.rpi_bmask;
5301                 ids = phba->sli4_hba.rpi_ids;
5302                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5303                 break;
5304         case LPFC_RSC_TYPE_FCOE_VPI:
5305                 phba->vpi_bmask = kzalloc(longs *
5306                                           sizeof(unsigned long),
5307                                           GFP_KERNEL);
5308                 if (unlikely(!phba->vpi_bmask)) {
5309                         rc = -ENOMEM;
5310                         goto err_exit;
5311                 }
5312                 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5313                                          sizeof(uint16_t),
5314                                          GFP_KERNEL);
5315                 if (unlikely(!phba->vpi_ids)) {
5316                         kfree(phba->vpi_bmask);
5317                         rc = -ENOMEM;
5318                         goto err_exit;
5319                 }
5320
5321                 /* Initialize local ptrs for common extent processing later. */
5322                 bmask = phba->vpi_bmask;
5323                 ids = phba->vpi_ids;
5324                 ext_blk_list = &phba->lpfc_vpi_blk_list;
5325                 break;
5326         case LPFC_RSC_TYPE_FCOE_XRI:
5327                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5328                                                    sizeof(unsigned long),
5329                                                    GFP_KERNEL);
5330                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5331                         rc = -ENOMEM;
5332                         goto err_exit;
5333                 }
5334                 phba->sli4_hba.max_cfg_param.xri_used = 0;
5335                 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5336                                                  sizeof(uint16_t),
5337                                                  GFP_KERNEL);
5338                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5339                         kfree(phba->sli4_hba.xri_bmask);
5340                         rc = -ENOMEM;
5341                         goto err_exit;
5342                 }
5343
5344                 /* Initialize local ptrs for common extent processing later. */
5345                 bmask = phba->sli4_hba.xri_bmask;
5346                 ids = phba->sli4_hba.xri_ids;
5347                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5348                 break;
5349         case LPFC_RSC_TYPE_FCOE_VFI:
5350                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5351                                                    sizeof(unsigned long),
5352                                                    GFP_KERNEL);
5353                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5354                         rc = -ENOMEM;
5355                         goto err_exit;
5356                 }
5357                 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5358                                                  sizeof(uint16_t),
5359                                                  GFP_KERNEL);
5360                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5361                         kfree(phba->sli4_hba.vfi_bmask);
5362                         rc = -ENOMEM;
5363                         goto err_exit;
5364                 }
5365
5366                 /* Initialize local ptrs for common extent processing later. */
5367                 bmask = phba->sli4_hba.vfi_bmask;
5368                 ids = phba->sli4_hba.vfi_ids;
5369                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5370                 break;
5371         default:
5372                 /* Unsupported Opcode.  Fail call. */
5373                 id_array = NULL;
5374                 bmask = NULL;
5375                 ids = NULL;
5376                 ext_blk_list = NULL;
5377                 goto err_exit;
5378         }
5379
5380         /*
5381          * Complete initializing the extent configuration with the
5382          * allocated ids assigned to this function.  The bitmask serves
5383          * as an index into the array and manages the available ids.  The
5384          * array just stores the ids communicated to the port via the wqes.
5385          */
5386         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5387                 if ((i % 2) == 0)
5388                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5389                                          &id_array[k]);
5390                 else
5391                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5392                                          &id_array[k]);
5393
5394                 rsrc_blks = kzalloc(length, GFP_KERNEL);
5395                 if (unlikely(!rsrc_blks)) {
5396                         rc = -ENOMEM;
5397                         kfree(bmask);
5398                         kfree(ids);
5399                         goto err_exit;
5400                 }
5401                 rsrc_blks->rsrc_start = rsrc_id;
5402                 rsrc_blks->rsrc_size = rsrc_size;
5403                 list_add_tail(&rsrc_blks->list, ext_blk_list);
5404                 rsrc_start = rsrc_id;
5405                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5406                         phba->sli4_hba.scsi_xri_start = rsrc_start +
5407                                 lpfc_sli4_get_els_iocb_cnt(phba);
5408
5409                 while (rsrc_id < (rsrc_start + rsrc_size)) {
5410                         ids[j] = rsrc_id;
5411                         rsrc_id++;
5412                         j++;
5413                 }
5414                 /* Entire word processed.  Get next word.*/
5415                 if ((i % 2) == 1)
5416                         k++;
5417         }
5418  err_exit:
5419         lpfc_sli4_mbox_cmd_free(phba, mbox);
5420         return rc;
5421 }
5422
5423 /**
5424  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5425  * @phba: Pointer to HBA context object.
5426  * @type: the extent's type.
5427  *
5428  * This function deallocates all extents of a particular resource type.
5429  * SLI4 does not allow for deallocating a particular extent range.  It
5430  * is the caller's responsibility to release all kernel memory resources.
5431  **/
5432 static int
5433 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5434 {
5435         int rc;
5436         uint32_t length, mbox_tmo = 0;
5437         LPFC_MBOXQ_t *mbox;
5438         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5439         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5440
5441         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5442         if (!mbox)
5443                 return -ENOMEM;
5444
5445         /*
5446          * This function sends an embedded mailbox because it only sends the
5447          * the resource type.  All extents of this type are released by the
5448          * port.
5449          */
5450         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5451                   sizeof(struct lpfc_sli4_cfg_mhdr));
5452         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5453                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5454                          length, LPFC_SLI4_MBX_EMBED);
5455
5456         /* Send an extents count of 0 - the dealloc doesn't use it. */
5457         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5458                                         LPFC_SLI4_MBX_EMBED);
5459         if (unlikely(rc)) {
5460                 rc = -EIO;
5461                 goto out_free_mbox;
5462         }
5463         if (!phba->sli4_hba.intr_enable)
5464                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5465         else {
5466                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5467                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5468         }
5469         if (unlikely(rc)) {
5470                 rc = -EIO;
5471                 goto out_free_mbox;
5472         }
5473
5474         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5475         if (bf_get(lpfc_mbox_hdr_status,
5476                    &dealloc_rsrc->header.cfg_shdr.response)) {
5477                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5478                                 "2919 Failed to release resource extents "
5479                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
5480                                 "Resource memory not released.\n",
5481                                 type,
5482                                 bf_get(lpfc_mbox_hdr_status,
5483                                     &dealloc_rsrc->header.cfg_shdr.response),
5484                                 bf_get(lpfc_mbox_hdr_add_status,
5485                                     &dealloc_rsrc->header.cfg_shdr.response));
5486                 rc = -EIO;
5487                 goto out_free_mbox;
5488         }
5489
5490         /* Release kernel memory resources for the specific type. */
5491         switch (type) {
5492         case LPFC_RSC_TYPE_FCOE_VPI:
5493                 kfree(phba->vpi_bmask);
5494                 kfree(phba->vpi_ids);
5495                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5496                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5497                                     &phba->lpfc_vpi_blk_list, list) {
5498                         list_del_init(&rsrc_blk->list);
5499                         kfree(rsrc_blk);
5500                 }
5501                 break;
5502         case LPFC_RSC_TYPE_FCOE_XRI:
5503                 kfree(phba->sli4_hba.xri_bmask);
5504                 kfree(phba->sli4_hba.xri_ids);
5505                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5506                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
5507                         list_del_init(&rsrc_blk->list);
5508                         kfree(rsrc_blk);
5509                 }
5510                 break;
5511         case LPFC_RSC_TYPE_FCOE_VFI:
5512                 kfree(phba->sli4_hba.vfi_bmask);
5513                 kfree(phba->sli4_hba.vfi_ids);
5514                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5515                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5516                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5517                         list_del_init(&rsrc_blk->list);
5518                         kfree(rsrc_blk);
5519                 }
5520                 break;
5521         case LPFC_RSC_TYPE_FCOE_RPI:
5522                 /* RPI bitmask and physical id array are cleaned up earlier. */
5523                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5524                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5525                         list_del_init(&rsrc_blk->list);
5526                         kfree(rsrc_blk);
5527                 }
5528                 break;
5529         default:
5530                 break;
5531         }
5532
5533         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5534
5535  out_free_mbox:
5536         mempool_free(mbox, phba->mbox_mem_pool);
5537         return rc;
5538 }
5539
5540 /**
5541  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5542  * @phba: Pointer to HBA context object.
5543  *
5544  * This function allocates all SLI4 resource identifiers.
5545  **/
5546 int
5547 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5548 {
5549         int i, rc, error = 0;
5550         uint16_t count, base;
5551         unsigned long longs;
5552
5553         if (!phba->sli4_hba.rpi_hdrs_in_use)
5554                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5555         if (phba->sli4_hba.extents_in_use) {
5556                 /*
5557                  * The port supports resource extents. The XRI, VPI, VFI, RPI
5558                  * resource extent count must be read and allocated before
5559                  * provisioning the resource id arrays.
5560                  */
5561                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5562                     LPFC_IDX_RSRC_RDY) {
5563                         /*
5564                          * Extent-based resources are set - the driver could
5565                          * be in a port reset. Figure out if any corrective
5566                          * actions need to be taken.
5567                          */
5568                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5569                                                  LPFC_RSC_TYPE_FCOE_VFI);
5570                         if (rc != 0)
5571                                 error++;
5572                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5573                                                  LPFC_RSC_TYPE_FCOE_VPI);
5574                         if (rc != 0)
5575                                 error++;
5576                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5577                                                  LPFC_RSC_TYPE_FCOE_XRI);
5578                         if (rc != 0)
5579                                 error++;
5580                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5581                                                  LPFC_RSC_TYPE_FCOE_RPI);
5582                         if (rc != 0)
5583                                 error++;
5584
5585                         /*
5586                          * It's possible that the number of resources
5587                          * provided to this port instance changed between
5588                          * resets.  Detect this condition and reallocate
5589                          * resources.  Otherwise, there is no action.
5590                          */
5591                         if (error) {
5592                                 lpfc_printf_log(phba, KERN_INFO,
5593                                                 LOG_MBOX | LOG_INIT,
5594                                                 "2931 Detected extent resource "
5595                                                 "change.  Reallocating all "
5596                                                 "extents.\n");
5597                                 rc = lpfc_sli4_dealloc_extent(phba,
5598                                                  LPFC_RSC_TYPE_FCOE_VFI);
5599                                 rc = lpfc_sli4_dealloc_extent(phba,
5600                                                  LPFC_RSC_TYPE_FCOE_VPI);
5601                                 rc = lpfc_sli4_dealloc_extent(phba,
5602                                                  LPFC_RSC_TYPE_FCOE_XRI);
5603                                 rc = lpfc_sli4_dealloc_extent(phba,
5604                                                  LPFC_RSC_TYPE_FCOE_RPI);
5605                         } else
5606                                 return 0;
5607                 }
5608
5609                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5610                 if (unlikely(rc))
5611                         goto err_exit;
5612
5613                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5614                 if (unlikely(rc))
5615                         goto err_exit;
5616
5617                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5618                 if (unlikely(rc))
5619                         goto err_exit;
5620
5621                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5622                 if (unlikely(rc))
5623                         goto err_exit;
5624                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5625                        LPFC_IDX_RSRC_RDY);
5626                 return rc;
5627         } else {
5628                 /*
5629                  * The port does not support resource extents.  The XRI, VPI,
5630                  * VFI, RPI resource ids were determined from READ_CONFIG.
5631                  * Just allocate the bitmasks and provision the resource id
5632                  * arrays.  If a port reset is active, the resources don't
5633                  * need any action - just exit.
5634                  */
5635                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5636                     LPFC_IDX_RSRC_RDY) {
5637                         lpfc_sli4_dealloc_resource_identifiers(phba);
5638                         lpfc_sli4_remove_rpis(phba);
5639                 }
5640                 /* RPIs. */
5641                 count = phba->sli4_hba.max_cfg_param.max_rpi;
5642                 if (count <= 0) {
5643                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5644                                         "3279 Invalid provisioning of "
5645                                         "rpi:%d\n", count);
5646                         rc = -EINVAL;
5647                         goto err_exit;
5648                 }
5649                 base = phba->sli4_hba.max_cfg_param.rpi_base;
5650                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5651                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5652                                                    sizeof(unsigned long),
5653                                                    GFP_KERNEL);
5654                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5655                         rc = -ENOMEM;
5656                         goto err_exit;
5657                 }
5658                 phba->sli4_hba.rpi_ids = kzalloc(count *
5659                                                  sizeof(uint16_t),
5660                                                  GFP_KERNEL);
5661                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5662                         rc = -ENOMEM;
5663                         goto free_rpi_bmask;
5664                 }
5665
5666                 for (i = 0; i < count; i++)
5667                         phba->sli4_hba.rpi_ids[i] = base + i;
5668
5669                 /* VPIs. */
5670                 count = phba->sli4_hba.max_cfg_param.max_vpi;
5671                 if (count <= 0) {
5672                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5673                                         "3280 Invalid provisioning of "
5674                                         "vpi:%d\n", count);
5675                         rc = -EINVAL;
5676                         goto free_rpi_ids;
5677                 }
5678                 base = phba->sli4_hba.max_cfg_param.vpi_base;
5679                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5680                 phba->vpi_bmask = kzalloc(longs *
5681                                           sizeof(unsigned long),
5682                                           GFP_KERNEL);
5683                 if (unlikely(!phba->vpi_bmask)) {
5684                         rc = -ENOMEM;
5685                         goto free_rpi_ids;
5686                 }
5687                 phba->vpi_ids = kzalloc(count *
5688                                         sizeof(uint16_t),
5689                                         GFP_KERNEL);
5690                 if (unlikely(!phba->vpi_ids)) {
5691                         rc = -ENOMEM;
5692                         goto free_vpi_bmask;
5693                 }
5694
5695                 for (i = 0; i < count; i++)
5696                         phba->vpi_ids[i] = base + i;
5697
5698                 /* XRIs. */
5699                 count = phba->sli4_hba.max_cfg_param.max_xri;
5700                 if (count <= 0) {
5701                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5702                                         "3281 Invalid provisioning of "
5703                                         "xri:%d\n", count);
5704                         rc = -EINVAL;
5705                         goto free_vpi_ids;
5706                 }
5707                 base = phba->sli4_hba.max_cfg_param.xri_base;
5708                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5709                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5710                                                    sizeof(unsigned long),
5711                                                    GFP_KERNEL);
5712                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5713                         rc = -ENOMEM;
5714                         goto free_vpi_ids;
5715                 }
5716                 phba->sli4_hba.max_cfg_param.xri_used = 0;
5717                 phba->sli4_hba.xri_ids = kzalloc(count *
5718                                                  sizeof(uint16_t),
5719                                                  GFP_KERNEL);
5720                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5721                         rc = -ENOMEM;
5722                         goto free_xri_bmask;
5723                 }
5724
5725                 for (i = 0; i < count; i++)
5726                         phba->sli4_hba.xri_ids[i] = base + i;
5727
5728                 /* VFIs. */
5729                 count = phba->sli4_hba.max_cfg_param.max_vfi;
5730                 if (count <= 0) {
5731                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5732                                         "3282 Invalid provisioning of "
5733                                         "vfi:%d\n", count);
5734                         rc = -EINVAL;
5735                         goto free_xri_ids;
5736                 }
5737                 base = phba->sli4_hba.max_cfg_param.vfi_base;
5738                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5739                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5740                                                    sizeof(unsigned long),
5741                                                    GFP_KERNEL);
5742                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5743                         rc = -ENOMEM;
5744                         goto free_xri_ids;
5745                 }
5746                 phba->sli4_hba.vfi_ids = kzalloc(count *
5747                                                  sizeof(uint16_t),
5748                                                  GFP_KERNEL);
5749                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5750                         rc = -ENOMEM;
5751                         goto free_vfi_bmask;
5752                 }
5753
5754                 for (i = 0; i < count; i++)
5755                         phba->sli4_hba.vfi_ids[i] = base + i;
5756
5757                 /*
5758                  * Mark all resources ready.  An HBA reset doesn't need
5759                  * to reset the initialization.
5760                  */
5761                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5762                        LPFC_IDX_RSRC_RDY);
5763                 return 0;
5764         }
5765
5766  free_vfi_bmask:
5767         kfree(phba->sli4_hba.vfi_bmask);
5768  free_xri_ids:
5769         kfree(phba->sli4_hba.xri_ids);
5770  free_xri_bmask:
5771         kfree(phba->sli4_hba.xri_bmask);
5772  free_vpi_ids:
5773         kfree(phba->vpi_ids);
5774  free_vpi_bmask:
5775         kfree(phba->vpi_bmask);
5776  free_rpi_ids:
5777         kfree(phba->sli4_hba.rpi_ids);
5778  free_rpi_bmask:
5779         kfree(phba->sli4_hba.rpi_bmask);
5780  err_exit:
5781         return rc;
5782 }
5783
5784 /**
5785  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5786  * @phba: Pointer to HBA context object.
5787  *
5788  * This function allocates the number of elements for the specified
5789  * resource type.
5790  **/
5791 int
5792 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5793 {
5794         if (phba->sli4_hba.extents_in_use) {
5795                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5796                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5797                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5798                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5799         } else {
5800                 kfree(phba->vpi_bmask);
5801                 kfree(phba->vpi_ids);
5802                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5803                 kfree(phba->sli4_hba.xri_bmask);
5804                 kfree(phba->sli4_hba.xri_ids);
5805                 kfree(phba->sli4_hba.vfi_bmask);
5806                 kfree(phba->sli4_hba.vfi_ids);
5807                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5808                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5809         }
5810
5811         return 0;
5812 }
5813
5814 /**
5815  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5816  * @phba: Pointer to HBA context object.
5817  * @type: The resource extent type.
5818  * @extnt_count: buffer to hold port extent count response
5819  * @extnt_size: buffer to hold port extent size response.
5820  *
5821  * This function calls the port to read the host allocated extents
5822  * for a particular type.
5823  **/
5824 int
5825 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5826                                uint16_t *extnt_cnt, uint16_t *extnt_size)
5827 {
5828         bool emb;
5829         int rc = 0;
5830         uint16_t curr_blks = 0;
5831         uint32_t req_len, emb_len;
5832         uint32_t alloc_len, mbox_tmo;
5833         struct list_head *blk_list_head;
5834         struct lpfc_rsrc_blks *rsrc_blk;
5835         LPFC_MBOXQ_t *mbox;
5836         void *virtaddr = NULL;
5837         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5838         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5839         union  lpfc_sli4_cfg_shdr *shdr;
5840
5841         switch (type) {
5842         case LPFC_RSC_TYPE_FCOE_VPI:
5843                 blk_list_head = &phba->lpfc_vpi_blk_list;
5844                 break;
5845         case LPFC_RSC_TYPE_FCOE_XRI:
5846                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5847                 break;
5848         case LPFC_RSC_TYPE_FCOE_VFI:
5849                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5850                 break;
5851         case LPFC_RSC_TYPE_FCOE_RPI:
5852                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5853                 break;
5854         default:
5855                 return -EIO;
5856         }
5857
5858         /* Count the number of extents currently allocatd for this type. */
5859         list_for_each_entry(rsrc_blk, blk_list_head, list) {
5860                 if (curr_blks == 0) {
5861                         /*
5862                          * The GET_ALLOCATED mailbox does not return the size,
5863                          * just the count.  The size should be just the size
5864                          * stored in the current allocated block and all sizes
5865                          * for an extent type are the same so set the return
5866                          * value now.
5867                          */
5868                         *extnt_size = rsrc_blk->rsrc_size;
5869                 }
5870                 curr_blks++;
5871         }
5872
5873         /* Calculate the total requested length of the dma memory. */
5874         req_len = curr_blks * sizeof(uint16_t);
5875
5876         /*
5877          * Calculate the size of an embedded mailbox.  The uint32_t
5878          * accounts for extents-specific word.
5879          */
5880         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5881                 sizeof(uint32_t);
5882
5883         /*
5884          * Presume the allocation and response will fit into an embedded
5885          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5886          */
5887         emb = LPFC_SLI4_MBX_EMBED;
5888         req_len = emb_len;
5889         if (req_len > emb_len) {
5890                 req_len = curr_blks * sizeof(uint16_t) +
5891                         sizeof(union lpfc_sli4_cfg_shdr) +
5892                         sizeof(uint32_t);
5893                 emb = LPFC_SLI4_MBX_NEMBED;
5894         }
5895
5896         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5897         if (!mbox)
5898                 return -ENOMEM;
5899         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5900
5901         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5902                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5903                                      req_len, emb);
5904         if (alloc_len < req_len) {
5905                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5906                         "2983 Allocated DMA memory size (x%x) is "
5907                         "less than the requested DMA memory "
5908                         "size (x%x)\n", alloc_len, req_len);
5909                 rc = -ENOMEM;
5910                 goto err_exit;
5911         }
5912         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5913         if (unlikely(rc)) {
5914                 rc = -EIO;
5915                 goto err_exit;
5916         }
5917
5918         if (!phba->sli4_hba.intr_enable)
5919                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5920         else {
5921                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5922                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5923         }
5924
5925         if (unlikely(rc)) {
5926                 rc = -EIO;
5927                 goto err_exit;
5928         }
5929
5930         /*
5931          * Figure out where the response is located.  Then get local pointers
5932          * to the response data.  The port does not guarantee to respond to
5933          * all extents counts request so update the local variable with the
5934          * allocated count from the port.
5935          */
5936         if (emb == LPFC_SLI4_MBX_EMBED) {
5937                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5938                 shdr = &rsrc_ext->header.cfg_shdr;
5939                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5940         } else {
5941                 virtaddr = mbox->sge_array->addr[0];
5942                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5943                 shdr = &n_rsrc->cfg_shdr;
5944                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5945         }
5946
5947         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5948                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5949                         "2984 Failed to read allocated resources "
5950                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5951                         type,
5952                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
5953                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5954                 rc = -EIO;
5955                 goto err_exit;
5956         }
5957  err_exit:
5958         lpfc_sli4_mbox_cmd_free(phba, mbox);
5959         return rc;
5960 }
5961
5962 /**
5963  * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block
5964  * @phba: pointer to lpfc hba data structure.
5965  *
5966  * This routine walks the list of els buffers that have been allocated and
5967  * repost them to the port by using SGL block post. This is needed after a
5968  * pci_function_reset/warm_start or start. It attempts to construct blocks
5969  * of els buffer sgls which contains contiguous xris and uses the non-embedded
5970  * SGL block post mailbox commands to post them to the port. For single els
5971  * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
5972  * mailbox command for posting.
5973  *
5974  * Returns: 0 = success, non-zero failure.
5975  **/
5976 static int
5977 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5978 {
5979         struct lpfc_sglq *sglq_entry = NULL;
5980         struct lpfc_sglq *sglq_entry_next = NULL;
5981         struct lpfc_sglq *sglq_entry_first = NULL;
5982         int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
5983         int last_xritag = NO_XRI;
5984         LIST_HEAD(prep_sgl_list);
5985         LIST_HEAD(blck_sgl_list);
5986         LIST_HEAD(allc_sgl_list);
5987         LIST_HEAD(post_sgl_list);
5988         LIST_HEAD(free_sgl_list);
5989
5990         spin_lock(&phba->hbalock);
5991         list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5992         spin_unlock(&phba->hbalock);
5993
5994         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5995                                  &allc_sgl_list, list) {
5996                 list_del_init(&sglq_entry->list);
5997                 block_cnt++;
5998                 if ((last_xritag != NO_XRI) &&
5999                     (sglq_entry->sli4_xritag != last_xritag + 1)) {
6000                         /* a hole in xri block, form a sgl posting block */
6001                         list_splice_init(&prep_sgl_list, &blck_sgl_list);
6002                         post_cnt = block_cnt - 1;
6003                         /* prepare list for next posting block */
6004                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
6005                         block_cnt = 1;
6006                 } else {
6007                         /* prepare list for next posting block */
6008                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
6009                         /* enough sgls for non-embed sgl mbox command */
6010                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6011                                 list_splice_init(&prep_sgl_list,
6012                                                  &blck_sgl_list);
6013                                 post_cnt = block_cnt;
6014                                 block_cnt = 0;
6015                         }
6016                 }
6017                 num_posted++;
6018
6019                 /* keep track of last sgl's xritag */
6020                 last_xritag = sglq_entry->sli4_xritag;
6021
6022                 /* end of repost sgl list condition for els buffers */
6023                 if (num_posted == phba->sli4_hba.els_xri_cnt) {
6024                         if (post_cnt == 0) {
6025                                 list_splice_init(&prep_sgl_list,
6026                                                  &blck_sgl_list);
6027                                 post_cnt = block_cnt;
6028                         } else if (block_cnt == 1) {
6029                                 status = lpfc_sli4_post_sgl(phba,
6030                                                 sglq_entry->phys, 0,
6031                                                 sglq_entry->sli4_xritag);
6032                                 if (!status) {
6033                                         /* successful, put sgl to posted list */
6034                                         list_add_tail(&sglq_entry->list,
6035                                                       &post_sgl_list);
6036                                 } else {
6037                                         /* Failure, put sgl to free list */
6038                                         lpfc_printf_log(phba, KERN_WARNING,
6039                                                 LOG_SLI,
6040                                                 "3159 Failed to post els "
6041                                                 "sgl, xritag:x%x\n",
6042                                                 sglq_entry->sli4_xritag);
6043                                         list_add_tail(&sglq_entry->list,
6044                                                       &free_sgl_list);
6045                                         spin_lock_irq(&phba->hbalock);
6046                                         phba->sli4_hba.els_xri_cnt--;
6047                                         spin_unlock_irq(&phba->hbalock);
6048                                 }
6049                         }
6050                 }
6051
6052                 /* continue until a nembed page worth of sgls */
6053                 if (post_cnt == 0)
6054                         continue;
6055
6056                 /* post the els buffer list sgls as a block */
6057                 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6058                                                      post_cnt);
6059
6060                 if (!status) {
6061                         /* success, put sgl list to posted sgl list */
6062                         list_splice_init(&blck_sgl_list, &post_sgl_list);
6063                 } else {
6064                         /* Failure, put sgl list to free sgl list */
6065                         sglq_entry_first = list_first_entry(&blck_sgl_list,
6066                                                             struct lpfc_sglq,
6067                                                             list);
6068                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6069                                         "3160 Failed to post els sgl-list, "
6070                                         "xritag:x%x-x%x\n",
6071                                         sglq_entry_first->sli4_xritag,
6072                                         (sglq_entry_first->sli4_xritag +
6073                                          post_cnt - 1));
6074                         list_splice_init(&blck_sgl_list, &free_sgl_list);
6075                         spin_lock_irq(&phba->hbalock);
6076                         phba->sli4_hba.els_xri_cnt -= post_cnt;
6077                         spin_unlock_irq(&phba->hbalock);
6078                 }
6079
6080                 /* don't reset xirtag due to hole in xri block */
6081                 if (block_cnt == 0)
6082                         last_xritag = NO_XRI;
6083
6084                 /* reset els sgl post count for next round of posting */
6085                 post_cnt = 0;
6086         }
6087
6088         /* free the els sgls failed to post */
6089         lpfc_free_sgl_list(phba, &free_sgl_list);
6090
6091         /* push els sgls posted to the availble list */
6092         if (!list_empty(&post_sgl_list)) {
6093                 spin_lock(&phba->hbalock);
6094                 list_splice_init(&post_sgl_list,
6095                                  &phba->sli4_hba.lpfc_sgl_list);
6096                 spin_unlock(&phba->hbalock);
6097         } else {
6098                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6099                                 "3161 Failure to post els sgl to port.\n");
6100                 return -EIO;
6101         }
6102         return 0;
6103 }
6104
6105 /**
6106  * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
6107  * @phba: Pointer to HBA context object.
6108  *
6109  * This function is the main SLI4 device intialization PCI function. This
6110  * function is called by the HBA intialization code, HBA reset code and
6111  * HBA error attention handler code. Caller is not required to hold any
6112  * locks.
6113  **/
6114 int
6115 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6116 {
6117         int rc;
6118         LPFC_MBOXQ_t *mboxq;
6119         struct lpfc_mqe *mqe;
6120         uint8_t *vpd;
6121         uint32_t vpd_size;
6122         uint32_t ftr_rsp = 0;
6123         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6124         struct lpfc_vport *vport = phba->pport;
6125         struct lpfc_dmabuf *mp;
6126
6127         /* Perform a PCI function reset to start from clean */
6128         rc = lpfc_pci_function_reset(phba);
6129         if (unlikely(rc))
6130                 return -ENODEV;
6131
6132         /* Check the HBA Host Status Register for readyness */
6133         rc = lpfc_sli4_post_status_check(phba);
6134         if (unlikely(rc))
6135                 return -ENODEV;
6136         else {
6137                 spin_lock_irq(&phba->hbalock);
6138                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6139                 spin_unlock_irq(&phba->hbalock);
6140         }
6141
6142         /*
6143          * Allocate a single mailbox container for initializing the
6144          * port.
6145          */
6146         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6147         if (!mboxq)
6148                 return -ENOMEM;
6149
6150         /* Issue READ_REV to collect vpd and FW information. */
6151         vpd_size = SLI4_PAGE_SIZE;
6152         vpd = kzalloc(vpd_size, GFP_KERNEL);
6153         if (!vpd) {
6154                 rc = -ENOMEM;
6155                 goto out_free_mbox;
6156         }
6157
6158         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6159         if (unlikely(rc)) {
6160                 kfree(vpd);
6161                 goto out_free_mbox;
6162         }
6163         mqe = &mboxq->u.mqe;
6164         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6165         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
6166                 phba->hba_flag |= HBA_FCOE_MODE;
6167         else
6168                 phba->hba_flag &= ~HBA_FCOE_MODE;
6169
6170         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6171                 LPFC_DCBX_CEE_MODE)
6172                 phba->hba_flag |= HBA_FIP_SUPPORT;
6173         else
6174                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6175
6176         phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6177
6178         if (phba->sli_rev != LPFC_SLI_REV4) {
6179                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6180                         "0376 READ_REV Error. SLI Level %d "
6181                         "FCoE enabled %d\n",
6182                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6183                 rc = -EIO;
6184                 kfree(vpd);
6185                 goto out_free_mbox;
6186         }
6187
6188         /*
6189          * Continue initialization with default values even if driver failed
6190          * to read FCoE param config regions, only read parameters if the
6191          * board is FCoE
6192          */
6193         if (phba->hba_flag & HBA_FCOE_MODE &&
6194             lpfc_sli4_read_fcoe_params(phba))
6195                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6196                         "2570 Failed to read FCoE parameters\n");
6197
6198         /*
6199          * Retrieve sli4 device physical port name, failure of doing it
6200          * is considered as non-fatal.
6201          */
6202         rc = lpfc_sli4_retrieve_pport_name(phba);
6203         if (!rc)
6204                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6205                                 "3080 Successful retrieving SLI4 device "
6206                                 "physical port name: %s.\n", phba->Port);
6207
6208         /*
6209          * Evaluate the read rev and vpd data. Populate the driver
6210          * state with the results. If this routine fails, the failure
6211          * is not fatal as the driver will use generic values.
6212          */
6213         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6214         if (unlikely(!rc)) {
6215                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6216                                 "0377 Error %d parsing vpd. "
6217                                 "Using defaults.\n", rc);
6218                 rc = 0;
6219         }
6220         kfree(vpd);
6221
6222         /* Save information as VPD data */
6223         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6224         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6225         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6226         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6227                                          &mqe->un.read_rev);
6228         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6229                                        &mqe->un.read_rev);
6230         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6231                                             &mqe->un.read_rev);
6232         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6233                                            &mqe->un.read_rev);
6234         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6235         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6236         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6237         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6238         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6239         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6240         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6241                         "(%d):0380 READ_REV Status x%x "
6242                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6243                         mboxq->vport ? mboxq->vport->vpi : 0,
6244                         bf_get(lpfc_mqe_status, mqe),
6245                         phba->vpd.rev.opFwName,
6246                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6247                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6248
6249         /*
6250          * Discover the port's supported feature set and match it against the
6251          * hosts requests.
6252          */
6253         lpfc_request_features(phba, mboxq);
6254         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6255         if (unlikely(rc)) {
6256                 rc = -EIO;
6257                 goto out_free_mbox;
6258         }
6259
6260         /*
6261          * The port must support FCP initiator mode as this is the
6262          * only mode running in the host.
6263          */
6264         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6265                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6266                                 "0378 No support for fcpi mode.\n");
6267                 ftr_rsp++;
6268         }
6269         if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6270                 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6271         else
6272                 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6273         /*
6274          * If the port cannot support the host's requested features
6275          * then turn off the global config parameters to disable the
6276          * feature in the driver.  This is not a fatal error.
6277          */
6278         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6279         if (phba->cfg_enable_bg) {
6280                 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6281                         phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6282                 else
6283                         ftr_rsp++;
6284         }
6285
6286         if (phba->max_vpi && phba->cfg_enable_npiv &&
6287             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6288                 ftr_rsp++;
6289
6290         if (ftr_rsp) {
6291                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6292                                 "0379 Feature Mismatch Data: x%08x %08x "
6293                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6294                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6295                                 phba->cfg_enable_npiv, phba->max_vpi);
6296                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6297                         phba->cfg_enable_bg = 0;
6298                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6299                         phba->cfg_enable_npiv = 0;
6300         }
6301
6302         /* These SLI3 features are assumed in SLI4 */
6303         spin_lock_irq(&phba->hbalock);
6304         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6305         spin_unlock_irq(&phba->hbalock);
6306
6307         /*
6308          * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
6309          * calls depends on these resources to complete port setup.
6310          */
6311         rc = lpfc_sli4_alloc_resource_identifiers(phba);
6312         if (rc) {
6313                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6314                                 "2920 Failed to alloc Resource IDs "
6315                                 "rc = x%x\n", rc);
6316                 goto out_free_mbox;
6317         }
6318
6319         /* Read the port's service parameters. */
6320         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6321         if (rc) {
6322                 phba->link_state = LPFC_HBA_ERROR;
6323                 rc = -ENOMEM;
6324                 goto out_free_mbox;
6325         }
6326
6327         mboxq->vport = vport;
6328         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6329         mp = (struct lpfc_dmabuf *) mboxq->context1;
6330         if (rc == MBX_SUCCESS) {
6331                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6332                 rc = 0;
6333         }
6334
6335         /*
6336          * This memory was allocated by the lpfc_read_sparam routine. Release
6337          * it to the mbuf pool.
6338          */
6339         lpfc_mbuf_free(phba, mp->virt, mp->phys);
6340         kfree(mp);
6341         mboxq->context1 = NULL;
6342         if (unlikely(rc)) {
6343                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6344                                 "0382 READ_SPARAM command failed "
6345                                 "status %d, mbxStatus x%x\n",
6346                                 rc, bf_get(lpfc_mqe_status, mqe));
6347                 phba->link_state = LPFC_HBA_ERROR;
6348                 rc = -EIO;
6349                 goto out_free_mbox;
6350         }
6351
6352         lpfc_update_vport_wwn(vport);
6353
6354         /* Update the fc_host data structures with new wwn. */
6355         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6356         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6357
6358         /* update host els and scsi xri-sgl sizes and mappings */
6359         rc = lpfc_sli4_xri_sgl_update(phba);
6360         if (unlikely(rc)) {
6361                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6362                                 "1400 Failed to update xri-sgl size and "
6363                                 "mapping: %d\n", rc);
6364                 goto out_free_mbox;
6365         }
6366
6367         /* register the els sgl pool to the port */
6368         rc = lpfc_sli4_repost_els_sgl_list(phba);
6369         if (unlikely(rc)) {
6370                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6371                                 "0582 Error %d during els sgl post "
6372                                 "operation\n", rc);
6373                 rc = -ENODEV;
6374                 goto out_free_mbox;
6375         }
6376
6377         /* register the allocated scsi sgl pool to the port */
6378         rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6379         if (unlikely(rc)) {
6380                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6381                                 "0383 Error %d during scsi sgl post "
6382                                 "operation\n", rc);
6383                 /* Some Scsi buffers were moved to the abort scsi list */
6384                 /* A pci function reset will repost them */
6385                 rc = -ENODEV;
6386                 goto out_free_mbox;
6387         }
6388
6389         /* Post the rpi header region to the device. */
6390         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6391         if (unlikely(rc)) {
6392                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6393                                 "0393 Error %d during rpi post operation\n",
6394                                 rc);
6395                 rc = -ENODEV;
6396                 goto out_free_mbox;
6397         }
6398         lpfc_sli4_node_prep(phba);
6399
6400         /* Create all the SLI4 queues */
6401         rc = lpfc_sli4_queue_create(phba);
6402         if (rc) {
6403                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6404                                 "3089 Failed to allocate queues\n");
6405                 rc = -ENODEV;
6406                 goto out_stop_timers;
6407         }
6408         /* Set up all the queues to the device */
6409         rc = lpfc_sli4_queue_setup(phba);
6410         if (unlikely(rc)) {
6411                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6412                                 "0381 Error %d during queue setup.\n ", rc);
6413                 goto out_destroy_queue;
6414         }
6415
6416         /* Arm the CQs and then EQs on device */
6417         lpfc_sli4_arm_cqeq_intr(phba);
6418
6419         /* Indicate device interrupt mode */
6420         phba->sli4_hba.intr_enable = 1;
6421
6422         /* Allow asynchronous mailbox command to go through */
6423         spin_lock_irq(&phba->hbalock);
6424         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6425         spin_unlock_irq(&phba->hbalock);
6426
6427         /* Post receive buffers to the device */
6428         lpfc_sli4_rb_setup(phba);
6429
6430         /* Reset HBA FCF states after HBA reset */
6431         phba->fcf.fcf_flag = 0;
6432         phba->fcf.current_rec.flag = 0;
6433
6434         /* Start the ELS watchdog timer */
6435         mod_timer(&vport->els_tmofunc,
6436                   jiffies + HZ * (phba->fc_ratov * 2));
6437
6438         /* Start heart beat timer */
6439         mod_timer(&phba->hb_tmofunc,
6440                   jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
6441         phba->hb_outstanding = 0;
6442         phba->last_completion_time = jiffies;
6443
6444         /* Start error attention (ERATT) polling timer */
6445         mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
6446
6447         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6448         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6449                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6450                 if (!rc) {
6451                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6452                                         "2829 This device supports "
6453                                         "Advanced Error Reporting (AER)\n");
6454                         spin_lock_irq(&phba->hbalock);
6455                         phba->hba_flag |= HBA_AER_ENABLED;
6456                         spin_unlock_irq(&phba->hbalock);
6457                 } else {
6458                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6459                                         "2830 This device does not support "
6460                                         "Advanced Error Reporting (AER)\n");
6461                         phba->cfg_aer_support = 0;
6462                 }
6463                 rc = 0;
6464         }
6465
6466         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6467                 /*
6468                  * The FC Port needs to register FCFI (index 0)
6469                  */
6470                 lpfc_reg_fcfi(phba, mboxq);
6471                 mboxq->vport = phba->pport;
6472                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6473                 if (rc != MBX_SUCCESS)
6474                         goto out_unset_queue;
6475                 rc = 0;
6476                 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6477                                         &mboxq->u.mqe.un.reg_fcfi);
6478
6479                 /* Check if the port is configured to be disabled */
6480                 lpfc_sli_read_link_ste(phba);
6481         }
6482
6483         /*
6484          * The port is ready, set the host's link state to LINK_DOWN
6485          * in preparation for link interrupts.
6486          */
6487         spin_lock_irq(&phba->hbalock);
6488         phba->link_state = LPFC_LINK_DOWN;
6489         spin_unlock_irq(&phba->hbalock);
6490         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6491             (phba->hba_flag & LINK_DISABLED)) {
6492                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6493                                 "3103 Adapter Link is disabled.\n");
6494                 lpfc_down_link(phba, mboxq);
6495                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6496                 if (rc != MBX_SUCCESS) {
6497                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6498                                         "3104 Adapter failed to issue "
6499                                         "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6500                         goto out_unset_queue;
6501                 }
6502         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6503                 /* don't perform init_link on SLI4 FC port loopback test */
6504                 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6505                         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6506                         if (rc)
6507                                 goto out_unset_queue;
6508                 }
6509         }
6510         mempool_free(mboxq, phba->mbox_mem_pool);
6511         return rc;
6512 out_unset_queue:
6513         /* Unset all the queues set up in this routine when error out */
6514         lpfc_sli4_queue_unset(phba);
6515 out_destroy_queue:
6516         lpfc_sli4_queue_destroy(phba);
6517 out_stop_timers:
6518         lpfc_stop_hba_timers(phba);
6519 out_free_mbox:
6520         mempool_free(mboxq, phba->mbox_mem_pool);
6521         return rc;
6522 }
6523
6524 /**
6525  * lpfc_mbox_timeout - Timeout call back function for mbox timer
6526  * @ptr: context object - pointer to hba structure.
6527  *
6528  * This is the callback function for mailbox timer. The mailbox
6529  * timer is armed when a new mailbox command is issued and the timer
6530  * is deleted when the mailbox complete. The function is called by
6531  * the kernel timer code when a mailbox does not complete within
6532  * expected time. This function wakes up the worker thread to
6533  * process the mailbox timeout and returns. All the processing is
6534  * done by the worker thread function lpfc_mbox_timeout_handler.
6535  **/
6536 void
6537 lpfc_mbox_timeout(unsigned long ptr)
6538 {
6539         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
6540         unsigned long iflag;
6541         uint32_t tmo_posted;
6542
6543         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6544         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6545         if (!tmo_posted)
6546                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6547         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6548
6549         if (!tmo_posted)
6550                 lpfc_worker_wake_up(phba);
6551         return;
6552 }
6553
6554
6555 /**
6556  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6557  * @phba: Pointer to HBA context object.
6558  *
6559  * This function is called from worker thread when a mailbox command times out.
6560  * The caller is not required to hold any locks. This function will reset the
6561  * HBA and recover all the pending commands.
6562  **/
6563 void
6564 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6565 {
6566         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
6567         MAILBOX_t *mb = &pmbox->u.mb;
6568         struct lpfc_sli *psli = &phba->sli;
6569         struct lpfc_sli_ring *pring;
6570
6571         /* Check the pmbox pointer first.  There is a race condition
6572          * between the mbox timeout handler getting executed in the
6573          * worklist and the mailbox actually completing. When this
6574          * race condition occurs, the mbox_active will be NULL.
6575          */
6576         spin_lock_irq(&phba->hbalock);
6577         if (pmbox == NULL) {
6578                 lpfc_printf_log(phba, KERN_WARNING,
6579                                 LOG_MBOX | LOG_SLI,
6580                                 "0353 Active Mailbox cleared - mailbox timeout "
6581                                 "exiting\n");
6582                 spin_unlock_irq(&phba->hbalock);
6583                 return;
6584         }
6585
6586         /* Mbox cmd <mbxCommand> timeout */
6587         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6588                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6589                         mb->mbxCommand,
6590                         phba->pport->port_state,
6591                         phba->sli.sli_flag,
6592                         phba->sli.mbox_active);
6593         spin_unlock_irq(&phba->hbalock);
6594
6595         /* Setting state unknown so lpfc_sli_abort_iocb_ring
6596          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
6597          * it to fail all outstanding SCSI IO.
6598          */
6599         spin_lock_irq(&phba->pport->work_port_lock);
6600         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6601         spin_unlock_irq(&phba->pport->work_port_lock);
6602         spin_lock_irq(&phba->hbalock);
6603         phba->link_state = LPFC_LINK_UNKNOWN;
6604         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6605         spin_unlock_irq(&phba->hbalock);
6606
6607         pring = &psli->ring[psli->fcp_ring];
6608         lpfc_sli_abort_iocb_ring(phba, pring);
6609
6610         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6611                         "0345 Resetting board due to mailbox timeout\n");
6612
6613         /* Reset the HBA device */
6614         lpfc_reset_hba(phba);
6615 }
6616
6617 /**
6618  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6619  * @phba: Pointer to HBA context object.
6620  * @pmbox: Pointer to mailbox object.
6621  * @flag: Flag indicating how the mailbox need to be processed.
6622  *
6623  * This function is called by discovery code and HBA management code
6624  * to submit a mailbox command to firmware with SLI-3 interface spec. This
6625  * function gets the hbalock to protect the data structures.
6626  * The mailbox command can be submitted in polling mode, in which case
6627  * this function will wait in a polling loop for the completion of the
6628  * mailbox.
6629  * If the mailbox is submitted in no_wait mode (not polling) the
6630  * function will submit the command and returns immediately without waiting
6631  * for the mailbox completion. The no_wait is supported only when HBA
6632  * is in SLI2/SLI3 mode - interrupts are enabled.
6633  * The SLI interface allows only one mailbox pending at a time. If the
6634  * mailbox is issued in polling mode and there is already a mailbox
6635  * pending, then the function will return an error. If the mailbox is issued
6636  * in NO_WAIT mode and there is a mailbox pending already, the function
6637  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
6638  * The sli layer owns the mailbox object until the completion of mailbox
6639  * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
6640  * return codes the caller owns the mailbox command after the return of
6641  * the function.
6642  **/
6643 static int
6644 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6645                        uint32_t flag)
6646 {
6647         MAILBOX_t *mbx;
6648         struct lpfc_sli *psli = &phba->sli;
6649         uint32_t status, evtctr;
6650         uint32_t ha_copy, hc_copy;
6651         int i;
6652         unsigned long timeout;
6653         unsigned long drvr_flag = 0;
6654         uint32_t word0, ldata;
6655         void __iomem *to_slim;
6656         int processing_queue = 0;
6657
6658         spin_lock_irqsave(&phba->hbalock, drvr_flag);
6659         if (!pmbox) {
6660                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6661                 /* processing mbox queue from intr_handler */
6662                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6663                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6664                         return MBX_SUCCESS;
6665                 }
6666                 processing_queue = 1;
6667                 pmbox = lpfc_mbox_get(phba);
6668                 if (!pmbox) {
6669                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6670                         return MBX_SUCCESS;
6671                 }
6672         }
6673
6674         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
6675                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
6676                 if(!pmbox->vport) {
6677                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6678                         lpfc_printf_log(phba, KERN_ERR,
6679                                         LOG_MBOX | LOG_VPORT,
6680                                         "1806 Mbox x%x failed. No vport\n",
6681                                         pmbox->u.mb.mbxCommand);
6682                         dump_stack();
6683                         goto out_not_finished;
6684                 }
6685         }
6686
6687         /* If the PCI channel is in offline state, do not post mbox. */
6688         if (unlikely(pci_channel_offline(phba->pcidev))) {
6689                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6690                 goto out_not_finished;
6691         }
6692
6693         /* If HBA has a deferred error attention, fail the iocb. */
6694         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6695                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6696                 goto out_not_finished;
6697         }
6698
6699         psli = &phba->sli;
6700
6701         mbx = &pmbox->u.mb;
6702         status = MBX_SUCCESS;
6703
6704         if (phba->link_state == LPFC_HBA_ERROR) {
6705                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6706
6707                 /* Mbox command <mbxCommand> cannot issue */
6708                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6709                                 "(%d):0311 Mailbox command x%x cannot "
6710                                 "issue Data: x%x x%x\n",
6711                                 pmbox->vport ? pmbox->vport->vpi : 0,
6712                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6713                 goto out_not_finished;
6714         }
6715
6716         if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6717                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6718                         !(hc_copy & HC_MBINT_ENA)) {
6719                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6720                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6721                                 "(%d):2528 Mailbox command x%x cannot "
6722                                 "issue Data: x%x x%x\n",
6723                                 pmbox->vport ? pmbox->vport->vpi : 0,
6724                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6725                         goto out_not_finished;
6726                 }
6727         }
6728
6729         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6730                 /* Polling for a mbox command when another one is already active
6731                  * is not allowed in SLI. Also, the driver must have established
6732                  * SLI2 mode to queue and process multiple mbox commands.
6733                  */
6734
6735                 if (flag & MBX_POLL) {
6736                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6737
6738                         /* Mbox command <mbxCommand> cannot issue */
6739                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6740                                         "(%d):2529 Mailbox command x%x "
6741                                         "cannot issue Data: x%x x%x\n",
6742                                         pmbox->vport ? pmbox->vport->vpi : 0,
6743                                         pmbox->u.mb.mbxCommand,
6744                                         psli->sli_flag, flag);
6745                         goto out_not_finished;
6746                 }
6747
6748                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
6749                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6750                         /* Mbox command <mbxCommand> cannot issue */
6751                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6752                                         "(%d):2530 Mailbox command x%x "
6753                                         "cannot issue Data: x%x x%x\n",
6754                                         pmbox->vport ? pmbox->vport->vpi : 0,
6755                                         pmbox->u.mb.mbxCommand,
6756                                         psli->sli_flag, flag);
6757                         goto out_not_finished;
6758                 }
6759
6760                 /* Another mailbox command is still being processed, queue this
6761                  * command to be processed later.
6762                  */
6763                 lpfc_mbox_put(phba, pmbox);
6764
6765                 /* Mbox cmd issue - BUSY */
6766                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6767                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
6768                                 "x%x x%x x%x x%x\n",
6769                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
6770                                 mbx->mbxCommand, phba->pport->port_state,
6771                                 psli->sli_flag, flag);
6772
6773                 psli->slistat.mbox_busy++;
6774                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6775
6776                 if (pmbox->vport) {
6777                         lpfc_debugfs_disc_trc(pmbox->vport,
6778                                 LPFC_DISC_TRC_MBOX_VPORT,
6779                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
6780                                 (uint32_t)mbx->mbxCommand,
6781                                 mbx->un.varWords[0], mbx->un.varWords[1]);
6782                 }
6783                 else {
6784                         lpfc_debugfs_disc_trc(phba->pport,
6785                                 LPFC_DISC_TRC_MBOX,
6786                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
6787                                 (uint32_t)mbx->mbxCommand,
6788                                 mbx->un.varWords[0], mbx->un.varWords[1]);
6789                 }
6790
6791                 return MBX_BUSY;
6792         }
6793
6794         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6795
6796         /* If we are not polling, we MUST be in SLI2 mode */
6797         if (flag != MBX_POLL) {
6798                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
6799                     (mbx->mbxCommand != MBX_KILL_BOARD)) {
6800                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6801                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6802                         /* Mbox command <mbxCommand> cannot issue */
6803                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6804                                         "(%d):2531 Mailbox command x%x "
6805                                         "cannot issue Data: x%x x%x\n",
6806                                         pmbox->vport ? pmbox->vport->vpi : 0,
6807                                         pmbox->u.mb.mbxCommand,
6808                                         psli->sli_flag, flag);
6809                         goto out_not_finished;
6810                 }
6811                 /* timeout active mbox command */
6812                 mod_timer(&psli->mbox_tmo, (jiffies +
6813                                (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
6814         }
6815
6816         /* Mailbox cmd <cmd> issue */
6817         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6818                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
6819                         "x%x\n",
6820                         pmbox->vport ? pmbox->vport->vpi : 0,
6821                         mbx->mbxCommand, phba->pport->port_state,
6822                         psli->sli_flag, flag);
6823
6824         if (mbx->mbxCommand != MBX_HEARTBEAT) {
6825                 if (pmbox->vport) {
6826                         lpfc_debugfs_disc_trc(pmbox->vport,
6827                                 LPFC_DISC_TRC_MBOX_VPORT,
6828                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
6829                                 (uint32_t)mbx->mbxCommand,
6830                                 mbx->un.varWords[0], mbx->un.varWords[1]);
6831                 }
6832                 else {
6833                         lpfc_debugfs_disc_trc(phba->pport,
6834                                 LPFC_DISC_TRC_MBOX,
6835                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
6836                                 (uint32_t)mbx->mbxCommand,
6837                                 mbx->un.varWords[0], mbx->un.varWords[1]);
6838                 }
6839         }
6840
6841         psli->slistat.mbox_cmd++;
6842         evtctr = psli->slistat.mbox_event;
6843
6844         /* next set own bit for the adapter and copy over command word */
6845         mbx->mbxOwner = OWN_CHIP;
6846
6847         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6848                 /* Populate mbox extension offset word. */
6849                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
6850                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
6851                                 = (uint8_t *)phba->mbox_ext
6852                                   - (uint8_t *)phba->mbox;
6853                 }
6854
6855                 /* Copy the mailbox extension data */
6856                 if (pmbox->in_ext_byte_len && pmbox->context2) {
6857                         lpfc_sli_pcimem_bcopy(pmbox->context2,
6858                                 (uint8_t *)phba->mbox_ext,
6859                                 pmbox->in_ext_byte_len);
6860                 }
6861                 /* Copy command data to host SLIM area */
6862                 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
6863         } else {
6864                 /* Populate mbox extension offset word. */
6865                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
6866                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
6867                                 = MAILBOX_HBA_EXT_OFFSET;
6868
6869                 /* Copy the mailbox extension data */
6870                 if (pmbox->in_ext_byte_len && pmbox->context2) {
6871                         lpfc_memcpy_to_slim(phba->MBslimaddr +
6872                                 MAILBOX_HBA_EXT_OFFSET,
6873                                 pmbox->context2, pmbox->in_ext_byte_len);
6874
6875                 }
6876                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
6877                         /* copy command data into host mbox for cmpl */
6878                         lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
6879                 }
6880
6881                 /* First copy mbox command data to HBA SLIM, skip past first
6882                    word */
6883                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
6884                 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
6885                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
6886
6887                 /* Next copy over first word, with mbxOwner set */
6888                 ldata = *((uint32_t *)mbx);
6889                 to_slim = phba->MBslimaddr;
6890                 writel(ldata, to_slim);
6891                 readl(to_slim); /* flush */
6892
6893                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
6894                         /* switch over to host mailbox */
6895                         psli->sli_flag |= LPFC_SLI_ACTIVE;
6896                 }
6897         }
6898
6899         wmb();
6900
6901         switch (flag) {
6902         case MBX_NOWAIT:
6903                 /* Set up reference to mailbox command */
6904                 psli->mbox_active = pmbox;
6905                 /* Interrupt board to do it */
6906                 writel(CA_MBATT, phba->CAregaddr);
6907                 readl(phba->CAregaddr); /* flush */
6908                 /* Don't wait for it to finish, just return */
6909                 break;
6910
6911         case MBX_POLL:
6912                 /* Set up null reference to mailbox command */
6913                 psli->mbox_active = NULL;
6914                 /* Interrupt board to do it */
6915                 writel(CA_MBATT, phba->CAregaddr);
6916                 readl(phba->CAregaddr); /* flush */
6917
6918                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6919                         /* First read mbox status word */
6920                         word0 = *((uint32_t *)phba->mbox);
6921                         word0 = le32_to_cpu(word0);
6922                 } else {
6923                         /* First read mbox status word */
6924                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
6925                                 spin_unlock_irqrestore(&phba->hbalock,
6926                                                        drvr_flag);
6927                                 goto out_not_finished;
6928                         }
6929                 }
6930
6931                 /* Read the HBA Host Attention Register */
6932                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6933                         spin_unlock_irqrestore(&phba->hbalock,
6934                                                        drvr_flag);
6935                         goto out_not_finished;
6936                 }
6937                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6938                                                         1000) + jiffies;
6939                 i = 0;
6940                 /* Wait for command to complete */
6941                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
6942                        (!(ha_copy & HA_MBATT) &&
6943                         (phba->link_state > LPFC_WARM_START))) {
6944                         if (time_after(jiffies, timeout)) {
6945                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6946                                 spin_unlock_irqrestore(&phba->hbalock,
6947                                                        drvr_flag);
6948                                 goto out_not_finished;
6949                         }
6950
6951                         /* Check if we took a mbox interrupt while we were
6952                            polling */
6953                         if (((word0 & OWN_CHIP) != OWN_CHIP)
6954                             && (evtctr != psli->slistat.mbox_event))
6955                                 break;
6956
6957                         if (i++ > 10) {
6958                                 spin_unlock_irqrestore(&phba->hbalock,
6959                                                        drvr_flag);
6960                                 msleep(1);
6961                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6962                         }
6963
6964                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6965                                 /* First copy command data */
6966                                 word0 = *((uint32_t *)phba->mbox);
6967                                 word0 = le32_to_cpu(word0);
6968                                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
6969                                         MAILBOX_t *slimmb;
6970                                         uint32_t slimword0;
6971                                         /* Check real SLIM for any errors */
6972                                         slimword0 = readl(phba->MBslimaddr);
6973                                         slimmb = (MAILBOX_t *) & slimword0;
6974                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
6975                                             && slimmb->mbxStatus) {
6976                                                 psli->sli_flag &=
6977                                                     ~LPFC_SLI_ACTIVE;
6978                                                 word0 = slimword0;
6979                                         }
6980                                 }
6981                         } else {
6982                                 /* First copy command data */
6983                                 word0 = readl(phba->MBslimaddr);
6984                         }
6985                         /* Read the HBA Host Attention Register */
6986                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6987                                 spin_unlock_irqrestore(&phba->hbalock,
6988                                                        drvr_flag);
6989                                 goto out_not_finished;
6990                         }
6991                 }
6992
6993                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6994                         /* copy results back to user */
6995                         lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
6996                         /* Copy the mailbox extension data */
6997                         if (pmbox->out_ext_byte_len && pmbox->context2) {
6998                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
6999                                                       pmbox->context2,
7000                                                       pmbox->out_ext_byte_len);
7001                         }
7002                 } else {
7003                         /* First copy command data */
7004                         lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7005                                                         MAILBOX_CMD_SIZE);
7006                         /* Copy the mailbox extension data */
7007                         if (pmbox->out_ext_byte_len && pmbox->context2) {
7008                                 lpfc_memcpy_from_slim(pmbox->context2,
7009                                         phba->MBslimaddr +
7010                                         MAILBOX_HBA_EXT_OFFSET,
7011                                         pmbox->out_ext_byte_len);
7012                         }
7013                 }
7014
7015                 writel(HA_MBATT, phba->HAregaddr);
7016                 readl(phba->HAregaddr); /* flush */
7017
7018                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7019                 status = mbx->mbxStatus;
7020         }
7021
7022         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7023         return status;
7024
7025 out_not_finished:
7026         if (processing_queue) {
7027                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7028                 lpfc_mbox_cmpl_put(phba, pmbox);
7029         }
7030         return MBX_NOT_FINISHED;
7031 }
7032
7033 /**
7034  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7035  * @phba: Pointer to HBA context object.
7036  *
7037  * The function blocks the posting of SLI4 asynchronous mailbox commands from
7038  * the driver internal pending mailbox queue. It will then try to wait out the
7039  * possible outstanding mailbox command before return.
7040  *
7041  * Returns:
7042  *      0 - the outstanding mailbox command completed; otherwise, the wait for
7043  *      the outstanding mailbox command timed out.
7044  **/
7045 static int
7046 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7047 {
7048         struct lpfc_sli *psli = &phba->sli;
7049         int rc = 0;
7050         unsigned long timeout = 0;
7051
7052         /* Mark the asynchronous mailbox command posting as blocked */
7053         spin_lock_irq(&phba->hbalock);
7054         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7055         /* Determine how long we might wait for the active mailbox
7056          * command to be gracefully completed by firmware.
7057          */
7058         if (phba->sli.mbox_active)
7059                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7060                                                 phba->sli.mbox_active) *
7061                                                 1000) + jiffies;
7062         spin_unlock_irq(&phba->hbalock);
7063
7064         /* Wait for the outstnading mailbox command to complete */
7065         while (phba->sli.mbox_active) {
7066                 /* Check active mailbox complete status every 2ms */
7067                 msleep(2);
7068                 if (time_after(jiffies, timeout)) {
7069                         /* Timeout, marked the outstanding cmd not complete */
7070                         rc = 1;
7071                         break;
7072                 }
7073         }
7074
7075         /* Can not cleanly block async mailbox command, fails it */
7076         if (rc) {
7077                 spin_lock_irq(&phba->hbalock);
7078                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7079                 spin_unlock_irq(&phba->hbalock);
7080         }
7081         return rc;
7082 }
7083
7084 /**
7085  * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command
7086  * @phba: Pointer to HBA context object.
7087  *
7088  * The function unblocks and resume posting of SLI4 asynchronous mailbox
7089  * commands from the driver internal pending mailbox queue. It makes sure
7090  * that there is no outstanding mailbox command before resuming posting
7091  * asynchronous mailbox commands. If, for any reason, there is outstanding
7092  * mailbox command, it will try to wait it out before resuming asynchronous
7093  * mailbox command posting.
7094  **/
7095 static void
7096 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7097 {
7098         struct lpfc_sli *psli = &phba->sli;
7099
7100         spin_lock_irq(&phba->hbalock);
7101         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7102                 /* Asynchronous mailbox posting is not blocked, do nothing */
7103                 spin_unlock_irq(&phba->hbalock);
7104                 return;
7105         }
7106
7107         /* Outstanding synchronous mailbox command is guaranteed to be done,
7108          * successful or timeout, after timing-out the outstanding mailbox
7109          * command shall always be removed, so just unblock posting async
7110          * mailbox command and resume
7111          */
7112         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7113         spin_unlock_irq(&phba->hbalock);
7114
7115         /* wake up worker thread to post asynchronlous mailbox command */
7116         lpfc_worker_wake_up(phba);
7117 }
7118
7119 /**
7120  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7121  * @phba: Pointer to HBA context object.
7122  * @mboxq: Pointer to mailbox object.
7123  *
7124  * The function waits for the bootstrap mailbox register ready bit from
7125  * port for twice the regular mailbox command timeout value.
7126  *
7127  *      0 - no timeout on waiting for bootstrap mailbox register ready.
7128  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7129  **/
7130 static int
7131 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7132 {
7133         uint32_t db_ready;
7134         unsigned long timeout;
7135         struct lpfc_register bmbx_reg;
7136
7137         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7138                                    * 1000) + jiffies;
7139
7140         do {
7141                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7142                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7143                 if (!db_ready)
7144                         msleep(2);
7145
7146                 if (time_after(jiffies, timeout))
7147                         return MBXERR_ERROR;
7148         } while (!db_ready);
7149
7150         return 0;
7151 }
7152
7153 /**
7154  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7155  * @phba: Pointer to HBA context object.
7156  * @mboxq: Pointer to mailbox object.
7157  *
7158  * The function posts a mailbox to the port.  The mailbox is expected
7159  * to be comletely filled in and ready for the port to operate on it.
7160  * This routine executes a synchronous completion operation on the
7161  * mailbox by polling for its completion.
7162  *
7163  * The caller must not be holding any locks when calling this routine.
7164  *
7165  * Returns:
7166  *      MBX_SUCCESS - mailbox posted successfully
7167  *      Any of the MBX error values.
7168  **/
7169 static int
7170 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7171 {
7172         int rc = MBX_SUCCESS;
7173         unsigned long iflag;
7174         uint32_t mcqe_status;
7175         uint32_t mbx_cmnd;
7176         struct lpfc_sli *psli = &phba->sli;
7177         struct lpfc_mqe *mb = &mboxq->u.mqe;
7178         struct lpfc_bmbx_create *mbox_rgn;
7179         struct dma_address *dma_address;
7180
7181         /*
7182          * Only one mailbox can be active to the bootstrap mailbox region
7183          * at a time and there is no queueing provided.
7184          */
7185         spin_lock_irqsave(&phba->hbalock, iflag);
7186         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7187                 spin_unlock_irqrestore(&phba->hbalock, iflag);
7188                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7189                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7190                                 "cannot issue Data: x%x x%x\n",
7191                                 mboxq->vport ? mboxq->vport->vpi : 0,
7192                                 mboxq->u.mb.mbxCommand,
7193                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7194                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7195                                 psli->sli_flag, MBX_POLL);
7196                 return MBXERR_ERROR;
7197         }
7198         /* The server grabs the token and owns it until release */
7199         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7200         phba->sli.mbox_active = mboxq;
7201         spin_unlock_irqrestore(&phba->hbalock, iflag);
7202
7203         /* wait for bootstrap mbox register for readyness */
7204         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7205         if (rc)
7206                 goto exit;
7207
7208         /*
7209          * Initialize the bootstrap memory region to avoid stale data areas
7210          * in the mailbox post.  Then copy the caller's mailbox contents to
7211          * the bmbx mailbox region.
7212          */
7213         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7214         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7215         lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7216                               sizeof(struct lpfc_mqe));
7217
7218         /* Post the high mailbox dma address to the port and wait for ready. */
7219         dma_address = &phba->sli4_hba.bmbx.dma_address;
7220         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7221
7222         /* wait for bootstrap mbox register for hi-address write done */
7223         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7224         if (rc)
7225                 goto exit;
7226
7227         /* Post the low mailbox dma address to the port. */
7228         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7229
7230         /* wait for bootstrap mbox register for low address write done */
7231         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7232         if (rc)
7233                 goto exit;
7234
7235         /*
7236          * Read the CQ to ensure the mailbox has completed.
7237          * If so, update the mailbox status so that the upper layers
7238          * can complete the request normally.
7239          */
7240         lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7241                               sizeof(struct lpfc_mqe));
7242         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7243         lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7244                               sizeof(struct lpfc_mcqe));
7245         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7246         /*
7247          * When the CQE status indicates a failure and the mailbox status
7248          * indicates success then copy the CQE status into the mailbox status
7249          * (and prefix it with x4000).
7250          */
7251         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7252                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7253                         bf_set(lpfc_mqe_status, mb,
7254                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
7255                 rc = MBXERR_ERROR;
7256         } else
7257                 lpfc_sli4_swap_str(phba, mboxq);
7258
7259         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7260                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7261                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7262                         " x%x x%x CQ: x%x x%x x%x x%x\n",
7263                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7264                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7265                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7266                         bf_get(lpfc_mqe_status, mb),
7267                         mb->un.mb_words[0], mb->un.mb_words[1],
7268                         mb->un.mb_words[2], mb->un.mb_words[3],
7269                         mb->un.mb_words[4], mb->un.mb_words[5],
7270                         mb->un.mb_words[6], mb->un.mb_words[7],
7271                         mb->un.mb_words[8], mb->un.mb_words[9],
7272                         mb->un.mb_words[10], mb->un.mb_words[11],
7273                         mb->un.mb_words[12], mboxq->mcqe.word0,
7274                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
7275                         mboxq->mcqe.trailer);
7276 exit:
7277         /* We are holding the token, no needed for lock when release */
7278         spin_lock_irqsave(&phba->hbalock, iflag);
7279         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7280         phba->sli.mbox_active = NULL;
7281         spin_unlock_irqrestore(&phba->hbalock, iflag);
7282         return rc;
7283 }
7284
7285 /**
7286  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7287  * @phba: Pointer to HBA context object.
7288  * @pmbox: Pointer to mailbox object.
7289  * @flag: Flag indicating how the mailbox need to be processed.
7290  *
7291  * This function is called by discovery code and HBA management code to submit
7292  * a mailbox command to firmware with SLI-4 interface spec.
7293  *
7294  * Return codes the caller owns the mailbox command after the return of the
7295  * function.
7296  **/
7297 static int
7298 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7299                        uint32_t flag)
7300 {
7301         struct lpfc_sli *psli = &phba->sli;
7302         unsigned long iflags;
7303         int rc;
7304
7305         /* dump from issue mailbox command if setup */
7306         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7307
7308         rc = lpfc_mbox_dev_check(phba);
7309         if (unlikely(rc)) {
7310                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7311                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7312                                 "cannot issue Data: x%x x%x\n",
7313                                 mboxq->vport ? mboxq->vport->vpi : 0,
7314                                 mboxq->u.mb.mbxCommand,
7315                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7316                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7317                                 psli->sli_flag, flag);
7318                 goto out_not_finished;
7319         }
7320
7321         /* Detect polling mode and jump to a handler */
7322         if (!phba->sli4_hba.intr_enable) {
7323                 if (flag == MBX_POLL)
7324                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7325                 else
7326                         rc = -EIO;
7327                 if (rc != MBX_SUCCESS)
7328                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7329                                         "(%d):2541 Mailbox command x%x "
7330                                         "(x%x/x%x) failure: "
7331                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
7332                                         "Data: x%x x%x\n,",
7333                                         mboxq->vport ? mboxq->vport->vpi : 0,
7334                                         mboxq->u.mb.mbxCommand,
7335                                         lpfc_sli_config_mbox_subsys_get(phba,
7336                                                                         mboxq),
7337                                         lpfc_sli_config_mbox_opcode_get(phba,
7338                                                                         mboxq),
7339                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7340                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7341                                         bf_get(lpfc_mcqe_ext_status,
7342                                                &mboxq->mcqe),
7343                                         psli->sli_flag, flag);
7344                 return rc;
7345         } else if (flag == MBX_POLL) {
7346                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7347                                 "(%d):2542 Try to issue mailbox command "
7348                                 "x%x (x%x/x%x) synchronously ahead of async"
7349                                 "mailbox command queue: x%x x%x\n",
7350                                 mboxq->vport ? mboxq->vport->vpi : 0,
7351                                 mboxq->u.mb.mbxCommand,
7352                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7353                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7354                                 psli->sli_flag, flag);
7355                 /* Try to block the asynchronous mailbox posting */
7356                 rc = lpfc_sli4_async_mbox_block(phba);
7357                 if (!rc) {
7358                         /* Successfully blocked, now issue sync mbox cmd */
7359                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7360                         if (rc != MBX_SUCCESS)
7361                                 lpfc_printf_log(phba, KERN_WARNING,
7362                                         LOG_MBOX | LOG_SLI,
7363                                         "(%d):2597 Sync Mailbox command "
7364                                         "x%x (x%x/x%x) failure: "
7365                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
7366                                         "Data: x%x x%x\n,",
7367                                         mboxq->vport ? mboxq->vport->vpi : 0,
7368                                         mboxq->u.mb.mbxCommand,
7369                                         lpfc_sli_config_mbox_subsys_get(phba,
7370                                                                         mboxq),
7371                                         lpfc_sli_config_mbox_opcode_get(phba,
7372                                                                         mboxq),
7373                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7374                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7375                                         bf_get(lpfc_mcqe_ext_status,
7376                                                &mboxq->mcqe),
7377                                         psli->sli_flag, flag);
7378                         /* Unblock the async mailbox posting afterward */
7379                         lpfc_sli4_async_mbox_unblock(phba);
7380                 }
7381                 return rc;
7382         }
7383
7384         /* Now, interrupt mode asynchrous mailbox command */
7385         rc = lpfc_mbox_cmd_check(phba, mboxq);
7386         if (rc) {
7387                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7388                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7389                                 "cannot issue Data: x%x x%x\n",
7390                                 mboxq->vport ? mboxq->vport->vpi : 0,
7391                                 mboxq->u.mb.mbxCommand,
7392                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7393                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7394                                 psli->sli_flag, flag);
7395                 goto out_not_finished;
7396         }
7397
7398         /* Put the mailbox command to the driver internal FIFO */
7399         psli->slistat.mbox_busy++;
7400         spin_lock_irqsave(&phba->hbalock, iflags);
7401         lpfc_mbox_put(phba, mboxq);
7402         spin_unlock_irqrestore(&phba->hbalock, iflags);
7403         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7404                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
7405                         "x%x (x%x/x%x) x%x x%x x%x\n",
7406                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7407                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7408                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7409                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7410                         phba->pport->port_state,
7411                         psli->sli_flag, MBX_NOWAIT);
7412         /* Wake up worker thread to transport mailbox command from head */
7413         lpfc_worker_wake_up(phba);
7414
7415         return MBX_BUSY;
7416
7417 out_not_finished:
7418         return MBX_NOT_FINISHED;
7419 }
7420
7421 /**
7422  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7423  * @phba: Pointer to HBA context object.
7424  *
7425  * This function is called by worker thread to send a mailbox command to
7426  * SLI4 HBA firmware.
7427  *
7428  **/
7429 int
7430 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7431 {
7432         struct lpfc_sli *psli = &phba->sli;
7433         LPFC_MBOXQ_t *mboxq;
7434         int rc = MBX_SUCCESS;
7435         unsigned long iflags;
7436         struct lpfc_mqe *mqe;
7437         uint32_t mbx_cmnd;
7438
7439         /* Check interrupt mode before post async mailbox command */
7440         if (unlikely(!phba->sli4_hba.intr_enable))
7441                 return MBX_NOT_FINISHED;
7442
7443         /* Check for mailbox command service token */
7444         spin_lock_irqsave(&phba->hbalock, iflags);
7445         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7446                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7447                 return MBX_NOT_FINISHED;
7448         }
7449         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7450                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7451                 return MBX_NOT_FINISHED;
7452         }
7453         if (unlikely(phba->sli.mbox_active)) {
7454                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7455                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7456                                 "0384 There is pending active mailbox cmd\n");
7457                 return MBX_NOT_FINISHED;
7458         }
7459         /* Take the mailbox command service token */
7460         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7461
7462         /* Get the next mailbox command from head of queue */
7463         mboxq = lpfc_mbox_get(phba);
7464
7465         /* If no more mailbox command waiting for post, we're done */
7466         if (!mboxq) {
7467                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7468                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7469                 return MBX_SUCCESS;
7470         }
7471         phba->sli.mbox_active = mboxq;
7472         spin_unlock_irqrestore(&phba->hbalock, iflags);
7473
7474         /* Check device readiness for posting mailbox command */
7475         rc = lpfc_mbox_dev_check(phba);
7476         if (unlikely(rc))
7477                 /* Driver clean routine will clean up pending mailbox */
7478                 goto out_not_finished;
7479
7480         /* Prepare the mbox command to be posted */
7481         mqe = &mboxq->u.mqe;
7482         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7483
7484         /* Start timer for the mbox_tmo and log some mailbox post messages */
7485         mod_timer(&psli->mbox_tmo, (jiffies +
7486                   (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
7487
7488         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7489                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7490                         "x%x x%x\n",
7491                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7492                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7493                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7494                         phba->pport->port_state, psli->sli_flag);
7495
7496         if (mbx_cmnd != MBX_HEARTBEAT) {
7497                 if (mboxq->vport) {
7498                         lpfc_debugfs_disc_trc(mboxq->vport,
7499                                 LPFC_DISC_TRC_MBOX_VPORT,
7500                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7501                                 mbx_cmnd, mqe->un.mb_words[0],
7502                                 mqe->un.mb_words[1]);
7503                 } else {
7504                         lpfc_debugfs_disc_trc(phba->pport,
7505                                 LPFC_DISC_TRC_MBOX,
7506                                 "MBOX Send: cmd:x%x mb:x%x x%x",
7507                                 mbx_cmnd, mqe->un.mb_words[0],
7508                                 mqe->un.mb_words[1]);
7509                 }
7510         }
7511         psli->slistat.mbox_cmd++;
7512
7513         /* Post the mailbox command to the port */
7514         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7515         if (rc != MBX_SUCCESS) {
7516                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7517                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
7518                                 "cannot issue Data: x%x x%x\n",
7519                                 mboxq->vport ? mboxq->vport->vpi : 0,
7520                                 mboxq->u.mb.mbxCommand,
7521                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7522                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7523                                 psli->sli_flag, MBX_NOWAIT);
7524                 goto out_not_finished;
7525         }
7526
7527         return rc;
7528
7529 out_not_finished:
7530         spin_lock_irqsave(&phba->hbalock, iflags);
7531         if (phba->sli.mbox_active) {
7532                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7533                 __lpfc_mbox_cmpl_put(phba, mboxq);
7534                 /* Release the token */
7535                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7536                 phba->sli.mbox_active = NULL;
7537         }
7538         spin_unlock_irqrestore(&phba->hbalock, iflags);
7539
7540         return MBX_NOT_FINISHED;
7541 }
7542
7543 /**
7544  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7545  * @phba: Pointer to HBA context object.
7546  * @pmbox: Pointer to mailbox object.
7547  * @flag: Flag indicating how the mailbox need to be processed.
7548  *
7549  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
7550  * the API jump table function pointer from the lpfc_hba struct.
7551  *
7552  * Return codes the caller owns the mailbox command after the return of the
7553  * function.
7554  **/
7555 int
7556 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7557 {
7558         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7559 }
7560
7561 /**
7562  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
7563  * @phba: The hba struct for which this call is being executed.
7564  * @dev_grp: The HBA PCI-Device group number.
7565  *
7566  * This routine sets up the mbox interface API function jump table in @phba
7567  * struct.
7568  * Returns: 0 - success, -ENODEV - failure.
7569  **/
7570 int
7571 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7572 {
7573
7574         switch (dev_grp) {
7575         case LPFC_PCI_DEV_LP:
7576                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7577                 phba->lpfc_sli_handle_slow_ring_event =
7578                                 lpfc_sli_handle_slow_ring_event_s3;
7579                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7580                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7581                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7582                 break;
7583         case LPFC_PCI_DEV_OC:
7584                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7585                 phba->lpfc_sli_handle_slow_ring_event =
7586                                 lpfc_sli_handle_slow_ring_event_s4;
7587                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7588                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7589                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7590                 break;
7591         default:
7592                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7593                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
7594                                 dev_grp);
7595                 return -ENODEV;
7596                 break;
7597         }
7598         return 0;
7599 }
7600
7601 /**
7602  * __lpfc_sli_ringtx_put - Add an iocb to the txq
7603  * @phba: Pointer to HBA context object.
7604  * @pring: Pointer to driver SLI ring object.
7605  * @piocb: Pointer to address of newly added command iocb.
7606  *
7607  * This function is called with hbalock held to add a command
7608  * iocb to the txq when SLI layer cannot submit the command iocb
7609  * to the ring.
7610  **/
7611 void
7612 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7613                     struct lpfc_iocbq *piocb)
7614 {
7615         /* Insert the caller's iocb in the txq tail for later processing. */
7616         list_add_tail(&piocb->list, &pring->txq);
7617         pring->txq_cnt++;
7618 }
7619
7620 /**
7621  * lpfc_sli_next_iocb - Get the next iocb in the txq
7622  * @phba: Pointer to HBA context object.
7623  * @pring: Pointer to driver SLI ring object.
7624  * @piocb: Pointer to address of newly added command iocb.
7625  *
7626  * This function is called with hbalock held before a new
7627  * iocb is submitted to the firmware. This function checks
7628  * txq to flush the iocbs in txq to Firmware before
7629  * submitting new iocbs to the Firmware.
7630  * If there are iocbs in the txq which need to be submitted
7631  * to firmware, lpfc_sli_next_iocb returns the first element
7632  * of the txq after dequeuing it from txq.
7633  * If there is no iocb in the txq then the function will return
7634  * *piocb and *piocb is set to NULL. Caller needs to check
7635  * *piocb to find if there are more commands in the txq.
7636  **/
7637 static struct lpfc_iocbq *
7638 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7639                    struct lpfc_iocbq **piocb)
7640 {
7641         struct lpfc_iocbq * nextiocb;
7642
7643         nextiocb = lpfc_sli_ringtx_get(phba, pring);
7644         if (!nextiocb) {
7645                 nextiocb = *piocb;
7646                 *piocb = NULL;
7647         }
7648
7649         return nextiocb;
7650 }
7651
7652 /**
7653  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
7654  * @phba: Pointer to HBA context object.
7655  * @ring_number: SLI ring number to issue iocb on.
7656  * @piocb: Pointer to command iocb.
7657  * @flag: Flag indicating if this command can be put into txq.
7658  *
7659  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7660  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7661  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
7662  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
7663  * this function allows only iocbs for posting buffers. This function finds
7664  * next available slot in the command ring and posts the command to the
7665  * available slot and writes the port attention register to request HBA start
7666  * processing new iocb. If there is no slot available in the ring and
7667  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
7668  * the function returns IOCB_BUSY.
7669  *
7670  * This function is called with hbalock held. The function will return success
7671  * after it successfully submit the iocb to firmware or after adding to the
7672  * txq.
7673  **/
7674 static int
7675 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
7676                     struct lpfc_iocbq *piocb, uint32_t flag)
7677 {
7678         struct lpfc_iocbq *nextiocb;
7679         IOCB_t *iocb;
7680         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
7681
7682         if (piocb->iocb_cmpl && (!piocb->vport) &&
7683            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7684            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7685                 lpfc_printf_log(phba, KERN_ERR,
7686                                 LOG_SLI | LOG_VPORT,
7687                                 "1807 IOCB x%x failed. No vport\n",
7688                                 piocb->iocb.ulpCommand);
7689                 dump_stack();
7690                 return IOCB_ERROR;
7691         }
7692
7693
7694         /* If the PCI channel is in offline state, do not post iocbs. */
7695         if (unlikely(pci_channel_offline(phba->pcidev)))
7696                 return IOCB_ERROR;
7697
7698         /* If HBA has a deferred error attention, fail the iocb. */
7699         if (unlikely(phba->hba_flag & DEFER_ERATT))
7700                 return IOCB_ERROR;
7701
7702         /*
7703          * We should never get an IOCB if we are in a < LINK_DOWN state
7704          */
7705         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7706                 return IOCB_ERROR;
7707
7708         /*
7709          * Check to see if we are blocking IOCB processing because of a
7710          * outstanding event.
7711          */
7712         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
7713                 goto iocb_busy;
7714
7715         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
7716                 /*
7717                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
7718                  * can be issued if the link is not up.
7719                  */
7720                 switch (piocb->iocb.ulpCommand) {
7721                 case CMD_GEN_REQUEST64_CR:
7722                 case CMD_GEN_REQUEST64_CX:
7723                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
7724                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
7725                                         FC_RCTL_DD_UNSOL_CMD) ||
7726                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
7727                                         MENLO_TRANSPORT_TYPE))
7728
7729                                 goto iocb_busy;
7730                         break;
7731                 case CMD_QUE_RING_BUF_CN:
7732                 case CMD_QUE_RING_BUF64_CN:
7733                         /*
7734                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
7735                          * completion, iocb_cmpl MUST be 0.
7736                          */
7737                         if (piocb->iocb_cmpl)
7738                                 piocb->iocb_cmpl = NULL;
7739                         /*FALLTHROUGH*/
7740                 case CMD_CREATE_XRI_CR:
7741                 case CMD_CLOSE_XRI_CN:
7742                 case CMD_CLOSE_XRI_CX:
7743                         break;
7744                 default:
7745                         goto iocb_busy;
7746                 }
7747
7748         /*
7749          * For FCP commands, we must be in a state where we can process link
7750          * attention events.
7751          */
7752         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
7753                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
7754                 goto iocb_busy;
7755         }
7756
7757         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
7758                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
7759                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
7760
7761         if (iocb)
7762                 lpfc_sli_update_ring(phba, pring);
7763         else
7764                 lpfc_sli_update_full_ring(phba, pring);
7765
7766         if (!piocb)
7767                 return IOCB_SUCCESS;
7768
7769         goto out_busy;
7770
7771  iocb_busy:
7772         pring->stats.iocb_cmd_delay++;
7773
7774  out_busy:
7775
7776         if (!(flag & SLI_IOCB_RET_IOCB)) {
7777                 __lpfc_sli_ringtx_put(phba, pring, piocb);
7778                 return IOCB_SUCCESS;
7779         }
7780
7781         return IOCB_BUSY;
7782 }
7783
7784 /**
7785  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
7786  * @phba: Pointer to HBA context object.
7787  * @piocb: Pointer to command iocb.
7788  * @sglq: Pointer to the scatter gather queue object.
7789  *
7790  * This routine converts the bpl or bde that is in the IOCB
7791  * to a sgl list for the sli4 hardware. The physical address
7792  * of the bpl/bde is converted back to a virtual address.
7793  * If the IOCB contains a BPL then the list of BDE's is
7794  * converted to sli4_sge's. If the IOCB contains a single
7795  * BDE then it is converted to a single sli_sge.
7796  * The IOCB is still in cpu endianess so the contents of
7797  * the bpl can be used without byte swapping.
7798  *
7799  * Returns valid XRI = Success, NO_XRI = Failure.
7800 **/
7801 static uint16_t
7802 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7803                 struct lpfc_sglq *sglq)
7804 {
7805         uint16_t xritag = NO_XRI;
7806         struct ulp_bde64 *bpl = NULL;
7807         struct ulp_bde64 bde;
7808         struct sli4_sge *sgl  = NULL;
7809         struct lpfc_dmabuf *dmabuf;
7810         IOCB_t *icmd;
7811         int numBdes = 0;
7812         int i = 0;
7813         uint32_t offset = 0; /* accumulated offset in the sg request list */
7814         int inbound = 0; /* number of sg reply entries inbound from firmware */
7815
7816         if (!piocbq || !sglq)
7817                 return xritag;
7818
7819         sgl  = (struct sli4_sge *)sglq->sgl;
7820         icmd = &piocbq->iocb;
7821         if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
7822                 return sglq->sli4_xritag;
7823         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7824                 numBdes = icmd->un.genreq64.bdl.bdeSize /
7825                                 sizeof(struct ulp_bde64);
7826                 /* The addrHigh and addrLow fields within the IOCB
7827                  * have not been byteswapped yet so there is no
7828                  * need to swap them back.
7829                  */
7830                 if (piocbq->context3)
7831                         dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
7832                 else
7833                         return xritag;
7834
7835                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
7836                 if (!bpl)
7837                         return xritag;
7838
7839                 for (i = 0; i < numBdes; i++) {
7840                         /* Should already be byte swapped. */
7841                         sgl->addr_hi = bpl->addrHigh;
7842                         sgl->addr_lo = bpl->addrLow;
7843
7844                         sgl->word2 = le32_to_cpu(sgl->word2);
7845                         if ((i+1) == numBdes)
7846                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
7847                         else
7848                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
7849                         /* swap the size field back to the cpu so we
7850                          * can assign it to the sgl.
7851                          */
7852                         bde.tus.w = le32_to_cpu(bpl->tus.w);
7853                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
7854                         /* The offsets in the sgl need to be accumulated
7855                          * separately for the request and reply lists.
7856                          * The request is always first, the reply follows.
7857                          */
7858                         if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
7859                                 /* add up the reply sg entries */
7860                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
7861                                         inbound++;
7862                                 /* first inbound? reset the offset */
7863                                 if (inbound == 1)
7864                                         offset = 0;
7865                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
7866                                 bf_set(lpfc_sli4_sge_type, sgl,
7867                                         LPFC_SGE_TYPE_DATA);
7868                                 offset += bde.tus.f.bdeSize;
7869                         }
7870                         sgl->word2 = cpu_to_le32(sgl->word2);
7871                         bpl++;
7872                         sgl++;
7873                 }
7874         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
7875                         /* The addrHigh and addrLow fields of the BDE have not
7876                          * been byteswapped yet so they need to be swapped
7877                          * before putting them in the sgl.
7878                          */
7879                         sgl->addr_hi =
7880                                 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
7881                         sgl->addr_lo =
7882                                 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
7883                         sgl->word2 = le32_to_cpu(sgl->word2);
7884                         bf_set(lpfc_sli4_sge_last, sgl, 1);
7885                         sgl->word2 = cpu_to_le32(sgl->word2);
7886                         sgl->sge_len =
7887                                 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
7888         }
7889         return sglq->sli4_xritag;
7890 }
7891
7892 /**
7893  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
7894  * @phba: Pointer to HBA context object.
7895  *
7896  * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
7897  * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
7898  * held.
7899  *
7900  * Return: index into SLI4 fast-path FCP queue index.
7901  **/
7902 static inline uint32_t
7903 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7904 {
7905         int i;
7906
7907         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7908                 i = smp_processor_id();
7909         else
7910                 i = atomic_add_return(1, &phba->fcp_qidx);
7911
7912         i = (i % phba->cfg_fcp_io_channel);
7913         return i;
7914 }
7915
7916 /**
7917  * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
7918  * @phba: Pointer to HBA context object.
7919  * @piocb: Pointer to command iocb.
7920  * @wqe: Pointer to the work queue entry.
7921  *
7922  * This routine converts the iocb command to its Work Queue Entry
7923  * equivalent. The wqe pointer should not have any fields set when
7924  * this routine is called because it will memcpy over them.
7925  * This routine does not set the CQ_ID or the WQEC bits in the
7926  * wqe.
7927  *
7928  * Returns: 0 = Success, IOCB_ERROR = Failure.
7929  **/
7930 static int
7931 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7932                 union lpfc_wqe *wqe)
7933 {
7934         uint32_t xmit_len = 0, total_len = 0;
7935         uint8_t ct = 0;
7936         uint32_t fip;
7937         uint32_t abort_tag;
7938         uint8_t command_type = ELS_COMMAND_NON_FIP;
7939         uint8_t cmnd;
7940         uint16_t xritag;
7941         uint16_t abrt_iotag;
7942         struct lpfc_iocbq *abrtiocbq;
7943         struct ulp_bde64 *bpl = NULL;
7944         uint32_t els_id = LPFC_ELS_ID_DEFAULT;
7945         int numBdes, i;
7946         struct ulp_bde64 bde;
7947         struct lpfc_nodelist *ndlp;
7948         uint32_t *pcmd;
7949         uint32_t if_type;
7950
7951         fip = phba->hba_flag & HBA_FIP_SUPPORT;
7952         /* The fcp commands will set command type */
7953         if (iocbq->iocb_flag &  LPFC_IO_FCP)
7954                 command_type = FCP_COMMAND;
7955         else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
7956                 command_type = ELS_COMMAND_FIP;
7957         else
7958                 command_type = ELS_COMMAND_NON_FIP;
7959
7960         /* Some of the fields are in the right position already */
7961         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7962         abort_tag = (uint32_t) iocbq->iotag;
7963         xritag = iocbq->sli4_xritag;
7964         wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
7965         /* words0-2 bpl convert bde */
7966         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7967                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7968                                 sizeof(struct ulp_bde64);
7969                 bpl  = (struct ulp_bde64 *)
7970                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7971                 if (!bpl)
7972                         return IOCB_ERROR;
7973
7974                 /* Should already be byte swapped. */
7975                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
7976                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
7977                 /* swap the size field back to the cpu so we
7978                  * can assign it to the sgl.
7979                  */
7980                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
7981                 xmit_len = wqe->generic.bde.tus.f.bdeSize;
7982                 total_len = 0;
7983                 for (i = 0; i < numBdes; i++) {
7984                         bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
7985                         total_len += bde.tus.f.bdeSize;
7986                 }
7987         } else
7988                 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
7989
7990         iocbq->iocb.ulpIoTag = iocbq->iotag;
7991         cmnd = iocbq->iocb.ulpCommand;
7992
7993         switch (iocbq->iocb.ulpCommand) {
7994         case CMD_ELS_REQUEST64_CR:
7995                 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
7996                         ndlp = iocbq->context_un.ndlp;
7997                 else
7998                         ndlp = (struct lpfc_nodelist *)iocbq->context1;
7999                 if (!iocbq->iocb.ulpLe) {
8000                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8001                                 "2007 Only Limited Edition cmd Format"
8002                                 " supported 0x%x\n",
8003                                 iocbq->iocb.ulpCommand);
8004                         return IOCB_ERROR;
8005                 }
8006
8007                 wqe->els_req.payload_len = xmit_len;
8008                 /* Els_reguest64 has a TMO */
8009                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8010                         iocbq->iocb.ulpTimeout);
8011                 /* Need a VF for word 4 set the vf bit*/
8012                 bf_set(els_req64_vf, &wqe->els_req, 0);
8013                 /* And a VFID for word 12 */
8014                 bf_set(els_req64_vfid, &wqe->els_req, 0);
8015                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8016                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8017                        iocbq->iocb.ulpContext);
8018                 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8019                 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8020                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
8021                 if (command_type == ELS_COMMAND_FIP)
8022                         els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8023                                         >> LPFC_FIP_ELS_ID_SHIFT);
8024                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8025                                         iocbq->context2)->virt);
8026                 if_type = bf_get(lpfc_sli_intf_if_type,
8027                                         &phba->sli4_hba.sli_intf);
8028                 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8029                         if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8030                                 *pcmd == ELS_CMD_SCR ||
8031                                 *pcmd == ELS_CMD_FDISC ||
8032                                 *pcmd == ELS_CMD_LOGO ||
8033                                 *pcmd == ELS_CMD_PLOGI)) {
8034                                 bf_set(els_req64_sp, &wqe->els_req, 1);
8035                                 bf_set(els_req64_sid, &wqe->els_req,
8036                                         iocbq->vport->fc_myDID);
8037                                 if ((*pcmd == ELS_CMD_FLOGI) &&
8038                                         !(phba->fc_topology ==
8039                                                 LPFC_TOPOLOGY_LOOP))
8040                                         bf_set(els_req64_sid, &wqe->els_req, 0);
8041                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8042                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8043                                         phba->vpi_ids[iocbq->vport->vpi]);
8044                         } else if (pcmd && iocbq->context1) {
8045                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8046                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8047                                         phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8048                         }
8049                 }
8050                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8051                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8052                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8053                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8054                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8055                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8056                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8057                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8058                 break;
8059         case CMD_XMIT_SEQUENCE64_CX:
8060                 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8061                        iocbq->iocb.un.ulpWord[3]);
8062                 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8063                        iocbq->iocb.unsli3.rcvsli3.ox_id);
8064                 /* The entire sequence is transmitted for this IOCB */
8065                 xmit_len = total_len;
8066                 cmnd = CMD_XMIT_SEQUENCE64_CR;
8067                 if (phba->link_flag & LS_LOOPBACK_MODE)
8068                         bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
8069         case CMD_XMIT_SEQUENCE64_CR:
8070                 /* word3 iocb=io_tag32 wqe=reserved */
8071                 wqe->xmit_sequence.rsvd3 = 0;
8072                 /* word4 relative_offset memcpy */
8073                 /* word5 r_ctl/df_ctl memcpy */
8074                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8075                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8076                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8077                        LPFC_WQE_IOD_WRITE);
8078                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8079                        LPFC_WQE_LENLOC_WORD12);
8080                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8081                 wqe->xmit_sequence.xmit_len = xmit_len;
8082                 command_type = OTHER_COMMAND;
8083                 break;
8084         case CMD_XMIT_BCAST64_CN:
8085                 /* word3 iocb=iotag32 wqe=seq_payload_len */
8086                 wqe->xmit_bcast64.seq_payload_len = xmit_len;
8087                 /* word4 iocb=rsvd wqe=rsvd */
8088                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8089                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
8090                 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8091                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8092                 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8093                 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8094                 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8095                        LPFC_WQE_LENLOC_WORD3);
8096                 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8097                 break;
8098         case CMD_FCP_IWRITE64_CR:
8099                 command_type = FCP_COMMAND_DATA_OUT;
8100                 /* word3 iocb=iotag wqe=payload_offset_len */
8101                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8102                 wqe->fcp_iwrite.payload_offset_len =
8103                         xmit_len + sizeof(struct fcp_rsp);
8104                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8105                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8106                 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8107                        iocbq->iocb.ulpFCP2Rcvy);
8108                 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8109                 /* Always open the exchange */
8110                 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
8111                 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8112                 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8113                        LPFC_WQE_LENLOC_WORD4);
8114                 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8115                 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8116                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8117                 break;
8118         case CMD_FCP_IREAD64_CR:
8119                 /* word3 iocb=iotag wqe=payload_offset_len */
8120                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8121                 wqe->fcp_iread.payload_offset_len =
8122                         xmit_len + sizeof(struct fcp_rsp);
8123                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8124                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8125                 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8126                        iocbq->iocb.ulpFCP2Rcvy);
8127                 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8128                 /* Always open the exchange */
8129                 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
8130                 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8131                 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8132                        LPFC_WQE_LENLOC_WORD4);
8133                 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8134                 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8135                 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8136                 break;
8137         case CMD_FCP_ICMND64_CR:
8138                 /* word3 iocb=IO_TAG wqe=reserved */
8139                 wqe->fcp_icmd.rsrvd3 = 0;
8140                 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8141                 /* Always open the exchange */
8142                 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
8143                 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8144                 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8145                 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8146                 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8147                        LPFC_WQE_LENLOC_NONE);
8148                 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
8149                 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8150                        iocbq->iocb.ulpFCP2Rcvy);
8151                 break;
8152         case CMD_GEN_REQUEST64_CR:
8153                 /* For this command calculate the xmit length of the
8154                  * request bde.
8155                  */
8156                 xmit_len = 0;
8157                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8158                         sizeof(struct ulp_bde64);
8159                 for (i = 0; i < numBdes; i++) {
8160                         bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8161                         if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8162                                 break;
8163                         xmit_len += bde.tus.f.bdeSize;
8164                 }
8165                 /* word3 iocb=IO_TAG wqe=request_payload_len */
8166                 wqe->gen_req.request_payload_len = xmit_len;
8167                 /* word4 iocb=parameter wqe=relative_offset memcpy */
8168                 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
8169                 /* word6 context tag copied in memcpy */
8170                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
8171                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8172                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8173                                 "2015 Invalid CT %x command 0x%x\n",
8174                                 ct, iocbq->iocb.ulpCommand);
8175                         return IOCB_ERROR;
8176                 }
8177                 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8178                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8179                 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8180                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8181                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8182                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8183                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8184                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
8185                 command_type = OTHER_COMMAND;
8186                 break;
8187         case CMD_XMIT_ELS_RSP64_CX:
8188                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8189                 /* words0-2 BDE memcpy */
8190                 /* word3 iocb=iotag32 wqe=response_payload_len */
8191                 wqe->xmit_els_rsp.response_payload_len = xmit_len;
8192                 /* word4 */
8193                 wqe->xmit_els_rsp.word4 = 0;
8194                 /* word5 iocb=rsvd wge=did */
8195                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
8196                          iocbq->iocb.un.xseq64.xmit_els_remoteID);
8197
8198                 if_type = bf_get(lpfc_sli_intf_if_type,
8199                                         &phba->sli4_hba.sli_intf);
8200                 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8201                         if (iocbq->vport->fc_flag & FC_PT2PT) {
8202                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8203                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8204                                         iocbq->vport->fc_myDID);
8205                                 if (iocbq->vport->fc_myDID == Fabric_DID) {
8206                                         bf_set(wqe_els_did,
8207                                                 &wqe->xmit_els_rsp.wqe_dest, 0);
8208                                 }
8209                         }
8210                 }
8211                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8212                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8213                 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8214                 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8215                        iocbq->iocb.unsli3.rcvsli3.ox_id);
8216                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
8217                         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8218                                phba->vpi_ids[iocbq->vport->vpi]);
8219                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8220                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8221                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8222                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8223                        LPFC_WQE_LENLOC_WORD3);
8224                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
8225                 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8226                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8227                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8228                                         iocbq->context2)->virt);
8229                 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8230                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8231                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8232                                         iocbq->vport->fc_myDID);
8233                                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8234                                 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8235                                         phba->vpi_ids[phba->pport->vpi]);
8236                 }
8237                 command_type = OTHER_COMMAND;
8238                 break;
8239         case CMD_CLOSE_XRI_CN:
8240         case CMD_ABORT_XRI_CN:
8241         case CMD_ABORT_XRI_CX:
8242                 /* words 0-2 memcpy should be 0 rserved */
8243                 /* port will send abts */
8244                 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8245                 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8246                         abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8247                         fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8248                 } else
8249                         fip = 0;
8250
8251                 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
8252                         /*
8253                          * The link is down, or the command was ELS_FIP
8254                          * so the fw does not need to send abts
8255                          * on the wire.
8256                          */
8257                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8258                 else
8259                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8260                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
8261                 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8262                 wqe->abort_cmd.rsrvd5 = 0;
8263                 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
8264                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8265                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
8266                 /*
8267                  * The abort handler will send us CMD_ABORT_XRI_CN or
8268                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8269                  */
8270                 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8271                 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8272                 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8273                        LPFC_WQE_LENLOC_NONE);
8274                 cmnd = CMD_ABORT_XRI_CX;
8275                 command_type = OTHER_COMMAND;
8276                 xritag = 0;
8277                 break;
8278         case CMD_XMIT_BLS_RSP64_CX:
8279                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8280                 /* As BLS ABTS RSP WQE is very different from other WQEs,
8281                  * we re-construct this WQE here based on information in
8282                  * iocbq from scratch.
8283                  */
8284                 memset(wqe, 0, sizeof(union lpfc_wqe));
8285                 /* OX_ID is invariable to who sent ABTS to CT exchange */
8286                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
8287                        bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8288                 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
8289                     LPFC_ABTS_UNSOL_INT) {
8290                         /* ABTS sent by initiator to CT exchange, the
8291                          * RX_ID field will be filled with the newly
8292                          * allocated responder XRI.
8293                          */
8294                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8295                                iocbq->sli4_xritag);
8296                 } else {
8297                         /* ABTS sent by responder to CT exchange, the
8298                          * RX_ID field will be filled with the responder
8299                          * RX_ID from ABTS.
8300                          */
8301                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8302                                bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
8303                 }
8304                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8305                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
8306
8307                 /* Use CT=VPI */
8308                 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8309                         ndlp->nlp_DID);
8310                 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8311                         iocbq->iocb.ulpContext);
8312                 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
8313                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
8314                         phba->vpi_ids[phba->pport->vpi]);
8315                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8316                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8317                        LPFC_WQE_LENLOC_NONE);
8318                 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
8319                 command_type = OTHER_COMMAND;
8320                 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8321                         bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8322                                bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8323                         bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8324                                bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8325                         bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8326                                bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8327                 }
8328
8329                 break;
8330         case CMD_XRI_ABORTED_CX:
8331         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
8332         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8333         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8334         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8335         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8336         default:
8337                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8338                                 "2014 Invalid command 0x%x\n",
8339                                 iocbq->iocb.ulpCommand);
8340                 return IOCB_ERROR;
8341                 break;
8342         }
8343
8344         if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8345                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8346         else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8347                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8348         else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8349                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8350         iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8351                               LPFC_IO_DIF_INSERT);
8352         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8353         bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8354         wqe->generic.wqe_com.abort_tag = abort_tag;
8355         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8356         bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8357         bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8358         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
8359         return 0;
8360 }
8361
8362 /**
8363  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8364  * @phba: Pointer to HBA context object.
8365  * @ring_number: SLI ring number to issue iocb on.
8366  * @piocb: Pointer to command iocb.
8367  * @flag: Flag indicating if this command can be put into txq.
8368  *
8369  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8370  * an iocb command to an HBA with SLI-4 interface spec.
8371  *
8372  * This function is called with hbalock held. The function will return success
8373  * after it successfully submit the iocb to firmware or after adding to the
8374  * txq.
8375  **/
8376 static int
8377 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8378                          struct lpfc_iocbq *piocb, uint32_t flag)
8379 {
8380         struct lpfc_sglq *sglq;
8381         union lpfc_wqe wqe;
8382         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8383
8384         if (piocb->sli4_xritag == NO_XRI) {
8385                 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
8386                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8387                         sglq = NULL;
8388                 else {
8389                         if (pring->txq_cnt) {
8390                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
8391                                         __lpfc_sli_ringtx_put(phba,
8392                                                 pring, piocb);
8393                                         return IOCB_SUCCESS;
8394                                 } else {
8395                                         return IOCB_BUSY;
8396                                 }
8397                         } else {
8398                                 sglq = __lpfc_sli_get_sglq(phba, piocb);
8399                                 if (!sglq) {
8400                                         if (!(flag & SLI_IOCB_RET_IOCB)) {
8401                                                 __lpfc_sli_ringtx_put(phba,
8402                                                                 pring,
8403                                                                 piocb);
8404                                                 return IOCB_SUCCESS;
8405                                         } else
8406                                                 return IOCB_BUSY;
8407                                 }
8408                         }
8409                 }
8410         } else if (piocb->iocb_flag &  LPFC_IO_FCP) {
8411                 /* These IO's already have an XRI and a mapped sgl. */
8412                 sglq = NULL;
8413         } else {
8414                 /*
8415                  * This is a continuation of a commandi,(CX) so this
8416                  * sglq is on the active list
8417                  */
8418                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
8419                 if (!sglq)
8420                         return IOCB_ERROR;
8421         }
8422
8423         if (sglq) {
8424                 piocb->sli4_lxritag = sglq->sli4_lxritag;
8425                 piocb->sli4_xritag = sglq->sli4_xritag;
8426                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
8427                         return IOCB_ERROR;
8428         }
8429
8430         if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8431                 return IOCB_ERROR;
8432
8433         if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8434                 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8435                 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8436                                      &wqe))
8437                         return IOCB_ERROR;
8438         } else {
8439                 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8440                         return IOCB_ERROR;
8441         }
8442         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8443
8444         return 0;
8445 }
8446
8447 /**
8448  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8449  *
8450  * This routine wraps the actual lockless version for issusing IOCB function
8451  * pointer from the lpfc_hba struct.
8452  *
8453  * Return codes:
8454  *      IOCB_ERROR - Error
8455  *      IOCB_SUCCESS - Success
8456  *      IOCB_BUSY - Busy
8457  **/
8458 int
8459 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8460                 struct lpfc_iocbq *piocb, uint32_t flag)
8461 {
8462         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8463 }
8464
8465 /**
8466  * lpfc_sli_api_table_setup - Set up sli api function jump table
8467  * @phba: The hba struct for which this call is being executed.
8468  * @dev_grp: The HBA PCI-Device group number.
8469  *
8470  * This routine sets up the SLI interface API function jump table in @phba
8471  * struct.
8472  * Returns: 0 - success, -ENODEV - failure.
8473  **/
8474 int
8475 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8476 {
8477
8478         switch (dev_grp) {
8479         case LPFC_PCI_DEV_LP:
8480                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8481                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8482                 break;
8483         case LPFC_PCI_DEV_OC:
8484                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8485                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8486                 break;
8487         default:
8488                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8489                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
8490                                 dev_grp);
8491                 return -ENODEV;
8492                 break;
8493         }
8494         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8495         return 0;
8496 }
8497
8498 /**
8499  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8500  * @phba: Pointer to HBA context object.
8501  * @pring: Pointer to driver SLI ring object.
8502  * @piocb: Pointer to command iocb.
8503  * @flag: Flag indicating if this command can be put into txq.
8504  *
8505  * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
8506  * function. This function gets the hbalock and calls
8507  * __lpfc_sli_issue_iocb function and will return the error returned
8508  * by __lpfc_sli_issue_iocb function. This wrapper is used by
8509  * functions which do not hold hbalock.
8510  **/
8511 int
8512 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8513                     struct lpfc_iocbq *piocb, uint32_t flag)
8514 {
8515         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
8516         struct lpfc_sli_ring *pring;
8517         struct lpfc_queue *fpeq;
8518         struct lpfc_eqe *eqe;
8519         unsigned long iflags;
8520         int rc, idx;
8521
8522         if (phba->sli_rev == LPFC_SLI_REV4) {
8523                 if (piocb->iocb_flag &  LPFC_IO_FCP) {
8524                         if (unlikely(!phba->sli4_hba.fcp_wq))
8525                                 return IOCB_ERROR;
8526                         idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8527                         piocb->fcp_wqidx = idx;
8528                         ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
8529
8530                         pring = &phba->sli.ring[ring_number];
8531                         spin_lock_irqsave(&pring->ring_lock, iflags);
8532                         rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8533                                 flag);
8534                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
8535
8536                         if (lpfc_fcp_look_ahead) {
8537                                 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
8538
8539                                 if (atomic_dec_and_test(&fcp_eq_hdl->
8540                                         fcp_eq_in_use)) {
8541
8542                                         /* Get associated EQ with this index */
8543                                         fpeq = phba->sli4_hba.hba_eq[idx];
8544
8545                                         /* Turn off interrupts from this EQ */
8546                                         lpfc_sli4_eq_clr_intr(fpeq);
8547
8548                                         /*
8549                                          * Process all the events on FCP EQ
8550                                          */
8551                                         while ((eqe = lpfc_sli4_eq_get(fpeq))) {
8552                                                 lpfc_sli4_hba_handle_eqe(phba,
8553                                                         eqe, idx);
8554                                                 fpeq->EQ_processed++;
8555                                         }
8556
8557                                         /* Always clear and re-arm the EQ */
8558                                         lpfc_sli4_eq_release(fpeq,
8559                                                 LPFC_QUEUE_REARM);
8560                                 }
8561                                 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
8562                         }
8563                 } else {
8564                         pring = &phba->sli.ring[ring_number];
8565                         spin_lock_irqsave(&pring->ring_lock, iflags);
8566                         rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8567                                 flag);
8568                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
8569
8570                 }
8571         } else {
8572                 /* For now, SLI2/3 will still use hbalock */
8573                 spin_lock_irqsave(&phba->hbalock, iflags);
8574                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8575                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8576         }
8577         return rc;
8578 }
8579
8580 /**
8581  * lpfc_extra_ring_setup - Extra ring setup function
8582  * @phba: Pointer to HBA context object.
8583  *
8584  * This function is called while driver attaches with the
8585  * HBA to setup the extra ring. The extra ring is used
8586  * only when driver needs to support target mode functionality
8587  * or IP over FC functionalities.
8588  *
8589  * This function is called with no lock held.
8590  **/
8591 static int
8592 lpfc_extra_ring_setup( struct lpfc_hba *phba)
8593 {
8594         struct lpfc_sli *psli;
8595         struct lpfc_sli_ring *pring;
8596
8597         psli = &phba->sli;
8598
8599         /* Adjust cmd/rsp ring iocb entries more evenly */
8600
8601         /* Take some away from the FCP ring */
8602         pring = &psli->ring[psli->fcp_ring];
8603         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8604         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8605         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8606         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8607
8608         /* and give them to the extra ring */
8609         pring = &psli->ring[psli->extra_ring];
8610
8611         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8612         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8613         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8614         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8615
8616         /* Setup default profile for this ring */
8617         pring->iotag_max = 4096;
8618         pring->num_mask = 1;
8619         pring->prt[0].profile = 0;      /* Mask 0 */
8620         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8621         pring->prt[0].type = phba->cfg_multi_ring_type;
8622         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8623         return 0;
8624 }
8625
8626 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8627  * @phba: Pointer to HBA context object.
8628  * @iocbq: Pointer to iocb object.
8629  *
8630  * The async_event handler calls this routine when it receives
8631  * an ASYNC_STATUS_CN event from the port.  The port generates
8632  * this event when an Abort Sequence request to an rport fails
8633  * twice in succession.  The abort could be originated by the
8634  * driver or by the port.  The ABTS could have been for an ELS
8635  * or FCP IO.  The port only generates this event when an ABTS
8636  * fails to complete after one retry.
8637  */
8638 static void
8639 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8640                           struct lpfc_iocbq *iocbq)
8641 {
8642         struct lpfc_nodelist *ndlp = NULL;
8643         uint16_t rpi = 0, vpi = 0;
8644         struct lpfc_vport *vport = NULL;
8645
8646         /* The rpi in the ulpContext is vport-sensitive. */
8647         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8648         rpi = iocbq->iocb.ulpContext;
8649
8650         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8651                         "3092 Port generated ABTS async event "
8652                         "on vpi %d rpi %d status 0x%x\n",
8653                         vpi, rpi, iocbq->iocb.ulpStatus);
8654
8655         vport = lpfc_find_vport_by_vpid(phba, vpi);
8656         if (!vport)
8657                 goto err_exit;
8658         ndlp = lpfc_findnode_rpi(vport, rpi);
8659         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8660                 goto err_exit;
8661
8662         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
8663                 lpfc_sli_abts_recover_port(vport, ndlp);
8664         return;
8665
8666  err_exit:
8667         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8668                         "3095 Event Context not found, no "
8669                         "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
8670                         iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
8671                         vpi, rpi);
8672 }
8673
8674 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
8675  * @phba: pointer to HBA context object.
8676  * @ndlp: nodelist pointer for the impacted rport.
8677  * @axri: pointer to the wcqe containing the failed exchange.
8678  *
8679  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8680  * port.  The port generates this event when an abort exchange request to an
8681  * rport fails twice in succession with no reply.  The abort could be originated
8682  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
8683  */
8684 void
8685 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8686                            struct lpfc_nodelist *ndlp,
8687                            struct sli4_wcqe_xri_aborted *axri)
8688 {
8689         struct lpfc_vport *vport;
8690         uint32_t ext_status = 0;
8691
8692         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
8693                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8694                                 "3115 Node Context not found, driver "
8695                                 "ignoring abts err event\n");
8696                 return;
8697         }
8698
8699         vport = ndlp->vport;
8700         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8701                         "3116 Port generated FCP XRI ABORT event on "
8702                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
8703                         ndlp->vport->vpi, ndlp->nlp_rpi,
8704                         bf_get(lpfc_wcqe_xa_xri, axri),
8705                         bf_get(lpfc_wcqe_xa_status, axri),
8706                         axri->parameter);
8707
8708         /*
8709          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
8710          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8711          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8712          */
8713         ext_status = axri->parameter & IOERR_PARAM_MASK;
8714         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8715             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
8716                 lpfc_sli_abts_recover_port(vport, ndlp);
8717 }
8718
8719 /**
8720  * lpfc_sli_async_event_handler - ASYNC iocb handler function
8721  * @phba: Pointer to HBA context object.
8722  * @pring: Pointer to driver SLI ring object.
8723  * @iocbq: Pointer to iocb object.
8724  *
8725  * This function is called by the slow ring event handler
8726  * function when there is an ASYNC event iocb in the ring.
8727  * This function is called with no lock held.
8728  * Currently this function handles only temperature related
8729  * ASYNC events. The function decodes the temperature sensor
8730  * event message and posts events for the management applications.
8731  **/
8732 static void
8733 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
8734         struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
8735 {
8736         IOCB_t *icmd;
8737         uint16_t evt_code;
8738         struct temp_event temp_event_data;
8739         struct Scsi_Host *shost;
8740         uint32_t *iocb_w;
8741
8742         icmd = &iocbq->iocb;
8743         evt_code = icmd->un.asyncstat.evt_code;
8744
8745         switch (evt_code) {
8746         case ASYNC_TEMP_WARN:
8747         case ASYNC_TEMP_SAFE:
8748                 temp_event_data.data = (uint32_t) icmd->ulpContext;
8749                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8750                 if (evt_code == ASYNC_TEMP_WARN) {
8751                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8752                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8753                                 "0347 Adapter is very hot, please take "
8754                                 "corrective action. temperature : %d Celsius\n",
8755                                 (uint32_t) icmd->ulpContext);
8756                 } else {
8757                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
8758                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8759                                 "0340 Adapter temperature is OK now. "
8760                                 "temperature : %d Celsius\n",
8761                                 (uint32_t) icmd->ulpContext);
8762                 }
8763
8764                 /* Send temperature change event to applications */
8765                 shost = lpfc_shost_from_vport(phba->pport);
8766                 fc_host_post_vendor_event(shost, fc_get_event_number(),
8767                         sizeof(temp_event_data), (char *) &temp_event_data,
8768                         LPFC_NL_VENDOR_ID);
8769                 break;
8770         case ASYNC_STATUS_CN:
8771                 lpfc_sli_abts_err_handler(phba, iocbq);
8772                 break;
8773         default:
8774                 iocb_w = (uint32_t *) icmd;
8775                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8776                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
8777                         " evt_code 0x%x\n"
8778                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
8779                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
8780                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
8781                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
8782                         pring->ringno, icmd->un.asyncstat.evt_code,
8783                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8784                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8785                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8786                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8787
8788                 break;
8789         }
8790 }
8791
8792
8793 /**
8794  * lpfc_sli_setup - SLI ring setup function
8795  * @phba: Pointer to HBA context object.
8796  *
8797  * lpfc_sli_setup sets up rings of the SLI interface with
8798  * number of iocbs per ring and iotags. This function is
8799  * called while driver attach to the HBA and before the
8800  * interrupts are enabled. So there is no need for locking.
8801  *
8802  * This function always returns 0.
8803  **/
8804 int
8805 lpfc_sli_setup(struct lpfc_hba *phba)
8806 {
8807         int i, totiocbsize = 0;
8808         struct lpfc_sli *psli = &phba->sli;
8809         struct lpfc_sli_ring *pring;
8810
8811         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8812         if (phba->sli_rev == LPFC_SLI_REV4)
8813                 psli->num_rings += phba->cfg_fcp_io_channel;
8814         psli->sli_flag = 0;
8815         psli->fcp_ring = LPFC_FCP_RING;
8816         psli->next_ring = LPFC_FCP_NEXT_RING;
8817         psli->extra_ring = LPFC_EXTRA_RING;
8818
8819         psli->iocbq_lookup = NULL;
8820         psli->iocbq_lookup_len = 0;
8821         psli->last_iotag = 0;
8822
8823         for (i = 0; i < psli->num_rings; i++) {
8824                 pring = &psli->ring[i];
8825                 switch (i) {
8826                 case LPFC_FCP_RING:     /* ring 0 - FCP */
8827                         /* numCiocb and numRiocb are used in config_port */
8828                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8829                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8830                         pring->sli.sli3.numCiocb +=
8831                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8832                         pring->sli.sli3.numRiocb +=
8833                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8834                         pring->sli.sli3.numCiocb +=
8835                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8836                         pring->sli.sli3.numRiocb +=
8837                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8838                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8839                                                         SLI3_IOCB_CMD_SIZE :
8840                                                         SLI2_IOCB_CMD_SIZE;
8841                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8842                                                         SLI3_IOCB_RSP_SIZE :
8843                                                         SLI2_IOCB_RSP_SIZE;
8844                         pring->iotag_ctr = 0;
8845                         pring->iotag_max =
8846                             (phba->cfg_hba_queue_depth * 2);
8847                         pring->fast_iotag = pring->iotag_max;
8848                         pring->num_mask = 0;
8849                         break;
8850                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
8851                         /* numCiocb and numRiocb are used in config_port */
8852                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8853                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8854                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8855                                                         SLI3_IOCB_CMD_SIZE :
8856                                                         SLI2_IOCB_CMD_SIZE;
8857                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8858                                                         SLI3_IOCB_RSP_SIZE :
8859                                                         SLI2_IOCB_RSP_SIZE;
8860                         pring->iotag_max = phba->cfg_hba_queue_depth;
8861                         pring->num_mask = 0;
8862                         break;
8863                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
8864                         /* numCiocb and numRiocb are used in config_port */
8865                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8866                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8867                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8868                                                         SLI3_IOCB_CMD_SIZE :
8869                                                         SLI2_IOCB_CMD_SIZE;
8870                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8871                                                         SLI3_IOCB_RSP_SIZE :
8872                                                         SLI2_IOCB_RSP_SIZE;
8873                         pring->fast_iotag = 0;
8874                         pring->iotag_ctr = 0;
8875                         pring->iotag_max = 4096;
8876                         pring->lpfc_sli_rcv_async_status =
8877                                 lpfc_sli_async_event_handler;
8878                         pring->num_mask = LPFC_MAX_RING_MASK;
8879                         pring->prt[0].profile = 0;      /* Mask 0 */
8880                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8881                         pring->prt[0].type = FC_TYPE_ELS;
8882                         pring->prt[0].lpfc_sli_rcv_unsol_event =
8883                             lpfc_els_unsol_event;
8884                         pring->prt[1].profile = 0;      /* Mask 1 */
8885                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
8886                         pring->prt[1].type = FC_TYPE_ELS;
8887                         pring->prt[1].lpfc_sli_rcv_unsol_event =
8888                             lpfc_els_unsol_event;
8889                         pring->prt[2].profile = 0;      /* Mask 2 */
8890                         /* NameServer Inquiry */
8891                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
8892                         /* NameServer */
8893                         pring->prt[2].type = FC_TYPE_CT;
8894                         pring->prt[2].lpfc_sli_rcv_unsol_event =
8895                             lpfc_ct_unsol_event;
8896                         pring->prt[3].profile = 0;      /* Mask 3 */
8897                         /* NameServer response */
8898                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
8899                         /* NameServer */
8900                         pring->prt[3].type = FC_TYPE_CT;
8901                         pring->prt[3].lpfc_sli_rcv_unsol_event =
8902                             lpfc_ct_unsol_event;
8903                         break;
8904                 }
8905                 totiocbsize += (pring->sli.sli3.numCiocb *
8906                         pring->sli.sli3.sizeCiocb) +
8907                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
8908         }
8909         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
8910                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
8911                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8912                        "SLI2 SLIM Data: x%x x%lx\n",
8913                        phba->brd_no, totiocbsize,
8914                        (unsigned long) MAX_SLIM_IOCB_SIZE);
8915         }
8916         if (phba->cfg_multi_ring_support == 2)
8917                 lpfc_extra_ring_setup(phba);
8918
8919         return 0;
8920 }
8921
8922 /**
8923  * lpfc_sli_queue_setup - Queue initialization function
8924  * @phba: Pointer to HBA context object.
8925  *
8926  * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8927  * ring. This function also initializes ring indices of each ring.
8928  * This function is called during the initialization of the SLI
8929  * interface of an HBA.
8930  * This function is called with no lock held and always returns
8931  * 1.
8932  **/
8933 int
8934 lpfc_sli_queue_setup(struct lpfc_hba *phba)
8935 {
8936         struct lpfc_sli *psli;
8937         struct lpfc_sli_ring *pring;
8938         int i;
8939
8940         psli = &phba->sli;
8941         spin_lock_irq(&phba->hbalock);
8942         INIT_LIST_HEAD(&psli->mboxq);
8943         INIT_LIST_HEAD(&psli->mboxq_cmpl);
8944         /* Initialize list headers for txq and txcmplq as double linked lists */
8945         for (i = 0; i < psli->num_rings; i++) {
8946                 pring = &psli->ring[i];
8947                 pring->ringno = i;
8948                 pring->sli.sli3.next_cmdidx  = 0;
8949                 pring->sli.sli3.local_getidx = 0;
8950                 pring->sli.sli3.cmdidx = 0;
8951                 INIT_LIST_HEAD(&pring->txq);
8952                 INIT_LIST_HEAD(&pring->txcmplq);
8953                 INIT_LIST_HEAD(&pring->iocb_continueq);
8954                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
8955                 INIT_LIST_HEAD(&pring->postbufq);
8956                 spin_lock_init(&pring->ring_lock);
8957         }
8958         spin_unlock_irq(&phba->hbalock);
8959         return 1;
8960 }
8961
8962 /**
8963  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8964  * @phba: Pointer to HBA context object.
8965  *
8966  * This routine flushes the mailbox command subsystem. It will unconditionally
8967  * flush all the mailbox commands in the three possible stages in the mailbox
8968  * command sub-system: pending mailbox command queue; the outstanding mailbox
8969  * command; and completed mailbox command queue. It is caller's responsibility
8970  * to make sure that the driver is in the proper state to flush the mailbox
8971  * command sub-system. Namely, the posting of mailbox commands into the
8972  * pending mailbox command queue from the various clients must be stopped;
8973  * either the HBA is in a state that it will never works on the outstanding
8974  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
8975  * mailbox command has been completed.
8976  **/
8977 static void
8978 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
8979 {
8980         LIST_HEAD(completions);
8981         struct lpfc_sli *psli = &phba->sli;
8982         LPFC_MBOXQ_t *pmb;
8983         unsigned long iflag;
8984
8985         /* Flush all the mailbox commands in the mbox system */
8986         spin_lock_irqsave(&phba->hbalock, iflag);
8987         /* The pending mailbox command queue */
8988         list_splice_init(&phba->sli.mboxq, &completions);
8989         /* The outstanding active mailbox command */
8990         if (psli->mbox_active) {
8991                 list_add_tail(&psli->mbox_active->list, &completions);
8992                 psli->mbox_active = NULL;
8993                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8994         }
8995         /* The completed mailbox command queue */
8996         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
8997         spin_unlock_irqrestore(&phba->hbalock, iflag);
8998
8999         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
9000         while (!list_empty(&completions)) {
9001                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9002                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9003                 if (pmb->mbox_cmpl)
9004                         pmb->mbox_cmpl(phba, pmb);
9005         }
9006 }
9007
9008 /**
9009  * lpfc_sli_host_down - Vport cleanup function
9010  * @vport: Pointer to virtual port object.
9011  *
9012  * lpfc_sli_host_down is called to clean up the resources
9013  * associated with a vport before destroying virtual
9014  * port data structures.
9015  * This function does following operations:
9016  * - Free discovery resources associated with this virtual
9017  *   port.
9018  * - Free iocbs associated with this virtual port in
9019  *   the txq.
9020  * - Send abort for all iocb commands associated with this
9021  *   vport in txcmplq.
9022  *
9023  * This function is called with no lock held and always returns 1.
9024  **/
9025 int
9026 lpfc_sli_host_down(struct lpfc_vport *vport)
9027 {
9028         LIST_HEAD(completions);
9029         struct lpfc_hba *phba = vport->phba;
9030         struct lpfc_sli *psli = &phba->sli;
9031         struct lpfc_sli_ring *pring;
9032         struct lpfc_iocbq *iocb, *next_iocb;
9033         int i;
9034         unsigned long flags = 0;
9035         uint16_t prev_pring_flag;
9036
9037         lpfc_cleanup_discovery_resources(vport);
9038
9039         spin_lock_irqsave(&phba->hbalock, flags);
9040         for (i = 0; i < psli->num_rings; i++) {
9041                 pring = &psli->ring[i];
9042                 prev_pring_flag = pring->flag;
9043                 /* Only slow rings */
9044                 if (pring->ringno == LPFC_ELS_RING) {
9045                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
9046                         /* Set the lpfc data pending flag */
9047                         set_bit(LPFC_DATA_READY, &phba->data_flags);
9048                 }
9049                 /*
9050                  * Error everything on the txq since these iocbs have not been
9051                  * given to the FW yet.
9052                  */
9053                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9054                         if (iocb->vport != vport)
9055                                 continue;
9056                         list_move_tail(&iocb->list, &completions);
9057                         pring->txq_cnt--;
9058                 }
9059
9060                 /* Next issue ABTS for everything on the txcmplq */
9061                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9062                                                                         list) {
9063                         if (iocb->vport != vport)
9064                                 continue;
9065                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9066                 }
9067
9068                 pring->flag = prev_pring_flag;
9069         }
9070
9071         spin_unlock_irqrestore(&phba->hbalock, flags);
9072
9073         /* Cancel all the IOCBs from the completions list */
9074         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9075                               IOERR_SLI_DOWN);
9076         return 1;
9077 }
9078
9079 /**
9080  * lpfc_sli_hba_down - Resource cleanup function for the HBA
9081  * @phba: Pointer to HBA context object.
9082  *
9083  * This function cleans up all iocb, buffers, mailbox commands
9084  * while shutting down the HBA. This function is called with no
9085  * lock held and always returns 1.
9086  * This function does the following to cleanup driver resources:
9087  * - Free discovery resources for each virtual port
9088  * - Cleanup any pending fabric iocbs
9089  * - Iterate through the iocb txq and free each entry
9090  *   in the list.
9091  * - Free up any buffer posted to the HBA
9092  * - Free mailbox commands in the mailbox queue.
9093  **/
9094 int
9095 lpfc_sli_hba_down(struct lpfc_hba *phba)
9096 {
9097         LIST_HEAD(completions);
9098         struct lpfc_sli *psli = &phba->sli;
9099         struct lpfc_sli_ring *pring;
9100         struct lpfc_dmabuf *buf_ptr;
9101         unsigned long flags = 0;
9102         int i;
9103
9104         /* Shutdown the mailbox command sub-system */
9105         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
9106
9107         lpfc_hba_down_prep(phba);
9108
9109         lpfc_fabric_abort_hba(phba);
9110
9111         spin_lock_irqsave(&phba->hbalock, flags);
9112         for (i = 0; i < psli->num_rings; i++) {
9113                 pring = &psli->ring[i];
9114                 /* Only slow rings */
9115                 if (pring->ringno == LPFC_ELS_RING) {
9116                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
9117                         /* Set the lpfc data pending flag */
9118                         set_bit(LPFC_DATA_READY, &phba->data_flags);
9119                 }
9120
9121                 /*
9122                  * Error everything on the txq since these iocbs have not been
9123                  * given to the FW yet.
9124                  */
9125                 list_splice_init(&pring->txq, &completions);
9126                 pring->txq_cnt = 0;
9127
9128         }
9129         spin_unlock_irqrestore(&phba->hbalock, flags);
9130
9131         /* Cancel all the IOCBs from the completions list */
9132         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9133                               IOERR_SLI_DOWN);
9134
9135         spin_lock_irqsave(&phba->hbalock, flags);
9136         list_splice_init(&phba->elsbuf, &completions);
9137         phba->elsbuf_cnt = 0;
9138         phba->elsbuf_prev_cnt = 0;
9139         spin_unlock_irqrestore(&phba->hbalock, flags);
9140
9141         while (!list_empty(&completions)) {
9142                 list_remove_head(&completions, buf_ptr,
9143                         struct lpfc_dmabuf, list);
9144                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9145                 kfree(buf_ptr);
9146         }
9147
9148         /* Return any active mbox cmds */
9149         del_timer_sync(&psli->mbox_tmo);
9150
9151         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
9152         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9153         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
9154
9155         return 1;
9156 }
9157
9158 /**
9159  * lpfc_sli_pcimem_bcopy - SLI memory copy function
9160  * @srcp: Source memory pointer.
9161  * @destp: Destination memory pointer.
9162  * @cnt: Number of words required to be copied.
9163  *
9164  * This function is used for copying data between driver memory
9165  * and the SLI memory. This function also changes the endianness
9166  * of each word if native endianness is different from SLI
9167  * endianness. This function can be called with or without
9168  * lock.
9169  **/
9170 void
9171 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9172 {
9173         uint32_t *src = srcp;
9174         uint32_t *dest = destp;
9175         uint32_t ldata;
9176         int i;
9177
9178         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9179                 ldata = *src;
9180                 ldata = le32_to_cpu(ldata);
9181                 *dest = ldata;
9182                 src++;
9183                 dest++;
9184         }
9185 }
9186
9187
9188 /**
9189  * lpfc_sli_bemem_bcopy - SLI memory copy function
9190  * @srcp: Source memory pointer.
9191  * @destp: Destination memory pointer.
9192  * @cnt: Number of words required to be copied.
9193  *
9194  * This function is used for copying data between a data structure
9195  * with big endian representation to local endianness.
9196  * This function can be called with or without lock.
9197  **/
9198 void
9199 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9200 {
9201         uint32_t *src = srcp;
9202         uint32_t *dest = destp;
9203         uint32_t ldata;
9204         int i;
9205
9206         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9207                 ldata = *src;
9208                 ldata = be32_to_cpu(ldata);
9209                 *dest = ldata;
9210                 src++;
9211                 dest++;
9212         }
9213 }
9214
9215 /**
9216  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
9217  * @phba: Pointer to HBA context object.
9218  * @pring: Pointer to driver SLI ring object.
9219  * @mp: Pointer to driver buffer object.
9220  *
9221  * This function is called with no lock held.
9222  * It always return zero after adding the buffer to the postbufq
9223  * buffer list.
9224  **/
9225 int
9226 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9227                          struct lpfc_dmabuf *mp)
9228 {
9229         /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
9230            later */
9231         spin_lock_irq(&phba->hbalock);
9232         list_add_tail(&mp->list, &pring->postbufq);
9233         pring->postbufq_cnt++;
9234         spin_unlock_irq(&phba->hbalock);
9235         return 0;
9236 }
9237
9238 /**
9239  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
9240  * @phba: Pointer to HBA context object.
9241  *
9242  * When HBQ is enabled, buffers are searched based on tags. This function
9243  * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
9244  * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
9245  * does not conflict with tags of buffer posted for unsolicited events.
9246  * The function returns the allocated tag. The function is called with
9247  * no locks held.
9248  **/
9249 uint32_t
9250 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9251 {
9252         spin_lock_irq(&phba->hbalock);
9253         phba->buffer_tag_count++;
9254         /*
9255          * Always set the QUE_BUFTAG_BIT to distiguish between
9256          * a tag assigned by HBQ.
9257          */
9258         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9259         spin_unlock_irq(&phba->hbalock);
9260         return phba->buffer_tag_count;
9261 }
9262
9263 /**
9264  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
9265  * @phba: Pointer to HBA context object.
9266  * @pring: Pointer to driver SLI ring object.
9267  * @tag: Buffer tag.
9268  *
9269  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
9270  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
9271  * iocb is posted to the response ring with the tag of the buffer.
9272  * This function searches the pring->postbufq list using the tag
9273  * to find buffer associated with CMD_IOCB_RET_XRI64_CX
9274  * iocb. If the buffer is found then lpfc_dmabuf object of the
9275  * buffer is returned to the caller else NULL is returned.
9276  * This function is called with no lock held.
9277  **/
9278 struct lpfc_dmabuf *
9279 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9280                         uint32_t tag)
9281 {
9282         struct lpfc_dmabuf *mp, *next_mp;
9283         struct list_head *slp = &pring->postbufq;
9284
9285         /* Search postbufq, from the beginning, looking for a match on tag */
9286         spin_lock_irq(&phba->hbalock);
9287         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9288                 if (mp->buffer_tag == tag) {
9289                         list_del_init(&mp->list);
9290                         pring->postbufq_cnt--;
9291                         spin_unlock_irq(&phba->hbalock);
9292                         return mp;
9293                 }
9294         }
9295
9296         spin_unlock_irq(&phba->hbalock);
9297         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9298                         "0402 Cannot find virtual addr for buffer tag on "
9299                         "ring %d Data x%lx x%p x%p x%x\n",
9300                         pring->ringno, (unsigned long) tag,
9301                         slp->next, slp->prev, pring->postbufq_cnt);
9302
9303         return NULL;
9304 }
9305
9306 /**
9307  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
9308  * @phba: Pointer to HBA context object.
9309  * @pring: Pointer to driver SLI ring object.
9310  * @phys: DMA address of the buffer.
9311  *
9312  * This function searches the buffer list using the dma_address
9313  * of unsolicited event to find the driver's lpfc_dmabuf object
9314  * corresponding to the dma_address. The function returns the
9315  * lpfc_dmabuf object if a buffer is found else it returns NULL.
9316  * This function is called by the ct and els unsolicited event
9317  * handlers to get the buffer associated with the unsolicited
9318  * event.
9319  *
9320  * This function is called with no lock held.
9321  **/
9322 struct lpfc_dmabuf *
9323 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9324                          dma_addr_t phys)
9325 {
9326         struct lpfc_dmabuf *mp, *next_mp;
9327         struct list_head *slp = &pring->postbufq;
9328
9329         /* Search postbufq, from the beginning, looking for a match on phys */
9330         spin_lock_irq(&phba->hbalock);
9331         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9332                 if (mp->phys == phys) {
9333                         list_del_init(&mp->list);
9334                         pring->postbufq_cnt--;
9335                         spin_unlock_irq(&phba->hbalock);
9336                         return mp;
9337                 }
9338         }
9339
9340         spin_unlock_irq(&phba->hbalock);
9341         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9342                         "0410 Cannot find virtual addr for mapped buf on "
9343                         "ring %d Data x%llx x%p x%p x%x\n",
9344                         pring->ringno, (unsigned long long)phys,
9345                         slp->next, slp->prev, pring->postbufq_cnt);
9346         return NULL;
9347 }
9348
9349 /**
9350  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
9351  * @phba: Pointer to HBA context object.
9352  * @cmdiocb: Pointer to driver command iocb object.
9353  * @rspiocb: Pointer to driver response iocb object.
9354  *
9355  * This function is the completion handler for the abort iocbs for
9356  * ELS commands. This function is called from the ELS ring event
9357  * handler with no lock held. This function frees memory resources
9358  * associated with the abort iocb.
9359  **/
9360 static void
9361 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9362                         struct lpfc_iocbq *rspiocb)
9363 {
9364         IOCB_t *irsp = &rspiocb->iocb;
9365         uint16_t abort_iotag, abort_context;
9366         struct lpfc_iocbq *abort_iocb = NULL;
9367
9368         if (irsp->ulpStatus) {
9369
9370                 /*
9371                  * Assume that the port already completed and returned, or
9372                  * will return the iocb. Just Log the message.
9373                  */
9374                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9375                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9376
9377                 spin_lock_irq(&phba->hbalock);
9378                 if (phba->sli_rev < LPFC_SLI_REV4) {
9379                         if (abort_iotag != 0 &&
9380                                 abort_iotag <= phba->sli.last_iotag)
9381                                 abort_iocb =
9382                                         phba->sli.iocbq_lookup[abort_iotag];
9383                 } else
9384                         /* For sli4 the abort_tag is the XRI,
9385                          * so the abort routine puts the iotag  of the iocb
9386                          * being aborted in the context field of the abort
9387                          * IOCB.
9388                          */
9389                         abort_iocb = phba->sli.iocbq_lookup[abort_context];
9390
9391                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9392                                 "0327 Cannot abort els iocb %p "
9393                                 "with tag %x context %x, abort status %x, "
9394                                 "abort code %x\n",
9395                                 abort_iocb, abort_iotag, abort_context,
9396                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
9397
9398                 spin_unlock_irq(&phba->hbalock);
9399         }
9400         lpfc_sli_release_iocbq(phba, cmdiocb);
9401         return;
9402 }
9403
9404 /**
9405  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
9406  * @phba: Pointer to HBA context object.
9407  * @cmdiocb: Pointer to driver command iocb object.
9408  * @rspiocb: Pointer to driver response iocb object.
9409  *
9410  * The function is called from SLI ring event handler with no
9411  * lock held. This function is the completion handler for ELS commands
9412  * which are aborted. The function frees memory resources used for
9413  * the aborted ELS commands.
9414  **/
9415 static void
9416 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9417                      struct lpfc_iocbq *rspiocb)
9418 {
9419         IOCB_t *irsp = &rspiocb->iocb;
9420
9421         /* ELS cmd tag <ulpIoTag> completes */
9422         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9423                         "0139 Ignoring ELS cmd tag x%x completion Data: "
9424                         "x%x x%x x%x\n",
9425                         irsp->ulpIoTag, irsp->ulpStatus,
9426                         irsp->un.ulpWord[4], irsp->ulpTimeout);
9427         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9428                 lpfc_ct_free_iocb(phba, cmdiocb);
9429         else
9430                 lpfc_els_free_iocb(phba, cmdiocb);
9431         return;
9432 }
9433
9434 /**
9435  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
9436  * @phba: Pointer to HBA context object.
9437  * @pring: Pointer to driver SLI ring object.
9438  * @cmdiocb: Pointer to driver command iocb object.
9439  *
9440  * This function issues an abort iocb for the provided command iocb down to
9441  * the port. Other than the case the outstanding command iocb is an abort
9442  * request, this function issues abort out unconditionally. This function is
9443  * called with hbalock held. The function returns 0 when it fails due to
9444  * memory allocation failure or when the command iocb is an abort request.
9445  **/
9446 static int
9447 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9448                            struct lpfc_iocbq *cmdiocb)
9449 {
9450         struct lpfc_vport *vport = cmdiocb->vport;
9451         struct lpfc_iocbq *abtsiocbp;
9452         IOCB_t *icmd = NULL;
9453         IOCB_t *iabt = NULL;
9454         int retval;
9455         unsigned long iflags;
9456
9457         /*
9458          * There are certain command types we don't want to abort.  And we
9459          * don't want to abort commands that are already in the process of
9460          * being aborted.
9461          */
9462         icmd = &cmdiocb->iocb;
9463         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9464             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9465             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9466                 return 0;
9467
9468         /* issue ABTS for this IOCB based on iotag */
9469         abtsiocbp = __lpfc_sli_get_iocbq(phba);
9470         if (abtsiocbp == NULL)
9471                 return 0;
9472
9473         /* This signals the response to set the correct status
9474          * before calling the completion handler
9475          */
9476         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9477
9478         iabt = &abtsiocbp->iocb;
9479         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9480         iabt->un.acxri.abortContextTag = icmd->ulpContext;
9481         if (phba->sli_rev == LPFC_SLI_REV4) {
9482                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
9483                 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9484         }
9485         else
9486                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
9487         iabt->ulpLe = 1;
9488         iabt->ulpClass = icmd->ulpClass;
9489
9490         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9491         abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
9492         if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9493                 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9494
9495         if (phba->link_state >= LPFC_LINK_UP)
9496                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9497         else
9498                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
9499
9500         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
9501
9502         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9503                          "0339 Abort xri x%x, original iotag x%x, "
9504                          "abort cmd iotag x%x\n",
9505                          iabt->un.acxri.abortIoTag,
9506                          iabt->un.acxri.abortContextTag,
9507                          abtsiocbp->iotag);
9508
9509         if (phba->sli_rev == LPFC_SLI_REV4) {
9510                 /* Note: both hbalock and ring_lock need to be set here */
9511                 spin_lock_irqsave(&pring->ring_lock, iflags);
9512                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9513                         abtsiocbp, 0);
9514                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9515         } else {
9516                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9517                         abtsiocbp, 0);
9518         }
9519
9520         if (retval)
9521                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
9522
9523         /*
9524          * Caller to this routine should check for IOCB_ERROR
9525          * and handle it properly.  This routine no longer removes
9526          * iocb off txcmplq and call compl in case of IOCB_ERROR.
9527          */
9528         return retval;
9529 }
9530
9531 /**
9532  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9533  * @phba: Pointer to HBA context object.
9534  * @pring: Pointer to driver SLI ring object.
9535  * @cmdiocb: Pointer to driver command iocb object.
9536  *
9537  * This function issues an abort iocb for the provided command iocb. In case
9538  * of unloading, the abort iocb will not be issued to commands on the ELS
9539  * ring. Instead, the callback function shall be changed to those commands
9540  * so that nothing happens when them finishes. This function is called with
9541  * hbalock held. The function returns 0 when the command iocb is an abort
9542  * request.
9543  **/
9544 int
9545 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9546                            struct lpfc_iocbq *cmdiocb)
9547 {
9548         struct lpfc_vport *vport = cmdiocb->vport;
9549         int retval = IOCB_ERROR;
9550         IOCB_t *icmd = NULL;
9551
9552         /*
9553          * There are certain command types we don't want to abort.  And we
9554          * don't want to abort commands that are already in the process of
9555          * being aborted.
9556          */
9557         icmd = &cmdiocb->iocb;
9558         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9559             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9560             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9561                 return 0;
9562
9563         /*
9564          * If we're unloading, don't abort iocb on the ELS ring, but change
9565          * the callback so that nothing happens when it finishes.
9566          */
9567         if ((vport->load_flag & FC_UNLOADING) &&
9568             (pring->ringno == LPFC_ELS_RING)) {
9569                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9570                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9571                 else
9572                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9573                 goto abort_iotag_exit;
9574         }
9575
9576         /* Now, we try to issue the abort to the cmdiocb out */
9577         retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9578
9579 abort_iotag_exit:
9580         /*
9581          * Caller to this routine should check for IOCB_ERROR
9582          * and handle it properly.  This routine no longer removes
9583          * iocb off txcmplq and call compl in case of IOCB_ERROR.
9584          */
9585         return retval;
9586 }
9587
9588 /**
9589  * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9590  * @phba: Pointer to HBA context object.
9591  * @pring: Pointer to driver SLI ring object.
9592  *
9593  * This function aborts all iocbs in the given ring and frees all the iocb
9594  * objects in txq. This function issues abort iocbs unconditionally for all
9595  * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
9596  * to complete before the return of this function. The caller is not required
9597  * to hold any locks.
9598  **/
9599 static void
9600 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9601 {
9602         LIST_HEAD(completions);
9603         struct lpfc_iocbq *iocb, *next_iocb;
9604
9605         if (pring->ringno == LPFC_ELS_RING)
9606                 lpfc_fabric_abort_hba(phba);
9607
9608         spin_lock_irq(&phba->hbalock);
9609
9610         /* Take off all the iocbs on txq for cancelling */
9611         list_splice_init(&pring->txq, &completions);
9612         pring->txq_cnt = 0;
9613
9614         /* Next issue ABTS for everything on the txcmplq */
9615         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9616                 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9617
9618         spin_unlock_irq(&phba->hbalock);
9619
9620         /* Cancel all the IOCBs from the completions list */
9621         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9622                               IOERR_SLI_ABORTED);
9623 }
9624
9625 /**
9626  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9627  * @phba: pointer to lpfc HBA data structure.
9628  *
9629  * This routine will abort all pending and outstanding iocbs to an HBA.
9630  **/
9631 void
9632 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9633 {
9634         struct lpfc_sli *psli = &phba->sli;
9635         struct lpfc_sli_ring *pring;
9636         int i;
9637
9638         for (i = 0; i < psli->num_rings; i++) {
9639                 pring = &psli->ring[i];
9640                 lpfc_sli_iocb_ring_abort(phba, pring);
9641         }
9642 }
9643
9644 /**
9645  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
9646  * @iocbq: Pointer to driver iocb object.
9647  * @vport: Pointer to driver virtual port object.
9648  * @tgt_id: SCSI ID of the target.
9649  * @lun_id: LUN ID of the scsi device.
9650  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
9651  *
9652  * This function acts as an iocb filter for functions which abort or count
9653  * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
9654  * 0 if the filtering criteria is met for the given iocb and will return
9655  * 1 if the filtering criteria is not met.
9656  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
9657  * given iocb is for the SCSI device specified by vport, tgt_id and
9658  * lun_id parameter.
9659  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
9660  * given iocb is for the SCSI target specified by vport and tgt_id
9661  * parameters.
9662  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
9663  * given iocb is for the SCSI host associated with the given vport.
9664  * This function is called with no locks held.
9665  **/
9666 static int
9667 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9668                            uint16_t tgt_id, uint64_t lun_id,
9669                            lpfc_ctx_cmd ctx_cmd)
9670 {
9671         struct lpfc_scsi_buf *lpfc_cmd;
9672         int rc = 1;
9673
9674         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
9675                 return rc;
9676
9677         if (iocbq->vport != vport)
9678                 return rc;
9679
9680         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
9681
9682         if (lpfc_cmd->pCmd == NULL)
9683                 return rc;
9684
9685         switch (ctx_cmd) {
9686         case LPFC_CTX_LUN:
9687                 if ((lpfc_cmd->rdata->pnode) &&
9688                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
9689                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
9690                         rc = 0;
9691                 break;
9692         case LPFC_CTX_TGT:
9693                 if ((lpfc_cmd->rdata->pnode) &&
9694                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
9695                         rc = 0;
9696                 break;
9697         case LPFC_CTX_HOST:
9698                 rc = 0;
9699                 break;
9700         default:
9701                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
9702                         __func__, ctx_cmd);
9703                 break;
9704         }
9705
9706         return rc;
9707 }
9708
9709 /**
9710  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
9711  * @vport: Pointer to virtual port.
9712  * @tgt_id: SCSI ID of the target.
9713  * @lun_id: LUN ID of the scsi device.
9714  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9715  *
9716  * This function returns number of FCP commands pending for the vport.
9717  * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
9718  * commands pending on the vport associated with SCSI device specified
9719  * by tgt_id and lun_id parameters.
9720  * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
9721  * commands pending on the vport associated with SCSI target specified
9722  * by tgt_id parameter.
9723  * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
9724  * commands pending on the vport.
9725  * This function returns the number of iocbs which satisfy the filter.
9726  * This function is called without any lock held.
9727  **/
9728 int
9729 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
9730                   lpfc_ctx_cmd ctx_cmd)
9731 {
9732         struct lpfc_hba *phba = vport->phba;
9733         struct lpfc_iocbq *iocbq;
9734         int sum, i;
9735
9736         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
9737                 iocbq = phba->sli.iocbq_lookup[i];
9738
9739                 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
9740                                                 ctx_cmd) == 0)
9741                         sum++;
9742         }
9743
9744         return sum;
9745 }
9746
9747 /**
9748  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
9749  * @phba: Pointer to HBA context object
9750  * @cmdiocb: Pointer to command iocb object.
9751  * @rspiocb: Pointer to response iocb object.
9752  *
9753  * This function is called when an aborted FCP iocb completes. This
9754  * function is called by the ring event handler with no lock held.
9755  * This function frees the iocb.
9756  **/
9757 void
9758 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9759                         struct lpfc_iocbq *rspiocb)
9760 {
9761         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9762                         "3096 ABORT_XRI_CN completing on xri x%x "
9763                         "original iotag x%x, abort cmd iotag x%x "
9764                         "status 0x%x, reason 0x%x\n",
9765                         cmdiocb->iocb.un.acxri.abortContextTag,
9766                         cmdiocb->iocb.un.acxri.abortIoTag,
9767                         cmdiocb->iotag, rspiocb->iocb.ulpStatus,
9768                         rspiocb->iocb.un.ulpWord[4]);
9769         lpfc_sli_release_iocbq(phba, cmdiocb);
9770         return;
9771 }
9772
9773 /**
9774  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
9775  * @vport: Pointer to virtual port.
9776  * @pring: Pointer to driver SLI ring object.
9777  * @tgt_id: SCSI ID of the target.
9778  * @lun_id: LUN ID of the scsi device.
9779  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9780  *
9781  * This function sends an abort command for every SCSI command
9782  * associated with the given virtual port pending on the ring
9783  * filtered by lpfc_sli_validate_fcp_iocb function.
9784  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
9785  * FCP iocbs associated with lun specified by tgt_id and lun_id
9786  * parameters
9787  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
9788  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
9789  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
9790  * FCP iocbs associated with virtual port.
9791  * This function returns number of iocbs it failed to abort.
9792  * This function is called with no locks held.
9793  **/
9794 int
9795 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9796                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
9797 {
9798         struct lpfc_hba *phba = vport->phba;
9799         struct lpfc_iocbq *iocbq;
9800         struct lpfc_iocbq *abtsiocb;
9801         IOCB_t *cmd = NULL;
9802         int errcnt = 0, ret_val = 0;
9803         int i;
9804
9805         for (i = 1; i <= phba->sli.last_iotag; i++) {
9806                 iocbq = phba->sli.iocbq_lookup[i];
9807
9808                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
9809                                                abort_cmd) != 0)
9810                         continue;
9811
9812                 /* issue ABTS for this IOCB based on iotag */
9813                 abtsiocb = lpfc_sli_get_iocbq(phba);
9814                 if (abtsiocb == NULL) {
9815                         errcnt++;
9816                         continue;
9817                 }
9818
9819                 cmd = &iocbq->iocb;
9820                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9821                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
9822                 if (phba->sli_rev == LPFC_SLI_REV4)
9823                         abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
9824                 else
9825                         abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
9826                 abtsiocb->iocb.ulpLe = 1;
9827                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
9828                 abtsiocb->vport = phba->pport;
9829
9830                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9831                 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
9832                 if (iocbq->iocb_flag & LPFC_IO_FCP)
9833                         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9834
9835                 if (lpfc_is_link_up(phba))
9836                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
9837                 else
9838                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
9839
9840                 /* Setup callback routine and issue the command. */
9841                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
9842                 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
9843                                               abtsiocb, 0);
9844                 if (ret_val == IOCB_ERROR) {
9845                         lpfc_sli_release_iocbq(phba, abtsiocb);
9846                         errcnt++;
9847                         continue;
9848                 }
9849         }
9850
9851         return errcnt;
9852 }
9853
9854 /**
9855  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
9856  * @phba: Pointer to HBA context object.
9857  * @cmdiocbq: Pointer to command iocb.
9858  * @rspiocbq: Pointer to response iocb.
9859  *
9860  * This function is the completion handler for iocbs issued using
9861  * lpfc_sli_issue_iocb_wait function. This function is called by the
9862  * ring event handler function without any lock held. This function
9863  * can be called from both worker thread context and interrupt
9864  * context. This function also can be called from other thread which
9865  * cleans up the SLI layer objects.
9866  * This function copy the contents of the response iocb to the
9867  * response iocb memory object provided by the caller of
9868  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
9869  * sleeps for the iocb completion.
9870  **/
9871 static void
9872 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9873                         struct lpfc_iocbq *cmdiocbq,
9874                         struct lpfc_iocbq *rspiocbq)
9875 {
9876         wait_queue_head_t *pdone_q;
9877         unsigned long iflags;
9878         struct lpfc_scsi_buf *lpfc_cmd;
9879
9880         spin_lock_irqsave(&phba->hbalock, iflags);
9881         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9882         if (cmdiocbq->context2 && rspiocbq)
9883                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
9884                        &rspiocbq->iocb, sizeof(IOCB_t));
9885
9886         /* Set the exchange busy flag for task management commands */
9887         if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
9888                 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
9889                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
9890                         cur_iocbq);
9891                 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
9892         }
9893
9894         pdone_q = cmdiocbq->context_un.wait_queue;
9895         if (pdone_q)
9896                 wake_up(pdone_q);
9897         spin_unlock_irqrestore(&phba->hbalock, iflags);
9898         return;
9899 }
9900
9901 /**
9902  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
9903  * @phba: Pointer to HBA context object..
9904  * @piocbq: Pointer to command iocb.
9905  * @flag: Flag to test.
9906  *
9907  * This routine grabs the hbalock and then test the iocb_flag to
9908  * see if the passed in flag is set.
9909  * Returns:
9910  * 1 if flag is set.
9911  * 0 if flag is not set.
9912  **/
9913 static int
9914 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9915                  struct lpfc_iocbq *piocbq, uint32_t flag)
9916 {
9917         unsigned long iflags;
9918         int ret;
9919
9920         spin_lock_irqsave(&phba->hbalock, iflags);
9921         ret = piocbq->iocb_flag & flag;
9922         spin_unlock_irqrestore(&phba->hbalock, iflags);
9923         return ret;
9924
9925 }
9926
9927 /**
9928  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
9929  * @phba: Pointer to HBA context object..
9930  * @pring: Pointer to sli ring.
9931  * @piocb: Pointer to command iocb.
9932  * @prspiocbq: Pointer to response iocb.
9933  * @timeout: Timeout in number of seconds.
9934  *
9935  * This function issues the iocb to firmware and waits for the
9936  * iocb to complete. If the iocb command is not
9937  * completed within timeout seconds, it returns IOCB_TIMEDOUT.
9938  * Caller should not free the iocb resources if this function
9939  * returns IOCB_TIMEDOUT.
9940  * The function waits for the iocb completion using an
9941  * non-interruptible wait.
9942  * This function will sleep while waiting for iocb completion.
9943  * So, this function should not be called from any context which
9944  * does not allow sleeping. Due to the same reason, this function
9945  * cannot be called with interrupt disabled.
9946  * This function assumes that the iocb completions occur while
9947  * this function sleep. So, this function cannot be called from
9948  * the thread which process iocb completion for this ring.
9949  * This function clears the iocb_flag of the iocb object before
9950  * issuing the iocb and the iocb completion handler sets this
9951  * flag and wakes this thread when the iocb completes.
9952  * The contents of the response iocb will be copied to prspiocbq
9953  * by the completion handler when the command completes.
9954  * This function returns IOCB_SUCCESS when success.
9955  * This function is called with no lock held.
9956  **/
9957 int
9958 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
9959                          uint32_t ring_number,
9960                          struct lpfc_iocbq *piocb,
9961                          struct lpfc_iocbq *prspiocbq,
9962                          uint32_t timeout)
9963 {
9964         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
9965         long timeleft, timeout_req = 0;
9966         int retval = IOCB_SUCCESS;
9967         uint32_t creg_val;
9968         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
9969         /*
9970          * If the caller has provided a response iocbq buffer, then context2
9971          * is NULL or its an error.
9972          */
9973         if (prspiocbq) {
9974                 if (piocb->context2)
9975                         return IOCB_ERROR;
9976                 piocb->context2 = prspiocbq;
9977         }
9978
9979         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
9980         piocb->context_un.wait_queue = &done_q;
9981         piocb->iocb_flag &= ~LPFC_IO_WAKE;
9982
9983         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9984                 if (lpfc_readl(phba->HCregaddr, &creg_val))
9985                         return IOCB_ERROR;
9986                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
9987                 writel(creg_val, phba->HCregaddr);
9988                 readl(phba->HCregaddr); /* flush */
9989         }
9990
9991         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
9992                                      SLI_IOCB_RET_IOCB);
9993         if (retval == IOCB_SUCCESS) {
9994                 timeout_req = timeout * HZ;
9995                 timeleft = wait_event_timeout(done_q,
9996                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
9997                                 timeout_req);
9998
9999                 if (piocb->iocb_flag & LPFC_IO_WAKE) {
10000                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10001                                         "0331 IOCB wake signaled\n");
10002                 } else if (timeleft == 0) {
10003                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10004                                         "0338 IOCB wait timeout error - no "
10005                                         "wake response Data x%x\n", timeout);
10006                         retval = IOCB_TIMEDOUT;
10007                 } else {
10008                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10009                                         "0330 IOCB wake NOT set, "
10010                                         "Data x%x x%lx\n",
10011                                         timeout, (timeleft / jiffies));
10012                         retval = IOCB_TIMEDOUT;
10013                 }
10014         } else if (retval == IOCB_BUSY) {
10015                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10016                         "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
10017                         phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
10018                 return retval;
10019         } else {
10020                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10021                                 "0332 IOCB wait issue failed, Data x%x\n",
10022                                 retval);
10023                 retval = IOCB_ERROR;
10024         }
10025
10026         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10027                 if (lpfc_readl(phba->HCregaddr, &creg_val))
10028                         return IOCB_ERROR;
10029                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
10030                 writel(creg_val, phba->HCregaddr);
10031                 readl(phba->HCregaddr); /* flush */
10032         }
10033
10034         if (prspiocbq)
10035                 piocb->context2 = NULL;
10036
10037         piocb->context_un.wait_queue = NULL;
10038         piocb->iocb_cmpl = NULL;
10039         return retval;
10040 }
10041
10042 /**
10043  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
10044  * @phba: Pointer to HBA context object.
10045  * @pmboxq: Pointer to driver mailbox object.
10046  * @timeout: Timeout in number of seconds.
10047  *
10048  * This function issues the mailbox to firmware and waits for the
10049  * mailbox command to complete. If the mailbox command is not
10050  * completed within timeout seconds, it returns MBX_TIMEOUT.
10051  * The function waits for the mailbox completion using an
10052  * interruptible wait. If the thread is woken up due to a
10053  * signal, MBX_TIMEOUT error is returned to the caller. Caller
10054  * should not free the mailbox resources, if this function returns
10055  * MBX_TIMEOUT.
10056  * This function will sleep while waiting for mailbox completion.
10057  * So, this function should not be called from any context which
10058  * does not allow sleeping. Due to the same reason, this function
10059  * cannot be called with interrupt disabled.
10060  * This function assumes that the mailbox completion occurs while
10061  * this function sleep. So, this function cannot be called from
10062  * the worker thread which processes mailbox completion.
10063  * This function is called in the context of HBA management
10064  * applications.
10065  * This function returns MBX_SUCCESS when successful.
10066  * This function is called with no lock held.
10067  **/
10068 int
10069 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10070                          uint32_t timeout)
10071 {
10072         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10073         int retval;
10074         unsigned long flag;
10075
10076         /* The caller must leave context1 empty. */
10077         if (pmboxq->context1)
10078                 return MBX_NOT_FINISHED;
10079
10080         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
10081         /* setup wake call as IOCB callback */
10082         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
10083         /* setup context field to pass wait_queue pointer to wake function  */
10084         pmboxq->context1 = &done_q;
10085
10086         /* now issue the command */
10087         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
10088         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10089                 wait_event_interruptible_timeout(done_q,
10090                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10091                                 timeout * HZ);
10092
10093                 spin_lock_irqsave(&phba->hbalock, flag);
10094                 pmboxq->context1 = NULL;
10095                 /*
10096                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
10097                  * else do not free the resources.
10098                  */
10099                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
10100                         retval = MBX_SUCCESS;
10101                         lpfc_sli4_swap_str(phba, pmboxq);
10102                 } else {
10103                         retval = MBX_TIMEOUT;
10104                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10105                 }
10106                 spin_unlock_irqrestore(&phba->hbalock, flag);
10107         }
10108
10109         return retval;
10110 }
10111
10112 /**
10113  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
10114  * @phba: Pointer to HBA context.
10115  *
10116  * This function is called to shutdown the driver's mailbox sub-system.
10117  * It first marks the mailbox sub-system is in a block state to prevent
10118  * the asynchronous mailbox command from issued off the pending mailbox
10119  * command queue. If the mailbox command sub-system shutdown is due to
10120  * HBA error conditions such as EEH or ERATT, this routine shall invoke
10121  * the mailbox sub-system flush routine to forcefully bring down the
10122  * mailbox sub-system. Otherwise, if it is due to normal condition (such
10123  * as with offline or HBA function reset), this routine will wait for the
10124  * outstanding mailbox command to complete before invoking the mailbox
10125  * sub-system flush routine to gracefully bring down mailbox sub-system.
10126  **/
10127 void
10128 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
10129 {
10130         struct lpfc_sli *psli = &phba->sli;
10131         unsigned long timeout;
10132
10133         if (mbx_action == LPFC_MBX_NO_WAIT) {
10134                 /* delay 100ms for port state */
10135                 msleep(100);
10136                 lpfc_sli_mbox_sys_flush(phba);
10137                 return;
10138         }
10139         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
10140
10141         spin_lock_irq(&phba->hbalock);
10142         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10143
10144         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
10145                 /* Determine how long we might wait for the active mailbox
10146                  * command to be gracefully completed by firmware.
10147                  */
10148                 if (phba->sli.mbox_active)
10149                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
10150                                                 phba->sli.mbox_active) *
10151                                                 1000) + jiffies;
10152                 spin_unlock_irq(&phba->hbalock);
10153
10154                 while (phba->sli.mbox_active) {
10155                         /* Check active mailbox complete status every 2ms */
10156                         msleep(2);
10157                         if (time_after(jiffies, timeout))
10158                                 /* Timeout, let the mailbox flush routine to
10159                                  * forcefully release active mailbox command
10160                                  */
10161                                 break;
10162                 }
10163         } else
10164                 spin_unlock_irq(&phba->hbalock);
10165
10166         lpfc_sli_mbox_sys_flush(phba);
10167 }
10168
10169 /**
10170  * lpfc_sli_eratt_read - read sli-3 error attention events
10171  * @phba: Pointer to HBA context.
10172  *
10173  * This function is called to read the SLI3 device error attention registers
10174  * for possible error attention events. The caller must hold the hostlock
10175  * with spin_lock_irq().
10176  *
10177  * This function returns 1 when there is Error Attention in the Host Attention
10178  * Register and returns 0 otherwise.
10179  **/
10180 static int
10181 lpfc_sli_eratt_read(struct lpfc_hba *phba)
10182 {
10183         uint32_t ha_copy;
10184
10185         /* Read chip Host Attention (HA) register */
10186         if (lpfc_readl(phba->HAregaddr, &ha_copy))
10187                 goto unplug_err;
10188
10189         if (ha_copy & HA_ERATT) {
10190                 /* Read host status register to retrieve error event */
10191                 if (lpfc_sli_read_hs(phba))
10192                         goto unplug_err;
10193
10194                 /* Check if there is a deferred error condition is active */
10195                 if ((HS_FFER1 & phba->work_hs) &&
10196                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10197                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
10198                         phba->hba_flag |= DEFER_ERATT;
10199                         /* Clear all interrupt enable conditions */
10200                         writel(0, phba->HCregaddr);
10201                         readl(phba->HCregaddr);
10202                 }
10203
10204                 /* Set the driver HA work bitmap */
10205                 phba->work_ha |= HA_ERATT;
10206                 /* Indicate polling handles this ERATT */
10207                 phba->hba_flag |= HBA_ERATT_HANDLED;
10208                 return 1;
10209         }
10210         return 0;
10211
10212 unplug_err:
10213         /* Set the driver HS work bitmap */
10214         phba->work_hs |= UNPLUG_ERR;
10215         /* Set the driver HA work bitmap */
10216         phba->work_ha |= HA_ERATT;
10217         /* Indicate polling handles this ERATT */
10218         phba->hba_flag |= HBA_ERATT_HANDLED;
10219         return 1;
10220 }
10221
10222 /**
10223  * lpfc_sli4_eratt_read - read sli-4 error attention events
10224  * @phba: Pointer to HBA context.
10225  *
10226  * This function is called to read the SLI4 device error attention registers
10227  * for possible error attention events. The caller must hold the hostlock
10228  * with spin_lock_irq().
10229  *
10230  * This function returns 1 when there is Error Attention in the Host Attention
10231  * Register and returns 0 otherwise.
10232  **/
10233 static int
10234 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10235 {
10236         uint32_t uerr_sta_hi, uerr_sta_lo;
10237         uint32_t if_type, portsmphr;
10238         struct lpfc_register portstat_reg;
10239
10240         /*
10241          * For now, use the SLI4 device internal unrecoverable error
10242          * registers for error attention. This can be changed later.
10243          */
10244         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10245         switch (if_type) {
10246         case LPFC_SLI_INTF_IF_TYPE_0:
10247                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10248                         &uerr_sta_lo) ||
10249                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10250                         &uerr_sta_hi)) {
10251                         phba->work_hs |= UNPLUG_ERR;
10252                         phba->work_ha |= HA_ERATT;
10253                         phba->hba_flag |= HBA_ERATT_HANDLED;
10254                         return 1;
10255                 }
10256                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10257                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10258                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10259                                         "1423 HBA Unrecoverable error: "
10260                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10261                                         "ue_mask_lo_reg=0x%x, "
10262                                         "ue_mask_hi_reg=0x%x\n",
10263                                         uerr_sta_lo, uerr_sta_hi,
10264                                         phba->sli4_hba.ue_mask_lo,
10265                                         phba->sli4_hba.ue_mask_hi);
10266                         phba->work_status[0] = uerr_sta_lo;
10267                         phba->work_status[1] = uerr_sta_hi;
10268                         phba->work_ha |= HA_ERATT;
10269                         phba->hba_flag |= HBA_ERATT_HANDLED;
10270                         return 1;
10271                 }
10272                 break;
10273         case LPFC_SLI_INTF_IF_TYPE_2:
10274                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10275                         &portstat_reg.word0) ||
10276                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10277                         &portsmphr)){
10278                         phba->work_hs |= UNPLUG_ERR;
10279                         phba->work_ha |= HA_ERATT;
10280                         phba->hba_flag |= HBA_ERATT_HANDLED;
10281                         return 1;
10282                 }
10283                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10284                         phba->work_status[0] =
10285                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10286                         phba->work_status[1] =
10287                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10288                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10289                                         "2885 Port Status Event: "
10290                                         "port status reg 0x%x, "
10291                                         "port smphr reg 0x%x, "
10292                                         "error 1=0x%x, error 2=0x%x\n",
10293                                         portstat_reg.word0,
10294                                         portsmphr,
10295                                         phba->work_status[0],
10296                                         phba->work_status[1]);
10297                         phba->work_ha |= HA_ERATT;
10298                         phba->hba_flag |= HBA_ERATT_HANDLED;
10299                         return 1;
10300                 }
10301                 break;
10302         case LPFC_SLI_INTF_IF_TYPE_1:
10303         default:
10304                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10305                                 "2886 HBA Error Attention on unsupported "
10306                                 "if type %d.", if_type);
10307                 return 1;
10308         }
10309
10310         return 0;
10311 }
10312
10313 /**
10314  * lpfc_sli_check_eratt - check error attention events
10315  * @phba: Pointer to HBA context.
10316  *
10317  * This function is called from timer soft interrupt context to check HBA's
10318  * error attention register bit for error attention events.
10319  *
10320  * This function returns 1 when there is Error Attention in the Host Attention
10321  * Register and returns 0 otherwise.
10322  **/
10323 int
10324 lpfc_sli_check_eratt(struct lpfc_hba *phba)
10325 {
10326         uint32_t ha_copy;
10327
10328         /* If somebody is waiting to handle an eratt, don't process it
10329          * here. The brdkill function will do this.
10330          */
10331         if (phba->link_flag & LS_IGNORE_ERATT)
10332                 return 0;
10333
10334         /* Check if interrupt handler handles this ERATT */
10335         spin_lock_irq(&phba->hbalock);
10336         if (phba->hba_flag & HBA_ERATT_HANDLED) {
10337                 /* Interrupt handler has handled ERATT */
10338                 spin_unlock_irq(&phba->hbalock);
10339                 return 0;
10340         }
10341
10342         /*
10343          * If there is deferred error attention, do not check for error
10344          * attention
10345          */
10346         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10347                 spin_unlock_irq(&phba->hbalock);
10348                 return 0;
10349         }
10350
10351         /* If PCI channel is offline, don't process it */
10352         if (unlikely(pci_channel_offline(phba->pcidev))) {
10353                 spin_unlock_irq(&phba->hbalock);
10354                 return 0;
10355         }
10356
10357         switch (phba->sli_rev) {
10358         case LPFC_SLI_REV2:
10359         case LPFC_SLI_REV3:
10360                 /* Read chip Host Attention (HA) register */
10361                 ha_copy = lpfc_sli_eratt_read(phba);
10362                 break;
10363         case LPFC_SLI_REV4:
10364                 /* Read device Uncoverable Error (UERR) registers */
10365                 ha_copy = lpfc_sli4_eratt_read(phba);
10366                 break;
10367         default:
10368                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10369                                 "0299 Invalid SLI revision (%d)\n",
10370                                 phba->sli_rev);
10371                 ha_copy = 0;
10372                 break;
10373         }
10374         spin_unlock_irq(&phba->hbalock);
10375
10376         return ha_copy;
10377 }
10378
10379 /**
10380  * lpfc_intr_state_check - Check device state for interrupt handling
10381  * @phba: Pointer to HBA context.
10382  *
10383  * This inline routine checks whether a device or its PCI slot is in a state
10384  * that the interrupt should be handled.
10385  *
10386  * This function returns 0 if the device or the PCI slot is in a state that
10387  * interrupt should be handled, otherwise -EIO.
10388  */
10389 static inline int
10390 lpfc_intr_state_check(struct lpfc_hba *phba)
10391 {
10392         /* If the pci channel is offline, ignore all the interrupts */
10393         if (unlikely(pci_channel_offline(phba->pcidev)))
10394                 return -EIO;
10395
10396         /* Update device level interrupt statistics */
10397         phba->sli.slistat.sli_intr++;
10398
10399         /* Ignore all interrupts during initialization. */
10400         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10401                 return -EIO;
10402
10403         return 0;
10404 }
10405
10406 /**
10407  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
10408  * @irq: Interrupt number.
10409  * @dev_id: The device context pointer.
10410  *
10411  * This function is directly called from the PCI layer as an interrupt
10412  * service routine when device with SLI-3 interface spec is enabled with
10413  * MSI-X multi-message interrupt mode and there are slow-path events in
10414  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
10415  * interrupt mode, this function is called as part of the device-level
10416  * interrupt handler. When the PCI slot is in error recovery or the HBA
10417  * is undergoing initialization, the interrupt handler will not process
10418  * the interrupt. The link attention and ELS ring attention events are
10419  * handled by the worker thread. The interrupt handler signals the worker
10420  * thread and returns for these events. This function is called without
10421  * any lock held. It gets the hbalock to access and update SLI data
10422  * structures.
10423  *
10424  * This function returns IRQ_HANDLED when interrupt is handled else it
10425  * returns IRQ_NONE.
10426  **/
10427 irqreturn_t
10428 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
10429 {
10430         struct lpfc_hba  *phba;
10431         uint32_t ha_copy, hc_copy;
10432         uint32_t work_ha_copy;
10433         unsigned long status;
10434         unsigned long iflag;
10435         uint32_t control;
10436
10437         MAILBOX_t *mbox, *pmbox;
10438         struct lpfc_vport *vport;
10439         struct lpfc_nodelist *ndlp;
10440         struct lpfc_dmabuf *mp;
10441         LPFC_MBOXQ_t *pmb;
10442         int rc;
10443
10444         /*
10445          * Get the driver's phba structure from the dev_id and
10446          * assume the HBA is not interrupting.
10447          */
10448         phba = (struct lpfc_hba *)dev_id;
10449
10450         if (unlikely(!phba))
10451                 return IRQ_NONE;
10452
10453         /*
10454          * Stuff needs to be attented to when this function is invoked as an
10455          * individual interrupt handler in MSI-X multi-message interrupt mode
10456          */
10457         if (phba->intr_type == MSIX) {
10458                 /* Check device state for handling interrupt */
10459                 if (lpfc_intr_state_check(phba))
10460                         return IRQ_NONE;
10461                 /* Need to read HA REG for slow-path events */
10462                 spin_lock_irqsave(&phba->hbalock, iflag);
10463                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10464                         goto unplug_error;
10465                 /* If somebody is waiting to handle an eratt don't process it
10466                  * here. The brdkill function will do this.
10467                  */
10468                 if (phba->link_flag & LS_IGNORE_ERATT)
10469                         ha_copy &= ~HA_ERATT;
10470                 /* Check the need for handling ERATT in interrupt handler */
10471                 if (ha_copy & HA_ERATT) {
10472                         if (phba->hba_flag & HBA_ERATT_HANDLED)
10473                                 /* ERATT polling has handled ERATT */
10474                                 ha_copy &= ~HA_ERATT;
10475                         else
10476                                 /* Indicate interrupt handler handles ERATT */
10477                                 phba->hba_flag |= HBA_ERATT_HANDLED;
10478                 }
10479
10480                 /*
10481                  * If there is deferred error attention, do not check for any
10482                  * interrupt.
10483                  */
10484                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10485                         spin_unlock_irqrestore(&phba->hbalock, iflag);
10486                         return IRQ_NONE;
10487                 }
10488
10489                 /* Clear up only attention source related to slow-path */
10490                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
10491                         goto unplug_error;
10492
10493                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
10494                         HC_LAINT_ENA | HC_ERINT_ENA),
10495                         phba->HCregaddr);
10496                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
10497                         phba->HAregaddr);
10498                 writel(hc_copy, phba->HCregaddr);
10499                 readl(phba->HAregaddr); /* flush */
10500                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10501         } else
10502                 ha_copy = phba->ha_copy;
10503
10504         work_ha_copy = ha_copy & phba->work_ha_mask;
10505
10506         if (work_ha_copy) {
10507                 if (work_ha_copy & HA_LATT) {
10508                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
10509                                 /*
10510                                  * Turn off Link Attention interrupts
10511                                  * until CLEAR_LA done
10512                                  */
10513                                 spin_lock_irqsave(&phba->hbalock, iflag);
10514                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
10515                                 if (lpfc_readl(phba->HCregaddr, &control))
10516                                         goto unplug_error;
10517                                 control &= ~HC_LAINT_ENA;
10518                                 writel(control, phba->HCregaddr);
10519                                 readl(phba->HCregaddr); /* flush */
10520                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10521                         }
10522                         else
10523                                 work_ha_copy &= ~HA_LATT;
10524                 }
10525
10526                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
10527                         /*
10528                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
10529                          * the only slow ring.
10530                          */
10531                         status = (work_ha_copy &
10532                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
10533                         status >>= (4*LPFC_ELS_RING);
10534                         if (status & HA_RXMASK) {
10535                                 spin_lock_irqsave(&phba->hbalock, iflag);
10536                                 if (lpfc_readl(phba->HCregaddr, &control))
10537                                         goto unplug_error;
10538
10539                                 lpfc_debugfs_slow_ring_trc(phba,
10540                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
10541                                 control, status,
10542                                 (uint32_t)phba->sli.slistat.sli_intr);
10543
10544                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
10545                                         lpfc_debugfs_slow_ring_trc(phba,
10546                                                 "ISR Disable ring:"
10547                                                 "pwork:x%x hawork:x%x wait:x%x",
10548                                                 phba->work_ha, work_ha_copy,
10549                                                 (uint32_t)((unsigned long)
10550                                                 &phba->work_waitq));
10551
10552                                         control &=
10553                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
10554                                         writel(control, phba->HCregaddr);
10555                                         readl(phba->HCregaddr); /* flush */
10556                                 }
10557                                 else {
10558                                         lpfc_debugfs_slow_ring_trc(phba,
10559                                                 "ISR slow ring:   pwork:"
10560                                                 "x%x hawork:x%x wait:x%x",
10561                                                 phba->work_ha, work_ha_copy,
10562                                                 (uint32_t)((unsigned long)
10563                                                 &phba->work_waitq));
10564                                 }
10565                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10566                         }
10567                 }
10568                 spin_lock_irqsave(&phba->hbalock, iflag);
10569                 if (work_ha_copy & HA_ERATT) {
10570                         if (lpfc_sli_read_hs(phba))
10571                                 goto unplug_error;
10572                         /*
10573                          * Check if there is a deferred error condition
10574                          * is active
10575                          */
10576                         if ((HS_FFER1 & phba->work_hs) &&
10577                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10578                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
10579                                   phba->work_hs)) {
10580                                 phba->hba_flag |= DEFER_ERATT;
10581                                 /* Clear all interrupt enable conditions */
10582                                 writel(0, phba->HCregaddr);
10583                                 readl(phba->HCregaddr);
10584                         }
10585                 }
10586
10587                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
10588                         pmb = phba->sli.mbox_active;
10589                         pmbox = &pmb->u.mb;
10590                         mbox = phba->mbox;
10591                         vport = pmb->vport;
10592
10593                         /* First check out the status word */
10594                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
10595                         if (pmbox->mbxOwner != OWN_HOST) {
10596                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10597                                 /*
10598                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
10599                                  * mbxStatus <status>
10600                                  */
10601                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10602                                                 LOG_SLI,
10603                                                 "(%d):0304 Stray Mailbox "
10604                                                 "Interrupt mbxCommand x%x "
10605                                                 "mbxStatus x%x\n",
10606                                                 (vport ? vport->vpi : 0),
10607                                                 pmbox->mbxCommand,
10608                                                 pmbox->mbxStatus);
10609                                 /* clear mailbox attention bit */
10610                                 work_ha_copy &= ~HA_MBATT;
10611                         } else {
10612                                 phba->sli.mbox_active = NULL;
10613                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10614                                 phba->last_completion_time = jiffies;
10615                                 del_timer(&phba->sli.mbox_tmo);
10616                                 if (pmb->mbox_cmpl) {
10617                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
10618                                                         MAILBOX_CMD_SIZE);
10619                                         if (pmb->out_ext_byte_len &&
10620                                                 pmb->context2)
10621                                                 lpfc_sli_pcimem_bcopy(
10622                                                 phba->mbox_ext,
10623                                                 pmb->context2,
10624                                                 pmb->out_ext_byte_len);
10625                                 }
10626                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10627                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10628
10629                                         lpfc_debugfs_disc_trc(vport,
10630                                                 LPFC_DISC_TRC_MBOX_VPORT,
10631                                                 "MBOX dflt rpi: : "
10632                                                 "status:x%x rpi:x%x",
10633                                                 (uint32_t)pmbox->mbxStatus,
10634                                                 pmbox->un.varWords[0], 0);
10635
10636                                         if (!pmbox->mbxStatus) {
10637                                                 mp = (struct lpfc_dmabuf *)
10638                                                         (pmb->context1);
10639                                                 ndlp = (struct lpfc_nodelist *)
10640                                                         pmb->context2;
10641
10642                                                 /* Reg_LOGIN of dflt RPI was
10643                                                  * successful. new lets get
10644                                                  * rid of the RPI using the
10645                                                  * same mbox buffer.
10646                                                  */
10647                                                 lpfc_unreg_login(phba,
10648                                                         vport->vpi,
10649                                                         pmbox->un.varWords[0],
10650                                                         pmb);
10651                                                 pmb->mbox_cmpl =
10652                                                         lpfc_mbx_cmpl_dflt_rpi;
10653                                                 pmb->context1 = mp;
10654                                                 pmb->context2 = ndlp;
10655                                                 pmb->vport = vport;
10656                                                 rc = lpfc_sli_issue_mbox(phba,
10657                                                                 pmb,
10658                                                                 MBX_NOWAIT);
10659                                                 if (rc != MBX_BUSY)
10660                                                         lpfc_printf_log(phba,
10661                                                         KERN_ERR,
10662                                                         LOG_MBOX | LOG_SLI,
10663                                                         "0350 rc should have"
10664                                                         "been MBX_BUSY\n");
10665                                                 if (rc != MBX_NOT_FINISHED)
10666                                                         goto send_current_mbox;
10667                                         }
10668                                 }
10669                                 spin_lock_irqsave(
10670                                                 &phba->pport->work_port_lock,
10671                                                 iflag);
10672                                 phba->pport->work_port_events &=
10673                                         ~WORKER_MBOX_TMO;
10674                                 spin_unlock_irqrestore(
10675                                                 &phba->pport->work_port_lock,
10676                                                 iflag);
10677                                 lpfc_mbox_cmpl_put(phba, pmb);
10678                         }
10679                 } else
10680                         spin_unlock_irqrestore(&phba->hbalock, iflag);
10681
10682                 if ((work_ha_copy & HA_MBATT) &&
10683                     (phba->sli.mbox_active == NULL)) {
10684 send_current_mbox:
10685                         /* Process next mailbox command if there is one */
10686                         do {
10687                                 rc = lpfc_sli_issue_mbox(phba, NULL,
10688                                                          MBX_NOWAIT);
10689                         } while (rc == MBX_NOT_FINISHED);
10690                         if (rc != MBX_SUCCESS)
10691                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10692                                                 LOG_SLI, "0349 rc should be "
10693                                                 "MBX_SUCCESS\n");
10694                 }
10695
10696                 spin_lock_irqsave(&phba->hbalock, iflag);
10697                 phba->work_ha |= work_ha_copy;
10698                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10699                 lpfc_worker_wake_up(phba);
10700         }
10701         return IRQ_HANDLED;
10702 unplug_error:
10703         spin_unlock_irqrestore(&phba->hbalock, iflag);
10704         return IRQ_HANDLED;
10705
10706 } /* lpfc_sli_sp_intr_handler */
10707
10708 /**
10709  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
10710  * @irq: Interrupt number.
10711  * @dev_id: The device context pointer.
10712  *
10713  * This function is directly called from the PCI layer as an interrupt
10714  * service routine when device with SLI-3 interface spec is enabled with
10715  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10716  * ring event in the HBA. However, when the device is enabled with either
10717  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10718  * device-level interrupt handler. When the PCI slot is in error recovery
10719  * or the HBA is undergoing initialization, the interrupt handler will not
10720  * process the interrupt. The SCSI FCP fast-path ring event are handled in
10721  * the intrrupt context. This function is called without any lock held.
10722  * It gets the hbalock to access and update SLI data structures.
10723  *
10724  * This function returns IRQ_HANDLED when interrupt is handled else it
10725  * returns IRQ_NONE.
10726  **/
10727 irqreturn_t
10728 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
10729 {
10730         struct lpfc_hba  *phba;
10731         uint32_t ha_copy;
10732         unsigned long status;
10733         unsigned long iflag;
10734
10735         /* Get the driver's phba structure from the dev_id and
10736          * assume the HBA is not interrupting.
10737          */
10738         phba = (struct lpfc_hba *) dev_id;
10739
10740         if (unlikely(!phba))
10741                 return IRQ_NONE;
10742
10743         /*
10744          * Stuff needs to be attented to when this function is invoked as an
10745          * individual interrupt handler in MSI-X multi-message interrupt mode
10746          */
10747         if (phba->intr_type == MSIX) {
10748                 /* Check device state for handling interrupt */
10749                 if (lpfc_intr_state_check(phba))
10750                         return IRQ_NONE;
10751                 /* Need to read HA REG for FCP ring and other ring events */
10752                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10753                         return IRQ_HANDLED;
10754                 /* Clear up only attention source related to fast-path */
10755                 spin_lock_irqsave(&phba->hbalock, iflag);
10756                 /*
10757                  * If there is deferred error attention, do not check for
10758                  * any interrupt.
10759                  */
10760                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10761                         spin_unlock_irqrestore(&phba->hbalock, iflag);
10762                         return IRQ_NONE;
10763                 }
10764                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
10765                         phba->HAregaddr);
10766                 readl(phba->HAregaddr); /* flush */
10767                 spin_unlock_irqrestore(&phba->hbalock, iflag);
10768         } else
10769                 ha_copy = phba->ha_copy;
10770
10771         /*
10772          * Process all events on FCP ring. Take the optimized path for FCP IO.
10773          */
10774         ha_copy &= ~(phba->work_ha_mask);
10775
10776         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10777         status >>= (4*LPFC_FCP_RING);
10778         if (status & HA_RXMASK)
10779                 lpfc_sli_handle_fast_ring_event(phba,
10780                                                 &phba->sli.ring[LPFC_FCP_RING],
10781                                                 status);
10782
10783         if (phba->cfg_multi_ring_support == 2) {
10784                 /*
10785                  * Process all events on extra ring. Take the optimized path
10786                  * for extra ring IO.
10787                  */
10788                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10789                 status >>= (4*LPFC_EXTRA_RING);
10790                 if (status & HA_RXMASK) {
10791                         lpfc_sli_handle_fast_ring_event(phba,
10792                                         &phba->sli.ring[LPFC_EXTRA_RING],
10793                                         status);
10794                 }
10795         }
10796         return IRQ_HANDLED;
10797 }  /* lpfc_sli_fp_intr_handler */
10798
10799 /**
10800  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
10801  * @irq: Interrupt number.
10802  * @dev_id: The device context pointer.
10803  *
10804  * This function is the HBA device-level interrupt handler to device with
10805  * SLI-3 interface spec, called from the PCI layer when either MSI or
10806  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
10807  * requires driver attention. This function invokes the slow-path interrupt
10808  * attention handling function and fast-path interrupt attention handling
10809  * function in turn to process the relevant HBA attention events. This
10810  * function is called without any lock held. It gets the hbalock to access
10811  * and update SLI data structures.
10812  *
10813  * This function returns IRQ_HANDLED when interrupt is handled, else it
10814  * returns IRQ_NONE.
10815  **/
10816 irqreturn_t
10817 lpfc_sli_intr_handler(int irq, void *dev_id)
10818 {
10819         struct lpfc_hba  *phba;
10820         irqreturn_t sp_irq_rc, fp_irq_rc;
10821         unsigned long status1, status2;
10822         uint32_t hc_copy;
10823
10824         /*
10825          * Get the driver's phba structure from the dev_id and
10826          * assume the HBA is not interrupting.
10827          */
10828         phba = (struct lpfc_hba *) dev_id;
10829
10830         if (unlikely(!phba))
10831                 return IRQ_NONE;
10832
10833         /* Check device state for handling interrupt */
10834         if (lpfc_intr_state_check(phba))
10835                 return IRQ_NONE;
10836
10837         spin_lock(&phba->hbalock);
10838         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
10839                 spin_unlock(&phba->hbalock);
10840                 return IRQ_HANDLED;
10841         }
10842
10843         if (unlikely(!phba->ha_copy)) {
10844                 spin_unlock(&phba->hbalock);
10845                 return IRQ_NONE;
10846         } else if (phba->ha_copy & HA_ERATT) {
10847                 if (phba->hba_flag & HBA_ERATT_HANDLED)
10848                         /* ERATT polling has handled ERATT */
10849                         phba->ha_copy &= ~HA_ERATT;
10850                 else
10851                         /* Indicate interrupt handler handles ERATT */
10852                         phba->hba_flag |= HBA_ERATT_HANDLED;
10853         }
10854
10855         /*
10856          * If there is deferred error attention, do not check for any interrupt.
10857          */
10858         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10859                 spin_unlock(&phba->hbalock);
10860                 return IRQ_NONE;
10861         }
10862
10863         /* Clear attention sources except link and error attentions */
10864         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
10865                 spin_unlock(&phba->hbalock);
10866                 return IRQ_HANDLED;
10867         }
10868         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
10869                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
10870                 phba->HCregaddr);
10871         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
10872         writel(hc_copy, phba->HCregaddr);
10873         readl(phba->HAregaddr); /* flush */
10874         spin_unlock(&phba->hbalock);
10875
10876         /*
10877          * Invokes slow-path host attention interrupt handling as appropriate.
10878          */
10879
10880         /* status of events with mailbox and link attention */
10881         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
10882
10883         /* status of events with ELS ring */
10884         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
10885         status2 >>= (4*LPFC_ELS_RING);
10886
10887         if (status1 || (status2 & HA_RXMASK))
10888                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
10889         else
10890                 sp_irq_rc = IRQ_NONE;
10891
10892         /*
10893          * Invoke fast-path host attention interrupt handling as appropriate.
10894          */
10895
10896         /* status of events with FCP ring */
10897         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10898         status1 >>= (4*LPFC_FCP_RING);
10899
10900         /* status of events with extra ring */
10901         if (phba->cfg_multi_ring_support == 2) {
10902                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10903                 status2 >>= (4*LPFC_EXTRA_RING);
10904         } else
10905                 status2 = 0;
10906
10907         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
10908                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
10909         else
10910                 fp_irq_rc = IRQ_NONE;
10911
10912         /* Return device-level interrupt handling status */
10913         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
10914 }  /* lpfc_sli_intr_handler */
10915
10916 /**
10917  * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
10918  * @phba: pointer to lpfc hba data structure.
10919  *
10920  * This routine is invoked by the worker thread to process all the pending
10921  * SLI4 FCP abort XRI events.
10922  **/
10923 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
10924 {
10925         struct lpfc_cq_event *cq_event;
10926
10927         /* First, declare the fcp xri abort event has been handled */
10928         spin_lock_irq(&phba->hbalock);
10929         phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
10930         spin_unlock_irq(&phba->hbalock);
10931         /* Now, handle all the fcp xri abort events */
10932         while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
10933                 /* Get the first event from the head of the event queue */
10934                 spin_lock_irq(&phba->hbalock);
10935                 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10936                                  cq_event, struct lpfc_cq_event, list);
10937                 spin_unlock_irq(&phba->hbalock);
10938                 /* Notify aborted XRI for FCP work queue */
10939                 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10940                 /* Free the event processed back to the free pool */
10941                 lpfc_sli4_cq_event_release(phba, cq_event);
10942         }
10943 }
10944
10945 /**
10946  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
10947  * @phba: pointer to lpfc hba data structure.
10948  *
10949  * This routine is invoked by the worker thread to process all the pending
10950  * SLI4 els abort xri events.
10951  **/
10952 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
10953 {
10954         struct lpfc_cq_event *cq_event;
10955
10956         /* First, declare the els xri abort event has been handled */
10957         spin_lock_irq(&phba->hbalock);
10958         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
10959         spin_unlock_irq(&phba->hbalock);
10960         /* Now, handle all the els xri abort events */
10961         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
10962                 /* Get the first event from the head of the event queue */
10963                 spin_lock_irq(&phba->hbalock);
10964                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10965                                  cq_event, struct lpfc_cq_event, list);
10966                 spin_unlock_irq(&phba->hbalock);
10967                 /* Notify aborted XRI for ELS work queue */
10968                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10969                 /* Free the event processed back to the free pool */
10970                 lpfc_sli4_cq_event_release(phba, cq_event);
10971         }
10972 }
10973
10974 /**
10975  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
10976  * @phba: pointer to lpfc hba data structure
10977  * @pIocbIn: pointer to the rspiocbq
10978  * @pIocbOut: pointer to the cmdiocbq
10979  * @wcqe: pointer to the complete wcqe
10980  *
10981  * This routine transfers the fields of a command iocbq to a response iocbq
10982  * by copying all the IOCB fields from command iocbq and transferring the
10983  * completion status information from the complete wcqe.
10984  **/
10985 static void
10986 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
10987                               struct lpfc_iocbq *pIocbIn,
10988                               struct lpfc_iocbq *pIocbOut,
10989                               struct lpfc_wcqe_complete *wcqe)
10990 {
10991         unsigned long iflags;
10992         uint32_t status;
10993         size_t offset = offsetof(struct lpfc_iocbq, iocb);
10994
10995         memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
10996                sizeof(struct lpfc_iocbq) - offset);
10997         /* Map WCQE parameters into irspiocb parameters */
10998         status = bf_get(lpfc_wcqe_c_status, wcqe);
10999         pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
11000         if (pIocbOut->iocb_flag & LPFC_IO_FCP)
11001                 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
11002                         pIocbIn->iocb.un.fcpi.fcpi_parm =
11003                                         pIocbOut->iocb.un.fcpi.fcpi_parm -
11004                                         wcqe->total_data_placed;
11005                 else
11006                         pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11007         else {
11008                 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11009                 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
11010         }
11011
11012         /* Convert BG errors for completion status */
11013         if (status == CQE_STATUS_DI_ERROR) {
11014                 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
11015
11016                 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
11017                         pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
11018                 else
11019                         pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
11020
11021                 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
11022                 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
11023                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11024                                 BGS_GUARD_ERR_MASK;
11025                 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
11026                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11027                                 BGS_APPTAG_ERR_MASK;
11028                 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
11029                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11030                                 BGS_REFTAG_ERR_MASK;
11031
11032                 /* Check to see if there was any good data before the error */
11033                 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
11034                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11035                                 BGS_HI_WATER_MARK_PRESENT_MASK;
11036                         pIocbIn->iocb.unsli3.sli3_bg.bghm =
11037                                 wcqe->total_data_placed;
11038                 }
11039
11040                 /*
11041                 * Set ALL the error bits to indicate we don't know what
11042                 * type of error it is.
11043                 */
11044                 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11045                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11046                                 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11047                                 BGS_GUARD_ERR_MASK);
11048         }
11049
11050         /* Pick up HBA exchange busy condition */
11051         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11052                 spin_lock_irqsave(&phba->hbalock, iflags);
11053                 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11054                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11055         }
11056 }
11057
11058 /**
11059  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
11060  * @phba: Pointer to HBA context object.
11061  * @wcqe: Pointer to work-queue completion queue entry.
11062  *
11063  * This routine handles an ELS work-queue completion event and construct
11064  * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
11065  * discovery engine to handle.
11066  *
11067  * Return: Pointer to the receive IOCBQ, NULL otherwise.
11068  **/
11069 static struct lpfc_iocbq *
11070 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11071                                struct lpfc_iocbq *irspiocbq)
11072 {
11073         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11074         struct lpfc_iocbq *cmdiocbq;
11075         struct lpfc_wcqe_complete *wcqe;
11076         unsigned long iflags;
11077
11078         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11079         spin_lock_irqsave(&pring->ring_lock, iflags);
11080         pring->stats.iocb_event++;
11081         /* Look up the ELS command IOCB and create pseudo response IOCB */
11082         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11083                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11084         spin_unlock_irqrestore(&pring->ring_lock, iflags);
11085
11086         if (unlikely(!cmdiocbq)) {
11087                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11088                                 "0386 ELS complete with no corresponding "
11089                                 "cmdiocb: iotag (%d)\n",
11090                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11091                 lpfc_sli_release_iocbq(phba, irspiocbq);
11092                 return NULL;
11093         }
11094
11095         /* Fake the irspiocbq and copy necessary response information */
11096         lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11097
11098         return irspiocbq;
11099 }
11100
11101 /**
11102  * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
11103  * @phba: Pointer to HBA context object.
11104  * @cqe: Pointer to mailbox completion queue entry.
11105  *
11106  * This routine process a mailbox completion queue entry with asynchrous
11107  * event.
11108  *
11109  * Return: true if work posted to worker thread, otherwise false.
11110  **/
11111 static bool
11112 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11113 {
11114         struct lpfc_cq_event *cq_event;
11115         unsigned long iflags;
11116
11117         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11118                         "0392 Async Event: word0:x%x, word1:x%x, "
11119                         "word2:x%x, word3:x%x\n", mcqe->word0,
11120                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11121
11122         /* Allocate a new internal CQ_EVENT entry */
11123         cq_event = lpfc_sli4_cq_event_alloc(phba);
11124         if (!cq_event) {
11125                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11126                                 "0394 Failed to allocate CQ_EVENT entry\n");
11127                 return false;
11128         }
11129
11130         /* Move the CQE into an asynchronous event entry */
11131         memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11132         spin_lock_irqsave(&phba->hbalock, iflags);
11133         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11134         /* Set the async event flag */
11135         phba->hba_flag |= ASYNC_EVENT;
11136         spin_unlock_irqrestore(&phba->hbalock, iflags);
11137
11138         return true;
11139 }
11140
11141 /**
11142  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
11143  * @phba: Pointer to HBA context object.
11144  * @cqe: Pointer to mailbox completion queue entry.
11145  *
11146  * This routine process a mailbox completion queue entry with mailbox
11147  * completion event.
11148  *
11149  * Return: true if work posted to worker thread, otherwise false.
11150  **/
11151 static bool
11152 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11153 {
11154         uint32_t mcqe_status;
11155         MAILBOX_t *mbox, *pmbox;
11156         struct lpfc_mqe *mqe;
11157         struct lpfc_vport *vport;
11158         struct lpfc_nodelist *ndlp;
11159         struct lpfc_dmabuf *mp;
11160         unsigned long iflags;
11161         LPFC_MBOXQ_t *pmb;
11162         bool workposted = false;
11163         int rc;
11164
11165         /* If not a mailbox complete MCQE, out by checking mailbox consume */
11166         if (!bf_get(lpfc_trailer_completed, mcqe))
11167                 goto out_no_mqe_complete;
11168
11169         /* Get the reference to the active mbox command */
11170         spin_lock_irqsave(&phba->hbalock, iflags);
11171         pmb = phba->sli.mbox_active;
11172         if (unlikely(!pmb)) {
11173                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11174                                 "1832 No pending MBOX command to handle\n");
11175                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11176                 goto out_no_mqe_complete;
11177         }
11178         spin_unlock_irqrestore(&phba->hbalock, iflags);
11179         mqe = &pmb->u.mqe;
11180         pmbox = (MAILBOX_t *)&pmb->u.mqe;
11181         mbox = phba->mbox;
11182         vport = pmb->vport;
11183
11184         /* Reset heartbeat timer */
11185         phba->last_completion_time = jiffies;
11186         del_timer(&phba->sli.mbox_tmo);
11187
11188         /* Move mbox data to caller's mailbox region, do endian swapping */
11189         if (pmb->mbox_cmpl && mbox)
11190                 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
11191
11192         /*
11193          * For mcqe errors, conditionally move a modified error code to
11194          * the mbox so that the error will not be missed.
11195          */
11196         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11197         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11198                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11199                         bf_set(lpfc_mqe_status, mqe,
11200                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
11201         }
11202         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11203                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11204                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11205                                       "MBOX dflt rpi: status:x%x rpi:x%x",
11206                                       mcqe_status,
11207                                       pmbox->un.varWords[0], 0);
11208                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11209                         mp = (struct lpfc_dmabuf *)(pmb->context1);
11210                         ndlp = (struct lpfc_nodelist *)pmb->context2;
11211                         /* Reg_LOGIN of dflt RPI was successful. Now lets get
11212                          * RID of the PPI using the same mbox buffer.
11213                          */
11214                         lpfc_unreg_login(phba, vport->vpi,
11215                                          pmbox->un.varWords[0], pmb);
11216                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11217                         pmb->context1 = mp;
11218                         pmb->context2 = ndlp;
11219                         pmb->vport = vport;
11220                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11221                         if (rc != MBX_BUSY)
11222                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11223                                                 LOG_SLI, "0385 rc should "
11224                                                 "have been MBX_BUSY\n");
11225                         if (rc != MBX_NOT_FINISHED)
11226                                 goto send_current_mbox;
11227                 }
11228         }
11229         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11230         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11231         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11232
11233         /* There is mailbox completion work to do */
11234         spin_lock_irqsave(&phba->hbalock, iflags);
11235         __lpfc_mbox_cmpl_put(phba, pmb);
11236         phba->work_ha |= HA_MBATT;
11237         spin_unlock_irqrestore(&phba->hbalock, iflags);
11238         workposted = true;
11239
11240 send_current_mbox:
11241         spin_lock_irqsave(&phba->hbalock, iflags);
11242         /* Release the mailbox command posting token */
11243         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11244         /* Setting active mailbox pointer need to be in sync to flag clear */
11245         phba->sli.mbox_active = NULL;
11246         spin_unlock_irqrestore(&phba->hbalock, iflags);
11247         /* Wake up worker thread to post the next pending mailbox command */
11248         lpfc_worker_wake_up(phba);
11249 out_no_mqe_complete:
11250         if (bf_get(lpfc_trailer_consumed, mcqe))
11251                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11252         return workposted;
11253 }
11254
11255 /**
11256  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11257  * @phba: Pointer to HBA context object.
11258  * @cqe: Pointer to mailbox completion queue entry.
11259  *
11260  * This routine process a mailbox completion queue entry, it invokes the
11261  * proper mailbox complete handling or asynchrous event handling routine
11262  * according to the MCQE's async bit.
11263  *
11264  * Return: true if work posted to worker thread, otherwise false.
11265  **/
11266 static bool
11267 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11268 {
11269         struct lpfc_mcqe mcqe;
11270         bool workposted;
11271
11272         /* Copy the mailbox MCQE and convert endian order as needed */
11273         lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11274
11275         /* Invoke the proper event handling routine */
11276         if (!bf_get(lpfc_trailer_async, &mcqe))
11277                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11278         else
11279                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11280         return workposted;
11281 }
11282
11283 /**
11284  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11285  * @phba: Pointer to HBA context object.
11286  * @cq: Pointer to associated CQ
11287  * @wcqe: Pointer to work-queue completion queue entry.
11288  *
11289  * This routine handles an ELS work-queue completion event.
11290  *
11291  * Return: true if work posted to worker thread, otherwise false.
11292  **/
11293 static bool
11294 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11295                              struct lpfc_wcqe_complete *wcqe)
11296 {
11297         struct lpfc_iocbq *irspiocbq;
11298         unsigned long iflags;
11299         struct lpfc_sli_ring *pring = cq->pring;
11300
11301         /* Get an irspiocbq for later ELS response processing use */
11302         irspiocbq = lpfc_sli_get_iocbq(phba);
11303         if (!irspiocbq) {
11304                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11305                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
11306                         "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
11307                         pring->txq_cnt, phba->iocb_cnt,
11308                         phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
11309                         phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
11310                 return false;
11311         }
11312
11313         /* Save off the slow-path queue event for work thread to process */
11314         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
11315         spin_lock_irqsave(&phba->hbalock, iflags);
11316         list_add_tail(&irspiocbq->cq_event.list,
11317                       &phba->sli4_hba.sp_queue_event);
11318         phba->hba_flag |= HBA_SP_QUEUE_EVT;
11319         spin_unlock_irqrestore(&phba->hbalock, iflags);
11320
11321         return true;
11322 }
11323
11324 /**
11325  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
11326  * @phba: Pointer to HBA context object.
11327  * @wcqe: Pointer to work-queue completion queue entry.
11328  *
11329  * This routine handles slow-path WQ entry comsumed event by invoking the
11330  * proper WQ release routine to the slow-path WQ.
11331  **/
11332 static void
11333 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
11334                              struct lpfc_wcqe_release *wcqe)
11335 {
11336         /* sanity check on queue memory */
11337         if (unlikely(!phba->sli4_hba.els_wq))
11338                 return;
11339         /* Check for the slow-path ELS work queue */
11340         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
11341                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
11342                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11343         else
11344                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11345                                 "2579 Slow-path wqe consume event carries "
11346                                 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
11347                                 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
11348                                 phba->sli4_hba.els_wq->queue_id);
11349 }
11350
11351 /**
11352  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
11353  * @phba: Pointer to HBA context object.
11354  * @cq: Pointer to a WQ completion queue.
11355  * @wcqe: Pointer to work-queue completion queue entry.
11356  *
11357  * This routine handles an XRI abort event.
11358  *
11359  * Return: true if work posted to worker thread, otherwise false.
11360  **/
11361 static bool
11362 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
11363                                    struct lpfc_queue *cq,
11364                                    struct sli4_wcqe_xri_aborted *wcqe)
11365 {
11366         bool workposted = false;
11367         struct lpfc_cq_event *cq_event;
11368         unsigned long iflags;
11369
11370         /* Allocate a new internal CQ_EVENT entry */
11371         cq_event = lpfc_sli4_cq_event_alloc(phba);
11372         if (!cq_event) {
11373                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11374                                 "0602 Failed to allocate CQ_EVENT entry\n");
11375                 return false;
11376         }
11377
11378         /* Move the CQE into the proper xri abort event list */
11379         memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
11380         switch (cq->subtype) {
11381         case LPFC_FCP:
11382                 spin_lock_irqsave(&phba->hbalock, iflags);
11383                 list_add_tail(&cq_event->list,
11384                               &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
11385                 /* Set the fcp xri abort event flag */
11386                 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
11387                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11388                 workposted = true;
11389                 break;
11390         case LPFC_ELS:
11391                 spin_lock_irqsave(&phba->hbalock, iflags);
11392                 list_add_tail(&cq_event->list,
11393                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
11394                 /* Set the els xri abort event flag */
11395                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
11396                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11397                 workposted = true;
11398                 break;
11399         default:
11400                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11401                                 "0603 Invalid work queue CQE subtype (x%x)\n",
11402                                 cq->subtype);
11403                 workposted = false;
11404                 break;
11405         }
11406         return workposted;
11407 }
11408
11409 /**
11410  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
11411  * @phba: Pointer to HBA context object.
11412  * @rcqe: Pointer to receive-queue completion queue entry.
11413  *
11414  * This routine process a receive-queue completion queue entry.
11415  *
11416  * Return: true if work posted to worker thread, otherwise false.
11417  **/
11418 static bool
11419 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11420 {
11421         bool workposted = false;
11422         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
11423         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
11424         struct hbq_dmabuf *dma_buf;
11425         uint32_t status, rq_id;
11426         unsigned long iflags;
11427
11428         /* sanity check on queue memory */
11429         if (unlikely(!hrq) || unlikely(!drq))
11430                 return workposted;
11431
11432         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
11433                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11434         else
11435                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
11436         if (rq_id != hrq->queue_id)
11437                 goto out;
11438
11439         status = bf_get(lpfc_rcqe_status, rcqe);
11440         switch (status) {
11441         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11442                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11443                                 "2537 Receive Frame Truncated!!\n");
11444                 hrq->RQ_buf_trunc++;
11445         case FC_STATUS_RQ_SUCCESS:
11446                 lpfc_sli4_rq_release(hrq, drq);
11447                 spin_lock_irqsave(&phba->hbalock, iflags);
11448                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11449                 if (!dma_buf) {
11450                         hrq->RQ_no_buf_found++;
11451                         spin_unlock_irqrestore(&phba->hbalock, iflags);
11452                         goto out;
11453                 }
11454                 hrq->RQ_rcv_buf++;
11455                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11456                 /* save off the frame for the word thread to process */
11457                 list_add_tail(&dma_buf->cq_event.list,
11458                               &phba->sli4_hba.sp_queue_event);
11459                 /* Frame received */
11460                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11461                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11462                 workposted = true;
11463                 break;
11464         case FC_STATUS_INSUFF_BUF_NEED_BUF:
11465         case FC_STATUS_INSUFF_BUF_FRM_DISC:
11466                 hrq->RQ_no_posted_buf++;
11467                 /* Post more buffers if possible */
11468                 spin_lock_irqsave(&phba->hbalock, iflags);
11469                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
11470                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11471                 workposted = true;
11472                 break;
11473         }
11474 out:
11475         return workposted;
11476 }
11477
11478 /**
11479  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
11480  * @phba: Pointer to HBA context object.
11481  * @cq: Pointer to the completion queue.
11482  * @wcqe: Pointer to a completion queue entry.
11483  *
11484  * This routine process a slow-path work-queue or receive queue completion queue
11485  * entry.
11486  *
11487  * Return: true if work posted to worker thread, otherwise false.
11488  **/
11489 static bool
11490 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11491                          struct lpfc_cqe *cqe)
11492 {
11493         struct lpfc_cqe cqevt;
11494         bool workposted = false;
11495
11496         /* Copy the work queue CQE and convert endian order if needed */
11497         lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
11498
11499         /* Check and process for different type of WCQE and dispatch */
11500         switch (bf_get(lpfc_cqe_code, &cqevt)) {
11501         case CQE_CODE_COMPL_WQE:
11502                 /* Process the WQ/RQ complete event */
11503                 phba->last_completion_time = jiffies;
11504                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
11505                                 (struct lpfc_wcqe_complete *)&cqevt);
11506                 break;
11507         case CQE_CODE_RELEASE_WQE:
11508                 /* Process the WQ release event */
11509                 lpfc_sli4_sp_handle_rel_wcqe(phba,
11510                                 (struct lpfc_wcqe_release *)&cqevt);
11511                 break;
11512         case CQE_CODE_XRI_ABORTED:
11513                 /* Process the WQ XRI abort event */
11514                 phba->last_completion_time = jiffies;
11515                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11516                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
11517                 break;
11518         case CQE_CODE_RECEIVE:
11519         case CQE_CODE_RECEIVE_V1:
11520                 /* Process the RQ event */
11521                 phba->last_completion_time = jiffies;
11522                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
11523                                 (struct lpfc_rcqe *)&cqevt);
11524                 break;
11525         default:
11526                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11527                                 "0388 Not a valid WCQE code: x%x\n",
11528                                 bf_get(lpfc_cqe_code, &cqevt));
11529                 break;
11530         }
11531         return workposted;
11532 }
11533
11534 /**
11535  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
11536  * @phba: Pointer to HBA context object.
11537  * @eqe: Pointer to fast-path event queue entry.
11538  *
11539  * This routine process a event queue entry from the slow-path event queue.
11540  * It will check the MajorCode and MinorCode to determine this is for a
11541  * completion event on a completion queue, if not, an error shall be logged
11542  * and just return. Otherwise, it will get to the corresponding completion
11543  * queue and process all the entries on that completion queue, rearm the
11544  * completion queue, and then return.
11545  *
11546  **/
11547 static void
11548 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11549         struct lpfc_queue *speq)
11550 {
11551         struct lpfc_queue *cq = NULL, *childq;
11552         struct lpfc_cqe *cqe;
11553         bool workposted = false;
11554         int ecount = 0;
11555         uint16_t cqid;
11556
11557         /* Get the reference to the corresponding CQ */
11558         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11559
11560         list_for_each_entry(childq, &speq->child_list, list) {
11561                 if (childq->queue_id == cqid) {
11562                         cq = childq;
11563                         break;
11564                 }
11565         }
11566         if (unlikely(!cq)) {
11567                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11568                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11569                                         "0365 Slow-path CQ identifier "
11570                                         "(%d) does not exist\n", cqid);
11571                 return;
11572         }
11573
11574         /* Process all the entries to the CQ */
11575         switch (cq->type) {
11576         case LPFC_MCQ:
11577                 while ((cqe = lpfc_sli4_cq_get(cq))) {
11578                         workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
11579                         if (!(++ecount % cq->entry_repost))
11580                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11581                         cq->CQ_mbox++;
11582                 }
11583                 break;
11584         case LPFC_WCQ:
11585                 while ((cqe = lpfc_sli4_cq_get(cq))) {
11586                         if (cq->subtype == LPFC_FCP)
11587                                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
11588                                                                        cqe);
11589                         else
11590                                 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
11591                                                                       cqe);
11592                         if (!(++ecount % cq->entry_repost))
11593                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11594                 }
11595
11596                 /* Track the max number of CQEs processed in 1 EQ */
11597                 if (ecount > cq->CQ_max_cqe)
11598                         cq->CQ_max_cqe = ecount;
11599                 break;
11600         default:
11601                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11602                                 "0370 Invalid completion queue type (%d)\n",
11603                                 cq->type);
11604                 return;
11605         }
11606
11607         /* Catch the no cq entry condition, log an error */
11608         if (unlikely(ecount == 0))
11609                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11610                                 "0371 No entry from the CQ: identifier "
11611                                 "(x%x), type (%d)\n", cq->queue_id, cq->type);
11612
11613         /* In any case, flash and re-arm the RCQ */
11614         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11615
11616         /* wake up worker thread if there are works to be done */
11617         if (workposted)
11618                 lpfc_worker_wake_up(phba);
11619 }
11620
11621 /**
11622  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
11623  * @phba: Pointer to HBA context object.
11624  * @cq: Pointer to associated CQ
11625  * @wcqe: Pointer to work-queue completion queue entry.
11626  *
11627  * This routine process a fast-path work queue completion entry from fast-path
11628  * event queue for FCP command response completion.
11629  **/
11630 static void
11631 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11632                              struct lpfc_wcqe_complete *wcqe)
11633 {
11634         struct lpfc_sli_ring *pring = cq->pring;
11635         struct lpfc_iocbq *cmdiocbq;
11636         struct lpfc_iocbq irspiocbq;
11637         unsigned long iflags;
11638
11639         /* Check for response status */
11640         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11641                 /* If resource errors reported from HBA, reduce queue
11642                  * depth of the SCSI device.
11643                  */
11644                 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
11645                      IOSTAT_LOCAL_REJECT)) &&
11646                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
11647                      IOERR_NO_RESOURCES))
11648                         phba->lpfc_rampdown_queue_depth(phba);
11649
11650                 /* Log the error status */
11651                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11652                                 "0373 FCP complete error: status=x%x, "
11653                                 "hw_status=x%x, total_data_specified=%d, "
11654                                 "parameter=x%x, word3=x%x\n",
11655                                 bf_get(lpfc_wcqe_c_status, wcqe),
11656                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
11657                                 wcqe->total_data_placed, wcqe->parameter,
11658                                 wcqe->word3);
11659         }
11660
11661         /* Look up the FCP command IOCB and create pseudo response IOCB */
11662         spin_lock_irqsave(&pring->ring_lock, iflags);
11663         pring->stats.iocb_event++;
11664         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11665                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11666         spin_unlock_irqrestore(&pring->ring_lock, iflags);
11667         if (unlikely(!cmdiocbq)) {
11668                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11669                                 "0374 FCP complete with no corresponding "
11670                                 "cmdiocb: iotag (%d)\n",
11671                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11672                 return;
11673         }
11674         if (unlikely(!cmdiocbq->iocb_cmpl)) {
11675                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11676                                 "0375 FCP cmdiocb not callback function "
11677                                 "iotag: (%d)\n",
11678                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11679                 return;
11680         }
11681
11682         /* Fake the irspiocb and copy necessary response information */
11683         lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
11684
11685         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
11686                 spin_lock_irqsave(&phba->hbalock, iflags);
11687                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11688                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11689         }
11690
11691         /* Pass the cmd_iocb and the rsp state to the upper layer */
11692         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
11693 }
11694
11695 /**
11696  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
11697  * @phba: Pointer to HBA context object.
11698  * @cq: Pointer to completion queue.
11699  * @wcqe: Pointer to work-queue completion queue entry.
11700  *
11701  * This routine handles an fast-path WQ entry comsumed event by invoking the
11702  * proper WQ release routine to the slow-path WQ.
11703  **/
11704 static void
11705 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11706                              struct lpfc_wcqe_release *wcqe)
11707 {
11708         struct lpfc_queue *childwq;
11709         bool wqid_matched = false;
11710         uint16_t fcp_wqid;
11711
11712         /* Check for fast-path FCP work queue release */
11713         fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
11714         list_for_each_entry(childwq, &cq->child_list, list) {
11715                 if (childwq->queue_id == fcp_wqid) {
11716                         lpfc_sli4_wq_release(childwq,
11717                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11718                         wqid_matched = true;
11719                         break;
11720                 }
11721         }
11722         /* Report warning log message if no match found */
11723         if (wqid_matched != true)
11724                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11725                                 "2580 Fast-path wqe consume event carries "
11726                                 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
11727 }
11728
11729 /**
11730  * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
11731  * @cq: Pointer to the completion queue.
11732  * @eqe: Pointer to fast-path completion queue entry.
11733  *
11734  * This routine process a fast-path work queue completion entry from fast-path
11735  * event queue for FCP command response completion.
11736  **/
11737 static int
11738 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11739                          struct lpfc_cqe *cqe)
11740 {
11741         struct lpfc_wcqe_release wcqe;
11742         bool workposted = false;
11743
11744         /* Copy the work queue CQE and convert endian order if needed */
11745         lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11746
11747         /* Check and process for different type of WCQE and dispatch */
11748         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11749         case CQE_CODE_COMPL_WQE:
11750                 cq->CQ_wq++;
11751                 /* Process the WQ complete event */
11752                 phba->last_completion_time = jiffies;
11753                 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
11754                                 (struct lpfc_wcqe_complete *)&wcqe);
11755                 break;
11756         case CQE_CODE_RELEASE_WQE:
11757                 cq->CQ_release_wqe++;
11758                 /* Process the WQ release event */
11759                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11760                                 (struct lpfc_wcqe_release *)&wcqe);
11761                 break;
11762         case CQE_CODE_XRI_ABORTED:
11763                 cq->CQ_xri_aborted++;
11764                 /* Process the WQ XRI abort event */
11765                 phba->last_completion_time = jiffies;
11766                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11767                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
11768                 break;
11769         default:
11770                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11771                                 "0144 Not a valid WCQE code: x%x\n",
11772                                 bf_get(lpfc_wcqe_c_code, &wcqe));
11773                 break;
11774         }
11775         return workposted;
11776 }
11777
11778 /**
11779  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
11780  * @phba: Pointer to HBA context object.
11781  * @eqe: Pointer to fast-path event queue entry.
11782  *
11783  * This routine process a event queue entry from the fast-path event queue.
11784  * It will check the MajorCode and MinorCode to determine this is for a
11785  * completion event on a completion queue, if not, an error shall be logged
11786  * and just return. Otherwise, it will get to the corresponding completion
11787  * queue and process all the entries on the completion queue, rearm the
11788  * completion queue, and then return.
11789  **/
11790 static void
11791 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11792                         uint32_t qidx)
11793 {
11794         struct lpfc_queue *cq;
11795         struct lpfc_cqe *cqe;
11796         bool workposted = false;
11797         uint16_t cqid;
11798         int ecount = 0;
11799
11800         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
11801                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11802                                 "0366 Not a valid completion "
11803                                 "event: majorcode=x%x, minorcode=x%x\n",
11804                                 bf_get_le32(lpfc_eqe_major_code, eqe),
11805                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
11806                 return;
11807         }
11808
11809         /* Get the reference to the corresponding CQ */
11810         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11811
11812         /* Check if this is a Slow path event */
11813         if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
11814                 lpfc_sli4_sp_handle_eqe(phba, eqe,
11815                         phba->sli4_hba.hba_eq[qidx]);
11816                 return;
11817         }
11818
11819         if (unlikely(!phba->sli4_hba.fcp_cq)) {
11820                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11821                                 "3146 Fast-path completion queues "
11822                                 "does not exist\n");
11823                 return;
11824         }
11825         cq = phba->sli4_hba.fcp_cq[qidx];
11826         if (unlikely(!cq)) {
11827                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11828                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11829                                         "0367 Fast-path completion queue "
11830                                         "(%d) does not exist\n", qidx);
11831                 return;
11832         }
11833
11834         if (unlikely(cqid != cq->queue_id)) {
11835                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11836                                 "0368 Miss-matched fast-path completion "
11837                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
11838                                 cqid, cq->queue_id);
11839                 return;
11840         }
11841
11842         /* Process all the entries to the CQ */
11843         while ((cqe = lpfc_sli4_cq_get(cq))) {
11844                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
11845                 if (!(++ecount % cq->entry_repost))
11846                         lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11847         }
11848
11849         /* Track the max number of CQEs processed in 1 EQ */
11850         if (ecount > cq->CQ_max_cqe)
11851                 cq->CQ_max_cqe = ecount;
11852
11853         /* Catch the no cq entry condition */
11854         if (unlikely(ecount == 0))
11855                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11856                                 "0369 No entry from fast-path completion "
11857                                 "queue fcpcqid=%d\n", cq->queue_id);
11858
11859         /* In any case, flash and re-arm the CQ */
11860         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11861
11862         /* wake up worker thread if there are works to be done */
11863         if (workposted)
11864                 lpfc_worker_wake_up(phba);
11865 }
11866
11867 static void
11868 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11869 {
11870         struct lpfc_eqe *eqe;
11871
11872         /* walk all the EQ entries and drop on the floor */
11873         while ((eqe = lpfc_sli4_eq_get(eq)))
11874                 ;
11875
11876         /* Clear and re-arm the EQ */
11877         lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
11878 }
11879
11880 /**
11881  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
11882  * @irq: Interrupt number.
11883  * @dev_id: The device context pointer.
11884  *
11885  * This function is directly called from the PCI layer as an interrupt
11886  * service routine when device with SLI-4 interface spec is enabled with
11887  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11888  * ring event in the HBA. However, when the device is enabled with either
11889  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11890  * device-level interrupt handler. When the PCI slot is in error recovery
11891  * or the HBA is undergoing initialization, the interrupt handler will not
11892  * process the interrupt. The SCSI FCP fast-path ring event are handled in
11893  * the intrrupt context. This function is called without any lock held.
11894  * It gets the hbalock to access and update SLI data structures. Note that,
11895  * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
11896  * equal to that of FCP CQ index.
11897  *
11898  * The link attention and ELS ring attention events are handled
11899  * by the worker thread. The interrupt handler signals the worker thread
11900  * and returns for these events. This function is called without any lock
11901  * held. It gets the hbalock to access and update SLI data structures.
11902  *
11903  * This function returns IRQ_HANDLED when interrupt is handled else it
11904  * returns IRQ_NONE.
11905  **/
11906 irqreturn_t
11907 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11908 {
11909         struct lpfc_hba *phba;
11910         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
11911         struct lpfc_queue *fpeq;
11912         struct lpfc_eqe *eqe;
11913         unsigned long iflag;
11914         int ecount = 0;
11915         int fcp_eqidx;
11916
11917         /* Get the driver's phba structure from the dev_id */
11918         fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
11919         phba = fcp_eq_hdl->phba;
11920         fcp_eqidx = fcp_eq_hdl->idx;
11921
11922         if (unlikely(!phba))
11923                 return IRQ_NONE;
11924         if (unlikely(!phba->sli4_hba.hba_eq))
11925                 return IRQ_NONE;
11926
11927         /* Get to the EQ struct associated with this vector */
11928         fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
11929         if (unlikely(!fpeq))
11930                 return IRQ_NONE;
11931
11932         if (lpfc_fcp_look_ahead) {
11933                 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
11934                         lpfc_sli4_eq_clr_intr(fpeq);
11935                 else {
11936                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11937                         return IRQ_NONE;
11938                 }
11939         }
11940
11941         /* Check device state for handling interrupt */
11942         if (unlikely(lpfc_intr_state_check(phba))) {
11943                 fpeq->EQ_badstate++;
11944                 /* Check again for link_state with lock held */
11945                 spin_lock_irqsave(&phba->hbalock, iflag);
11946                 if (phba->link_state < LPFC_LINK_DOWN)
11947                         /* Flush, clear interrupt, and rearm the EQ */
11948                         lpfc_sli4_eq_flush(phba, fpeq);
11949                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11950                 if (lpfc_fcp_look_ahead)
11951                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11952                 return IRQ_NONE;
11953         }
11954
11955         /*
11956          * Process all the event on FCP fast-path EQ
11957          */
11958         while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11959                 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
11960                 if (!(++ecount % fpeq->entry_repost))
11961                         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11962                 fpeq->EQ_processed++;
11963         }
11964
11965         /* Track the max number of EQEs processed in 1 intr */
11966         if (ecount > fpeq->EQ_max_eqe)
11967                 fpeq->EQ_max_eqe = ecount;
11968
11969         /* Always clear and re-arm the fast-path EQ */
11970         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11971
11972         if (unlikely(ecount == 0)) {
11973                 fpeq->EQ_no_entry++;
11974
11975                 if (lpfc_fcp_look_ahead) {
11976                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11977                         return IRQ_NONE;
11978                 }
11979
11980                 if (phba->intr_type == MSIX)
11981                         /* MSI-X treated interrupt served as no EQ share INT */
11982                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11983                                         "0358 MSI-X interrupt with no EQE\n");
11984                 else
11985                         /* Non MSI-X treated on interrupt as EQ share INT */
11986                         return IRQ_NONE;
11987         }
11988
11989         if (lpfc_fcp_look_ahead)
11990                 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11991         return IRQ_HANDLED;
11992 } /* lpfc_sli4_fp_intr_handler */
11993
11994 /**
11995  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
11996  * @irq: Interrupt number.
11997  * @dev_id: The device context pointer.
11998  *
11999  * This function is the device-level interrupt handler to device with SLI-4
12000  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
12001  * interrupt mode is enabled and there is an event in the HBA which requires
12002  * driver attention. This function invokes the slow-path interrupt attention
12003  * handling function and fast-path interrupt attention handling function in
12004  * turn to process the relevant HBA attention events. This function is called
12005  * without any lock held. It gets the hbalock to access and update SLI data
12006  * structures.
12007  *
12008  * This function returns IRQ_HANDLED when interrupt is handled, else it
12009  * returns IRQ_NONE.
12010  **/
12011 irqreturn_t
12012 lpfc_sli4_intr_handler(int irq, void *dev_id)
12013 {
12014         struct lpfc_hba  *phba;
12015         irqreturn_t hba_irq_rc;
12016         bool hba_handled = false;
12017         int fcp_eqidx;
12018
12019         /* Get the driver's phba structure from the dev_id */
12020         phba = (struct lpfc_hba *)dev_id;
12021
12022         if (unlikely(!phba))
12023                 return IRQ_NONE;
12024
12025         /*
12026          * Invoke fast-path host attention interrupt handling as appropriate.
12027          */
12028         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
12029                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
12030                                         &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
12031                 if (hba_irq_rc == IRQ_HANDLED)
12032                         hba_handled |= true;
12033         }
12034
12035         return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
12036 } /* lpfc_sli4_intr_handler */
12037
12038 /**
12039  * lpfc_sli4_queue_free - free a queue structure and associated memory
12040  * @queue: The queue structure to free.
12041  *
12042  * This function frees a queue structure and the DMAable memory used for
12043  * the host resident queue. This function must be called after destroying the
12044  * queue on the HBA.
12045  **/
12046 void
12047 lpfc_sli4_queue_free(struct lpfc_queue *queue)
12048 {
12049         struct lpfc_dmabuf *dmabuf;
12050
12051         if (!queue)
12052                 return;
12053
12054         while (!list_empty(&queue->page_list)) {
12055                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12056                                  list);
12057                 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
12058                                   dmabuf->virt, dmabuf->phys);
12059                 kfree(dmabuf);
12060         }
12061         kfree(queue);
12062         return;
12063 }
12064
12065 /**
12066  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
12067  * @phba: The HBA that this queue is being created on.
12068  * @entry_size: The size of each queue entry for this queue.
12069  * @entry count: The number of entries that this queue will handle.
12070  *
12071  * This function allocates a queue structure and the DMAable memory used for
12072  * the host resident queue. This function must be called before creating the
12073  * queue on the HBA.
12074  **/
12075 struct lpfc_queue *
12076 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
12077                       uint32_t entry_count)
12078 {
12079         struct lpfc_queue *queue;
12080         struct lpfc_dmabuf *dmabuf;
12081         int x, total_qe_count;
12082         void *dma_pointer;
12083         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12084
12085         if (!phba->sli4_hba.pc_sli4_params.supported)
12086                 hw_page_size = SLI4_PAGE_SIZE;
12087
12088         queue = kzalloc(sizeof(struct lpfc_queue) +
12089                         (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
12090         if (!queue)
12091                 return NULL;
12092         queue->page_count = (ALIGN(entry_size * entry_count,
12093                         hw_page_size))/hw_page_size;
12094         INIT_LIST_HEAD(&queue->list);
12095         INIT_LIST_HEAD(&queue->page_list);
12096         INIT_LIST_HEAD(&queue->child_list);
12097         for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
12098                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
12099                 if (!dmabuf)
12100                         goto out_fail;
12101                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
12102                                                   hw_page_size, &dmabuf->phys,
12103                                                   GFP_KERNEL);
12104                 if (!dmabuf->virt) {
12105                         kfree(dmabuf);
12106                         goto out_fail;
12107                 }
12108                 memset(dmabuf->virt, 0, hw_page_size);
12109                 dmabuf->buffer_tag = x;
12110                 list_add_tail(&dmabuf->list, &queue->page_list);
12111                 /* initialize queue's entry array */
12112                 dma_pointer = dmabuf->virt;
12113                 for (; total_qe_count < entry_count &&
12114                      dma_pointer < (hw_page_size + dmabuf->virt);
12115                      total_qe_count++, dma_pointer += entry_size) {
12116                         queue->qe[total_qe_count].address = dma_pointer;
12117                 }
12118         }
12119         queue->entry_size = entry_size;
12120         queue->entry_count = entry_count;
12121
12122         /*
12123          * entry_repost is calculated based on the number of entries in the
12124          * queue. This works out except for RQs. If buffers are NOT initially
12125          * posted for every RQE, entry_repost should be adjusted accordingly.
12126          */
12127         queue->entry_repost = (entry_count >> 3);
12128         if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
12129                 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
12130         queue->phba = phba;
12131
12132         return queue;
12133 out_fail:
12134         lpfc_sli4_queue_free(queue);
12135         return NULL;
12136 }
12137
12138 /**
12139  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
12140  * @phba: HBA structure that indicates port to create a queue on.
12141  * @pci_barset: PCI BAR set flag.
12142  *
12143  * This function shall perform iomap of the specified PCI BAR address to host
12144  * memory address if not already done so and return it. The returned host
12145  * memory address can be NULL.
12146  */
12147 static void __iomem *
12148 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12149 {
12150         struct pci_dev *pdev;
12151         unsigned long bar_map, bar_map_len;
12152
12153         if (!phba->pcidev)
12154                 return NULL;
12155         else
12156                 pdev = phba->pcidev;
12157
12158         switch (pci_barset) {
12159         case WQ_PCI_BAR_0_AND_1:
12160                 if (!phba->pci_bar0_memmap_p) {
12161                         bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
12162                         bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
12163                         phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
12164                 }
12165                 return phba->pci_bar0_memmap_p;
12166         case WQ_PCI_BAR_2_AND_3:
12167                 if (!phba->pci_bar2_memmap_p) {
12168                         bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
12169                         bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
12170                         phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
12171                 }
12172                 return phba->pci_bar2_memmap_p;
12173         case WQ_PCI_BAR_4_AND_5:
12174                 if (!phba->pci_bar4_memmap_p) {
12175                         bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
12176                         bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
12177                         phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
12178                 }
12179                 return phba->pci_bar4_memmap_p;
12180         default:
12181                 break;
12182         }
12183         return NULL;
12184 }
12185
12186 /**
12187  * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
12188  * @phba: HBA structure that indicates port to create a queue on.
12189  * @startq: The starting FCP EQ to modify
12190  *
12191  * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
12192  *
12193  * The @phba struct is used to send mailbox command to HBA. The @startq
12194  * is used to get the starting FCP EQ to change.
12195  * This function is asynchronous and will wait for the mailbox
12196  * command to finish before continuing.
12197  *
12198  * On success this function will return a zero. If unable to allocate enough
12199  * memory this function will return -ENOMEM. If the queue create mailbox command
12200  * fails this function will return -ENXIO.
12201  **/
12202 uint32_t
12203 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12204 {
12205         struct lpfc_mbx_modify_eq_delay *eq_delay;
12206         LPFC_MBOXQ_t *mbox;
12207         struct lpfc_queue *eq;
12208         int cnt, rc, length, status = 0;
12209         uint32_t shdr_status, shdr_add_status;
12210         uint32_t result;
12211         int fcp_eqidx;
12212         union lpfc_sli4_cfg_shdr *shdr;
12213         uint16_t dmult;
12214
12215         if (startq >= phba->cfg_fcp_io_channel)
12216                 return 0;
12217
12218         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12219         if (!mbox)
12220                 return -ENOMEM;
12221         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
12222                   sizeof(struct lpfc_sli4_cfg_mhdr));
12223         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12224                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
12225                          length, LPFC_SLI4_MBX_EMBED);
12226         eq_delay = &mbox->u.mqe.un.eq_delay;
12227
12228         /* Calculate delay multiper from maximum interrupt per second */
12229         result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12230         if (result > LPFC_DMULT_CONST)
12231                 dmult = 0;
12232         else
12233                 dmult = LPFC_DMULT_CONST/result - 1;
12234
12235         cnt = 0;
12236         for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
12237             fcp_eqidx++) {
12238                 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
12239                 if (!eq)
12240                         continue;
12241                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
12242                 eq_delay->u.request.eq[cnt].phase = 0;
12243                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
12244                 cnt++;
12245                 if (cnt >= LPFC_MAX_EQ_DELAY)
12246                         break;
12247         }
12248         eq_delay->u.request.num_eq = cnt;
12249
12250         mbox->vport = phba->pport;
12251         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12252         mbox->context1 = NULL;
12253         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12254         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
12255         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12256         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12257         if (shdr_status || shdr_add_status || rc) {
12258                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12259                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
12260                                 "status x%x add_status x%x, mbx status x%x\n",
12261                                 shdr_status, shdr_add_status, rc);
12262                 status = -ENXIO;
12263         }
12264         mempool_free(mbox, phba->mbox_mem_pool);
12265         return status;
12266 }
12267
12268 /**
12269  * lpfc_eq_create - Create an Event Queue on the HBA
12270  * @phba: HBA structure that indicates port to create a queue on.
12271  * @eq: The queue structure to use to create the event queue.
12272  * @imax: The maximum interrupt per second limit.
12273  *
12274  * This function creates an event queue, as detailed in @eq, on a port,
12275  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
12276  *
12277  * The @phba struct is used to send mailbox command to HBA. The @eq struct
12278  * is used to get the entry count and entry size that are necessary to
12279  * determine the number of pages to allocate and use for this queue. This
12280  * function will send the EQ_CREATE mailbox command to the HBA to setup the
12281  * event queue. This function is asynchronous and will wait for the mailbox
12282  * command to finish before continuing.
12283  *
12284  * On success this function will return a zero. If unable to allocate enough
12285  * memory this function will return -ENOMEM. If the queue create mailbox command
12286  * fails this function will return -ENXIO.
12287  **/
12288 uint32_t
12289 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
12290 {
12291         struct lpfc_mbx_eq_create *eq_create;
12292         LPFC_MBOXQ_t *mbox;
12293         int rc, length, status = 0;
12294         struct lpfc_dmabuf *dmabuf;
12295         uint32_t shdr_status, shdr_add_status;
12296         union lpfc_sli4_cfg_shdr *shdr;
12297         uint16_t dmult;
12298         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12299
12300         /* sanity check on queue memory */
12301         if (!eq)
12302                 return -ENODEV;
12303         if (!phba->sli4_hba.pc_sli4_params.supported)
12304                 hw_page_size = SLI4_PAGE_SIZE;
12305
12306         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12307         if (!mbox)
12308                 return -ENOMEM;
12309         length = (sizeof(struct lpfc_mbx_eq_create) -
12310                   sizeof(struct lpfc_sli4_cfg_mhdr));
12311         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12312                          LPFC_MBOX_OPCODE_EQ_CREATE,
12313                          length, LPFC_SLI4_MBX_EMBED);
12314         eq_create = &mbox->u.mqe.un.eq_create;
12315         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
12316                eq->page_count);
12317         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
12318                LPFC_EQE_SIZE);
12319         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
12320         /* Calculate delay multiper from maximum interrupt per second */
12321         if (imax > LPFC_DMULT_CONST)
12322                 dmult = 0;
12323         else
12324                 dmult = LPFC_DMULT_CONST/imax - 1;
12325         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
12326                dmult);
12327         switch (eq->entry_count) {
12328         default:
12329                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12330                                 "0360 Unsupported EQ count. (%d)\n",
12331                                 eq->entry_count);
12332                 if (eq->entry_count < 256)
12333                         return -EINVAL;
12334                 /* otherwise default to smallest count (drop through) */
12335         case 256:
12336                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12337                        LPFC_EQ_CNT_256);
12338                 break;
12339         case 512:
12340                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12341                        LPFC_EQ_CNT_512);
12342                 break;
12343         case 1024:
12344                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12345                        LPFC_EQ_CNT_1024);
12346                 break;
12347         case 2048:
12348                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12349                        LPFC_EQ_CNT_2048);
12350                 break;
12351         case 4096:
12352                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12353                        LPFC_EQ_CNT_4096);
12354                 break;
12355         }
12356         list_for_each_entry(dmabuf, &eq->page_list, list) {
12357                 memset(dmabuf->virt, 0, hw_page_size);
12358                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12359                                         putPaddrLow(dmabuf->phys);
12360                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12361                                         putPaddrHigh(dmabuf->phys);
12362         }
12363         mbox->vport = phba->pport;
12364         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12365         mbox->context1 = NULL;
12366         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12367         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
12368         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12369         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12370         if (shdr_status || shdr_add_status || rc) {
12371                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12372                                 "2500 EQ_CREATE mailbox failed with "
12373                                 "status x%x add_status x%x, mbx status x%x\n",
12374                                 shdr_status, shdr_add_status, rc);
12375                 status = -ENXIO;
12376         }
12377         eq->type = LPFC_EQ;
12378         eq->subtype = LPFC_NONE;
12379         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
12380         if (eq->queue_id == 0xFFFF)
12381                 status = -ENXIO;
12382         eq->host_index = 0;
12383         eq->hba_index = 0;
12384
12385         mempool_free(mbox, phba->mbox_mem_pool);
12386         return status;
12387 }
12388
12389 /**
12390  * lpfc_cq_create - Create a Completion Queue on the HBA
12391  * @phba: HBA structure that indicates port to create a queue on.
12392  * @cq: The queue structure to use to create the completion queue.
12393  * @eq: The event queue to bind this completion queue to.
12394  *
12395  * This function creates a completion queue, as detailed in @wq, on a port,
12396  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
12397  *
12398  * The @phba struct is used to send mailbox command to HBA. The @cq struct
12399  * is used to get the entry count and entry size that are necessary to
12400  * determine the number of pages to allocate and use for this queue. The @eq
12401  * is used to indicate which event queue to bind this completion queue to. This
12402  * function will send the CQ_CREATE mailbox command to the HBA to setup the
12403  * completion queue. This function is asynchronous and will wait for the mailbox
12404  * command to finish before continuing.
12405  *
12406  * On success this function will return a zero. If unable to allocate enough
12407  * memory this function will return -ENOMEM. If the queue create mailbox command
12408  * fails this function will return -ENXIO.
12409  **/
12410 uint32_t
12411 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
12412                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
12413 {
12414         struct lpfc_mbx_cq_create *cq_create;
12415         struct lpfc_dmabuf *dmabuf;
12416         LPFC_MBOXQ_t *mbox;
12417         int rc, length, status = 0;
12418         uint32_t shdr_status, shdr_add_status;
12419         union lpfc_sli4_cfg_shdr *shdr;
12420         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12421
12422         /* sanity check on queue memory */
12423         if (!cq || !eq)
12424                 return -ENODEV;
12425         if (!phba->sli4_hba.pc_sli4_params.supported)
12426                 hw_page_size = SLI4_PAGE_SIZE;
12427
12428         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12429         if (!mbox)
12430                 return -ENOMEM;
12431         length = (sizeof(struct lpfc_mbx_cq_create) -
12432                   sizeof(struct lpfc_sli4_cfg_mhdr));
12433         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12434                          LPFC_MBOX_OPCODE_CQ_CREATE,
12435                          length, LPFC_SLI4_MBX_EMBED);
12436         cq_create = &mbox->u.mqe.un.cq_create;
12437         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
12438         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
12439                     cq->page_count);
12440         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
12441         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
12442         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12443                phba->sli4_hba.pc_sli4_params.cqv);
12444         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
12445                 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
12446                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
12447                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
12448                        eq->queue_id);
12449         } else {
12450                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
12451                        eq->queue_id);
12452         }
12453         switch (cq->entry_count) {
12454         default:
12455                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12456                                 "0361 Unsupported CQ count. (%d)\n",
12457                                 cq->entry_count);
12458                 if (cq->entry_count < 256) {
12459                         status = -EINVAL;
12460                         goto out;
12461                 }
12462                 /* otherwise default to smallest count (drop through) */
12463         case 256:
12464                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12465                        LPFC_CQ_CNT_256);
12466                 break;
12467         case 512:
12468                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12469                        LPFC_CQ_CNT_512);
12470                 break;
12471         case 1024:
12472                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12473                        LPFC_CQ_CNT_1024);
12474                 break;
12475         }
12476         list_for_each_entry(dmabuf, &cq->page_list, list) {
12477                 memset(dmabuf->virt, 0, hw_page_size);
12478                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12479                                         putPaddrLow(dmabuf->phys);
12480                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12481                                         putPaddrHigh(dmabuf->phys);
12482         }
12483         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12484
12485         /* The IOCTL status is embedded in the mailbox subheader. */
12486         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12487         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12488         if (shdr_status || shdr_add_status || rc) {
12489                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12490                                 "2501 CQ_CREATE mailbox failed with "
12491                                 "status x%x add_status x%x, mbx status x%x\n",
12492                                 shdr_status, shdr_add_status, rc);
12493                 status = -ENXIO;
12494                 goto out;
12495         }
12496         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12497         if (cq->queue_id == 0xFFFF) {
12498                 status = -ENXIO;
12499                 goto out;
12500         }
12501         /* link the cq onto the parent eq child list */
12502         list_add_tail(&cq->list, &eq->child_list);
12503         /* Set up completion queue's type and subtype */
12504         cq->type = type;
12505         cq->subtype = subtype;
12506         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12507         cq->assoc_qid = eq->queue_id;
12508         cq->host_index = 0;
12509         cq->hba_index = 0;
12510
12511 out:
12512         mempool_free(mbox, phba->mbox_mem_pool);
12513         return status;
12514 }
12515
12516 /**
12517  * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
12518  * @phba: HBA structure that indicates port to create a queue on.
12519  * @mq: The queue structure to use to create the mailbox queue.
12520  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
12521  * @cq: The completion queue to associate with this cq.
12522  *
12523  * This function provides failback (fb) functionality when the
12524  * mq_create_ext fails on older FW generations.  It's purpose is identical
12525  * to mq_create_ext otherwise.
12526  *
12527  * This routine cannot fail as all attributes were previously accessed and
12528  * initialized in mq_create_ext.
12529  **/
12530 static void
12531 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
12532                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
12533 {
12534         struct lpfc_mbx_mq_create *mq_create;
12535         struct lpfc_dmabuf *dmabuf;
12536         int length;
12537
12538         length = (sizeof(struct lpfc_mbx_mq_create) -
12539                   sizeof(struct lpfc_sli4_cfg_mhdr));
12540         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12541                          LPFC_MBOX_OPCODE_MQ_CREATE,
12542                          length, LPFC_SLI4_MBX_EMBED);
12543         mq_create = &mbox->u.mqe.un.mq_create;
12544         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
12545                mq->page_count);
12546         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
12547                cq->queue_id);
12548         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
12549         switch (mq->entry_count) {
12550         case 16:
12551                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12552                        LPFC_MQ_RING_SIZE_16);
12553                 break;
12554         case 32:
12555                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12556                        LPFC_MQ_RING_SIZE_32);
12557                 break;
12558         case 64:
12559                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12560                        LPFC_MQ_RING_SIZE_64);
12561                 break;
12562         case 128:
12563                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12564                        LPFC_MQ_RING_SIZE_128);
12565                 break;
12566         }
12567         list_for_each_entry(dmabuf, &mq->page_list, list) {
12568                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12569                         putPaddrLow(dmabuf->phys);
12570                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12571                         putPaddrHigh(dmabuf->phys);
12572         }
12573 }
12574
12575 /**
12576  * lpfc_mq_create - Create a mailbox Queue on the HBA
12577  * @phba: HBA structure that indicates port to create a queue on.
12578  * @mq: The queue structure to use to create the mailbox queue.
12579  * @cq: The completion queue to associate with this cq.
12580  * @subtype: The queue's subtype.
12581  *
12582  * This function creates a mailbox queue, as detailed in @mq, on a port,
12583  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
12584  *
12585  * The @phba struct is used to send mailbox command to HBA. The @cq struct
12586  * is used to get the entry count and entry size that are necessary to
12587  * determine the number of pages to allocate and use for this queue. This
12588  * function will send the MQ_CREATE mailbox command to the HBA to setup the
12589  * mailbox queue. This function is asynchronous and will wait for the mailbox
12590  * command to finish before continuing.
12591  *
12592  * On success this function will return a zero. If unable to allocate enough
12593  * memory this function will return -ENOMEM. If the queue create mailbox command
12594  * fails this function will return -ENXIO.
12595  **/
12596 int32_t
12597 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
12598                struct lpfc_queue *cq, uint32_t subtype)
12599 {
12600         struct lpfc_mbx_mq_create *mq_create;
12601         struct lpfc_mbx_mq_create_ext *mq_create_ext;
12602         struct lpfc_dmabuf *dmabuf;
12603         LPFC_MBOXQ_t *mbox;
12604         int rc, length, status = 0;
12605         uint32_t shdr_status, shdr_add_status;
12606         union lpfc_sli4_cfg_shdr *shdr;
12607         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12608
12609         /* sanity check on queue memory */
12610         if (!mq || !cq)
12611                 return -ENODEV;
12612         if (!phba->sli4_hba.pc_sli4_params.supported)
12613                 hw_page_size = SLI4_PAGE_SIZE;
12614
12615         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12616         if (!mbox)
12617                 return -ENOMEM;
12618         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
12619                   sizeof(struct lpfc_sli4_cfg_mhdr));
12620         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12621                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
12622                          length, LPFC_SLI4_MBX_EMBED);
12623
12624         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
12625         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
12626         bf_set(lpfc_mbx_mq_create_ext_num_pages,
12627                &mq_create_ext->u.request, mq->page_count);
12628         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
12629                &mq_create_ext->u.request, 1);
12630         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
12631                &mq_create_ext->u.request, 1);
12632         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
12633                &mq_create_ext->u.request, 1);
12634         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
12635                &mq_create_ext->u.request, 1);
12636         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
12637                &mq_create_ext->u.request, 1);
12638         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
12639         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12640                phba->sli4_hba.pc_sli4_params.mqv);
12641         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
12642                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
12643                        cq->queue_id);
12644         else
12645                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
12646                        cq->queue_id);
12647         switch (mq->entry_count) {
12648         default:
12649                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12650                                 "0362 Unsupported MQ count. (%d)\n",
12651                                 mq->entry_count);
12652                 if (mq->entry_count < 16) {
12653                         status = -EINVAL;
12654                         goto out;
12655                 }
12656                 /* otherwise default to smallest count (drop through) */
12657         case 16:
12658                 bf_set(lpfc_mq_context_ring_size,
12659                        &mq_create_ext->u.request.context,
12660                        LPFC_MQ_RING_SIZE_16);
12661                 break;
12662         case 32:
12663                 bf_set(lpfc_mq_context_ring_size,
12664                        &mq_create_ext->u.request.context,
12665                        LPFC_MQ_RING_SIZE_32);
12666                 break;
12667         case 64:
12668                 bf_set(lpfc_mq_context_ring_size,
12669                        &mq_create_ext->u.request.context,
12670                        LPFC_MQ_RING_SIZE_64);
12671                 break;
12672         case 128:
12673                 bf_set(lpfc_mq_context_ring_size,
12674                        &mq_create_ext->u.request.context,
12675                        LPFC_MQ_RING_SIZE_128);
12676                 break;
12677         }
12678         list_for_each_entry(dmabuf, &mq->page_list, list) {
12679                 memset(dmabuf->virt, 0, hw_page_size);
12680                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
12681                                         putPaddrLow(dmabuf->phys);
12682                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
12683                                         putPaddrHigh(dmabuf->phys);
12684         }
12685         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12686         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12687                               &mq_create_ext->u.response);
12688         if (rc != MBX_SUCCESS) {
12689                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12690                                 "2795 MQ_CREATE_EXT failed with "
12691                                 "status x%x. Failback to MQ_CREATE.\n",
12692                                 rc);
12693                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
12694                 mq_create = &mbox->u.mqe.un.mq_create;
12695                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12696                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
12697                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12698                                       &mq_create->u.response);
12699         }
12700
12701         /* The IOCTL status is embedded in the mailbox subheader. */
12702         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12703         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12704         if (shdr_status || shdr_add_status || rc) {
12705                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12706                                 "2502 MQ_CREATE mailbox failed with "
12707                                 "status x%x add_status x%x, mbx status x%x\n",
12708                                 shdr_status, shdr_add_status, rc);
12709                 status = -ENXIO;
12710                 goto out;
12711         }
12712         if (mq->queue_id == 0xFFFF) {
12713                 status = -ENXIO;
12714                 goto out;
12715         }
12716         mq->type = LPFC_MQ;
12717         mq->assoc_qid = cq->queue_id;
12718         mq->subtype = subtype;
12719         mq->host_index = 0;
12720         mq->hba_index = 0;
12721
12722         /* link the mq onto the parent cq child list */
12723         list_add_tail(&mq->list, &cq->child_list);
12724 out:
12725         mempool_free(mbox, phba->mbox_mem_pool);
12726         return status;
12727 }
12728
12729 /**
12730  * lpfc_wq_create - Create a Work Queue on the HBA
12731  * @phba: HBA structure that indicates port to create a queue on.
12732  * @wq: The queue structure to use to create the work queue.
12733  * @cq: The completion queue to bind this work queue to.
12734  * @subtype: The subtype of the work queue indicating its functionality.
12735  *
12736  * This function creates a work queue, as detailed in @wq, on a port, described
12737  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
12738  *
12739  * The @phba struct is used to send mailbox command to HBA. The @wq struct
12740  * is used to get the entry count and entry size that are necessary to
12741  * determine the number of pages to allocate and use for this queue. The @cq
12742  * is used to indicate which completion queue to bind this work queue to. This
12743  * function will send the WQ_CREATE mailbox command to the HBA to setup the
12744  * work queue. This function is asynchronous and will wait for the mailbox
12745  * command to finish before continuing.
12746  *
12747  * On success this function will return a zero. If unable to allocate enough
12748  * memory this function will return -ENOMEM. If the queue create mailbox command
12749  * fails this function will return -ENXIO.
12750  **/
12751 uint32_t
12752 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12753                struct lpfc_queue *cq, uint32_t subtype)
12754 {
12755         struct lpfc_mbx_wq_create *wq_create;
12756         struct lpfc_dmabuf *dmabuf;
12757         LPFC_MBOXQ_t *mbox;
12758         int rc, length, status = 0;
12759         uint32_t shdr_status, shdr_add_status;
12760         union lpfc_sli4_cfg_shdr *shdr;
12761         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12762         struct dma_address *page;
12763         void __iomem *bar_memmap_p;
12764         uint32_t db_offset;
12765         uint16_t pci_barset;
12766
12767         /* sanity check on queue memory */
12768         if (!wq || !cq)
12769                 return -ENODEV;
12770         if (!phba->sli4_hba.pc_sli4_params.supported)
12771                 hw_page_size = SLI4_PAGE_SIZE;
12772
12773         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12774         if (!mbox)
12775                 return -ENOMEM;
12776         length = (sizeof(struct lpfc_mbx_wq_create) -
12777                   sizeof(struct lpfc_sli4_cfg_mhdr));
12778         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12779                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
12780                          length, LPFC_SLI4_MBX_EMBED);
12781         wq_create = &mbox->u.mqe.un.wq_create;
12782         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
12783         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
12784                     wq->page_count);
12785         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
12786                     cq->queue_id);
12787         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12788                phba->sli4_hba.pc_sli4_params.wqv);
12789
12790         if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12791                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12792                        wq->entry_count);
12793                 switch (wq->entry_size) {
12794                 default:
12795                 case 64:
12796                         bf_set(lpfc_mbx_wq_create_wqe_size,
12797                                &wq_create->u.request_1,
12798                                LPFC_WQ_WQE_SIZE_64);
12799                         break;
12800                 case 128:
12801                         bf_set(lpfc_mbx_wq_create_wqe_size,
12802                                &wq_create->u.request_1,
12803                                LPFC_WQ_WQE_SIZE_128);
12804                         break;
12805                 }
12806                 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
12807                        (PAGE_SIZE/SLI4_PAGE_SIZE));
12808                 page = wq_create->u.request_1.page;
12809         } else {
12810                 page = wq_create->u.request.page;
12811         }
12812         list_for_each_entry(dmabuf, &wq->page_list, list) {
12813                 memset(dmabuf->virt, 0, hw_page_size);
12814                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12815                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
12816         }
12817
12818         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
12819                 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
12820
12821         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12822         /* The IOCTL status is embedded in the mailbox subheader. */
12823         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12824         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12825         if (shdr_status || shdr_add_status || rc) {
12826                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12827                                 "2503 WQ_CREATE mailbox failed with "
12828                                 "status x%x add_status x%x, mbx status x%x\n",
12829                                 shdr_status, shdr_add_status, rc);
12830                 status = -ENXIO;
12831                 goto out;
12832         }
12833         wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
12834         if (wq->queue_id == 0xFFFF) {
12835                 status = -ENXIO;
12836                 goto out;
12837         }
12838         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
12839                 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
12840                                        &wq_create->u.response);
12841                 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
12842                     (wq->db_format != LPFC_DB_RING_FORMAT)) {
12843                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12844                                         "3265 WQ[%d] doorbell format not "
12845                                         "supported: x%x\n", wq->queue_id,
12846                                         wq->db_format);
12847                         status = -EINVAL;
12848                         goto out;
12849                 }
12850                 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
12851                                     &wq_create->u.response);
12852                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
12853                 if (!bar_memmap_p) {
12854                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12855                                         "3263 WQ[%d] failed to memmap pci "
12856                                         "barset:x%x\n", wq->queue_id,
12857                                         pci_barset);
12858                         status = -ENOMEM;
12859                         goto out;
12860                 }
12861                 db_offset = wq_create->u.response.doorbell_offset;
12862                 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
12863                     (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
12864                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12865                                         "3252 WQ[%d] doorbell offset not "
12866                                         "supported: x%x\n", wq->queue_id,
12867                                         db_offset);
12868                         status = -EINVAL;
12869                         goto out;
12870                 }
12871                 wq->db_regaddr = bar_memmap_p + db_offset;
12872                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12873                                 "3264 WQ[%d]: barset:x%x, offset:x%x\n",
12874                                 wq->queue_id, pci_barset, db_offset);
12875         } else {
12876                 wq->db_format = LPFC_DB_LIST_FORMAT;
12877                 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
12878         }
12879         wq->type = LPFC_WQ;
12880         wq->assoc_qid = cq->queue_id;
12881         wq->subtype = subtype;
12882         wq->host_index = 0;
12883         wq->hba_index = 0;
12884         wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
12885
12886         /* link the wq onto the parent cq child list */
12887         list_add_tail(&wq->list, &cq->child_list);
12888 out:
12889         mempool_free(mbox, phba->mbox_mem_pool);
12890         return status;
12891 }
12892
12893 /**
12894  * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
12895  * @phba: HBA structure that indicates port to create a queue on.
12896  * @rq:   The queue structure to use for the receive queue.
12897  * @qno:  The associated HBQ number
12898  *
12899  *
12900  * For SLI4 we need to adjust the RQ repost value based on
12901  * the number of buffers that are initially posted to the RQ.
12902  */
12903 void
12904 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12905 {
12906         uint32_t cnt;
12907
12908         /* sanity check on queue memory */
12909         if (!rq)
12910                 return;
12911         cnt = lpfc_hbq_defs[qno]->entry_count;
12912
12913         /* Recalc repost for RQs based on buffers initially posted */
12914         cnt = (cnt >> 3);
12915         if (cnt < LPFC_QUEUE_MIN_REPOST)
12916                 cnt = LPFC_QUEUE_MIN_REPOST;
12917
12918         rq->entry_repost = cnt;
12919 }
12920
12921 /**
12922  * lpfc_rq_create - Create a Receive Queue on the HBA
12923  * @phba: HBA structure that indicates port to create a queue on.
12924  * @hrq: The queue structure to use to create the header receive queue.
12925  * @drq: The queue structure to use to create the data receive queue.
12926  * @cq: The completion queue to bind this work queue to.
12927  *
12928  * This function creates a receive buffer queue pair , as detailed in @hrq and
12929  * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
12930  * to the HBA.
12931  *
12932  * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
12933  * struct is used to get the entry count that is necessary to determine the
12934  * number of pages to use for this queue. The @cq is used to indicate which
12935  * completion queue to bind received buffers that are posted to these queues to.
12936  * This function will send the RQ_CREATE mailbox command to the HBA to setup the
12937  * receive queue pair. This function is asynchronous and will wait for the
12938  * mailbox command to finish before continuing.
12939  *
12940  * On success this function will return a zero. If unable to allocate enough
12941  * memory this function will return -ENOMEM. If the queue create mailbox command
12942  * fails this function will return -ENXIO.
12943  **/
12944 uint32_t
12945 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12946                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
12947 {
12948         struct lpfc_mbx_rq_create *rq_create;
12949         struct lpfc_dmabuf *dmabuf;
12950         LPFC_MBOXQ_t *mbox;
12951         int rc, length, status = 0;
12952         uint32_t shdr_status, shdr_add_status;
12953         union lpfc_sli4_cfg_shdr *shdr;
12954         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12955         void __iomem *bar_memmap_p;
12956         uint32_t db_offset;
12957         uint16_t pci_barset;
12958
12959         /* sanity check on queue memory */
12960         if (!hrq || !drq || !cq)
12961                 return -ENODEV;
12962         if (!phba->sli4_hba.pc_sli4_params.supported)
12963                 hw_page_size = SLI4_PAGE_SIZE;
12964
12965         if (hrq->entry_count != drq->entry_count)
12966                 return -EINVAL;
12967         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12968         if (!mbox)
12969                 return -ENOMEM;
12970         length = (sizeof(struct lpfc_mbx_rq_create) -
12971                   sizeof(struct lpfc_sli4_cfg_mhdr));
12972         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12973                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12974                          length, LPFC_SLI4_MBX_EMBED);
12975         rq_create = &mbox->u.mqe.un.rq_create;
12976         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12977         bf_set(lpfc_mbox_hdr_version, &shdr->request,
12978                phba->sli4_hba.pc_sli4_params.rqv);
12979         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12980                 bf_set(lpfc_rq_context_rqe_count_1,
12981                        &rq_create->u.request.context,
12982                        hrq->entry_count);
12983                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
12984                 bf_set(lpfc_rq_context_rqe_size,
12985                        &rq_create->u.request.context,
12986                        LPFC_RQE_SIZE_8);
12987                 bf_set(lpfc_rq_context_page_size,
12988                        &rq_create->u.request.context,
12989                        (PAGE_SIZE/SLI4_PAGE_SIZE));
12990         } else {
12991                 switch (hrq->entry_count) {
12992                 default:
12993                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12994                                         "2535 Unsupported RQ count. (%d)\n",
12995                                         hrq->entry_count);
12996                         if (hrq->entry_count < 512) {
12997                                 status = -EINVAL;
12998                                 goto out;
12999                         }
13000                         /* otherwise default to smallest count (drop through) */
13001                 case 512:
13002                         bf_set(lpfc_rq_context_rqe_count,
13003                                &rq_create->u.request.context,
13004                                LPFC_RQ_RING_SIZE_512);
13005                         break;
13006                 case 1024:
13007                         bf_set(lpfc_rq_context_rqe_count,
13008                                &rq_create->u.request.context,
13009                                LPFC_RQ_RING_SIZE_1024);
13010                         break;
13011                 case 2048:
13012                         bf_set(lpfc_rq_context_rqe_count,
13013                                &rq_create->u.request.context,
13014                                LPFC_RQ_RING_SIZE_2048);
13015                         break;
13016                 case 4096:
13017                         bf_set(lpfc_rq_context_rqe_count,
13018                                &rq_create->u.request.context,
13019                                LPFC_RQ_RING_SIZE_4096);
13020                         break;
13021                 }
13022                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13023                        LPFC_HDR_BUF_SIZE);
13024         }
13025         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13026                cq->queue_id);
13027         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13028                hrq->page_count);
13029         list_for_each_entry(dmabuf, &hrq->page_list, list) {
13030                 memset(dmabuf->virt, 0, hw_page_size);
13031                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13032                                         putPaddrLow(dmabuf->phys);
13033                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13034                                         putPaddrHigh(dmabuf->phys);
13035         }
13036         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13037                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13038
13039         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13040         /* The IOCTL status is embedded in the mailbox subheader. */
13041         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13042         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13043         if (shdr_status || shdr_add_status || rc) {
13044                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13045                                 "2504 RQ_CREATE mailbox failed with "
13046                                 "status x%x add_status x%x, mbx status x%x\n",
13047                                 shdr_status, shdr_add_status, rc);
13048                 status = -ENXIO;
13049                 goto out;
13050         }
13051         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13052         if (hrq->queue_id == 0xFFFF) {
13053                 status = -ENXIO;
13054                 goto out;
13055         }
13056
13057         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13058                 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
13059                                         &rq_create->u.response);
13060                 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
13061                     (hrq->db_format != LPFC_DB_RING_FORMAT)) {
13062                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13063                                         "3262 RQ [%d] doorbell format not "
13064                                         "supported: x%x\n", hrq->queue_id,
13065                                         hrq->db_format);
13066                         status = -EINVAL;
13067                         goto out;
13068                 }
13069
13070                 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
13071                                     &rq_create->u.response);
13072                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13073                 if (!bar_memmap_p) {
13074                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13075                                         "3269 RQ[%d] failed to memmap pci "
13076                                         "barset:x%x\n", hrq->queue_id,
13077                                         pci_barset);
13078                         status = -ENOMEM;
13079                         goto out;
13080                 }
13081
13082                 db_offset = rq_create->u.response.doorbell_offset;
13083                 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
13084                     (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
13085                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13086                                         "3270 RQ[%d] doorbell offset not "
13087                                         "supported: x%x\n", hrq->queue_id,
13088                                         db_offset);
13089                         status = -EINVAL;
13090                         goto out;
13091                 }
13092                 hrq->db_regaddr = bar_memmap_p + db_offset;
13093                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13094                                 "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
13095                                 hrq->queue_id, pci_barset, db_offset);
13096         } else {
13097                 hrq->db_format = LPFC_DB_RING_FORMAT;
13098                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
13099         }
13100         hrq->type = LPFC_HRQ;
13101         hrq->assoc_qid = cq->queue_id;
13102         hrq->subtype = subtype;
13103         hrq->host_index = 0;
13104         hrq->hba_index = 0;
13105
13106         /* now create the data queue */
13107         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13108                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
13109                          length, LPFC_SLI4_MBX_EMBED);
13110         bf_set(lpfc_mbox_hdr_version, &shdr->request,
13111                phba->sli4_hba.pc_sli4_params.rqv);
13112         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
13113                 bf_set(lpfc_rq_context_rqe_count_1,
13114                        &rq_create->u.request.context, hrq->entry_count);
13115                 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
13116                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
13117                        LPFC_RQE_SIZE_8);
13118                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
13119                        (PAGE_SIZE/SLI4_PAGE_SIZE));
13120         } else {
13121                 switch (drq->entry_count) {
13122                 default:
13123                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13124                                         "2536 Unsupported RQ count. (%d)\n",
13125                                         drq->entry_count);
13126                         if (drq->entry_count < 512) {
13127                                 status = -EINVAL;
13128                                 goto out;
13129                         }
13130                         /* otherwise default to smallest count (drop through) */
13131                 case 512:
13132                         bf_set(lpfc_rq_context_rqe_count,
13133                                &rq_create->u.request.context,
13134                                LPFC_RQ_RING_SIZE_512);
13135                         break;
13136                 case 1024:
13137                         bf_set(lpfc_rq_context_rqe_count,
13138                                &rq_create->u.request.context,
13139                                LPFC_RQ_RING_SIZE_1024);
13140                         break;
13141                 case 2048:
13142                         bf_set(lpfc_rq_context_rqe_count,
13143                                &rq_create->u.request.context,
13144                                LPFC_RQ_RING_SIZE_2048);
13145                         break;
13146                 case 4096:
13147                         bf_set(lpfc_rq_context_rqe_count,
13148                                &rq_create->u.request.context,
13149                                LPFC_RQ_RING_SIZE_4096);
13150                         break;
13151                 }
13152                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13153                        LPFC_DATA_BUF_SIZE);
13154         }
13155         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13156                cq->queue_id);
13157         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13158                drq->page_count);
13159         list_for_each_entry(dmabuf, &drq->page_list, list) {
13160                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13161                                         putPaddrLow(dmabuf->phys);
13162                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13163                                         putPaddrHigh(dmabuf->phys);
13164         }
13165         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13166                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13167         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13168         /* The IOCTL status is embedded in the mailbox subheader. */
13169         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
13170         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13171         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13172         if (shdr_status || shdr_add_status || rc) {
13173                 status = -ENXIO;
13174                 goto out;
13175         }
13176         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13177         if (drq->queue_id == 0xFFFF) {
13178                 status = -ENXIO;
13179                 goto out;
13180         }
13181         drq->type = LPFC_DRQ;
13182         drq->assoc_qid = cq->queue_id;
13183         drq->subtype = subtype;
13184         drq->host_index = 0;
13185         drq->hba_index = 0;
13186
13187         /* link the header and data RQs onto the parent cq child list */
13188         list_add_tail(&hrq->list, &cq->child_list);
13189         list_add_tail(&drq->list, &cq->child_list);
13190
13191 out:
13192         mempool_free(mbox, phba->mbox_mem_pool);
13193         return status;
13194 }
13195
13196 /**
13197  * lpfc_eq_destroy - Destroy an event Queue on the HBA
13198  * @eq: The queue structure associated with the queue to destroy.
13199  *
13200  * This function destroys a queue, as detailed in @eq by sending an mailbox
13201  * command, specific to the type of queue, to the HBA.
13202  *
13203  * The @eq struct is used to get the queue ID of the queue to destroy.
13204  *
13205  * On success this function will return a zero. If the queue destroy mailbox
13206  * command fails this function will return -ENXIO.
13207  **/
13208 uint32_t
13209 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
13210 {
13211         LPFC_MBOXQ_t *mbox;
13212         int rc, length, status = 0;
13213         uint32_t shdr_status, shdr_add_status;
13214         union lpfc_sli4_cfg_shdr *shdr;
13215
13216         /* sanity check on queue memory */
13217         if (!eq)
13218                 return -ENODEV;
13219         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
13220         if (!mbox)
13221                 return -ENOMEM;
13222         length = (sizeof(struct lpfc_mbx_eq_destroy) -
13223                   sizeof(struct lpfc_sli4_cfg_mhdr));
13224         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13225                          LPFC_MBOX_OPCODE_EQ_DESTROY,
13226                          length, LPFC_SLI4_MBX_EMBED);
13227         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
13228                eq->queue_id);
13229         mbox->vport = eq->phba->pport;
13230         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13231
13232         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
13233         /* The IOCTL status is embedded in the mailbox subheader. */
13234         shdr = (union lpfc_sli4_cfg_shdr *)
13235                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
13236         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13237         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13238         if (shdr_status || shdr_add_status || rc) {
13239                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13240                                 "2505 EQ_DESTROY mailbox failed with "
13241                                 "status x%x add_status x%x, mbx status x%x\n",
13242                                 shdr_status, shdr_add_status, rc);
13243                 status = -ENXIO;
13244         }
13245
13246         /* Remove eq from any list */
13247         list_del_init(&eq->list);
13248         mempool_free(mbox, eq->phba->mbox_mem_pool);
13249         return status;
13250 }
13251
13252 /**
13253  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
13254  * @cq: The queue structure associated with the queue to destroy.
13255  *
13256  * This function destroys a queue, as detailed in @cq by sending an mailbox
13257  * command, specific to the type of queue, to the HBA.
13258  *
13259  * The @cq struct is used to get the queue ID of the queue to destroy.
13260  *
13261  * On success this function will return a zero. If the queue destroy mailbox
13262  * command fails this function will return -ENXIO.
13263  **/
13264 uint32_t
13265 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
13266 {
13267         LPFC_MBOXQ_t *mbox;
13268         int rc, length, status = 0;
13269         uint32_t shdr_status, shdr_add_status;
13270         union lpfc_sli4_cfg_shdr *shdr;
13271
13272         /* sanity check on queue memory */
13273         if (!cq)
13274                 return -ENODEV;
13275         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
13276         if (!mbox)
13277                 return -ENOMEM;
13278         length = (sizeof(struct lpfc_mbx_cq_destroy) -
13279                   sizeof(struct lpfc_sli4_cfg_mhdr));
13280         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13281                          LPFC_MBOX_OPCODE_CQ_DESTROY,
13282                          length, LPFC_SLI4_MBX_EMBED);
13283         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
13284                cq->queue_id);
13285         mbox->vport = cq->phba->pport;
13286         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13287         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
13288         /* The IOCTL status is embedded in the mailbox subheader. */
13289         shdr = (union lpfc_sli4_cfg_shdr *)
13290                 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
13291         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13292         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13293         if (shdr_status || shdr_add_status || rc) {
13294                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13295                                 "2506 CQ_DESTROY mailbox failed with "
13296                                 "status x%x add_status x%x, mbx status x%x\n",
13297                                 shdr_status, shdr_add_status, rc);
13298                 status = -ENXIO;
13299         }
13300         /* Remove cq from any list */
13301         list_del_init(&cq->list);
13302         mempool_free(mbox, cq->phba->mbox_mem_pool);
13303         return status;
13304 }
13305
13306 /**
13307  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
13308  * @qm: The queue structure associated with the queue to destroy.
13309  *
13310  * This function destroys a queue, as detailed in @mq by sending an mailbox
13311  * command, specific to the type of queue, to the HBA.
13312  *
13313  * The @mq struct is used to get the queue ID of the queue to destroy.
13314  *
13315  * On success this function will return a zero. If the queue destroy mailbox
13316  * command fails this function will return -ENXIO.
13317  **/
13318 uint32_t
13319 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
13320 {
13321         LPFC_MBOXQ_t *mbox;
13322         int rc, length, status = 0;
13323         uint32_t shdr_status, shdr_add_status;
13324         union lpfc_sli4_cfg_shdr *shdr;
13325
13326         /* sanity check on queue memory */
13327         if (!mq)
13328                 return -ENODEV;
13329         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
13330         if (!mbox)
13331                 return -ENOMEM;
13332         length = (sizeof(struct lpfc_mbx_mq_destroy) -
13333                   sizeof(struct lpfc_sli4_cfg_mhdr));
13334         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13335                          LPFC_MBOX_OPCODE_MQ_DESTROY,
13336                          length, LPFC_SLI4_MBX_EMBED);
13337         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
13338                mq->queue_id);
13339         mbox->vport = mq->phba->pport;
13340         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13341         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
13342         /* The IOCTL status is embedded in the mailbox subheader. */
13343         shdr = (union lpfc_sli4_cfg_shdr *)
13344                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
13345         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13346         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13347         if (shdr_status || shdr_add_status || rc) {
13348                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13349                                 "2507 MQ_DESTROY mailbox failed with "
13350                                 "status x%x add_status x%x, mbx status x%x\n",
13351                                 shdr_status, shdr_add_status, rc);
13352                 status = -ENXIO;
13353         }
13354         /* Remove mq from any list */
13355         list_del_init(&mq->list);
13356         mempool_free(mbox, mq->phba->mbox_mem_pool);
13357         return status;
13358 }
13359
13360 /**
13361  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
13362  * @wq: The queue structure associated with the queue to destroy.
13363  *
13364  * This function destroys a queue, as detailed in @wq by sending an mailbox
13365  * command, specific to the type of queue, to the HBA.
13366  *
13367  * The @wq struct is used to get the queue ID of the queue to destroy.
13368  *
13369  * On success this function will return a zero. If the queue destroy mailbox
13370  * command fails this function will return -ENXIO.
13371  **/
13372 uint32_t
13373 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
13374 {
13375         LPFC_MBOXQ_t *mbox;
13376         int rc, length, status = 0;
13377         uint32_t shdr_status, shdr_add_status;
13378         union lpfc_sli4_cfg_shdr *shdr;
13379
13380         /* sanity check on queue memory */
13381         if (!wq)
13382                 return -ENODEV;
13383         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
13384         if (!mbox)
13385                 return -ENOMEM;
13386         length = (sizeof(struct lpfc_mbx_wq_destroy) -
13387                   sizeof(struct lpfc_sli4_cfg_mhdr));
13388         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13389                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
13390                          length, LPFC_SLI4_MBX_EMBED);
13391         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
13392                wq->queue_id);
13393         mbox->vport = wq->phba->pport;
13394         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13395         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
13396         shdr = (union lpfc_sli4_cfg_shdr *)
13397                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
13398         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13399         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13400         if (shdr_status || shdr_add_status || rc) {
13401                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13402                                 "2508 WQ_DESTROY mailbox failed with "
13403                                 "status x%x add_status x%x, mbx status x%x\n",
13404                                 shdr_status, shdr_add_status, rc);
13405                 status = -ENXIO;
13406         }
13407         /* Remove wq from any list */
13408         list_del_init(&wq->list);
13409         mempool_free(mbox, wq->phba->mbox_mem_pool);
13410         return status;
13411 }
13412
13413 /**
13414  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
13415  * @rq: The queue structure associated with the queue to destroy.
13416  *
13417  * This function destroys a queue, as detailed in @rq by sending an mailbox
13418  * command, specific to the type of queue, to the HBA.
13419  *
13420  * The @rq struct is used to get the queue ID of the queue to destroy.
13421  *
13422  * On success this function will return a zero. If the queue destroy mailbox
13423  * command fails this function will return -ENXIO.
13424  **/
13425 uint32_t
13426 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13427                 struct lpfc_queue *drq)
13428 {
13429         LPFC_MBOXQ_t *mbox;
13430         int rc, length, status = 0;
13431         uint32_t shdr_status, shdr_add_status;
13432         union lpfc_sli4_cfg_shdr *shdr;
13433
13434         /* sanity check on queue memory */
13435         if (!hrq || !drq)
13436                 return -ENODEV;
13437         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
13438         if (!mbox)
13439                 return -ENOMEM;
13440         length = (sizeof(struct lpfc_mbx_rq_destroy) -
13441                   sizeof(struct lpfc_sli4_cfg_mhdr));
13442         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13443                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
13444                          length, LPFC_SLI4_MBX_EMBED);
13445         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13446                hrq->queue_id);
13447         mbox->vport = hrq->phba->pport;
13448         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13449         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
13450         /* The IOCTL status is embedded in the mailbox subheader. */
13451         shdr = (union lpfc_sli4_cfg_shdr *)
13452                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13453         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13454         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13455         if (shdr_status || shdr_add_status || rc) {
13456                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13457                                 "2509 RQ_DESTROY mailbox failed with "
13458                                 "status x%x add_status x%x, mbx status x%x\n",
13459                                 shdr_status, shdr_add_status, rc);
13460                 if (rc != MBX_TIMEOUT)
13461                         mempool_free(mbox, hrq->phba->mbox_mem_pool);
13462                 return -ENXIO;
13463         }
13464         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13465                drq->queue_id);
13466         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
13467         shdr = (union lpfc_sli4_cfg_shdr *)
13468                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13469         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13470         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13471         if (shdr_status || shdr_add_status || rc) {
13472                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13473                                 "2510 RQ_DESTROY mailbox failed with "
13474                                 "status x%x add_status x%x, mbx status x%x\n",
13475                                 shdr_status, shdr_add_status, rc);
13476                 status = -ENXIO;
13477         }
13478         list_del_init(&hrq->list);
13479         list_del_init(&drq->list);
13480         mempool_free(mbox, hrq->phba->mbox_mem_pool);
13481         return status;
13482 }
13483
13484 /**
13485  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
13486  * @phba: The virtual port for which this call being executed.
13487  * @pdma_phys_addr0: Physical address of the 1st SGL page.
13488  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
13489  * @xritag: the xritag that ties this io to the SGL pages.
13490  *
13491  * This routine will post the sgl pages for the IO that has the xritag
13492  * that is in the iocbq structure. The xritag is assigned during iocbq
13493  * creation and persists for as long as the driver is loaded.
13494  * if the caller has fewer than 256 scatter gather segments to map then
13495  * pdma_phys_addr1 should be 0.
13496  * If the caller needs to map more than 256 scatter gather segment then
13497  * pdma_phys_addr1 should be a valid physical address.
13498  * physical address for SGLs must be 64 byte aligned.
13499  * If you are going to map 2 SGL's then the first one must have 256 entries
13500  * the second sgl can have between 1 and 256 entries.
13501  *
13502  * Return codes:
13503  *      0 - Success
13504  *      -ENXIO, -ENOMEM - Failure
13505  **/
13506 int
13507 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
13508                 dma_addr_t pdma_phys_addr0,
13509                 dma_addr_t pdma_phys_addr1,
13510                 uint16_t xritag)
13511 {
13512         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
13513         LPFC_MBOXQ_t *mbox;
13514         int rc;
13515         uint32_t shdr_status, shdr_add_status;
13516         uint32_t mbox_tmo;
13517         union lpfc_sli4_cfg_shdr *shdr;
13518
13519         if (xritag == NO_XRI) {
13520                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13521                                 "0364 Invalid param:\n");
13522                 return -EINVAL;
13523         }
13524
13525         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13526         if (!mbox)
13527                 return -ENOMEM;
13528
13529         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13530                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13531                         sizeof(struct lpfc_mbx_post_sgl_pages) -
13532                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
13533
13534         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
13535                                 &mbox->u.mqe.un.post_sgl_pages;
13536         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
13537         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
13538
13539         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
13540                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
13541         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
13542                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
13543
13544         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
13545                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
13546         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
13547                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
13548         if (!phba->sli4_hba.intr_enable)
13549                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13550         else {
13551                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13552                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13553         }
13554         /* The IOCTL status is embedded in the mailbox subheader. */
13555         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
13556         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13557         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13558         if (rc != MBX_TIMEOUT)
13559                 mempool_free(mbox, phba->mbox_mem_pool);
13560         if (shdr_status || shdr_add_status || rc) {
13561                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13562                                 "2511 POST_SGL mailbox failed with "
13563                                 "status x%x add_status x%x, mbx status x%x\n",
13564                                 shdr_status, shdr_add_status, rc);
13565                 rc = -ENXIO;
13566         }
13567         return 0;
13568 }
13569
13570 /**
13571  * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
13572  * @phba: pointer to lpfc hba data structure.
13573  *
13574  * This routine is invoked to post rpi header templates to the
13575  * HBA consistent with the SLI-4 interface spec.  This routine
13576  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
13577  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
13578  *
13579  * Returns
13580  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
13581  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
13582  **/
13583 uint16_t
13584 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
13585 {
13586         unsigned long xri;
13587
13588         /*
13589          * Fetch the next logical xri.  Because this index is logical,
13590          * the driver starts at 0 each time.
13591          */
13592         spin_lock_irq(&phba->hbalock);
13593         xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
13594                                  phba->sli4_hba.max_cfg_param.max_xri, 0);
13595         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
13596                 spin_unlock_irq(&phba->hbalock);
13597                 return NO_XRI;
13598         } else {
13599                 set_bit(xri, phba->sli4_hba.xri_bmask);
13600                 phba->sli4_hba.max_cfg_param.xri_used++;
13601         }
13602         spin_unlock_irq(&phba->hbalock);
13603         return xri;
13604 }
13605
13606 /**
13607  * lpfc_sli4_free_xri - Release an xri for reuse.
13608  * @phba: pointer to lpfc hba data structure.
13609  *
13610  * This routine is invoked to release an xri to the pool of
13611  * available rpis maintained by the driver.
13612  **/
13613 void
13614 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13615 {
13616         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
13617                 phba->sli4_hba.max_cfg_param.xri_used--;
13618         }
13619 }
13620
13621 /**
13622  * lpfc_sli4_free_xri - Release an xri for reuse.
13623  * @phba: pointer to lpfc hba data structure.
13624  *
13625  * This routine is invoked to release an xri to the pool of
13626  * available rpis maintained by the driver.
13627  **/
13628 void
13629 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13630 {
13631         spin_lock_irq(&phba->hbalock);
13632         __lpfc_sli4_free_xri(phba, xri);
13633         spin_unlock_irq(&phba->hbalock);
13634 }
13635
13636 /**
13637  * lpfc_sli4_next_xritag - Get an xritag for the io
13638  * @phba: Pointer to HBA context object.
13639  *
13640  * This function gets an xritag for the iocb. If there is no unused xritag
13641  * it will return 0xffff.
13642  * The function returns the allocated xritag if successful, else returns zero.
13643  * Zero is not a valid xritag.
13644  * The caller is not required to hold any lock.
13645  **/
13646 uint16_t
13647 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13648 {
13649         uint16_t xri_index;
13650
13651         xri_index = lpfc_sli4_alloc_xri(phba);
13652         if (xri_index == NO_XRI)
13653                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13654                                 "2004 Failed to allocate XRI.last XRITAG is %d"
13655                                 " Max XRI is %d, Used XRI is %d\n",
13656                                 xri_index,
13657                                 phba->sli4_hba.max_cfg_param.max_xri,
13658                                 phba->sli4_hba.max_cfg_param.xri_used);
13659         return xri_index;
13660 }
13661
13662 /**
13663  * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
13664  * @phba: pointer to lpfc hba data structure.
13665  * @post_sgl_list: pointer to els sgl entry list.
13666  * @count: number of els sgl entries on the list.
13667  *
13668  * This routine is invoked to post a block of driver's sgl pages to the
13669  * HBA using non-embedded mailbox command. No Lock is held. This routine
13670  * is only called when the driver is loading and after all IO has been
13671  * stopped.
13672  **/
13673 static int
13674 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
13675                             struct list_head *post_sgl_list,
13676                             int post_cnt)
13677 {
13678         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
13679         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13680         struct sgl_page_pairs *sgl_pg_pairs;
13681         void *viraddr;
13682         LPFC_MBOXQ_t *mbox;
13683         uint32_t reqlen, alloclen, pg_pairs;
13684         uint32_t mbox_tmo;
13685         uint16_t xritag_start = 0;
13686         int rc = 0;
13687         uint32_t shdr_status, shdr_add_status;
13688         union lpfc_sli4_cfg_shdr *shdr;
13689
13690         reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
13691                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13692         if (reqlen > SLI4_PAGE_SIZE) {
13693                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13694                                 "2559 Block sgl registration required DMA "
13695                                 "size (%d) great than a page\n", reqlen);
13696                 return -ENOMEM;
13697         }
13698         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13699         if (!mbox)
13700                 return -ENOMEM;
13701
13702         /* Allocate DMA memory and set up the non-embedded mailbox command */
13703         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13704                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13705                          LPFC_SLI4_MBX_NEMBED);
13706
13707         if (alloclen < reqlen) {
13708                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13709                                 "0285 Allocated DMA memory size (%d) is "
13710                                 "less than the requested DMA memory "
13711                                 "size (%d)\n", alloclen, reqlen);
13712                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13713                 return -ENOMEM;
13714         }
13715         /* Set up the SGL pages in the non-embedded DMA pages */
13716         viraddr = mbox->sge_array->addr[0];
13717         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13718         sgl_pg_pairs = &sgl->sgl_pg_pairs;
13719
13720         pg_pairs = 0;
13721         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
13722                 /* Set up the sge entry */
13723                 sgl_pg_pairs->sgl_pg0_addr_lo =
13724                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13725                 sgl_pg_pairs->sgl_pg0_addr_hi =
13726                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13727                 sgl_pg_pairs->sgl_pg1_addr_lo =
13728                                 cpu_to_le32(putPaddrLow(0));
13729                 sgl_pg_pairs->sgl_pg1_addr_hi =
13730                                 cpu_to_le32(putPaddrHigh(0));
13731
13732                 /* Keep the first xritag on the list */
13733                 if (pg_pairs == 0)
13734                         xritag_start = sglq_entry->sli4_xritag;
13735                 sgl_pg_pairs++;
13736                 pg_pairs++;
13737         }
13738
13739         /* Complete initialization and perform endian conversion. */
13740         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13741         bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
13742         sgl->word0 = cpu_to_le32(sgl->word0);
13743         if (!phba->sli4_hba.intr_enable)
13744                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13745         else {
13746                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13747                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13748         }
13749         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13750         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13751         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13752         if (rc != MBX_TIMEOUT)
13753                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13754         if (shdr_status || shdr_add_status || rc) {
13755                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13756                                 "2513 POST_SGL_BLOCK mailbox command failed "
13757                                 "status x%x add_status x%x mbx status x%x\n",
13758                                 shdr_status, shdr_add_status, rc);
13759                 rc = -ENXIO;
13760         }
13761         return rc;
13762 }
13763
13764 /**
13765  * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
13766  * @phba: pointer to lpfc hba data structure.
13767  * @sblist: pointer to scsi buffer list.
13768  * @count: number of scsi buffers on the list.
13769  *
13770  * This routine is invoked to post a block of @count scsi sgl pages from a
13771  * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
13772  * No Lock is held.
13773  *
13774  **/
13775 int
13776 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
13777                               struct list_head *sblist,
13778                               int count)
13779 {
13780         struct lpfc_scsi_buf *psb;
13781         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13782         struct sgl_page_pairs *sgl_pg_pairs;
13783         void *viraddr;
13784         LPFC_MBOXQ_t *mbox;
13785         uint32_t reqlen, alloclen, pg_pairs;
13786         uint32_t mbox_tmo;
13787         uint16_t xritag_start = 0;
13788         int rc = 0;
13789         uint32_t shdr_status, shdr_add_status;
13790         dma_addr_t pdma_phys_bpl1;
13791         union lpfc_sli4_cfg_shdr *shdr;
13792
13793         /* Calculate the requested length of the dma memory */
13794         reqlen = count * sizeof(struct sgl_page_pairs) +
13795                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13796         if (reqlen > SLI4_PAGE_SIZE) {
13797                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13798                                 "0217 Block sgl registration required DMA "
13799                                 "size (%d) great than a page\n", reqlen);
13800                 return -ENOMEM;
13801         }
13802         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13803         if (!mbox) {
13804                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13805                                 "0283 Failed to allocate mbox cmd memory\n");
13806                 return -ENOMEM;
13807         }
13808
13809         /* Allocate DMA memory and set up the non-embedded mailbox command */
13810         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13811                                 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13812                                 LPFC_SLI4_MBX_NEMBED);
13813
13814         if (alloclen < reqlen) {
13815                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13816                                 "2561 Allocated DMA memory size (%d) is "
13817                                 "less than the requested DMA memory "
13818                                 "size (%d)\n", alloclen, reqlen);
13819                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13820                 return -ENOMEM;
13821         }
13822
13823         /* Get the first SGE entry from the non-embedded DMA memory */
13824         viraddr = mbox->sge_array->addr[0];
13825
13826         /* Set up the SGL pages in the non-embedded DMA pages */
13827         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13828         sgl_pg_pairs = &sgl->sgl_pg_pairs;
13829
13830         pg_pairs = 0;
13831         list_for_each_entry(psb, sblist, list) {
13832                 /* Set up the sge entry */
13833                 sgl_pg_pairs->sgl_pg0_addr_lo =
13834                         cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13835                 sgl_pg_pairs->sgl_pg0_addr_hi =
13836                         cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13837                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13838                         pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
13839                 else
13840                         pdma_phys_bpl1 = 0;
13841                 sgl_pg_pairs->sgl_pg1_addr_lo =
13842                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13843                 sgl_pg_pairs->sgl_pg1_addr_hi =
13844                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13845                 /* Keep the first xritag on the list */
13846                 if (pg_pairs == 0)
13847                         xritag_start = psb->cur_iocbq.sli4_xritag;
13848                 sgl_pg_pairs++;
13849                 pg_pairs++;
13850         }
13851         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13852         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13853         /* Perform endian conversion if necessary */
13854         sgl->word0 = cpu_to_le32(sgl->word0);
13855
13856         if (!phba->sli4_hba.intr_enable)
13857                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13858         else {
13859                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13860                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13861         }
13862         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13863         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13864         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13865         if (rc != MBX_TIMEOUT)
13866                 lpfc_sli4_mbox_cmd_free(phba, mbox);
13867         if (shdr_status || shdr_add_status || rc) {
13868                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13869                                 "2564 POST_SGL_BLOCK mailbox command failed "
13870                                 "status x%x add_status x%x mbx status x%x\n",
13871                                 shdr_status, shdr_add_status, rc);
13872                 rc = -ENXIO;
13873         }
13874         return rc;
13875 }
13876
13877 /**
13878  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13879  * @phba: pointer to lpfc_hba struct that the frame was received on
13880  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13881  *
13882  * This function checks the fields in the @fc_hdr to see if the FC frame is a
13883  * valid type of frame that the LPFC driver will handle. This function will
13884  * return a zero if the frame is a valid frame or a non zero value when the
13885  * frame does not pass the check.
13886  **/
13887 static int
13888 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13889 {
13890         /*  make rctl_names static to save stack space */
13891         static char *rctl_names[] = FC_RCTL_NAMES_INIT;
13892         char *type_names[] = FC_TYPE_NAMES_INIT;
13893         struct fc_vft_header *fc_vft_hdr;
13894         uint32_t *header = (uint32_t *) fc_hdr;
13895
13896         switch (fc_hdr->fh_r_ctl) {
13897         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
13898         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
13899         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
13900         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
13901         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
13902         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
13903         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
13904         case FC_RCTL_DD_CMD_STATUS:     /* command status */
13905         case FC_RCTL_ELS_REQ:   /* extended link services request */
13906         case FC_RCTL_ELS_REP:   /* extended link services reply */
13907         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
13908         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
13909         case FC_RCTL_BA_NOP:    /* basic link service NOP */
13910         case FC_RCTL_BA_ABTS:   /* basic link service abort */
13911         case FC_RCTL_BA_RMC:    /* remove connection */
13912         case FC_RCTL_BA_ACC:    /* basic accept */
13913         case FC_RCTL_BA_RJT:    /* basic reject */
13914         case FC_RCTL_BA_PRMT:
13915         case FC_RCTL_ACK_1:     /* acknowledge_1 */
13916         case FC_RCTL_ACK_0:     /* acknowledge_0 */
13917         case FC_RCTL_P_RJT:     /* port reject */
13918         case FC_RCTL_F_RJT:     /* fabric reject */
13919         case FC_RCTL_P_BSY:     /* port busy */
13920         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
13921         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
13922         case FC_RCTL_LCR:       /* link credit reset */
13923         case FC_RCTL_END:       /* end */
13924                 break;
13925         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
13926                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13927                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
13928                 return lpfc_fc_frame_check(phba, fc_hdr);
13929         default:
13930                 goto drop;
13931         }
13932         switch (fc_hdr->fh_type) {
13933         case FC_TYPE_BLS:
13934         case FC_TYPE_ELS:
13935         case FC_TYPE_FCP:
13936         case FC_TYPE_CT:
13937                 break;
13938         case FC_TYPE_IP:
13939         case FC_TYPE_ILS:
13940         default:
13941                 goto drop;
13942         }
13943
13944         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13945                         "2538 Received frame rctl:%s type:%s "
13946                         "Frame Data:%08x %08x %08x %08x %08x %08x\n",
13947                         rctl_names[fc_hdr->fh_r_ctl],
13948                         type_names[fc_hdr->fh_type],
13949                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13950                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13951                         be32_to_cpu(header[4]), be32_to_cpu(header[5]));
13952         return 0;
13953 drop:
13954         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13955                         "2539 Dropped frame rctl:%s type:%s\n",
13956                         rctl_names[fc_hdr->fh_r_ctl],
13957                         type_names[fc_hdr->fh_type]);
13958         return 1;
13959 }
13960
13961 /**
13962  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
13963  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13964  *
13965  * This function processes the FC header to retrieve the VFI from the VF
13966  * header, if one exists. This function will return the VFI if one exists
13967  * or 0 if no VSAN Header exists.
13968  **/
13969 static uint32_t
13970 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
13971 {
13972         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13973
13974         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
13975                 return 0;
13976         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
13977 }
13978
13979 /**
13980  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
13981  * @phba: Pointer to the HBA structure to search for the vport on
13982  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13983  * @fcfi: The FC Fabric ID that the frame came from
13984  *
13985  * This function searches the @phba for a vport that matches the content of the
13986  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
13987  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
13988  * returns the matching vport pointer or NULL if unable to match frame to a
13989  * vport.
13990  **/
13991 static struct lpfc_vport *
13992 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
13993                        uint16_t fcfi)
13994 {
13995         struct lpfc_vport **vports;
13996         struct lpfc_vport *vport = NULL;
13997         int i;
13998         uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
13999                         fc_hdr->fh_d_id[1] << 8 |
14000                         fc_hdr->fh_d_id[2]);
14001
14002         if (did == Fabric_DID)
14003                 return phba->pport;
14004         if ((phba->pport->fc_flag & FC_PT2PT) &&
14005                 !(phba->link_state == LPFC_HBA_READY))
14006                 return phba->pport;
14007
14008         vports = lpfc_create_vport_work_array(phba);
14009         if (vports != NULL)
14010                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
14011                         if (phba->fcf.fcfi == fcfi &&
14012                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
14013                             vports[i]->fc_myDID == did) {
14014                                 vport = vports[i];
14015                                 break;
14016                         }
14017                 }
14018         lpfc_destroy_vport_work_array(phba, vports);
14019         return vport;
14020 }
14021
14022 /**
14023  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
14024  * @vport: The vport to work on.
14025  *
14026  * This function updates the receive sequence time stamp for this vport. The
14027  * receive sequence time stamp indicates the time that the last frame of the
14028  * the sequence that has been idle for the longest amount of time was received.
14029  * the driver uses this time stamp to indicate if any received sequences have
14030  * timed out.
14031  **/
14032 void
14033 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14034 {
14035         struct lpfc_dmabuf *h_buf;
14036         struct hbq_dmabuf *dmabuf = NULL;
14037
14038         /* get the oldest sequence on the rcv list */
14039         h_buf = list_get_first(&vport->rcv_buffer_list,
14040                                struct lpfc_dmabuf, list);
14041         if (!h_buf)
14042                 return;
14043         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14044         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
14045 }
14046
14047 /**
14048  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
14049  * @vport: The vport that the received sequences were sent to.
14050  *
14051  * This function cleans up all outstanding received sequences. This is called
14052  * by the driver when a link event or user action invalidates all the received
14053  * sequences.
14054  **/
14055 void
14056 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
14057 {
14058         struct lpfc_dmabuf *h_buf, *hnext;
14059         struct lpfc_dmabuf *d_buf, *dnext;
14060         struct hbq_dmabuf *dmabuf = NULL;
14061
14062         /* start with the oldest sequence on the rcv list */
14063         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14064                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14065                 list_del_init(&dmabuf->hbuf.list);
14066                 list_for_each_entry_safe(d_buf, dnext,
14067                                          &dmabuf->dbuf.list, list) {
14068                         list_del_init(&d_buf->list);
14069                         lpfc_in_buf_free(vport->phba, d_buf);
14070                 }
14071                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14072         }
14073 }
14074
14075 /**
14076  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
14077  * @vport: The vport that the received sequences were sent to.
14078  *
14079  * This function determines whether any received sequences have timed out by
14080  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
14081  * indicates that there is at least one timed out sequence this routine will
14082  * go through the received sequences one at a time from most inactive to most
14083  * active to determine which ones need to be cleaned up. Once it has determined
14084  * that a sequence needs to be cleaned up it will simply free up the resources
14085  * without sending an abort.
14086  **/
14087 void
14088 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
14089 {
14090         struct lpfc_dmabuf *h_buf, *hnext;
14091         struct lpfc_dmabuf *d_buf, *dnext;
14092         struct hbq_dmabuf *dmabuf = NULL;
14093         unsigned long timeout;
14094         int abort_count = 0;
14095
14096         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
14097                    vport->rcv_buffer_time_stamp);
14098         if (list_empty(&vport->rcv_buffer_list) ||
14099             time_before(jiffies, timeout))
14100                 return;
14101         /* start with the oldest sequence on the rcv list */
14102         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14103                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14104                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
14105                            dmabuf->time_stamp);
14106                 if (time_before(jiffies, timeout))
14107                         break;
14108                 abort_count++;
14109                 list_del_init(&dmabuf->hbuf.list);
14110                 list_for_each_entry_safe(d_buf, dnext,
14111                                          &dmabuf->dbuf.list, list) {
14112                         list_del_init(&d_buf->list);
14113                         lpfc_in_buf_free(vport->phba, d_buf);
14114                 }
14115                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14116         }
14117         if (abort_count)
14118                 lpfc_update_rcv_time_stamp(vport);
14119 }
14120
14121 /**
14122  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
14123  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
14124  *
14125  * This function searches through the existing incomplete sequences that have
14126  * been sent to this @vport. If the frame matches one of the incomplete
14127  * sequences then the dbuf in the @dmabuf is added to the list of frames that
14128  * make up that sequence. If no sequence is found that matches this frame then
14129  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
14130  * This function returns a pointer to the first dmabuf in the sequence list that
14131  * the frame was linked to.
14132  **/
14133 static struct hbq_dmabuf *
14134 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14135 {
14136         struct fc_frame_header *new_hdr;
14137         struct fc_frame_header *temp_hdr;
14138         struct lpfc_dmabuf *d_buf;
14139         struct lpfc_dmabuf *h_buf;
14140         struct hbq_dmabuf *seq_dmabuf = NULL;
14141         struct hbq_dmabuf *temp_dmabuf = NULL;
14142
14143         INIT_LIST_HEAD(&dmabuf->dbuf.list);
14144         dmabuf->time_stamp = jiffies;
14145         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14146         /* Use the hdr_buf to find the sequence that this frame belongs to */
14147         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14148                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14149                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14150                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14151                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14152                         continue;
14153                 /* found a pending sequence that matches this frame */
14154                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14155                 break;
14156         }
14157         if (!seq_dmabuf) {
14158                 /*
14159                  * This indicates first frame received for this sequence.
14160                  * Queue the buffer on the vport's rcv_buffer_list.
14161                  */
14162                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14163                 lpfc_update_rcv_time_stamp(vport);
14164                 return dmabuf;
14165         }
14166         temp_hdr = seq_dmabuf->hbuf.virt;
14167         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
14168                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14169                 list_del_init(&seq_dmabuf->hbuf.list);
14170                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14171                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14172                 lpfc_update_rcv_time_stamp(vport);
14173                 return dmabuf;
14174         }
14175         /* move this sequence to the tail to indicate a young sequence */
14176         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
14177         seq_dmabuf->time_stamp = jiffies;
14178         lpfc_update_rcv_time_stamp(vport);
14179         if (list_empty(&seq_dmabuf->dbuf.list)) {
14180                 temp_hdr = dmabuf->hbuf.virt;
14181                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14182                 return seq_dmabuf;
14183         }
14184         /* find the correct place in the sequence to insert this frame */
14185         list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
14186                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14187                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
14188                 /*
14189                  * If the frame's sequence count is greater than the frame on
14190                  * the list then insert the frame right after this frame
14191                  */
14192                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
14193                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14194                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
14195                         return seq_dmabuf;
14196                 }
14197         }
14198         return NULL;
14199 }
14200
14201 /**
14202  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
14203  * @vport: pointer to a vitural port
14204  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14205  *
14206  * This function tries to abort from the partially assembed sequence, described
14207  * by the information from basic abbort @dmabuf. It checks to see whether such
14208  * partially assembled sequence held by the driver. If so, it shall free up all
14209  * the frames from the partially assembled sequence.
14210  *
14211  * Return
14212  * true  -- if there is matching partially assembled sequence present and all
14213  *          the frames freed with the sequence;
14214  * false -- if there is no matching partially assembled sequence present so
14215  *          nothing got aborted in the lower layer driver
14216  **/
14217 static bool
14218 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
14219                             struct hbq_dmabuf *dmabuf)
14220 {
14221         struct fc_frame_header *new_hdr;
14222         struct fc_frame_header *temp_hdr;
14223         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
14224         struct hbq_dmabuf *seq_dmabuf = NULL;
14225
14226         /* Use the hdr_buf to find the sequence that matches this frame */
14227         INIT_LIST_HEAD(&dmabuf->dbuf.list);
14228         INIT_LIST_HEAD(&dmabuf->hbuf.list);
14229         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14230         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14231                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14232                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14233                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14234                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14235                         continue;
14236                 /* found a pending sequence that matches this frame */
14237                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14238                 break;
14239         }
14240
14241         /* Free up all the frames from the partially assembled sequence */
14242         if (seq_dmabuf) {
14243                 list_for_each_entry_safe(d_buf, n_buf,
14244                                          &seq_dmabuf->dbuf.list, list) {
14245                         list_del_init(&d_buf->list);
14246                         lpfc_in_buf_free(vport->phba, d_buf);
14247                 }
14248                 return true;
14249         }
14250         return false;
14251 }
14252
14253 /**
14254  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
14255  * @vport: pointer to a vitural port
14256  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14257  *
14258  * This function tries to abort from the assembed sequence from upper level
14259  * protocol, described by the information from basic abbort @dmabuf. It
14260  * checks to see whether such pending context exists at upper level protocol.
14261  * If so, it shall clean up the pending context.
14262  *
14263  * Return
14264  * true  -- if there is matching pending context of the sequence cleaned
14265  *          at ulp;
14266  * false -- if there is no matching pending context of the sequence present
14267  *          at ulp.
14268  **/
14269 static bool
14270 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14271 {
14272         struct lpfc_hba *phba = vport->phba;
14273         int handled;
14274
14275         /* Accepting abort at ulp with SLI4 only */
14276         if (phba->sli_rev < LPFC_SLI_REV4)
14277                 return false;
14278
14279         /* Register all caring upper level protocols to attend abort */
14280         handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
14281         if (handled)
14282                 return true;
14283
14284         return false;
14285 }
14286
14287 /**
14288  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
14289  * @phba: Pointer to HBA context object.
14290  * @cmd_iocbq: pointer to the command iocbq structure.
14291  * @rsp_iocbq: pointer to the response iocbq structure.
14292  *
14293  * This function handles the sequence abort response iocb command complete
14294  * event. It properly releases the memory allocated to the sequence abort
14295  * accept iocb.
14296  **/
14297 static void
14298 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
14299                              struct lpfc_iocbq *cmd_iocbq,
14300                              struct lpfc_iocbq *rsp_iocbq)
14301 {
14302         struct lpfc_nodelist *ndlp;
14303
14304         if (cmd_iocbq) {
14305                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
14306                 lpfc_nlp_put(ndlp);
14307                 lpfc_nlp_not_used(ndlp);
14308                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
14309         }
14310
14311         /* Failure means BLS ABORT RSP did not get delivered to remote node*/
14312         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
14313                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14314                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
14315                         rsp_iocbq->iocb.ulpStatus,
14316                         rsp_iocbq->iocb.un.ulpWord[4]);
14317 }
14318
14319 /**
14320  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
14321  * @phba: Pointer to HBA context object.
14322  * @xri: xri id in transaction.
14323  *
14324  * This function validates the xri maps to the known range of XRIs allocated an
14325  * used by the driver.
14326  **/
14327 uint16_t
14328 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14329                       uint16_t xri)
14330 {
14331         int i;
14332
14333         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
14334                 if (xri == phba->sli4_hba.xri_ids[i])
14335                         return i;
14336         }
14337         return NO_XRI;
14338 }
14339
14340 /**
14341  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
14342  * @phba: Pointer to HBA context object.
14343  * @fc_hdr: pointer to a FC frame header.
14344  *
14345  * This function sends a basic response to a previous unsol sequence abort
14346  * event after aborting the sequence handling.
14347  **/
14348 static void
14349 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
14350                         struct fc_frame_header *fc_hdr, bool aborted)
14351 {
14352         struct lpfc_hba *phba = vport->phba;
14353         struct lpfc_iocbq *ctiocb = NULL;
14354         struct lpfc_nodelist *ndlp;
14355         uint16_t oxid, rxid, xri, lxri;
14356         uint32_t sid, fctl;
14357         IOCB_t *icmd;
14358         int rc;
14359
14360         if (!lpfc_is_link_up(phba))
14361                 return;
14362
14363         sid = sli4_sid_from_fc_hdr(fc_hdr);
14364         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
14365         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
14366
14367         ndlp = lpfc_findnode_did(vport, sid);
14368         if (!ndlp) {
14369                 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
14370                 if (!ndlp) {
14371                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14372                                          "1268 Failed to allocate ndlp for "
14373                                          "oxid:x%x SID:x%x\n", oxid, sid);
14374                         return;
14375                 }
14376                 lpfc_nlp_init(vport, ndlp, sid);
14377                 /* Put ndlp onto pport node list */
14378                 lpfc_enqueue_node(vport, ndlp);
14379         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
14380                 /* re-setup ndlp without removing from node list */
14381                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
14382                 if (!ndlp) {
14383                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14384                                          "3275 Failed to active ndlp found "
14385                                          "for oxid:x%x SID:x%x\n", oxid, sid);
14386                         return;
14387                 }
14388         }
14389
14390         /* Allocate buffer for rsp iocb */
14391         ctiocb = lpfc_sli_get_iocbq(phba);
14392         if (!ctiocb)
14393                 return;
14394
14395         /* Extract the F_CTL field from FC_HDR */
14396         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
14397
14398         icmd = &ctiocb->iocb;
14399         icmd->un.xseq64.bdl.bdeSize = 0;
14400         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
14401         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
14402         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
14403         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
14404
14405         /* Fill in the rest of iocb fields */
14406         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
14407         icmd->ulpBdeCount = 0;
14408         icmd->ulpLe = 1;
14409         icmd->ulpClass = CLASS3;
14410         icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
14411         ctiocb->context1 = lpfc_nlp_get(ndlp);
14412
14413         ctiocb->iocb_cmpl = NULL;
14414         ctiocb->vport = phba->pport;
14415         ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
14416         ctiocb->sli4_lxritag = NO_XRI;
14417         ctiocb->sli4_xritag = NO_XRI;
14418
14419         if (fctl & FC_FC_EX_CTX)
14420                 /* Exchange responder sent the abort so we
14421                  * own the oxid.
14422                  */
14423                 xri = oxid;
14424         else
14425                 xri = rxid;
14426         lxri = lpfc_sli4_xri_inrange(phba, xri);
14427         if (lxri != NO_XRI)
14428                 lpfc_set_rrq_active(phba, ndlp, lxri,
14429                         (xri == oxid) ? rxid : oxid, 0);
14430         /* For BA_ABTS from exchange responder, if the logical xri with
14431          * the oxid maps to the FCP XRI range, the port no longer has
14432          * that exchange context, send a BLS_RJT. Override the IOCB for
14433          * a BA_RJT.
14434          */
14435         if ((fctl & FC_FC_EX_CTX) &&
14436             (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
14437                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14438                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14439                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14440                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14441         }
14442
14443         /* If BA_ABTS failed to abort a partially assembled receive sequence,
14444          * the driver no longer has that exchange, send a BLS_RJT. Override
14445          * the IOCB for a BA_RJT.
14446          */
14447         if (aborted == false) {
14448                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14449                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14450                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14451                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14452         }
14453
14454         if (fctl & FC_FC_EX_CTX) {
14455                 /* ABTS sent by responder to CT exchange, construction
14456                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
14457                  * field and RX_ID from ABTS for RX_ID field.
14458                  */
14459                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
14460         } else {
14461                 /* ABTS sent by initiator to CT exchange, construction
14462                  * of BA_ACC will need to allocate a new XRI as for the
14463                  * XRI_TAG field.
14464                  */
14465                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
14466         }
14467         bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
14468         bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
14469
14470         /* Xmit CT abts response on exchange <xid> */
14471         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
14472                          "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14473                          icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14474
14475         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14476         if (rc == IOCB_ERROR) {
14477                 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
14478                                  "2925 Failed to issue CT ABTS RSP x%x on "
14479                                  "xri x%x, Data x%x\n",
14480                                  icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14481                                  phba->link_state);
14482                 lpfc_nlp_put(ndlp);
14483                 ctiocb->context1 = NULL;
14484                 lpfc_sli_release_iocbq(phba, ctiocb);
14485         }
14486 }
14487
14488 /**
14489  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
14490  * @vport: Pointer to the vport on which this sequence was received
14491  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14492  *
14493  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
14494  * receive sequence is only partially assembed by the driver, it shall abort
14495  * the partially assembled frames for the sequence. Otherwise, if the
14496  * unsolicited receive sequence has been completely assembled and passed to
14497  * the Upper Layer Protocol (UPL), it then mark the per oxid status for the
14498  * unsolicited sequence has been aborted. After that, it will issue a basic
14499  * accept to accept the abort.
14500  **/
14501 void
14502 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14503                              struct hbq_dmabuf *dmabuf)
14504 {
14505         struct lpfc_hba *phba = vport->phba;
14506         struct fc_frame_header fc_hdr;
14507         uint32_t fctl;
14508         bool aborted;
14509
14510         /* Make a copy of fc_hdr before the dmabuf being released */
14511         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
14512         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
14513
14514         if (fctl & FC_FC_EX_CTX) {
14515                 /* ABTS by responder to exchange, no cleanup needed */
14516                 aborted = true;
14517         } else {
14518                 /* ABTS by initiator to exchange, need to do cleanup */
14519                 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14520                 if (aborted == false)
14521                         aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
14522         }
14523         lpfc_in_buf_free(phba, &dmabuf->dbuf);
14524
14525         /* Respond with BA_ACC or BA_RJT accordingly */
14526         lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
14527 }
14528
14529 /**
14530  * lpfc_seq_complete - Indicates if a sequence is complete
14531  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14532  *
14533  * This function checks the sequence, starting with the frame described by
14534  * @dmabuf, to see if all the frames associated with this sequence are present.
14535  * the frames associated with this sequence are linked to the @dmabuf using the
14536  * dbuf list. This function looks for two major things. 1) That the first frame
14537  * has a sequence count of zero. 2) There is a frame with last frame of sequence
14538  * set. 3) That there are no holes in the sequence count. The function will
14539  * return 1 when the sequence is complete, otherwise it will return 0.
14540  **/
14541 static int
14542 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
14543 {
14544         struct fc_frame_header *hdr;
14545         struct lpfc_dmabuf *d_buf;
14546         struct hbq_dmabuf *seq_dmabuf;
14547         uint32_t fctl;
14548         int seq_count = 0;
14549
14550         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14551         /* make sure first fame of sequence has a sequence count of zero */
14552         if (hdr->fh_seq_cnt != seq_count)
14553                 return 0;
14554         fctl = (hdr->fh_f_ctl[0] << 16 |
14555                 hdr->fh_f_ctl[1] << 8 |
14556                 hdr->fh_f_ctl[2]);
14557         /* If last frame of sequence we can return success. */
14558         if (fctl & FC_FC_END_SEQ)
14559                 return 1;
14560         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
14561                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14562                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14563                 /* If there is a hole in the sequence count then fail. */
14564                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
14565                         return 0;
14566                 fctl = (hdr->fh_f_ctl[0] << 16 |
14567                         hdr->fh_f_ctl[1] << 8 |
14568                         hdr->fh_f_ctl[2]);
14569                 /* If last frame of sequence we can return success. */
14570                 if (fctl & FC_FC_END_SEQ)
14571                         return 1;
14572         }
14573         return 0;
14574 }
14575
14576 /**
14577  * lpfc_prep_seq - Prep sequence for ULP processing
14578  * @vport: Pointer to the vport on which this sequence was received
14579  * @dmabuf: pointer to a dmabuf that describes the FC sequence
14580  *
14581  * This function takes a sequence, described by a list of frames, and creates
14582  * a list of iocbq structures to describe the sequence. This iocbq list will be
14583  * used to issue to the generic unsolicited sequence handler. This routine
14584  * returns a pointer to the first iocbq in the list. If the function is unable
14585  * to allocate an iocbq then it throw out the received frames that were not
14586  * able to be described and return a pointer to the first iocbq. If unable to
14587  * allocate any iocbqs (including the first) this function will return NULL.
14588  **/
14589 static struct lpfc_iocbq *
14590 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14591 {
14592         struct hbq_dmabuf *hbq_buf;
14593         struct lpfc_dmabuf *d_buf, *n_buf;
14594         struct lpfc_iocbq *first_iocbq, *iocbq;
14595         struct fc_frame_header *fc_hdr;
14596         uint32_t sid;
14597         uint32_t len, tot_len;
14598         struct ulp_bde64 *pbde;
14599
14600         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14601         /* remove from receive buffer list */
14602         list_del_init(&seq_dmabuf->hbuf.list);
14603         lpfc_update_rcv_time_stamp(vport);
14604         /* get the Remote Port's SID */
14605         sid = sli4_sid_from_fc_hdr(fc_hdr);
14606         tot_len = 0;
14607         /* Get an iocbq struct to fill in. */
14608         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
14609         if (first_iocbq) {
14610                 /* Initialize the first IOCB. */
14611                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
14612                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14613
14614                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
14615                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
14616                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
14617                         first_iocbq->iocb.un.rcvels.parmRo =
14618                                 sli4_did_from_fc_hdr(fc_hdr);
14619                         first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
14620                 } else
14621                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
14622                 first_iocbq->iocb.ulpContext = NO_XRI;
14623                 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14624                         be16_to_cpu(fc_hdr->fh_ox_id);
14625                 /* iocbq is prepped for internal consumption.  Physical vpi. */
14626                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14627                         vport->phba->vpi_ids[vport->vpi];
14628                 /* put the first buffer into the first IOCBq */
14629                 first_iocbq->context2 = &seq_dmabuf->dbuf;
14630                 first_iocbq->context3 = NULL;
14631                 first_iocbq->iocb.ulpBdeCount = 1;
14632                 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14633                                                         LPFC_DATA_BUF_SIZE;
14634                 first_iocbq->iocb.un.rcvels.remoteID = sid;
14635                 tot_len = bf_get(lpfc_rcqe_length,
14636                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
14637                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14638         }
14639         iocbq = first_iocbq;
14640         /*
14641          * Each IOCBq can have two Buffers assigned, so go through the list
14642          * of buffers for this sequence and save two buffers in each IOCBq
14643          */
14644         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
14645                 if (!iocbq) {
14646                         lpfc_in_buf_free(vport->phba, d_buf);
14647                         continue;
14648                 }
14649                 if (!iocbq->context3) {
14650                         iocbq->context3 = d_buf;
14651                         iocbq->iocb.ulpBdeCount++;
14652                         pbde = (struct ulp_bde64 *)
14653                                         &iocbq->iocb.unsli3.sli3Words[4];
14654                         pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14655
14656                         /* We need to get the size out of the right CQE */
14657                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14658                         len = bf_get(lpfc_rcqe_length,
14659                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
14660                         iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14661                         tot_len += len;
14662                 } else {
14663                         iocbq = lpfc_sli_get_iocbq(vport->phba);
14664                         if (!iocbq) {
14665                                 if (first_iocbq) {
14666                                         first_iocbq->iocb.ulpStatus =
14667                                                         IOSTAT_FCP_RSP_ERROR;
14668                                         first_iocbq->iocb.un.ulpWord[4] =
14669                                                         IOERR_NO_RESOURCES;
14670                                 }
14671                                 lpfc_in_buf_free(vport->phba, d_buf);
14672                                 continue;
14673                         }
14674                         iocbq->context2 = d_buf;
14675                         iocbq->context3 = NULL;
14676                         iocbq->iocb.ulpBdeCount = 1;
14677                         iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14678                                                         LPFC_DATA_BUF_SIZE;
14679
14680                         /* We need to get the size out of the right CQE */
14681                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14682                         len = bf_get(lpfc_rcqe_length,
14683                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
14684                         tot_len += len;
14685                         iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14686
14687                         iocbq->iocb.un.rcvels.remoteID = sid;
14688                         list_add_tail(&iocbq->list, &first_iocbq->list);
14689                 }
14690         }
14691         return first_iocbq;
14692 }
14693
14694 static void
14695 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
14696                           struct hbq_dmabuf *seq_dmabuf)
14697 {
14698         struct fc_frame_header *fc_hdr;
14699         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
14700         struct lpfc_hba *phba = vport->phba;
14701
14702         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14703         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
14704         if (!iocbq) {
14705                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14706                                 "2707 Ring %d handler: Failed to allocate "
14707                                 "iocb Rctl x%x Type x%x received\n",
14708                                 LPFC_ELS_RING,
14709                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14710                 return;
14711         }
14712         if (!lpfc_complete_unsol_iocb(phba,
14713                                       &phba->sli.ring[LPFC_ELS_RING],
14714                                       iocbq, fc_hdr->fh_r_ctl,
14715                                       fc_hdr->fh_type))
14716                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14717                                 "2540 Ring %d handler: unexpected Rctl "
14718                                 "x%x Type x%x received\n",
14719                                 LPFC_ELS_RING,
14720                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14721
14722         /* Free iocb created in lpfc_prep_seq */
14723         list_for_each_entry_safe(curr_iocb, next_iocb,
14724                 &iocbq->list, list) {
14725                 list_del_init(&curr_iocb->list);
14726                 lpfc_sli_release_iocbq(phba, curr_iocb);
14727         }
14728         lpfc_sli_release_iocbq(phba, iocbq);
14729 }
14730
14731 /**
14732  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
14733  * @phba: Pointer to HBA context object.
14734  *
14735  * This function is called with no lock held. This function processes all
14736  * the received buffers and gives it to upper layers when a received buffer
14737  * indicates that it is the final frame in the sequence. The interrupt
14738  * service routine processes received buffers at interrupt contexts and adds
14739  * received dma buffers to the rb_pend_list queue and signals the worker thread.
14740  * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
14741  * appropriate receive function when the final frame in a sequence is received.
14742  **/
14743 void
14744 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14745                                  struct hbq_dmabuf *dmabuf)
14746 {
14747         struct hbq_dmabuf *seq_dmabuf;
14748         struct fc_frame_header *fc_hdr;
14749         struct lpfc_vport *vport;
14750         uint32_t fcfi;
14751         uint32_t did;
14752
14753         /* Process each received buffer */
14754         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14755         /* check to see if this a valid type of frame */
14756         if (lpfc_fc_frame_check(phba, fc_hdr)) {
14757                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14758                 return;
14759         }
14760         if ((bf_get(lpfc_cqe_code,
14761                     &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
14762                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
14763                               &dmabuf->cq_event.cqe.rcqe_cmpl);
14764         else
14765                 fcfi = bf_get(lpfc_rcqe_fcf_id,
14766                               &dmabuf->cq_event.cqe.rcqe_cmpl);
14767
14768         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
14769         if (!vport) {
14770                 /* throw out the frame */
14771                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14772                 return;
14773         }
14774
14775         /* d_id this frame is directed to */
14776         did = sli4_did_from_fc_hdr(fc_hdr);
14777
14778         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
14779         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
14780                 (did != Fabric_DID)) {
14781                 /*
14782                  * Throw out the frame if we are not pt2pt.
14783                  * The pt2pt protocol allows for discovery frames
14784                  * to be received without a registered VPI.
14785                  */
14786                 if (!(vport->fc_flag & FC_PT2PT) ||
14787                         (phba->link_state == LPFC_HBA_READY)) {
14788                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
14789                         return;
14790                 }
14791         }
14792
14793         /* Handle the basic abort sequence (BA_ABTS) event */
14794         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14795                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
14796                 return;
14797         }
14798
14799         /* Link this frame */
14800         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
14801         if (!seq_dmabuf) {
14802                 /* unable to add frame to vport - throw it out */
14803                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14804                 return;
14805         }
14806         /* If not last frame in sequence continue processing frames. */
14807         if (!lpfc_seq_complete(seq_dmabuf))
14808                 return;
14809
14810         /* Send the complete sequence to the upper layer protocol */
14811         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
14812 }
14813
14814 /**
14815  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
14816  * @phba: pointer to lpfc hba data structure.
14817  *
14818  * This routine is invoked to post rpi header templates to the
14819  * HBA consistent with the SLI-4 interface spec.  This routine
14820  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
14821  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
14822  *
14823  * This routine does not require any locks.  It's usage is expected
14824  * to be driver load or reset recovery when the driver is
14825  * sequential.
14826  *
14827  * Return codes
14828  *      0 - successful
14829  *      -EIO - The mailbox failed to complete successfully.
14830  *      When this error occurs, the driver is not guaranteed
14831  *      to have any rpi regions posted to the device and
14832  *      must either attempt to repost the regions or take a
14833  *      fatal error.
14834  **/
14835 int
14836 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
14837 {
14838         struct lpfc_rpi_hdr *rpi_page;
14839         uint32_t rc = 0;
14840         uint16_t lrpi = 0;
14841
14842         /* SLI4 ports that support extents do not require RPI headers. */
14843         if (!phba->sli4_hba.rpi_hdrs_in_use)
14844                 goto exit;
14845         if (phba->sli4_hba.extents_in_use)
14846                 return -EIO;
14847
14848         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
14849                 /*
14850                  * Assign the rpi headers a physical rpi only if the driver
14851                  * has not initialized those resources.  A port reset only
14852                  * needs the headers posted.
14853                  */
14854                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
14855                     LPFC_RPI_RSRC_RDY)
14856                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14857
14858                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
14859                 if (rc != MBX_SUCCESS) {
14860                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14861                                         "2008 Error %d posting all rpi "
14862                                         "headers\n", rc);
14863                         rc = -EIO;
14864                         break;
14865                 }
14866         }
14867
14868  exit:
14869         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
14870                LPFC_RPI_RSRC_RDY);
14871         return rc;
14872 }
14873
14874 /**
14875  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
14876  * @phba: pointer to lpfc hba data structure.
14877  * @rpi_page:  pointer to the rpi memory region.
14878  *
14879  * This routine is invoked to post a single rpi header to the
14880  * HBA consistent with the SLI-4 interface spec.  This memory region
14881  * maps up to 64 rpi context regions.
14882  *
14883  * Return codes
14884  *      0 - successful
14885  *      -ENOMEM - No available memory
14886  *      -EIO - The mailbox failed to complete successfully.
14887  **/
14888 int
14889 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
14890 {
14891         LPFC_MBOXQ_t *mboxq;
14892         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
14893         uint32_t rc = 0;
14894         uint32_t shdr_status, shdr_add_status;
14895         union lpfc_sli4_cfg_shdr *shdr;
14896
14897         /* SLI4 ports that support extents do not require RPI headers. */
14898         if (!phba->sli4_hba.rpi_hdrs_in_use)
14899                 return rc;
14900         if (phba->sli4_hba.extents_in_use)
14901                 return -EIO;
14902
14903         /* The port is notified of the header region via a mailbox command. */
14904         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14905         if (!mboxq) {
14906                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14907                                 "2001 Unable to allocate memory for issuing "
14908                                 "SLI_CONFIG_SPECIAL mailbox command\n");
14909                 return -ENOMEM;
14910         }
14911
14912         /* Post all rpi memory regions to the port. */
14913         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
14914         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14915                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
14916                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
14917                          sizeof(struct lpfc_sli4_cfg_mhdr),
14918                          LPFC_SLI4_MBX_EMBED);
14919
14920
14921         /* Post the physical rpi to the port for this rpi header. */
14922         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
14923                rpi_page->start_rpi);
14924         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
14925                hdr_tmpl, rpi_page->page_count);
14926
14927         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
14928         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
14929         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
14930         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
14931         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14932         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14933         if (rc != MBX_TIMEOUT)
14934                 mempool_free(mboxq, phba->mbox_mem_pool);
14935         if (shdr_status || shdr_add_status || rc) {
14936                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14937                                 "2514 POST_RPI_HDR mailbox failed with "
14938                                 "status x%x add_status x%x, mbx status x%x\n",
14939                                 shdr_status, shdr_add_status, rc);
14940                 rc = -ENXIO;
14941         }
14942         return rc;
14943 }
14944
14945 /**
14946  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
14947  * @phba: pointer to lpfc hba data structure.
14948  *
14949  * This routine is invoked to post rpi header templates to the
14950  * HBA consistent with the SLI-4 interface spec.  This routine
14951  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
14952  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
14953  *
14954  * Returns
14955  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
14956  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
14957  **/
14958 int
14959 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
14960 {
14961         unsigned long rpi;
14962         uint16_t max_rpi, rpi_limit;
14963         uint16_t rpi_remaining, lrpi = 0;
14964         struct lpfc_rpi_hdr *rpi_hdr;
14965
14966         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
14967         rpi_limit = phba->sli4_hba.next_rpi;
14968
14969         /*
14970          * Fetch the next logical rpi.  Because this index is logical,
14971          * the  driver starts at 0 each time.
14972          */
14973         spin_lock_irq(&phba->hbalock);
14974         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
14975         if (rpi >= rpi_limit)
14976                 rpi = LPFC_RPI_ALLOC_ERROR;
14977         else {
14978                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
14979                 phba->sli4_hba.max_cfg_param.rpi_used++;
14980                 phba->sli4_hba.rpi_count++;
14981         }
14982
14983         /*
14984          * Don't try to allocate more rpi header regions if the device limit
14985          * has been exhausted.
14986          */
14987         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
14988             (phba->sli4_hba.rpi_count >= max_rpi)) {
14989                 spin_unlock_irq(&phba->hbalock);
14990                 return rpi;
14991         }
14992
14993         /*
14994          * RPI header postings are not required for SLI4 ports capable of
14995          * extents.
14996          */
14997         if (!phba->sli4_hba.rpi_hdrs_in_use) {
14998                 spin_unlock_irq(&phba->hbalock);
14999                 return rpi;
15000         }
15001
15002         /*
15003          * If the driver is running low on rpi resources, allocate another
15004          * page now.  Note that the next_rpi value is used because
15005          * it represents how many are actually in use whereas max_rpi notes
15006          * how many are supported max by the device.
15007          */
15008         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
15009         spin_unlock_irq(&phba->hbalock);
15010         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
15011                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
15012                 if (!rpi_hdr) {
15013                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15014                                         "2002 Error Could not grow rpi "
15015                                         "count\n");
15016                 } else {
15017                         lrpi = rpi_hdr->start_rpi;
15018                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15019                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
15020                 }
15021         }
15022
15023         return rpi;
15024 }
15025
15026 /**
15027  * lpfc_sli4_free_rpi - Release an rpi for reuse.
15028  * @phba: pointer to lpfc hba data structure.
15029  *
15030  * This routine is invoked to release an rpi to the pool of
15031  * available rpis maintained by the driver.
15032  **/
15033 void
15034 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15035 {
15036         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
15037                 phba->sli4_hba.rpi_count--;
15038                 phba->sli4_hba.max_cfg_param.rpi_used--;
15039         }
15040 }
15041
15042 /**
15043  * lpfc_sli4_free_rpi - Release an rpi for reuse.
15044  * @phba: pointer to lpfc hba data structure.
15045  *
15046  * This routine is invoked to release an rpi to the pool of
15047  * available rpis maintained by the driver.
15048  **/
15049 void
15050 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15051 {
15052         spin_lock_irq(&phba->hbalock);
15053         __lpfc_sli4_free_rpi(phba, rpi);
15054         spin_unlock_irq(&phba->hbalock);
15055 }
15056
15057 /**
15058  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
15059  * @phba: pointer to lpfc hba data structure.
15060  *
15061  * This routine is invoked to remove the memory region that
15062  * provided rpi via a bitmask.
15063  **/
15064 void
15065 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
15066 {
15067         kfree(phba->sli4_hba.rpi_bmask);
15068         kfree(phba->sli4_hba.rpi_ids);
15069         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
15070 }
15071
15072 /**
15073  * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
15074  * @phba: pointer to lpfc hba data structure.
15075  *
15076  * This routine is invoked to remove the memory region that
15077  * provided rpi via a bitmask.
15078  **/
15079 int
15080 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
15081         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
15082 {
15083         LPFC_MBOXQ_t *mboxq;
15084         struct lpfc_hba *phba = ndlp->phba;
15085         int rc;
15086
15087         /* The port is notified of the header region via a mailbox command. */
15088         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15089         if (!mboxq)
15090                 return -ENOMEM;
15091
15092         /* Post all rpi memory regions to the port. */
15093         lpfc_resume_rpi(mboxq, ndlp);
15094         if (cmpl) {
15095                 mboxq->mbox_cmpl = cmpl;
15096                 mboxq->context1 = arg;
15097                 mboxq->context2 = ndlp;
15098         } else
15099                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15100         mboxq->vport = ndlp->vport;
15101         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15102         if (rc == MBX_NOT_FINISHED) {
15103                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15104                                 "2010 Resume RPI Mailbox failed "
15105                                 "status %d, mbxStatus x%x\n", rc,
15106                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
15107                 mempool_free(mboxq, phba->mbox_mem_pool);
15108                 return -EIO;
15109         }
15110         return 0;
15111 }
15112
15113 /**
15114  * lpfc_sli4_init_vpi - Initialize a vpi with the port
15115  * @vport: Pointer to the vport for which the vpi is being initialized
15116  *
15117  * This routine is invoked to activate a vpi with the port.
15118  *
15119  * Returns:
15120  *    0 success
15121  *    -Evalue otherwise
15122  **/
15123 int
15124 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
15125 {
15126         LPFC_MBOXQ_t *mboxq;
15127         int rc = 0;
15128         int retval = MBX_SUCCESS;
15129         uint32_t mbox_tmo;
15130         struct lpfc_hba *phba = vport->phba;
15131         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15132         if (!mboxq)
15133                 return -ENOMEM;
15134         lpfc_init_vpi(phba, mboxq, vport->vpi);
15135         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
15136         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
15137         if (rc != MBX_SUCCESS) {
15138                 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
15139                                 "2022 INIT VPI Mailbox failed "
15140                                 "status %d, mbxStatus x%x\n", rc,
15141                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
15142                 retval = -EIO;
15143         }
15144         if (rc != MBX_TIMEOUT)
15145                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
15146
15147         return retval;
15148 }
15149
15150 /**
15151  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
15152  * @phba: pointer to lpfc hba data structure.
15153  * @mboxq: Pointer to mailbox object.
15154  *
15155  * This routine is invoked to manually add a single FCF record. The caller
15156  * must pass a completely initialized FCF_Record.  This routine takes
15157  * care of the nonembedded mailbox operations.
15158  **/
15159 static void
15160 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
15161 {
15162         void *virt_addr;
15163         union lpfc_sli4_cfg_shdr *shdr;
15164         uint32_t shdr_status, shdr_add_status;
15165
15166         virt_addr = mboxq->sge_array->addr[0];
15167         /* The IOCTL status is embedded in the mailbox subheader. */
15168         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
15169         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15170         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15171
15172         if ((shdr_status || shdr_add_status) &&
15173                 (shdr_status != STATUS_FCF_IN_USE))
15174                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15175                         "2558 ADD_FCF_RECORD mailbox failed with "
15176                         "status x%x add_status x%x\n",
15177                         shdr_status, shdr_add_status);
15178
15179         lpfc_sli4_mbox_cmd_free(phba, mboxq);
15180 }
15181
15182 /**
15183  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
15184  * @phba: pointer to lpfc hba data structure.
15185  * @fcf_record:  pointer to the initialized fcf record to add.
15186  *
15187  * This routine is invoked to manually add a single FCF record. The caller
15188  * must pass a completely initialized FCF_Record.  This routine takes
15189  * care of the nonembedded mailbox operations.
15190  **/
15191 int
15192 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
15193 {
15194         int rc = 0;
15195         LPFC_MBOXQ_t *mboxq;
15196         uint8_t *bytep;
15197         void *virt_addr;
15198         dma_addr_t phys_addr;
15199         struct lpfc_mbx_sge sge;
15200         uint32_t alloc_len, req_len;
15201         uint32_t fcfindex;
15202
15203         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15204         if (!mboxq) {
15205                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15206                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
15207                 return -ENOMEM;
15208         }
15209
15210         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
15211                   sizeof(uint32_t);
15212
15213         /* Allocate DMA memory and set up the non-embedded mailbox command */
15214         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
15215                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
15216                                      req_len, LPFC_SLI4_MBX_NEMBED);
15217         if (alloc_len < req_len) {
15218                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15219                         "2523 Allocated DMA memory size (x%x) is "
15220                         "less than the requested DMA memory "
15221                         "size (x%x)\n", alloc_len, req_len);
15222                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15223                 return -ENOMEM;
15224         }
15225
15226         /*
15227          * Get the first SGE entry from the non-embedded DMA memory.  This
15228          * routine only uses a single SGE.
15229          */
15230         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
15231         phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
15232         virt_addr = mboxq->sge_array->addr[0];
15233         /*
15234          * Configure the FCF record for FCFI 0.  This is the driver's
15235          * hardcoded default and gets used in nonFIP mode.
15236          */
15237         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
15238         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
15239         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
15240
15241         /*
15242          * Copy the fcf_index and the FCF Record Data. The data starts after
15243          * the FCoE header plus word10. The data copy needs to be endian
15244          * correct.
15245          */
15246         bytep += sizeof(uint32_t);
15247         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
15248         mboxq->vport = phba->pport;
15249         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
15250         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15251         if (rc == MBX_NOT_FINISHED) {
15252                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15253                         "2515 ADD_FCF_RECORD mailbox failed with "
15254                         "status 0x%x\n", rc);
15255                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15256                 rc = -EIO;
15257         } else
15258                 rc = 0;
15259
15260         return rc;
15261 }
15262
15263 /**
15264  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
15265  * @phba: pointer to lpfc hba data structure.
15266  * @fcf_record:  pointer to the fcf record to write the default data.
15267  * @fcf_index: FCF table entry index.
15268  *
15269  * This routine is invoked to build the driver's default FCF record.  The
15270  * values used are hardcoded.  This routine handles memory initialization.
15271  *
15272  **/
15273 void
15274 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
15275                                 struct fcf_record *fcf_record,
15276                                 uint16_t fcf_index)
15277 {
15278         memset(fcf_record, 0, sizeof(struct fcf_record));
15279         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
15280         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
15281         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
15282         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
15283         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
15284         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
15285         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
15286         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
15287         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
15288         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
15289         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
15290         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
15291         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
15292         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
15293         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
15294         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
15295                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
15296         /* Set the VLAN bit map */
15297         if (phba->valid_vlan) {
15298                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
15299                         = 1 << (phba->vlan_id % 8);
15300         }
15301 }
15302
15303 /**
15304  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
15305  * @phba: pointer to lpfc hba data structure.
15306  * @fcf_index: FCF table entry offset.
15307  *
15308  * This routine is invoked to scan the entire FCF table by reading FCF
15309  * record and processing it one at a time starting from the @fcf_index
15310  * for initial FCF discovery or fast FCF failover rediscovery.
15311  *
15312  * Return 0 if the mailbox command is submitted successfully, none 0
15313  * otherwise.
15314  **/
15315 int
15316 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15317 {
15318         int rc = 0, error;
15319         LPFC_MBOXQ_t *mboxq;
15320
15321         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
15322         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
15323         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15324         if (!mboxq) {
15325                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15326                                 "2000 Failed to allocate mbox for "
15327                                 "READ_FCF cmd\n");
15328                 error = -ENOMEM;
15329                 goto fail_fcf_scan;
15330         }
15331         /* Construct the read FCF record mailbox command */
15332         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15333         if (rc) {
15334                 error = -EINVAL;
15335                 goto fail_fcf_scan;
15336         }
15337         /* Issue the mailbox command asynchronously */
15338         mboxq->vport = phba->pport;
15339         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
15340
15341         spin_lock_irq(&phba->hbalock);
15342         phba->hba_flag |= FCF_TS_INPROG;
15343         spin_unlock_irq(&phba->hbalock);
15344
15345         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15346         if (rc == MBX_NOT_FINISHED)
15347                 error = -EIO;
15348         else {
15349                 /* Reset eligible FCF count for new scan */
15350                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
15351                         phba->fcf.eligible_fcf_cnt = 0;
15352                 error = 0;
15353         }
15354 fail_fcf_scan:
15355         if (error) {
15356                 if (mboxq)
15357                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
15358                 /* FCF scan failed, clear FCF_TS_INPROG flag */
15359                 spin_lock_irq(&phba->hbalock);
15360                 phba->hba_flag &= ~FCF_TS_INPROG;
15361                 spin_unlock_irq(&phba->hbalock);
15362         }
15363         return error;
15364 }
15365
15366 /**
15367  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
15368  * @phba: pointer to lpfc hba data structure.
15369  * @fcf_index: FCF table entry offset.
15370  *
15371  * This routine is invoked to read an FCF record indicated by @fcf_index
15372  * and to use it for FLOGI roundrobin FCF failover.
15373  *
15374  * Return 0 if the mailbox command is submitted successfully, none 0
15375  * otherwise.
15376  **/
15377 int
15378 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15379 {
15380         int rc = 0, error;
15381         LPFC_MBOXQ_t *mboxq;
15382
15383         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15384         if (!mboxq) {
15385                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15386                                 "2763 Failed to allocate mbox for "
15387                                 "READ_FCF cmd\n");
15388                 error = -ENOMEM;
15389                 goto fail_fcf_read;
15390         }
15391         /* Construct the read FCF record mailbox command */
15392         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15393         if (rc) {
15394                 error = -EINVAL;
15395                 goto fail_fcf_read;
15396         }
15397         /* Issue the mailbox command asynchronously */
15398         mboxq->vport = phba->pport;
15399         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
15400         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15401         if (rc == MBX_NOT_FINISHED)
15402                 error = -EIO;
15403         else
15404                 error = 0;
15405
15406 fail_fcf_read:
15407         if (error && mboxq)
15408                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15409         return error;
15410 }
15411
15412 /**
15413  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
15414  * @phba: pointer to lpfc hba data structure.
15415  * @fcf_index: FCF table entry offset.
15416  *
15417  * This routine is invoked to read an FCF record indicated by @fcf_index to
15418  * determine whether it's eligible for FLOGI roundrobin failover list.
15419  *
15420  * Return 0 if the mailbox command is submitted successfully, none 0
15421  * otherwise.
15422  **/
15423 int
15424 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15425 {
15426         int rc = 0, error;
15427         LPFC_MBOXQ_t *mboxq;
15428
15429         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15430         if (!mboxq) {
15431                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15432                                 "2758 Failed to allocate mbox for "
15433                                 "READ_FCF cmd\n");
15434                                 error = -ENOMEM;
15435                                 goto fail_fcf_read;
15436         }
15437         /* Construct the read FCF record mailbox command */
15438         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15439         if (rc) {
15440                 error = -EINVAL;
15441                 goto fail_fcf_read;
15442         }
15443         /* Issue the mailbox command asynchronously */
15444         mboxq->vport = phba->pport;
15445         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
15446         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15447         if (rc == MBX_NOT_FINISHED)
15448                 error = -EIO;
15449         else
15450                 error = 0;
15451
15452 fail_fcf_read:
15453         if (error && mboxq)
15454                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15455         return error;
15456 }
15457
15458 /**
15459  * lpfc_check_next_fcf_pri
15460  * phba pointer to the lpfc_hba struct for this port.
15461  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
15462  * routine when the rr_bmask is empty. The FCF indecies are put into the
15463  * rr_bmask based on their priority level. Starting from the highest priority
15464  * to the lowest. The most likely FCF candidate will be in the highest
15465  * priority group. When this routine is called it searches the fcf_pri list for
15466  * next lowest priority group and repopulates the rr_bmask with only those
15467  * fcf_indexes.
15468  * returns:
15469  * 1=success 0=failure
15470  **/
15471 int
15472 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
15473 {
15474         uint16_t next_fcf_pri;
15475         uint16_t last_index;
15476         struct lpfc_fcf_pri *fcf_pri;
15477         int rc;
15478         int ret = 0;
15479
15480         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
15481                         LPFC_SLI4_FCF_TBL_INDX_MAX);
15482         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15483                         "3060 Last IDX %d\n", last_index);
15484         if (list_empty(&phba->fcf.fcf_pri_list)) {
15485                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15486                         "3061 Last IDX %d\n", last_index);
15487                 return 0; /* Empty rr list */
15488         }
15489         next_fcf_pri = 0;
15490         /*
15491          * Clear the rr_bmask and set all of the bits that are at this
15492          * priority.
15493          */
15494         memset(phba->fcf.fcf_rr_bmask, 0,
15495                         sizeof(*phba->fcf.fcf_rr_bmask));
15496         spin_lock_irq(&phba->hbalock);
15497         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15498                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
15499                         continue;
15500                 /*
15501                  * the 1st priority that has not FLOGI failed
15502                  * will be the highest.
15503                  */
15504                 if (!next_fcf_pri)
15505                         next_fcf_pri = fcf_pri->fcf_rec.priority;
15506                 spin_unlock_irq(&phba->hbalock);
15507                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15508                         rc = lpfc_sli4_fcf_rr_index_set(phba,
15509                                                 fcf_pri->fcf_rec.fcf_index);
15510                         if (rc)
15511                                 return 0;
15512                 }
15513                 spin_lock_irq(&phba->hbalock);
15514         }
15515         /*
15516          * if next_fcf_pri was not set above and the list is not empty then
15517          * we have failed flogis on all of them. So reset flogi failed
15518          * and start at the beginning.
15519          */
15520         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
15521                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15522                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
15523                         /*
15524                          * the 1st priority that has not FLOGI failed
15525                          * will be the highest.
15526                          */
15527                         if (!next_fcf_pri)
15528                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
15529                         spin_unlock_irq(&phba->hbalock);
15530                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15531                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
15532                                                 fcf_pri->fcf_rec.fcf_index);
15533                                 if (rc)
15534                                         return 0;
15535                         }
15536                         spin_lock_irq(&phba->hbalock);
15537                 }
15538         } else
15539                 ret = 1;
15540         spin_unlock_irq(&phba->hbalock);
15541
15542         return ret;
15543 }
15544 /**
15545  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
15546  * @phba: pointer to lpfc hba data structure.
15547  *
15548  * This routine is to get the next eligible FCF record index in a round
15549  * robin fashion. If the next eligible FCF record index equals to the
15550  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
15551  * shall be returned, otherwise, the next eligible FCF record's index
15552  * shall be returned.
15553  **/
15554 uint16_t
15555 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
15556 {
15557         uint16_t next_fcf_index;
15558
15559 initial_priority:
15560         /* Search start from next bit of currently registered FCF index */
15561         next_fcf_index = phba->fcf.current_rec.fcf_indx;
15562
15563 next_priority:
15564         /* Determine the next fcf index to check */
15565         next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
15566         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15567                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
15568                                        next_fcf_index);
15569
15570         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
15571         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15572                 /*
15573                  * If we have wrapped then we need to clear the bits that
15574                  * have been tested so that we can detect when we should
15575                  * change the priority level.
15576                  */
15577                 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15578                                                LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
15579         }
15580
15581
15582         /* Check roundrobin failover list empty condition */
15583         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
15584                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
15585                 /*
15586                  * If next fcf index is not found check if there are lower
15587                  * Priority level fcf's in the fcf_priority list.
15588                  * Set up the rr_bmask with all of the avaiable fcf bits
15589                  * at that level and continue the selection process.
15590                  */
15591                 if (lpfc_check_next_fcf_pri_level(phba))
15592                         goto initial_priority;
15593                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15594                                 "2844 No roundrobin failover FCF available\n");
15595                 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
15596                         return LPFC_FCOE_FCF_NEXT_NONE;
15597                 else {
15598                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15599                                 "3063 Only FCF available idx %d, flag %x\n",
15600                                 next_fcf_index,
15601                         phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
15602                         return next_fcf_index;
15603                 }
15604         }
15605
15606         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
15607                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
15608                 LPFC_FCF_FLOGI_FAILED)
15609                 goto next_priority;
15610
15611         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15612                         "2845 Get next roundrobin failover FCF (x%x)\n",
15613                         next_fcf_index);
15614
15615         return next_fcf_index;
15616 }
15617
15618 /**
15619  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
15620  * @phba: pointer to lpfc hba data structure.
15621  *
15622  * This routine sets the FCF record index in to the eligible bmask for
15623  * roundrobin failover search. It checks to make sure that the index
15624  * does not go beyond the range of the driver allocated bmask dimension
15625  * before setting the bit.
15626  *
15627  * Returns 0 if the index bit successfully set, otherwise, it returns
15628  * -EINVAL.
15629  **/
15630 int
15631 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
15632 {
15633         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15634                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15635                                 "2610 FCF (x%x) reached driver's book "
15636                                 "keeping dimension:x%x\n",
15637                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15638                 return -EINVAL;
15639         }
15640         /* Set the eligible FCF record index bmask */
15641         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15642
15643         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15644                         "2790 Set FCF (x%x) to roundrobin FCF failover "
15645                         "bmask\n", fcf_index);
15646
15647         return 0;
15648 }
15649
15650 /**
15651  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
15652  * @phba: pointer to lpfc hba data structure.
15653  *
15654  * This routine clears the FCF record index from the eligible bmask for
15655  * roundrobin failover search. It checks to make sure that the index
15656  * does not go beyond the range of the driver allocated bmask dimension
15657  * before clearing the bit.
15658  **/
15659 void
15660 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15661 {
15662         struct lpfc_fcf_pri *fcf_pri;
15663         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15664                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15665                                 "2762 FCF (x%x) reached driver's book "
15666                                 "keeping dimension:x%x\n",
15667                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15668                 return;
15669         }
15670         /* Clear the eligible FCF record index bmask */
15671         spin_lock_irq(&phba->hbalock);
15672         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15673                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
15674                         list_del_init(&fcf_pri->list);
15675                         break;
15676                 }
15677         }
15678         spin_unlock_irq(&phba->hbalock);
15679         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15680
15681         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15682                         "2791 Clear FCF (x%x) from roundrobin failover "
15683                         "bmask\n", fcf_index);
15684 }
15685
15686 /**
15687  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
15688  * @phba: pointer to lpfc hba data structure.
15689  *
15690  * This routine is the completion routine for the rediscover FCF table mailbox
15691  * command. If the mailbox command returned failure, it will try to stop the
15692  * FCF rediscover wait timer.
15693  **/
15694 void
15695 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
15696 {
15697         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15698         uint32_t shdr_status, shdr_add_status;
15699
15700         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15701
15702         shdr_status = bf_get(lpfc_mbox_hdr_status,
15703                              &redisc_fcf->header.cfg_shdr.response);
15704         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
15705                              &redisc_fcf->header.cfg_shdr.response);
15706         if (shdr_status || shdr_add_status) {
15707                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15708                                 "2746 Requesting for FCF rediscovery failed "
15709                                 "status x%x add_status x%x\n",
15710                                 shdr_status, shdr_add_status);
15711                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
15712                         spin_lock_irq(&phba->hbalock);
15713                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
15714                         spin_unlock_irq(&phba->hbalock);
15715                         /*
15716                          * CVL event triggered FCF rediscover request failed,
15717                          * last resort to re-try current registered FCF entry.
15718                          */
15719                         lpfc_retry_pport_discovery(phba);
15720                 } else {
15721                         spin_lock_irq(&phba->hbalock);
15722                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
15723                         spin_unlock_irq(&phba->hbalock);
15724                         /*
15725                          * DEAD FCF event triggered FCF rediscover request
15726                          * failed, last resort to fail over as a link down
15727                          * to FCF registration.
15728                          */
15729                         lpfc_sli4_fcf_dead_failthrough(phba);
15730                 }
15731         } else {
15732                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15733                                 "2775 Start FCF rediscover quiescent timer\n");
15734                 /*
15735                  * Start FCF rediscovery wait timer for pending FCF
15736                  * before rescan FCF record table.
15737                  */
15738                 lpfc_fcf_redisc_wait_start_timer(phba);
15739         }
15740
15741         mempool_free(mbox, phba->mbox_mem_pool);
15742 }
15743
15744 /**
15745  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
15746  * @phba: pointer to lpfc hba data structure.
15747  *
15748  * This routine is invoked to request for rediscovery of the entire FCF table
15749  * by the port.
15750  **/
15751 int
15752 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
15753 {
15754         LPFC_MBOXQ_t *mbox;
15755         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15756         int rc, length;
15757
15758         /* Cancel retry delay timers to all vports before FCF rediscover */
15759         lpfc_cancel_all_vport_retry_delay_timer(phba);
15760
15761         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15762         if (!mbox) {
15763                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15764                                 "2745 Failed to allocate mbox for "
15765                                 "requesting FCF rediscover.\n");
15766                 return -ENOMEM;
15767         }
15768
15769         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
15770                   sizeof(struct lpfc_sli4_cfg_mhdr));
15771         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15772                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
15773                          length, LPFC_SLI4_MBX_EMBED);
15774
15775         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15776         /* Set count to 0 for invalidating the entire FCF database */
15777         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
15778
15779         /* Issue the mailbox command asynchronously */
15780         mbox->vport = phba->pport;
15781         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
15782         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
15783
15784         if (rc == MBX_NOT_FINISHED) {
15785                 mempool_free(mbox, phba->mbox_mem_pool);
15786                 return -EIO;
15787         }
15788         return 0;
15789 }
15790
15791 /**
15792  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
15793  * @phba: pointer to lpfc hba data structure.
15794  *
15795  * This function is the failover routine as a last resort to the FCF DEAD
15796  * event when driver failed to perform fast FCF failover.
15797  **/
15798 void
15799 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
15800 {
15801         uint32_t link_state;
15802
15803         /*
15804          * Last resort as FCF DEAD event failover will treat this as
15805          * a link down, but save the link state because we don't want
15806          * it to be changed to Link Down unless it is already down.
15807          */
15808         link_state = phba->link_state;
15809         lpfc_linkdown(phba);
15810         phba->link_state = link_state;
15811
15812         /* Unregister FCF if no devices connected to it */
15813         lpfc_unregister_unused_fcf(phba);
15814 }
15815
15816 /**
15817  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
15818  * @phba: pointer to lpfc hba data structure.
15819  * @rgn23_data: pointer to configure region 23 data.
15820  *
15821  * This function gets SLI3 port configure region 23 data through memory dump
15822  * mailbox command. When it successfully retrieves data, the size of the data
15823  * will be returned, otherwise, 0 will be returned.
15824  **/
15825 static uint32_t
15826 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15827 {
15828         LPFC_MBOXQ_t *pmb = NULL;
15829         MAILBOX_t *mb;
15830         uint32_t offset = 0;
15831         int rc;
15832
15833         if (!rgn23_data)
15834                 return 0;
15835
15836         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15837         if (!pmb) {
15838                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15839                                 "2600 failed to allocate mailbox memory\n");
15840                 return 0;
15841         }
15842         mb = &pmb->u.mb;
15843
15844         do {
15845                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
15846                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15847
15848                 if (rc != MBX_SUCCESS) {
15849                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15850                                         "2601 failed to read config "
15851                                         "region 23, rc 0x%x Status 0x%x\n",
15852                                         rc, mb->mbxStatus);
15853                         mb->un.varDmp.word_cnt = 0;
15854                 }
15855                 /*
15856                  * dump mem may return a zero when finished or we got a
15857                  * mailbox error, either way we are done.
15858                  */
15859                 if (mb->un.varDmp.word_cnt == 0)
15860                         break;
15861                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
15862                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
15863
15864                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
15865                                        rgn23_data + offset,
15866                                        mb->un.varDmp.word_cnt);
15867                 offset += mb->un.varDmp.word_cnt;
15868         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
15869
15870         mempool_free(pmb, phba->mbox_mem_pool);
15871         return offset;
15872 }
15873
15874 /**
15875  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
15876  * @phba: pointer to lpfc hba data structure.
15877  * @rgn23_data: pointer to configure region 23 data.
15878  *
15879  * This function gets SLI4 port configure region 23 data through memory dump
15880  * mailbox command. When it successfully retrieves data, the size of the data
15881  * will be returned, otherwise, 0 will be returned.
15882  **/
15883 static uint32_t
15884 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15885 {
15886         LPFC_MBOXQ_t *mboxq = NULL;
15887         struct lpfc_dmabuf *mp = NULL;
15888         struct lpfc_mqe *mqe;
15889         uint32_t data_length = 0;
15890         int rc;
15891
15892         if (!rgn23_data)
15893                 return 0;
15894
15895         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15896         if (!mboxq) {
15897                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15898                                 "3105 failed to allocate mailbox memory\n");
15899                 return 0;
15900         }
15901
15902         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
15903                 goto out;
15904         mqe = &mboxq->u.mqe;
15905         mp = (struct lpfc_dmabuf *) mboxq->context1;
15906         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15907         if (rc)
15908                 goto out;
15909         data_length = mqe->un.mb_words[5];
15910         if (data_length == 0)
15911                 goto out;
15912         if (data_length > DMP_RGN23_SIZE) {
15913                 data_length = 0;
15914                 goto out;
15915         }
15916         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
15917 out:
15918         mempool_free(mboxq, phba->mbox_mem_pool);
15919         if (mp) {
15920                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
15921                 kfree(mp);
15922         }
15923         return data_length;
15924 }
15925
15926 /**
15927  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
15928  * @phba: pointer to lpfc hba data structure.
15929  *
15930  * This function read region 23 and parse TLV for port status to
15931  * decide if the user disaled the port. If the TLV indicates the
15932  * port is disabled, the hba_flag is set accordingly.
15933  **/
15934 void
15935 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15936 {
15937         uint8_t *rgn23_data = NULL;
15938         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
15939         uint32_t offset = 0;
15940
15941         /* Get adapter Region 23 data */
15942         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
15943         if (!rgn23_data)
15944                 goto out;
15945
15946         if (phba->sli_rev < LPFC_SLI_REV4)
15947                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
15948         else {
15949                 if_type = bf_get(lpfc_sli_intf_if_type,
15950                                  &phba->sli4_hba.sli_intf);
15951                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
15952                         goto out;
15953                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
15954         }
15955
15956         if (!data_size)
15957                 goto out;
15958
15959         /* Check the region signature first */
15960         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
15961                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15962                         "2619 Config region 23 has bad signature\n");
15963                         goto out;
15964         }
15965         offset += 4;
15966
15967         /* Check the data structure version */
15968         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
15969                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15970                         "2620 Config region 23 has bad version\n");
15971                 goto out;
15972         }
15973         offset += 4;
15974
15975         /* Parse TLV entries in the region */
15976         while (offset < data_size) {
15977                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
15978                         break;
15979                 /*
15980                  * If the TLV is not driver specific TLV or driver id is
15981                  * not linux driver id, skip the record.
15982                  */
15983                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
15984                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
15985                     (rgn23_data[offset + 3] != 0)) {
15986                         offset += rgn23_data[offset + 1] * 4 + 4;
15987                         continue;
15988                 }
15989
15990                 /* Driver found a driver specific TLV in the config region */
15991                 sub_tlv_len = rgn23_data[offset + 1] * 4;
15992                 offset += 4;
15993                 tlv_offset = 0;
15994
15995                 /*
15996                  * Search for configured port state sub-TLV.
15997                  */
15998                 while ((offset < data_size) &&
15999                         (tlv_offset < sub_tlv_len)) {
16000                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
16001                                 offset += 4;
16002                                 tlv_offset += 4;
16003                                 break;
16004                         }
16005                         if (rgn23_data[offset] != PORT_STE_TYPE) {
16006                                 offset += rgn23_data[offset + 1] * 4 + 4;
16007                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
16008                                 continue;
16009                         }
16010
16011                         /* This HBA contains PORT_STE configured */
16012                         if (!rgn23_data[offset + 2])
16013                                 phba->hba_flag |= LINK_DISABLED;
16014
16015                         goto out;
16016                 }
16017         }
16018
16019 out:
16020         kfree(rgn23_data);
16021         return;
16022 }
16023
16024 /**
16025  * lpfc_wr_object - write an object to the firmware
16026  * @phba: HBA structure that indicates port to create a queue on.
16027  * @dmabuf_list: list of dmabufs to write to the port.
16028  * @size: the total byte value of the objects to write to the port.
16029  * @offset: the current offset to be used to start the transfer.
16030  *
16031  * This routine will create a wr_object mailbox command to send to the port.
16032  * the mailbox command will be constructed using the dma buffers described in
16033  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
16034  * BDEs that the imbedded mailbox can support. The @offset variable will be
16035  * used to indicate the starting offset of the transfer and will also return
16036  * the offset after the write object mailbox has completed. @size is used to
16037  * determine the end of the object and whether the eof bit should be set.
16038  *
16039  * Return 0 is successful and offset will contain the the new offset to use
16040  * for the next write.
16041  * Return negative value for error cases.
16042  **/
16043 int
16044 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
16045                uint32_t size, uint32_t *offset)
16046 {
16047         struct lpfc_mbx_wr_object *wr_object;
16048         LPFC_MBOXQ_t *mbox;
16049         int rc = 0, i = 0;
16050         uint32_t shdr_status, shdr_add_status;
16051         uint32_t mbox_tmo;
16052         union lpfc_sli4_cfg_shdr *shdr;
16053         struct lpfc_dmabuf *dmabuf;
16054         uint32_t written = 0;
16055
16056         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16057         if (!mbox)
16058                 return -ENOMEM;
16059
16060         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16061                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
16062                         sizeof(struct lpfc_mbx_wr_object) -
16063                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16064
16065         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
16066         wr_object->u.request.write_offset = *offset;
16067         sprintf((uint8_t *)wr_object->u.request.object_name, "/");
16068         wr_object->u.request.object_name[0] =
16069                 cpu_to_le32(wr_object->u.request.object_name[0]);
16070         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
16071         list_for_each_entry(dmabuf, dmabuf_list, list) {
16072                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
16073                         break;
16074                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
16075                 wr_object->u.request.bde[i].addrHigh =
16076                         putPaddrHigh(dmabuf->phys);
16077                 if (written + SLI4_PAGE_SIZE >= size) {
16078                         wr_object->u.request.bde[i].tus.f.bdeSize =
16079                                 (size - written);
16080                         written += (size - written);
16081                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
16082                 } else {
16083                         wr_object->u.request.bde[i].tus.f.bdeSize =
16084                                 SLI4_PAGE_SIZE;
16085                         written += SLI4_PAGE_SIZE;
16086                 }
16087                 i++;
16088         }
16089         wr_object->u.request.bde_count = i;
16090         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
16091         if (!phba->sli4_hba.intr_enable)
16092                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16093         else {
16094                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16095                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16096         }
16097         /* The IOCTL status is embedded in the mailbox subheader. */
16098         shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
16099         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16100         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16101         if (rc != MBX_TIMEOUT)
16102                 mempool_free(mbox, phba->mbox_mem_pool);
16103         if (shdr_status || shdr_add_status || rc) {
16104                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16105                                 "3025 Write Object mailbox failed with "
16106                                 "status x%x add_status x%x, mbx status x%x\n",
16107                                 shdr_status, shdr_add_status, rc);
16108                 rc = -ENXIO;
16109         } else
16110                 *offset += wr_object->u.response.actual_write_length;
16111         return rc;
16112 }
16113
16114 /**
16115  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
16116  * @vport: pointer to vport data structure.
16117  *
16118  * This function iterate through the mailboxq and clean up all REG_LOGIN
16119  * and REG_VPI mailbox commands associated with the vport. This function
16120  * is called when driver want to restart discovery of the vport due to
16121  * a Clear Virtual Link event.
16122  **/
16123 void
16124 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
16125 {
16126         struct lpfc_hba *phba = vport->phba;
16127         LPFC_MBOXQ_t *mb, *nextmb;
16128         struct lpfc_dmabuf *mp;
16129         struct lpfc_nodelist *ndlp;
16130         struct lpfc_nodelist *act_mbx_ndlp = NULL;
16131         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
16132         LIST_HEAD(mbox_cmd_list);
16133         uint8_t restart_loop;
16134
16135         /* Clean up internally queued mailbox commands with the vport */
16136         spin_lock_irq(&phba->hbalock);
16137         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
16138                 if (mb->vport != vport)
16139                         continue;
16140
16141                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
16142                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
16143                         continue;
16144
16145                 list_del(&mb->list);
16146                 list_add_tail(&mb->list, &mbox_cmd_list);
16147         }
16148         /* Clean up active mailbox command with the vport */
16149         mb = phba->sli.mbox_active;
16150         if (mb && (mb->vport == vport)) {
16151                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
16152                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
16153                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16154                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16155                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
16156                         /* Put reference count for delayed processing */
16157                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
16158                         /* Unregister the RPI when mailbox complete */
16159                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
16160                 }
16161         }
16162         /* Cleanup any mailbox completions which are not yet processed */
16163         do {
16164                 restart_loop = 0;
16165                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
16166                         /*
16167                          * If this mailox is already processed or it is
16168                          * for another vport ignore it.
16169                          */
16170                         if ((mb->vport != vport) ||
16171                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
16172                                 continue;
16173
16174                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
16175                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
16176                                 continue;
16177
16178                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16179                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16180                                 ndlp = (struct lpfc_nodelist *)mb->context2;
16181                                 /* Unregister the RPI when mailbox complete */
16182                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
16183                                 restart_loop = 1;
16184                                 spin_unlock_irq(&phba->hbalock);
16185                                 spin_lock(shost->host_lock);
16186                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
16187                                 spin_unlock(shost->host_lock);
16188                                 spin_lock_irq(&phba->hbalock);
16189                                 break;
16190                         }
16191                 }
16192         } while (restart_loop);
16193
16194         spin_unlock_irq(&phba->hbalock);
16195
16196         /* Release the cleaned-up mailbox commands */
16197         while (!list_empty(&mbox_cmd_list)) {
16198                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
16199                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16200                         mp = (struct lpfc_dmabuf *) (mb->context1);
16201                         if (mp) {
16202                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
16203                                 kfree(mp);
16204                         }
16205                         ndlp = (struct lpfc_nodelist *) mb->context2;
16206                         mb->context2 = NULL;
16207                         if (ndlp) {
16208                                 spin_lock(shost->host_lock);
16209                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
16210                                 spin_unlock(shost->host_lock);
16211                                 lpfc_nlp_put(ndlp);
16212                         }
16213                 }
16214                 mempool_free(mb, phba->mbox_mem_pool);
16215         }
16216
16217         /* Release the ndlp with the cleaned-up active mailbox command */
16218         if (act_mbx_ndlp) {
16219                 spin_lock(shost->host_lock);
16220                 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
16221                 spin_unlock(shost->host_lock);
16222                 lpfc_nlp_put(act_mbx_ndlp);
16223         }
16224 }
16225
16226 /**
16227  * lpfc_drain_txq - Drain the txq
16228  * @phba: Pointer to HBA context object.
16229  *
16230  * This function attempt to submit IOCBs on the txq
16231  * to the adapter.  For SLI4 adapters, the txq contains
16232  * ELS IOCBs that have been deferred because the there
16233  * are no SGLs.  This congestion can occur with large
16234  * vport counts during node discovery.
16235  **/
16236
16237 uint32_t
16238 lpfc_drain_txq(struct lpfc_hba *phba)
16239 {
16240         LIST_HEAD(completions);
16241         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
16242         struct lpfc_iocbq *piocbq = 0;
16243         unsigned long iflags = 0;
16244         char *fail_msg = NULL;
16245         struct lpfc_sglq *sglq;
16246         union lpfc_wqe wqe;
16247
16248         spin_lock_irqsave(&phba->hbalock, iflags);
16249         if (pring->txq_cnt > pring->txq_max)
16250                 pring->txq_max = pring->txq_cnt;
16251
16252         spin_unlock_irqrestore(&phba->hbalock, iflags);
16253
16254         while (pring->txq_cnt) {
16255                 spin_lock_irqsave(&phba->hbalock, iflags);
16256
16257                 piocbq = lpfc_sli_ringtx_get(phba, pring);
16258                 if (!piocbq) {
16259                         spin_unlock_irqrestore(&phba->hbalock, iflags);
16260                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16261                                 "2823 txq empty and txq_cnt is %d\n ",
16262                                 pring->txq_cnt);
16263                         break;
16264                 }
16265                 sglq = __lpfc_sli_get_sglq(phba, piocbq);
16266                 if (!sglq) {
16267                         __lpfc_sli_ringtx_put(phba, pring, piocbq);
16268                         spin_unlock_irqrestore(&phba->hbalock, iflags);
16269                         break;
16270                 }
16271
16272                 /* The xri and iocb resources secured,
16273                  * attempt to issue request
16274                  */
16275                 piocbq->sli4_lxritag = sglq->sli4_lxritag;
16276                 piocbq->sli4_xritag = sglq->sli4_xritag;
16277                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
16278                         fail_msg = "to convert bpl to sgl";
16279                 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
16280                         fail_msg = "to convert iocb to wqe";
16281                 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
16282                         fail_msg = " - Wq is full";
16283                 else
16284                         lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
16285
16286                 if (fail_msg) {
16287                         /* Failed means we can't issue and need to cancel */
16288                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16289                                         "2822 IOCB failed %s iotag 0x%x "
16290                                         "xri 0x%x\n",
16291                                         fail_msg,
16292                                         piocbq->iotag, piocbq->sli4_xritag);
16293                         list_add_tail(&piocbq->list, &completions);
16294                 }
16295                 spin_unlock_irqrestore(&phba->hbalock, iflags);
16296         }
16297
16298         /* Cancel all the IOCBs that cannot be issued */
16299         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
16300                                 IOERR_SLI_ABORTED);
16301
16302         return pring->txq_cnt;
16303 }