/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include <asm/set_memory.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct lpfc_iocbq *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
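/*
 * Note: in the 64-bit little-endian build above, host byte order already
 * matches SLI byte order, so each 64-bit word can be copied verbatim. On
 * any other configuration the macro falls back to the SLI3-era
 * lpfc_sli_pcimem_bcopy(), which performs the per-word endianness
 * conversion described in the comment block above.
 */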
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */

        temp_wqe = lpfc_sli4_qe(q, q->host_index);
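        /*
         * Ring-full arithmetic: host_index is the next slot the driver will
         * fill and hba_index the next slot the hardware will consume. One
         * slot is deliberately left unused so that "full" (host_index + 1 ==
         * hba_index, modulo entry_count) remains distinguishable from
         * "empty" (host_index == hba_index); the check below relies on this.
         */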
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;
        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
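        /*
         * Two doorbell layouts are handled below: list-format doorbells
         * carry the number of WQEs just posted plus the queue id (with
         * extra DPP fields on if_type 6 hardware), while ring-format
         * doorbells carry only the count and id. Which layout applies is a
         * property of the queue (q->db_format) fixed at setup time.
         */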
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
        uint32_t released = 0;

        /* sanity check on queue memory */

        if (q->hba_index == index)

        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
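/*
 * Worked example for lpfc_sli4_wq_release(): with entry_count = 256,
 * hba_index = 10 and a reported completion index of 13, the loop advances
 * hba_index 11 -> 12 -> 13 and returns 3 entries released; the modulo
 * keeps the walk correct when the index wraps past the end of the ring.
 */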
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */

        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
lpfc_sli4_mq_release(struct lpfc_queue *q)
        /* sanity check on queue memory */

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);

 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */

        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);

 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);

 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
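        /*
         * Two consumption schemes are handled above: pre-EQAV hardware
         * requires the host to clear each consumed EQE's valid bit, while
         * EQAV ("autovalid") hardware leaves entries untouched and instead
         * flips the expected sense of the valid bit (qe_valid) every time
         * host_index wraps to 0, so stale entries from the previous pass
         * never appear valid.
         */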
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
        struct lpfc_eqe *eqe;
        uint32_t count = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);

lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     uint8_t rearm)
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        eq->queue_claimed = 0;

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
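        /*
         * EQ processing is batched: EQEs are handled until the queue is
         * empty or max_proc_limit is reached, the doorbell is rung without
         * rearming every notify_interval entries so the HBA can reclaim
         * ring slots, and the final doorbell write above reports any
         * remainder and applies the caller's rearm policy.
         */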
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */

        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;

__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);

 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)

        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
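/*
 * RQE postings above are batched: the doorbell is written only once every
 * hq->notify_interval entries and reports that many new RQEs at once,
 * keeping doorbell MMIO traffic low on the receive path.
 */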
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
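/*
 * The header and data RQs operate as a lockstep pair: one buffer consumed
 * by the HBA advances both indices, which is why the put side also checks
 * that the two put indices match before posting.
 */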
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);

 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
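/*
 * Both helpers above index the ring with byte arithmetic rather than array
 * indexing because the per-entry size (iocb_cmd_size / iocb_rsp_size) is a
 * run-time property of the negotiated SLI revision, not a compile-time
 * type size.
 */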
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;

 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);

 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks whether stop_time (ratov from setting rrq active) has
 * been reached; if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
lpfc_handle_rrq_active(struct lpfc_hba *phba)
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
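/*
 * The two-phase pattern above appears deliberate: expired RRQs are moved
 * onto the local send_rrq list while hbalock is held, and only after the
 * lock is dropped are they sent (lpfc_send_rrq) or cleared and freed,
 * keeping that heavier work off the spinlock.
 */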
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL then only remove rrqs for this vport & this ndlp.
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                             msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, the allocation succeeds and a pointer to the newly allocated
 * sglq object is returned; otherwise NULL is returned.
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        struct lpfc_sli_ring *pring = NULL;
        int found = 0;

        if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
                pring = phba->sli4_hba.nvmels_wq->pring;
        else
                pring = lpfc_phba_elsring(phba);

        lockdep_assert_held(&pring->ring_lock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
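/*
 * The selection loop above is RRQ-aware: an sglq whose XRI still has an
 * RRQ outstanding for this DID is returned to the free list and the next
 * candidate tried; if the walk comes back around to the first candidate
 * (start_sglq), no usable XRI exists right now and NULL is returned.
 */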
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, the allocation succeeds and a pointer to the newly allocated
 * sglq object is returned; otherwise NULL is returned.
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        lockdep_assert_held(&phba->hbalock);

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                pring = phba->sli4_hba.els_wq->pring;
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                    (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        lockdep_assert_held(&phba->hbalock);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);

 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;

 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (!piocb->iocb_cmpl) {
                        if (piocb->iocb_flag & LPFC_IO_NVME)
                                lpfc_nvme_cancel_iocb(phba, piocb);
                        else
                                lpfc_sli_release_iocbq(phba, piocb);
                } else {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        (piocb->iocb_cmpl) (phba, piocb, piocb);
                }
        }
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by the ring event handler function to get the iocb
 * type. This function translates the iocb command to an iocb command type
 * used to decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk("%s - Unhandled SLI-3 Command x%x\n",
                       __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
lpfc_sli_ring_map(struct lpfc_hba *phba)
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
        if (phba->sli_rev == LPFC_SLI_REV4)
                lockdep_assert_held(&pring->ring_lock);
        else
                lockdep_assert_held(&phba->hbalock);

        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
        pring->txcmplq_cnt++;

        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                BUG_ON(!piocb->vport);
                if (!(piocb->vport->load_flag & FC_UNLOADING))
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies +
                                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }

        return 0;
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        struct lpfc_iocbq *cmd_iocb;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        return cmd_iocb;

 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
        uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

        lockdep_assert_held(&phba->hbalock);

        if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
                pring->sli.sli3.next_cmdidx = 0;

        if (unlikely(pring->sli.sli3.local_getidx ==
                     pring->sli.sli3.next_cmdidx)) {

                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->sli.sli3.local_getidx,
                                        max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * the worker thread.
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
        struct lpfc_iocbq **new_arr;
        struct lpfc_iocbq **old_arr;
        size_t new_len;
        struct lpfc_sli *psli = &phba->sli;
        uint16_t iotag;

        spin_lock_irq(&phba->hbalock);
        iotag = psli->last_iotag;
        if (++iotag < psli->iocbq_lookup_len) {
                psli->last_iotag = iotag;
                psli->iocbq_lookup[iotag] = iocbq;
                spin_unlock_irq(&phba->hbalock);
                iocbq->iotag = iotag;
                return iotag;
        } else if (psli->iocbq_lookup_len < (0xffff
                                             - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
                new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                        old_arr = psli->iocbq_lookup;
                        if (new_len <= psli->iocbq_lookup_len) {
                                /* highly improbable case */
                                kfree(new_arr);
                                iotag = psli->last_iotag;
                                if (++iotag < psli->iocbq_lookup_len) {
                                        psli->last_iotag = iotag;
                                        psli->iocbq_lookup[iotag] = iocbq;
                                        spin_unlock_irq(&phba->hbalock);
                                        iocbq->iotag = iotag;
                                        return iotag;
                                }
                                spin_unlock_irq(&phba->hbalock);
                                return 0;
                        }
                        if (psli->iocbq_lookup)
                                memcpy(new_arr, old_arr,
                                       ((psli->last_iotag + 1) *
                                        sizeof(struct lpfc_iocbq *)));
                        psli->iocbq_lookup = new_arr;
                        psli->iocbq_lookup_len = new_len;
                        psli->last_iotag = iotag;
                        psli->iocbq_lookup[iotag] = iocbq;
                        spin_unlock_irq(&phba->hbalock);
                        iocbq->iotag = iotag;
                        kfree(old_arr);
                        return iotag;
                }
        } else
                spin_unlock_irq(&phba->hbalock);

        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
                        psli->last_iotag);

        return 0;
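/*
 * The iocbq_lookup table doubles as the iotag allocator: tags are handed
 * out sequentially, and when the table is exhausted it is grown by
 * LPFC_IOCBQ_LOOKUP_INCREMENT using an allocate-copy-swap done under
 * hbalock. Growth stops short of 0xffff because the iotag is a 16-bit
 * field and zero is reserved as the invalid tag.
 */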
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
        lockdep_assert_held(&phba->hbalock);

        nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_debugfs_slow_ring_trc(phba,
                        "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
                        *(((uint32_t *) &nextiocb->iocb) + 4),
                        *(((uint32_t *) &nextiocb->iocb) + 6),
                        *(((uint32_t *) &nextiocb->iocb) + 7));
        }

        /*
         * Issue iocb command to adapter
         */
        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
        pring->stats.iocb_cmd++;

        /*
         * If there is no completion routine to call, we can release the
         * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
         */
        if (nextiocb->iocb_cmpl)
                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
        else
                __lpfc_sli_release_iocbq(phba, nextiocb);

        /*
         * Let the HBA know what IOCB slot will be the next one the
         * driver will put a command into.
         */
        pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
        writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        int ringno = pring->ringno;

        pring->flag |= LPFC_CALL_RING_AVAILABLE;

        wmb();

        /*
         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
         * The HBA will tell us when an IOCB entry is available.
         */
        writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */

        pring->stats.iocb_cmd_full++;
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        int ringno = pring->ringno;

        /*
         * Tell the HBA that there is work to do in this ring.
         */
        if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
                wmb();
                writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
                readl(phba->CAregaddr); /* flush */
        }

 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        IOCB_t *iocb;
        struct lpfc_iocbq *nextiocb;

        lockdep_assert_held(&phba->hbalock);

        /*
         * Check to see if:
         *  (a) there is anything on the txq to send
         *  (b) link is up
         *  (c) link attention events can be processed (fcp ring only)
         *  (d) IOCB processing is not blocked by the outstanding mbox command.
         */

        if (lpfc_is_link_up(phba) &&
            (!list_empty(&pring->txq)) &&
            (pring->ringno != LPFC_FCP_RING ||
             phba->sli.sli_flag & LPFC_PROCESS_LA)) {

                while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
                       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
                        lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

                if (iocb)
                        lpfc_sli_update_ring(phba, pring);
                else
                        lpfc_sli_update_full_ring(phba, pring);
        }
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
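
/*
 * Worked example of the index arithmetic above, assuming entry_count = 256:
 * with hbqPutIdx == next_hbqPutIdx == 255, the pre-increment takes
 * next_hbqPutIdx to 256, which is >= entry_count, so it wraps to 0. The
 * HBQ is treated as full (NULL return) once next_hbqPutIdx catches up
 * with the firmware's get index, local_hbqGetIdx.
 */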
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function returns
 * zero if it successfully posts the buffer, otherwise it returns
 * an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function returns zero if
 * it successfully posts the buffer, otherwise it returns an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
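
/*
 * Sketch of the buffer-tag layout produced above: lpfc_sli4_rq_put()
 * returns the RQE index, and the HBQ number is packed into the upper
 * half of the tag, so
 *
 *	tag   = rqe_index | (hbqno << 16);
 *	hbqno = tag >> 16;	(as decoded in lpfc_sli_hbqbuf_find() and
 *				 lpfc_sli_free_hbq() below)
 */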
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}
/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}
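
/*
 * Usage sketch (initialization path, illustrative only): post the initial
 * set of ELS HBQ buffers at SLI bring-up, then top the queue up later as
 * buffers are consumed:
 *
 *	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
 *	...
 *	lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 */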
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to take a buffer from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. It gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
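
/*
 * The return value doubles as the validity check; a minimal caller sketch
 * (this is how lpfc_sli_handle_mb_event() below uses it):
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		... treat as a fatal error and take the HBA offline ...
 *	}
 */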
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the completion pointed to by
 * context3 of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
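
/*
 * Sketch of the waiting side that pairs with this handler (condensed from
 * the lpfc_sli_issue_mbox_wait() pattern; error handling and the exact
 * timeout conversion are elided):
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->context3 = &mbox_done;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_for_completion_timeout(&mbox_done, msecs_to_jiffies(timeout));
 *	... then, under hbalock: pmboxq->context3 = NULL and check
 *	    LPFC_MBX_WAKE to see whether the handler above ran ...
 */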
static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		if (phba->sli_rev == LPFC_SLI_REV4)
			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
		pmb->ctx_buf = NULL;
		pmb->ctx_ndlp = NULL;
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x %px\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			} else {
				__lpfc_sli_rpi_release(vport, ndlp);
			}
			if (vport->load_flag & FC_UNLOADING)
				lpfc_nlp_put(ndlp);
			pmb->ctx_ndlp = NULL;
		}
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
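
/*
 * Typical fire-and-forget submission that relies on the default completion
 * above to reclaim resources (illustrative sketch of a pattern used
 * throughout the driver; caller context and error paths elided):
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return;
 *	lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
 *	mbox->vport = vport;
 *	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */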
/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts the
 * reference back.
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					"0010 UNREG_LOGIN vpi:%x "
					"rpi:%x DID:%x defer x%x flg x%x "
					"map:%x %px\n",
					vport->vpi, ndlp->nlp_rpi,
					ndlp->nlp_DID, ndlp->nlp_defer_did,
					ndlp->nlp_flag,
					ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				     NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x x%px\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					__lpfc_sli_rpi_release(vport, ndlp);
				}
			}
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. It processes all the completed
 * mailbox commands and gives them to the upper layers. The interrupt
 * service routine processes mailbox completion interrupt and adds completed
 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
 * completed mailbox commands in the mboxq_cmpl queue to the upper layers.
 * This function returns the mailbox commands to the upper layer by calling
 * the completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mbox command
		 * completion is received.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}
/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer is posted for a particular exchange and
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
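
/*
 * Tag dispatch sketch: QUE_BUFTAG_BIT selects between the two buffer pools
 * handled above:
 *
 *	tag & QUE_BUFTAG_BIT  -> ring tagged buffer (posted per-exchange)
 *	otherwise             -> HBQ buffer; hbqno is recovered via tag >> 16
 */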
/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	switch (fch_type) {
	case FC_TYPE_NVME:
		lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
		return 1;
	default:
		break;
	}

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
									saveq);
		return 1;
	}

	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t           *irsp;
	WORD5            *w5p;
	uint32_t           Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
			    saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. The driver calls this function with the hbalock held
 * for SLI3 ports or the ring lock held for SLI4 ports.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;
	spinlock_t *temp_lock = NULL;
	unsigned long iflag = 0;

	if (phba->sli_rev == LPFC_SLI_REV4)
		temp_lock = &pring->ring_lock;
	else
		temp_lock = &phba->hbalock;

	spin_lock_irqsave(temp_lock, iflag);
	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			pring->txcmplq_cnt--;
			spin_unlock_irqrestore(temp_lock, iflag);
			return cmd_iocb;
		}
	}

	spin_unlock_irqrestore(temp_lock, iflag);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
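
/*
 * The lookup above is O(1): iocbq_lookup is a flat array indexed by the
 * iotag assigned when the command was issued, so the response path is just
 * a bounds check plus one dereference:
 *
 *	cmd_iocb = phba->sli.iocbq_lookup[iotag];
 */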
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. The driver calls this function with
 * the ring lock held because this function is an SLI4 port only helper.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	spinlock_t *temp_lock = NULL;
	unsigned long iflag = 0;

	if (phba->sli_rev == LPFC_SLI_REV4)
		temp_lock = &pring->ring_lock;
	else
		temp_lock = &phba->hbalock;

	spin_lock_irqsave(temp_lock, iflag);
	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			pring->txcmplq_cnt--;
			spin_unlock_irqrestore(temp_lock, iflag);
			return cmd_iocb;
		}
	}

	spin_unlock_irqrestore(temp_lock, iflag);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x lookup error: max iotag (x%x) "
			"iocb_flag x%x\n",
			iotag, phba->sli.last_iotag,
			cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
	return NULL;
}
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			    (pring->ringno == LPFC_ELS_RING) &&
			    (cmdiocbp->iocb.ulpCommand ==
			     CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->sli.sli3.numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}
/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @t: Pointer to the timer_list embedded in the HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up the worker thread to process it. Otherwise, it will set up
 * the Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;
	uint64_t sli_intr, cnt;

	phba = from_timer(phba, t, eratt_poll);

	/* Here we will also keep track of interrupts per sec of the hba */
	sli_intr = phba->sli.slistat.sli_intr;

	if (phba->sli.slistat.sli_prev_intr > sli_intr)
		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
			sli_intr);
	else
		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);

	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
	do_div(cnt, phba->eratt_poll_interval);
	phba->sli.slistat.sli_ips = cnt;

	phba->sli.slistat.sli_prev_intr = sli_intr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll,
			  jiffies +
			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
	return;
}
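
/*
 * Worked example of the wrap-safe delta computed above: if the 64-bit
 * counter wrapped, e.g. sli_prev_intr = 2^64 - 5 and sli_intr = 10, the
 * code computes
 *
 *	cnt = ((2^64 - 1) - sli_prev_intr) + sli_intr = 4 + 10 = 14
 *
 * which do_div() then divides by eratt_poll_interval (in seconds) to
 * yield the interrupts-per-second figure stored in sli_ips.
 */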
/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
 * LE bit set. The function will call the completion handler of the command iocb
 * if the response iocb indicates a completion for a command iocb or it is
 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 **/
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			spin_unlock_irqrestore(&phba->hbalock, iflag);
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->sli.sli3.rspidx,
			&phba->host_gp[pring->ringno].rspGetInx);

		if (pring->sli.sli3.rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del_init(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->sli.sli3.rspidx,
			&phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->sli.sli3.rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->sli.sli3.rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;
	int count = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			count++;
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			count++;
			break;
		default:
			break;
		}

		/* Limit the number of events to 64 to avoid soft lockups */
		if (count == 64)
			break;
	}
}
3917 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3918 * @phba: Pointer to HBA context object.
3919 * @pring: Pointer to driver SLI ring object.
3921 * This function aborts all iocbs in the given ring and frees all the iocb
3922 * objects in txq. This function issues an abort iocb for all the iocb commands
3923 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3924 * the return of this function. The caller is not required to hold any locks.
3927 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3929 LIST_HEAD(completions);
3930 struct lpfc_iocbq *iocb, *next_iocb;
3932 if (pring->ringno == LPFC_ELS_RING) {
3933 lpfc_fabric_abort_hba(phba);
3936 /* Error everything on txq and txcmplq
3939 if (phba->sli_rev >= LPFC_SLI_REV4) {
3940 spin_lock_irq(&pring->ring_lock);
3941 list_splice_init(&pring->txq, &completions);
3943 spin_unlock_irq(&pring->ring_lock);
3945 spin_lock_irq(&phba->hbalock);
3946 /* Next issue ABTS for everything on the txcmplq */
3947 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3948 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3949 spin_unlock_irq(&phba->hbalock);
3951 spin_lock_irq(&phba->hbalock);
3952 list_splice_init(&pring->txq, &completions);
3955 /* Next issue ABTS for everything on the txcmplq */
3956 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3957 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3958 spin_unlock_irq(&phba->hbalock);
3961 /* Cancel all the IOCBs from the completions list */
3962 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
3967 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3968 * @phba: Pointer to HBA context object.
3971 * This function aborts all iocbs in FCP rings and frees all the iocb
3972 * objects in txq. This function issues an abort iocb for all the iocb commands
3973 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3974 * the return of this function. The caller is not required to hold any locks.
3977 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3979 struct lpfc_sli *psli = &phba->sli;
3980 struct lpfc_sli_ring *pring;
3983 /* Look on all the FCP Rings for the iotag */
3984 if (phba->sli_rev >= LPFC_SLI_REV4) {
3985 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3986 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
3987 lpfc_sli_abort_iocb_ring(phba, pring);
3990 pring = &psli->sli3_ring[LPFC_FCP_RING];
3991 lpfc_sli_abort_iocb_ring(phba, pring);
3996 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
3997 * @phba: Pointer to HBA context object.
3999 * This function flushes all iocbs in the IO ring and frees all the iocb
4000 * objects in txq and txcmplq. This function will not issue abort iocbs
4001 * for the iocb commands in txcmplq; they will just be returned with
4002 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's
4003 * PCI slot has been permanently disabled.
4006 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4010 struct lpfc_sli *psli = &phba->sli;
4011 struct lpfc_sli_ring *pring;
4013 struct lpfc_iocbq *piocb, *next_iocb;
4015 spin_lock_irq(&phba->hbalock);
4016 /* Indicate the I/O queues are flushed */
4017 phba->hba_flag |= HBA_IOQ_FLUSH;
4018 spin_unlock_irq(&phba->hbalock);
4020 /* Look on all the FCP Rings for the iotag */
4021 if (phba->sli_rev >= LPFC_SLI_REV4) {
4022 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4023 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4025 spin_lock_irq(&pring->ring_lock);
4026 /* Retrieve everything on txq */
4027 list_splice_init(&pring->txq, &txq);
4028 list_for_each_entry_safe(piocb, next_iocb,
4029 &pring->txcmplq, list)
4030 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4031 /* Retrieve everything on the txcmplq */
4032 list_splice_init(&pring->txcmplq, &txcmplq);
4034 pring->txcmplq_cnt = 0;
4035 spin_unlock_irq(&pring->ring_lock);
4038 lpfc_sli_cancel_iocbs(phba, &txq,
4039 IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN);
4041 /* Flush the txcmpq */
4042 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4043 IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN);
4047 pring = &psli->sli3_ring[LPFC_FCP_RING];
4049 spin_lock_irq(&phba->hbalock);
4050 /* Retrieve everything on txq */
4051 list_splice_init(&pring->txq, &txq);
4052 list_for_each_entry_safe(piocb, next_iocb,
4053 &pring->txcmplq, list)
4054 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4055 /* Retrieve everything on the txcmplq */
4056 list_splice_init(&pring->txcmplq, &txcmplq);
4058 pring->txcmplq_cnt = 0;
4059 spin_unlock_irq(&phba->hbalock);
4062 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN);
4064 /* Flush the txcmpq */
4065 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN);
4071 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4072 * @phba: Pointer to HBA context object.
4073 * @mask: Bit mask to be checked.
4075 * This function reads the host status register and compares
4076 * with the provided bit mask to check if HBA completed
4077 * the restart. This function will wait in a loop for the
4078 * HBA to complete restart. If the HBA does not restart within
4079 * 15 iterations, the function will reset the HBA again. The
4080 * function returns 1 when the HBA fails to restart; otherwise it returns zero.
4084 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4090 /* Read the HBA Host Status Register */
4091 if (lpfc_readl(phba->HSregaddr, &status))
4095 /* Check status register every 100ms for 5 retries, then every
4096 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4097 * every 2.5 sec for 4.
4098 * Break out of the loop if errors occurred during init.
4099 */
4100 while (((status & mask) != mask) &&
4101 !(status & HS_FFERM) &&
4113 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4114 lpfc_sli_brdrestart(phba);
4116 /* Read the HBA Host Status Register */
4117 if (lpfc_readl(phba->HSregaddr, &status)) {
4123 /* Check to see if any errors occurred during init */
4124 if ((status & HS_FFERM) || (i >= 20)) {
4125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4126 "2751 Adapter failed to restart, "
4127 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4129 readl(phba->MBslimaddr + 0xa8),
4130 readl(phba->MBslimaddr + 0xac));
4131 phba->link_state = LPFC_HBA_ERROR;
4139 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4140 * @phba: Pointer to HBA context object.
4141 * @mask: Bit mask to be checked.
4143 * This function checks the host status register to determine if the HBA is
4144 * ready. This function will wait in a loop for the HBA to be ready.
4145 * If the HBA is not ready, the function will reset the HBA PCI
4146 * function again. The function returns 1 when the HBA fails to become
4147 * ready; otherwise it returns zero.
4150 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4155 /* Read the HBA Host Status Register */
4156 status = lpfc_sli4_post_status_check(phba);
4159 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4160 lpfc_sli_brdrestart(phba);
4161 status = lpfc_sli4_post_status_check(phba);
4164 /* Check to see if any errors occurred during init */
4166 phba->link_state = LPFC_HBA_ERROR;
4169 phba->sli4_hba.intr_enable = 0;
4175 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4176 * @phba: Pointer to HBA context object.
4177 * @mask: Bit mask to be checked.
4179 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4180 * by invoking the API jump table function pointer in the lpfc_hba struct.
4183 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4185 return phba->lpfc_sli_brdready(phba, mask);
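/*
 * Illustrative usage sketch (not compiled into the driver): after a
 * restart, callers typically poll for the firmware-ready and
 * mailbox-ready bits, mirroring the HS_FFRDY/HS_MBRDY checks made in
 * lpfc_sli_chipset_init() later in this file:
 *
 *	lpfc_sli_brdrestart(phba);
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		return -EIO;
 */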
4188 #define BARRIER_TEST_PATTERN (0xdeadbeef)
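/*
 * Note on the pattern below: lpfc_reset_barrier() writes this value into
 * SLIM word 1 and then polls until the chip stores back its bitwise
 * complement (~0xdeadbeef == 0x21524110). Seeing the complement signals
 * that the chip observed the KILL_BOARD barrier and quiesced its DMA
 * activity; anything else means the barrier did not take effect.
 */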
4191 * lpfc_reset_barrier - Make HBA ready for HBA reset
4192 * @phba: Pointer to HBA context object.
4194 * This function is called, with hbalock held, before resetting an HBA;
4195 * it requests that the HBA quiesce DMAs before the reset.
4197 void lpfc_reset_barrier(struct lpfc_hba *phba)
4199 uint32_t __iomem *resp_buf;
4200 uint32_t __iomem *mbox_buf;
4201 volatile uint32_t mbox;
4202 uint32_t hc_copy, ha_copy, resp_data;
4206 lockdep_assert_held(&phba->hbalock);
4208 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4209 if (hdrtype != 0x80 ||
4210 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4211 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4215 /* Tell the other part of the chip to temporarily suspend all its DMA activity. */
4218 resp_buf = phba->MBslimaddr;
4220 /* Disable the error attention */
4221 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4223 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4224 readl(phba->HCregaddr); /* flush */
4225 phba->link_flag |= LS_IGNORE_ERATT;
4227 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4229 if (ha_copy & HA_ERATT) {
4230 /* Clear Chip error bit */
4231 writel(HA_ERATT, phba->HAregaddr);
4232 phba->pport->stopped = 1;
4236 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4237 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4239 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4240 mbox_buf = phba->MBslimaddr;
4241 writel(mbox, mbox_buf);
4243 for (i = 0; i < 50; i++) {
4244 if (lpfc_readl((resp_buf + 1), &resp_data))
4246 if (resp_data != ~(BARRIER_TEST_PATTERN))
4252 if (lpfc_readl((resp_buf + 1), &resp_data))
4254 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4255 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4256 phba->pport->stopped)
4262 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4264 for (i = 0; i < 500; i++) {
4265 if (lpfc_readl(resp_buf, &resp_data))
4267 if (resp_data != mbox)
4276 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4278 if (!(ha_copy & HA_ERATT))
4284 if (readl(phba->HAregaddr) & HA_ERATT) {
4285 writel(HA_ERATT, phba->HAregaddr);
4286 phba->pport->stopped = 1;
4290 phba->link_flag &= ~LS_IGNORE_ERATT;
4291 writel(hc_copy, phba->HCregaddr);
4292 readl(phba->HCregaddr); /* flush */
4296 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4297 * @phba: Pointer to HBA context object.
4299 * This function issues a kill_board mailbox command and waits for
4300 * the error attention interrupt. This function is called for stopping
4301 * the firmware processing. The caller is not required to hold any
4302 * locks. This function calls lpfc_hba_down_post function to free
4303 * any pending commands after the kill. The function returns 1 when it
4304 * fails to kill the board; otherwise it returns 0.
4307 lpfc_sli_brdkill(struct lpfc_hba *phba)
4309 struct lpfc_sli *psli;
4319 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4320 "0329 Kill HBA Data: x%x x%x\n",
4321 phba->pport->port_state, psli->sli_flag);
4323 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4327 /* Disable the error attention */
4328 spin_lock_irq(&phba->hbalock);
4329 if (lpfc_readl(phba->HCregaddr, &status)) {
4330 spin_unlock_irq(&phba->hbalock);
4331 mempool_free(pmb, phba->mbox_mem_pool);
4334 status &= ~HC_ERINT_ENA;
4335 writel(status, phba->HCregaddr);
4336 readl(phba->HCregaddr); /* flush */
4337 phba->link_flag |= LS_IGNORE_ERATT;
4338 spin_unlock_irq(&phba->hbalock);
4340 lpfc_kill_board(phba, pmb);
4341 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4342 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4344 if (retval != MBX_SUCCESS) {
4345 if (retval != MBX_BUSY)
4346 mempool_free(pmb, phba->mbox_mem_pool);
4347 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4348 "2752 KILL_BOARD command failed retval %d\n",
4350 spin_lock_irq(&phba->hbalock);
4351 phba->link_flag &= ~LS_IGNORE_ERATT;
4352 spin_unlock_irq(&phba->hbalock);
4356 spin_lock_irq(&phba->hbalock);
4357 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4358 spin_unlock_irq(&phba->hbalock);
4360 mempool_free(pmb, phba->mbox_mem_pool);
4362 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4363 * attention every 100ms for 3 seconds. If we don't get ERATT after
4364 * 3 seconds we still set HBA_ERROR state because the status of the
4365 * board is now undefined. */
4367 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4369 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4371 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4375 del_timer_sync(&psli->mbox_tmo);
4376 if (ha_copy & HA_ERATT) {
4377 writel(HA_ERATT, phba->HAregaddr);
4378 phba->pport->stopped = 1;
4380 spin_lock_irq(&phba->hbalock);
4381 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4382 psli->mbox_active = NULL;
4383 phba->link_flag &= ~LS_IGNORE_ERATT;
4384 spin_unlock_irq(&phba->hbalock);
4386 lpfc_hba_down_post(phba);
4387 phba->link_state = LPFC_HBA_ERROR;
4389 return ha_copy & HA_ERATT ? 0 : 1;
4393 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4394 * @phba: Pointer to HBA context object.
4396 * This function resets the HBA by writing HC_INITFF to the control
4397 * register. After the HBA resets, this function resets all the iocb ring
4398 * indices. This function disables PCI layer parity checking during the reset.
4400 * This function returns 0 always.
4401 * The caller is not required to hold any locks.
4404 lpfc_sli_brdreset(struct lpfc_hba *phba)
4406 struct lpfc_sli *psli;
4407 struct lpfc_sli_ring *pring;
4414 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4415 "0325 Reset HBA Data: x%x x%x\n",
4416 (phba->pport) ? phba->pport->port_state : 0,
4419 /* perform board reset */
4420 phba->fc_eventTag = 0;
4421 phba->link_events = 0;
4423 phba->pport->fc_myDID = 0;
4424 phba->pport->fc_prevDID = 0;
4427 /* Turn off parity checking and serr during the physical reset */
4428 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4431 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4433 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4435 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4437 /* Now toggle INITFF bit in the Host Control Register */
4438 writel(HC_INITFF, phba->HCregaddr);
4440 readl(phba->HCregaddr); /* flush */
4441 writel(0, phba->HCregaddr);
4442 readl(phba->HCregaddr); /* flush */
4444 /* Restore PCI cmd register */
4445 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4447 /* Initialize relevant SLI info */
4448 for (i = 0; i < psli->num_rings; i++) {
4449 pring = &psli->sli3_ring[i];
4451 pring->sli.sli3.rspidx = 0;
4452 pring->sli.sli3.next_cmdidx = 0;
4453 pring->sli.sli3.local_getidx = 0;
4454 pring->sli.sli3.cmdidx = 0;
4455 pring->missbufcnt = 0;
4458 phba->link_state = LPFC_WARM_START;
4463 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4464 * @phba: Pointer to HBA context object.
4466 * This function resets a SLI4 HBA. This function disables PCI layer parity
4467 * checking while it resets the device. The caller is not required to hold
4470 * This function returns 0 on success else returns negative error code.
4473 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4475 struct lpfc_sli *psli = &phba->sli;
4480 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4481 "0295 Reset HBA Data: x%x x%x x%x\n",
4482 phba->pport->port_state, psli->sli_flag,
4485 /* perform board reset */
4486 phba->fc_eventTag = 0;
4487 phba->link_events = 0;
4488 phba->pport->fc_myDID = 0;
4489 phba->pport->fc_prevDID = 0;
4491 spin_lock_irq(&phba->hbalock);
4492 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4493 phba->fcf.fcf_flag = 0;
4494 spin_unlock_irq(&phba->hbalock);
4496 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4497 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4498 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4502 /* Now physically reset the device */
4503 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4504 "0389 Performing PCI function reset!\n");
4506 /* Turn off parity checking and serr during the physical reset */
4507 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4509 "3205 PCI read Config failed\n");
4513 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4514 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4516 /* Perform FCoE PCI function reset before freeing queue memory */
4517 rc = lpfc_pci_function_reset(phba);
4519 /* Restore PCI cmd register */
4520 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4526 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4527 * @phba: Pointer to HBA context object.
4529 * This function is called in the SLI initialization code path to
4530 * restart the HBA. The caller is not required to hold any lock.
4531 * This function writes MBX_RESTART mailbox command to the SLIM and
4532 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4533 * function to free any pending commands. The function enables
4534 * POST only during the first initialization. The function returns zero.
4535 * The function does not guarantee that the MBX_RESTART mailbox command
4536 * completes before this function returns.
4539 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4542 struct lpfc_sli *psli;
4543 volatile uint32_t word0;
4544 void __iomem *to_slim;
4545 uint32_t hba_aer_enabled;
4547 spin_lock_irq(&phba->hbalock);
4549 /* Take PCIe device Advanced Error Reporting (AER) state */
4550 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4555 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4556 "0337 Restart HBA Data: x%x x%x\n",
4557 (phba->pport) ? phba->pport->port_state : 0,
4561 mb = (MAILBOX_t *) &word0;
4562 mb->mbxCommand = MBX_RESTART;
4565 lpfc_reset_barrier(phba);
4567 to_slim = phba->MBslimaddr;
4568 writel(*(uint32_t *) mb, to_slim);
4569 readl(to_slim); /* flush */
4571 /* Only skip post after fc_ffinit is completed */
4572 if (phba->pport && phba->pport->port_state)
4573 word0 = 1; /* This is really setting up word1 */
4575 word0 = 0; /* This is really setting up word1 */
4576 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4577 writel(*(uint32_t *) mb, to_slim);
4578 readl(to_slim); /* flush */
4580 lpfc_sli_brdreset(phba);
4582 phba->pport->stopped = 0;
4583 phba->link_state = LPFC_INIT_START;
4585 spin_unlock_irq(&phba->hbalock);
4587 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4588 psli->stats_start = ktime_get_seconds();
4590 /* Give the INITFF and Post time to settle. */
4593 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4594 if (hba_aer_enabled)
4595 pci_disable_pcie_error_reporting(phba->pcidev);
4597 lpfc_hba_down_post(phba);
4603 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4604 * @phba: Pointer to HBA context object.
4606 * This function is called in the SLI initialization code path to restart
4607 * a SLI4 HBA. The caller is not required to hold any lock.
4608 * At the end of the function, it calls lpfc_hba_down_post function to
4609 * free any pending commands.
4612 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4614 struct lpfc_sli *psli = &phba->sli;
4615 uint32_t hba_aer_enabled;
4619 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4620 "0296 Restart HBA Data: x%x x%x\n",
4621 phba->pport->port_state, psli->sli_flag);
4623 /* Take PCIe device Advanced Error Reporting (AER) state */
4624 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4626 rc = lpfc_sli4_brdreset(phba);
4628 phba->link_state = LPFC_HBA_ERROR;
4629 goto hba_down_queue;
4632 spin_lock_irq(&phba->hbalock);
4633 phba->pport->stopped = 0;
4634 phba->link_state = LPFC_INIT_START;
4636 spin_unlock_irq(&phba->hbalock);
4638 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4639 psli->stats_start = ktime_get_seconds();
4641 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4642 if (hba_aer_enabled)
4643 pci_disable_pcie_error_reporting(phba->pcidev);
4646 lpfc_hba_down_post(phba);
4647 lpfc_sli4_queue_destroy(phba);
4653 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4654 * @phba: Pointer to HBA context object.
4656 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4657 * API jump table function pointer from the lpfc_hba struct.
4660 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4662 return phba->lpfc_sli_brdrestart(phba);
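/*
 * Illustrative sketch (assumed wiring, based on the wrapper pattern
 * above): the lpfc_sli_brdready/lpfc_sli_brdrestart members are filled
 * in once during the driver's API jump-table setup according to the
 * device group, along the lines of:
 *
 *	phba->lpfc_sli_brdready   = lpfc_sli_brdready_s3;
 *	phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
 *
 * with the _s4 variants substituted for SLI4-capable adapters.
 */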
4666 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4667 * @phba: Pointer to HBA context object.
4669 * This function is called after a HBA restart to wait for successful
4670 * restart of the HBA. Successful restart of the HBA is indicated by
4671 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4672 * iterations, the function will restart the HBA again. The function returns
4673 * zero if HBA successfully restarted else returns negative error code.
4676 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4678 uint32_t status, i = 0;
4680 /* Read the HBA Host Status Register */
4681 if (lpfc_readl(phba->HSregaddr, &status))
4684 /* Check status register to see what current state is */
4686 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4688 /* Check every 10ms for 10 retries, then every 100ms for 90
4689 * retries, then every 1 sec for 50 retries, for a total of
4690 * ~60 seconds before resetting the board again, then check
4691 * every 1 sec for 50 more retries. Up to 60 seconds must be
4692 * allowed before the board is ready so that Falcon FIPS
4693 * zeroization can complete; any board reset in between
4694 * restarts the zeroization and further delays board readiness.
4695 */
4697 /* Adapter failed to init, timeout, status reg <status> */
4699 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4700 "0436 Adapter failed to init, "
4701 "timeout, status reg x%x, "
4702 "FW Data: A8 x%x AC x%x\n", status,
4703 readl(phba->MBslimaddr + 0xa8),
4704 readl(phba->MBslimaddr + 0xac));
4705 phba->link_state = LPFC_HBA_ERROR;
4709 /* Check to see if any errors occurred during init */
4710 if (status & HS_FFERM) {
4711 /* ERROR: During chipset initialization */
4712 /* Adapter failed to init, chipset, status reg <status> */
4714 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4715 "0437 Adapter failed to init, "
4716 "chipset, status reg x%x, "
4717 "FW Data: A8 x%x AC x%x\n", status,
4718 readl(phba->MBslimaddr + 0xa8),
4719 readl(phba->MBslimaddr + 0xac));
4720 phba->link_state = LPFC_HBA_ERROR;
4733 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4734 lpfc_sli_brdrestart(phba);
4736 /* Read the HBA Host Status Register */
4737 if (lpfc_readl(phba->HSregaddr, &status))
4741 /* Check to see if any errors occurred during init */
4742 if (status & HS_FFERM) {
4743 /* ERROR: During chipset initialization */
4744 /* Adapter failed to init, chipset, status reg <status> */
4745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4746 "0438 Adapter failed to init, chipset, status reg x%x, "
4748 "FW Data: A8 x%x AC x%x\n", status,
4749 readl(phba->MBslimaddr + 0xa8),
4750 readl(phba->MBslimaddr + 0xac));
4751 phba->link_state = LPFC_HBA_ERROR;
4755 /* Clear all interrupt enable conditions */
4756 writel(0, phba->HCregaddr);
4757 readl(phba->HCregaddr); /* flush */
4759 /* setup host attn register */
4760 writel(0xffffffff, phba->HAregaddr);
4761 readl(phba->HAregaddr); /* flush */
4766 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4768 * This function calculates and returns the number of HBQs required to be configured.
4772 lpfc_sli_hbq_count(void)
4774 return ARRAY_SIZE(lpfc_hbq_defs);
4778 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4780 * This function adds the number of hbq entries in every HBQ to get
4781 * the total number of hbq entries required for the HBA and returns the result.
4785 lpfc_sli_hbq_entry_count(void)
4787 int hbq_count = lpfc_sli_hbq_count();
4791 for (i = 0; i < hbq_count; ++i)
4792 count += lpfc_hbq_defs[i]->entry_count;
4797 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4799 * This function calculates amount of memory required for all hbq entries
4800 * to be configured and returns the total memory required.
4803 lpfc_sli_hbq_size(void)
4805 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
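/*
 * Worked example (hypothetical entry counts, for illustration only):
 * with two HBQs defined in lpfc_hbq_defs[] holding 256 and 128 entries,
 * lpfc_sli_hbq_entry_count() returns 384 and lpfc_sli_hbq_size()
 * returns 384 * sizeof(struct lpfc_hbq_entry) bytes.
 */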
4809 * lpfc_sli_hbq_setup - configure and initialize HBQs
4810 * @phba: Pointer to HBA context object.
4812 * This function is called during the SLI initialization to configure
4813 * all the HBQs and post buffers to the HBQ. The caller is not
4814 * required to hold any locks. This function will return zero if successful
4815 * else it will return negative error code.
4818 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4820 int hbq_count = lpfc_sli_hbq_count();
4824 uint32_t hbq_entry_index;
4826 /* Get a Mailbox buffer to setup mailbox
4827 * commands for HBA initialization */
4829 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4836 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4837 phba->link_state = LPFC_INIT_MBX_CMDS;
4838 phba->hbq_in_use = 1;
4840 hbq_entry_index = 0;
4841 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4842 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4843 phba->hbqs[hbqno].hbqPutIdx = 0;
4844 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4845 phba->hbqs[hbqno].entry_count =
4846 lpfc_hbq_defs[hbqno]->entry_count;
4847 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4848 hbq_entry_index, pmb);
4849 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4851 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4852 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4853 mbxStatus <status>, ring <num> */
4855 lpfc_printf_log(phba, KERN_ERR,
4856 LOG_SLI | LOG_VPORT,
4857 "1805 Adapter failed to init. "
4858 "Data: x%x x%x x%x\n",
4860 pmbox->mbxCommand, pmbox->mbxStatus, hbqno);
4862 phba->link_state = LPFC_HBA_ERROR;
4863 mempool_free(pmb, phba->mbox_mem_pool);
4867 phba->hbq_count = hbq_count;
4869 mempool_free(pmb, phba->mbox_mem_pool);
4871 /* Initially populate or replenish the HBQs */
4872 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4873 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4878 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4879 * @phba: Pointer to HBA context object.
4881 * This function is called during SLI4 initialization to configure the
4882 * ELS HBQ and post receive buffers to it. The caller is not
4883 * required to hold any locks. This function will return zero if successful
4884 * else it will return negative error code.
4887 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4889 phba->hbq_in_use = 1;
4890 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4891 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4892 phba->hbq_count = 1;
4893 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4894 /* Initially populate or replenish the HBQs */
4899 * lpfc_sli_config_port - Issue config port mailbox command
4900 * @phba: Pointer to HBA context object.
4901 * @sli_mode: sli mode - 2/3
4903 * This function is called by the sli initialization code path
4904 * to issue config_port mailbox command. This function restarts the
4905 * HBA firmware and issues a config_port mailbox command to configure
4906 * the SLI interface in the sli mode specified by sli_mode
4907 * variable. The caller is not required to hold any locks.
4908 * The function returns 0 if successful, else returns a negative error code.
4912 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4915 uint32_t resetcount = 0, rc = 0, done = 0;
4917 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4919 phba->link_state = LPFC_HBA_ERROR;
4923 phba->sli_rev = sli_mode;
4924 while (resetcount < 2 && !done) {
4925 spin_lock_irq(&phba->hbalock);
4926 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4927 spin_unlock_irq(&phba->hbalock);
4928 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4929 lpfc_sli_brdrestart(phba);
4930 rc = lpfc_sli_chipset_init(phba);
4934 spin_lock_irq(&phba->hbalock);
4935 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4936 spin_unlock_irq(&phba->hbalock);
4939 /* Call pre CONFIG_PORT mailbox command initialization. A
4940 * value of 0 means the call was successful. Any other
4941 * nonzero value is a failure, but if ERESTART is returned,
4942 * the driver may reset the HBA and try again.
4944 rc = lpfc_config_port_prep(phba);
4945 if (rc == -ERESTART) {
4946 phba->link_state = LPFC_LINK_UNKNOWN;
4951 phba->link_state = LPFC_INIT_MBX_CMDS;
4952 lpfc_config_port(phba, pmb);
4953 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4954 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4955 LPFC_SLI3_HBQ_ENABLED |
4956 LPFC_SLI3_CRP_ENABLED |
4957 LPFC_SLI3_DSS_ENABLED);
4958 if (rc != MBX_SUCCESS) {
4959 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4960 "0442 Adapter failed to init, mbxCmd x%x "
4961 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4962 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4963 spin_lock_irq(&phba->hbalock);
4964 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4965 spin_unlock_irq(&phba->hbalock);
4968 /* Allow asynchronous mailbox command to go through */
4969 spin_lock_irq(&phba->hbalock);
4970 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4971 spin_unlock_irq(&phba->hbalock);
4974 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4975 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4976 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4977 "3110 Port did not grant ASABT\n");
4982 goto do_prep_failed;
4984 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4985 if (!pmb->u.mb.un.varCfgPort.cMA) {
4987 goto do_prep_failed;
4989 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4990 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4991 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4992 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4993 phba->max_vpi : phba->max_vports;
4997 phba->fips_level = 0;
4998 phba->fips_spec_rev = 0;
4999 if (pmb->u.mb.un.varCfgPort.gdss) {
5000 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5001 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5002 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5003 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5004 "2850 Security Crypto Active. FIPS x%d "
5006 phba->fips_level, phba->fips_spec_rev);
5008 if (pmb->u.mb.un.varCfgPort.sec_err) {
5009 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5010 "2856 Config Port Security Crypto "
5012 pmb->u.mb.un.varCfgPort.sec_err);
5014 if (pmb->u.mb.un.varCfgPort.gerbm)
5015 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5016 if (pmb->u.mb.un.varCfgPort.gcrp)
5017 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5019 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5020 phba->port_gp = phba->mbox->us.s3_pgp.port;
5022 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5023 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5024 phba->cfg_enable_bg = 0;
5025 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5027 "0443 Adapter did not grant BlockGuard\n");
5032 phba->hbq_get = NULL;
5033 phba->port_gp = phba->mbox->us.s2.port;
5037 mempool_free(pmb, phba->mbox_mem_pool);
5043 * lpfc_sli_hba_setup - SLI initialization function
5044 * @phba: Pointer to HBA context object.
5046 * This function is the main SLI initialization function. This function
5047 * is called by the HBA initialization code, HBA reset code and HBA
5048 * error attention handler code. Caller is not required to hold any
5049 * locks. This function issues config_port mailbox command to configure
5050 * the SLI, setup iocb rings and HBQ rings. In the end the function
5051 * calls the config_port_post function to issue init_link mailbox
5052 * command and to start the discovery. The function will return zero
5053 * if successful, else it will return negative error code.
5056 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5062 switch (phba->cfg_sli_mode) {
5064 if (phba->cfg_enable_npiv) {
5065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5066 "1824 NPIV enabled: Override sli_mode "
5067 "parameter (%d) to auto (0).\n",
5068 phba->cfg_sli_mode);
5077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5078 "1819 Unrecognized sli_mode parameter: %d.\n",
5079 phba->cfg_sli_mode);
5083 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5085 rc = lpfc_sli_config_port(phba, mode);
5087 if (rc && phba->cfg_sli_mode == 3)
5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5089 "1820 Unable to select SLI-3. "
5090 "Not supported by adapter.\n");
5091 if (rc && mode != 2)
5092 rc = lpfc_sli_config_port(phba, 2);
5093 else if (rc && mode == 2)
5094 rc = lpfc_sli_config_port(phba, 3);
5096 goto lpfc_sli_hba_setup_error;
5098 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5099 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5100 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5102 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5103 "2709 This device supports "
5104 "Advanced Error Reporting (AER)\n");
5105 spin_lock_irq(&phba->hbalock);
5106 phba->hba_flag |= HBA_AER_ENABLED;
5107 spin_unlock_irq(&phba->hbalock);
5109 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5110 "2708 This device does not support "
5111 "Advanced Error Reporting (AER): %d\n",
5113 phba->cfg_aer_support = 0;
5117 if (phba->sli_rev == 3) {
5118 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5119 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5121 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5122 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5123 phba->sli3_options = 0;
5126 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5127 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5128 phba->sli_rev, phba->max_vpi);
5129 rc = lpfc_sli_ring_map(phba);
5132 goto lpfc_sli_hba_setup_error;
5134 /* Initialize VPIs. */
5135 if (phba->sli_rev == LPFC_SLI_REV3) {
5137 * The VPI bitmask and physical ID array are allocated
5138 * and initialized once only - at driver load. A port
5139 * reset doesn't need to reinitialize this memory.
5141 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5142 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5143 phba->vpi_bmask = kcalloc(longs,
5144 sizeof(unsigned long),
5146 if (!phba->vpi_bmask) {
5148 goto lpfc_sli_hba_setup_error;
5151 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5154 if (!phba->vpi_ids) {
5155 kfree(phba->vpi_bmask);
5157 goto lpfc_sli_hba_setup_error;
5159 for (i = 0; i < phba->max_vpi; i++)
5160 phba->vpi_ids[i] = i;
5165 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5166 rc = lpfc_sli_hbq_setup(phba);
5168 goto lpfc_sli_hba_setup_error;
5170 spin_lock_irq(&phba->hbalock);
5171 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5172 spin_unlock_irq(&phba->hbalock);
5174 rc = lpfc_config_port_post(phba);
5176 goto lpfc_sli_hba_setup_error;
5180 lpfc_sli_hba_setup_error:
5181 phba->link_state = LPFC_HBA_ERROR;
5182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5183 "0445 Firmware initialization failed\n");
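/*
 * Worked example for the VPI bitmap sizing in lpfc_sli_hba_setup()
 * above: with max_vpi = 100 and BITS_PER_LONG = 64, longs =
 * (100 + 64) / 64 = 2, so two unsigned longs (128 bits) cover VPIs
 * 0..100 with room to spare, while vpi_ids[] is allocated with
 * max_vpi + 1 = 101 identity-initialized slots.
 */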
5188 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5189 * @phba: Pointer to HBA context object.
5191 * This function issues a dump mailbox command to read config region
5192 * 23, parses the records in the region, and populates the driver's
5193 * internal list with the configuration parameters.
5196 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5198 LPFC_MBOXQ_t *mboxq;
5199 struct lpfc_dmabuf *mp;
5200 struct lpfc_mqe *mqe;
5201 uint32_t data_length;
5204 /* Program the default value of vlan_id and fc_map */
5205 phba->valid_vlan = 0;
5206 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5207 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5208 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5210 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5214 mqe = &mboxq->u.mqe;
5215 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5217 goto out_free_mboxq;
5220 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5221 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5223 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5224 "(%d):2571 Mailbox cmd x%x Status x%x "
5225 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5226 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5227 "CQ: x%x x%x x%x x%x\n",
5228 mboxq->vport ? mboxq->vport->vpi : 0,
5229 bf_get(lpfc_mqe_command, mqe),
5230 bf_get(lpfc_mqe_status, mqe),
5231 mqe->un.mb_words[0], mqe->un.mb_words[1],
5232 mqe->un.mb_words[2], mqe->un.mb_words[3],
5233 mqe->un.mb_words[4], mqe->un.mb_words[5],
5234 mqe->un.mb_words[6], mqe->un.mb_words[7],
5235 mqe->un.mb_words[8], mqe->un.mb_words[9],
5236 mqe->un.mb_words[10], mqe->un.mb_words[11],
5237 mqe->un.mb_words[12], mqe->un.mb_words[13],
5238 mqe->un.mb_words[14], mqe->un.mb_words[15],
5239 mqe->un.mb_words[16], mqe->un.mb_words[50],
5241 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5242 mboxq->mcqe.trailer);
5245 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5248 goto out_free_mboxq;
5250 data_length = mqe->un.mb_words[5];
5251 if (data_length > DMP_RGN23_SIZE) {
5252 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5255 goto out_free_mboxq;
5258 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5259 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5264 mempool_free(mboxq, phba->mbox_mem_pool);
5269 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5270 * @phba: pointer to lpfc hba data structure.
5271 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5272 * @vpd: pointer to the memory to hold resulting port vpd data.
5273 * @vpd_size: On input, the number of bytes allocated to @vpd.
5274 * On output, the number of data bytes in @vpd.
5276 * This routine executes a READ_REV SLI4 mailbox command. In
5277 * addition, this routine gets the port vpd data.
5281 * -ENOMEM - could not allocate memory.
5284 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5285 uint8_t *vpd, uint32_t *vpd_size)
5289 struct lpfc_dmabuf *dmabuf;
5290 struct lpfc_mqe *mqe;
5292 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5297 /* Get a DMA buffer for the vpd data resulting from the READ_REV mailbox. */
5300 dma_size = *vpd_size;
5301 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5302 &dmabuf->phys, GFP_KERNEL);
5303 if (!dmabuf->virt) {
5309 /* The SLI4 implementation of READ_REV conflicts at word1,
5310 * bits 31:16 and SLI4 adds vpd functionality not present
5311 * in SLI3. This code corrects the conflicts.
5312 */
5313 lpfc_read_rev(phba, mboxq);
5314 mqe = &mboxq->u.mqe;
5315 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5316 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5317 mqe->un.read_rev.word1 &= 0x0000FFFF;
5318 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5319 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5321 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5323 dma_free_coherent(&phba->pcidev->dev, dma_size,
5324 dmabuf->virt, dmabuf->phys);
5330 /* The available vpd length cannot be bigger than the
5331 * DMA buffer passed to the port. Catch the less-than
5332 * case and update the caller's size.
5333 */
5334 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5335 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5337 memcpy(vpd, dmabuf->virt, *vpd_size);
5339 dma_free_coherent(&phba->pcidev->dev, dma_size,
5340 dmabuf->virt, dmabuf->phys);
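/*
 * Illustrative call sketch (the 4096-byte size and parse_vpd_data()
 * consumer are assumptions for the example): the caller passes in a
 * scratch buffer and its size, and on success *vpd_size is trimmed to
 * the number of VPD bytes the port actually returned:
 *
 *	uint32_t vpd_size = 4096;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		parse_vpd_data(vpd, vpd_size);
 */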
5346 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5347 * @phba: pointer to lpfc hba data structure.
5349 * This routine retrieves the controller attributes (link type, link
5350 * number, and BIOS version) of the SLI4 device this PCI function is
5351 * attached to.
5354 * otherwise - failed to retrieve controller attributes
5357 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5359 LPFC_MBOXQ_t *mboxq;
5360 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5361 struct lpfc_controller_attribute *cntl_attr;
5362 void *virtaddr = NULL;
5363 uint32_t alloclen, reqlen;
5364 uint32_t shdr_status, shdr_add_status;
5365 union lpfc_sli4_cfg_shdr *shdr;
5368 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5372 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5373 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5374 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5375 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5376 LPFC_SLI4_MBX_NEMBED);
5378 if (alloclen < reqlen) {
5379 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5380 "3084 Allocated DMA memory size (%d) is "
5381 "less than the requested DMA memory size "
5382 "(%d)\n", alloclen, reqlen);
5384 goto out_free_mboxq;
5386 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5387 virtaddr = mboxq->sge_array->addr[0];
5388 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5389 shdr = &mbx_cntl_attr->cfg_shdr;
5390 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5391 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5392 if (shdr_status || shdr_add_status || rc) {
5393 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5394 "3085 Mailbox x%x (x%x/x%x) failed, "
5395 "rc:x%x, status:x%x, add_status:x%x\n",
5396 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5397 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5398 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5399 rc, shdr_status, shdr_add_status);
5401 goto out_free_mboxq;
5404 cntl_attr = &mbx_cntl_attr->cntl_attr;
5405 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5406 phba->sli4_hba.lnk_info.lnk_tp =
5407 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5408 phba->sli4_hba.lnk_info.lnk_no =
5409 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5411 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5412 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5413 sizeof(phba->BIOSVersion));
5415 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5416 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5417 phba->sli4_hba.lnk_info.lnk_tp,
5418 phba->sli4_hba.lnk_info.lnk_no,
5421 if (rc != MBX_TIMEOUT) {
5422 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5423 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5425 mempool_free(mboxq, phba->mbox_mem_pool);
5431 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5432 * @phba: pointer to lpfc hba data structure.
5434 * This routine retrieves the physical port name of the SLI4 device
5435 * this PCI function is attached to.
5439 * otherwise - failed to retrieve physical port name
5442 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5444 LPFC_MBOXQ_t *mboxq;
5445 struct lpfc_mbx_get_port_name *get_port_name;
5446 uint32_t shdr_status, shdr_add_status;
5447 union lpfc_sli4_cfg_shdr *shdr;
5448 char cport_name = 0;
5451 /* We assume nothing at this point */
5452 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5453 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5455 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5458 /* obtain link type and link number via READ_CONFIG */
5459 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5460 lpfc_sli4_read_config(phba);
5461 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5462 goto retrieve_ppname;
5464 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5465 rc = lpfc_sli4_get_ctl_attr(phba);
5467 goto out_free_mboxq;
5470 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5471 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5472 sizeof(struct lpfc_mbx_get_port_name) -
5473 sizeof(struct lpfc_sli4_cfg_mhdr),
5474 LPFC_SLI4_MBX_EMBED);
5475 get_port_name = &mboxq->u.mqe.un.get_port_name;
5476 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5477 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5478 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5479 phba->sli4_hba.lnk_info.lnk_tp);
5480 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5481 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5482 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5483 if (shdr_status || shdr_add_status || rc) {
5484 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5485 "3087 Mailbox x%x (x%x/x%x) failed: "
5486 "rc:x%x, status:x%x, add_status:x%x\n",
5487 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5488 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5489 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5490 rc, shdr_status, shdr_add_status);
5492 goto out_free_mboxq;
5494 switch (phba->sli4_hba.lnk_info.lnk_no) {
5495 case LPFC_LINK_NUMBER_0:
5496 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5497 &get_port_name->u.response);
5498 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5500 case LPFC_LINK_NUMBER_1:
5501 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5502 &get_port_name->u.response);
5503 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5505 case LPFC_LINK_NUMBER_2:
5506 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5507 &get_port_name->u.response);
5508 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5510 case LPFC_LINK_NUMBER_3:
5511 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5512 &get_port_name->u.response);
5513 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5519 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5520 phba->Port[0] = cport_name;
5521 phba->Port[1] = '\0';
5522 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5523 "3091 SLI get port name: %s\n", phba->Port);
5527 if (rc != MBX_TIMEOUT) {
5528 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5529 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5531 mempool_free(mboxq, phba->mbox_mem_pool);
5537 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5538 * @phba: pointer to lpfc hba data structure.
5540 * This routine is called to explicitly arm the SLI4 device's completion and event queues.
5544 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5547 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5548 struct lpfc_sli4_hdw_queue *qp;
5549 struct lpfc_queue *eq;
5551 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5552 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5553 if (sli4_hba->nvmels_cq)
5554 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5557 if (sli4_hba->hdwq) {
5558 /* Loop thru all Hardware Queues */
5559 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5560 qp = &sli4_hba->hdwq[qidx];
5561 /* ARM the corresponding CQ */
5562 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5566 /* Loop thru all IRQ vectors */
5567 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5568 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5569 /* ARM the corresponding EQ */
5570 sli4_hba->sli4_write_eq_db(phba, eq,
5571 0, LPFC_QUEUE_REARM);
5575 if (phba->nvmet_support) {
5576 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5577 sli4_hba->sli4_write_cq_db(phba,
5578 sli4_hba->nvmet_cqset[qidx], 0, LPFC_QUEUE_REARM);
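/*
 * Note on LPFC_QUEUE_REARM: the rearm flavor of the doorbell write asks
 * the port to raise a fresh interrupt when the next entry is posted to
 * that CQ or EQ, whereas a doorbell write without the arm bit only
 * acknowledges consumed entries. Rearming a single EQ by hand uses the
 * same helper seen above:
 *
 *	sli4_hba->sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
 */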
5585 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5586 * @phba: Pointer to HBA context object.
5587 * @type: The resource extent type.
5588 * @extnt_count: buffer to hold port available extent count.
5589 * @extnt_size: buffer to hold element count per extent.
5591 * This function calls the port and retrieves the number of available
5592 * extents and their size for a particular extent type.
5594 * Returns: 0 if successful. Nonzero otherwise.
5597 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5598 uint16_t *extnt_count, uint16_t *extnt_size)
5603 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5606 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5610 /* Find out how many extents are available for this resource type */
5611 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5612 sizeof(struct lpfc_sli4_cfg_mhdr));
5613 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5614 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5615 length, LPFC_SLI4_MBX_EMBED);
5617 /* Send an extents count of 0 - the GET doesn't use it. */
5618 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5619 LPFC_SLI4_MBX_EMBED);
5625 if (!phba->sli4_hba.intr_enable)
5626 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5628 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5629 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5636 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5637 if (bf_get(lpfc_mbox_hdr_status,
5638 &rsrc_info->header.cfg_shdr.response)) {
5639 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5640 "2930 Failed to get resource extents "
5641 "Status 0x%x Add'l Status 0x%x\n",
5642 bf_get(lpfc_mbox_hdr_status,
5643 &rsrc_info->header.cfg_shdr.response),
5644 bf_get(lpfc_mbox_hdr_add_status,
5645 &rsrc_info->header.cfg_shdr.response));
5650 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5652 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5655 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5656 "3162 Retrieved extents type-%d from port: count:%d, "
5657 "size:%d\n", type, *extnt_count, *extnt_size);
5660 mempool_free(mbox, phba->mbox_mem_pool);
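/*
 * Example usage sketch (total_xri is a hypothetical local): ask the
 * port how many XRI extents it can provide and how many elements each
 * extent holds:
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size))
 *		total_xri = ext_cnt * ext_size;
 */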
5665 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5666 * @phba: Pointer to HBA context object.
5667 * @type: The extent type to check.
5669 * This function reads the current available extents from the port and checks
5670 * if the extent count or extent size has changed since the last access.
5671 * Callers use this routine after a port reset to understand if there is an
5672 * extent reprovisioning requirement.
5675 * -Error: error indicates problem.
5676 * 1: Extent count or size has changed.
5680 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5682 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5683 uint16_t size_diff, rsrc_ext_size;
5685 struct lpfc_rsrc_blks *rsrc_entry;
5686 struct list_head *rsrc_blk_list = NULL;
5690 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5697 case LPFC_RSC_TYPE_FCOE_RPI:
5698 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5700 case LPFC_RSC_TYPE_FCOE_VPI:
5701 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5703 case LPFC_RSC_TYPE_FCOE_XRI:
5704 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5706 case LPFC_RSC_TYPE_FCOE_VFI:
5707 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5713 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5715 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5719 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5726 * lpfc_sli4_cfg_post_extnts - Issue the allocate-extents mailbox command
5727 * @phba: Pointer to HBA context object.
5728 * @extnt_cnt: number of available extents to request.
5729 * @type: the extent type (rpi, xri, vfi, vpi).
5730 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5731 * @mbox: pointer to the caller's allocated mailbox structure.
5733 * This function executes the extents allocation request. It also
5734 * takes care of the amount of memory needed to allocate or get the
5735 * allocated extents. It is the caller's responsibility to evaluate the response.
5739 * -Error: Error value describes the condition found.
5743 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5744 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5749 uint32_t alloc_len, mbox_tmo;
5751 /* Calculate the total requested length of the dma memory */
5752 req_len = extnt_cnt * sizeof(uint16_t);
5755 /* Calculate the size of an embedded mailbox. The uint32_t
5756 * accounts for the extents-specific word. */
5758 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5762 /* Presume the allocation and response will fit into an embedded
5763 * mailbox. If not true, reconfigure to a non-embedded mailbox. */
5765 *emb = LPFC_SLI4_MBX_EMBED;
5766 if (req_len > emb_len) {
5767 req_len = extnt_cnt * sizeof(uint16_t) +
5768 sizeof(union lpfc_sli4_cfg_shdr) +
5770 *emb = LPFC_SLI4_MBX_NEMBED;
5773 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5774 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5776 if (alloc_len < req_len) {
5777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5778 "2982 Allocated DMA memory size (x%x) is "
5779 "less than the requested DMA memory "
5780 "size (x%x)\n", alloc_len, req_len);
5783 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5787 if (!phba->sli4_hba.intr_enable)
5788 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5790 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5791 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
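/*
 * Worked example (all sizes hypothetical, for illustration only): if
 * sizeof(MAILBOX_t) were 256 and sizeof(struct mbox_header) were 8,
 * the embedded payload room would be 256 - 8 - sizeof(uint32_t) = 244
 * bytes. A request for 64 extent ids (64 * sizeof(uint16_t) = 128
 * bytes) fits embedded, while a request for 256 ids (512 bytes)
 * exceeds emb_len and switches the command to LPFC_SLI4_MBX_NEMBED
 * with the larger non-embedded request length computed above.
 */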
5800 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5801 * @phba: Pointer to HBA context object.
5802 * @type: The resource extent type to allocate.
5804 * This function allocates the number of elements for the specified
5808 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5811 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5812 uint16_t rsrc_id, rsrc_start, j, k;
5815 unsigned long longs;
5816 unsigned long *bmask;
5817 struct lpfc_rsrc_blks *rsrc_blks;
5820 struct lpfc_id_range *id_array = NULL;
5821 void *virtaddr = NULL;
5822 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5823 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5824 struct list_head *ext_blk_list;
5826 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5832 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5833 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5834 "3009 No available Resource Extents "
5835 "for resource type 0x%x: Count: 0x%x, "
5836 "Size 0x%x\n", type, rsrc_cnt,
5841 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5842 "2903 Post resource extents type-0x%x: "
5843 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5845 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5849 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5856 /* Figure out where the response is located. Then get local pointers
5857 * to the response data. The port does not guarantee a response for
5858 * the full requested extent count, so update the local variable with
5859 * the allocated count from the port.
5860 */
5861 if (emb == LPFC_SLI4_MBX_EMBED) {
5862 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5863 id_array = &rsrc_ext->u.rsp.id[0];
5864 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5866 virtaddr = mbox->sge_array->addr[0];
5867 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5868 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5869 id_array = &n_rsrc->id;
5872 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5873 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5876 /* Based on the resource size and count, correct the base and max resource values. */
5879 length = sizeof(struct lpfc_rsrc_blks);
5881 case LPFC_RSC_TYPE_FCOE_RPI:
5882 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5883 sizeof(unsigned long),
5885 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5889 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5892 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5893 kfree(phba->sli4_hba.rpi_bmask);
5899 /* The next_rpi was initialized with the maximum available
5900 * count but the port may allocate a smaller number. Catch
5901 * that case and update the next_rpi.
5902 */
5903 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5905 /* Initialize local ptrs for common extent processing later. */
5906 bmask = phba->sli4_hba.rpi_bmask;
5907 ids = phba->sli4_hba.rpi_ids;
5908 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5910 case LPFC_RSC_TYPE_FCOE_VPI:
5911 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5913 if (unlikely(!phba->vpi_bmask)) {
5917 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5919 if (unlikely(!phba->vpi_ids)) {
5920 kfree(phba->vpi_bmask);
5925 /* Initialize local ptrs for common extent processing later. */
5926 bmask = phba->vpi_bmask;
5927 ids = phba->vpi_ids;
5928 ext_blk_list = &phba->lpfc_vpi_blk_list;
5930 case LPFC_RSC_TYPE_FCOE_XRI:
5931 phba->sli4_hba.xri_bmask = kcalloc(longs,
5932 sizeof(unsigned long),
5934 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5938 phba->sli4_hba.max_cfg_param.xri_used = 0;
5939 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5942 if (unlikely(!phba->sli4_hba.xri_ids)) {
5943 kfree(phba->sli4_hba.xri_bmask);
5948 /* Initialize local ptrs for common extent processing later. */
5949 bmask = phba->sli4_hba.xri_bmask;
5950 ids = phba->sli4_hba.xri_ids;
5951 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5953 case LPFC_RSC_TYPE_FCOE_VFI:
5954 phba->sli4_hba.vfi_bmask = kcalloc(longs,
5955 sizeof(unsigned long),
5957 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5961 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
5964 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5965 kfree(phba->sli4_hba.vfi_bmask);
5970 /* Initialize local ptrs for common extent processing later. */
5971 bmask = phba->sli4_hba.vfi_bmask;
5972 ids = phba->sli4_hba.vfi_ids;
5973 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5976 /* Unsupported Opcode. Fail call. */
5980 ext_blk_list = NULL;
5985 /* Complete initializing the extent configuration with the
5986 * allocated ids assigned to this function. The bitmask serves
5987 * as an index into the array and manages the available ids. The
5988 * array just stores the ids communicated to the port via the wqes.
5989 */
5990 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5992 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5995 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5998 rsrc_blks = kzalloc(length, GFP_KERNEL);
5999 if (unlikely(!rsrc_blks)) {
6005 rsrc_blks->rsrc_start = rsrc_id;
6006 rsrc_blks->rsrc_size = rsrc_size;
6007 list_add_tail(&rsrc_blks->list, ext_blk_list);
6008 rsrc_start = rsrc_id;
6009 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6010 phba->sli4_hba.io_xri_start = rsrc_start +
6011 lpfc_sli4_get_iocb_cnt(phba);
6014 while (rsrc_id < (rsrc_start + rsrc_size)) {
6019 /* Entire word processed. Get next word. */
6024 lpfc_sli4_mbox_cmd_free(phba, mbox);
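/*
 * Illustrative sketch of the bitmask/id-array pairing built above
 * (max_ids names the bitmap length for the sketch): if the port
 * returned one RPI extent starting at id 100 with rsrc_size 4, then
 * ids[0..3] = {100, 101, 102, 103} and bits 0..3 of bmask record which
 * of those ids are in use. An allocator over this layout does, in
 * effect:
 *
 *	idx = find_first_zero_bit(bmask, max_ids);
 *	if (idx < max_ids) {
 *		set_bit(idx, bmask);
 *		rpi = ids[idx];
 *	}
 */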
6031 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6032 * @phba: Pointer to HBA context object.
6033 * @type: the extent's type.
6035 * This function deallocates all extents of a particular resource type.
6036 * SLI4 does not allow for deallocating a particular extent range. It
6037 * is the caller's responsibility to release all kernel memory resources.
6040 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6043 uint32_t length, mbox_tmo = 0;
6045 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6046 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6048 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6053 * This function sends an embedded mailbox because it only sends the
6054 * resource type. All extents of this type are released by the
6057 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6058 sizeof(struct lpfc_sli4_cfg_mhdr));
6059 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6060 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6061 length, LPFC_SLI4_MBX_EMBED);
6063 /* Send an extents count of 0 - the dealloc doesn't use it. */
6064 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6065 LPFC_SLI4_MBX_EMBED);
6070 if (!phba->sli4_hba.intr_enable)
6071 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6073 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6074 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6081 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6082 if (bf_get(lpfc_mbox_hdr_status,
6083 &dealloc_rsrc->header.cfg_shdr.response)) {
6084 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6085 "2919 Failed to release resource extents "
6086 "for type %d - Status 0x%x Add'l Status 0x%x. "
6087 "Resource memory not released.\n",
6089 bf_get(lpfc_mbox_hdr_status,
6090 &dealloc_rsrc->header.cfg_shdr.response),
6091 bf_get(lpfc_mbox_hdr_add_status,
6092 &dealloc_rsrc->header.cfg_shdr.response));
6097 /* Release kernel memory resources for the specific type. */
6099 case LPFC_RSC_TYPE_FCOE_VPI:
6100 kfree(phba->vpi_bmask);
6101 kfree(phba->vpi_ids);
6102 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6103 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6104 &phba->lpfc_vpi_blk_list, list) {
6105 list_del_init(&rsrc_blk->list);
6108 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6110 case LPFC_RSC_TYPE_FCOE_XRI:
6111 kfree(phba->sli4_hba.xri_bmask);
6112 kfree(phba->sli4_hba.xri_ids);
6113 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6114 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6115 list_del_init(&rsrc_blk->list);
6119 case LPFC_RSC_TYPE_FCOE_VFI:
6120 kfree(phba->sli4_hba.vfi_bmask);
6121 kfree(phba->sli4_hba.vfi_ids);
6122 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6123 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6124 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6125 list_del_init(&rsrc_blk->list);
6129 case LPFC_RSC_TYPE_FCOE_RPI:
6130 /* RPI bitmask and physical id array are cleaned up earlier. */
6131 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6132 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6133 list_del_init(&rsrc_blk->list);
6141 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6144 mempool_free(mbox, phba->mbox_mem_pool);
6149 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6154 len = sizeof(struct lpfc_mbx_set_feature) -
6155 sizeof(struct lpfc_sli4_cfg_mhdr);
6156 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6157 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6158 LPFC_SLI4_MBX_EMBED);
6161 case LPFC_SET_UE_RECOVERY:
6162 bf_set(lpfc_mbx_set_feature_UER,
6163 &mbox->u.mqe.un.set_feature, 1);
6164 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6165 mbox->u.mqe.un.set_feature.param_len = 8;
6167 case LPFC_SET_MDS_DIAGS:
6168 bf_set(lpfc_mbx_set_feature_mds,
6169 &mbox->u.mqe.un.set_feature, 1);
6170 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6171 &mbox->u.mqe.un.set_feature, 1);
6172 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6173 mbox->u.mqe.un.set_feature.param_len = 8;
6181  * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
6182 * @phba: Pointer to HBA context object.
6184  * Disable FW logging into host memory on the adapter. This must
6185  * be done before reading the logs from host memory.
6188 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6190 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6192 ras_fwlog->ras_active = false;
6194 /* Disable FW logging to host memory */
6195 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6196 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6200 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6201 * @phba: Pointer to HBA context object.
6203 * This function is called to free memory allocated for RAS FW logging
6204 * support in the driver.
6207 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6209 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6210 struct lpfc_dmabuf *dmabuf, *next;
6212 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6213 list_for_each_entry_safe(dmabuf, next,
6214 &ras_fwlog->fwlog_buff_list,
6216 list_del(&dmabuf->list);
6217 dma_free_coherent(&phba->pcidev->dev,
6218 LPFC_RAS_MAX_ENTRY_SIZE,
6219 dmabuf->virt, dmabuf->phys);
6224 if (ras_fwlog->lwpd.virt) {
6225 dma_free_coherent(&phba->pcidev->dev,
6226 sizeof(uint32_t) * 2,
6227 ras_fwlog->lwpd.virt,
6228 ras_fwlog->lwpd.phys);
6229 ras_fwlog->lwpd.virt = NULL;
6232 ras_fwlog->ras_active = false;
6236  * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support
6237 * @phba: Pointer to HBA context object.
6238 * @fwlog_buff_count: Count of buffers to be created.
6240  * This routine allocates DMA memory for the Log Write Position Data
6241  * (LWPD) and for the buffers posted to the adapter to hold the FW log.
6242  * Buffer count is calculated based on the module param ras_fwlog_buffsize.
6243  * The size of each buffer posted to the FW is 64K.
6247 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6248 uint32_t fwlog_buff_count)
6250 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6251 struct lpfc_dmabuf *dmabuf;
6254 /* Initialize List */
6255 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6257 /* Allocate memory for the LWPD */
6258 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6259 sizeof(uint32_t) * 2,
6260 &ras_fwlog->lwpd.phys,
6262 if (!ras_fwlog->lwpd.virt) {
6263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6264 "6185 LWPD Memory Alloc Failed\n");
6269 ras_fwlog->fw_buffcount = fwlog_buff_count;
6270 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6271 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6275 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6276 "6186 Memory Alloc failed FW logging");
6280 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6281 LPFC_RAS_MAX_ENTRY_SIZE,
6282 &dmabuf->phys, GFP_KERNEL);
6283 if (!dmabuf->virt) {
6286 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6287 "6187 DMA Alloc Failed FW logging");
6290 dmabuf->buffer_tag = i;
6291 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6296 lpfc_sli4_ras_dma_free(phba);
6302  * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
6303  * @phba: pointer to lpfc hba data structure.
6304  * @pmb: pointer to the driver internal queue element for mailbox command.
6306 * Completion handler for driver's RAS MBX command to the device.
6309 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6312 union lpfc_sli4_cfg_shdr *shdr;
6313 uint32_t shdr_status, shdr_add_status;
6314 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6318 shdr = (union lpfc_sli4_cfg_shdr *)
6319 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6320 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6321 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6323 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6325 "6188 FW LOG mailbox "
6326 "completed with status x%x add_status x%x,"
6327 " mbx status x%x\n",
6328 shdr_status, shdr_add_status, mb->mbxStatus);
6330 ras_fwlog->ras_hwsupport = false;
6334 ras_fwlog->ras_active = true;
6335 mempool_free(pmb, phba->mbox_mem_pool);
6340 /* Free RAS DMA memory */
6341 lpfc_sli4_ras_dma_free(phba);
6342 mempool_free(pmb, phba->mbox_mem_pool);
6346  * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
6347 * @phba: pointer to lpfc hba data structure.
6348 * @fwlog_level: Logging verbosity level.
6349 * @fwlog_enable: Enable/Disable logging.
6351 * Initialize memory and post mailbox command to enable FW logging in host
6355 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6356 uint32_t fwlog_level,
6357 uint32_t fwlog_enable)
6359 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6360 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6361 struct lpfc_dmabuf *dmabuf;
6363 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6366 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6367 phba->cfg_ras_fwlog_buffsize);
6368 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
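	/*
	 * Worked example (values are illustrative, not asserted): with
	 * 64KB entries (LPFC_RAS_MAX_ENTRY_SIZE) and a hypothetical
	 * LPFC_RAS_MIN_BUFF_POST_SIZE of 256KB, cfg_ras_fwlog_buffsize = 1
	 * would yield fwlog_buffsize = 256KB and fwlog_entry_count = 4,
	 * i.e. four 64KB buffers posted to the firmware.
	 */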
6371	 * If re-enabling FW logging support, reuse the earlier allocated
6372	 * DMA buffers when posting the MBX command.
6374 if (!ras_fwlog->lwpd.virt) {
6375 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6377 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6378 "6189 FW Log Memory Allocation Failed");
6383 /* Setup Mailbox command */
6384 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6387 "6190 RAS MBX Alloc Failed");
6392 ras_fwlog->fw_loglevel = fwlog_level;
6393 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6394 sizeof(struct lpfc_sli4_cfg_mhdr));
6396 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6397 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6398 len, LPFC_SLI4_MBX_EMBED);
6400 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6401 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6403 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6404 ras_fwlog->fw_loglevel);
6405 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6406 ras_fwlog->fw_buffcount);
6407 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6408 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6410 /* Update DMA buffer address */
6411 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6412 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6414 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6415 putPaddrLow(dmabuf->phys);
6417 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6418 putPaddrHigh(dmabuf->phys);
6421	/* Update LWPD address */
6422 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6423 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6425 mbox->vport = phba->pport;
6426 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6428 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6430 if (rc == MBX_NOT_FINISHED) {
6431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6432 "6191 FW-Log Mailbox failed. "
6433 "status %d mbxStatus : x%x", rc,
6434 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6435 mempool_free(mbox, phba->mbox_mem_pool);
6442 lpfc_sli4_ras_dma_free(phba);
6448 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6449 * @phba: Pointer to HBA context object.
6451 * Check if RAS is supported on the adapter and initialize it.
6454 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6456	/* Check whether the RAS FW Log needs to be enabled */
6457 if (lpfc_check_fwlog_support(phba))
6460 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6461 LPFC_RAS_ENABLE_LOGGING);
6465 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6466 * @phba: Pointer to HBA context object.
6468 * This function allocates all SLI4 resource identifiers.
6471 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6473 int i, rc, error = 0;
6474 uint16_t count, base;
6475 unsigned long longs;
6477 if (!phba->sli4_hba.rpi_hdrs_in_use)
6478 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6479 if (phba->sli4_hba.extents_in_use) {
6481 * The port supports resource extents. The XRI, VPI, VFI, RPI
6482 * resource extent count must be read and allocated before
6483 * provisioning the resource id arrays.
6485 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6486 LPFC_IDX_RSRC_RDY) {
6488 * Extent-based resources are set - the driver could
6489 * be in a port reset. Figure out if any corrective
6490 * actions need to be taken.
6492 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6493 LPFC_RSC_TYPE_FCOE_VFI);
6496 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6497 LPFC_RSC_TYPE_FCOE_VPI);
6500 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6501 LPFC_RSC_TYPE_FCOE_XRI);
6504 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6505 LPFC_RSC_TYPE_FCOE_RPI);
6510 * It's possible that the number of resources
6511 * provided to this port instance changed between
6512 * resets. Detect this condition and reallocate
6513 * resources. Otherwise, there is no action.
6516 lpfc_printf_log(phba, KERN_INFO,
6517 LOG_MBOX | LOG_INIT,
6518 "2931 Detected extent resource "
6519 "change. Reallocating all "
6521 rc = lpfc_sli4_dealloc_extent(phba,
6522 LPFC_RSC_TYPE_FCOE_VFI);
6523 rc = lpfc_sli4_dealloc_extent(phba,
6524 LPFC_RSC_TYPE_FCOE_VPI);
6525 rc = lpfc_sli4_dealloc_extent(phba,
6526 LPFC_RSC_TYPE_FCOE_XRI);
6527 rc = lpfc_sli4_dealloc_extent(phba,
6528 LPFC_RSC_TYPE_FCOE_RPI);
6533 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6537 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6541 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6545 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6548 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6553 * The port does not support resource extents. The XRI, VPI,
6554 * VFI, RPI resource ids were determined from READ_CONFIG.
6555 * Just allocate the bitmasks and provision the resource id
6556 * arrays. If a port reset is active, the resources don't
6557 * need any action - just exit.
6559 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6560 LPFC_IDX_RSRC_RDY) {
6561 lpfc_sli4_dealloc_resource_identifiers(phba);
6562 lpfc_sli4_remove_rpis(phba);
6565 count = phba->sli4_hba.max_cfg_param.max_rpi;
6567 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6568 "3279 Invalid provisioning of "
6573 base = phba->sli4_hba.max_cfg_param.rpi_base;
6574 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6575 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6576 sizeof(unsigned long),
6578 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6582 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6584 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6586 goto free_rpi_bmask;
6589 for (i = 0; i < count; i++)
6590 phba->sli4_hba.rpi_ids[i] = base + i;
6593 count = phba->sli4_hba.max_cfg_param.max_vpi;
6595 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6596 "3280 Invalid provisioning of "
6601 base = phba->sli4_hba.max_cfg_param.vpi_base;
6602 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6603 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6605 if (unlikely(!phba->vpi_bmask)) {
6609 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6611 if (unlikely(!phba->vpi_ids)) {
6613 goto free_vpi_bmask;
6616 for (i = 0; i < count; i++)
6617 phba->vpi_ids[i] = base + i;
6620 count = phba->sli4_hba.max_cfg_param.max_xri;
6622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6623 "3281 Invalid provisioning of "
6628 base = phba->sli4_hba.max_cfg_param.xri_base;
6629 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6630 phba->sli4_hba.xri_bmask = kcalloc(longs,
6631 sizeof(unsigned long),
6633 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6637 phba->sli4_hba.max_cfg_param.xri_used = 0;
6638 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6640 if (unlikely(!phba->sli4_hba.xri_ids)) {
6642 goto free_xri_bmask;
6645 for (i = 0; i < count; i++)
6646 phba->sli4_hba.xri_ids[i] = base + i;
6649 count = phba->sli4_hba.max_cfg_param.max_vfi;
6651 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6652 "3282 Invalid provisioning of "
6657 base = phba->sli4_hba.max_cfg_param.vfi_base;
6658 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6659 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6660 sizeof(unsigned long),
6662 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6666 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6668 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6670 goto free_vfi_bmask;
6673 for (i = 0; i < count; i++)
6674 phba->sli4_hba.vfi_ids[i] = base + i;
6677 * Mark all resources ready. An HBA reset doesn't need
6678 * to reset the initialization.
6680 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6686 kfree(phba->sli4_hba.vfi_bmask);
6687 phba->sli4_hba.vfi_bmask = NULL;
6689 kfree(phba->sli4_hba.xri_ids);
6690 phba->sli4_hba.xri_ids = NULL;
6692 kfree(phba->sli4_hba.xri_bmask);
6693 phba->sli4_hba.xri_bmask = NULL;
6695 kfree(phba->vpi_ids);
6696 phba->vpi_ids = NULL;
6698 kfree(phba->vpi_bmask);
6699 phba->vpi_bmask = NULL;
6701 kfree(phba->sli4_hba.rpi_ids);
6702 phba->sli4_hba.rpi_ids = NULL;
6704 kfree(phba->sli4_hba.rpi_bmask);
6705 phba->sli4_hba.rpi_bmask = NULL;
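/*
 * Note (sketch): the bitmask sizing used throughout this routine,
 *
 *	longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
 *
 * is the standard round-up to whole longs and matches the kernel's
 * BITS_TO_LONGS(count) helper.  E.g. count = 100 on a 64-bit host
 * gives longs = 2, i.e. 128 bits of bitmask for 100 ids.
 */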
6711 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6712 * @phba: Pointer to HBA context object.
6714  * This function deallocates all allocated SLI4 resource identifiers.
6718 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6720 if (phba->sli4_hba.extents_in_use) {
6721 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6722 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6723 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6724 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6726 kfree(phba->vpi_bmask);
6727 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6728 kfree(phba->vpi_ids);
6729 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6730 kfree(phba->sli4_hba.xri_bmask);
6731 kfree(phba->sli4_hba.xri_ids);
6732 kfree(phba->sli4_hba.vfi_bmask);
6733 kfree(phba->sli4_hba.vfi_ids);
6734 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6735 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6742 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6743 * @phba: Pointer to HBA context object.
6744 * @type: The resource extent type.
6745  * @extnt_cnt: buffer to hold the port's extent count response
6746 * @extnt_size: buffer to hold port extent size response.
6748 * This function calls the port to read the host allocated extents
6749 * for a particular type.
6752 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6753 uint16_t *extnt_cnt, uint16_t *extnt_size)
6757 uint16_t curr_blks = 0;
6758 uint32_t req_len, emb_len;
6759 uint32_t alloc_len, mbox_tmo;
6760 struct list_head *blk_list_head;
6761 struct lpfc_rsrc_blks *rsrc_blk;
6763 void *virtaddr = NULL;
6764 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6765 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6766 union lpfc_sli4_cfg_shdr *shdr;
6769 case LPFC_RSC_TYPE_FCOE_VPI:
6770 blk_list_head = &phba->lpfc_vpi_blk_list;
6772 case LPFC_RSC_TYPE_FCOE_XRI:
6773 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6775 case LPFC_RSC_TYPE_FCOE_VFI:
6776 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6778 case LPFC_RSC_TYPE_FCOE_RPI:
6779 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6785	/* Count the number of extents currently allocated for this type. */
6786 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6787 if (curr_blks == 0) {
6789 * The GET_ALLOCATED mailbox does not return the size,
6790 * just the count. The size should be just the size
6791 * stored in the current allocated block and all sizes
6792		 * for an extent type are the same, so set the return
6795 *extnt_size = rsrc_blk->rsrc_size;
6801	 * Calculate the size of an embedded mailbox. The uint32_t
6802	 * accounts for the extent-specific word.
6804 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6808 * Presume the allocation and response will fit into an embedded
6809 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6811 emb = LPFC_SLI4_MBX_EMBED;
6813 if (req_len > emb_len) {
6814 req_len = curr_blks * sizeof(uint16_t) +
6815 sizeof(union lpfc_sli4_cfg_shdr) +
6817 emb = LPFC_SLI4_MBX_NEMBED;
6820 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6823 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6825 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6826 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6828 if (alloc_len < req_len) {
6829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6830 "2983 Allocated DMA memory size (x%x) is "
6831 "less than the requested DMA memory "
6832 "size (x%x)\n", alloc_len, req_len);
6836 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6842 if (!phba->sli4_hba.intr_enable)
6843 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6845 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6846 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6855 * Figure out where the response is located. Then get local pointers
6856	 * to the response data. The port does not guarantee a response to
6857	 * every extent count request, so update the local variable with the
6858 * allocated count from the port.
6860 if (emb == LPFC_SLI4_MBX_EMBED) {
6861 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6862 shdr = &rsrc_ext->header.cfg_shdr;
6863 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6865 virtaddr = mbox->sge_array->addr[0];
6866 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6867 shdr = &n_rsrc->cfg_shdr;
6868 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6871 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6872 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6873 "2984 Failed to read allocated resources "
6874 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6876 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6877 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6882 lpfc_sli4_mbox_cmd_free(phba, mbox);
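/*
 * Illustrative sketch (hypothetical helper, not driver code) restating
 * the embedded-vs-non-embedded decision made above: a config mailbox
 * can embed at most sizeof(MAILBOX_t) minus the headers of payload;
 * anything larger must use the non-embedded, SGE-based form.
 */
#if 0
static bool example_fits_embedded(uint32_t req_len)
{
	uint32_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			   sizeof(uint32_t);

	/* true: use LPFC_SLI4_MBX_EMBED; false: LPFC_SLI4_MBX_NEMBED */
	return req_len <= emb_len;
}
#endif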
6887  * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
6888  * @phba: pointer to lpfc hba data structure.
6890  * @sgl_list: linked list of sgl buffers to post
6891  * @cnt: number of linked list buffers
6893  * This routine walks the list of buffers that have been allocated and
6894  * reposts them to the port using SGL block post. This is needed after a
6895  * pci_function_reset/warm_start or start. It attempts to construct blocks
6896  * of buffer sgls that contain contiguous xris and uses the non-embedded
6897  * SGL block post mailbox commands to post them to the port. For a single
6898  * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
6899  * post mailbox command.
6901 * Returns: 0 = success, non-zero failure.
6904 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6905 struct list_head *sgl_list, int cnt)
6907 struct lpfc_sglq *sglq_entry = NULL;
6908 struct lpfc_sglq *sglq_entry_next = NULL;
6909 struct lpfc_sglq *sglq_entry_first = NULL;
6910 int status, total_cnt;
6911 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6912 int last_xritag = NO_XRI;
6913 LIST_HEAD(prep_sgl_list);
6914 LIST_HEAD(blck_sgl_list);
6915 LIST_HEAD(allc_sgl_list);
6916 LIST_HEAD(post_sgl_list);
6917 LIST_HEAD(free_sgl_list);
6919 spin_lock_irq(&phba->hbalock);
6920 spin_lock(&phba->sli4_hba.sgl_list_lock);
6921 list_splice_init(sgl_list, &allc_sgl_list);
6922 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6923 spin_unlock_irq(&phba->hbalock);
6926 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6927 &allc_sgl_list, list) {
6928 list_del_init(&sglq_entry->list);
6930 if ((last_xritag != NO_XRI) &&
6931 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6932 /* a hole in xri block, form a sgl posting block */
6933 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6934 post_cnt = block_cnt - 1;
6935 /* prepare list for next posting block */
6936 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6939 /* prepare list for next posting block */
6940 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6941 /* enough sgls for non-embed sgl mbox command */
6942 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6943 list_splice_init(&prep_sgl_list,
6945 post_cnt = block_cnt;
6951 /* keep track of last sgl's xritag */
6952 last_xritag = sglq_entry->sli4_xritag;
6954 /* end of repost sgl list condition for buffers */
6955 if (num_posted == total_cnt) {
6956 if (post_cnt == 0) {
6957 list_splice_init(&prep_sgl_list,
6959 post_cnt = block_cnt;
6960 } else if (block_cnt == 1) {
6961 status = lpfc_sli4_post_sgl(phba,
6962 sglq_entry->phys, 0,
6963 sglq_entry->sli4_xritag);
6965 /* successful, put sgl to posted list */
6966 list_add_tail(&sglq_entry->list,
6969 /* Failure, put sgl to free list */
6970 lpfc_printf_log(phba, KERN_WARNING,
6972 "3159 Failed to post "
6973 "sgl, xritag:x%x\n",
6974 sglq_entry->sli4_xritag);
6975 list_add_tail(&sglq_entry->list,
6982 /* continue until a nembed page worth of sgls */
6986 /* post the buffer list sgls as a block */
6987 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6991 /* success, put sgl list to posted sgl list */
6992 list_splice_init(&blck_sgl_list, &post_sgl_list);
6994 /* Failure, put sgl list to free sgl list */
6995 sglq_entry_first = list_first_entry(&blck_sgl_list,
6998 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6999 "3160 Failed to post sgl-list, "
7001 sglq_entry_first->sli4_xritag,
7002 (sglq_entry_first->sli4_xritag +
7004 list_splice_init(&blck_sgl_list, &free_sgl_list);
7005 total_cnt -= post_cnt;
7008		/* don't reset xritag due to hole in xri block */
7010 last_xritag = NO_XRI;
7012 /* reset sgl post count for next round of posting */
7016 /* free the sgls failed to post */
7017 lpfc_free_sgl_list(phba, &free_sgl_list);
7019 /* push sgls posted to the available list */
7020 if (!list_empty(&post_sgl_list)) {
7021 spin_lock_irq(&phba->hbalock);
7022 spin_lock(&phba->sli4_hba.sgl_list_lock);
7023 list_splice_init(&post_sgl_list, sgl_list);
7024 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7025 spin_unlock_irq(&phba->hbalock);
7027 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7028 "3161 Failure to post sgl to port.\n");
7032 /* return the number of XRIs actually posted */
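/*
 * Illustrative sketch (hypothetical helper): the block-forming rule
 * used above.  sgls whose sli4_xritag values are consecutive are
 * batched into one non-embedded SGL block post; a hole in the xri
 * sequence closes the current block so it is posted separately.
 */
#if 0
static bool example_xri_contiguous(int last_xritag, uint16_t next_xritag)
{
	/* No block in progress yet, or the next xri extends the run. */
	return (last_xritag == NO_XRI) ||
	       (next_xritag == (uint16_t)(last_xritag + 1));
}
#endif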
7037 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7038 * @phba: pointer to lpfc hba data structure.
7040 * This routine walks the list of nvme buffers that have been allocated and
7041  * reposts them to the port by using SGL block post. This is needed after a
7042 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7043 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7044 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7046 * Returns: 0 = success, non-zero failure.
7049 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7051 LIST_HEAD(post_nblist);
7052 int num_posted, rc = 0;
7054	/* gather all NVME buffers that need reposting onto a local list */
7055 lpfc_io_buf_flush(phba, &post_nblist);
7057 /* post the list of nvme buffer sgls to port if available */
7058 if (!list_empty(&post_nblist)) {
7059 num_posted = lpfc_sli4_post_io_sgl_list(
7060 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7061 /* failed to post any nvme buffer, return error */
7062 if (num_posted == 0)
7069 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7073 len = sizeof(struct lpfc_mbx_set_host_data) -
7074 sizeof(struct lpfc_sli4_cfg_mhdr);
7075 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7076 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7077 LPFC_SLI4_MBX_EMBED);
7079 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7080 mbox->u.mqe.un.set_host_data.param_len =
7081 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7082 snprintf(mbox->u.mqe.un.set_host_data.data,
7083 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7084 "Linux %s v"LPFC_DRIVER_VERSION,
7085 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
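	/*
	 * For example, on a non-FCoE port this hands the firmware a
	 * host-data string of the form "Linux FC v<LPFC_DRIVER_VERSION>",
	 * truncated to LPFC_HOST_OS_DRIVER_VERSION_SIZE bytes by snprintf().
	 */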
7089 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7090 struct lpfc_queue *drq, int count, int idx)
7093 struct lpfc_rqe hrqe;
7094 struct lpfc_rqe drqe;
7095 struct lpfc_rqb *rqbp;
7096 unsigned long flags;
7097 struct rqb_dmabuf *rqb_buffer;
7098 LIST_HEAD(rqb_buf_list);
7100 spin_lock_irqsave(&phba->hbalock, flags);
7102 for (i = 0; i < count; i++) {
7103		/* If RQ is already full, don't bother */
7104 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7106 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7109 rqb_buffer->hrq = hrq;
7110 rqb_buffer->drq = drq;
7111 rqb_buffer->idx = idx;
7112 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7114 while (!list_empty(&rqb_buf_list)) {
7115 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7118 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7119 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7120 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7121 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7122 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7125 "6421 Cannot post to HRQ %d: %x %x %x "
7133 rqbp->rqb_free_buffer(phba, rqb_buffer);
7135 list_add_tail(&rqb_buffer->hbuf.list,
7136 &rqbp->rqb_buffer_list);
7137 rqbp->buffer_count++;
7140 spin_unlock_irqrestore(&phba->hbalock, flags);
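	/*
	 * Note: each receive buffer is posted as a pair of RQEs - one on
	 * the header RQ and one on the data RQ - with each 64-bit DMA
	 * address split into low/high words via putPaddrLow()/putPaddrHigh()
	 * before lpfc_sli4_rq_put() writes the entries.
	 */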
7145 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7146 * @phba: Pointer to HBA context object.
7148 * This function is the main SLI4 device initialization PCI function. This
7149 * function is called by the HBA initialization code, HBA reset code and
7150 * HBA error attention handler code. Caller is not required to hold any
7154 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7156 int rc, i, cnt, len;
7157 LPFC_MBOXQ_t *mboxq;
7158 struct lpfc_mqe *mqe;
7161 uint32_t ftr_rsp = 0;
7162 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7163 struct lpfc_vport *vport = phba->pport;
7164 struct lpfc_dmabuf *mp;
7165 struct lpfc_rqb *rqbp;
7167 /* Perform a PCI function reset to start from clean */
7168 rc = lpfc_pci_function_reset(phba);
7172	/* Check the HBA Host Status Register for readiness */
7173 rc = lpfc_sli4_post_status_check(phba);
7177 spin_lock_irq(&phba->hbalock);
7178 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7179 spin_unlock_irq(&phba->hbalock);
7183 * Allocate a single mailbox container for initializing the
7186 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7190 /* Issue READ_REV to collect vpd and FW information. */
7191 vpd_size = SLI4_PAGE_SIZE;
7192 vpd = kzalloc(vpd_size, GFP_KERNEL);
7198 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7204 mqe = &mboxq->u.mqe;
7205 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7206 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7207 phba->hba_flag |= HBA_FCOE_MODE;
7208 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7210 phba->hba_flag &= ~HBA_FCOE_MODE;
7213 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7215 phba->hba_flag |= HBA_FIP_SUPPORT;
7217 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7219 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7221 if (phba->sli_rev != LPFC_SLI_REV4) {
7222 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7223 "0376 READ_REV Error. SLI Level %d "
7224 "FCoE enabled %d\n",
7225 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7232 * Continue initialization with default values even if driver failed
7233	 * to read FCoE param config regions; only read parameters if the
7236 if (phba->hba_flag & HBA_FCOE_MODE &&
7237 lpfc_sli4_read_fcoe_params(phba))
7238 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7239 "2570 Failed to read FCoE parameters\n");
7242	 * Retrieve the sli4 device physical port name; failure to do so
7243	 * is considered non-fatal.
7245 rc = lpfc_sli4_retrieve_pport_name(phba);
7247 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7248 "3080 Successful retrieving SLI4 device "
7249 "physical port name: %s.\n", phba->Port);
7251 rc = lpfc_sli4_get_ctl_attr(phba);
7253 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7254 "8351 Successful retrieving SLI4 device "
7258 * Evaluate the read rev and vpd data. Populate the driver
7259 * state with the results. If this routine fails, the failure
7260 * is not fatal as the driver will use generic values.
7262 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7263 if (unlikely(!rc)) {
7264 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7265 "0377 Error %d parsing vpd. "
7266 "Using defaults.\n", rc);
7271 /* Save information as VPD data */
7272 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7273 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7276	 * This is because the first G7 ASIC doesn't support the standard
7277 * 0x5a NVME cmd descriptor type/subtype
7279 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7280 LPFC_SLI_INTF_IF_TYPE_6) &&
7281 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7282 (phba->vpd.rev.smRev == 0) &&
7283 (phba->cfg_nvme_embed_cmd == 1))
7284 phba->cfg_nvme_embed_cmd = 0;
7286 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7287 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7289 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7291 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7293 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7295 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7296 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7297 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7298 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7299 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7300 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7301 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7302 "(%d):0380 READ_REV Status x%x "
7303 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7304 mboxq->vport ? mboxq->vport->vpi : 0,
7305 bf_get(lpfc_mqe_status, mqe),
7306 phba->vpd.rev.opFwName,
7307 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7308 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7310 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
7311 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7312 if (phba->pport->cfg_lun_queue_depth > rc) {
7313 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7314 "3362 LUN queue depth changed from %d to %d\n",
7315 phba->pport->cfg_lun_queue_depth, rc);
7316 phba->pport->cfg_lun_queue_depth = rc;
7319 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7320 LPFC_SLI_INTF_IF_TYPE_0) {
7321 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7322 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7323 if (rc == MBX_SUCCESS) {
7324 phba->hba_flag |= HBA_RECOVERABLE_UE;
7325 /* Set 1Sec interval to detect UE */
7326 phba->eratt_poll_interval = 1;
7327 phba->sli4_hba.ue_to_sr = bf_get(
7328 lpfc_mbx_set_feature_UESR,
7329 &mboxq->u.mqe.un.set_feature);
7330 phba->sli4_hba.ue_to_rp = bf_get(
7331 lpfc_mbx_set_feature_UERP,
7332 &mboxq->u.mqe.un.set_feature);
7336 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7337 /* Enable MDS Diagnostics only if the SLI Port supports it */
7338 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7339 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7340 if (rc != MBX_SUCCESS)
7341 phba->mds_diags_support = 0;
7345 * Discover the port's supported feature set and match it against the
7348 lpfc_request_features(phba, mboxq);
7349 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7356 * The port must support FCP initiator mode as this is the
7357 * only mode running in the host.
7359 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7360 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7361 "0378 No support for fcpi mode.\n");
7365 /* Performance Hints are ONLY for FCoE */
7366 if (phba->hba_flag & HBA_FCOE_MODE) {
7367 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7368 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7370 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7374 * If the port cannot support the host's requested features
7375 * then turn off the global config parameters to disable the
7376 * feature in the driver. This is not a fatal error.
7378 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7379 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7380 phba->cfg_enable_bg = 0;
7381 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7386 if (phba->max_vpi && phba->cfg_enable_npiv &&
7387 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7391 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7392 "0379 Feature Mismatch Data: x%08x %08x "
7393 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7394 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7395 phba->cfg_enable_npiv, phba->max_vpi);
7396 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7397 phba->cfg_enable_bg = 0;
7398 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7399 phba->cfg_enable_npiv = 0;
7402 /* These SLI3 features are assumed in SLI4 */
7403 spin_lock_irq(&phba->hbalock);
7404 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7405 spin_unlock_irq(&phba->hbalock);
7408	 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
7409	 * calls depend on these resources to complete port setup.
7411 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7413 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7414 "2920 Failed to alloc Resource IDs "
7419 lpfc_set_host_data(phba, mboxq);
7421 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7423 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7424 "2134 Failed to set host os driver version %x",
7428 /* Read the port's service parameters. */
7429 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7431 phba->link_state = LPFC_HBA_ERROR;
7436 mboxq->vport = vport;
7437 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7438 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7439 if (rc == MBX_SUCCESS) {
7440 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7445 * This memory was allocated by the lpfc_read_sparam routine. Release
7446 * it to the mbuf pool.
7448 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7450 mboxq->ctx_buf = NULL;
7452 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7453 "0382 READ_SPARAM command failed "
7454 "status %d, mbxStatus x%x\n",
7455 rc, bf_get(lpfc_mqe_status, mqe));
7456 phba->link_state = LPFC_HBA_ERROR;
7461 lpfc_update_vport_wwn(vport);
7463 /* Update the fc_host data structures with new wwn. */
7464 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7465 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7467 /* Create all the SLI4 queues */
7468 rc = lpfc_sli4_queue_create(phba);
7470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7471 "3089 Failed to allocate queues\n");
7475 /* Set up all the queues to the device */
7476 rc = lpfc_sli4_queue_setup(phba);
7478 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7479 "0381 Error %d during queue setup.\n ", rc);
7480 goto out_stop_timers;
7482 /* Initialize the driver internal SLI layer lists. */
7483 lpfc_sli4_setup(phba);
7484 lpfc_sli4_queue_init(phba);
7486 /* update host els xri-sgl sizes and mappings */
7487 rc = lpfc_sli4_els_sgl_update(phba);
7489 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7490 "1400 Failed to update xri-sgl size and "
7491 "mapping: %d\n", rc);
7492 goto out_destroy_queue;
7495 /* register the els sgl pool to the port */
7496 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7497 phba->sli4_hba.els_xri_cnt);
7498 if (unlikely(rc < 0)) {
7499 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7500 "0582 Error %d during els sgl post "
7503 goto out_destroy_queue;
7505 phba->sli4_hba.els_xri_cnt = rc;
7507 if (phba->nvmet_support) {
7508 /* update host nvmet xri-sgl sizes and mappings */
7509 rc = lpfc_sli4_nvmet_sgl_update(phba);
7511 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7512 "6308 Failed to update nvmet-sgl size "
7513 "and mapping: %d\n", rc);
7514 goto out_destroy_queue;
7517 /* register the nvmet sgl pool to the port */
7518 rc = lpfc_sli4_repost_sgl_list(
7520 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7521 phba->sli4_hba.nvmet_xri_cnt);
7522 if (unlikely(rc < 0)) {
7523 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7524 "3117 Error %d during nvmet "
7527 goto out_destroy_queue;
7529 phba->sli4_hba.nvmet_xri_cnt = rc;
7531 cnt = phba->cfg_iocb_cnt * 1024;
7532 /* We need 1 iocbq for every SGL, for IO processing */
7533 cnt += phba->sli4_hba.nvmet_xri_cnt;
7535 /* update host common xri-sgl sizes and mappings */
7536 rc = lpfc_sli4_io_sgl_update(phba);
7538 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7539 "6082 Failed to update nvme-sgl size "
7540 "and mapping: %d\n", rc);
7541 goto out_destroy_queue;
7544 /* register the allocated common sgl pool to the port */
7545 rc = lpfc_sli4_repost_io_sgl_list(phba);
7547 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7548 "6116 Error %d during nvme sgl post "
7550 /* Some NVME buffers were moved to abort nvme list */
7551 /* A pci function reset will repost them */
7553 goto out_destroy_queue;
7555 cnt = phba->cfg_iocb_cnt * 1024;
7558 if (!phba->sli.iocbq_lookup) {
7559 /* Initialize and populate the iocb list per host */
7560 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7561 "2821 initialize iocb list %d total %d\n",
7562 phba->cfg_iocb_cnt, cnt);
7563 rc = lpfc_init_iocb_list(phba, cnt);
7565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7566 "1413 Failed to init iocb list.\n");
7567 goto out_destroy_queue;
7571 if (phba->nvmet_support)
7572 lpfc_nvmet_create_targetport(phba);
7574 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7575 /* Post initial buffers to all RQs created */
7576 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7577 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7578 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7579 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7580 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7581 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7582 rqbp->buffer_count = 0;
7584 lpfc_post_rq_buffer(
7585 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7586 phba->sli4_hba.nvmet_mrq_data[i],
7587 phba->cfg_nvmet_mrq_post, i);
7591 /* Post the rpi header region to the device. */
7592 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7594 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7595 "0393 Error %d during rpi post operation\n",
7598 goto out_destroy_queue;
7600 lpfc_sli4_node_prep(phba);
7602 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7603 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7605 * The FC Port needs to register FCFI (index 0)
7607 lpfc_reg_fcfi(phba, mboxq);
7608 mboxq->vport = phba->pport;
7609 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7610 if (rc != MBX_SUCCESS)
7611 goto out_unset_queue;
7613 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7614 &mboxq->u.mqe.un.reg_fcfi);
7616			/* We are in NVME Target mode with MRQ > 1 */
7618 /* First register the FCFI */
7619 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7620 mboxq->vport = phba->pport;
7621 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7622 if (rc != MBX_SUCCESS)
7623 goto out_unset_queue;
7625 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7626 &mboxq->u.mqe.un.reg_fcfi_mrq);
7628 /* Next register the MRQs */
7629 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7630 mboxq->vport = phba->pport;
7631 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7632 if (rc != MBX_SUCCESS)
7633 goto out_unset_queue;
7636 /* Check if the port is configured to be disabled */
7637 lpfc_sli_read_link_ste(phba);
7640 /* Don't post more new bufs if repost already recovered
7643 if (phba->nvmet_support == 0) {
7644 if (phba->sli4_hba.io_xri_cnt == 0) {
7645 len = lpfc_new_io_buf(
7646 phba, phba->sli4_hba.io_xri_max);
7649 goto out_unset_queue;
7652 if (phba->cfg_xri_rebalancing)
7653 lpfc_create_multixri_pools(phba);
7656 phba->cfg_xri_rebalancing = 0;
7659 /* Allow asynchronous mailbox command to go through */
7660 spin_lock_irq(&phba->hbalock);
7661 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7662 spin_unlock_irq(&phba->hbalock);
7664 /* Post receive buffers to the device */
7665 lpfc_sli4_rb_setup(phba);
7667 /* Reset HBA FCF states after HBA reset */
7668 phba->fcf.fcf_flag = 0;
7669 phba->fcf.current_rec.flag = 0;
7671 /* Start the ELS watchdog timer */
7672 mod_timer(&vport->els_tmofunc,
7673 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7675 /* Start heart beat timer */
7676 mod_timer(&phba->hb_tmofunc,
7677 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7678 phba->hb_outstanding = 0;
7679 phba->last_completion_time = jiffies;
7681 /* start eq_delay heartbeat */
7682 if (phba->cfg_auto_imax)
7683 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7684 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7686 /* Start error attention (ERATT) polling timer */
7687 mod_timer(&phba->eratt_poll,
7688 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7690 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7691 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7692 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7694 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7695 "2829 This device supports "
7696 "Advanced Error Reporting (AER)\n");
7697 spin_lock_irq(&phba->hbalock);
7698 phba->hba_flag |= HBA_AER_ENABLED;
7699 spin_unlock_irq(&phba->hbalock);
7701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7702 "2830 This device does not support "
7703 "Advanced Error Reporting (AER)\n");
7704 phba->cfg_aer_support = 0;
7710	 * The port is ready; set the host's link state to LINK_DOWN
7711	 * in preparation for link interrupts.
7713 spin_lock_irq(&phba->hbalock);
7714 phba->link_state = LPFC_LINK_DOWN;
7716 /* Check if physical ports are trunked */
7717 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7718 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7719 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7720 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7721 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7722 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7723 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7724 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7725 spin_unlock_irq(&phba->hbalock);
7727 /* Arm the CQs and then EQs on device */
7728 lpfc_sli4_arm_cqeq_intr(phba);
7730 /* Indicate device interrupt mode */
7731 phba->sli4_hba.intr_enable = 1;
7733 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7734 (phba->hba_flag & LINK_DISABLED)) {
7735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7736 "3103 Adapter Link is disabled.\n");
7737 lpfc_down_link(phba, mboxq);
7738 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7739 if (rc != MBX_SUCCESS) {
7740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7741 "3104 Adapter failed to issue "
7742 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7743 goto out_io_buff_free;
7745 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7746 /* don't perform init_link on SLI4 FC port loopback test */
7747 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7748 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7750 goto out_io_buff_free;
7753 mempool_free(mboxq, phba->mbox_mem_pool);
7756 /* Free allocated IO Buffers */
7759 /* Unset all the queues set up in this routine when error out */
7760 lpfc_sli4_queue_unset(phba);
7762 lpfc_free_iocb_list(phba);
7763 lpfc_sli4_queue_destroy(phba);
7765 lpfc_stop_hba_timers(phba);
7767 mempool_free(mboxq, phba->mbox_mem_pool);
7772 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7773 * @t: pointer to the timer_list structure embedded in the hba.
7775 * This is the callback function for the mailbox timer. The mailbox
7776 * timer is armed when a new mailbox command is issued and the timer
7777 * is deleted when the mailbox completes. The function is called by
7778 * the kernel timer code when a mailbox does not complete within
7779 * the expected time. This function wakes up the worker thread to
7780 * process the mailbox timeout and returns. All the processing is
7781 * done by the worker thread function lpfc_mbox_timeout_handler.
7784 lpfc_mbox_timeout(struct timer_list *t)
7786 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7787 unsigned long iflag;
7788 uint32_t tmo_posted;
7790 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7791 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7793 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7794 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7797 lpfc_worker_wake_up(phba);
7802 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7804 * @phba: Pointer to HBA context object.
7806 * This function checks if any mailbox completions are present on the mailbox
7810 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7814 struct lpfc_queue *mcq;
7815 struct lpfc_mcqe *mcqe;
7816 bool pending_completions = false;
7819 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7822 /* Check for completions on mailbox completion queue */
7824 mcq = phba->sli4_hba.mbx_cq;
7825 idx = mcq->hba_index;
7826 qe_valid = mcq->qe_valid;
7827 while (bf_get_le32(lpfc_cqe_valid,
7828 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7829 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7830 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7831 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7832 pending_completions = true;
7835 idx = (idx + 1) % mcq->entry_count;
7836 if (mcq->hba_index == idx)
7839 /* if the index wrapped around, toggle the valid bit */
7840 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7841 qe_valid = (qe_valid) ? 0 : 1;
7843 return pending_completions;
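/*
 * Illustrative note: on CQs using the valid-bit convention (cqav),
 * entry ownership is a phase bit that flips each time the ring wraps.
 * The scan above mirrors that: compare each CQE's valid bit against
 * qe_valid, advance idx modulo entry_count, and toggle qe_valid when
 * idx wraps to 0.  A minimal consumer loop, under those assumptions:
 */
#if 0
	/* q, idx, valid and cqav are assumed set up as in the scan above */
	while (bf_get_le32(lpfc_cqe_valid,
			   (struct lpfc_cqe *)lpfc_sli4_qe(q, idx)) == valid) {
		/* ... consume the entry ... */
		idx = (idx + 1) % q->entry_count;
		if (cqav && !idx)
			valid = valid ? 0 : 1;
	}
#endif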
7848 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7850 * @phba: Pointer to HBA context object.
7852 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7853 * may be missed, causing erroneous mailbox timeouts to occur. This function
7854 * checks to see if mbox completions are on the mailbox completion queue
7855 * and will process all the completions associated with the eq for the
7856 * mailbox completion queue.
7859 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7861 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7863 struct lpfc_queue *fpeq = NULL;
7864 struct lpfc_queue *eq;
7867 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7870 /* Find the EQ associated with the mbox CQ */
7871 if (sli4_hba->hdwq) {
7872 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
7873 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
7874 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
7883 /* Turn off interrupts from this EQ */
7885 sli4_hba->sli4_eq_clr_intr(fpeq);
7887 /* Check to see if a mbox completion is pending */
7889 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7892 * If a mbox completion is pending, process all the events on EQ
7893 * associated with the mbox completion queue (this could include
7894 * mailbox commands, async events, els commands, receive queue data
7899 /* process and rearm the EQ */
7900 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
7902 /* Always clear and re-arm the EQ */
7903 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7905 return mbox_pending;
7910 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7911 * @phba: Pointer to HBA context object.
7913 * This function is called from worker thread when a mailbox command times out.
7914 * The caller is not required to hold any locks. This function will reset the
7915 * HBA and recover all the pending commands.
7918 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7920 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7921 MAILBOX_t *mb = NULL;
7923 struct lpfc_sli *psli = &phba->sli;
7925 /* If the mailbox completed, process the completion and return */
7926 if (lpfc_sli4_process_missed_mbox_completions(phba))
7931 /* Check the pmbox pointer first. There is a race condition
7932 * between the mbox timeout handler getting executed in the
7933 * worklist and the mailbox actually completing. When this
7934 * race condition occurs, the mbox_active will be NULL.
7936 spin_lock_irq(&phba->hbalock);
7937 if (pmbox == NULL) {
7938 lpfc_printf_log(phba, KERN_WARNING,
7940 "0353 Active Mailbox cleared - mailbox timeout "
7942 spin_unlock_irq(&phba->hbalock);
7946 /* Mbox cmd <mbxCommand> timeout */
7947 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7948 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
7950 phba->pport->port_state,
7952 phba->sli.mbox_active);
7953 spin_unlock_irq(&phba->hbalock);
7955 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7956 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7957 * it to fail all outstanding SCSI IO.
7959 spin_lock_irq(&phba->pport->work_port_lock);
7960 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7961 spin_unlock_irq(&phba->pport->work_port_lock);
7962 spin_lock_irq(&phba->hbalock);
7963 phba->link_state = LPFC_LINK_UNKNOWN;
7964 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7965 spin_unlock_irq(&phba->hbalock);
7967 lpfc_sli_abort_fcp_rings(phba);
7969 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7970 "0345 Resetting board due to mailbox timeout\n");
7972 /* Reset the HBA device */
7973 lpfc_reset_hba(phba);
7977 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7978 * @phba: Pointer to HBA context object.
7979 * @pmbox: Pointer to mailbox object.
7980 * @flag: Flag indicating how the mailbox needs to be processed.
7982 * This function is called by discovery code and HBA management code
7983 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7984 * function gets the hbalock to protect the data structures.
7985 * The mailbox command can be submitted in polling mode, in which case
7986 * this function will wait in a polling loop for the completion of the
7988 * If the mailbox is submitted in no_wait mode (not polling), the
7989 * function will submit the command and return immediately without waiting
7990 * for the mailbox completion. The no_wait is supported only when HBA
7991 * is in SLI2/SLI3 mode - interrupts are enabled.
7992 * The SLI interface allows only one mailbox pending at a time. If the
7993 * mailbox is issued in polling mode and there is already a mailbox
7994 * pending, then the function will return an error. If the mailbox is issued
7995 * in NO_WAIT mode and there is a mailbox pending already, the function
7996 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
7997 * The sli layer owns the mailbox object until the completion of the mailbox
7998 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
7999 * return codes the caller owns the mailbox command after the return of
8003 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8007 struct lpfc_sli *psli = &phba->sli;
8008 uint32_t status, evtctr;
8009 uint32_t ha_copy, hc_copy;
8011 unsigned long timeout;
8012 unsigned long drvr_flag = 0;
8013 uint32_t word0, ldata;
8014 void __iomem *to_slim;
8015 int processing_queue = 0;
8017 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8019 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8020 /* processing mbox queue from intr_handler */
8021 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8022 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8025 processing_queue = 1;
8026 pmbox = lpfc_mbox_get(phba);
8028 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8033 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8034 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8036 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8037 lpfc_printf_log(phba, KERN_ERR,
8038 LOG_MBOX | LOG_VPORT,
8039 "1806 Mbox x%x failed. No vport\n",
8040 pmbox->u.mb.mbxCommand);
8042 goto out_not_finished;
8046 /* If the PCI channel is in offline state, do not post mbox. */
8047 if (unlikely(pci_channel_offline(phba->pcidev))) {
8048 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8049 goto out_not_finished;
8052	/* If HBA has a deferred error attention, fail the mailbox command. */
8053 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8054 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8055 goto out_not_finished;
8061 status = MBX_SUCCESS;
8063 if (phba->link_state == LPFC_HBA_ERROR) {
8064 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8066 /* Mbox command <mbxCommand> cannot issue */
8067 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8068 "(%d):0311 Mailbox command x%x cannot "
8069 "issue Data: x%x x%x\n",
8070 pmbox->vport ? pmbox->vport->vpi : 0,
8071 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8072 goto out_not_finished;
8075 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8076 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8077 !(hc_copy & HC_MBINT_ENA)) {
8078 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8079 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8080 "(%d):2528 Mailbox command x%x cannot "
8081 "issue Data: x%x x%x\n",
8082 pmbox->vport ? pmbox->vport->vpi : 0,
8083 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8084 goto out_not_finished;
8088 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8089 /* Polling for a mbox command when another one is already active
8090 * is not allowed in SLI. Also, the driver must have established
8091 * SLI2 mode to queue and process multiple mbox commands.
8094 if (flag & MBX_POLL) {
8095 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8097 /* Mbox command <mbxCommand> cannot issue */
8098 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8099 "(%d):2529 Mailbox command x%x "
8100 "cannot issue Data: x%x x%x\n",
8101 pmbox->vport ? pmbox->vport->vpi : 0,
8102 pmbox->u.mb.mbxCommand,
8103 psli->sli_flag, flag);
8104 goto out_not_finished;
8107 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8108 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8109 /* Mbox command <mbxCommand> cannot issue */
8110 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8111 "(%d):2530 Mailbox command x%x "
8112 "cannot issue Data: x%x x%x\n",
8113 pmbox->vport ? pmbox->vport->vpi : 0,
8114 pmbox->u.mb.mbxCommand,
8115 psli->sli_flag, flag);
8116 goto out_not_finished;
8119		/* Another mailbox command is still being processed; queue this
8120		 * command to be processed later.
8122 lpfc_mbox_put(phba, pmbox);
8124 /* Mbox cmd issue - BUSY */
8125 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8126 "(%d):0308 Mbox cmd issue - BUSY Data: "
8127 "x%x x%x x%x x%x\n",
8128 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8130 phba->pport ? phba->pport->port_state : 0xff,
8131 psli->sli_flag, flag);
8133 psli->slistat.mbox_busy++;
8134 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8137 lpfc_debugfs_disc_trc(pmbox->vport,
8138 LPFC_DISC_TRC_MBOX_VPORT,
8139 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8140 (uint32_t)mbx->mbxCommand,
8141 mbx->un.varWords[0], mbx->un.varWords[1]);
8144 lpfc_debugfs_disc_trc(phba->pport,
8146 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8147 (uint32_t)mbx->mbxCommand,
8148 mbx->un.varWords[0], mbx->un.varWords[1]);
8154 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8156 /* If we are not polling, we MUST be in SLI2 mode */
8157 if (flag != MBX_POLL) {
8158 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8159 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8160 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8161 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8162 /* Mbox command <mbxCommand> cannot issue */
8163 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8164 "(%d):2531 Mailbox command x%x "
8165 "cannot issue Data: x%x x%x\n",
8166 pmbox->vport ? pmbox->vport->vpi : 0,
8167 pmbox->u.mb.mbxCommand,
8168 psli->sli_flag, flag);
8169 goto out_not_finished;
8171 /* timeout active mbox command */
8172 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8174 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8177 /* Mailbox cmd <cmd> issue */
8178 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8179 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8181 pmbox->vport ? pmbox->vport->vpi : 0,
8183 phba->pport ? phba->pport->port_state : 0xff,
8184 psli->sli_flag, flag);
8186 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8188 lpfc_debugfs_disc_trc(pmbox->vport,
8189 LPFC_DISC_TRC_MBOX_VPORT,
8190 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8191 (uint32_t)mbx->mbxCommand,
8192 mbx->un.varWords[0], mbx->un.varWords[1]);
8195 lpfc_debugfs_disc_trc(phba->pport,
8197 "MBOX Send: cmd:x%x mb:x%x x%x",
8198 (uint32_t)mbx->mbxCommand,
8199 mbx->un.varWords[0], mbx->un.varWords[1]);
8203 psli->slistat.mbox_cmd++;
8204 evtctr = psli->slistat.mbox_event;
8206 /* next set own bit for the adapter and copy over command word */
8207 mbx->mbxOwner = OWN_CHIP;
8209 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8210 /* Populate mbox extension offset word. */
8211 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8212 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8213 = (uint8_t *)phba->mbox_ext
8214 - (uint8_t *)phba->mbox;
8217 /* Copy the mailbox extension data */
8218 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8219 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8220 (uint8_t *)phba->mbox_ext,
8221 pmbox->in_ext_byte_len);
8223 /* Copy command data to host SLIM area */
8224 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8226 /* Populate mbox extension offset word. */
8227 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8228 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8229 = MAILBOX_HBA_EXT_OFFSET;
8231 /* Copy the mailbox extension data */
8232 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8233 lpfc_memcpy_to_slim(phba->MBslimaddr +
8234 MAILBOX_HBA_EXT_OFFSET,
8235 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8237 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8238 /* copy command data into host mbox for cmpl */
8239 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8242 /* First copy mbox command data to HBA SLIM, skip past first word */
8244 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8245 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8246 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8248 /* Next copy over first word, with mbxOwner set */
8249 ldata = *((uint32_t *)mbx);
8250 to_slim = phba->MBslimaddr;
8251 writel(ldata, to_slim);
8252 readl(to_slim); /* flush */
8254 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8255 /* switch over to host mailbox */
8256 psli->sli_flag |= LPFC_SLI_ACTIVE;
8263 /* Set up reference to mailbox command */
8264 psli->mbox_active = pmbox;
8265 /* Interrupt board to do it */
8266 writel(CA_MBATT, phba->CAregaddr);
8267 readl(phba->CAregaddr); /* flush */
8268 /* Don't wait for it to finish, just return */
8272 /* Set up null reference to mailbox command */
8273 psli->mbox_active = NULL;
8274 /* Interrupt board to do it */
8275 writel(CA_MBATT, phba->CAregaddr);
8276 readl(phba->CAregaddr); /* flush */
8278 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8279 /* First read mbox status word */
8280 word0 = *((uint32_t *)phba->mbox);
8281 word0 = le32_to_cpu(word0);
8283 /* First read mbox status word */
8284 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8285 spin_unlock_irqrestore(&phba->hbalock,
8287 goto out_not_finished;
8291 /* Read the HBA Host Attention Register */
8292 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8293 spin_unlock_irqrestore(&phba->hbalock,
8295 goto out_not_finished;
8297 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8300 /* Wait for command to complete */
8301 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8302 (!(ha_copy & HA_MBATT) &&
8303 (phba->link_state > LPFC_WARM_START))) {
8304 if (time_after(jiffies, timeout)) {
8305 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8306 spin_unlock_irqrestore(&phba->hbalock,
8308 goto out_not_finished;
8311 /* Check if we took a mbox interrupt while we were polling */
8313 if (((word0 & OWN_CHIP) != OWN_CHIP)
8314 && (evtctr != psli->slistat.mbox_event))
8318 spin_unlock_irqrestore(&phba->hbalock,
8321 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8324 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8325 /* First copy command data */
8326 word0 = *((uint32_t *)phba->mbox);
8327 word0 = le32_to_cpu(word0);
8328 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8331 /* Check real SLIM for any errors */
8332 slimword0 = readl(phba->MBslimaddr);
8333 slimmb = (MAILBOX_t *) & slimword0;
8334 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8335 && slimmb->mbxStatus) {
8342 /* First copy command data */
8343 word0 = readl(phba->MBslimaddr);
8345 /* Read the HBA Host Attention Register */
8346 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8347 spin_unlock_irqrestore(&phba->hbalock,
8349 goto out_not_finished;
8353 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8354 /* copy results back to user */
8355 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8357 /* Copy the mailbox extension data */
8358 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8359 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8361 pmbox->out_ext_byte_len);
8364 /* First copy command data */
8365 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8367 /* Copy the mailbox extension data */
8368 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8369 lpfc_memcpy_from_slim(
8372 MAILBOX_HBA_EXT_OFFSET,
8373 pmbox->out_ext_byte_len);
8377 writel(HA_MBATT, phba->HAregaddr);
8378 readl(phba->HAregaddr); /* flush */
8380 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8381 status = mbx->mbxStatus;
8384 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8388 if (processing_queue) {
8389 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8390 lpfc_mbox_cmpl_put(phba, pmbox);
8392 return MBX_NOT_FINISHED;
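/*
 * Illustrative sketch, not part of the driver: how a caller might issue
 * a polled mailbox command through the jump-table wrapper defined later
 * in this file. The helper name lpfc_example_read_rev_poll is
 * hypothetical; lpfc_read_rev() and the mbox mempool are the driver's
 * usual building blocks. With MBX_POLL the caller owns the mailbox
 * again as soon as lpfc_sli_issue_mbox() returns and must free it.
 */
static int __maybe_unused
lpfc_example_read_rev_poll(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);	/* build the READ_REV command */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

	/* Polled: command is complete (or timed out) on return */
	mempool_free(pmb, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}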
8396 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8397 * @phba: Pointer to HBA context object.
8399 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8400 * the driver internal pending mailbox queue. It will then try to wait out the
8401 * possible outstanding mailbox command before returning.
8404 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8405 * the outstanding mailbox command timed out.
8408 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8410 struct lpfc_sli *psli = &phba->sli;
8412 unsigned long timeout = 0;
8414 /* Mark the asynchronous mailbox command posting as blocked */
8415 spin_lock_irq(&phba->hbalock);
8416 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8417 /* Determine how long we might wait for the active mailbox
8418 * command to be gracefully completed by firmware.
8420 if (phba->sli.mbox_active)
8421 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8422 phba->sli.mbox_active) *
8424 spin_unlock_irq(&phba->hbalock);
8426 /* Make sure the mailbox is really active */
8428 lpfc_sli4_process_missed_mbox_completions(phba);
8430 /* Wait for the outstanding mailbox command to complete */
8431 while (phba->sli.mbox_active) {
8432 /* Check active mailbox complete status every 2ms */
8434 if (time_after(jiffies, timeout)) {
8435 /* Timeout, mark the outstanding cmd as not complete */
8441 /* Cannot cleanly block async mailbox command, fail it */
8443 spin_lock_irq(&phba->hbalock);
8444 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8445 spin_unlock_irq(&phba->hbalock);
8451 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8452 * @phba: Pointer to HBA context object.
8454 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8455 * commands from the driver internal pending mailbox queue. It makes sure
8456 * that there is no outstanding mailbox command before resuming posting
8457 * asynchronous mailbox commands. If, for any reason, there is an
8458 * outstanding mailbox command, it will try to wait it out before resuming
8459 * asynchronous mailbox command posting.
8462 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8464 struct lpfc_sli *psli = &phba->sli;
8466 spin_lock_irq(&phba->hbalock);
8467 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8468 /* Asynchronous mailbox posting is not blocked, do nothing */
8469 spin_unlock_irq(&phba->hbalock);
8473 /* The outstanding synchronous mailbox command is guaranteed to be done,
8474 * either successfully or by timeout. After a timeout the outstanding
8475 * mailbox command is always removed, so just unblock posting of async
8476 * mailbox commands and resume.
8478 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8479 spin_unlock_irq(&phba->hbalock);
8481 /* wake up worker thread to post asynchronous mailbox command */
8482 lpfc_worker_wake_up(phba);
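/*
 * Illustrative sketch, not part of the driver: the block/unblock pair
 * above brackets a window in which the worker thread will not post any
 * queued asynchronous mailbox commands. The helper name is
 * hypothetical; the synchronous post itself is elided.
 */
static int __maybe_unused
lpfc_example_sync_mbox_window(struct lpfc_hba *phba)
{
	/* Returns 0 only when no async command is left outstanding */
	if (lpfc_sli4_async_mbox_block(phba))
		return -EBUSY;

	/* ... post a synchronous (polled) mailbox command here ... */

	/* Resume posting from the driver internal pending queue */
	lpfc_sli4_async_mbox_unblock(phba);
	return 0;
}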
8486 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8487 * @phba: Pointer to HBA context object.
8488 * @mboxq: Pointer to mailbox object.
8490 * The function waits for the bootstrap mailbox register ready bit from
8491 * the port for twice the regular mailbox command timeout value.
8493 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8494 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8497 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8500 unsigned long timeout;
8501 struct lpfc_register bmbx_reg;
8503 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8507 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8508 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8512 if (time_after(jiffies, timeout))
8513 return MBXERR_ERROR;
8514 } while (!db_ready);
8520 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8521 * @phba: Pointer to HBA context object.
8522 * @mboxq: Pointer to mailbox object.
8524 * The function posts a mailbox to the port. The mailbox is expected
8525 * to be completely filled in and ready for the port to operate on it.
8526 * This routine executes a synchronous completion operation on the
8527 * mailbox by polling for its completion.
8529 * The caller must not be holding any locks when calling this routine.
8532 * MBX_SUCCESS - mailbox posted successfully
8533 * Any of the MBX error values.
8536 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8538 int rc = MBX_SUCCESS;
8539 unsigned long iflag;
8540 uint32_t mcqe_status;
8542 struct lpfc_sli *psli = &phba->sli;
8543 struct lpfc_mqe *mb = &mboxq->u.mqe;
8544 struct lpfc_bmbx_create *mbox_rgn;
8545 struct dma_address *dma_address;
8548 * Only one mailbox can be active to the bootstrap mailbox region
8549 * at a time and there is no queueing provided.
8551 spin_lock_irqsave(&phba->hbalock, iflag);
8552 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8553 spin_unlock_irqrestore(&phba->hbalock, iflag);
8554 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8555 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8556 "cannot issue Data: x%x x%x\n",
8557 mboxq->vport ? mboxq->vport->vpi : 0,
8558 mboxq->u.mb.mbxCommand,
8559 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8560 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8561 psli->sli_flag, MBX_POLL);
8562 return MBXERR_ERROR;
8564 /* The server grabs the token and owns it until release */
8565 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8566 phba->sli.mbox_active = mboxq;
8567 spin_unlock_irqrestore(&phba->hbalock, iflag);
8569 /* wait for bootstrap mbox register for readiness */
8570 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8574 * Initialize the bootstrap memory region to avoid stale data areas
8575 * in the mailbox post. Then copy the caller's mailbox contents to
8576 * the bmbx mailbox region.
8578 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8579 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8580 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8581 sizeof(struct lpfc_mqe));
8583 /* Post the high mailbox dma address to the port and wait for ready. */
8584 dma_address = &phba->sli4_hba.bmbx.dma_address;
8585 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8587 /* wait for bootstrap mbox register for hi-address write done */
8588 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8592 /* Post the low mailbox dma address to the port. */
8593 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8595 /* wait for bootstrap mbox register for low address write done */
8596 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8601 * Read the CQ to ensure the mailbox has completed.
8602 * If so, update the mailbox status so that the upper layers
8603 * can complete the request normally.
8605 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8606 sizeof(struct lpfc_mqe));
8607 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8608 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8609 sizeof(struct lpfc_mcqe));
8610 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8612 * When the CQE status indicates a failure and the mailbox status
8613 * indicates success then copy the CQE status into the mailbox status
8614 * (and prefix it with x4000).
8616 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8617 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8618 bf_set(lpfc_mqe_status, mb,
8619 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8622 lpfc_sli4_swap_str(phba, mboxq);
8624 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8625 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8626 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8627 " x%x x%x CQ: x%x x%x x%x x%x\n",
8628 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8629 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8630 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8631 bf_get(lpfc_mqe_status, mb),
8632 mb->un.mb_words[0], mb->un.mb_words[1],
8633 mb->un.mb_words[2], mb->un.mb_words[3],
8634 mb->un.mb_words[4], mb->un.mb_words[5],
8635 mb->un.mb_words[6], mb->un.mb_words[7],
8636 mb->un.mb_words[8], mb->un.mb_words[9],
8637 mb->un.mb_words[10], mb->un.mb_words[11],
8638 mb->un.mb_words[12], mboxq->mcqe.word0,
8639 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8640 mboxq->mcqe.trailer);
8642 /* We hold the token, so no lock is needed for the release */
8643 spin_lock_irqsave(&phba->hbalock, iflag);
8644 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8645 phba->sli.mbox_active = NULL;
8646 spin_unlock_irqrestore(&phba->hbalock, iflag);
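/*
 * Illustrative example, not in the driver: how a raw MCQE error status
 * is folded into the MQE status word by the code above. With
 * LPFC_MBX_ERROR_RANGE (0x4000), an MCQE status of 0x2 surfaces to the
 * upper layers as mailbox status 0x4002. The helper name is
 * hypothetical.
 */
static inline uint32_t __maybe_unused
lpfc_example_fold_mcqe_status(uint32_t mcqe_status)
{
	return LPFC_MBX_ERROR_RANGE | mcqe_status;	/* e.g. 0x4002 */
}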
8651 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8652 * @phba: Pointer to HBA context object.
8653 * @pmbox: Pointer to mailbox object.
8654 * @flag: Flag indicating how the mailbox need to be processed.
8656 * This function is called by discovery code and HBA management code to submit
8657 * a mailbox command to firmware with SLI-4 interface spec.
8659 * Return codes: the caller owns the mailbox command after the function returns.
8663 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8666 struct lpfc_sli *psli = &phba->sli;
8667 unsigned long iflags;
8670 /* dump from issue mailbox command if setup */
8671 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8673 rc = lpfc_mbox_dev_check(phba);
8675 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8676 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8677 "cannot issue Data: x%x x%x\n",
8678 mboxq->vport ? mboxq->vport->vpi : 0,
8679 mboxq->u.mb.mbxCommand,
8680 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8681 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8682 psli->sli_flag, flag);
8683 goto out_not_finished;
8686 /* Detect polling mode and jump to a handler */
8687 if (!phba->sli4_hba.intr_enable) {
8688 if (flag == MBX_POLL)
8689 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8692 if (rc != MBX_SUCCESS)
8693 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8694 "(%d):2541 Mailbox command x%x "
8695 "(x%x/x%x) failure: "
8696 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8698 mboxq->vport ? mboxq->vport->vpi : 0,
8699 mboxq->u.mb.mbxCommand,
8700 lpfc_sli_config_mbox_subsys_get(phba,
8702 lpfc_sli_config_mbox_opcode_get(phba,
8704 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8705 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8706 bf_get(lpfc_mcqe_ext_status,
8708 psli->sli_flag, flag);
8710 } else if (flag == MBX_POLL) {
8711 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8712 "(%d):2542 Try to issue mailbox command "
8713 "x%x (x%x/x%x) synchronously ahead of async "
8714 "mailbox command queue: x%x x%x\n",
8715 mboxq->vport ? mboxq->vport->vpi : 0,
8716 mboxq->u.mb.mbxCommand,
8717 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8718 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8719 psli->sli_flag, flag);
8720 /* Try to block the asynchronous mailbox posting */
8721 rc = lpfc_sli4_async_mbox_block(phba);
8723 /* Successfully blocked, now issue sync mbox cmd */
8724 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8725 if (rc != MBX_SUCCESS)
8726 lpfc_printf_log(phba, KERN_WARNING,
8728 "(%d):2597 Sync Mailbox command "
8729 "x%x (x%x/x%x) failure: "
8730 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8732 mboxq->vport ? mboxq->vport->vpi : 0,
8733 mboxq->u.mb.mbxCommand,
8734 lpfc_sli_config_mbox_subsys_get(phba,
8736 lpfc_sli_config_mbox_opcode_get(phba,
8738 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8739 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8740 bf_get(lpfc_mcqe_ext_status,
8742 psli->sli_flag, flag);
8743 /* Unblock the async mailbox posting afterward */
8744 lpfc_sli4_async_mbox_unblock(phba);
8749 /* Now, interrupt mode asynchronous mailbox command */
8750 rc = lpfc_mbox_cmd_check(phba, mboxq);
8752 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8753 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8754 "cannot issue Data: x%x x%x\n",
8755 mboxq->vport ? mboxq->vport->vpi : 0,
8756 mboxq->u.mb.mbxCommand,
8757 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8758 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8759 psli->sli_flag, flag);
8760 goto out_not_finished;
8763 /* Put the mailbox command into the driver internal FIFO */
8764 psli->slistat.mbox_busy++;
8765 spin_lock_irqsave(&phba->hbalock, iflags);
8766 lpfc_mbox_put(phba, mboxq);
8767 spin_unlock_irqrestore(&phba->hbalock, iflags);
8768 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8769 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8770 "x%x (x%x/x%x) x%x x%x x%x\n",
8771 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8772 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8773 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8774 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8775 phba->pport->port_state,
8776 psli->sli_flag, MBX_NOWAIT);
8777 /* Wake up worker thread to transport mailbox command from head */
8778 lpfc_worker_wake_up(phba);
8783 return MBX_NOT_FINISHED;
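/*
 * Illustrative sketch, not part of the driver: issuing a mailbox
 * command asynchronously. The helper name is hypothetical;
 * lpfc_heart_beat() and lpfc_sli_def_mbox_cmpl() are existing driver
 * helpers. With MBX_NOWAIT the driver owns the mailbox until the
 * completion handler runs; only MBX_NOT_FINISHED returns ownership to
 * the caller immediately.
 */
static int __maybe_unused
lpfc_example_issue_async_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmb);		/* build the command */
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; /* frees pmb when done */

	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;	/* queued (MBX_BUSY) or posted (MBX_SUCCESS) */
}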
8787 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8788 * @phba: Pointer to HBA context object.
8790 * This function is called by the worker thread to send a mailbox command to
8791 * SLI4 HBA firmware.
8795 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8797 struct lpfc_sli *psli = &phba->sli;
8798 LPFC_MBOXQ_t *mboxq;
8799 int rc = MBX_SUCCESS;
8800 unsigned long iflags;
8801 struct lpfc_mqe *mqe;
8804 /* Check interrupt mode before posting async mailbox command */
8805 if (unlikely(!phba->sli4_hba.intr_enable))
8806 return MBX_NOT_FINISHED;
8808 /* Check for mailbox command service token */
8809 spin_lock_irqsave(&phba->hbalock, iflags);
8810 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8811 spin_unlock_irqrestore(&phba->hbalock, iflags);
8812 return MBX_NOT_FINISHED;
8814 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8815 spin_unlock_irqrestore(&phba->hbalock, iflags);
8816 return MBX_NOT_FINISHED;
8818 if (unlikely(phba->sli.mbox_active)) {
8819 spin_unlock_irqrestore(&phba->hbalock, iflags);
8820 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8821 "0384 There is pending active mailbox cmd\n");
8822 return MBX_NOT_FINISHED;
8824 /* Take the mailbox command service token */
8825 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8827 /* Get the next mailbox command from head of queue */
8828 mboxq = lpfc_mbox_get(phba);
8830 /* If no more mailbox commands waiting for post, we're done */
8832 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8833 spin_unlock_irqrestore(&phba->hbalock, iflags);
8836 phba->sli.mbox_active = mboxq;
8837 spin_unlock_irqrestore(&phba->hbalock, iflags);
8839 /* Check device readiness for posting mailbox command */
8840 rc = lpfc_mbox_dev_check(phba);
8842 /* Driver clean routine will clean up pending mailbox */
8843 goto out_not_finished;
8845 /* Prepare the mbox command to be posted */
8846 mqe = &mboxq->u.mqe;
8847 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8849 /* Start timer for the mbox_tmo and log some mailbox post messages */
8850 mod_timer(&psli->mbox_tmo, (jiffies +
8851 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8853 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8854 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8856 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8857 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8858 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8859 phba->pport->port_state, psli->sli_flag);
8861 if (mbx_cmnd != MBX_HEARTBEAT) {
8863 lpfc_debugfs_disc_trc(mboxq->vport,
8864 LPFC_DISC_TRC_MBOX_VPORT,
8865 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8866 mbx_cmnd, mqe->un.mb_words[0],
8867 mqe->un.mb_words[1]);
8869 lpfc_debugfs_disc_trc(phba->pport,
8871 "MBOX Send: cmd:x%x mb:x%x x%x",
8872 mbx_cmnd, mqe->un.mb_words[0],
8873 mqe->un.mb_words[1]);
8876 psli->slistat.mbox_cmd++;
8878 /* Post the mailbox command to the port */
8879 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8880 if (rc != MBX_SUCCESS) {
8881 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8882 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8883 "cannot issue Data: x%x x%x\n",
8884 mboxq->vport ? mboxq->vport->vpi : 0,
8885 mboxq->u.mb.mbxCommand,
8886 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8887 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8888 psli->sli_flag, MBX_NOWAIT);
8889 goto out_not_finished;
8895 spin_lock_irqsave(&phba->hbalock, iflags);
8896 if (phba->sli.mbox_active) {
8897 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8898 __lpfc_mbox_cmpl_put(phba, mboxq);
8899 /* Release the token */
8900 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8901 phba->sli.mbox_active = NULL;
8903 spin_unlock_irqrestore(&phba->hbalock, iflags);
8905 return MBX_NOT_FINISHED;
8909 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8910 * @phba: Pointer to HBA context object.
8911 * @pmbox: Pointer to mailbox object.
8912 * @flag: Flag indicating how the mailbox need to be processed.
8914 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine via
8915 * the API jump table function pointer in the lpfc_hba struct.
8917 * Return codes: the caller owns the mailbox command after the function returns.
8921 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8923 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8927 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8928 * @phba: The hba struct for which this call is being executed.
8929 * @dev_grp: The HBA PCI-Device group number.
8931 * This routine sets up the mbox interface API function jump table in the @phba struct.
8933 * Returns: 0 - success, -ENODEV - failure.
8936 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8940 case LPFC_PCI_DEV_LP:
8941 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8942 phba->lpfc_sli_handle_slow_ring_event =
8943 lpfc_sli_handle_slow_ring_event_s3;
8944 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8945 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8946 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8948 case LPFC_PCI_DEV_OC:
8949 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8950 phba->lpfc_sli_handle_slow_ring_event =
8951 lpfc_sli_handle_slow_ring_event_s4;
8952 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8953 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8954 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8958 "1420 Invalid HBA PCI-device group: 0x%x\n",
8967 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8968 * @phba: Pointer to HBA context object.
8969 * @pring: Pointer to driver SLI ring object.
8970 * @piocb: Pointer to address of newly added command iocb.
8972 * This function is called with hbalock held to add a command
8973 * iocb to the txq when the SLI layer cannot submit the command iocb to the ring.
8977 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8978 struct lpfc_iocbq *piocb)
8980 lockdep_assert_held(&phba->hbalock);
8981 /* Insert the caller's iocb in the txq tail for later processing. */
8982 list_add_tail(&piocb->list, &pring->txq);
8986 * lpfc_sli_next_iocb - Get the next iocb in the txq
8987 * @phba: Pointer to HBA context object.
8988 * @pring: Pointer to driver SLI ring object.
8989 * @piocb: Pointer to address of newly added command iocb.
8991 * This function is called with hbalock held before a new
8992 * iocb is submitted to the firmware. This function checks
8993 * the txq to flush any queued iocbs to the firmware before
8994 * submitting new iocbs to the firmware.
8995 * If there are iocbs in the txq which need to be submitted
8996 * to firmware, lpfc_sli_next_iocb returns the first element
8997 * of the txq after dequeuing it from txq.
8998 * If there is no iocb in the txq then the function will return
8999 * *piocb and *piocb is set to NULL. Caller needs to check
9000 * *piocb to find if there are more commands in the txq.
9002 static struct lpfc_iocbq *
9003 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9004 struct lpfc_iocbq **piocb)
9006 struct lpfc_iocbq * nextiocb;
9008 lockdep_assert_held(&phba->hbalock);
9010 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9020 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9021 * @phba: Pointer to HBA context object.
9022 * @ring_number: SLI ring number to issue iocb on.
9023 * @piocb: Pointer to command iocb.
9024 * @flag: Flag indicating if this command can be put into txq.
9026 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9027 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9028 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9029 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9030 * this function allows only iocbs for posting buffers. This function finds
9031 * next available slot in the command ring and posts the command to the
9032 * available slot and writes the port attention register to request HBA start
9033 * processing new iocb. If there is no slot available in the ring and
9034 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9035 * the function returns IOCB_BUSY.
9037 * This function is called with hbalock held. The function will return success
9038 * after it successfully submits the iocb to firmware or after adding it to the
9042 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9043 struct lpfc_iocbq *piocb, uint32_t flag)
9045 struct lpfc_iocbq *nextiocb;
9047 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9049 lockdep_assert_held(&phba->hbalock);
9051 if (piocb->iocb_cmpl && (!piocb->vport) &&
9052 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9053 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9054 lpfc_printf_log(phba, KERN_ERR,
9055 LOG_SLI | LOG_VPORT,
9056 "1807 IOCB x%x failed. No vport\n",
9057 piocb->iocb.ulpCommand);
9063 /* If the PCI channel is in offline state, do not post iocbs. */
9064 if (unlikely(pci_channel_offline(phba->pcidev)))
9067 /* If HBA has a deferred error attention, fail the iocb. */
9068 if (unlikely(phba->hba_flag & DEFER_ERATT))
9072 * We should never get an IOCB if we are in a < LINK_DOWN state
9074 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9078 * Check to see if we are blocking IOCB processing because of an
9079 * outstanding event.
9081 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9084 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9086 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9087 * can be issued if the link is not up.
9089 switch (piocb->iocb.ulpCommand) {
9090 case CMD_GEN_REQUEST64_CR:
9091 case CMD_GEN_REQUEST64_CX:
9092 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9093 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9094 FC_RCTL_DD_UNSOL_CMD) ||
9095 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9096 MENLO_TRANSPORT_TYPE))
9100 case CMD_QUE_RING_BUF_CN:
9101 case CMD_QUE_RING_BUF64_CN:
9103 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9104 * completion, iocb_cmpl MUST be 0.
9106 if (piocb->iocb_cmpl)
9107 piocb->iocb_cmpl = NULL;
9109 case CMD_CREATE_XRI_CR:
9110 case CMD_CLOSE_XRI_CN:
9111 case CMD_CLOSE_XRI_CX:
9118 * For FCP commands, we must be in a state where we can process link attention events.
9121 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9122 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9126 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9127 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9128 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9131 lpfc_sli_update_ring(phba, pring);
9133 lpfc_sli_update_full_ring(phba, pring);
9136 return IOCB_SUCCESS;
9141 pring->stats.iocb_cmd_delay++;
9145 if (!(flag & SLI_IOCB_RET_IOCB)) {
9146 __lpfc_sli_ringtx_put(phba, pring, piocb);
9147 return IOCB_SUCCESS;
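/*
 * Illustrative sketch, not part of the driver: the lockless _s3 issue
 * path must be entered with hbalock held. The helper name is
 * hypothetical; passing SLI_IOCB_RET_IOCB asks for IOCB_BUSY back
 * instead of queueing to the txq when the ring is full.
 */
static int __maybe_unused
lpfc_example_issue_iocb_s3(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				   SLI_IOCB_RET_IOCB);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc;
}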
9154 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9155 * @phba: Pointer to HBA context object.
9156 * @piocb: Pointer to command iocb.
9157 * @sglq: Pointer to the scatter gather queue object.
9159 * This routine converts the bpl or bde that is in the IOCB
9160 * to a sgl list for the sli4 hardware. The physical address
9161 * of the bpl/bde is converted back to a virtual address.
9162 * If the IOCB contains a BPL then the list of BDE's is
9163 * converted to sli4_sge's. If the IOCB contains a single
9164 * BDE then it is converted to a single sli4_sge.
9165 * The IOCB is still in cpu endianness so the contents of
9166 * the bpl can be used without byte swapping.
9168 * Returns valid XRI = Success, NO_XRI = Failure.
9171 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9172 struct lpfc_sglq *sglq)
9174 uint16_t xritag = NO_XRI;
9175 struct ulp_bde64 *bpl = NULL;
9176 struct ulp_bde64 bde;
9177 struct sli4_sge *sgl = NULL;
9178 struct lpfc_dmabuf *dmabuf;
9182 uint32_t offset = 0; /* accumulated offset in the sg request list */
9183 int inbound = 0; /* number of sg reply entries inbound from firmware */
9185 if (!piocbq || !sglq)
9188 sgl = (struct sli4_sge *)sglq->sgl;
9189 icmd = &piocbq->iocb;
9190 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9191 return sglq->sli4_xritag;
9192 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9193 numBdes = icmd->un.genreq64.bdl.bdeSize /
9194 sizeof(struct ulp_bde64);
9195 /* The addrHigh and addrLow fields within the IOCB
9196 * have not been byteswapped yet so there is no
9197 * need to swap them back.
9199 if (piocbq->context3)
9200 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9204 bpl = (struct ulp_bde64 *)dmabuf->virt;
9208 for (i = 0; i < numBdes; i++) {
9209 /* Should already be byte swapped. */
9210 sgl->addr_hi = bpl->addrHigh;
9211 sgl->addr_lo = bpl->addrLow;
9213 sgl->word2 = le32_to_cpu(sgl->word2);
9214 if ((i+1) == numBdes)
9215 bf_set(lpfc_sli4_sge_last, sgl, 1);
9217 bf_set(lpfc_sli4_sge_last, sgl, 0);
9218 /* swap the size field back to the cpu so we
9219 * can assign it to the sgl.
9221 bde.tus.w = le32_to_cpu(bpl->tus.w);
9222 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9223 /* The offsets in the sgl need to be accumulated
9224 * separately for the request and reply lists.
9225 * The request is always first, the reply follows.
9227 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9228 /* add up the reply sg entries */
9229 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9231 /* first inbound? reset the offset */
9234 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9235 bf_set(lpfc_sli4_sge_type, sgl,
9236 LPFC_SGE_TYPE_DATA);
9237 offset += bde.tus.f.bdeSize;
9239 sgl->word2 = cpu_to_le32(sgl->word2);
9243 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9244 /* The addrHigh and addrLow fields of the BDE have not
9245 * been byteswapped yet so they need to be swapped
9246 * before putting them in the sgl.
9249 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9251 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9252 sgl->word2 = le32_to_cpu(sgl->word2);
9253 bf_set(lpfc_sli4_sge_last, sgl, 1);
9254 sgl->word2 = cpu_to_le32(sgl->word2);
9256 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9258 return sglq->sli4_xritag;
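/*
 * Illustrative sketch, not part of the driver: the per-BDE conversion
 * performed by the loop above. Address words copy straight across
 * (both sides are little-endian), while word2 and the size field must
 * round-trip through CPU byte order so bf_set() can operate on them.
 * The helper name is hypothetical.
 */
static void __maybe_unused
lpfc_example_bde_to_sge(struct ulp_bde64 *bpl, struct sli4_sge *sgl,
			int last)
{
	struct ulp_bde64 bde;

	sgl->addr_hi = bpl->addrHigh;
	sgl->addr_lo = bpl->addrLow;
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, last);
	sgl->word2 = cpu_to_le32(sgl->word2);
	bde.tus.w = le32_to_cpu(bpl->tus.w);
	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
}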
9262 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9263 * @phba: Pointer to HBA context object.
9264 * @piocb: Pointer to command iocb.
9265 * @wqe: Pointer to the work queue entry.
9267 * This routine converts the iocb command to its Work Queue Entry
9268 * equivalent. The wqe pointer should not have any fields set when
9269 * this routine is called because it will memcpy over them.
9270 * This routine does not set the CQ_ID or the WQEC bits in the
9273 * Returns: 0 = Success, IOCB_ERROR = Failure.
9276 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9277 union lpfc_wqe128 *wqe)
9279 uint32_t xmit_len = 0, total_len = 0;
9283 uint8_t command_type = ELS_COMMAND_NON_FIP;
9286 uint16_t abrt_iotag;
9287 struct lpfc_iocbq *abrtiocbq;
9288 struct ulp_bde64 *bpl = NULL;
9289 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9291 struct ulp_bde64 bde;
9292 struct lpfc_nodelist *ndlp;
9296 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9297 /* The fcp commands will set command type */
9298 if (iocbq->iocb_flag & LPFC_IO_FCP)
9299 command_type = FCP_COMMAND;
9300 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9301 command_type = ELS_COMMAND_FIP;
9303 command_type = ELS_COMMAND_NON_FIP;
9305 if (phba->fcp_embed_io)
9306 memset(wqe, 0, sizeof(union lpfc_wqe128));
9307 /* Some of the fields are in the right position already */
9308 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9309 /* The ct field has moved so reset */
9310 wqe->generic.wqe_com.word7 = 0;
9311 wqe->generic.wqe_com.word10 = 0;
9313 abort_tag = (uint32_t) iocbq->iotag;
9314 xritag = iocbq->sli4_xritag;
9315 /* words0-2 bpl convert bde */
9316 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9317 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9318 sizeof(struct ulp_bde64);
9319 bpl = (struct ulp_bde64 *)
9320 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9324 /* Should already be byte swapped. */
9325 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9326 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9327 /* swap the size field back to the cpu so we
9328 * can assign it to the sgl.
9330 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9331 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9333 for (i = 0; i < numBdes; i++) {
9334 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9335 total_len += bde.tus.f.bdeSize;
9338 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9340 iocbq->iocb.ulpIoTag = iocbq->iotag;
9341 cmnd = iocbq->iocb.ulpCommand;
9343 switch (iocbq->iocb.ulpCommand) {
9344 case CMD_ELS_REQUEST64_CR:
9345 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9346 ndlp = iocbq->context_un.ndlp;
9348 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9349 if (!iocbq->iocb.ulpLe) {
9350 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9351 "2007 Only Limited Edition cmd Format"
9352 " supported 0x%x\n",
9353 iocbq->iocb.ulpCommand);
9357 wqe->els_req.payload_len = xmit_len;
9358 /* Els_request64 has a TMO */
9359 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9360 iocbq->iocb.ulpTimeout);
9361 /* Need a VF for word 4 set the vf bit*/
9362 bf_set(els_req64_vf, &wqe->els_req, 0);
9363 /* And a VFID for word 12 */
9364 bf_set(els_req64_vfid, &wqe->els_req, 0);
9365 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9366 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9367 iocbq->iocb.ulpContext);
9368 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9369 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9370 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9371 if (command_type == ELS_COMMAND_FIP)
9372 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9373 >> LPFC_FIP_ELS_ID_SHIFT);
9374 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9375 iocbq->context2)->virt);
9376 if_type = bf_get(lpfc_sli_intf_if_type,
9377 &phba->sli4_hba.sli_intf);
9378 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9379 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9380 *pcmd == ELS_CMD_SCR ||
9381 *pcmd == ELS_CMD_RSCN_XMT ||
9382 *pcmd == ELS_CMD_FDISC ||
9383 *pcmd == ELS_CMD_LOGO ||
9384 *pcmd == ELS_CMD_PLOGI)) {
9385 bf_set(els_req64_sp, &wqe->els_req, 1);
9386 bf_set(els_req64_sid, &wqe->els_req,
9387 iocbq->vport->fc_myDID);
9388 if ((*pcmd == ELS_CMD_FLOGI) &&
9389 !(phba->fc_topology ==
9390 LPFC_TOPOLOGY_LOOP))
9391 bf_set(els_req64_sid, &wqe->els_req, 0);
9392 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9393 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9394 phba->vpi_ids[iocbq->vport->vpi]);
9395 } else if (pcmd && iocbq->context1) {
9396 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9397 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9398 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9401 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9402 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9403 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9404 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9405 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9406 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9407 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9408 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9409 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9411 case CMD_XMIT_SEQUENCE64_CX:
9412 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9413 iocbq->iocb.un.ulpWord[3]);
9414 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9415 iocbq->iocb.unsli3.rcvsli3.ox_id);
9416 /* The entire sequence is transmitted for this IOCB */
9417 xmit_len = total_len;
9418 cmnd = CMD_XMIT_SEQUENCE64_CR;
9419 if (phba->link_flag & LS_LOOPBACK_MODE)
9420 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9422 case CMD_XMIT_SEQUENCE64_CR:
9423 /* word3 iocb=io_tag32 wqe=reserved */
9424 wqe->xmit_sequence.rsvd3 = 0;
9425 /* word4 relative_offset memcpy */
9426 /* word5 r_ctl/df_ctl memcpy */
9427 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9428 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9429 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9430 LPFC_WQE_IOD_WRITE);
9431 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9432 LPFC_WQE_LENLOC_WORD12);
9433 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9434 wqe->xmit_sequence.xmit_len = xmit_len;
9435 command_type = OTHER_COMMAND;
9437 case CMD_XMIT_BCAST64_CN:
9438 /* word3 iocb=iotag32 wqe=seq_payload_len */
9439 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9440 /* word4 iocb=rsvd wqe=rsvd */
9441 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9442 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9443 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9444 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9445 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9446 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9447 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9448 LPFC_WQE_LENLOC_WORD3);
9449 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9451 case CMD_FCP_IWRITE64_CR:
9452 command_type = FCP_COMMAND_DATA_OUT;
9453 /* word3 iocb=iotag wqe=payload_offset_len */
9454 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9455 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9456 xmit_len + sizeof(struct fcp_rsp));
9457 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9459 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9460 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9461 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9462 iocbq->iocb.ulpFCP2Rcvy);
9463 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9464 /* Always open the exchange */
9465 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9466 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9467 LPFC_WQE_LENLOC_WORD4);
9468 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9469 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9470 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9471 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9472 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9473 if (iocbq->priority) {
9474 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9475 (iocbq->priority << 1));
9477 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9478 (phba->cfg_XLanePriority << 1));
9481 /* Note, word 10 is already initialized to 0 */
9483 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9484 if (phba->cfg_enable_pbde)
9485 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9487 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9489 if (phba->fcp_embed_io) {
9490 struct lpfc_io_buf *lpfc_cmd;
9491 struct sli4_sge *sgl;
9492 struct fcp_cmnd *fcp_cmnd;
9495 /* 128 byte wqe support here */
9497 lpfc_cmd = iocbq->context1;
9498 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9499 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9501 /* Word 0-2 - FCP_CMND */
9502 wqe->generic.bde.tus.f.bdeFlags =
9503 BUFF_TYPE_BDE_IMMED;
9504 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9505 wqe->generic.bde.addrHigh = 0;
9506 wqe->generic.bde.addrLow = 88; /* Word 22 */
9508 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9509 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9511 /* Word 22-29 FCP CMND Payload */
9512 ptr = &wqe->words[22];
9513 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9516 case CMD_FCP_IREAD64_CR:
9517 /* word3 iocb=iotag wqe=payload_offset_len */
9518 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9519 bf_set(payload_offset_len, &wqe->fcp_iread,
9520 xmit_len + sizeof(struct fcp_rsp));
9521 bf_set(cmd_buff_len, &wqe->fcp_iread,
9523 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9524 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9525 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9526 iocbq->iocb.ulpFCP2Rcvy);
9527 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9528 /* Always open the exchange */
9529 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9530 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9531 LPFC_WQE_LENLOC_WORD4);
9532 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9533 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9534 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9535 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9536 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9537 if (iocbq->priority) {
9538 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9539 (iocbq->priority << 1));
9541 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9542 (phba->cfg_XLanePriority << 1));
9545 /* Note, word 10 is already initialized to 0 */
9547 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9548 if (phba->cfg_enable_pbde)
9549 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9551 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9553 if (phba->fcp_embed_io) {
9554 struct lpfc_io_buf *lpfc_cmd;
9555 struct sli4_sge *sgl;
9556 struct fcp_cmnd *fcp_cmnd;
9559 /* 128 byte wqe support here */
9561 lpfc_cmd = iocbq->context1;
9562 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9563 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9565 /* Word 0-2 - FCP_CMND */
9566 wqe->generic.bde.tus.f.bdeFlags =
9567 BUFF_TYPE_BDE_IMMED;
9568 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9569 wqe->generic.bde.addrHigh = 0;
9570 wqe->generic.bde.addrLow = 88; /* Word 22 */
9572 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9573 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9575 /* Word 22-29 FCP CMND Payload */
9576 ptr = &wqe->words[22];
9577 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9580 case CMD_FCP_ICMND64_CR:
9581 /* word3 iocb=iotag wqe=payload_offset_len */
9582 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9583 bf_set(payload_offset_len, &wqe->fcp_icmd,
9584 xmit_len + sizeof(struct fcp_rsp));
9585 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9587 /* word3 iocb=IO_TAG wqe=reserved */
9588 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9589 /* Always open the exchange */
9590 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9591 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9592 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9593 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9594 LPFC_WQE_LENLOC_NONE);
9595 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9596 iocbq->iocb.ulpFCP2Rcvy);
9597 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9598 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9599 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9600 if (iocbq->priority) {
9601 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9602 (iocbq->priority << 1));
9604 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9605 (phba->cfg_XLanePriority << 1));
9608 /* Note, word 10 is already initialized to 0 */
9610 if (phba->fcp_embed_io) {
9611 struct lpfc_io_buf *lpfc_cmd;
9612 struct sli4_sge *sgl;
9613 struct fcp_cmnd *fcp_cmnd;
9616 /* 128 byte wqe support here */
9618 lpfc_cmd = iocbq->context1;
9619 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9620 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9622 /* Word 0-2 - FCP_CMND */
9623 wqe->generic.bde.tus.f.bdeFlags =
9624 BUFF_TYPE_BDE_IMMED;
9625 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9626 wqe->generic.bde.addrHigh = 0;
9627 wqe->generic.bde.addrLow = 88; /* Word 22 */
9629 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9630 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9632 /* Word 22-29 FCP CMND Payload */
9633 ptr = &wqe->words[22];
9634 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9637 case CMD_GEN_REQUEST64_CR:
9638 /* For this command calculate the xmit length of the request bde. */
9642 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9643 sizeof(struct ulp_bde64);
9644 for (i = 0; i < numBdes; i++) {
9645 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9646 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9648 xmit_len += bde.tus.f.bdeSize;
9650 /* word3 iocb=IO_TAG wqe=request_payload_len */
9651 wqe->gen_req.request_payload_len = xmit_len;
9652 /* word4 iocb=parameter wqe=relative_offset memcpy */
9653 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9654 /* word6 context tag copied in memcpy */
9655 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9656 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9657 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9658 "2015 Invalid CT %x command 0x%x\n",
9659 ct, iocbq->iocb.ulpCommand);
9662 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9663 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9664 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9665 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9666 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9667 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9668 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9669 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9670 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9671 command_type = OTHER_COMMAND;
9673 case CMD_XMIT_ELS_RSP64_CX:
9674 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9675 /* words0-2 BDE memcpy */
9676 /* word3 iocb=iotag32 wqe=response_payload_len */
9677 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9679 wqe->xmit_els_rsp.word4 = 0;
9680 /* word5 iocb=rsvd wqe=did */
9681 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9682 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9684 if_type = bf_get(lpfc_sli_intf_if_type,
9685 &phba->sli4_hba.sli_intf);
9686 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9687 if (iocbq->vport->fc_flag & FC_PT2PT) {
9688 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9689 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9690 iocbq->vport->fc_myDID);
9691 if (iocbq->vport->fc_myDID == Fabric_DID) {
9693 &wqe->xmit_els_rsp.wqe_dest, 0);
9697 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9698 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9699 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9700 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9701 iocbq->iocb.unsli3.rcvsli3.ox_id);
9702 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9703 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9704 phba->vpi_ids[iocbq->vport->vpi]);
9705 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9706 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9707 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9708 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9709 LPFC_WQE_LENLOC_WORD3);
9710 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9711 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9712 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9713 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9714 iocbq->context2)->virt);
9715 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9716 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9717 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9718 iocbq->vport->fc_myDID);
9719 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9720 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9721 phba->vpi_ids[phba->pport->vpi]);
9723 command_type = OTHER_COMMAND;
9725 case CMD_CLOSE_XRI_CN:
9726 case CMD_ABORT_XRI_CN:
9727 case CMD_ABORT_XRI_CX:
9728 /* words 0-2 memcpy should be 0 (reserved) */
9729 /* port will send abts */
9730 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9731 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9732 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9733 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9737 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9739 * The link is down, or the command was ELS_FIP
9740 * so the fw does not need to send abts
9743 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9745 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9746 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9747 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9748 wqe->abort_cmd.rsrvd5 = 0;
9749 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9750 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9751 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9753 * The abort handler will send us CMD_ABORT_XRI_CN or
9754 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9756 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9757 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9758 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9759 LPFC_WQE_LENLOC_NONE);
9760 cmnd = CMD_ABORT_XRI_CX;
9761 command_type = OTHER_COMMAND;
9764 case CMD_XMIT_BLS_RSP64_CX:
9765 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9766 /* As BLS ABTS RSP WQE is very different from other WQEs,
9767 * we re-construct this WQE here based on information in
9768 * iocbq from scratch.
9770 memset(wqe, 0, sizeof(*wqe));
9771 /* OX_ID is the same regardless of who sent the ABTS to the CT exchange */
9772 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9773 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9774 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9775 LPFC_ABTS_UNSOL_INT) {
9776 /* ABTS sent by initiator to CT exchange, the
9777 * RX_ID field will be filled with the newly
9778 * allocated responder XRI.
9780 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9781 iocbq->sli4_xritag);
9783 /* ABTS sent by responder to CT exchange, the
9784 * RX_ID field will be filled with the responder
9787 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9788 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9790 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9791 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9794 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9796 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9797 iocbq->iocb.ulpContext);
9798 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9799 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9800 phba->vpi_ids[phba->pport->vpi]);
9801 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9802 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9803 LPFC_WQE_LENLOC_NONE);
9804 /* Overwrite the pre-set command type with OTHER_COMMAND */
9805 command_type = OTHER_COMMAND;
9806 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9807 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9808 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9809 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9810 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9811 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9812 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9816 case CMD_SEND_FRAME:
9817 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
9818 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
9819 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
9820 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
9821 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
9822 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
9823 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
9824 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
9825 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9826 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9827 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9829 case CMD_XRI_ABORTED_CX:
9830 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9831 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9832 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9833 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9834 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9836 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9837 "2014 Invalid command 0x%x\n",
9838 iocbq->iocb.ulpCommand);
9843 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9844 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9845 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9846 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9847 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9848 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9849 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9850 LPFC_IO_DIF_INSERT);
9851 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9852 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9853 wqe->generic.wqe_com.abort_tag = abort_tag;
9854 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9855 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9856 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9857 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
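/*
 * Illustrative sketch, not part of the driver: with fcp_embed_io the
 * FCP_CMND payload is embedded directly in WQE words 22-29, and words
 * 0-2 become an immediate BDE whose addrLow (88) is the byte offset of
 * word 22, as done in the FCP cases above. The helper name is
 * hypothetical.
 */
static void __maybe_unused
lpfc_example_embed_fcp_cmnd(union lpfc_wqe128 *wqe,
			    struct fcp_cmnd *fcp_cmnd, uint32_t len)
{
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
	wqe->generic.bde.tus.f.bdeSize = len;
	wqe->generic.bde.addrHigh = 0;
	wqe->generic.bde.addrLow = 88;		/* 22 words * 4 bytes */
	memcpy(&wqe->words[22], fcp_cmnd, sizeof(struct fcp_cmnd));
}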
9862 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9863 * @phba: Pointer to HBA context object.
9864 * @ring_number: SLI ring number to issue iocb on.
9865 * @piocb: Pointer to command iocb.
9866 * @flag: Flag indicating if this command can be put into txq.
9868 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9869 * an iocb command to an HBA with SLI-4 interface spec.
9871 * This function is called with hbalock held. The function will return success
9872 * after it successfully submits the iocb to firmware or after adding it to the
9876 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9877 struct lpfc_iocbq *piocb, uint32_t flag)
9879 struct lpfc_sglq *sglq;
9880 union lpfc_wqe128 wqe;
9881 struct lpfc_queue *wq;
9882 struct lpfc_sli_ring *pring;
9885 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9886 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9887 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
9889 wq = phba->sli4_hba.els_wq;
9892 /* Get corresponding ring */
9896 * The WQE can be either 64 or 128 bytes,
9899 lockdep_assert_held(&pring->ring_lock);
9901 if (piocb->sli4_xritag == NO_XRI) {
9902 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9903 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9906 if (!list_empty(&pring->txq)) {
9907 if (!(flag & SLI_IOCB_RET_IOCB)) {
9908 __lpfc_sli_ringtx_put(phba,
9910 return IOCB_SUCCESS;
9915 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9917 if (!(flag & SLI_IOCB_RET_IOCB)) {
9918 __lpfc_sli_ringtx_put(phba,
9921 return IOCB_SUCCESS;
9927 } else if (piocb->iocb_flag & LPFC_IO_FCP)
9928 /* These IOs already have an XRI and a mapped sgl. */
9932 * This is a continuation of a command (CX), so this
9933 * sglq is on the active list
9935 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9941 piocb->sli4_lxritag = sglq->sli4_lxritag;
9942 piocb->sli4_xritag = sglq->sli4_xritag;
9943 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9947 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9950 if (lpfc_sli4_wq_put(wq, &wqe))
9952 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9958 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9960 * This routine wraps the actual lockless IOCB issuing function, using the function
9961 * pointer from the lpfc_hba struct.
9964 * IOCB_ERROR - Error
9965 * IOCB_SUCCESS - Success
9969 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9970 struct lpfc_iocbq *piocb, uint32_t flag)
9972 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9976 * lpfc_sli_api_table_setup - Set up sli api function jump table
9977 * @phba: The hba struct for which this call is being executed.
9978 * @dev_grp: The HBA PCI-Device group number.
9980 * This routine sets up the SLI interface API function jump table in @phba
9982 * Returns: 0 - success, -ENODEV - failure.
9985 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9989 case LPFC_PCI_DEV_LP:
9990 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9991 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9993 case LPFC_PCI_DEV_OC:
9994 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9995 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9998 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9999 "1419 Invalid HBA PCI-device group: 0x%x\n",
10004 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10009 * lpfc_sli4_calc_ring - Calculates which ring to use
10010 * @phba: Pointer to HBA context object.
10011 * @piocb: Pointer to command iocb.
10013 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10014 * hba_wqidx, thus we need to calculate the corresponding ring.
10015 * Since ABORTS must go on the same WQ as the command they are
10016 * aborting, we use command's hba_wqidx.
10018 struct lpfc_sli_ring *
10019 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10021 struct lpfc_io_buf *lpfc_cmd;
10023 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10024 if (unlikely(!phba->sli4_hba.hdwq))
10027 * for abort iocb hba_wqidx should already
10028 * be setup based on what work queue we used.
10030 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10031 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10032 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10034 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10036 if (unlikely(!phba->sli4_hba.els_wq))
10038 piocb->hba_wqidx = 0;
10039 return phba->sli4_hba.els_wq->pring;
10044 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10045 * @phba: Pointer to HBA context object.
10046 * @pring: Pointer to driver SLI ring object.
10047 * @piocb: Pointer to command iocb.
10048 * @flag: Flag indicating if this command can be put into txq.
10050 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10051 * function. This function gets the hbalock and calls
10052 * __lpfc_sli_issue_iocb function and will return the error returned
10053 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10054 * functions which do not hold hbalock.
10057 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10058 struct lpfc_iocbq *piocb, uint32_t flag)
10060 struct lpfc_sli_ring *pring;
10061 struct lpfc_queue *eq;
10062 unsigned long iflags;
10065 if (phba->sli_rev == LPFC_SLI_REV4) {
10066 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10068 pring = lpfc_sli4_calc_ring(phba, piocb);
10069 if (unlikely(pring == NULL))
10072 spin_lock_irqsave(&pring->ring_lock, iflags);
10073 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10074 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10076 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10078 /* For now, SLI2/3 will still use hbalock */
10079 spin_lock_irqsave(&phba->hbalock, iflags);
10080 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10081 spin_unlock_irqrestore(&phba->hbalock, iflags);
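/*
 * Illustrative usage sketch (hypothetical caller, not part of the
 * driver): submit a prepared command iocb from a context that holds no
 * locks. The wrapper above acquires the ring_lock (SLI4) or hbalock
 * (SLI2/3) and returns IOCB_SUCCESS, IOCB_BUSY or IOCB_ERROR.
 */
#if 0	/* illustrative only */
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
	if (rc == IOCB_ERROR)
		lpfc_sli_release_iocbq(phba, piocb);	/* submit failed */
#endif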
10087 * lpfc_extra_ring_setup - Extra ring setup function
10088 * @phba: Pointer to HBA context object.
10090 * This function is called while the driver attaches to the
10091 * HBA to set up the extra ring. The extra ring is used
10092 * only when the driver needs to support target mode functionality
10093 * or IP over FC functionality.
10095 * This function is called with no lock held. SLI3 only.
10098 lpfc_extra_ring_setup( struct lpfc_hba *phba)
10100 struct lpfc_sli *psli;
10101 struct lpfc_sli_ring *pring;
10105 /* Adjust cmd/rsp ring iocb entries more evenly */
10107 /* Take some away from the FCP ring */
10108 pring = &psli->sli3_ring[LPFC_FCP_RING];
10109 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10110 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10111 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10112 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10114 /* and give them to the extra ring */
10115 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10117 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10118 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10119 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10120 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10122 /* Setup default profile for this ring */
10123 pring->iotag_max = 4096;
10124 pring->num_mask = 1;
10125 pring->prt[0].profile = 0; /* Mask 0 */
10126 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10127 pring->prt[0].type = phba->cfg_multi_ring_type;
10128 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10132 /** lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10133 * @phba: Pointer to HBA context object.
10134 * @iocbq: Pointer to iocb object.
10136 * The async_event handler calls this routine when it receives
10137 * an ASYNC_STATUS_CN event from the port. The port generates
10138 * this event when an Abort Sequence request to an rport fails
10139 * twice in succession. The abort could be originated by the
10140 * driver or by the port. The ABTS could have been for an ELS
10141 * or FCP IO. The port only generates this event when an ABTS
10142 * fails to complete after one retry.
10145 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10146 struct lpfc_iocbq *iocbq)
10148 struct lpfc_nodelist *ndlp = NULL;
10149 uint16_t rpi = 0, vpi = 0;
10150 struct lpfc_vport *vport = NULL;
10152 /* The rpi in the ulpContext is vport-sensitive. */
10153 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10154 rpi = iocbq->iocb.ulpContext;
10156 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10157 "3092 Port generated ABTS async event "
10158 "on vpi %d rpi %d status 0x%x\n",
10159 vpi, rpi, iocbq->iocb.ulpStatus);
10161 vport = lpfc_find_vport_by_vpid(phba, vpi);
10164 ndlp = lpfc_findnode_rpi(vport, rpi);
10165 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10168 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10169 lpfc_sli_abts_recover_port(vport, ndlp);
10173 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10174 "3095 Event Context not found, no "
10175 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10176 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10180 /** lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10181 * @phba: pointer to HBA context object.
10182 * @ndlp: nodelist pointer for the impacted rport.
10183 * @axri: pointer to the wcqe containing the failed exchange.
10185 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10186 * port. The port generates this event when an abort exchange request to an
10187 * rport fails twice in succession with no reply. The abort could be originated
10188 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10191 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10192 struct lpfc_nodelist *ndlp,
10193 struct sli4_wcqe_xri_aborted *axri)
10195 struct lpfc_vport *vport;
10196 uint32_t ext_status = 0;
10198 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10199 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10200 "3115 Node Context not found, driver "
10201 "ignoring abts err event\n");
10205 vport = ndlp->vport;
10206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10207 "3116 Port generated FCP XRI ABORT event on "
10208 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10209 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10210 bf_get(lpfc_wcqe_xa_xri, axri),
10211 bf_get(lpfc_wcqe_xa_status, axri),
10215 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10216 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10217 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10219 ext_status = axri->parameter & IOERR_PARAM_MASK;
10220 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10221 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10222 lpfc_sli_abts_recover_port(vport, ndlp);
10226 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10227 * @phba: Pointer to HBA context object.
10228 * @pring: Pointer to driver SLI ring object.
10229 * @iocbq: Pointer to iocb object.
10231 * This function is called by the slow ring event handler
10232 * function when there is an ASYNC event iocb in the ring.
10233 * This function is called with no lock held.
10234 * Currently this function handles only temperature related
10235 * ASYNC events. The function decodes the temperature sensor
10236 * event message and posts events for the management applications.
10239 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10240 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10244 struct temp_event temp_event_data;
10245 struct Scsi_Host *shost;
10248 icmd = &iocbq->iocb;
10249 evt_code = icmd->un.asyncstat.evt_code;
10251 switch (evt_code) {
10252 case ASYNC_TEMP_WARN:
10253 case ASYNC_TEMP_SAFE:
10254 temp_event_data.data = (uint32_t) icmd->ulpContext;
10255 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10256 if (evt_code == ASYNC_TEMP_WARN) {
10257 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10258 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10259 "0347 Adapter is very hot, please take "
10260 "corrective action. temperature : %d Celsius\n",
10261 (uint32_t) icmd->ulpContext);
10263 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10264 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10265 "0340 Adapter temperature is OK now. "
10266 "temperature : %d Celsius\n",
10267 (uint32_t) icmd->ulpContext);
10270 /* Send temperature change event to applications */
10271 shost = lpfc_shost_from_vport(phba->pport);
10272 fc_host_post_vendor_event(shost, fc_get_event_number(),
10273 sizeof(temp_event_data), (char *) &temp_event_data,
10274 LPFC_NL_VENDOR_ID);
10276 case ASYNC_STATUS_CN:
10277 lpfc_sli_abts_err_handler(phba, iocbq);
10280 iocb_w = (uint32_t *) icmd;
10281 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10282 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10284 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10285 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10286 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10287 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10288 pring->ringno, icmd->un.asyncstat.evt_code,
10289 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10290 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10291 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10292 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10300 * lpfc_sli4_setup - SLI ring setup function
10301 * @phba: Pointer to HBA context object.
10303 * lpfc_sli4_setup sets up rings of the SLI interface with
10304 * the number of iocbs per ring and iotags. This function is
10305 * called while the driver attaches to the HBA and before
10306 * interrupts are enabled, so there is no need for locking.
10308 * This function always returns 0.
10311 lpfc_sli4_setup(struct lpfc_hba *phba)
10313 struct lpfc_sli_ring *pring;
10315 pring = phba->sli4_hba.els_wq->pring;
10316 pring->num_mask = LPFC_MAX_RING_MASK;
10317 pring->prt[0].profile = 0; /* Mask 0 */
10318 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10319 pring->prt[0].type = FC_TYPE_ELS;
10320 pring->prt[0].lpfc_sli_rcv_unsol_event =
10321 lpfc_els_unsol_event;
10322 pring->prt[1].profile = 0; /* Mask 1 */
10323 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10324 pring->prt[1].type = FC_TYPE_ELS;
10325 pring->prt[1].lpfc_sli_rcv_unsol_event =
10326 lpfc_els_unsol_event;
10327 pring->prt[2].profile = 0; /* Mask 2 */
10328 /* NameServer Inquiry */
10329 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10331 pring->prt[2].type = FC_TYPE_CT;
10332 pring->prt[2].lpfc_sli_rcv_unsol_event =
10333 lpfc_ct_unsol_event;
10334 pring->prt[3].profile = 0; /* Mask 3 */
10335 /* NameServer response */
10336 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10338 pring->prt[3].type = FC_TYPE_CT;
10339 pring->prt[3].lpfc_sli_rcv_unsol_event =
10340 lpfc_ct_unsol_event;
10345 * lpfc_sli_setup - SLI ring setup function
10346 * @phba: Pointer to HBA context object.
10348 * lpfc_sli_setup sets up rings of the SLI interface with
10349 * the number of iocbs per ring and iotags. This function is
10350 * called while the driver attaches to the HBA and before
10351 * interrupts are enabled, so there is no need for locking.
10353 * This function always returns 0. SLI3 only.
10356 lpfc_sli_setup(struct lpfc_hba *phba)
10358 int i, totiocbsize = 0;
10359 struct lpfc_sli *psli = &phba->sli;
10360 struct lpfc_sli_ring *pring;
10362 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10363 psli->sli_flag = 0;
10365 psli->iocbq_lookup = NULL;
10366 psli->iocbq_lookup_len = 0;
10367 psli->last_iotag = 0;
10369 for (i = 0; i < psli->num_rings; i++) {
10370 pring = &psli->sli3_ring[i];
10372 case LPFC_FCP_RING: /* ring 0 - FCP */
10373 /* numCiocb and numRiocb are used in config_port */
10374 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10375 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10376 pring->sli.sli3.numCiocb +=
10377 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10378 pring->sli.sli3.numRiocb +=
10379 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10380 pring->sli.sli3.numCiocb +=
10381 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10382 pring->sli.sli3.numRiocb +=
10383 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10384 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10385 SLI3_IOCB_CMD_SIZE :
10386 SLI2_IOCB_CMD_SIZE;
10387 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10388 SLI3_IOCB_RSP_SIZE :
10389 SLI2_IOCB_RSP_SIZE;
10390 pring->iotag_ctr = 0;
10392 (phba->cfg_hba_queue_depth * 2);
10393 pring->fast_iotag = pring->iotag_max;
10394 pring->num_mask = 0;
10396 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10397 /* numCiocb and numRiocb are used in config_port */
10398 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10399 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10400 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10401 SLI3_IOCB_CMD_SIZE :
10402 SLI2_IOCB_CMD_SIZE;
10403 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10404 SLI3_IOCB_RSP_SIZE :
10405 SLI2_IOCB_RSP_SIZE;
10406 pring->iotag_max = phba->cfg_hba_queue_depth;
10407 pring->num_mask = 0;
10409 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10410 /* numCiocb and numRiocb are used in config_port */
10411 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10412 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10413 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10414 SLI3_IOCB_CMD_SIZE :
10415 SLI2_IOCB_CMD_SIZE;
10416 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10417 SLI3_IOCB_RSP_SIZE :
10418 SLI2_IOCB_RSP_SIZE;
10419 pring->fast_iotag = 0;
10420 pring->iotag_ctr = 0;
10421 pring->iotag_max = 4096;
10422 pring->lpfc_sli_rcv_async_status =
10423 lpfc_sli_async_event_handler;
10424 pring->num_mask = LPFC_MAX_RING_MASK;
10425 pring->prt[0].profile = 0; /* Mask 0 */
10426 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10427 pring->prt[0].type = FC_TYPE_ELS;
10428 pring->prt[0].lpfc_sli_rcv_unsol_event =
10429 lpfc_els_unsol_event;
10430 pring->prt[1].profile = 0; /* Mask 1 */
10431 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10432 pring->prt[1].type = FC_TYPE_ELS;
10433 pring->prt[1].lpfc_sli_rcv_unsol_event =
10434 lpfc_els_unsol_event;
10435 pring->prt[2].profile = 0; /* Mask 2 */
10436 /* NameServer Inquiry */
10437 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10439 pring->prt[2].type = FC_TYPE_CT;
10440 pring->prt[2].lpfc_sli_rcv_unsol_event =
10441 lpfc_ct_unsol_event;
10442 pring->prt[3].profile = 0; /* Mask 3 */
10443 /* NameServer response */
10444 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10446 pring->prt[3].type = FC_TYPE_CT;
10447 pring->prt[3].lpfc_sli_rcv_unsol_event =
10448 lpfc_ct_unsol_event;
10451 totiocbsize += (pring->sli.sli3.numCiocb *
10452 pring->sli.sli3.sizeCiocb) +
10453 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10455 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10456 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10457 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10458 "SLI2 SLIM Data: x%x x%lx\n",
10459 phba->brd_no, totiocbsize,
10460 (unsigned long) MAX_SLIM_IOCB_SIZE);
10462 if (phba->cfg_multi_ring_support == 2)
10463 lpfc_extra_ring_setup(phba);
10469 * lpfc_sli4_queue_init - Queue initialization function
10470 * @phba: Pointer to HBA context object.
10472 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10473 * ring. This function also initializes ring indices of each ring.
10474 * This function is called during the initialization of the SLI
10475 * interface of an HBA.
10476 * This function is called with no lock held and always returns
10480 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10482 struct lpfc_sli *psli;
10483 struct lpfc_sli_ring *pring;
10487 spin_lock_irq(&phba->hbalock);
10488 INIT_LIST_HEAD(&psli->mboxq);
10489 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10490 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10491 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10492 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10494 pring->ringno = LPFC_FCP_RING;
10495 pring->txcmplq_cnt = 0;
10496 INIT_LIST_HEAD(&pring->txq);
10497 INIT_LIST_HEAD(&pring->txcmplq);
10498 INIT_LIST_HEAD(&pring->iocb_continueq);
10499 spin_lock_init(&pring->ring_lock);
10501 pring = phba->sli4_hba.els_wq->pring;
10503 pring->ringno = LPFC_ELS_RING;
10504 pring->txcmplq_cnt = 0;
10505 INIT_LIST_HEAD(&pring->txq);
10506 INIT_LIST_HEAD(&pring->txcmplq);
10507 INIT_LIST_HEAD(&pring->iocb_continueq);
10508 spin_lock_init(&pring->ring_lock);
10510 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10511 pring = phba->sli4_hba.nvmels_wq->pring;
10513 pring->ringno = LPFC_ELS_RING;
10514 pring->txcmplq_cnt = 0;
10515 INIT_LIST_HEAD(&pring->txq);
10516 INIT_LIST_HEAD(&pring->txcmplq);
10517 INIT_LIST_HEAD(&pring->iocb_continueq);
10518 spin_lock_init(&pring->ring_lock);
10521 spin_unlock_irq(&phba->hbalock);
10525 * lpfc_sli_queue_init - Queue initialization function
10526 * @phba: Pointer to HBA context object.
10528 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10529 * ring. This function also initializes ring indices of each ring.
10530 * This function is called during the initialization of the SLI
10531 * interface of an HBA.
10532 * This function is called with no lock held and always returns
10536 lpfc_sli_queue_init(struct lpfc_hba *phba)
10538 struct lpfc_sli *psli;
10539 struct lpfc_sli_ring *pring;
10543 spin_lock_irq(&phba->hbalock);
10544 INIT_LIST_HEAD(&psli->mboxq);
10545 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10546 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10547 for (i = 0; i < psli->num_rings; i++) {
10548 pring = &psli->sli3_ring[i];
10550 pring->sli.sli3.next_cmdidx = 0;
10551 pring->sli.sli3.local_getidx = 0;
10552 pring->sli.sli3.cmdidx = 0;
10553 INIT_LIST_HEAD(&pring->iocb_continueq);
10554 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10555 INIT_LIST_HEAD(&pring->postbufq);
10557 INIT_LIST_HEAD(&pring->txq);
10558 INIT_LIST_HEAD(&pring->txcmplq);
10559 spin_lock_init(&pring->ring_lock);
10561 spin_unlock_irq(&phba->hbalock);
10565 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10566 * @phba: Pointer to HBA context object.
10568 * This routine flushes the mailbox command subsystem. It will unconditionally
10569 * flush all the mailbox commands in the three possible stages in the mailbox
10570 * command sub-system: pending mailbox command queue; the outstanding mailbox
10571 * command; and completed mailbox command queue. It is the caller's responsibility
10572 * to make sure that the driver is in the proper state to flush the mailbox
10573 * command sub-system. Namely, the posting of mailbox commands into the
10574 * pending mailbox command queue from the various clients must be stopped;
10575 * either the HBA is in a state where it will never work on the outstanding
10576 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10577 * mailbox command has been completed.
10580 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10582 LIST_HEAD(completions);
10583 struct lpfc_sli *psli = &phba->sli;
10585 unsigned long iflag;
10587 /* Disable softirqs, including timers from obtaining phba->hbalock */
10588 local_bh_disable();
10590 /* Flush all the mailbox commands in the mbox system */
10591 spin_lock_irqsave(&phba->hbalock, iflag);
10593 /* The pending mailbox command queue */
10594 list_splice_init(&phba->sli.mboxq, &completions);
10595 /* The outstanding active mailbox command */
10596 if (psli->mbox_active) {
10597 list_add_tail(&psli->mbox_active->list, &completions);
10598 psli->mbox_active = NULL;
10599 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10601 /* The completed mailbox command queue */
10602 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10603 spin_unlock_irqrestore(&phba->hbalock, iflag);
10605 /* Enable softirqs again, done with phba->hbalock */
10608 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10609 while (!list_empty(&completions)) {
10610 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10611 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10612 if (pmb->mbox_cmpl)
10613 pmb->mbox_cmpl(phba, pmb);
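/*
 * Illustrative sketch (hypothetical completion handler, not part of
 * the driver): a mailbox mbox_cmpl callback can tell a command flushed
 * by lpfc_sli_mbox_sys_flush() from a normal completion by the
 * MBX_NOT_FINISHED status stamped above.
 */
#if 0	/* illustrative only */
static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
		/* Flushed; just release the mailbox resources. */
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}
	/* ... normal completion processing ... */
}
#endif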
10618 * lpfc_sli_host_down - Vport cleanup function
10619 * @vport: Pointer to virtual port object.
10621 * lpfc_sli_host_down is called to clean up the resources
10622 * associated with a vport before destroying virtual
10623 * port data structures.
10624 * This function does the following operations:
10625 * - Free discovery resources associated with this virtual
10627 * - Free iocbs associated with this virtual port in
10629 * - Send abort for all iocb commands associated with this
10630 * vport in txcmplq.
10632 * This function is called with no lock held and always returns 1.
10635 lpfc_sli_host_down(struct lpfc_vport *vport)
10637 LIST_HEAD(completions);
10638 struct lpfc_hba *phba = vport->phba;
10639 struct lpfc_sli *psli = &phba->sli;
10640 struct lpfc_queue *qp = NULL;
10641 struct lpfc_sli_ring *pring;
10642 struct lpfc_iocbq *iocb, *next_iocb;
10644 unsigned long flags = 0;
10645 uint16_t prev_pring_flag;
10647 lpfc_cleanup_discovery_resources(vport);
10649 spin_lock_irqsave(&phba->hbalock, flags);
10652 * Error everything on the txq since these iocbs
10653 * have not been given to the FW yet.
10654 * Also issue ABTS for everything on the txcmplq
10656 if (phba->sli_rev != LPFC_SLI_REV4) {
10657 for (i = 0; i < psli->num_rings; i++) {
10658 pring = &psli->sli3_ring[i];
10659 prev_pring_flag = pring->flag;
10660 /* Only slow rings */
10661 if (pring->ringno == LPFC_ELS_RING) {
10662 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10663 /* Set the lpfc data pending flag */
10664 set_bit(LPFC_DATA_READY, &phba->data_flags);
10666 list_for_each_entry_safe(iocb, next_iocb,
10667 &pring->txq, list) {
10668 if (iocb->vport != vport)
10670 list_move_tail(&iocb->list, &completions);
10672 list_for_each_entry_safe(iocb, next_iocb,
10673 &pring->txcmplq, list) {
10674 if (iocb->vport != vport)
10676 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10678 pring->flag = prev_pring_flag;
10681 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10685 if (pring == phba->sli4_hba.els_wq->pring) {
10686 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10687 /* Set the lpfc data pending flag */
10688 set_bit(LPFC_DATA_READY, &phba->data_flags);
10690 prev_pring_flag = pring->flag;
10691 spin_lock_irq(&pring->ring_lock);
10692 list_for_each_entry_safe(iocb, next_iocb,
10693 &pring->txq, list) {
10694 if (iocb->vport != vport)
10696 list_move_tail(&iocb->list, &completions);
10698 spin_unlock_irq(&pring->ring_lock);
10699 list_for_each_entry_safe(iocb, next_iocb,
10700 &pring->txcmplq, list) {
10701 if (iocb->vport != vport)
10703 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10705 pring->flag = prev_pring_flag;
10708 spin_unlock_irqrestore(&phba->hbalock, flags);
10710 /* Cancel all the IOCBs from the completions list */
10711 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10717 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10718 * @phba: Pointer to HBA context object.
10720 * This function cleans up all iocbs, buffers, and mailbox commands
10721 * while shutting down the HBA. This function is called with no
10722 * lock held and always returns 1.
10723 * This function does the following to cleanup driver resources:
10724 * - Free discovery resources for each virtual port
10725 * - Cleanup any pending fabric iocbs
10726 * - Iterate through the iocb txq and free each entry
10728 * - Free up any buffer posted to the HBA
10729 * - Free mailbox commands in the mailbox queue.
10732 lpfc_sli_hba_down(struct lpfc_hba *phba)
10734 LIST_HEAD(completions);
10735 struct lpfc_sli *psli = &phba->sli;
10736 struct lpfc_queue *qp = NULL;
10737 struct lpfc_sli_ring *pring;
10738 struct lpfc_dmabuf *buf_ptr;
10739 unsigned long flags = 0;
10742 /* Shutdown the mailbox command sub-system */
10743 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10745 lpfc_hba_down_prep(phba);
10747 /* Disable softirqs, including timers from obtaining phba->hbalock */
10748 local_bh_disable();
10750 lpfc_fabric_abort_hba(phba);
10752 spin_lock_irqsave(&phba->hbalock, flags);
10755 * Error everything on the txq since these iocbs
10756 * have not been given to the FW yet.
10758 if (phba->sli_rev != LPFC_SLI_REV4) {
10759 for (i = 0; i < psli->num_rings; i++) {
10760 pring = &psli->sli3_ring[i];
10761 /* Only slow rings */
10762 if (pring->ringno == LPFC_ELS_RING) {
10763 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10764 /* Set the lpfc data pending flag */
10765 set_bit(LPFC_DATA_READY, &phba->data_flags);
10767 list_splice_init(&pring->txq, &completions);
10770 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10774 spin_lock(&pring->ring_lock);
10775 list_splice_init(&pring->txq, &completions);
10776 spin_unlock(&pring->ring_lock);
10777 if (pring == phba->sli4_hba.els_wq->pring) {
10778 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10779 /* Set the lpfc data pending flag */
10780 set_bit(LPFC_DATA_READY, &phba->data_flags);
10784 spin_unlock_irqrestore(&phba->hbalock, flags);
10786 /* Cancel all the IOCBs from the completions list */
10787 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10790 spin_lock_irqsave(&phba->hbalock, flags);
10791 list_splice_init(&phba->elsbuf, &completions);
10792 phba->elsbuf_cnt = 0;
10793 phba->elsbuf_prev_cnt = 0;
10794 spin_unlock_irqrestore(&phba->hbalock, flags);
10796 while (!list_empty(&completions)) {
10797 list_remove_head(&completions, buf_ptr,
10798 struct lpfc_dmabuf, list);
10799 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10803 /* Enable softirqs again, done with phba->hbalock */
10806 /* Return any active mbox cmds */
10807 del_timer_sync(&psli->mbox_tmo);
10809 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10810 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10811 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10817 * lpfc_sli_pcimem_bcopy - SLI memory copy function
10818 * @srcp: Source memory pointer.
10819 * @destp: Destination memory pointer.
10820 * @cnt: Number of words required to be copied.
10822 * This function is used for copying data between driver memory
10823 * and the SLI memory. This function also changes the endianness
10824 * of each word if native endianness is different from SLI
10825 * endianness. This function can be called with or without
10829 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10831 uint32_t *src = srcp;
10832 uint32_t *dest = destp;
10836 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10838 ldata = le32_to_cpu(ldata);
10847 * lpfc_sli_bemem_bcopy - SLI memory copy function
10848 * @srcp: Source memory pointer.
10849 * @destp: Destination memory pointer.
10850 * @cnt: Number of words required to be copied.
10852 * This function is used for copying data from a data structure
10853 * with big endian representation to local endianness.
10854 * This function can be called with or without lock.
10857 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10859 uint32_t *src = srcp;
10860 uint32_t *dest = destp;
10864 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10866 ldata = be32_to_cpu(ldata);
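/*
 * Illustrative sketch (not part of the driver) contrasting the two copy
 * helpers above: lpfc_sli_pcimem_bcopy() swaps each word from
 * little-endian SLI memory (le32_to_cpu), lpfc_sli_bemem_bcopy() from a
 * big-endian structure (be32_to_cpu); both are no-op swaps on hosts of
 * matching endianness. Buffer and length names here are hypothetical.
 */
#if 0	/* illustrative only */
	/* word-wise LE -> CPU copy, e.g. a mailbox image read from SLIM */
	lpfc_sli_pcimem_bcopy(le_src, cpu_dst, len);
	/* word-wise BE -> CPU copy, e.g. a big-endian config region */
	lpfc_sli_bemem_bcopy(be_src, cpu_dst, len);
#endif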
10874 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10875 * @phba: Pointer to HBA context object.
10876 * @pring: Pointer to driver SLI ring object.
10877 * @mp: Pointer to driver buffer object.
10879 * This function is called with no lock held.
10880 * It always returns zero after adding the buffer to the postbufq
10884 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10885 struct lpfc_dmabuf *mp)
10887 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10889 spin_lock_irq(&phba->hbalock);
10890 list_add_tail(&mp->list, &pring->postbufq);
10891 pring->postbufq_cnt++;
10892 spin_unlock_irq(&phba->hbalock);
10897 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10898 * @phba: Pointer to HBA context object.
10900 * When HBQ is enabled, buffers are searched based on tags. This function
10901 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
10902 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10903 * does not conflict with tags of buffers posted for unsolicited events.
10904 * The function returns the allocated tag. The function is called with
10908 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10910 spin_lock_irq(&phba->hbalock);
10911 phba->buffer_tag_count++;
10913 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10914 * from a tag assigned by HBQ.
10916 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10917 spin_unlock_irq(&phba->hbalock);
10918 return phba->buffer_tag_count;
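/*
 * Illustrative usage sketch (not part of the driver): tag a DMA buffer
 * before posting it with a CMD_QUE_XRI64_CX iocb, then recover it from
 * the response by tag. 'mp', 'pring' and 'tag' are hypothetical locals.
 */
#if 0	/* illustrative only */
	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	lpfc_sli_ringpostbuf_put(phba, pring, mp);
	/* ... later, in the response path ... */
	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
#endif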
10922 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10923 * @phba: Pointer to HBA context object.
10924 * @pring: Pointer to driver SLI ring object.
10925 * @tag: Buffer tag.
10927 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10928 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
10929 * iocb is posted to the response ring with the tag of the buffer.
10930 * This function searches the pring->postbufq list using the tag
10931 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10932 * iocb. If the buffer is found, the lpfc_dmabuf object of the
10933 * buffer is returned to the caller; otherwise NULL is returned.
10934 * This function is called with no lock held.
10936 struct lpfc_dmabuf *
10937 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10940 struct lpfc_dmabuf *mp, *next_mp;
10941 struct list_head *slp = &pring->postbufq;
10943 /* Search postbufq, from the beginning, looking for a match on tag */
10944 spin_lock_irq(&phba->hbalock);
10945 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10946 if (mp->buffer_tag == tag) {
10947 list_del_init(&mp->list);
10948 pring->postbufq_cnt--;
10949 spin_unlock_irq(&phba->hbalock);
10954 spin_unlock_irq(&phba->hbalock);
10955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10956 "0402 Cannot find virtual addr for buffer tag on "
10957 "ring %d Data x%lx x%px x%px x%x\n",
10958 pring->ringno, (unsigned long) tag,
10959 slp->next, slp->prev, pring->postbufq_cnt);
10965 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10966 * @phba: Pointer to HBA context object.
10967 * @pring: Pointer to driver SLI ring object.
10968 * @phys: DMA address of the buffer.
10970 * This function searches the buffer list using the dma_address
10971 * of an unsolicited event to find the driver's lpfc_dmabuf object
10972 * corresponding to the dma_address. The function returns the
10973 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
10974 * This function is called by the ct and els unsolicited event
10975 * handlers to get the buffer associated with the unsolicited
10978 * This function is called with no lock held.
10980 struct lpfc_dmabuf *
10981 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10984 struct lpfc_dmabuf *mp, *next_mp;
10985 struct list_head *slp = &pring->postbufq;
10987 /* Search postbufq, from the beginning, looking for a match on phys */
10988 spin_lock_irq(&phba->hbalock);
10989 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10990 if (mp->phys == phys) {
10991 list_del_init(&mp->list);
10992 pring->postbufq_cnt--;
10993 spin_unlock_irq(&phba->hbalock);
10998 spin_unlock_irq(&phba->hbalock);
10999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11000 "0410 Cannot find virtual addr for mapped buf on "
11001 "ring %d Data x%llx x%px x%px x%x\n",
11002 pring->ringno, (unsigned long long)phys,
11003 slp->next, slp->prev, pring->postbufq_cnt);
11008 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11009 * @phba: Pointer to HBA context object.
11010 * @cmdiocb: Pointer to driver command iocb object.
11011 * @rspiocb: Pointer to driver response iocb object.
11013 * This function is the completion handler for the abort iocbs for
11014 * ELS commands. This function is called from the ELS ring event
11015 * handler with no lock held. This function frees memory resources
11016 * associated with the abort iocb.
11019 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11020 struct lpfc_iocbq *rspiocb)
11022 IOCB_t *irsp = &rspiocb->iocb;
11023 uint16_t abort_iotag, abort_context;
11024 struct lpfc_iocbq *abort_iocb = NULL;
11026 if (irsp->ulpStatus) {
11029 * Assume that the port already completed and returned, or
11030 * will return the iocb. Just log the message.
11032 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11033 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11035 spin_lock_irq(&phba->hbalock);
11036 if (phba->sli_rev < LPFC_SLI_REV4) {
11037 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11038 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11039 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11040 spin_unlock_irq(&phba->hbalock);
11043 if (abort_iotag != 0 &&
11044 abort_iotag <= phba->sli.last_iotag)
11046 phba->sli.iocbq_lookup[abort_iotag];
11048 /* For sli4 the abort_tag is the XRI,
11049 * so the abort routine puts the iotag of the iocb
11050 * being aborted in the context field of the abort
11053 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11055 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11056 "0327 Cannot abort els iocb x%px "
11057 "with tag %x context %x, abort status %x, "
11059 abort_iocb, abort_iotag, abort_context,
11060 irsp->ulpStatus, irsp->un.ulpWord[4]);
11062 spin_unlock_irq(&phba->hbalock);
11065 lpfc_sli_release_iocbq(phba, cmdiocb);
11070 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11071 * @phba: Pointer to HBA context object.
11072 * @cmdiocb: Pointer to driver command iocb object.
11073 * @rspiocb: Pointer to driver response iocb object.
11075 * The function is called from SLI ring event handler with no
11076 * lock held. This function is the completion handler for ELS commands
11077 * which are aborted. The function frees memory resources used for
11078 * the aborted ELS commands.
11081 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11082 struct lpfc_iocbq *rspiocb)
11084 IOCB_t *irsp = &rspiocb->iocb;
11086 /* ELS cmd tag <ulpIoTag> completes */
11087 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11088 "0139 Ignoring ELS cmd tag x%x completion Data: "
11090 irsp->ulpIoTag, irsp->ulpStatus,
11091 irsp->un.ulpWord[4], irsp->ulpTimeout);
11092 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11093 lpfc_ct_free_iocb(phba, cmdiocb);
11095 lpfc_els_free_iocb(phba, cmdiocb);
11100 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11101 * @phba: Pointer to HBA context object.
11102 * @pring: Pointer to driver SLI ring object.
11103 * @cmdiocb: Pointer to driver command iocb object.
11105 * This function issues an abort iocb for the provided command iocb down to
11106 * the port. Unless the outstanding command iocb is itself an abort
11107 * request, this function issues the abort unconditionally. This function is
11108 * called with hbalock held. The function returns 0 when it fails due to a
11109 * memory allocation failure or when the command iocb is an abort request.
11112 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11113 struct lpfc_iocbq *cmdiocb)
11115 struct lpfc_vport *vport = cmdiocb->vport;
11116 struct lpfc_iocbq *abtsiocbp;
11117 IOCB_t *icmd = NULL;
11118 IOCB_t *iabt = NULL;
11120 unsigned long iflags;
11121 struct lpfc_nodelist *ndlp;
11123 lockdep_assert_held(&phba->hbalock);
11126 * There are certain command types we don't want to abort. And we
11127 * don't want to abort commands that are already in the process of
11130 icmd = &cmdiocb->iocb;
11131 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11132 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11133 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11136 /* issue ABTS for this IOCB based on iotag */
11137 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11138 if (abtsiocbp == NULL)
11141 /* This signals the response to set the correct status
11142 * before calling the completion handler
11144 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11146 iabt = &abtsiocbp->iocb;
11147 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11148 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11149 if (phba->sli_rev == LPFC_SLI_REV4) {
11150 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11151 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11153 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11154 if (pring->ringno == LPFC_ELS_RING) {
11155 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11156 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11160 iabt->ulpClass = icmd->ulpClass;
11162 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11163 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11164 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11165 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11166 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11167 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11169 if (phba->link_state >= LPFC_LINK_UP)
11170 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11172 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11174 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11175 abtsiocbp->vport = vport;
11177 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11178 "0339 Abort xri x%x, original iotag x%x, "
11179 "abort cmd iotag x%x\n",
11180 iabt->un.acxri.abortIoTag,
11181 iabt->un.acxri.abortContextTag,
11184 if (phba->sli_rev == LPFC_SLI_REV4) {
11185 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11186 if (unlikely(pring == NULL))
11188 /* Note: both hbalock and ring_lock need to be set here */
11189 spin_lock_irqsave(&pring->ring_lock, iflags);
11190 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11192 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11194 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11199 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11202 * The caller of this routine should check for IOCB_ERROR
11203 * and handle it properly. This routine no longer removes the
11204 * iocb from the txcmplq nor calls compl in case of IOCB_ERROR.
11210 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11211 * @phba: Pointer to HBA context object.
11212 * @pring: Pointer to driver SLI ring object.
11213 * @cmdiocb: Pointer to driver command iocb object.
11215 * This function issues an abort iocb for the provided command iocb. In case
11216 * of unloading, the abort iocb will not be issued to commands on the ELS
11217 * ring. Instead, the callback function of those commands shall be changed
11218 * so that nothing happens when they finish. This function is called with
11219 * hbalock held. The function returns 0 when the command iocb is an abort
11223 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11224 struct lpfc_iocbq *cmdiocb)
11226 struct lpfc_vport *vport = cmdiocb->vport;
11227 int retval = IOCB_ERROR;
11228 IOCB_t *icmd = NULL;
11230 lockdep_assert_held(&phba->hbalock);
11233 * There are certain command types we don't want to abort. And we
11234 * don't want to abort commands that are already in the process of
11237 icmd = &cmdiocb->iocb;
11238 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11239 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11240 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11244 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11245 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11247 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11248 goto abort_iotag_exit;
11252 * If we're unloading, don't abort iocb on the ELS ring, but change
11253 * the callback so that nothing happens when it finishes.
11255 if ((vport->load_flag & FC_UNLOADING) &&
11256 (pring->ringno == LPFC_ELS_RING)) {
11257 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11258 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11260 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11261 goto abort_iotag_exit;
11264 /* Now, we try to issue the abort to the cmdiocb out */
11265 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11269 * The caller of this routine should check for IOCB_ERROR
11270 * and handle it properly. This routine no longer removes the
11271 * iocb from the txcmplq nor calls compl in case of IOCB_ERROR.
11277 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11278 * @phba: pointer to lpfc HBA data structure.
11280 * This routine will abort all pending and outstanding iocbs to an HBA.
11283 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11285 struct lpfc_sli *psli = &phba->sli;
11286 struct lpfc_sli_ring *pring;
11287 struct lpfc_queue *qp = NULL;
11290 if (phba->sli_rev != LPFC_SLI_REV4) {
11291 for (i = 0; i < psli->num_rings; i++) {
11292 pring = &psli->sli3_ring[i];
11293 lpfc_sli_abort_iocb_ring(phba, pring);
11297 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11301 lpfc_sli_abort_iocb_ring(phba, pring);
11306 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11307 * @iocbq: Pointer to driver iocb object.
11308 * @vport: Pointer to driver virtual port object.
11309 * @tgt_id: SCSI ID of the target.
11310 * @lun_id: LUN ID of the scsi device.
11311 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11313 * This function acts as an iocb filter for functions which abort or count
11314 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11315 * 0 if the filtering criteria are met for the given iocb and will return
11316 * 1 if the filtering criteria are not met.
11317 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11318 * given iocb is for the SCSI device specified by vport, tgt_id and
11319 * lun_id parameter.
11320 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11321 * given iocb is for the SCSI target specified by vport and tgt_id
11323 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11324 * given iocb is for the SCSI host associated with the given vport.
11325 * This function is called with no locks held.
11328 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11329 uint16_t tgt_id, uint64_t lun_id,
11330 lpfc_ctx_cmd ctx_cmd)
11332 struct lpfc_io_buf *lpfc_cmd;
11335 if (iocbq->vport != vport)
11338 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11339 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11342 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11344 if (lpfc_cmd->pCmd == NULL)
11349 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11350 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11351 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11355 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11356 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11359 case LPFC_CTX_HOST:
11363 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11364 __func__, ctx_cmd);
11372 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11373 * @vport: Pointer to virtual port.
11374 * @tgt_id: SCSI ID of the target.
11375 * @lun_id: LUN ID of the scsi device.
11376 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11378 * This function returns the number of FCP commands pending for the vport.
11379 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11380 * commands pending on the vport for the SCSI device specified
11381 * by the tgt_id and lun_id parameters.
11382 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11383 * commands pending on the vport for the SCSI target specified
11384 * by the tgt_id parameter.
11385 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11386 * commands pending on the vport.
11387 * This function returns the number of iocbs which satisfy the filter.
11388 * This function is called without any lock held.
11391 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11392 lpfc_ctx_cmd ctx_cmd)
11394 struct lpfc_hba *phba = vport->phba;
11395 struct lpfc_iocbq *iocbq;
11398 spin_lock_irq(&phba->hbalock);
11399 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11400 iocbq = phba->sli.iocbq_lookup[i];
11402 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11406 spin_unlock_irq(&phba->hbalock);
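/*
 * Illustrative usage sketch (hypothetical error-handler style caller,
 * not part of the driver): poll until all FCP commands for one LUN
 * have drained after a reset.
 */
#if 0	/* illustrative only */
	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
		msleep(20);	/* hypothetical back-off interval */
#endif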
11412 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11413 * @phba: Pointer to HBA context object
11414 * @cmdiocb: Pointer to command iocb object.
11415 * @rspiocb: Pointer to response iocb object.
11417 * This function is called when an aborted FCP iocb completes. This
11418 * function is called by the ring event handler with no lock held.
11419 * This function frees the iocb.
11422 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11423 struct lpfc_iocbq *rspiocb)
11425 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11426 "3096 ABORT_XRI_CN completing on rpi x%x "
11427 "original iotag x%x, abort cmd iotag x%x "
11428 "status 0x%x, reason 0x%x\n",
11429 cmdiocb->iocb.un.acxri.abortContextTag,
11430 cmdiocb->iocb.un.acxri.abortIoTag,
11431 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11432 rspiocb->iocb.un.ulpWord[4]);
11433 lpfc_sli_release_iocbq(phba, cmdiocb);
11438 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11439 * @vport: Pointer to virtual port.
11440 * @pring: Pointer to driver SLI ring object.
11441 * @tgt_id: SCSI ID of the target.
11442 * @lun_id: LUN ID of the scsi device.
11443 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11445 * This function sends an abort command for every SCSI command
11446 * associated with the given virtual port pending on the ring
11447 * filtered by lpfc_sli_validate_fcp_iocb function.
11448 * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to the
11449 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
11451 * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to the
11452 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11453 * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
11454 * FCP iocbs associated with the virtual port.
11455 * This function returns the number of iocbs it failed to abort.
11456 * This function is called with no locks held.
11459 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11460 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11462 struct lpfc_hba *phba = vport->phba;
11463 struct lpfc_iocbq *iocbq;
11464 struct lpfc_iocbq *abtsiocb;
11465 struct lpfc_sli_ring *pring_s4;
11466 IOCB_t *cmd = NULL;
11467 int errcnt = 0, ret_val = 0;
11470 /* all I/Os are in process of being flushed */
11471 if (phba->hba_flag & HBA_IOQ_FLUSH)
11474 for (i = 1; i <= phba->sli.last_iotag; i++) {
11475 iocbq = phba->sli.iocbq_lookup[i];
11477 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11482 * If the iocbq is already being aborted, don't take a second
11483 * action, but do count it.
11485 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11488 /* issue ABTS for this IOCB based on iotag */
11489 abtsiocb = lpfc_sli_get_iocbq(phba);
11490 if (abtsiocb == NULL) {
11495 /* indicate the IO is being aborted by the driver. */
11496 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11498 cmd = &iocbq->iocb;
11499 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11500 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11501 if (phba->sli_rev == LPFC_SLI_REV4)
11502 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11504 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11505 abtsiocb->iocb.ulpLe = 1;
11506 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11507 abtsiocb->vport = vport;
11509 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11510 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11511 if (iocbq->iocb_flag & LPFC_IO_FCP)
11512 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11513 if (iocbq->iocb_flag & LPFC_IO_FOF)
11514 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11516 if (lpfc_is_link_up(phba))
11517 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11519 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11521 /* Setup callback routine and issue the command. */
11522 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11523 if (phba->sli_rev == LPFC_SLI_REV4) {
11524 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11527 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11530 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11532 if (ret_val == IOCB_ERROR) {
11533 lpfc_sli_release_iocbq(phba, abtsiocb);
11543 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11544 * @vport: Pointer to virtual port.
11545 * @pring: Pointer to driver SLI ring object.
11546 * @tgt_id: SCSI ID of the target.
11547 * @lun_id: LUN ID of the scsi device.
11548 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11550 * This function sends an abort command for every SCSI command
11551 * associated with the given virtual port pending on the ring
11552 * filtered by lpfc_sli_validate_fcp_iocb function.
11553 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends an abort only to the
11554 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
11556 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends an abort only to the
11557 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11558 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends an abort to all
11559 * FCP iocbs associated with the virtual port.
11560 * This function returns the number of iocbs it aborted.
11561 * This function is called with no locks held right after a taskmgmt
11565 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11566 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11568 struct lpfc_hba *phba = vport->phba;
11569 struct lpfc_io_buf *lpfc_cmd;
11570 struct lpfc_iocbq *abtsiocbq;
11571 struct lpfc_nodelist *ndlp;
11572 struct lpfc_iocbq *iocbq;
11574 int sum, i, ret_val;
11575 unsigned long iflags;
11576 struct lpfc_sli_ring *pring_s4 = NULL;
11578 spin_lock_irqsave(&phba->hbalock, iflags);
11580 /* all I/Os are in process of being flushed */
11581 if (phba->hba_flag & HBA_IOQ_FLUSH) {
11582 spin_unlock_irqrestore(&phba->hbalock, iflags);
11587 for (i = 1; i <= phba->sli.last_iotag; i++) {
11588 iocbq = phba->sli.iocbq_lookup[i];
11590 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11594 /* Guard against IO completion being called at same time */
11595 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11596 spin_lock(&lpfc_cmd->buf_lock);
11598 if (!lpfc_cmd->pCmd) {
11599 spin_unlock(&lpfc_cmd->buf_lock);
11603 if (phba->sli_rev == LPFC_SLI_REV4) {
11605 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11607 spin_unlock(&lpfc_cmd->buf_lock);
11610 /* Note: both hbalock and ring_lock must be set here */
11611 spin_lock(&pring_s4->ring_lock);
11615 * If the iocbq is already being aborted, don't take a second
11616 * action, but do count it.
11618 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11619 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11620 if (phba->sli_rev == LPFC_SLI_REV4)
11621 spin_unlock(&pring_s4->ring_lock);
11622 spin_unlock(&lpfc_cmd->buf_lock);
11626 /* issue ABTS for this IOCB based on iotag */
11627 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11629 if (phba->sli_rev == LPFC_SLI_REV4)
11630 spin_unlock(&pring_s4->ring_lock);
11631 spin_unlock(&lpfc_cmd->buf_lock);
11635 icmd = &iocbq->iocb;
11636 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11637 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11638 if (phba->sli_rev == LPFC_SLI_REV4)
11639 abtsiocbq->iocb.un.acxri.abortIoTag =
11640 iocbq->sli4_xritag;
11642 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11643 abtsiocbq->iocb.ulpLe = 1;
11644 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11645 abtsiocbq->vport = vport;
11647 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11648 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11649 if (iocbq->iocb_flag & LPFC_IO_FCP)
11650 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11651 if (iocbq->iocb_flag & LPFC_IO_FOF)
11652 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11654 ndlp = lpfc_cmd->rdata->pnode;
11656 if (lpfc_is_link_up(phba) &&
11657 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11658 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11660 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11662 /* Setup callback routine and issue the command. */
11663 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11666 * Indicate the IO is being aborted by the driver and set
11667 * the caller's flag into the aborted IO.
11669 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11671 if (phba->sli_rev == LPFC_SLI_REV4) {
11672 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11674 spin_unlock(&pring_s4->ring_lock);
11676 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11680 spin_unlock(&lpfc_cmd->buf_lock);
11682 if (ret_val == IOCB_ERROR)
11683 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11687 spin_unlock_irqrestore(&phba->hbalock, iflags);
11692 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11693 * @phba: Pointer to HBA context object.
11694 * @cmdiocbq: Pointer to command iocb.
11695 * @rspiocbq: Pointer to response iocb.
11697 * This function is the completion handler for iocbs issued using
11698 * lpfc_sli_issue_iocb_wait function. This function is called by the
11699 * ring event handler function without any lock held. This function
11700 * can be called from both worker thread context and interrupt
11701 * context. This function can also be called from another thread which
11702 * cleans up the SLI layer objects.
11703 * This function copies the contents of the response iocb to the
11704 * response iocb memory object provided by the caller of
11705 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11706 * sleeps for the iocb completion.
11709 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11710 struct lpfc_iocbq *cmdiocbq,
11711 struct lpfc_iocbq *rspiocbq)
11713 wait_queue_head_t *pdone_q;
11714 unsigned long iflags;
11715 struct lpfc_io_buf *lpfc_cmd;
11717 spin_lock_irqsave(&phba->hbalock, iflags);
11718 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11721 * A time out has occurred for the iocb. If a time out
11722 * completion handler has been supplied, call it. Otherwise,
11723 * just free the iocbq.
11726 spin_unlock_irqrestore(&phba->hbalock, iflags);
11727 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11728 cmdiocbq->wait_iocb_cmpl = NULL;
11729 if (cmdiocbq->iocb_cmpl)
11730 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11732 lpfc_sli_release_iocbq(phba, cmdiocbq);
11736 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11737 if (cmdiocbq->context2 && rspiocbq)
11738 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11739 &rspiocbq->iocb, sizeof(IOCB_t));
11741 /* Set the exchange busy flag for task management commands */
11742 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11743 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11744 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11746 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
11747 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
11749 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
11752 pdone_q = cmdiocbq->context_un.wait_queue;
11755 spin_unlock_irqrestore(&phba->hbalock, iflags);
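
/*
 * Editor's note -- the handshake between lpfc_sli_issue_iocb_wait() and
 * the handler above, all under phba->hbalock:
 *
 *   issuer                                completer
 *   ------                                ---------
 *   clears LPFC_IO_WAKE|LPFC_IO_WAKE_TMO
 *   issues the iocb, sleeps on done_q
 *   on timeout: sets LPFC_IO_WAKE_TMO
 *                                         sees LPFC_IO_WAKE_TMO, runs the
 *                                         saved wait_iocb_cmpl (or frees
 *                                         the iocbq)
 *                                         otherwise: sets LPFC_IO_WAKE,
 *                                         copies the response, wakes done_q
 *
 * Because both sides test and set the flags under the same lock, exactly
 * one side ends up owning the iocbq even when a completion races a timeout.
 */
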
/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure.  If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up.  If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completions for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb, and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when successful.
 * This function is called with no lock held.
 **/
static int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * must be NULL; anything else is an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out.  Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
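
/*
 * Editor's note -- illustrative sketch, not part of the driver: a typical
 * synchronous caller pairs a command iocbq with a response iocbq and checks
 * ulpStatus itself, since IOCB_SUCCESS only means "sent and completed":
 *
 *	struct lpfc_iocbq *cmdiocbq = lpfc_sli_get_iocbq(phba);
 *	struct lpfc_iocbq *rspiocbq = lpfc_sli_get_iocbq(phba);
 *	int rc;
 *
 *	// ... build the command in cmdiocbq->iocb ...
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc == IOCB_SUCCESS &&
 *	    rspiocbq->iocb.ulpStatus == IOSTAT_SUCCESS)
 *		; // the command really succeeded
 *	if (rc != IOCB_TIMEDOUT)
 *		lpfc_sli_release_iocbq(phba, cmdiocbq); // never on timeout
 *	lpfc_sli_release_iocbq(phba, rspiocbq);
 */
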
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using a
 * non-interruptible wait. The caller should not free the mailbox
 * resources if this function returns MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completions.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* setup context3 field to pass wait_queue pointer to wake function */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}
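
/*
 * Editor's note -- illustrative sketch, not part of the driver: callers
 * allocate a mailbox from the mbox_mem_pool, build the command with one of
 * the lpfc_mbox.c helpers, and must leave the mailbox alone on MBX_TIMEOUT
 * because the completion handler still owns it:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	int rc;
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);	// any mailbox setup helper
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */
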
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: Mailbox shutdown action.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as in a blocked state to prevent
 * asynchronous mailbox commands from being issued off the pending mailbox
 * command queue. If the mailbox command sub-system shutdown is due to
 * HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
 * as offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down the mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command.
				 */
				break;
		}
	} else {
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();
	}

	lpfc_sli_mbox_sys_flush(phba);
}
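
/*
 * Editor's note: callers select the behavior with mbx_action --
 * LPFC_MBX_NO_WAIT (error paths such as EEH or ERATT) flushes immediately,
 * while LPFC_MBX_WAIT (offline or HBA function reset) polls mbox_active
 * above until firmware completes the command or the timeout expires.
 */
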
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}
/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * where the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * the interrupt should be handled, otherwise -EIO.
 **/
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}
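
/*
 * Editor's note -- illustrative sketch: every interrupt handler in this
 * file opens with this check when it owns the vector,
 *
 *	if (lpfc_intr_state_check(phba))
 *		return IRQ_NONE;
 *
 * so interrupts arriving while the PCI slot is in error recovery, or before
 * the link state is initialized, are left unclaimed for the IRQ core.
 */
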
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK  << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring:   pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->ctx_buf)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->ctx_buf,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->ctx_buf);
						ndlp = (struct lpfc_nodelist *)
							pmb->ctx_ndlp;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now lets get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->ctx_buf = mp;
						pmb->ctx_ndlp = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have "
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
}  /* lpfc_sli_fp_intr_handler */
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}  /* lpfc_sli_intr_handler */
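
/*
 * Editor's note -- illustrative sketch, not part of this file: the three
 * handlers above are registered by the init code roughly as follows. With
 * MSI-X the slow- and fast-path handlers each own a vector; otherwise the
 * device-level handler demultiplexes both. The exact registration calls
 * live in lpfc_init.c and may differ in detail.
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *			 lpfc_sli_sp_intr_handler, 0,
 *			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
 *			 lpfc_sli_fp_intr_handler, 0,
 *			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
 *	// or, for MSI / INTx:
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 */
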
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl  = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the iocbq carrying the work-queue completion entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	spin_lock_irqsave(&pring->ring_lock, iflags);
	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}
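
/*
 * Editor's note: the resulting "pseudo" response iocbq is not consumed
 * here. lpfc_sli4_sp_handle_els_wcqe() queues it on sp_queue_event, and
 * the worker thread later feeds it to the same SLI-3 style ELS completion
 * handlers, which is what lets the common discovery engine stay agnostic
 * of the SLI revision.
 */
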
inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}
/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry carrying an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
			/* Reg_LOGIN of dflt RPI was successful. Now lets get
			 * rid of the RPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
	return workposted;

out_no_mqe_complete:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry. It invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	cq->CQ_mbox++;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	int fcp_txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			fcp_txcmplq_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine on the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_IO:
		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Notify aborted XRI for NVME work queue */
			if (phba->nvmet_support)
				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		}
		workposted = false;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		cq_event->hdwq = cq->hdwq;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}
#define FC_RCTL_MDS_DIAGS	0xF4

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It will check the MajorCode and MinorCode to determine whether
 * this is a completion event on a completion queue; if not, an error shall
 * be logged and the routine will just return. Otherwise, it will get the
 * corresponding completion queue and schedule processing of all the entries
 * on that completion queue.
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = speq;

	if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0390 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}
13561 * __lpfc_sli4_process_cq - Process elements of a CQ
13562 * @phba: Pointer to HBA context object.
13563 * @cq: Pointer to CQ to be processed
13564 * @handler: Routine to process each cqe
13565 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13567 * This routine processes completion queue entries in a CQ. While a valid
13568 * queue element is found, the handler is called. During processing checks
13569 * are made for periodic doorbell writes to let the hardware know of
13570 * element consumption.
13572 * If the max limit on cqes to process is hit, or there are no more valid
13573 * entries, the loop stops. If we processed a sufficient number of elements,
13574 * meaning there is sufficient load, rather than rearming and generating
13575 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13576 * indicates no rescheduling.
13578 * Returns True if work scheduled, False otherwise.
13581 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13582 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13583 struct lpfc_cqe *), unsigned long *delay)
13585 struct lpfc_cqe *cqe;
13586 bool workposted = false;
13587 int count = 0, consumed = 0;
13590 /* default - no reschedule */
13593 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13594 goto rearm_and_exit;
13596 /* Process all the entries to the CQ */
13598 cqe = lpfc_sli4_cq_get(cq);
13600 workposted |= handler(phba, cq, cqe);
13601 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13604 if (!(++count % cq->max_proc_limit))
13607 if (!(count % cq->notify_interval)) {
13608 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13613 if (count == LPFC_NVMET_CQ_NOTIFY)
13614 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13616 cqe = lpfc_sli4_cq_get(cq);
13618 if (count >= phba->cfg_cq_poll_threshold) {
13623 /* Track the max number of CQEs processed in 1 EQ */
13624 if (count > cq->CQ_max_cqe)
13625 cq->CQ_max_cqe = count;
13627 cq->assoc_qp->EQ_cqe_cnt += count;
13629 /* Catch the no cq entry condition */
13630 if (unlikely(count == 0))
13631 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13632 "0369 No entry from completion queue "
13633 "qid=%d\n", cq->queue_id);
13635 cq->queue_claimed = 0;
13638 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13639 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
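/*
 * Editorial sketch (not part of the driver source): the loop above is made
 * safe against concurrent entry from hard-IRQ, workqueue and poll-timer
 * contexts by the cmpxchg() claim on cq->queue_claimed. The pattern in
 * isolation:
 *
 *	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
 *		return false;			// another context owns this CQ
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= handler(phba, cq, cqe);
 *		__lpfc_sli4_consume_cqe(phba, cq, cqe);
 *		consumed++;
 *	}
 *	cq->queue_claimed = 0;			// release before final doorbell
 *
 * The periodic sli4_write_cq_db() calls every notify_interval entries keep
 * the hardware informed of consumption so the CQ never appears full.
 */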
13645 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
13646 * @cq: pointer to CQ to process
13648 * This routine calls the cq processing routine with a handler specific
13649 * to the type of queue bound to it.
13651 * The CQ routine returns two values: the first is the calling status,
13652 * which indicates whether work was queued to the background discovery
13653 * thread. If true, the routine should wakeup the discovery thread;
13654 * the second is the delay parameter. If non-zero, rather than rearming
13655 * the CQ and taking yet another interrupt, the CQ handler should be queued so
13656 * that it is processed in a subsequent polling action. The value of
13657 * the delay indicates when to reschedule it.
13660 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13662 struct lpfc_hba *phba = cq->phba;
13663 unsigned long delay;
13664 bool workposted = false;
13666 /* Process and rearm the CQ */
13667 switch (cq->type) {
13669 workposted |= __lpfc_sli4_process_cq(phba, cq,
13670 lpfc_sli4_sp_handle_mcqe,
13674 if (cq->subtype == LPFC_IO)
13675 workposted |= __lpfc_sli4_process_cq(phba, cq,
13676 lpfc_sli4_fp_handle_cqe,
13679 workposted |= __lpfc_sli4_process_cq(phba, cq,
13680 lpfc_sli4_sp_handle_cqe,
13684 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13685 "0370 Invalid completion queue type (%d)\n",
13691 if (!queue_delayed_work_on(cq->chann, phba->wq,
13692 &cq->sched_spwork, delay))
13693 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13694 "0394 Cannot schedule soft IRQ "
13695 "for cqid=%d on CPU %d\n",
13696 cq->queue_id, cq->chann);
13699 /* wake up worker thread if there are works to be done */
13701 lpfc_worker_wake_up(phba);
13705 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13706 * workqueue
13707 * @work: pointer to work element
13709 * translates from the work handler and calls the slow-path handler.
13712 lpfc_sli4_sp_process_cq(struct work_struct *work)
13714 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13716 __lpfc_sli4_sp_process_cq(cq);
13720 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13721 * @work: pointer to work element
13723 * translates from the work handler and calls the slow-path handler.
13726 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13728 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13729 struct lpfc_queue, sched_spwork);
13731 __lpfc_sli4_sp_process_cq(cq);
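/*
 * Editorial sketch: the two wrappers above recover the owning lpfc_queue
 * with container_of(); the timer-started variant must first convert the
 * work_struct pointer with to_delayed_work(), because the queue embeds a
 * struct delayed_work for that path:
 *
 *	struct lpfc_queue *cq;
 *
 *	cq = container_of(work, struct lpfc_queue, spwork);	// immediate
 *	cq = container_of(to_delayed_work(work),		// delayed
 *			  struct lpfc_queue, sched_spwork);
 */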
13735 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13736 * @phba: Pointer to HBA context object.
13737 * @cq: Pointer to associated CQ
13738 * @wcqe: Pointer to work-queue completion queue entry.
13740 * This routine processes a fast-path work queue completion entry from a
13741 * fast-path event queue for FCP command response completion.
13744 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13745 struct lpfc_wcqe_complete *wcqe)
13747 struct lpfc_sli_ring *pring = cq->pring;
13748 struct lpfc_iocbq *cmdiocbq;
13749 struct lpfc_iocbq irspiocbq;
13750 unsigned long iflags;
13752 /* Check for response status */
13753 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13754 /* If resource errors reported from HBA, reduce queue
13755 * depth of the SCSI device.
13757 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13758 IOSTAT_LOCAL_REJECT)) &&
13759 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13760 IOERR_NO_RESOURCES))
13761 phba->lpfc_rampdown_queue_depth(phba);
13763 /* Log the error status */
13764 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13765 "0373 FCP CQE error: status=x%x: "
13766 "CQE: %08x %08x %08x %08x\n",
13767 bf_get(lpfc_wcqe_c_status, wcqe),
13768 wcqe->word0, wcqe->total_data_placed,
13769 wcqe->parameter, wcqe->word3);
13772 /* Look up the FCP command IOCB and create pseudo response IOCB */
13773 spin_lock_irqsave(&pring->ring_lock, iflags);
13774 pring->stats.iocb_event++;
13775 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13776 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13777 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13778 if (unlikely(!cmdiocbq)) {
13779 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13780 "0374 FCP complete with no corresponding "
13781 "cmdiocb: iotag (%d)\n",
13782 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13785 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13786 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13788 if (cmdiocbq->iocb_cmpl == NULL) {
13789 if (cmdiocbq->wqe_cmpl) {
13790 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13791 spin_lock_irqsave(&phba->hbalock, iflags);
13792 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13793 spin_unlock_irqrestore(&phba->hbalock, iflags);
13796 /* Pass the cmd_iocb and the wcqe to the upper layer */
13797 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13800 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13801 "0375 FCP cmdiocb not callback function "
13803 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13807 /* Fake the irspiocb and copy necessary response information */
13808 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13810 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13811 spin_lock_irqsave(&phba->hbalock, iflags);
13812 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13813 spin_unlock_irqrestore(&phba->hbalock, iflags);
13816 /* Pass the cmd_iocb and the rsp state to the upper layer */
13817 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
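/*
 * Editorial note: completion delivery above has two tiers. SLI-4 native
 * consumers register wqe_cmpl and receive the raw WCQE; legacy consumers
 * register iocb_cmpl and receive a pseudo response IOCB synthesized by
 * lpfc_sli4_iocb_param_transfer(). In outline:
 *
 *	if (!cmdiocbq->iocb_cmpl && cmdiocbq->wqe_cmpl)
 *		cmdiocbq->wqe_cmpl(phba, cmdiocbq, wcqe);	 // native
 *	else if (cmdiocbq->iocb_cmpl)
 *		cmdiocbq->iocb_cmpl(phba, cmdiocbq, &irspiocbq); // legacy
 *
 * In both tiers the LPFC_DRIVER_ABORTED flag is cleared under hbalock
 * before the callback runs.
 */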
13821 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13822 * @phba: Pointer to HBA context object.
13823 * @cq: Pointer to completion queue.
13824 * @wcqe: Pointer to work-queue completion queue entry.
13826 * This routine handles a fast-path WQ entry consumed event by invoking the
13827 * proper WQ release routine to the slow-path WQ.
13830 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13831 struct lpfc_wcqe_release *wcqe)
13833 struct lpfc_queue *childwq;
13834 bool wqid_matched = false;
13837 /* Check for fast-path FCP work queue release */
13838 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13839 list_for_each_entry(childwq, &cq->child_list, list) {
13840 if (childwq->queue_id == hba_wqid) {
13841 lpfc_sli4_wq_release(childwq,
13842 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13843 if (childwq->q_flag & HBA_NVMET_WQFULL)
13844 lpfc_nvmet_wqfull_process(phba, childwq);
13845 wqid_matched = true;
13849 /* Report warning log message if no match found */
13850 if (!wqid_matched)
13851 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13852 "2580 Fast-path wqe consume event carries "
13853 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13857 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13858 * @phba: Pointer to HBA context object.
13859 * @cq: Pointer to the completion queue.
13860 * @rcqe: Pointer to receive-queue completion queue entry.
13861 * This routine processes a receive-queue completion queue entry.
13863 * Return: true if work posted to worker thread, otherwise false.
13866 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13867 struct lpfc_rcqe *rcqe)
13869 bool workposted = false;
13870 struct lpfc_queue *hrq;
13871 struct lpfc_queue *drq;
13872 struct rqb_dmabuf *dma_buf;
13873 struct fc_frame_header *fc_hdr;
13874 struct lpfc_nvmet_tgtport *tgtp;
13875 uint32_t status, rq_id;
13876 unsigned long iflags;
13877 uint32_t fctl, idx;
13879 if ((phba->nvmet_support == 0) ||
13880 (phba->sli4_hba.nvmet_cqset == NULL))
13883 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13884 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13885 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13887 /* sanity check on queue memory */
13888 if (unlikely(!hrq) || unlikely(!drq))
13891 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13892 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13894 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13896 if ((phba->nvmet_support == 0) ||
13897 (rq_id != hrq->queue_id))
13900 status = bf_get(lpfc_rcqe_status, rcqe);
13902 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13903 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13904 "6126 Receive Frame Truncated!!\n");
13906 case FC_STATUS_RQ_SUCCESS:
13907 spin_lock_irqsave(&phba->hbalock, iflags);
13908 lpfc_sli4_rq_release(hrq, drq);
13909 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13911 hrq->RQ_no_buf_found++;
13912 spin_unlock_irqrestore(&phba->hbalock, iflags);
13915 spin_unlock_irqrestore(&phba->hbalock, iflags);
13917 hrq->RQ_buf_posted--;
13918 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13920 /* Just some basic sanity checks on FCP Command frame */
13921 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13922 fc_hdr->fh_f_ctl[1] << 8 |
13923 fc_hdr->fh_f_ctl[2]);
13925 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13926 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13927 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13930 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13931 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13932 lpfc_nvmet_unsol_fcp_event(
13933 phba, idx, dma_buf, cq->isr_timestamp,
13934 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
13938 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
13940 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13941 if (phba->nvmet_support) {
13942 tgtp = phba->targetport->private;
13943 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13944 "6401 RQE Error x%x, posted %d err_cnt "
13946 status, hrq->RQ_buf_posted,
13947 hrq->RQ_no_posted_buf,
13948 atomic_read(&tgtp->rcv_fcp_cmd_in),
13949 atomic_read(&tgtp->rcv_fcp_cmd_out),
13950 atomic_read(&tgtp->xmt_fcp_release));
13954 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13955 hrq->RQ_no_posted_buf++;
13956 /* Post more buffers if possible */
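/*
 * Editorial sketch: fh_f_ctl is a 3-byte field, so the 24-bit F_CTL word
 * tested above is assembled byte by byte before verifying that the frame
 * is a complete single-sequence command (first and last frame of the
 * sequence, with sequence initiative transferred):
 *
 *	uint32_t fctl = (fc_hdr->fh_f_ctl[0] << 16) |
 *			(fc_hdr->fh_f_ctl[1] << 8) |
 *			 fc_hdr->fh_f_ctl[2];
 *	uint32_t want = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
 *
 *	if ((fctl & want) != want || fc_hdr->fh_seq_cnt != 0)
 *		goto drop_frame;	// hypothetical label: reject frame
 */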
13964 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
13965 * @phba: adapter with cq
13966 * @cq: Pointer to the completion queue.
13967 * @cqe: Pointer to fast-path completion queue entry.
13969 * This routine processes a fast-path work queue completion entry from a
13970 * fast-path event queue for FCP command response completion.
13972 * Return: true if work posted to worker thread, otherwise false.
13975 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13976 struct lpfc_cqe *cqe)
13978 struct lpfc_wcqe_release wcqe;
13979 bool workposted = false;
13981 /* Copy the work queue CQE and convert endian order if needed */
13982 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13984 /* Check and process for different type of WCQE and dispatch */
13985 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13986 case CQE_CODE_COMPL_WQE:
13987 case CQE_CODE_NVME_ERSP:
13989 /* Process the WQ complete event */
13990 phba->last_completion_time = jiffies;
13991 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
13992 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13993 (struct lpfc_wcqe_complete *)&wcqe);
13995 case CQE_CODE_RELEASE_WQE:
13996 cq->CQ_release_wqe++;
13997 /* Process the WQ release event */
13998 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13999 (struct lpfc_wcqe_release *)&wcqe);
14001 case CQE_CODE_XRI_ABORTED:
14002 cq->CQ_xri_aborted++;
14003 /* Process the WQ XRI abort event */
14004 phba->last_completion_time = jiffies;
14005 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14006 (struct sli4_wcqe_xri_aborted *)&wcqe);
14008 case CQE_CODE_RECEIVE_V1:
14009 case CQE_CODE_RECEIVE:
14010 phba->last_completion_time = jiffies;
14011 if (cq->subtype == LPFC_NVMET) {
14012 workposted = lpfc_sli4_nvmet_handle_rcqe(
14013 phba, cq, (struct lpfc_rcqe *)&wcqe);
14017 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14018 "0144 Not a valid CQE code: x%x\n",
14019 bf_get(lpfc_wcqe_c_code, &wcqe));
14026 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14027 * @phba: Pointer to HBA context object.
14028 * @eq: Pointer to the fast-path event queue.
14029 * @eqe: Pointer to fast-path event queue entry.
14030 * This routine processes an event queue entry from the fast-path event
14031 * queue. It checks the MajorCode and MinorCode to determine whether this
14032 * is a completion event on a completion queue; if not, an error is logged
14033 * and the routine returns. Otherwise, it finds the corresponding completion
14034 * queue and schedules processing of all the entries on that completion
14035 * queue, after which the completion queue is rearmed.
14038 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14039 struct lpfc_eqe *eqe)
14041 struct lpfc_queue *cq = NULL;
14042 uint32_t qidx = eq->hdwq;
14045 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14047 "0366 Not a valid completion "
14048 "event: majorcode=x%x, minorcode=x%x\n",
14049 bf_get_le32(lpfc_eqe_major_code, eqe),
14050 bf_get_le32(lpfc_eqe_minor_code, eqe));
14054 /* Get the reference to the corresponding CQ */
14055 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14057 /* Use the fast lookup method first */
14058 if (cqid <= phba->sli4_hba.cq_max) {
14059 cq = phba->sli4_hba.cq_lookup[cqid];
14064 /* Next check for NVMET completion */
14065 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14066 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14067 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14068 /* Process NVMET unsol rcv */
14069 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14074 if (phba->sli4_hba.nvmels_cq &&
14075 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14076 /* Process NVME unsol rcv */
14077 cq = phba->sli4_hba.nvmels_cq;
14080 /* Otherwise this is a Slow path event */
14082 lpfc_sli4_sp_handle_eqe(phba, eqe,
14083 phba->sli4_hba.hdwq[qidx].hba_eq);
14088 if (unlikely(cqid != cq->queue_id)) {
14089 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14090 "0368 Miss-matched fast-path completion "
14091 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14092 cqid, cq->queue_id);
14097 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14098 if (phba->ktime_on)
14099 cq->isr_timestamp = ktime_get_ns();
14101 cq->isr_timestamp = 0;
14103 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
14104 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14105 "0363 Cannot schedule soft IRQ "
14106 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14107 cqid, cq->queue_id, raw_smp_processor_id());
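/*
 * Editorial note: unlike the slow path, the fast path above avoids list
 * walks by indexing the cq_lookup[] table directly with the CQ id carried
 * in the EQE:
 *
 *	if (cqid <= phba->sli4_hba.cq_max)
 *		cq = phba->sli4_hba.cq_lookup[cqid];	// O(1) id-to-CQ map
 *
 * Only the NVMET CQ set and the NVME LS CQ need the range and identity
 * checks; anything still unresolved is routed to the slow-path handler.
 */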
14111 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
14112 * @cq: Pointer to CQ to be processed
14114 * This routine calls the cq processing routine with the handler for
14115 * fast-path CQEs.
14117 * The CQ routine returns two values: the first is the calling status,
14118 * which indicates whether work was queued to the background discovery
14119 * thread. If true, the routine should wakeup the discovery thread;
14120 * the second is the delay parameter. If non-zero, rather than rearming
14121 * the CQ and taking yet another interrupt, the CQ handler should be queued so
14122 * that it is processed in a subsequent polling action. The value of
14123 * the delay indicates when to reschedule it.
14126 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14128 struct lpfc_hba *phba = cq->phba;
14129 unsigned long delay;
14130 bool workposted = false;
14132 /* process and rearm the CQ */
14133 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14137 if (!queue_delayed_work_on(cq->chann, phba->wq,
14138 &cq->sched_irqwork, delay))
14139 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14140 "0367 Cannot schedule soft IRQ "
14141 "for cqid=%d on CPU %d\n",
14142 cq->queue_id, cq->chann);
14145 /* wake up worker thread if there are works to be done */
14147 lpfc_worker_wake_up(phba);
14151 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14152 * workqueue
14153 * @work: pointer to work element
14155 * translates from the work handler and calls the fast-path handler.
14158 lpfc_sli4_hba_process_cq(struct work_struct *work)
14160 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14162 __lpfc_sli4_hba_process_cq(cq);
14166 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14167 * @work: pointer to work element
14169 * translates from the work handler and calls the fast-path handler.
14172 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14174 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14175 struct lpfc_queue, sched_irqwork);
14177 __lpfc_sli4_hba_process_cq(cq);
14181 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14182 * @irq: Interrupt number.
14183 * @dev_id: The device context pointer.
14185 * This function is directly called from the PCI layer as an interrupt
14186 * service routine when a device with SLI-4 interface spec is enabled with
14187 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14188 * ring event in the HBA. However, when the device is enabled with either
14189 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14190 * device-level interrupt handler. When the PCI slot is in error recovery
14191 * or the HBA is undergoing initialization, the interrupt handler will not
14192 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14193 * the interrupt context. This function is called without any lock held.
14194 * It gets the hbalock to access and update SLI data structures. Note that,
14195 * the FCP EQs and FCP CQs are mapped one-to-one, such that the FCP EQ index
14196 * is equal to the FCP CQ index.
14198 * The link attention and ELS ring attention events are handled
14199 * by the worker thread. The interrupt handler signals the worker thread
14200 * and returns for these events. This function is called without any lock
14201 * held. It gets the hbalock to access and update SLI data structures.
14203 * This function returns IRQ_HANDLED when interrupt is handled else it
14204 * returns IRQ_NONE.
14207 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14209 struct lpfc_hba *phba;
14210 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14211 struct lpfc_queue *fpeq;
14212 unsigned long iflag;
14215 struct lpfc_eq_intr_info *eqi;
14218 /* Get the driver's phba structure from the dev_id */
14219 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14220 phba = hba_eq_hdl->phba;
14221 hba_eqidx = hba_eq_hdl->idx;
14223 if (unlikely(!phba))
14225 if (unlikely(!phba->sli4_hba.hdwq))
14228 /* Get to the EQ struct associated with this vector */
14229 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14230 if (unlikely(!fpeq))
14233 /* Check device state for handling interrupt */
14234 if (unlikely(lpfc_intr_state_check(phba))) {
14235 /* Check again for link_state with lock held */
14236 spin_lock_irqsave(&phba->hbalock, iflag);
14237 if (phba->link_state < LPFC_LINK_DOWN)
14238 /* Flush, clear interrupt, and rearm the EQ */
14239 lpfc_sli4_eq_flush(phba, fpeq);
14240 spin_unlock_irqrestore(&phba->hbalock, iflag);
14244 eqi = phba->sli4_hba.eq_info;
14245 icnt = this_cpu_inc_return(eqi->icnt);
14246 fpeq->last_cpu = raw_smp_processor_id();
14248 if (icnt > LPFC_EQD_ISR_TRIGGER &&
14249 phba->cfg_irq_chann == 1 &&
14250 phba->cfg_auto_imax &&
14251 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14252 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14253 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14255 /* process and rearm the EQ */
14256 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14258 if (unlikely(ecount == 0)) {
14259 fpeq->EQ_no_entry++;
14260 if (phba->intr_type == MSIX)
14261 /* An MSI-X vector is not shared, so an empty EQ is unexpected */
14262 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14263 "0358 MSI-X interrupt with no EQE\n");
14265 /* Non MSI-X: interrupt may be shared, so an empty EQ is not an error */
14269 return IRQ_HANDLED;
14270 } /* lpfc_sli4_hba_intr_handler */
14273 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14274 * @irq: Interrupt number.
14275 * @dev_id: The device context pointer.
14277 * This function is the device-level interrupt handler for a device with SLI-4
14278 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14279 * interrupt mode is enabled and there is an event in the HBA which requires
14280 * driver attention. This function invokes the slow-path interrupt attention
14281 * handling function and fast-path interrupt attention handling function in
14282 * turn to process the relevant HBA attention events. This function is called
14283 * without any lock held. It gets the hbalock to access and update SLI data
14286 * This function returns IRQ_HANDLED when interrupt is handled, else it
14287 * returns IRQ_NONE.
14290 lpfc_sli4_intr_handler(int irq, void *dev_id)
14292 struct lpfc_hba *phba;
14293 irqreturn_t hba_irq_rc;
14294 bool hba_handled = false;
14297 /* Get the driver's phba structure from the dev_id */
14298 phba = (struct lpfc_hba *)dev_id;
14300 if (unlikely(!phba))
14304 * Invoke fast-path host attention interrupt handling as appropriate.
14306 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14307 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14308 &phba->sli4_hba.hba_eq_hdl[qidx]);
14309 if (hba_irq_rc == IRQ_HANDLED)
14310 hba_handled = true;
14313 return hba_handled ? IRQ_HANDLED : IRQ_NONE;
14314 } /* lpfc_sli4_intr_handler */
14316 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14318 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14319 struct lpfc_queue *eq;
14324 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14325 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14326 if (!list_empty(&phba->poll_list))
14327 mod_timer(&phba->cpuhp_poll_timer,
14328 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14333 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14335 struct lpfc_hba *phba = eq->phba;
14339 * Unlocking an irq is one of the entry points to check
14340 * for re-schedule, but we are good for the io submission
14341 * path as the midlayer does a get_cpu to glue us in. Flush
14342 * out the invalidate queue so we can see the updated
14343 * value.
14347 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14348 /* We will not likely get the completion for the caller
14349 * during this iteration, but that's fine.
14350 * Future io's coming on this eq should be able to
14351 * pick it up. As for the case of single io's, they
14352 * will be handled through a sched from polling timer
14353 * function which is currently triggered every 1msec.
14355 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14360 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14362 struct lpfc_hba *phba = eq->phba;
14364 if (list_empty(&phba->poll_list)) {
14365 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14366 /* kickstart slowpath processing for this eq */
14367 mod_timer(&phba->cpuhp_poll_timer,
14368 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14371 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14375 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14377 struct lpfc_hba *phba = eq->phba;
14379 /* Disable slowpath processing for this eq. Kick start the eq
14380 * by RE-ARMING the eq's ASAP
14382 list_del_rcu(&eq->_poll_list);
14385 if (list_empty(&phba->poll_list))
14386 del_timer_sync(&phba->cpuhp_poll_timer);
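/*
 * Editorial sketch of the poll-timer lifecycle implemented by the two
 * helpers above: the first EQ added to poll_list arms the heartbeat timer
 * and the last EQ removed tears it down.
 *
 *	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
 *	mod_timer(&phba->cpuhp_poll_timer,
 *		  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
 *	...
 *	if (list_empty(&phba->poll_list))
 *		del_timer_sync(&phba->cpuhp_poll_timer);
 *
 * lpfc_sli4_poll_hbtimer() re-arms itself for as long as poll_list stays
 * non-empty, giving a periodic slow-path sweep over every polled EQ.
 */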
14389 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14391 struct lpfc_queue *eq, *next;
14393 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14394 list_del(&eq->_poll_list);
14396 INIT_LIST_HEAD(&phba->poll_list);
14401 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14403 if (mode == eq->mode)
14406 * Currently this function is only called during a hotplug
14407 * event and the cpu on which this function is executing
14408 * is going offline. By now the hotplug has instructed
14409 * the scheduler to remove this cpu from the cpu active mask.
14410 * So we don't need to worry about being put aside by the
14411 * scheduler for a high priority process. Yes, interrupts
14412 * could still come in, but they are known to retire ASAP.
14415 /* Disable polling in the fastpath */
14416 WRITE_ONCE(eq->mode, mode);
14417 /* flush out the store buffer */
14421 * Add this eq to the polling list and start polling. For
14422 * a grace period both the interrupt handler and the poller
14423 * will try to process the eq _but_ that's fine. We have a
14424 * synchronization mechanism in place (queue_claimed) to
14425 * deal with it. This is just a draining phase for the
14426 * interrupt handler (not eq's) as we have guaranteed, through
14427 * the barrier, that all the CPUs have seen the new CQ_POLLED
14428 * state, which effectively disables the REARMING of
14429 * the EQ. The whole idea is that eq's die off eventually as
14430 * we are not rearming EQ's anymore.
14432 mode ? lpfc_sli4_add_to_poll_list(eq) :
14433 lpfc_sli4_remove_from_poll_list(eq);
14436 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14438 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14441 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14443 struct lpfc_hba *phba = eq->phba;
14445 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14447 /* Kick start the pending io's in h/w.
14448 * Once we switch back to interrupt processing on an eq,
14449 * the io path completion will only arm eq's when it
14450 * receives a completion. But since the eq's are in a
14451 * disarmed state, no completion will be received. This
14452 * creates a deadlock scenario.
14454 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
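/*
 * Editorial usage sketch for the mode switch above, e.g. from a CPU
 * hotplug callback (the calling context here is assumed, not shown):
 *
 *	lpfc_sli4_start_polling(eq);	// CPU going down: timer services EQ
 *	...
 *	lpfc_sli4_stop_polling(eq);	// CPU back: rearm, use interrupts
 *
 * The explicit sli4_write_eq_db(..., LPFC_QUEUE_REARM) in stop_polling
 * breaks the chicken-and-egg problem described in the comment above: a
 * disarmed EQ never raises the completion that would otherwise rearm it.
 */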
14458 * lpfc_sli4_queue_free - free a queue structure and associated memory
14459 * @queue: The queue structure to free.
14461 * This function frees a queue structure and the DMAable memory used for
14462 * the host resident queue. This function must be called after destroying the
14463 * queue on the HBA.
14466 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14468 struct lpfc_dmabuf *dmabuf;
14473 if (!list_empty(&queue->wq_list))
14474 list_del(&queue->wq_list);
14476 while (!list_empty(&queue->page_list)) {
14477 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14479 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14480 dmabuf->virt, dmabuf->phys);
14484 lpfc_free_rq_buffer(queue->phba, queue);
14485 kfree(queue->rqbp);
14488 if (!list_empty(&queue->cpu_list))
14489 list_del(&queue->cpu_list);
14496 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14497 * @phba: The HBA that this queue is being created on.
14498 * @page_size: The size of a queue page
14499 * @entry_size: The size of each queue entry for this queue.
14500 * @entry_count: The number of entries that this queue will handle.
14501 * @cpu: The cpu that will primarily utilize this queue.
14503 * This function allocates a queue structure and the DMAable memory used for
14504 * the host resident queue. This function must be called before creating the
14505 * queue on the HBA.
14507 struct lpfc_queue *
14508 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14509 uint32_t entry_size, uint32_t entry_count, int cpu)
14511 struct lpfc_queue *queue;
14512 struct lpfc_dmabuf *dmabuf;
14513 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14516 if (!phba->sli4_hba.pc_sli4_params.supported)
14517 hw_page_size = page_size;
14519 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14521 /* If needed, adjust page count to match the max the adapter supports */
14522 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14523 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14525 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14526 GFP_KERNEL, cpu_to_node(cpu));
14530 INIT_LIST_HEAD(&queue->list);
14531 INIT_LIST_HEAD(&queue->_poll_list);
14532 INIT_LIST_HEAD(&queue->wq_list);
14533 INIT_LIST_HEAD(&queue->wqfull_list);
14534 INIT_LIST_HEAD(&queue->page_list);
14535 INIT_LIST_HEAD(&queue->child_list);
14536 INIT_LIST_HEAD(&queue->cpu_list);
14538 /* Set queue parameters now. If the system cannot provide memory
14539 * resources, the free routine needs to know what was allocated.
14541 queue->page_count = pgcnt;
14542 queue->q_pgs = (void **)&queue[1];
14543 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14544 queue->entry_size = entry_size;
14545 queue->entry_count = entry_count;
14546 queue->page_size = hw_page_size;
14547 queue->phba = phba;
14549 for (x = 0; x < queue->page_count; x++) {
14550 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14551 dev_to_node(&phba->pcidev->dev));
14554 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14555 hw_page_size, &dmabuf->phys,
14557 if (!dmabuf->virt) {
14561 dmabuf->buffer_tag = x;
14562 list_add_tail(&dmabuf->list, &queue->page_list);
14563 /* use lpfc_sli4_qe to index a particular entry in this page */
14564 queue->q_pgs[x] = dmabuf->virt;
14566 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14567 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14568 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14569 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14571 /* notify_interval will be set during q creation */
14575 lpfc_sli4_queue_free(queue);
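/*
 * Editorial usage sketch (entry size, count and imax are placeholder
 * values): queue allocation pairs with lpfc_sli4_queue_free(), while the
 * HBA-side create step is a separate mailbox command:
 *
 *	uint32_t imax = 0;	// placeholder interrupt-per-second limit
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 32, 1024, cpu);
 *	if (eq && lpfc_eq_create(phba, eq, imax)) {
 *		lpfc_sli4_queue_free(eq);	// create failed: release pages
 *		eq = NULL;
 *	}
 */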
14580 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14581 * @phba: HBA structure that indicates port to create a queue on.
14582 * @pci_barset: PCI BAR set flag.
14584 * This function iomaps the specified PCI BAR address into host memory, if
14585 * that has not already been done, and returns the mapped address. The
14586 * returned host memory address can be NULL.
14588 static void __iomem *
14589 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14594 switch (pci_barset) {
14595 case WQ_PCI_BAR_0_AND_1:
14596 return phba->pci_bar0_memmap_p;
14597 case WQ_PCI_BAR_2_AND_3:
14598 return phba->pci_bar2_memmap_p;
14599 case WQ_PCI_BAR_4_AND_5:
14600 return phba->pci_bar4_memmap_p;
14608 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14609 * @phba: HBA structure that EQs are on.
14610 * @startq: The starting EQ index to modify
14611 * @numq: The number of EQs (consecutive indexes) to modify
14612 * @usdelay: amount of delay
14614 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14615 * is set either by writing to a register (if supported by the SLI Port)
14616 * or by mailbox command. The mailbox command allows several EQs to be
14619 * The @phba struct is used to send a mailbox command to HBA. The @startq
14620 * is used to get the starting EQ index to change. The @numq value is
14621 * used to specify how many consecutive EQ indexes, starting at EQ index,
14622 * are to be changed. This function is synchronous and will wait for any
14623 * mailbox commands to finish before returning.
14625 * On success this function will return a zero. If unable to allocate
14626 * enough memory this function will return -ENOMEM. If a mailbox command
14627 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
14628 * have had their delay multiplier changed.
14631 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14632 uint32_t numq, uint32_t usdelay)
14634 struct lpfc_mbx_modify_eq_delay *eq_delay;
14635 LPFC_MBOXQ_t *mbox;
14636 struct lpfc_queue *eq;
14637 int cnt = 0, rc, length;
14638 uint32_t shdr_status, shdr_add_status;
14641 union lpfc_sli4_cfg_shdr *shdr;
14643 if (startq >= phba->cfg_irq_chann)
14646 if (usdelay > 0xFFFF) {
14647 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14648 "6429 usdelay %d too large. Scaled down to "
14649 "0xFFFF.\n", usdelay);
14653 /* set values by EQ_DELAY register if supported */
14654 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14655 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14656 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14660 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
14668 /* Otherwise, set values by mailbox cmd */
14670 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14672 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14673 "6428 Failed allocating mailbox cmd buffer."
14674 " EQ delay was not set.\n");
14677 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14678 sizeof(struct lpfc_sli4_cfg_mhdr));
14679 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14680 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14681 length, LPFC_SLI4_MBX_EMBED);
14682 eq_delay = &mbox->u.mqe.un.eq_delay;
14684 /* Calculate delay multiplier from maximum interrupts per second */
14685 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14688 if (dmult > LPFC_DMULT_MAX)
14689 dmult = LPFC_DMULT_MAX;
14691 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14692 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14695 eq->q_mode = usdelay;
14696 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14697 eq_delay->u.request.eq[cnt].phase = 0;
14698 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14703 eq_delay->u.request.num_eq = cnt;
14705 mbox->vport = phba->pport;
14706 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14707 mbox->ctx_buf = NULL;
14708 mbox->ctx_ndlp = NULL;
14709 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14710 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14711 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14712 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14713 if (shdr_status || shdr_add_status || rc) {
14714 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14715 "2512 MODIFY_EQ_DELAY mailbox failed with "
14716 "status x%x add_status x%x, mbx status x%x\n",
14717 shdr_status, shdr_add_status, rc);
14719 mempool_free(mbox, phba->mbox_mem_pool);
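/*
 * Editorial usage sketch (the 16us value is arbitrary): revise interrupt
 * coalescing on every EQ starting at index 0:
 *
 *	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);
 *
 * With EQ Delay Register support (LPFC_SLI_USE_EQDR) this becomes one
 * register write per EQ; otherwise a single MODIFY_EQ_DELAY mailbox
 * command carries the scaled delay multiplier for all requested EQs.
 */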
14724 * lpfc_eq_create - Create an Event Queue on the HBA
14725 * @phba: HBA structure that indicates port to create a queue on.
14726 * @eq: The queue structure to use to create the event queue.
14727 * @imax: The maximum interrupt per second limit.
14729 * This function creates an event queue, as detailed in @eq, on a port,
14730 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14732 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14733 * is used to get the entry count and entry size that are necessary to
14734 * determine the number of pages to allocate and use for this queue. This
14735 * function will send the EQ_CREATE mailbox command to the HBA to setup the
14736 * event queue. This function is synchronous and will wait for the mailbox
14737 * command to finish before continuing.
14739 * On success this function will return a zero. If unable to allocate enough
14740 * memory this function will return -ENOMEM. If the queue create mailbox command
14741 * fails this function will return -ENXIO.
14744 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14746 struct lpfc_mbx_eq_create *eq_create;
14747 LPFC_MBOXQ_t *mbox;
14748 int rc, length, status = 0;
14749 struct lpfc_dmabuf *dmabuf;
14750 uint32_t shdr_status, shdr_add_status;
14751 union lpfc_sli4_cfg_shdr *shdr;
14753 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14755 /* sanity check on queue memory */
14758 if (!phba->sli4_hba.pc_sli4_params.supported)
14759 hw_page_size = SLI4_PAGE_SIZE;
14761 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14764 length = (sizeof(struct lpfc_mbx_eq_create) -
14765 sizeof(struct lpfc_sli4_cfg_mhdr));
14766 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14767 LPFC_MBOX_OPCODE_EQ_CREATE,
14768 length, LPFC_SLI4_MBX_EMBED);
14769 eq_create = &mbox->u.mqe.un.eq_create;
14770 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14771 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14773 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14775 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14777 /* Use version 2 of CREATE_EQ if eqav is set */
14778 if (phba->sli4_hba.pc_sli4_params.eqav) {
14779 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14780 LPFC_Q_CREATE_VERSION_2);
14781 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14782 phba->sli4_hba.pc_sli4_params.eqav);
14785 /* don't setup delay multiplier using EQ_CREATE */
14787 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14789 switch (eq->entry_count) {
14791 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14792 "0360 Unsupported EQ count. (%d)\n",
14794 if (eq->entry_count < 256) {
14798 /* fall through - otherwise default to smallest count */
14800 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14804 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14808 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14812 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14816 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14820 list_for_each_entry(dmabuf, &eq->page_list, list) {
14821 memset(dmabuf->virt, 0, hw_page_size);
14822 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14823 putPaddrLow(dmabuf->phys);
14824 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14825 putPaddrHigh(dmabuf->phys);
14827 mbox->vport = phba->pport;
14828 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14829 mbox->ctx_buf = NULL;
14830 mbox->ctx_ndlp = NULL;
14831 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14832 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14833 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14834 if (shdr_status || shdr_add_status || rc) {
14835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14836 "2500 EQ_CREATE mailbox failed with "
14837 "status x%x add_status x%x, mbx status x%x\n",
14838 shdr_status, shdr_add_status, rc);
14841 eq->type = LPFC_EQ;
14842 eq->subtype = LPFC_NONE;
14843 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14844 if (eq->queue_id == 0xFFFF)
14846 eq->host_index = 0;
14847 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
14848 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
14850 mempool_free(mbox, phba->mbox_mem_pool);
14855 * lpfc_cq_create - Create a Completion Queue on the HBA
14856 * @phba: HBA structure that indicates port to create a queue on.
14857 * @cq: The queue structure to use to create the completion queue.
14858 * @eq: The event queue to bind this completion queue to.
14860 * This function creates a completion queue, as detailed in @cq, on a port,
14861 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14863 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14864 * is used to get the entry count and entry size that are necessary to
14865 * determine the number of pages to allocate and use for this queue. The @eq
14866 * is used to indicate which event queue to bind this completion queue to. This
14867 * function will send the CQ_CREATE mailbox command to the HBA to setup the
14868 * completion queue. This function is synchronous and will wait for the mailbox
14869 * command to finish before continuing.
14871 * On success this function will return a zero. If unable to allocate enough
14872 * memory this function will return -ENOMEM. If the queue create mailbox command
14873 * fails this function will return -ENXIO.
14876 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14877 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14879 struct lpfc_mbx_cq_create *cq_create;
14880 struct lpfc_dmabuf *dmabuf;
14881 LPFC_MBOXQ_t *mbox;
14882 int rc, length, status = 0;
14883 uint32_t shdr_status, shdr_add_status;
14884 union lpfc_sli4_cfg_shdr *shdr;
14886 /* sanity check on queue memory */
14890 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14893 length = (sizeof(struct lpfc_mbx_cq_create) -
14894 sizeof(struct lpfc_sli4_cfg_mhdr));
14895 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14896 LPFC_MBOX_OPCODE_CQ_CREATE,
14897 length, LPFC_SLI4_MBX_EMBED);
14898 cq_create = &mbox->u.mqe.un.cq_create;
14899 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14900 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14902 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14903 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14904 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14905 phba->sli4_hba.pc_sli4_params.cqv);
14906 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14907 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14908 (cq->page_size / SLI4_PAGE_SIZE));
14909 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14911 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14912 phba->sli4_hba.pc_sli4_params.cqav);
14914 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14917 switch (cq->entry_count) {
14920 if (phba->sli4_hba.pc_sli4_params.cqv ==
14921 LPFC_Q_CREATE_VERSION_2) {
14922 cq_create->u.request.context.lpfc_cq_context_count =
14924 bf_set(lpfc_cq_context_count,
14925 &cq_create->u.request.context,
14926 LPFC_CQ_CNT_WORD7);
14931 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14932 "0361 Unsupported CQ count: "
14933 "entry cnt %d sz %d pg cnt %d\n",
14934 cq->entry_count, cq->entry_size,
14936 if (cq->entry_count < 256) {
14940 /* fall through - otherwise default to smallest count */
14942 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14946 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14950 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14954 list_for_each_entry(dmabuf, &cq->page_list, list) {
14955 memset(dmabuf->virt, 0, cq->page_size);
14956 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14957 putPaddrLow(dmabuf->phys);
14958 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14959 putPaddrHigh(dmabuf->phys);
14961 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14963 /* The IOCTL status is embedded in the mailbox subheader. */
14964 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14965 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14966 if (shdr_status || shdr_add_status || rc) {
14967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14968 "2501 CQ_CREATE mailbox failed with "
14969 "status x%x add_status x%x, mbx status x%x\n",
14970 shdr_status, shdr_add_status, rc);
14974 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14975 if (cq->queue_id == 0xFFFF) {
14979 /* link the cq onto the parent eq child list */
14980 list_add_tail(&cq->list, &eq->child_list);
14981 /* Set up completion queue's type and subtype */
14983 cq->subtype = subtype;
14984 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14985 cq->assoc_qid = eq->queue_id;
14987 cq->host_index = 0;
14988 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14989 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14991 if (cq->queue_id > phba->sli4_hba.cq_max)
14992 phba->sli4_hba.cq_max = cq->queue_id;
14994 mempool_free(mbox, phba->mbox_mem_pool);
14999 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15000 * @phba: HBA structure that indicates port to create a queue on.
15001 * @cqp: The queue structure array to use to create the completion queues.
15002 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
15004 * This function creates a set of completion queues to support MRQ,
15005 * as detailed in @cqp, on a port,
15006 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15008 * The @phba struct is used to send mailbox command to HBA. The @cqp array
15009 * is used to get the entry count and entry size that are necessary to
15010 * determine the number of pages to allocate and use for these queues. The
15011 * @hdwq array indicates which event queue each completion queue binds to. This
15012 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15013 * completion queues. This function is synchronous and will wait for the mailbox
15014 * command to finish before continuing.
15016 * On success this function will return a zero. If unable to allocate enough
15017 * memory this function will return -ENOMEM. If the queue create mailbox command
15018 * fails this function will return -ENXIO.
15021 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15022 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15025 struct lpfc_queue *cq;
15026 struct lpfc_queue *eq;
15027 struct lpfc_mbx_cq_create_set *cq_set;
15028 struct lpfc_dmabuf *dmabuf;
15029 LPFC_MBOXQ_t *mbox;
15030 int rc, length, alloclen, status = 0;
15031 int cnt, idx, numcq, page_idx = 0;
15032 uint32_t shdr_status, shdr_add_status;
15033 union lpfc_sli4_cfg_shdr *shdr;
15034 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15036 /* sanity check on queue memory */
15037 numcq = phba->cfg_nvmet_mrq;
15038 if (!cqp || !hdwq || !numcq)
15041 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15045 length = sizeof(struct lpfc_mbx_cq_create_set);
15046 length += ((numcq * cqp[0]->page_count) *
15047 sizeof(struct dma_address));
15048 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15049 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15050 LPFC_SLI4_MBX_NEMBED);
15051 if (alloclen < length) {
15052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15053 "3098 Allocated DMA memory size (%d) is "
15054 "less than the requested DMA memory size "
15055 "(%d)\n", alloclen, length);
15059 cq_set = mbox->sge_array->addr[0];
15060 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15061 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15063 for (idx = 0; idx < numcq; idx++) {
15065 eq = hdwq[idx].hba_eq;
15070 if (!phba->sli4_hba.pc_sli4_params.supported)
15071 hw_page_size = cq->page_size;
15075 bf_set(lpfc_mbx_cq_create_set_page_size,
15076 &cq_set->u.request,
15077 (hw_page_size / SLI4_PAGE_SIZE));
15078 bf_set(lpfc_mbx_cq_create_set_num_pages,
15079 &cq_set->u.request, cq->page_count);
15080 bf_set(lpfc_mbx_cq_create_set_evt,
15081 &cq_set->u.request, 1);
15082 bf_set(lpfc_mbx_cq_create_set_valid,
15083 &cq_set->u.request, 1);
15084 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15085 &cq_set->u.request, 0);
15086 bf_set(lpfc_mbx_cq_create_set_num_cq,
15087 &cq_set->u.request, numcq);
15088 bf_set(lpfc_mbx_cq_create_set_autovalid,
15089 &cq_set->u.request,
15090 phba->sli4_hba.pc_sli4_params.cqav);
15091 switch (cq->entry_count) {
15094 if (phba->sli4_hba.pc_sli4_params.cqv ==
15095 LPFC_Q_CREATE_VERSION_2) {
15096 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15097 &cq_set->u.request,
15099 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15100 &cq_set->u.request,
15101 LPFC_CQ_CNT_WORD7);
15106 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15107 "3118 Bad CQ count. (%d)\n",
15109 if (cq->entry_count < 256) {
15113 /* fall through - otherwise default to smallest */
15115 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15116 &cq_set->u.request, LPFC_CQ_CNT_256);
15119 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15120 &cq_set->u.request, LPFC_CQ_CNT_512);
15123 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15124 &cq_set->u.request, LPFC_CQ_CNT_1024);
15127 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15128 &cq_set->u.request, eq->queue_id);
15131 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15132 &cq_set->u.request, eq->queue_id);
15135 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15136 &cq_set->u.request, eq->queue_id);
15139 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15140 &cq_set->u.request, eq->queue_id);
15143 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15144 &cq_set->u.request, eq->queue_id);
15147 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15148 &cq_set->u.request, eq->queue_id);
15151 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15152 &cq_set->u.request, eq->queue_id);
15155 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15156 &cq_set->u.request, eq->queue_id);
15159 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15160 &cq_set->u.request, eq->queue_id);
15163 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15164 &cq_set->u.request, eq->queue_id);
15167 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15168 &cq_set->u.request, eq->queue_id);
15171 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15172 &cq_set->u.request, eq->queue_id);
15175 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15176 &cq_set->u.request, eq->queue_id);
15179 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15180 &cq_set->u.request, eq->queue_id);
15183 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15184 &cq_set->u.request, eq->queue_id);
15187 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15188 &cq_set->u.request, eq->queue_id);
15192 /* link the cq onto the parent eq child list */
15193 list_add_tail(&cq->list, &eq->child_list);
15194 /* Set up completion queue's type and subtype */
15196 cq->subtype = subtype;
15197 cq->assoc_qid = eq->queue_id;
15199 cq->host_index = 0;
15200 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15201 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15206 list_for_each_entry(dmabuf, &cq->page_list, list) {
15207 memset(dmabuf->virt, 0, hw_page_size);
15208 cnt = page_idx + dmabuf->buffer_tag;
15209 cq_set->u.request.page[cnt].addr_lo =
15210 putPaddrLow(dmabuf->phys);
15211 cq_set->u.request.page[cnt].addr_hi =
15212 putPaddrHigh(dmabuf->phys);
15218 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15220 /* The IOCTL status is embedded in the mailbox subheader. */
15221 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15222 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15223 if (shdr_status || shdr_add_status || rc) {
15224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15225 "3119 CQ_CREATE_SET mailbox failed with "
15226 "status x%x add_status x%x, mbx status x%x\n",
15227 shdr_status, shdr_add_status, rc);
15231 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15232 if (rc == 0xFFFF) {
15237 for (idx = 0; idx < numcq; idx++) {
15239 cq->queue_id = rc + idx;
15240 if (cq->queue_id > phba->sli4_hba.cq_max)
15241 phba->sli4_hba.cq_max = cq->queue_id;
15245 lpfc_sli4_mbox_cmd_free(phba, mbox);
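/*
 * Editorial note: CREATE_CQ_SET reports a single base queue id for the
 * whole set, and member CQ ids are consecutive, which is why the loop
 * above assigns them by offset:
 *
 *	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
 *	for (idx = 0; idx < numcq; idx++)
 *		cqp[idx]->queue_id = rc + idx;
 */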
15250 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15251 * @phba: HBA structure that indicates port to create a queue on.
15252 * @mq: The queue structure to use to create the mailbox queue.
15253 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15254 * @cq: The completion queue to associate with this mq.
15256 * This function provides fallback (fb) functionality when the
15257 * mq_create_ext fails on older FW generations. Its purpose is identical
15258 * to mq_create_ext otherwise.
15260 * This routine cannot fail as all attributes were previously accessed and
15261 * initialized in mq_create_ext.
15264 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15265 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15267 struct lpfc_mbx_mq_create *mq_create;
15268 struct lpfc_dmabuf *dmabuf;
15271 length = (sizeof(struct lpfc_mbx_mq_create) -
15272 sizeof(struct lpfc_sli4_cfg_mhdr));
15273 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15274 LPFC_MBOX_OPCODE_MQ_CREATE,
15275 length, LPFC_SLI4_MBX_EMBED);
15276 mq_create = &mbox->u.mqe.un.mq_create;
15277 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15279 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15281 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15282 switch (mq->entry_count) {
15284 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15285 LPFC_MQ_RING_SIZE_16);
15288 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15289 LPFC_MQ_RING_SIZE_32);
15292 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15293 LPFC_MQ_RING_SIZE_64);
15296 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15297 LPFC_MQ_RING_SIZE_128);
15300 list_for_each_entry(dmabuf, &mq->page_list, list) {
15301 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15302 putPaddrLow(dmabuf->phys);
15303 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15304 putPaddrHigh(dmabuf->phys);
15309 * lpfc_mq_create - Create a mailbox Queue on the HBA
15310 * @phba: HBA structure that indicates port to create a queue on.
15311 * @mq: The queue structure to use to create the mailbox queue.
15312 * @cq: The completion queue to associate with this mq.
15313 * @subtype: The queue's subtype.
15315 * This function creates a mailbox queue, as detailed in @mq, on a port,
15316 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15318 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15319 * is used to get the entry count and entry size that are necessary to
15320 * determine the number of pages to allocate and use for this queue. This
15321 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15322 * mailbox queue. This function is synchronous and will wait for the mailbox
15323 * command to finish before continuing.
15325 * On success this function will return a zero. If unable to allocate enough
15326 * memory this function will return -ENOMEM. If the queue create mailbox command
15327 * fails this function will return -ENXIO.
15330 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15331 struct lpfc_queue *cq, uint32_t subtype)
15333 struct lpfc_mbx_mq_create *mq_create;
15334 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15335 struct lpfc_dmabuf *dmabuf;
15336 LPFC_MBOXQ_t *mbox;
15337 int rc, length, status = 0;
15338 uint32_t shdr_status, shdr_add_status;
15339 union lpfc_sli4_cfg_shdr *shdr;
15340 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15342 /* sanity check on queue memory */
15345 if (!phba->sli4_hba.pc_sli4_params.supported)
15346 hw_page_size = SLI4_PAGE_SIZE;
15348 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15351 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15352 sizeof(struct lpfc_sli4_cfg_mhdr));
15353 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15354 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15355 length, LPFC_SLI4_MBX_EMBED);
15357 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15358 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15359 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15360 &mq_create_ext->u.request, mq->page_count);
15361 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15362 &mq_create_ext->u.request, 1);
15363 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15364 &mq_create_ext->u.request, 1);
15365 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15366 &mq_create_ext->u.request, 1);
15367 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15368 &mq_create_ext->u.request, 1);
15369 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15370 &mq_create_ext->u.request, 1);
15371 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15372 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15373 phba->sli4_hba.pc_sli4_params.mqv);
15374 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15375 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15378 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15380 switch (mq->entry_count) {
15382 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15383 "0362 Unsupported MQ count. (%d)\n",
15385 if (mq->entry_count < 16) {
15389 /* fall through - otherwise default to smallest count */
15391 bf_set(lpfc_mq_context_ring_size,
15392 &mq_create_ext->u.request.context,
15393 LPFC_MQ_RING_SIZE_16);
15396 bf_set(lpfc_mq_context_ring_size,
15397 &mq_create_ext->u.request.context,
15398 LPFC_MQ_RING_SIZE_32);
15401 bf_set(lpfc_mq_context_ring_size,
15402 &mq_create_ext->u.request.context,
15403 LPFC_MQ_RING_SIZE_64);
15406 bf_set(lpfc_mq_context_ring_size,
15407 &mq_create_ext->u.request.context,
15408 LPFC_MQ_RING_SIZE_128);
15411 list_for_each_entry(dmabuf, &mq->page_list, list) {
15412 memset(dmabuf->virt, 0, hw_page_size);
15413 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15414 putPaddrLow(dmabuf->phys);
15415 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15416 putPaddrHigh(dmabuf->phys);
15418 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15419 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15420 &mq_create_ext->u.response);
15421 if (rc != MBX_SUCCESS) {
15422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15423 "2795 MQ_CREATE_EXT failed with "
15424 "status x%x. Failback to MQ_CREATE.\n",
15426 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15427 mq_create = &mbox->u.mqe.un.mq_create;
15428 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15429 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15430 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15431 &mq_create->u.response);
15434 /* The IOCTL status is embedded in the mailbox subheader. */
15435 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15436 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15437 if (shdr_status || shdr_add_status || rc) {
15438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15439 "2502 MQ_CREATE mailbox failed with "
15440 "status x%x add_status x%x, mbx status x%x\n",
15441 shdr_status, shdr_add_status, rc);
15445 if (mq->queue_id == 0xFFFF) {
15446 status = -ENXIO;
15447 goto out;
15448 }
15449 mq->type = LPFC_MQ;
15450 mq->assoc_qid = cq->queue_id;
15451 mq->subtype = subtype;
15452 mq->host_index = 0;
15455 /* link the mq onto the parent cq child list */
15456 list_add_tail(&mq->list, &cq->child_list);
15457 out:
15458 mempool_free(mbox, phba->mbox_mem_pool);
15459 return status;
15460 }
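/*
 * Editor's usage sketch (illustrative, not part of the driver): binding the
 * mailbox queue to its completion queue during SLI4 queue setup. The queue
 * pointers phba->sli4_hba.mbx_wq/mbx_cq and the LPFC_DOC_EXAMPLES guard are
 * assumptions of this sketch.
 */
#ifdef LPFC_DOC_EXAMPLES
static int lpfc_example_setup_mq(struct lpfc_hba *phba)
{
	int rc;

	/* The MQ entry count must be 16, 32, 64 or 128 (see switch above). */
	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: MQ create failed, rc = %d\n", rc);
	return rc;
}
#endif /* LPFC_DOC_EXAMPLES */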
15463 * lpfc_wq_create - Create a Work Queue on the HBA
15464 * @phba: HBA structure that indicates port to create a queue on.
15465 * @wq: The queue structure to use to create the work queue.
15466 * @cq: The completion queue to bind this work queue to.
15467 * @subtype: The subtype of the work queue indicating its functionality.
15469 * This function creates a work queue, as detailed in @wq, on a port, described
15470 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15472 * The @phba struct is used to send the mailbox command to the HBA. The @wq
15473 * struct is used to get the entry count and entry size that are necessary to
15474 * determine the number of pages to allocate and use for this queue. The @cq
15475 * is used to indicate which completion queue to bind this work queue to. This
15476 * function will send the WQ_CREATE mailbox command to the HBA to set up the
15477 * work queue. The mailbox command is issued in polled mode, so this function
15478 * waits for it to complete before continuing.
15480 * On success this function will return a zero. If unable to allocate enough
15481 * memory this function will return -ENOMEM. If the queue create mailbox command
15482 * fails this function will return -ENXIO.
15485 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15486 struct lpfc_queue *cq, uint32_t subtype)
15488 struct lpfc_mbx_wq_create *wq_create;
15489 struct lpfc_dmabuf *dmabuf;
15490 LPFC_MBOXQ_t *mbox;
15491 int rc, length, status = 0;
15492 uint32_t shdr_status, shdr_add_status;
15493 union lpfc_sli4_cfg_shdr *shdr;
15494 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15495 struct dma_address *page;
15496 void __iomem *bar_memmap_p;
15497 uint32_t db_offset;
15498 uint16_t pci_barset;
15499 uint8_t dpp_barset;
15500 uint32_t dpp_offset;
15501 unsigned long pg_addr;
15502 uint8_t wq_create_version;
15504 /* sanity check on queue memory */
15505 if (!wq || !cq)
15506 return -ENODEV;
15507 if (!phba->sli4_hba.pc_sli4_params.supported)
15508 hw_page_size = wq->page_size;
15510 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15511 if (!mbox)
15512 return -ENOMEM;
15513 length = (sizeof(struct lpfc_mbx_wq_create) -
15514 sizeof(struct lpfc_sli4_cfg_mhdr));
15515 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15516 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15517 length, LPFC_SLI4_MBX_EMBED);
15518 wq_create = &mbox->u.mqe.un.wq_create;
15519 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15520 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15521 wq->page_count);
15522 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15523 cq->queue_id);
15525 /* wqv is the earliest version supported, NOT the latest */
15526 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15527 phba->sli4_hba.pc_sli4_params.wqv);
15529 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15530 (wq->page_size > SLI4_PAGE_SIZE))
15531 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15532 else
15533 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15536 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
15537 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15538 else
15539 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15541 switch (wq_create_version) {
15542 case LPFC_Q_CREATE_VERSION_1:
15543 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15544 wq->entry_count);
15545 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15546 LPFC_Q_CREATE_VERSION_1);
15548 switch (wq->entry_size) {
15549 default:
15550 case 64:
15551 bf_set(lpfc_mbx_wq_create_wqe_size,
15552 &wq_create->u.request_1,
15553 LPFC_WQ_WQE_SIZE_64);
15554 break;
15555 case 128:
15556 bf_set(lpfc_mbx_wq_create_wqe_size,
15557 &wq_create->u.request_1,
15558 LPFC_WQ_WQE_SIZE_128);
15559 break;
15560 }
15561 /* Request DPP by default */
15562 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15563 bf_set(lpfc_mbx_wq_create_page_size,
15564 &wq_create->u.request_1,
15565 (wq->page_size / SLI4_PAGE_SIZE));
15566 page = wq_create->u.request_1.page;
15567 break;
15568 default:
15569 page = wq_create->u.request.page;
15570 break;
15571 }
15573 list_for_each_entry(dmabuf, &wq->page_list, list) {
15574 memset(dmabuf->virt, 0, hw_page_size);
15575 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15576 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15579 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15580 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15582 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15583 /* The IOCTL status is embedded in the mailbox subheader. */
15584 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15585 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15586 if (shdr_status || shdr_add_status || rc) {
15587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15588 "2503 WQ_CREATE mailbox failed with "
15589 "status x%x add_status x%x, mbx status x%x\n",
15590 shdr_status, shdr_add_status, rc);
15595 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15596 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15597 &wq_create->u.response);
15598 else
15599 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15600 &wq_create->u.response_1);
15602 if (wq->queue_id == 0xFFFF) {
15603 status = -ENXIO;
15604 goto out;
15605 }
15607 wq->db_format = LPFC_DB_LIST_FORMAT;
15608 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15609 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15610 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15611 &wq_create->u.response);
15612 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15613 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15615 "3265 WQ[%d] doorbell format "
15616 "not supported: x%x\n",
15617 wq->queue_id, wq->db_format);
15618 status = -ENXIO;
15619 goto out;
15620 }
15621 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15622 &wq_create->u.response);
15623 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15624 pci_barset);
15625 if (!bar_memmap_p) {
15626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15627 "3263 WQ[%d] failed to memmap "
15628 "pci barset:x%x\n",
15629 wq->queue_id, pci_barset);
15630 status = -ENXIO;
15631 goto out;
15632 }
15633 db_offset = wq_create->u.response.doorbell_offset;
15634 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15635 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15637 "3252 WQ[%d] doorbell offset "
15638 "not supported: x%x\n",
15639 wq->queue_id, db_offset);
15640 status = -ENXIO;
15641 goto out;
15642 }
15643 wq->db_regaddr = bar_memmap_p + db_offset;
15644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15645 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15646 "format:x%x\n", wq->queue_id,
15647 pci_barset, db_offset, wq->db_format);
15648 } else
15649 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15650 } else {
15651 /* Check if DPP was honored by the firmware */
15652 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15653 &wq_create->u.response_1);
15654 if (wq->dpp_enable) {
15655 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15656 &wq_create->u.response_1);
15657 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15658 pci_barset);
15659 if (!bar_memmap_p) {
15660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15661 "3267 WQ[%d] failed to memmap "
15662 "pci barset:x%x\n",
15663 wq->queue_id, pci_barset);
15664 status = -ENXIO;
15665 goto out;
15666 }
15667 db_offset = wq_create->u.response_1.doorbell_offset;
15668 wq->db_regaddr = bar_memmap_p + db_offset;
15669 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15670 &wq_create->u.response_1);
15671 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15672 &wq_create->u.response_1);
15673 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15674 dpp_barset);
15675 if (!bar_memmap_p) {
15676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15677 "3268 WQ[%d] failed to memmap "
15678 "pci barset:x%x\n",
15679 wq->queue_id, dpp_barset);
15680 status = -ENXIO;
15681 goto out;
15682 }
15683 dpp_offset = wq_create->u.response_1.dpp_offset;
15684 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15685 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15686 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15687 "dpp_id:x%x dpp_barset:x%x "
15688 "dpp_offset:x%x\n",
15689 wq->queue_id, pci_barset, db_offset,
15690 wq->dpp_id, dpp_barset, dpp_offset);
15692 /* Enable combined writes for DPP aperture */
15693 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15694 #ifdef CONFIG_X86
15695 rc = set_memory_wc(pg_addr, 1);
15696 if (rc) {
15697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15698 "3272 Cannot setup Combined "
15699 "Write on WQ[%d] - disable DPP\n",
15700 wq->queue_id);
15701 phba->cfg_enable_dpp = 0;
15702 }
15703 #else
15704 phba->cfg_enable_dpp = 0;
15705 #endif
15706 } else
15707 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15708 }
15709 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15710 if (wq->pring == NULL) {
15711 status = -ENOMEM;
15712 goto out;
15713 }
15714 wq->type = LPFC_WQ;
15715 wq->assoc_qid = cq->queue_id;
15716 wq->subtype = subtype;
15717 wq->host_index = 0;
15719 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15721 /* link the wq onto the parent cq child list */
15722 list_add_tail(&wq->list, &cq->child_list);
15723 out:
15724 mempool_free(mbox, phba->mbox_mem_pool);
15725 return status;
15726 }
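/*
 * Editor's usage sketch (illustrative, not part of the driver): creating the
 * ELS work queue and binding it to the ELS completion queue. The field names
 * phba->sli4_hba.els_wq/els_cq are assumptions of this sketch.
 */
#ifdef LPFC_DOC_EXAMPLES
static int lpfc_example_setup_els_wq(struct lpfc_hba *phba)
{
	int rc;

	/* The WQ inherits entry size/page size chosen at allocation time. */
	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: ELS WQ create failed, rc = %d\n",
				rc);
	return rc;
}
#endif /* LPFC_DOC_EXAMPLES */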
15729 * lpfc_rq_create - Create a Receive Queue on the HBA
15730 * @phba: HBA structure that indicates port to create a queue on.
15731 * @hrq: The queue structure to use to create the header receive queue.
15732 * @drq: The queue structure to use to create the data receive queue.
15733 * @cq: The completion queue to bind this work queue to.
15735 * This function creates a receive buffer queue pair, as detailed in @hrq and
15736 * @drq, on a port, described by @phba by sending an RQ_CREATE mailbox command
15737 * to the HBA.
15739 * The @phba struct is used to send the mailbox command to the HBA. The @drq
15740 * and @hrq structs are used to get the entry count that is necessary to
15741 * determine the number of pages to use for this queue. The @cq is used to
15742 * indicate which completion queue to bind received buffers that are posted
15743 * to these queues to. This function will send the RQ_CREATE mailbox command
15744 * to the HBA to set up the receive queue pair. The mailbox command is issued
15745 * in polled mode, so this function waits for it to complete before continuing.
15747 * On success this function will return a zero. If unable to allocate enough
15748 * memory this function will return -ENOMEM. If the queue create mailbox command
15749 * fails this function will return -ENXIO.
15752 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15753 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15755 struct lpfc_mbx_rq_create *rq_create;
15756 struct lpfc_dmabuf *dmabuf;
15757 LPFC_MBOXQ_t *mbox;
15758 int rc, length, status = 0;
15759 uint32_t shdr_status, shdr_add_status;
15760 union lpfc_sli4_cfg_shdr *shdr;
15761 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15762 void __iomem *bar_memmap_p;
15763 uint32_t db_offset;
15764 uint16_t pci_barset;
15766 /* sanity check on queue memory */
15767 if (!hrq || !drq || !cq)
15768 return -ENODEV;
15769 if (!phba->sli4_hba.pc_sli4_params.supported)
15770 hw_page_size = SLI4_PAGE_SIZE;
15772 if (hrq->entry_count != drq->entry_count)
15773 return -EINVAL;
15774 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15775 if (!mbox)
15776 return -ENOMEM;
15777 length = (sizeof(struct lpfc_mbx_rq_create) -
15778 sizeof(struct lpfc_sli4_cfg_mhdr));
15779 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15780 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15781 length, LPFC_SLI4_MBX_EMBED);
15782 rq_create = &mbox->u.mqe.un.rq_create;
15783 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15784 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15785 phba->sli4_hba.pc_sli4_params.rqv);
15786 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15787 bf_set(lpfc_rq_context_rqe_count_1,
15788 &rq_create->u.request.context,
15789 hrq->entry_count);
15790 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15791 bf_set(lpfc_rq_context_rqe_size,
15792 &rq_create->u.request.context,
15793 LPFC_RQE_SIZE_8);
15794 bf_set(lpfc_rq_context_page_size,
15795 &rq_create->u.request.context,
15796 LPFC_RQ_PAGE_SIZE_4096);
15797 } else {
15798 switch (hrq->entry_count) {
15799 default:
15800 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15801 "2535 Unsupported RQ count. (%d)\n",
15802 hrq->entry_count);
15803 if (hrq->entry_count < 512) {
15804 status = -EINVAL;
15805 goto out;
15806 }
15807 /* fall through - otherwise default to smallest count */
15808 case 512:
15809 bf_set(lpfc_rq_context_rqe_count,
15810 &rq_create->u.request.context,
15811 LPFC_RQ_RING_SIZE_512);
15812 break;
15813 case 1024:
15814 bf_set(lpfc_rq_context_rqe_count,
15815 &rq_create->u.request.context,
15816 LPFC_RQ_RING_SIZE_1024);
15817 break;
15818 case 2048:
15819 bf_set(lpfc_rq_context_rqe_count,
15820 &rq_create->u.request.context,
15821 LPFC_RQ_RING_SIZE_2048);
15822 break;
15823 case 4096:
15824 bf_set(lpfc_rq_context_rqe_count,
15825 &rq_create->u.request.context,
15826 LPFC_RQ_RING_SIZE_4096);
15827 break;
15828 }
15829 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15830 LPFC_HDR_BUF_SIZE);
15831 }
15832 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15834 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15836 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15837 memset(dmabuf->virt, 0, hw_page_size);
15838 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15839 putPaddrLow(dmabuf->phys);
15840 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15841 putPaddrHigh(dmabuf->phys);
15843 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15844 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15846 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15847 /* The IOCTL status is embedded in the mailbox subheader. */
15848 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15849 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15850 if (shdr_status || shdr_add_status || rc) {
15851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15852 "2504 RQ_CREATE mailbox failed with "
15853 "status x%x add_status x%x, mbx status x%x\n",
15854 shdr_status, shdr_add_status, rc);
15858 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15859 if (hrq->queue_id == 0xFFFF) {
15860 status = -ENXIO;
15861 goto out;
15862 }
15864 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15865 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15866 &rq_create->u.response);
15867 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15868 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15870 "3262 RQ [%d] doorbell format not "
15871 "supported: x%x\n", hrq->queue_id,
15877 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15878 &rq_create->u.response);
15879 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15880 if (!bar_memmap_p) {
15881 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15882 "3269 RQ[%d] failed to memmap pci "
15883 "barset:x%x\n", hrq->queue_id,
15889 db_offset = rq_create->u.response.doorbell_offset;
15890 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15891 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15893 "3270 RQ[%d] doorbell offset not "
15894 "supported: x%x\n", hrq->queue_id,
15899 hrq->db_regaddr = bar_memmap_p + db_offset;
15900 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15901 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15902 "format:x%x\n", hrq->queue_id, pci_barset,
15903 db_offset, hrq->db_format);
15904 } else {
15905 hrq->db_format = LPFC_DB_RING_FORMAT;
15906 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15907 }
15908 hrq->type = LPFC_HRQ;
15909 hrq->assoc_qid = cq->queue_id;
15910 hrq->subtype = subtype;
15911 hrq->host_index = 0;
15912 hrq->hba_index = 0;
15913 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15915 /* now create the data queue */
15916 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15917 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15918 length, LPFC_SLI4_MBX_EMBED);
15919 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15920 phba->sli4_hba.pc_sli4_params.rqv);
15921 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15922 bf_set(lpfc_rq_context_rqe_count_1,
15923 &rq_create->u.request.context, hrq->entry_count);
15924 if (subtype == LPFC_NVMET)
15925 rq_create->u.request.context.buffer_size =
15926 LPFC_NVMET_DATA_BUF_SIZE;
15927 else
15928 rq_create->u.request.context.buffer_size =
15929 LPFC_DATA_BUF_SIZE;
15930 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15931 LPFC_RQE_SIZE_8);
15932 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15933 (PAGE_SIZE/SLI4_PAGE_SIZE));
15934 } else {
15935 switch (drq->entry_count) {
15936 default:
15937 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15938 "2536 Unsupported RQ count. (%d)\n",
15939 drq->entry_count);
15940 if (drq->entry_count < 512) {
15941 status = -EINVAL;
15942 goto out;
15943 }
15944 /* fall through - otherwise default to smallest count */
15945 case 512:
15946 bf_set(lpfc_rq_context_rqe_count,
15947 &rq_create->u.request.context,
15948 LPFC_RQ_RING_SIZE_512);
15949 break;
15950 case 1024:
15951 bf_set(lpfc_rq_context_rqe_count,
15952 &rq_create->u.request.context,
15953 LPFC_RQ_RING_SIZE_1024);
15954 break;
15955 case 2048:
15956 bf_set(lpfc_rq_context_rqe_count,
15957 &rq_create->u.request.context,
15958 LPFC_RQ_RING_SIZE_2048);
15959 break;
15960 case 4096:
15961 bf_set(lpfc_rq_context_rqe_count,
15962 &rq_create->u.request.context,
15963 LPFC_RQ_RING_SIZE_4096);
15964 break;
15965 }
15966 if (subtype == LPFC_NVMET)
15967 bf_set(lpfc_rq_context_buf_size,
15968 &rq_create->u.request.context,
15969 LPFC_NVMET_DATA_BUF_SIZE);
15970 else
15971 bf_set(lpfc_rq_context_buf_size,
15972 &rq_create->u.request.context,
15973 LPFC_DATA_BUF_SIZE);
15974 }
15975 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15977 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15979 list_for_each_entry(dmabuf, &drq->page_list, list) {
15980 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15981 putPaddrLow(dmabuf->phys);
15982 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15983 putPaddrHigh(dmabuf->phys);
15985 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15986 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15987 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15988 /* The IOCTL status is embedded in the mailbox subheader. */
15989 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15990 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15991 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15992 if (shdr_status || shdr_add_status || rc) {
15993 status = -ENXIO;
15994 goto out;
15995 }
15996 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15997 if (drq->queue_id == 0xFFFF) {
15998 status = -ENXIO;
15999 goto out;
16000 }
16001 drq->type = LPFC_DRQ;
16002 drq->assoc_qid = cq->queue_id;
16003 drq->subtype = subtype;
16004 drq->host_index = 0;
16005 drq->hba_index = 0;
16006 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16008 /* link the header and data RQs onto the parent cq child list */
16009 list_add_tail(&hrq->list, &cq->child_list);
16010 list_add_tail(&drq->list, &cq->child_list);
16012 out:
16013 mempool_free(mbox, phba->mbox_mem_pool);
16014 return status;
16015 }
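/*
 * Editor's usage sketch (illustrative, not part of the driver): creating the
 * default header/data receive queue pair for unsolicited frames. The field
 * names phba->sli4_hba.hdr_rq/dat_rq/els_cq are assumptions of this sketch;
 * both RQs must have the same entry count or the routine returns -EINVAL.
 */
#ifdef LPFC_DOC_EXAMPLES
static int lpfc_example_setup_rq_pair(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
			    phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: RQ pair create failed, rc = %d\n",
				rc);
	return rc;
}
#endif /* LPFC_DOC_EXAMPLES */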
16018 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16019 * @phba: HBA structure that indicates port to create a queue on.
16020 * @hrqp: The queue structure array to use to create the header receive queues.
16021 * @drqp: The queue structure array to use to create the data receive queues.
16022 * @cqp: The completion queue array to bind these receive queues to.
16024 * This function creates @phba->cfg_nvmet_mrq receive buffer queue pairs, as
16025 * detailed in @hrqp and @drqp, on a port described by @phba, by sending an
16026 * RQ_CREATE mailbox command to the HBA.
16028 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
16029 * and @hrqp arrays are used to get the entry counts that are necessary to
16030 * determine the number of pages to use for each queue. The @cqp array
16031 * indicates which completion queue each receive queue pair is bound to.
16032 * This function will send the RQ_CREATE mailbox command to the HBA to set up
16033 * the receive queue pairs. The mailbox command is issued in polled mode, so
16034 * this function waits for it to complete before continuing.
16036 * On success this function will return a zero. If unable to allocate enough
16037 * memory this function will return -ENOMEM. If the queue create mailbox command
16038 * fails this function will return -ENXIO.
16041 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16042 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16045 struct lpfc_queue *hrq, *drq, *cq;
16046 struct lpfc_mbx_rq_create_v2 *rq_create;
16047 struct lpfc_dmabuf *dmabuf;
16048 LPFC_MBOXQ_t *mbox;
16049 int rc, length, alloclen, status = 0;
16050 int cnt, idx, numrq, page_idx = 0;
16051 uint32_t shdr_status, shdr_add_status;
16052 union lpfc_sli4_cfg_shdr *shdr;
16053 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16055 numrq = phba->cfg_nvmet_mrq;
16056 /* sanity check on array memory */
16057 if (!hrqp || !drqp || !cqp || !numrq)
16058 return -ENODEV;
16059 if (!phba->sli4_hba.pc_sli4_params.supported)
16060 hw_page_size = SLI4_PAGE_SIZE;
16062 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16063 if (!mbox)
16064 return -ENOMEM;
16066 length = sizeof(struct lpfc_mbx_rq_create_v2);
16067 length += ((2 * numrq * hrqp[0]->page_count) *
16068 sizeof(struct dma_address));
16070 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16071 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16072 LPFC_SLI4_MBX_NEMBED);
16073 if (alloclen < length) {
16074 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16075 "3099 Allocated DMA memory size (%d) is "
16076 "less than the requested DMA memory size "
16077 "(%d)\n", alloclen, length);
16084 rq_create = mbox->sge_array->addr[0];
16085 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16087 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16090 for (idx = 0; idx < numrq; idx++) {
16091 hrq = hrqp[idx];
16092 drq = drqp[idx];
16093 cq = cqp[idx];
16095 /* sanity check on queue memory */
16096 if (!hrq || !drq || !cq) {
16097 status = -ENODEV;
16098 goto out;
16099 }
16101 if (hrq->entry_count != drq->entry_count) {
16102 status = -ENODEV;
16103 goto out;
16104 }
16106 if (idx == 0) {
16107 bf_set(lpfc_mbx_rq_create_num_pages,
16108 &rq_create->u.request,
16109 hrq->page_count);
16110 bf_set(lpfc_mbx_rq_create_rq_cnt,
16111 &rq_create->u.request, (numrq * 2));
16112 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16113 1);
16114 bf_set(lpfc_rq_context_base_cq,
16115 &rq_create->u.request.context,
16116 cq->queue_id);
16117 bf_set(lpfc_rq_context_data_size,
16118 &rq_create->u.request.context,
16119 LPFC_NVMET_DATA_BUF_SIZE);
16120 bf_set(lpfc_rq_context_hdr_size,
16121 &rq_create->u.request.context,
16122 LPFC_HDR_BUF_SIZE);
16123 bf_set(lpfc_rq_context_rqe_count_1,
16124 &rq_create->u.request.context,
16125 hrq->entry_count);
16126 bf_set(lpfc_rq_context_rqe_size,
16127 &rq_create->u.request.context,
16128 LPFC_RQE_SIZE_8);
16129 bf_set(lpfc_rq_context_page_size,
16130 &rq_create->u.request.context,
16131 (PAGE_SIZE/SLI4_PAGE_SIZE));
16132 }
16133 rc = 0;
16134 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16135 memset(dmabuf->virt, 0, hw_page_size);
16136 cnt = page_idx + dmabuf->buffer_tag;
16137 rq_create->u.request.page[cnt].addr_lo =
16138 putPaddrLow(dmabuf->phys);
16139 rq_create->u.request.page[cnt].addr_hi =
16140 putPaddrHigh(dmabuf->phys);
16141 rc++;
16142 }
16143 page_idx += rc;
16145 rc = 0;
16146 list_for_each_entry(dmabuf, &drq->page_list, list) {
16147 memset(dmabuf->virt, 0, hw_page_size);
16148 cnt = page_idx + dmabuf->buffer_tag;
16149 rq_create->u.request.page[cnt].addr_lo =
16150 putPaddrLow(dmabuf->phys);
16151 rq_create->u.request.page[cnt].addr_hi =
16152 putPaddrHigh(dmabuf->phys);
16153 rc++;
16154 }
16155 page_idx += rc;
16157 hrq->db_format = LPFC_DB_RING_FORMAT;
16158 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16159 hrq->type = LPFC_HRQ;
16160 hrq->assoc_qid = cq->queue_id;
16161 hrq->subtype = subtype;
16162 hrq->host_index = 0;
16163 hrq->hba_index = 0;
16164 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16166 drq->db_format = LPFC_DB_RING_FORMAT;
16167 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16168 drq->type = LPFC_DRQ;
16169 drq->assoc_qid = cq->queue_id;
16170 drq->subtype = subtype;
16171 drq->host_index = 0;
16172 drq->hba_index = 0;
16173 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16175 list_add_tail(&hrq->list, &cq->child_list);
16176 list_add_tail(&drq->list, &cq->child_list);
16177 }
16179 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16180 /* The IOCTL status is embedded in the mailbox subheader. */
16181 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16182 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16183 if (shdr_status || shdr_add_status || rc) {
16184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16185 "3120 RQ_CREATE mailbox failed with "
16186 "status x%x add_status x%x, mbx status x%x\n",
16187 shdr_status, shdr_add_status, rc);
16188 status = -ENXIO;
16189 goto out;
16190 }
16191 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16192 if (rc == 0xFFFF) {
16193 status = -ENXIO;
16194 goto out;
16195 }
16197 /* Initialize all RQs with associated queue id */
16198 for (idx = 0; idx < numrq; idx++) {
16199 hrq = hrqp[idx];
16200 hrq->queue_id = rc + (2 * idx);
16201 drq = drqp[idx];
16202 drq->queue_id = rc + (2 * idx) + 1;
16203 }
16205 out:
16206 lpfc_sli4_mbox_cmd_free(phba, mbox);
16207 return status;
16208 }
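/*
 * Editor's usage sketch (illustrative, not part of the driver): creating the
 * NVMET MRQ set. The nvmet_mrq_hdr/nvmet_mrq_data/nvmet_cqset array names
 * are assumptions of this sketch; cfg_nvmet_mrq pairs are created with one
 * mailbox command and queue ids are assigned consecutively from the base id
 * returned by the firmware.
 */
#ifdef LPFC_DOC_EXAMPLES
static int lpfc_example_setup_nvmet_mrq(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
			     phba->sli4_hba.nvmet_mrq_data,
			     phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: MRQ create failed, rc = %d\n", rc);
	return rc;
}
#endif /* LPFC_DOC_EXAMPLES */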
16211 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16212 * @eq: The queue structure associated with the queue to destroy.
16214 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16215 * command, specific to the type of queue, to the HBA.
16217 * The @eq struct is used to get the queue ID of the queue to destroy.
16219 * On success this function will return a zero. If the queue destroy mailbox
16220 * command fails this function will return -ENXIO.
16223 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16225 LPFC_MBOXQ_t *mbox;
16226 int rc, length, status = 0;
16227 uint32_t shdr_status, shdr_add_status;
16228 union lpfc_sli4_cfg_shdr *shdr;
16230 /* sanity check on queue memory */
16231 if (!eq)
16232 return -ENODEV;
16234 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16235 if (!mbox)
16236 return -ENOMEM;
16237 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16238 sizeof(struct lpfc_sli4_cfg_mhdr));
16239 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16240 LPFC_MBOX_OPCODE_EQ_DESTROY,
16241 length, LPFC_SLI4_MBX_EMBED);
16242 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16243 eq->queue_id);
16244 mbox->vport = eq->phba->pport;
16245 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16247 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16248 /* The IOCTL status is embedded in the mailbox subheader. */
16249 shdr = (union lpfc_sli4_cfg_shdr *)
16250 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16251 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16252 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16253 if (shdr_status || shdr_add_status || rc) {
16254 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16255 "2505 EQ_DESTROY mailbox failed with "
16256 "status x%x add_status x%x, mbx status x%x\n",
16257 shdr_status, shdr_add_status, rc);
16258 status = -ENXIO;
16259 }
16261 /* Remove eq from any list */
16262 list_del_init(&eq->list);
16263 mempool_free(mbox, eq->phba->mbox_mem_pool);
16264 return status;
16265 }
16268 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16269 * @cq: The queue structure associated with the queue to destroy.
16271 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16272 * command, specific to the type of queue, to the HBA.
16274 * The @cq struct is used to get the queue ID of the queue to destroy.
16276 * On success this function will return a zero. If the queue destroy mailbox
16277 * command fails this function will return -ENXIO.
16280 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16282 LPFC_MBOXQ_t *mbox;
16283 int rc, length, status = 0;
16284 uint32_t shdr_status, shdr_add_status;
16285 union lpfc_sli4_cfg_shdr *shdr;
16287 /* sanity check on queue memory */
16288 if (!cq)
16289 return -ENODEV;
16290 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16291 if (!mbox)
16292 return -ENOMEM;
16293 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16294 sizeof(struct lpfc_sli4_cfg_mhdr));
16295 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16296 LPFC_MBOX_OPCODE_CQ_DESTROY,
16297 length, LPFC_SLI4_MBX_EMBED);
16298 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16299 cq->queue_id);
16300 mbox->vport = cq->phba->pport;
16301 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16302 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16303 /* The IOCTL status is embedded in the mailbox subheader. */
16304 shdr = (union lpfc_sli4_cfg_shdr *)
16305 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16306 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16307 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16308 if (shdr_status || shdr_add_status || rc) {
16309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16310 "2506 CQ_DESTROY mailbox failed with "
16311 "status x%x add_status x%x, mbx status x%x\n",
16312 shdr_status, shdr_add_status, rc);
16313 status = -ENXIO;
16314 }
16315 /* Remove cq from any list */
16316 list_del_init(&cq->list);
16317 mempool_free(mbox, cq->phba->mbox_mem_pool);
16318 return status;
16319 }
16322 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16323 * @mq: The queue structure associated with the queue to destroy.
16325 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16326 * command, specific to the type of queue, to the HBA.
16328 * The @mq struct is used to get the queue ID of the queue to destroy.
16330 * On success this function will return a zero. If the queue destroy mailbox
16331 * command fails this function will return -ENXIO.
16334 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16336 LPFC_MBOXQ_t *mbox;
16337 int rc, length, status = 0;
16338 uint32_t shdr_status, shdr_add_status;
16339 union lpfc_sli4_cfg_shdr *shdr;
16341 /* sanity check on queue memory */
16342 if (!mq)
16343 return -ENODEV;
16344 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16345 if (!mbox)
16346 return -ENOMEM;
16347 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16348 sizeof(struct lpfc_sli4_cfg_mhdr));
16349 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16350 LPFC_MBOX_OPCODE_MQ_DESTROY,
16351 length, LPFC_SLI4_MBX_EMBED);
16352 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16353 mq->queue_id);
16354 mbox->vport = mq->phba->pport;
16355 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16356 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16357 /* The IOCTL status is embedded in the mailbox subheader. */
16358 shdr = (union lpfc_sli4_cfg_shdr *)
16359 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16360 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16361 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16362 if (shdr_status || shdr_add_status || rc) {
16363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16364 "2507 MQ_DESTROY mailbox failed with "
16365 "status x%x add_status x%x, mbx status x%x\n",
16366 shdr_status, shdr_add_status, rc);
16367 status = -ENXIO;
16368 }
16369 /* Remove mq from any list */
16370 list_del_init(&mq->list);
16371 mempool_free(mbox, mq->phba->mbox_mem_pool);
16372 return status;
16373 }
16376 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16377 * @wq: The queue structure associated with the queue to destroy.
16379 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16380 * command, specific to the type of queue, to the HBA.
16382 * The @wq struct is used to get the queue ID of the queue to destroy.
16384 * On success this function will return a zero. If the queue destroy mailbox
16385 * command fails this function will return -ENXIO.
16388 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16390 LPFC_MBOXQ_t *mbox;
16391 int rc, length, status = 0;
16392 uint32_t shdr_status, shdr_add_status;
16393 union lpfc_sli4_cfg_shdr *shdr;
16395 /* sanity check on queue memory */
16396 if (!wq)
16397 return -ENODEV;
16398 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16399 if (!mbox)
16400 return -ENOMEM;
16401 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16402 sizeof(struct lpfc_sli4_cfg_mhdr));
16403 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16404 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16405 length, LPFC_SLI4_MBX_EMBED);
16406 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16407 wq->queue_id);
16408 mbox->vport = wq->phba->pport;
16409 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16410 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16411 shdr = (union lpfc_sli4_cfg_shdr *)
16412 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16413 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16414 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16415 if (shdr_status || shdr_add_status || rc) {
16416 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16417 "2508 WQ_DESTROY mailbox failed with "
16418 "status x%x add_status x%x, mbx status x%x\n",
16419 shdr_status, shdr_add_status, rc);
16420 status = -ENXIO;
16421 }
16422 /* Remove wq from any list */
16423 list_del_init(&wq->list);
16424 kfree(wq->pring);
16425 wq->pring = NULL;
16426 mempool_free(mbox, wq->phba->mbox_mem_pool);
16427 return status;
16428 }
16431 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16432 * @hrq: The header receive queue structure to destroy.
16433 * @drq: The data receive queue structure to destroy.
16434 * This function destroys the receive queue pair, as detailed in @hrq and
16435 * @drq, by sending a mailbox command, specific to the type of queue, to
16436 * the HBA.
16437 * The @hrq and @drq structs are used to get the queue IDs to destroy.
16439 * On success this function will return a zero. If the queue destroy mailbox
16440 * command fails this function will return -ENXIO.
16443 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16444 struct lpfc_queue *drq)
16446 LPFC_MBOXQ_t *mbox;
16447 int rc, length, status = 0;
16448 uint32_t shdr_status, shdr_add_status;
16449 union lpfc_sli4_cfg_shdr *shdr;
16451 /* sanity check on queue memory */
16452 if (!hrq || !drq)
16453 return -ENODEV;
16454 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16455 if (!mbox)
16456 return -ENOMEM;
16457 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16458 sizeof(struct lpfc_sli4_cfg_mhdr));
16459 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16460 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16461 length, LPFC_SLI4_MBX_EMBED);
16462 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16463 hrq->queue_id);
16464 mbox->vport = hrq->phba->pport;
16465 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16466 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16467 /* The IOCTL status is embedded in the mailbox subheader. */
16468 shdr = (union lpfc_sli4_cfg_shdr *)
16469 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16470 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16471 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16472 if (shdr_status || shdr_add_status || rc) {
16473 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16474 "2509 RQ_DESTROY mailbox failed with "
16475 "status x%x add_status x%x, mbx status x%x\n",
16476 shdr_status, shdr_add_status, rc);
16477 if (rc != MBX_TIMEOUT)
16478 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16479 return -ENXIO;
16480 }
16481 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16482 drq->queue_id);
16483 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16484 shdr = (union lpfc_sli4_cfg_shdr *)
16485 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16487 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16488 if (shdr_status || shdr_add_status || rc) {
16489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16490 "2510 RQ_DESTROY mailbox failed with "
16491 "status x%x add_status x%x, mbx status x%x\n",
16492 shdr_status, shdr_add_status, rc);
16493 status = -ENXIO;
16494 }
16495 list_del_init(&hrq->list);
16496 list_del_init(&drq->list);
16497 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16498 return status;
16499 }
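/*
 * Editor's teardown sketch (illustrative, not part of the driver): the
 * destroy routines above are typically called in the reverse order of
 * creation, children before the parent CQ, and the EQ last. The queue
 * pointers are placeholders for queues created earlier.
 */
#ifdef LPFC_DOC_EXAMPLES
static void lpfc_example_teardown(struct lpfc_hba *phba,
				  struct lpfc_queue *wq,
				  struct lpfc_queue *hrq,
				  struct lpfc_queue *drq,
				  struct lpfc_queue *mq,
				  struct lpfc_queue *cq,
				  struct lpfc_queue *eq)
{
	/* Unbind the children from the CQ first... */
	lpfc_wq_destroy(phba, wq);
	lpfc_rq_destroy(phba, hrq, drq);
	lpfc_mq_destroy(phba, mq);
	/* ...then the CQ itself, and finally its EQ. */
	lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}
#endif /* LPFC_DOC_EXAMPLES */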
16502 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16503 * @phba: pointer to lpfc hba data structure.
16504 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16505 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16506 * @xritag: the xritag that ties this io to the SGL pages.
16508 * This routine will post the sgl pages for the IO that has the xritag
16509 * that is in the iocbq structure. The xritag is assigned during iocbq
16510 * creation and persists for as long as the driver is loaded.
16511 * If the caller has fewer than 256 scatter gather segments to map then
16512 * pdma_phys_addr1 should be 0.
16513 * If the caller needs to map more than 256 scatter gather segments then
16514 * pdma_phys_addr1 should be a valid physical address.
16515 * Physical addresses for SGLs must be 64 byte aligned.
16516 * If two SGL pages are mapped, the first must have 256 entries and the
16517 * second can have between 1 and 256 entries.
16521 * -ENXIO, -ENOMEM - Failure
16524 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16525 dma_addr_t pdma_phys_addr0,
16526 dma_addr_t pdma_phys_addr1,
16529 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16530 LPFC_MBOXQ_t *mbox;
16531 int rc;
16532 uint32_t shdr_status, shdr_add_status;
16533 uint32_t mbox_tmo;
16534 union lpfc_sli4_cfg_shdr *shdr;
16536 if (xritag == NO_XRI) {
16537 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16538 "0364 Invalid param:\n");
16542 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16546 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16547 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16548 sizeof(struct lpfc_mbx_post_sgl_pages) -
16549 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16551 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16552 &mbox->u.mqe.un.post_sgl_pages;
16553 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16554 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16556 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16557 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16558 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16559 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16561 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16562 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16563 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16564 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16565 if (!phba->sli4_hba.intr_enable)
16566 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16567 else {
16568 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16569 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16570 }
16571 /* The IOCTL status is embedded in the mailbox subheader. */
16572 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16573 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16574 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16575 if (rc != MBX_TIMEOUT)
16576 mempool_free(mbox, phba->mbox_mem_pool);
16577 if (shdr_status || shdr_add_status || rc) {
16578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16579 "2511 POST_SGL mailbox failed with "
16580 "status x%x add_status x%x, mbx status x%x\n",
16581 shdr_status, shdr_add_status, rc);
16582 rc = -ENXIO;
16583 }
16584 return rc;
16585 }
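/*
 * Editor's usage sketch (illustrative, not part of the driver): posting the
 * SGL pages for one allocated XRI. The buffer fields (dma_phys_sgl and
 * cur_iocbq.sli4_xritag) mirror the driver's lpfc_io_buf layout; the zero
 * second page indicates the SGL fits in one 64-byte-aligned page.
 */
#ifdef LPFC_DOC_EXAMPLES
static int lpfc_example_post_one_sgl(struct lpfc_hba *phba,
				     struct lpfc_io_buf *buf)
{
	/* No second SGL page: fewer than 256 scatter gather segments. */
	return lpfc_sli4_post_sgl(phba, buf->dma_phys_sgl, 0,
				  buf->cur_iocbq.sli4_xritag);
}
#endif /* LPFC_DOC_EXAMPLES */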
16587 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16588 * @phba: pointer to lpfc hba data structure.
16590 * This routine is invoked to allocate the next available xri from the
16591 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
16592 * xri is a logical index into the bitmask; the bit for that index is
16593 * set and the used-xri count is incremented.
16596 * Returns the allocated xri if successful,
16597 * NO_XRI if no xris are available.
16600 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16605 * Fetch the next logical xri. Because this index is logical,
16606 * the driver starts at 0 each time.
16608 spin_lock_irq(&phba->hbalock);
16609 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16610 phba->sli4_hba.max_cfg_param.max_xri, 0);
16611 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16612 spin_unlock_irq(&phba->hbalock);
16613 return NO_XRI;
16614 }
16615 set_bit(xri, phba->sli4_hba.xri_bmask);
16616 phba->sli4_hba.max_cfg_param.xri_used++;
16618 spin_unlock_irq(&phba->hbalock);
16619 return xri;
16620 }
16623 * __lpfc_sli4_free_xri - Release an xri for reuse.
16624 * @phba: pointer to lpfc hba data structure.
16626 * This routine is invoked to release an xri to the pool of
16627 * available xris maintained by the driver. The caller must hold
16628 * the hbalock.
16630 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16632 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16633 phba->sli4_hba.max_cfg_param.xri_used--;
16634 }
16635 }
16638 * lpfc_sli4_free_xri - Release an xri for reuse.
16639 * @phba: pointer to lpfc hba data structure.
16641 * This routine is invoked to release an xri to the pool of
16642 * available xris maintained by the driver.
16645 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16647 spin_lock_irq(&phba->hbalock);
16648 __lpfc_sli4_free_xri(phba, xri);
16649 spin_unlock_irq(&phba->hbalock);
16653 * lpfc_sli4_next_xritag - Get an xritag for the io
16654 * @phba: Pointer to HBA context object.
16656 * This function gets an xritag for the iocb. If there is no unused xritag
16657 * it will return NO_XRI (0xffff), which is not a valid xritag.
16658 * The function returns the allocated xritag if successful.
16660 * The caller is not required to hold any lock.
16663 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16665 uint16_t xri_index;
16667 xri_index = lpfc_sli4_alloc_xri(phba);
16668 if (xri_index == NO_XRI)
16669 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16670 "2004 Failed to allocate XRI.last XRITAG is %d"
16671 " Max XRI is %d, Used XRI is %d\n",
16673 phba->sli4_hba.max_cfg_param.max_xri,
16674 phba->sli4_hba.max_cfg_param.xri_used);
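/*
 * Editor's sketch (illustrative, not part of the driver): the alloc/free
 * pairing for XRIs. lpfc_sli4_next_xritag() wraps lpfc_sli4_alloc_xri();
 * a failed post must hand the tag back so the bitmask stays accurate.
 */
#ifdef LPFC_DOC_EXAMPLES
static int lpfc_example_use_xri(struct lpfc_hba *phba, dma_addr_t sgl_phys)
{
	uint16_t xritag;
	int rc;

	xritag = lpfc_sli4_next_xritag(phba);
	if (xritag == NO_XRI)
		return -ENOMEM;

	rc = lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
	if (rc)
		/* Return the tag to the pool on failure. */
		lpfc_sli4_free_xri(phba, xritag);
	return rc;
}
#endif /* LPFC_DOC_EXAMPLES */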
16679 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16680 * @phba: pointer to lpfc hba data structure.
16681 * @post_sgl_list: pointer to els sgl entry list.
16682 * @count: number of els sgl entries on the list.
16684 * This routine is invoked to post a block of driver's sgl pages to the
16685 * HBA using non-embedded mailbox command. No Lock is held. This routine
16686 * is only called when the driver is loading and after all IO has been
16687 * stopped.
16690 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16691 struct list_head *post_sgl_list,
16694 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16695 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16696 struct sgl_page_pairs *sgl_pg_pairs;
16698 LPFC_MBOXQ_t *mbox;
16699 uint32_t reqlen, alloclen, pg_pairs;
16701 uint16_t xritag_start = 0;
16703 uint32_t shdr_status, shdr_add_status;
16704 union lpfc_sli4_cfg_shdr *shdr;
16706 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16707 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16708 if (reqlen > SLI4_PAGE_SIZE) {
16709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16710 "2559 Block sgl registration required DMA "
16711 "size (%d) great than a page\n", reqlen);
16715 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16719 /* Allocate DMA memory and set up the non-embedded mailbox command */
16720 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16721 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16722 LPFC_SLI4_MBX_NEMBED);
16724 if (alloclen < reqlen) {
16725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16726 "0285 Allocated DMA memory size (%d) is "
16727 "less than the requested DMA memory "
16728 "size (%d)\n", alloclen, reqlen);
16729 lpfc_sli4_mbox_cmd_free(phba, mbox);
16732 /* Set up the SGL pages in the non-embedded DMA pages */
16733 viraddr = mbox->sge_array->addr[0];
16734 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16735 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16737 pg_pairs = 0;
16738 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16739 /* Set up the sge entry */
16740 sgl_pg_pairs->sgl_pg0_addr_lo =
16741 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16742 sgl_pg_pairs->sgl_pg0_addr_hi =
16743 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16744 sgl_pg_pairs->sgl_pg1_addr_lo =
16745 cpu_to_le32(putPaddrLow(0));
16746 sgl_pg_pairs->sgl_pg1_addr_hi =
16747 cpu_to_le32(putPaddrHigh(0));
16749 /* Keep the first xritag on the list */
16750 if (!pg_pairs)
16751 xritag_start = sglq_entry->sli4_xritag;
16752 sgl_pg_pairs++;
16753 pg_pairs++;
16754 }
16756 /* Complete initialization and perform endian conversion. */
16757 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16758 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16759 sgl->word0 = cpu_to_le32(sgl->word0);
16761 if (!phba->sli4_hba.intr_enable)
16762 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16763 else {
16764 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16765 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16766 }
16767 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16768 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16769 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16770 if (rc != MBX_TIMEOUT)
16771 lpfc_sli4_mbox_cmd_free(phba, mbox);
16772 if (shdr_status || shdr_add_status || rc) {
16773 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16774 "2513 POST_SGL_BLOCK mailbox command failed "
16775 "status x%x add_status x%x mbx status x%x\n",
16776 shdr_status, shdr_add_status, rc);
16777 rc = -ENXIO;
16778 }
16779 return rc;
16780 }
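/*
 * Editor's sizing sketch (illustrative, not part of the driver): the sgl
 * block post packs one sgl_page_pairs entry per XRI, so the largest block
 * that fits one SLI4 page bounds how many sgls a single non-embedded
 * POST_SGL_PAGES command can carry.
 */
#ifdef LPFC_DOC_EXAMPLES
static uint32_t lpfc_example_max_sgl_per_post(void)
{
	uint32_t hdr = sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);

	/* reqlen = count * sizeof(struct sgl_page_pairs) + hdr must stay
	 * <= SLI4_PAGE_SIZE (see the check at the top of
	 * lpfc_sli4_post_sgl_list()).
	 */
	return (SLI4_PAGE_SIZE - hdr) / sizeof(struct sgl_page_pairs);
}
#endif /* LPFC_DOC_EXAMPLES */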
16783 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
16784 * @phba: pointer to lpfc hba data structure.
16785 * @nblist: pointer to nvme buffer list.
16786 * @count: number of nvme buffers on the list.
16788 * This routine is invoked to post a block of @count nvme buffer sgl pages
16789 * from the buffer list @nblist to the HBA using a non-embedded mailbox
16790 * command.
16794 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16797 struct lpfc_io_buf *lpfc_ncmd;
16798 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16799 struct sgl_page_pairs *sgl_pg_pairs;
16801 LPFC_MBOXQ_t *mbox;
16802 uint32_t reqlen, alloclen, pg_pairs;
16804 uint16_t xritag_start = 0;
16806 uint32_t shdr_status, shdr_add_status;
16807 dma_addr_t pdma_phys_bpl1;
16808 union lpfc_sli4_cfg_shdr *shdr;
16810 /* Calculate the requested length of the dma memory */
16811 reqlen = count * sizeof(struct sgl_page_pairs) +
16812 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16813 if (reqlen > SLI4_PAGE_SIZE) {
16814 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16815 "6118 Block sgl registration required DMA "
16816 "size (%d) great than a page\n", reqlen);
16819 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16822 "6119 Failed to allocate mbox cmd memory\n");
16826 /* Allocate DMA memory and set up the non-embedded mailbox command */
16827 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16828 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16829 reqlen, LPFC_SLI4_MBX_NEMBED);
16831 if (alloclen < reqlen) {
16832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16833 "6120 Allocated DMA memory size (%d) is "
16834 "less than the requested DMA memory "
16835 "size (%d)\n", alloclen, reqlen);
16836 lpfc_sli4_mbox_cmd_free(phba, mbox);
16840 /* Get the first SGE entry from the non-embedded DMA memory */
16841 viraddr = mbox->sge_array->addr[0];
16843 /* Set up the SGL pages in the non-embedded DMA pages */
16844 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16845 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16847 pg_pairs = 0;
16848 list_for_each_entry(lpfc_ncmd, nblist, list) {
16849 /* Set up the sge entry */
16850 sgl_pg_pairs->sgl_pg0_addr_lo =
16851 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
16852 sgl_pg_pairs->sgl_pg0_addr_hi =
16853 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
16854 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16855 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16856 SGL_PAGE_SIZE;
16857 else
16858 pdma_phys_bpl1 = 0;
16859 sgl_pg_pairs->sgl_pg1_addr_lo =
16860 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16861 sgl_pg_pairs->sgl_pg1_addr_hi =
16862 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16863 /* Keep the first xritag on the list */
16864 if (pg_pairs == 0)
16865 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16866 sgl_pg_pairs++;
16867 pg_pairs++;
16868 }
16869 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16870 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16871 /* Perform endian conversion if necessary */
16872 sgl->word0 = cpu_to_le32(sgl->word0);
16874 if (!phba->sli4_hba.intr_enable) {
16875 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16876 } else {
16877 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16878 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16880 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16881 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16882 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16883 if (rc != MBX_TIMEOUT)
16884 lpfc_sli4_mbox_cmd_free(phba, mbox);
16885 if (shdr_status || shdr_add_status || rc) {
16886 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16887 "6125 POST_SGL_BLOCK mailbox command failed "
16888 "status x%x add_status x%x mbx status x%x\n",
16889 shdr_status, shdr_add_status, rc);
16890 rc = -ENXIO;
16891 }
16892 return rc;
16893 }
16896 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
16897 * @phba: pointer to lpfc hba data structure.
16898 * @post_nblist: pointer to the nvme buffer list.
16900 * This routine walks a list of nvme buffers that was passed in. It attempts
16901 * to construct blocks of nvme buffer sgls which contains contiguous xris and
16902 * uses the non-embedded SGL block post mailbox commands to post to the port.
16903 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
16904 * the embedded SGL post mailbox command for posting. The @post_nblist passed
16905 * in must be a local list, thus no lock is needed when manipulating the list.
16907 * Returns: 0 = failure, non-zero number of successfully posted buffers.
16910 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16911 struct list_head *post_nblist, int sb_count)
16913 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
16914 int status, sgl_size;
16915 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16916 dma_addr_t pdma_phys_sgl1;
16917 int last_xritag = NO_XRI;
16919 LIST_HEAD(prep_nblist);
16920 LIST_HEAD(blck_nblist);
16921 LIST_HEAD(nvme_nblist);
16923 /* sanity check */
16924 if (sb_count <= 0)
16925 return -EINVAL;
16927 sgl_size = phba->cfg_sg_dma_buf_size;
16928 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16929 list_del_init(&lpfc_ncmd->list);
16930 block_cnt++;
16931 if ((last_xritag != NO_XRI) &&
16932 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16933 /* a hole in xri block, form a sgl posting block */
16934 list_splice_init(&prep_nblist, &blck_nblist);
16935 post_cnt = block_cnt - 1;
16936 /* prepare list for next posting block */
16937 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16938 block_cnt = 1;
16939 } else {
16940 /* prepare list for next posting block */
16941 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16942 /* enough sgls for non-embed sgl mbox command */
16943 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16944 list_splice_init(&prep_nblist, &blck_nblist);
16945 post_cnt = block_cnt;
16946 block_cnt = 0;
16947 }
16948 }
16949 num_posting++;
16950 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16952 /* end of repost sgl list condition for NVME buffers */
16953 if (num_posting == sb_count) {
16954 if (post_cnt == 0) {
16955 /* last sgl posting block */
16956 list_splice_init(&prep_nblist, &blck_nblist);
16957 post_cnt = block_cnt;
16958 } else if (block_cnt == 1) {
16959 /* last single sgl with non-contiguous xri */
16960 if (sgl_size > SGL_PAGE_SIZE)
16961 pdma_phys_sgl1 =
16962 lpfc_ncmd->dma_phys_sgl +
16963 SGL_PAGE_SIZE;
16964 else
16965 pdma_phys_sgl1 = 0;
16966 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16967 status = lpfc_sli4_post_sgl(
16968 phba, lpfc_ncmd->dma_phys_sgl,
16969 pdma_phys_sgl1, cur_xritag);
16970 if (status) {
16971 /* Post error. Buffer unavailable. */
16972 lpfc_ncmd->flags |=
16973 LPFC_SBUF_NOT_POSTED;
16974 } else {
16975 /* Post success. Buffer available. */
16976 lpfc_ncmd->flags &=
16977 ~LPFC_SBUF_NOT_POSTED;
16978 lpfc_ncmd->status = IOSTAT_SUCCESS;
16979 num_posted++;
16980 }
16981 /* success, put on NVME buffer sgl list */
16982 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16983 }
16984 }
16986 /* continue until a nembed page worth of sgls */
16987 if (post_cnt == 0)
16988 continue;
16990 /* post block of NVME buffer list sgls */
16991 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
16992 post_cnt);
16994 /* don't reset xirtag due to hole in xri block */
16995 if (block_cnt == 0)
16996 last_xritag = NO_XRI;
16998 /* reset NVME buffer post count for next round of posting */
16999 post_cnt = 0;
17001 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
17002 while (!list_empty(&blck_nblist)) {
17003 list_remove_head(&blck_nblist, lpfc_ncmd,
17004 struct lpfc_io_buf, list);
17005 if (status) {
17006 /* Post error. Mark buffer unavailable. */
17007 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17008 } else {
17009 /* Post success. Mark buffer available. */
17010 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17011 lpfc_ncmd->status = IOSTAT_SUCCESS;
17012 num_posted++;
17013 }
17014 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17017 /* Push NVME buffers with sgl posted to the available list */
17018 lpfc_io_buf_replenish(phba, &nvme_nblist);
17020 return num_posted;
17021 }
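/*
 * Editor's sketch (illustrative, not part of the driver): the xri
 * contiguity rule used above. Buffers whose xritags run consecutively can
 * share one non-embedded block post; a gap forces the accumulated block to
 * be flushed and a new block started.
 */
#ifdef LPFC_DOC_EXAMPLES
static bool lpfc_example_xri_contiguous(uint16_t last_xritag,
					uint16_t next_xritag)
{
	/* NO_XRI means no block is open yet, so anything may start one. */
	return last_xritag == NO_XRI || next_xritag == last_xritag + 1;
}
#endif /* LPFC_DOC_EXAMPLES */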
17024 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17025 * @phba: pointer to lpfc_hba struct that the frame was received on
17026 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17028 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17029 * valid type of frame that the LPFC driver will handle. This function will
17030 * return a zero if the frame is a valid frame or a non-zero value when the
17031 * frame does not pass the check.
17034 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17036 /* make rctl_names static to save stack space */
17037 struct fc_vft_header *fc_vft_hdr;
17038 uint32_t *header = (uint32_t *) fc_hdr;
17040 #define FC_RCTL_MDS_DIAGS 0xF4
17042 switch (fc_hdr->fh_r_ctl) {
17043 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17044 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17045 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17046 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17047 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17048 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17049 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17050 case FC_RCTL_DD_CMD_STATUS: /* command status */
17051 case FC_RCTL_ELS_REQ: /* extended link services request */
17052 case FC_RCTL_ELS_REP: /* extended link services reply */
17053 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17054 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17055 case FC_RCTL_BA_NOP: /* basic link service NOP */
17056 case FC_RCTL_BA_ABTS: /* basic link service abort */
17057 case FC_RCTL_BA_RMC: /* remove connection */
17058 case FC_RCTL_BA_ACC: /* basic accept */
17059 case FC_RCTL_BA_RJT: /* basic reject */
17060 case FC_RCTL_BA_PRMT:
17061 case FC_RCTL_ACK_1: /* acknowledge_1 */
17062 case FC_RCTL_ACK_0: /* acknowledge_0 */
17063 case FC_RCTL_P_RJT: /* port reject */
17064 case FC_RCTL_F_RJT: /* fabric reject */
17065 case FC_RCTL_P_BSY: /* port busy */
17066 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17067 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17068 case FC_RCTL_LCR: /* link credit reset */
17069 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17070 case FC_RCTL_END: /* end */
17071 break;
17072 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17073 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17074 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17075 return lpfc_fc_frame_check(phba, fc_hdr);
17076 default:
17077 goto drop;
17078 }
17080 switch (fc_hdr->fh_type) {
17081 case FC_TYPE_BLS:
17082 case FC_TYPE_ELS:
17083 case FC_TYPE_FCP:
17084 case FC_TYPE_CT:
17085 case FC_TYPE_NVME:
17086 break;
17087 case FC_TYPE_IP:
17088 case FC_TYPE_ILS:
17089 default:
17090 goto drop;
17091 }
17093 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17094 "2538 Received frame rctl:x%x, type:x%x, "
17095 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17096 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17097 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17098 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17099 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17100 be32_to_cpu(header[6]));
17101 return 0;
17102 drop:
17103 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17104 "2539 Dropped frame rctl:x%x type:x%x\n",
17105 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17106 return 1;
17107 }
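/*
 * Editor's usage sketch (illustrative, not part of the driver): vetting a
 * received frame before passing it up. The hbq_dmabuf header-buffer layout
 * mirrors how the driver carries unsolicited receive data.
 */
#ifdef LPFC_DOC_EXAMPLES
static bool lpfc_example_frame_ok(struct lpfc_hba *phba,
				  struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;

	/* The FC header sits at the start of the header buffer. */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	return lpfc_fc_frame_check(phba, fc_hdr) == 0;
}
#endif /* LPFC_DOC_EXAMPLES */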
17110 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17111 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17113 * This function processes the FC header to retrieve the VFI from the VF
17114 * header, if one exists. This function will return the VFI if one exists
17115 * or 0 if no VF tagging header exists.
17118 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17120 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17122 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17123 return 0;
17124 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17128 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17129 * @phba: Pointer to the HBA structure to search for the vport on
17130 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17131 * @fcfi: The FC Fabric ID that the frame came from
17133 * This function searches the @phba for a vport that matches the content of the
17134 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17135 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17136 * returns the matching vport pointer or NULL if unable to match frame to a
17139 static struct lpfc_vport *
17140 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17141 uint16_t fcfi, uint32_t did)
17143 struct lpfc_vport **vports;
17144 struct lpfc_vport *vport = NULL;
17147 if (did == Fabric_DID)
17148 return phba->pport;
17149 if ((phba->pport->fc_flag & FC_PT2PT) &&
17150 !(phba->link_state == LPFC_HBA_READY))
17151 return phba->pport;
17153 vports = lpfc_create_vport_work_array(phba);
17154 if (vports != NULL) {
17155 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17156 if (phba->fcf.fcfi == fcfi &&
17157 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17158 vports[i]->fc_myDID == did) {
17164 lpfc_destroy_vport_work_array(phba, vports);
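/*
 * Illustrative sketch, not driver code: the three-way match applied in
 * the loop above. A frame belongs to a vport when the fabric's FCFI,
 * the frame's VFI and the destination DID all agree. The helper name
 * vport_matches_frame() is hypothetical.
 */
static bool vport_matches_frame(struct lpfc_hba *phba,
				struct lpfc_vport *vport,
				struct fc_frame_header *fc_hdr,
				uint16_t fcfi, uint32_t did)
{
	return phba->fcf.fcfi == fcfi &&
	       vport->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
	       vport->fc_myDID == did;
}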
17169 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17170 * @vport: The vport to work on.
17172 * This function updates the receive sequence time stamp for this vport. The
17173 * receive sequence time stamp indicates the time that the last frame of the
17174 * sequence that has been idle for the longest amount of time was received.
17175 * The driver uses this time stamp to determine if any received sequences have
17176 * timed out.
17179 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17181 struct lpfc_dmabuf *h_buf;
17182 struct hbq_dmabuf *dmabuf = NULL;
17184 /* get the oldest sequence on the rcv list */
17185 h_buf = list_get_first(&vport->rcv_buffer_list,
17186 struct lpfc_dmabuf, list);
17189 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17190 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17194 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17195 * @vport: The vport that the received sequences were sent to.
17197 * This function cleans up all outstanding received sequences. This is called
17198 * by the driver when a link event or user action invalidates all the received
17199 * sequences.
17202 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17204 struct lpfc_dmabuf *h_buf, *hnext;
17205 struct lpfc_dmabuf *d_buf, *dnext;
17206 struct hbq_dmabuf *dmabuf = NULL;
17208 /* start with the oldest sequence on the rcv list */
17209 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17210 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17211 list_del_init(&dmabuf->hbuf.list);
17212 list_for_each_entry_safe(d_buf, dnext,
17213 &dmabuf->dbuf.list, list) {
17214 list_del_init(&d_buf->list);
17215 lpfc_in_buf_free(vport->phba, d_buf);
17217 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
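/*
 * The header/data teardown above recurs in every drop path in this
 * file; a minimal sketch with the pattern factored into a hypothetical
 * helper, free_seq():
 */
static void free_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq)
{
	struct lpfc_dmabuf *d_buf, *dnext;

	/* free each data buffer linked to the sequence... */
	list_for_each_entry_safe(d_buf, dnext, &seq->dbuf.list, list) {
		list_del_init(&d_buf->list);
		lpfc_in_buf_free(vport->phba, d_buf);
	}
	/* ...then the buffer pair that anchors the sequence */
	lpfc_in_buf_free(vport->phba, &seq->dbuf);
}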
17222 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17223 * @vport: The vport that the received sequences were sent to.
17225 * This function determines whether any received sequences have timed out by
17226 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17227 * indicates that there is at least one timed out sequence this routine will
17228 * go through the received sequences one at a time from most inactive to most
17229 * active to determine which ones need to be cleaned up. Once it has determined
17230 * that a sequence needs to be cleaned up it will simply free up the resources
17231 * without sending an abort.
17234 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17236 struct lpfc_dmabuf *h_buf, *hnext;
17237 struct lpfc_dmabuf *d_buf, *dnext;
17238 struct hbq_dmabuf *dmabuf = NULL;
17239 unsigned long timeout;
17240 int abort_count = 0;
17242 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17243 vport->rcv_buffer_time_stamp);
17244 if (list_empty(&vport->rcv_buffer_list) ||
17245 time_before(jiffies, timeout))
17247 /* start with the oldest sequence on the rcv list */
17248 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17249 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17250 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17251 dmabuf->time_stamp);
17252 if (time_before(jiffies, timeout))
17255 list_del_init(&dmabuf->hbuf.list);
17256 list_for_each_entry_safe(d_buf, dnext,
17257 &dmabuf->dbuf.list, list) {
17258 list_del_init(&d_buf->list);
17259 lpfc_in_buf_free(vport->phba, d_buf);
17261 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17264 lpfc_update_rcv_time_stamp(vport);
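/*
 * Minimal sketch of the timeout test applied above, shown on its own.
 * seq_is_stale() is a hypothetical helper; the real code open-codes the
 * same jiffies arithmetic.
 */
static bool seq_is_stale(struct lpfc_vport *vport, unsigned long stamp)
{
	unsigned long timeout = stamp +
				msecs_to_jiffies(vport->phba->fc_edtov);

	/* true once at least E_D_TOV has elapsed since the last frame */
	return time_after_eq(jiffies, timeout);
}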
17268 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17269 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17271 * This function searches through the existing incomplete sequences that have
17272 * been sent to this @vport. If the frame matches one of the incomplete
17273 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17274 * make up that sequence. If no sequence is found that matches this frame then
17275 * the function will add the hbuf in the @dmabuf to the @vport's
17276 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
17277 * sequence list that the frame was linked to.
17279 static struct hbq_dmabuf *
17280 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17282 struct fc_frame_header *new_hdr;
17283 struct fc_frame_header *temp_hdr;
17284 struct lpfc_dmabuf *d_buf;
17285 struct lpfc_dmabuf *h_buf;
17286 struct hbq_dmabuf *seq_dmabuf = NULL;
17287 struct hbq_dmabuf *temp_dmabuf = NULL;
17290 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17291 dmabuf->time_stamp = jiffies;
17292 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17294 /* Use the hdr_buf to find the sequence that this frame belongs to */
17295 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17296 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17297 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17298 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17299 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17301 /* found a pending sequence that matches this frame */
17302 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17307 * This indicates first frame received for this sequence.
17308 * Queue the buffer on the vport's rcv_buffer_list.
17310 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17311 lpfc_update_rcv_time_stamp(vport);
17314 temp_hdr = seq_dmabuf->hbuf.virt;
17315 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17316 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17317 list_del_init(&seq_dmabuf->hbuf.list);
17318 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17319 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17320 lpfc_update_rcv_time_stamp(vport);
17323 /* move this sequence to the tail to indicate a young sequence */
17324 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17325 seq_dmabuf->time_stamp = jiffies;
17326 lpfc_update_rcv_time_stamp(vport);
17327 if (list_empty(&seq_dmabuf->dbuf.list)) {
17328 temp_hdr = dmabuf->hbuf.virt;
17329 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17332 /* find the correct place in the sequence to insert this frame */
17333 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17335 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17336 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17338 * If the frame's sequence count is greater than the frame on
17339 * the list then insert the frame right after this frame
17341 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17342 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17343 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17348 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17350 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
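/*
 * Hedged sketch of the three-part key lpfc_fc_frame_add() matches on:
 * SEQ_ID, OX_ID and the 3-byte S_ID must all agree before a frame is
 * linked to a pending sequence. frames_same_seq() is hypothetical.
 */
static bool frames_same_seq(struct fc_frame_header *a,
			    struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}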
17359 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17360 * @vport: pointer to a virtual port
17361 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17363 * This function tries to abort the partially assembled sequence described
17364 * by the information from the basic abort @dmabuf. It checks to see whether
17365 * such a partially assembled sequence is held by the driver. If so, it frees
17366 * up all the frames from the partially assembled sequence.
17369 * true -- if a matching partially assembled sequence is present and all
17370 * of its frames were freed;
17371 * false -- if no matching partially assembled sequence is present, so
17372 * nothing was aborted in the lower layer driver
17375 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17376 struct hbq_dmabuf *dmabuf)
17378 struct fc_frame_header *new_hdr;
17379 struct fc_frame_header *temp_hdr;
17380 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17381 struct hbq_dmabuf *seq_dmabuf = NULL;
17383 /* Use the hdr_buf to find the sequence that matches this frame */
17384 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17385 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17386 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17387 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17388 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17389 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17390 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17391 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17393 /* found a pending sequence that matches this frame */
17394 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17398 /* Free up all the frames from the partially assembled sequence */
17400 list_for_each_entry_safe(d_buf, n_buf,
17401 &seq_dmabuf->dbuf.list, list) {
17402 list_del_init(&d_buf->list);
17403 lpfc_in_buf_free(vport->phba, d_buf);
17411 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17412 * @vport: pointer to a virtual port
17413 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17415 * This function tries to abort the sequence already assembled and passed to
17416 * the upper level protocol, described by the information from the basic
17417 * abort @dmabuf. It checks to see whether such a pending context exists at
17418 * the upper level protocol. If so, it cleans up the pending context.
17421 * true -- if a matching pending context of the sequence was found and
17422 * cleaned up;
17423 * false -- if no matching pending context of the sequence is present
17427 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17429 struct lpfc_hba *phba = vport->phba;
17432 /* Accepting abort at ulp with SLI4 only */
17433 if (phba->sli_rev < LPFC_SLI_REV4)
17436 /* Let all interested upper level protocols handle the abort */
17437 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17445 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17446 * @phba: Pointer to HBA context object.
17447 * @cmd_iocbq: pointer to the command iocbq structure.
17448 * @rsp_iocbq: pointer to the response iocbq structure.
17450 * This function handles the sequence abort response iocb command complete
17451 * event. It properly releases the memory allocated to the sequence abort
17452 * accept iocb.
17455 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17456 struct lpfc_iocbq *cmd_iocbq,
17457 struct lpfc_iocbq *rsp_iocbq)
17459 struct lpfc_nodelist *ndlp;
17462 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17463 lpfc_nlp_put(ndlp);
17464 lpfc_nlp_not_used(ndlp);
17465 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17468 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17469 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17470 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17471 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17472 rsp_iocbq->iocb.ulpStatus,
17473 rsp_iocbq->iocb.un.ulpWord[4]);
17477 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17478 * @phba: Pointer to HBA context object.
17479 * @xri: xri id in transaction.
17481 * This function validates that the xri maps to the known range of XRIs
17482 * allocated and used by the driver.
17485 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17490 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17491 if (xri == phba->sli4_hba.xri_ids[i])
17498 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17499 * @vport: Pointer to the vport object.
17500 * @fc_hdr: pointer to a FC frame header.
17501 * @aborted: whether the partially assembled receive sequence was aborted
17502 * This function sends a basic response to a previous unsol sequence abort
17503 * event after aborting the sequence handling.
17506 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17507 struct fc_frame_header *fc_hdr, bool aborted)
17509 struct lpfc_hba *phba = vport->phba;
17510 struct lpfc_iocbq *ctiocb = NULL;
17511 struct lpfc_nodelist *ndlp;
17512 uint16_t oxid, rxid, xri, lxri;
17513 uint32_t sid, fctl;
17517 if (!lpfc_is_link_up(phba))
17520 sid = sli4_sid_from_fc_hdr(fc_hdr);
17521 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17522 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17524 ndlp = lpfc_findnode_did(vport, sid);
17526 ndlp = lpfc_nlp_init(vport, sid);
17528 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17529 "1268 Failed to allocate ndlp for "
17530 "oxid:x%x SID:x%x\n", oxid, sid);
17533 /* Put ndlp onto pport node list */
17534 lpfc_enqueue_node(vport, ndlp);
17535 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17536 /* re-setup ndlp without removing from node list */
17537 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17539 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17540 "3275 Failed to active ndlp found "
17541 "for oxid:x%x SID:x%x\n", oxid, sid);
17546 /* Allocate buffer for rsp iocb */
17547 ctiocb = lpfc_sli_get_iocbq(phba);
17551 /* Extract the F_CTL field from FC_HDR */
17552 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17554 icmd = &ctiocb->iocb;
17555 icmd->un.xseq64.bdl.bdeSize = 0;
17556 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17557 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17558 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17559 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17561 /* Fill in the rest of iocb fields */
17562 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17563 icmd->ulpBdeCount = 0;
17565 icmd->ulpClass = CLASS3;
17566 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17567 ctiocb->context1 = lpfc_nlp_get(ndlp);
17569 ctiocb->vport = phba->pport;
17570 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17571 ctiocb->sli4_lxritag = NO_XRI;
17572 ctiocb->sli4_xritag = NO_XRI;
17574 if (fctl & FC_FC_EX_CTX)
17575 /* Exchange responder sent the abort so we
17576 * own the oxid.
17577 */
17578 xri = oxid;
17579 else
17580 xri = rxid;
17581 lxri = lpfc_sli4_xri_inrange(phba, xri);
17582 if (lxri != NO_XRI)
17583 lpfc_set_rrq_active(phba, ndlp, lxri,
17584 (xri == oxid) ? rxid : oxid, 0);
17585 /* For BA_ABTS from exchange responder, if the logical xri with
17586 * the oxid maps to the FCP XRI range, the port no longer has
17587 * that exchange context, send a BLS_RJT. Override the IOCB for
17590 if ((fctl & FC_FC_EX_CTX) &&
17591 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17592 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17593 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17594 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17595 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17598 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17599 * the driver no longer has that exchange, send a BLS_RJT. Override
17600 * the IOCB for a BA_RJT.
17602 if (aborted == false) {
17603 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17604 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17605 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17606 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17609 if (fctl & FC_FC_EX_CTX) {
17610 /* ABTS sent by responder to CT exchange, construction
17611 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17612 * field and RX_ID from ABTS for RX_ID field.
17614 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17616 /* ABTS sent by initiator to CT exchange, construction
17617 * of BA_ACC will need to allocate a new XRI as for the
17618 * XRI_TAG field.
17619 */
17620 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17622 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17623 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17625 /* Xmit CT abts response on exchange <xid> */
17626 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17627 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17628 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17630 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17631 if (rc == IOCB_ERROR) {
17632 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17633 "2925 Failed to issue CT ABTS RSP x%x on "
17634 "xri x%x, Data x%x\n",
17635 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17637 lpfc_nlp_put(ndlp);
17638 ctiocb->context1 = NULL;
17639 lpfc_sli_release_iocbq(phba, ctiocb);
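/*
 * Condensed, illustrative view of the accept-vs-reject decision made
 * above; the real flow also distinguishes responder/initiator context.
 * bls_reply_rctl() is a hypothetical helper.
 */
static uint8_t bls_reply_rctl(bool aborted, bool xri_unknown)
{
	/* reject when the abort did not land or the exchange is unknown */
	if (!aborted || xri_unknown)
		return FC_RCTL_BA_RJT;
	return FC_RCTL_BA_ACC;
}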
17644 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17645 * @vport: Pointer to the vport on which this sequence was received
17646 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17648 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17649 * receive sequence is only partially assembled by the driver, it shall abort
17650 * the partially assembled frames for the sequence. Otherwise, if the
17651 * unsolicited receive sequence has been completely assembled and passed to
17652 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
17653 * indicate that the unsolicited sequence has been aborted. After that, it
17654 * will issue a basic accept to accept the abort.
17657 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17658 struct hbq_dmabuf *dmabuf)
17660 struct lpfc_hba *phba = vport->phba;
17661 struct fc_frame_header fc_hdr;
17665 /* Make a copy of fc_hdr before the dmabuf is released */
17666 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17667 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17669 if (fctl & FC_FC_EX_CTX) {
17670 /* ABTS by responder to exchange, no cleanup needed */
17673 /* ABTS by initiator to exchange, need to do cleanup */
17674 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17675 if (aborted == false)
17676 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17678 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17680 if (phba->nvmet_support) {
17681 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17685 /* Respond with BA_ACC or BA_RJT accordingly */
17686 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17690 * lpfc_seq_complete - Indicates if a sequence is complete
17691 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17693 * This function checks the sequence, starting with the frame described by
17694 * @dmabuf, to see if all the frames associated with this sequence are present.
17695 * The frames associated with this sequence are linked to the @dmabuf using the
17696 * dbuf list. This function looks for three major things: 1) that the first
17697 * frame has a sequence count of zero; 2) that there is a frame with the last
17698 * frame of sequence bit set; 3) that there are no holes in the sequence count.
17699 * The function returns 1 when the sequence is complete, otherwise it returns 0.
17702 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17704 struct fc_frame_header *hdr;
17705 struct lpfc_dmabuf *d_buf;
17706 struct hbq_dmabuf *seq_dmabuf;
17710 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17711 /* make sure first frame of sequence has a sequence count of zero */
17712 if (hdr->fh_seq_cnt != seq_count)
17714 fctl = (hdr->fh_f_ctl[0] << 16 |
17715 hdr->fh_f_ctl[1] << 8 |
17717 /* If last frame of sequence we can return success. */
17718 if (fctl & FC_FC_END_SEQ)
17719 return 1;
17720 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17721 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17722 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17723 /* If there is a hole in the sequence count then fail. */
17724 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17726 fctl = (hdr->fh_f_ctl[0] << 16 |
17727 hdr->fh_f_ctl[1] << 8 |
17729 /* If last frame of sequence we can return success. */
17730 if (fctl & FC_FC_END_SEQ)
17731 return 1;
17732 }
17733 return 0;
17734 }
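/*
 * Sketch of the 24-bit F_CTL reassembly used twice above, with the
 * end-of-sequence test factored out. frame_ends_seq() is hypothetical.
 */
static bool frame_ends_seq(struct fc_frame_header *hdr)
{
	uint32_t fctl = hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2];

	return (fctl & FC_FC_END_SEQ) != 0;
}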
17737 * lpfc_prep_seq - Prep sequence for ULP processing
17738 * @vport: Pointer to the vport on which this sequence was received
17739 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17741 * This function takes a sequence, described by a list of frames, and creates
17742 * a list of iocbq structures to describe the sequence. This iocbq list will be
17743 * used to issue to the generic unsolicited sequence handler. This routine
17744 * returns a pointer to the first iocbq in the list. If the function is unable
17745 * to allocate an iocbq then it throws out the received frames that could not
17746 * be described and returns a pointer to the first iocbq. If unable to
17747 * allocate any iocbqs (including the first) this function will return NULL.
17749 static struct lpfc_iocbq *
17750 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17752 struct hbq_dmabuf *hbq_buf;
17753 struct lpfc_dmabuf *d_buf, *n_buf;
17754 struct lpfc_iocbq *first_iocbq, *iocbq;
17755 struct fc_frame_header *fc_hdr;
17757 uint32_t len, tot_len;
17758 struct ulp_bde64 *pbde;
17760 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17761 /* remove from receive buffer list */
17762 list_del_init(&seq_dmabuf->hbuf.list);
17763 lpfc_update_rcv_time_stamp(vport);
17764 /* get the Remote Port's SID */
17765 sid = sli4_sid_from_fc_hdr(fc_hdr);
17767 /* Get an iocbq struct to fill in. */
17768 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17770 /* Initialize the first IOCB. */
17771 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17772 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17773 first_iocbq->vport = vport;
17775 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17776 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17777 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17778 first_iocbq->iocb.un.rcvels.parmRo =
17779 sli4_did_from_fc_hdr(fc_hdr);
17780 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17782 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17783 first_iocbq->iocb.ulpContext = NO_XRI;
17784 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17785 be16_to_cpu(fc_hdr->fh_ox_id);
17786 /* iocbq is prepped for internal consumption. Physical vpi. */
17787 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17788 vport->phba->vpi_ids[vport->vpi];
17789 /* put the first buffer into the first IOCBq */
17790 tot_len = bf_get(lpfc_rcqe_length,
17791 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17793 first_iocbq->context2 = &seq_dmabuf->dbuf;
17794 first_iocbq->context3 = NULL;
17795 first_iocbq->iocb.ulpBdeCount = 1;
17796 if (tot_len > LPFC_DATA_BUF_SIZE)
17797 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17798 LPFC_DATA_BUF_SIZE;
17800 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17802 first_iocbq->iocb.un.rcvels.remoteID = sid;
17804 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17806 iocbq = first_iocbq;
17808 * Each IOCBq can have two Buffers assigned, so go through the list
17809 * of buffers for this sequence and save two buffers in each IOCBq
17811 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17813 lpfc_in_buf_free(vport->phba, d_buf);
17816 if (!iocbq->context3) {
17817 iocbq->context3 = d_buf;
17818 iocbq->iocb.ulpBdeCount++;
17819 /* We need to get the size out of the right CQE */
17820 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17821 len = bf_get(lpfc_rcqe_length,
17822 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17823 pbde = (struct ulp_bde64 *)
17824 &iocbq->iocb.unsli3.sli3Words[4];
17825 if (len > LPFC_DATA_BUF_SIZE)
17826 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17828 pbde->tus.f.bdeSize = len;
17830 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17833 iocbq = lpfc_sli_get_iocbq(vport->phba);
17836 first_iocbq->iocb.ulpStatus =
17837 IOSTAT_FCP_RSP_ERROR;
17838 first_iocbq->iocb.un.ulpWord[4] =
17839 IOERR_NO_RESOURCES;
17841 lpfc_in_buf_free(vport->phba, d_buf);
17844 /* We need to get the size out of the right CQE */
17845 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17846 len = bf_get(lpfc_rcqe_length,
17847 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17848 iocbq->context2 = d_buf;
17849 iocbq->context3 = NULL;
17850 iocbq->iocb.ulpBdeCount = 1;
17851 if (len > LPFC_DATA_BUF_SIZE)
17852 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17853 LPFC_DATA_BUF_SIZE;
17855 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17858 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17860 iocbq->iocb.un.rcvels.remoteID = sid;
17861 list_add_tail(&iocbq->list, &first_iocbq->list);
17864 return first_iocbq;
17868 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17869 struct hbq_dmabuf *seq_dmabuf)
17871 struct fc_frame_header *fc_hdr;
17872 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17873 struct lpfc_hba *phba = vport->phba;
17875 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17876 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17879 "2707 Ring %d handler: Failed to allocate "
17880 "iocb Rctl x%x Type x%x received\n",
17882 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17885 if (!lpfc_complete_unsol_iocb(phba,
17886 phba->sli4_hba.els_wq->pring,
17887 iocbq, fc_hdr->fh_r_ctl,
17889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17890 "2540 Ring %d handler: unexpected Rctl "
17891 "x%x Type x%x received\n",
17893 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17895 /* Free iocb created in lpfc_prep_seq */
17896 list_for_each_entry_safe(curr_iocb, next_iocb,
17897 &iocbq->list, list) {
17898 list_del_init(&curr_iocb->list);
17899 lpfc_sli_release_iocbq(phba, curr_iocb);
17901 lpfc_sli_release_iocbq(phba, iocbq);
17905 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17906 struct lpfc_iocbq *rspiocb)
17908 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17910 if (pcmd && pcmd->virt)
17911 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17913 lpfc_sli_release_iocbq(phba, cmdiocb);
17914 lpfc_drain_txq(phba);
17918 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17919 struct hbq_dmabuf *dmabuf)
17921 struct fc_frame_header *fc_hdr;
17922 struct lpfc_hba *phba = vport->phba;
17923 struct lpfc_iocbq *iocbq = NULL;
17924 union lpfc_wqe *wqe;
17925 struct lpfc_dmabuf *pcmd = NULL;
17926 uint32_t frame_len;
17928 unsigned long iflags;
17930 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17931 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17933 /* Send the received frame back */
17934 iocbq = lpfc_sli_get_iocbq(phba);
17936 /* Queue cq event and wakeup worker thread to process it */
17937 spin_lock_irqsave(&phba->hbalock, iflags);
17938 list_add_tail(&dmabuf->cq_event.list,
17939 &phba->sli4_hba.sp_queue_event);
17940 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17941 spin_unlock_irqrestore(&phba->hbalock, iflags);
17942 lpfc_worker_wake_up(phba);
17946 /* Allocate buffer for command payload */
17947 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17949 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17951 if (!pcmd || !pcmd->virt)
17954 INIT_LIST_HEAD(&pcmd->list);
17956 /* copyin the payload */
17957 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17959 /* fill in BDE's for command */
17960 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17961 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17962 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17963 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17965 iocbq->context2 = pcmd;
17966 iocbq->vport = vport;
17967 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17968 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17971 * Setup rest of the iocb as though it were a WQE
17972 * Build the SEND_FRAME WQE
17974 wqe = (union lpfc_wqe *)&iocbq->iocb;
17976 wqe->send_frame.frame_len = frame_len;
17977 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17978 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17979 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17980 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17981 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17982 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17984 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17985 iocbq->iocb.ulpLe = 1;
17986 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17987 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17988 if (rc == IOCB_ERROR)
17991 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17995 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17996 "2023 Unable to process MDS loopback frame\n");
17997 if (pcmd && pcmd->virt)
17998 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18001 lpfc_sli_release_iocbq(phba, iocbq);
18002 lpfc_in_buf_free(phba, &dmabuf->dbuf);
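/*
 * The six header-word copies in the SEND_FRAME setup above unroll this
 * loop; a hedged sketch with a hypothetical copy_fc_hdr_words() helper.
 */
static void copy_fc_hdr_words(uint32_t *wd, struct fc_frame_header *fc_hdr)
{
	uint32_t *src = (uint32_t *)fc_hdr;
	int i;

	/* an FC header is 24 bytes: six 32-bit words, byte-swapped */
	for (i = 0; i < 6; i++)
		wd[i] = be32_to_cpu(src[i]);
}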
18006 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18007 * @phba: Pointer to HBA context object.
18009 * This function is called with no lock held. It processes all the received
18010 * buffers and hands them to the upper layers when a received buffer
18011 * indicates that it is the final frame in the sequence. The interrupt
18012 * service routine processes received buffers in interrupt context. The
18013 * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18014 * appropriate receive function when the final frame in a sequence is received.
18017 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18018 struct hbq_dmabuf *dmabuf)
18020 struct hbq_dmabuf *seq_dmabuf;
18021 struct fc_frame_header *fc_hdr;
18022 struct lpfc_vport *vport;
18026 /* Process each received buffer */
18027 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18029 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18030 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18031 vport = phba->pport;
18032 /* Handle MDS Loopback frames */
18033 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18037 /* check to see if this is a valid type of frame */
18038 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18039 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18043 if ((bf_get(lpfc_cqe_code,
18044 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18045 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18046 &dmabuf->cq_event.cqe.rcqe_cmpl);
18048 fcfi = bf_get(lpfc_rcqe_fcf_id,
18049 &dmabuf->cq_event.cqe.rcqe_cmpl);
18051 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18052 vport = phba->pport;
18053 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18054 "2023 MDS Loopback %d bytes\n",
18055 bf_get(lpfc_rcqe_length,
18056 &dmabuf->cq_event.cqe.rcqe_cmpl));
18057 /* Handle MDS Loopback frames */
18058 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18062 /* d_id this frame is directed to */
18063 did = sli4_did_from_fc_hdr(fc_hdr);
18065 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18067 /* throw out the frame */
18068 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18072 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18073 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18074 (did != Fabric_DID)) {
18076 * Throw out the frame if we are not pt2pt.
18077 * The pt2pt protocol allows for discovery frames
18078 * to be received without a registered VPI.
18080 if (!(vport->fc_flag & FC_PT2PT) ||
18081 (phba->link_state == LPFC_HBA_READY)) {
18082 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18087 /* Handle the basic abort sequence (BA_ABTS) event */
18088 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18089 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18093 /* Link this frame */
18094 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18096 /* unable to add frame to vport - throw it out */
18097 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18100 /* If not last frame in sequence continue processing frames. */
18101 if (!lpfc_seq_complete(seq_dmabuf))
18104 /* Send the complete sequence to the upper layer protocol */
18105 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18109 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18110 * @phba: pointer to lpfc hba data structure.
18112 * This routine is invoked to post rpi header templates to the
18113 * HBA consistent with the SLI-4 interface spec. This routine
18114 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18115 * 64 rpi context headers per SLI4_PAGE_SIZE page.
18117 * This routine does not require any locks. Its usage is expected
18118 * to be driver load or reset recovery when the driver is
18123 * -EIO - The mailbox failed to complete successfully.
18124 * When this error occurs, the driver is not guaranteed
18125 * to have any rpi regions posted to the device and
18126 * must either attempt to repost the regions or take a
18127 * fatal error.
18130 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18132 struct lpfc_rpi_hdr *rpi_page;
18136 /* SLI4 ports that support extents do not require RPI headers. */
18137 if (!phba->sli4_hba.rpi_hdrs_in_use)
18139 if (phba->sli4_hba.extents_in_use)
18142 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18144 * Assign the rpi headers a physical rpi only if the driver
18145 * has not initialized those resources. A port reset only
18146 * needs the headers posted.
18148 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18150 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18152 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18153 if (rc != MBX_SUCCESS) {
18154 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18155 "2008 Error %d posting all rpi "
18163 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18164 LPFC_RPI_RSRC_RDY);
18169 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18170 * @phba: pointer to lpfc hba data structure.
18171 * @rpi_page: pointer to the rpi memory region.
18173 * This routine is invoked to post a single rpi header to the
18174 * HBA consistent with the SLI-4 interface spec. This memory region
18175 * maps up to 64 rpi context regions.
18179 * -ENOMEM - No available memory
18180 * -EIO - The mailbox failed to complete successfully.
18183 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18185 LPFC_MBOXQ_t *mboxq;
18186 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18188 uint32_t shdr_status, shdr_add_status;
18189 union lpfc_sli4_cfg_shdr *shdr;
18191 /* SLI4 ports that support extents do not require RPI headers. */
18192 if (!phba->sli4_hba.rpi_hdrs_in_use)
18194 if (phba->sli4_hba.extents_in_use)
18197 /* The port is notified of the header region via a mailbox command. */
18198 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18200 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18201 "2001 Unable to allocate memory for issuing "
18202 "SLI_CONFIG_SPECIAL mailbox command\n");
18206 /* Post all rpi memory regions to the port. */
18207 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18208 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18209 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18210 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18211 sizeof(struct lpfc_sli4_cfg_mhdr),
18212 LPFC_SLI4_MBX_EMBED);
18215 /* Post the physical rpi to the port for this rpi header. */
18216 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18217 rpi_page->start_rpi);
18218 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18219 hdr_tmpl, rpi_page->page_count);
18221 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18222 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18223 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18224 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18225 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18226 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18227 if (rc != MBX_TIMEOUT)
18228 mempool_free(mboxq, phba->mbox_mem_pool);
18229 if (shdr_status || shdr_add_status || rc) {
18230 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18231 "2514 POST_RPI_HDR mailbox failed with "
18232 "status x%x add_status x%x, mbx status x%x\n",
18233 shdr_status, shdr_add_status, rc);
18237 * The next_rpi stores the next logical module-64 rpi value used
18238 * to post physical rpis in subsequent rpi postings.
18240 spin_lock_irq(&phba->hbalock);
18241 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18242 spin_unlock_irq(&phba->hbalock);
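/*
 * The mailbox status check above is an idiom repeated throughout this
 * file; a minimal sketch with the check factored into a hypothetical
 * helper. The -ENXIO mapping is an assumption for illustration.
 */
static int cfg_shdr_check(union lpfc_sli4_cfg_shdr *shdr, int mbx_rc)
{
	uint32_t status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	uint32_t add_status = bf_get(lpfc_mbox_hdr_add_status,
				     &shdr->response);

	/* any nonzero value among the three means the command failed */
	return (status || add_status || mbx_rc) ? -ENXIO : 0;
}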
18248 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18249 * @phba: pointer to lpfc hba data structure.
18251 * This routine is invoked to allocate an available rpi from the driver's
18252 * rpi bitmask, consistent with the SLI-4 interface spec. If the driver is
18253 * running low on rpi resources, it also posts another SLI4_PAGE_SIZE
18254 * memory region of rpi context headers to the port.
18257 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18258 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18261 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18264 uint16_t max_rpi, rpi_limit;
18265 uint16_t rpi_remaining, lrpi = 0;
18266 struct lpfc_rpi_hdr *rpi_hdr;
18267 unsigned long iflag;
18270 * Fetch the next logical rpi. Because this index is logical,
18271 * the driver starts at 0 each time.
18273 spin_lock_irqsave(&phba->hbalock, iflag);
18274 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18275 rpi_limit = phba->sli4_hba.next_rpi;
18277 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18278 if (rpi >= rpi_limit)
18279 rpi = LPFC_RPI_ALLOC_ERROR;
18281 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18282 phba->sli4_hba.max_cfg_param.rpi_used++;
18283 phba->sli4_hba.rpi_count++;
18285 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18286 "0001 rpi:%x max:%x lim:%x\n",
18287 (int) rpi, max_rpi, rpi_limit);
18290 * Don't try to allocate more rpi header regions if the device limit
18291 * has been exhausted.
18293 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18294 (phba->sli4_hba.rpi_count >= max_rpi)) {
18295 spin_unlock_irqrestore(&phba->hbalock, iflag);
18300 * RPI header postings are not required for SLI4 ports capable of
18303 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18304 spin_unlock_irqrestore(&phba->hbalock, iflag);
18309 * If the driver is running low on rpi resources, allocate another
18310 * page now. Note that the next_rpi value is used because
18311 * it represents how many are actually in use whereas max_rpi notes
18312 * the maximum number supported by the device.
18314 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18315 spin_unlock_irqrestore(&phba->hbalock, iflag);
18316 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18317 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18320 "2002 Error Could not grow rpi "
18323 lrpi = rpi_hdr->start_rpi;
18324 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18325 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
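/*
 * Self-contained model of the rpi allocator above: find the first
 * clear bit under the current limit and mark it used. alloc_id() is
 * hypothetical and assumes the caller holds the equivalent of hbalock.
 */
static int alloc_id(unsigned long *bmask, unsigned long limit)
{
	unsigned long id = find_next_zero_bit(bmask, limit, 0);

	if (id >= limit)
		return -1;		/* pool exhausted */
	set_bit(id, bmask);
	return (int)id;
}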
18333 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18334 * @phba: pointer to lpfc hba data structure.
18336 * This routine is invoked to release an rpi to the pool of
18337 * available rpis maintained by the driver.
18340 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18343 * if the rpi value indicates a prior unreg has already
18344 * been done, skip the unreg.
18346 if (rpi == LPFC_RPI_ALLOC_ERROR)
18349 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18350 phba->sli4_hba.rpi_count--;
18351 phba->sli4_hba.max_cfg_param.rpi_used--;
18353 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18354 "2016 rpi %x not inuse\n",
18360 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18361 * @phba: pointer to lpfc hba data structure.
18363 * This routine is invoked to release an rpi to the pool of
18364 * available rpis maintained by the driver.
18367 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18369 spin_lock_irq(&phba->hbalock);
18370 __lpfc_sli4_free_rpi(phba, rpi);
18371 spin_unlock_irq(&phba->hbalock);
18375 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18376 * @phba: pointer to lpfc hba data structure.
18378 * This routine is invoked to free the memory regions that
18379 * provided rpis via a bitmask (the rpi bitmask and rpi id array).
18382 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18384 kfree(phba->sli4_hba.rpi_bmask);
18385 kfree(phba->sli4_hba.rpi_ids);
18386 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18390 * lpfc_sli4_resume_rpi - Resume the rpi assigned to a remote node
18391 * @ndlp: pointer to the node whose rpi is to be resumed.
18393 * This routine is invoked to issue a RESUME_RPI mailbox command for the
18394 * rpi assigned to @ndlp, optionally with a caller-supplied completion.
18397 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18398 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18400 LPFC_MBOXQ_t *mboxq;
18401 struct lpfc_hba *phba = ndlp->phba;
18404 /* The port is notified of the header region via a mailbox command. */
18405 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18409 /* Post all rpi memory regions to the port. */
18410 lpfc_resume_rpi(mboxq, ndlp);
18412 mboxq->mbox_cmpl = cmpl;
18413 mboxq->ctx_buf = arg;
18414 mboxq->ctx_ndlp = ndlp;
18416 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18417 mboxq->vport = ndlp->vport;
18418 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18419 if (rc == MBX_NOT_FINISHED) {
18420 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18421 "2010 Resume RPI Mailbox failed "
18422 "status %d, mbxStatus x%x\n", rc,
18423 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18424 mempool_free(mboxq, phba->mbox_mem_pool);
18431 * lpfc_sli4_init_vpi - Initialize a vpi with the port
18432 * @vport: Pointer to the vport for which the vpi is being initialized
18434 * This routine is invoked to activate a vpi with the port.
18438 * -Evalue otherwise
18441 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18443 LPFC_MBOXQ_t *mboxq;
18445 int retval = MBX_SUCCESS;
18447 struct lpfc_hba *phba = vport->phba;
18448 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18451 lpfc_init_vpi(phba, mboxq, vport->vpi);
18452 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18453 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18454 if (rc != MBX_SUCCESS) {
18455 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18456 "2022 INIT VPI Mailbox failed "
18457 "status %d, mbxStatus x%x\n", rc,
18458 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18461 if (rc != MBX_TIMEOUT)
18462 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18468 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18469 * @phba: pointer to lpfc hba data structure.
18470 * @mboxq: Pointer to mailbox object.
18472 * This routine is invoked to manually add a single FCF record. The caller
18473 * must pass a completely initialized FCF_Record. This routine takes
18474 * care of the nonembedded mailbox operations.
18477 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18480 union lpfc_sli4_cfg_shdr *shdr;
18481 uint32_t shdr_status, shdr_add_status;
18483 virt_addr = mboxq->sge_array->addr[0];
18484 /* The IOCTL status is embedded in the mailbox subheader. */
18485 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18487 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18489 if ((shdr_status || shdr_add_status) &&
18490 (shdr_status != STATUS_FCF_IN_USE))
18491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18492 "2558 ADD_FCF_RECORD mailbox failed with "
18493 "status x%x add_status x%x\n",
18494 shdr_status, shdr_add_status);
18496 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18500 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18501 * @phba: pointer to lpfc hba data structure.
18502 * @fcf_record: pointer to the initialized fcf record to add.
18504 * This routine is invoked to manually add a single FCF record. The caller
18505 * must pass a completely initialized FCF_Record. This routine takes
18506 * care of the nonembedded mailbox operations.
18509 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18512 LPFC_MBOXQ_t *mboxq;
18515 struct lpfc_mbx_sge sge;
18516 uint32_t alloc_len, req_len;
18519 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18521 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18522 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18526 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18529 /* Allocate DMA memory and set up the non-embedded mailbox command */
18530 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18531 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18532 req_len, LPFC_SLI4_MBX_NEMBED);
18533 if (alloc_len < req_len) {
18534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18535 "2523 Allocated DMA memory size (x%x) is "
18536 "less than the requested DMA memory "
18537 "size (x%x)\n", alloc_len, req_len);
18538 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18543 * Get the first SGE entry from the non-embedded DMA memory. This
18544 * routine only uses a single SGE.
18546 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18547 virt_addr = mboxq->sge_array->addr[0];
18549 * Configure the FCF record for FCFI 0. This is the driver's
18550 * hardcoded default and gets used in nonFIP mode.
18552 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18553 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18554 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18557 * Copy the fcf_index and the FCF Record Data. The data starts after
18558 * the FCoE header plus word10. The data copy needs to be endian
18559 * correct.
18561 bytep += sizeof(uint32_t);
18562 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18563 mboxq->vport = phba->pport;
18564 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18565 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18566 if (rc == MBX_NOT_FINISHED) {
18567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18568 "2515 ADD_FCF_RECORD mailbox failed with "
18569 "status 0x%x\n", rc);
18570 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18579 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18580 * @phba: pointer to lpfc hba data structure.
18581 * @fcf_record: pointer to the fcf record to write the default data.
18582 * @fcf_index: FCF table entry index.
18584 * This routine is invoked to build the driver's default FCF record. The
18585 * values used are hardcoded. This routine handles memory initialization.
18589 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18590 struct fcf_record *fcf_record,
18591 uint16_t fcf_index)
18593 memset(fcf_record, 0, sizeof(struct fcf_record));
18594 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18595 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18596 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18597 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18598 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18599 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18600 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18601 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18602 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18603 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18604 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18605 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18606 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18607 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18608 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18609 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18610 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18611 /* Set the VLAN bit map */
18612 if (phba->valid_vlan) {
18613 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18614 = 1 << (phba->vlan_id % 8);
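/*
 * The VLAN bitmap above packs one bit per VLAN ID, eight IDs per byte.
 * A hedged sketch of the matching membership test; vlan_is_set() is a
 * hypothetical helper.
 */
static bool vlan_is_set(uint8_t *bitmap, uint16_t vlan_id)
{
	return (bitmap[vlan_id / 8] & (1 << (vlan_id % 8))) != 0;
}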
18619 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18620 * @phba: pointer to lpfc hba data structure.
18621 * @fcf_index: FCF table entry offset.
18623 * This routine is invoked to scan the entire FCF table by reading FCF
18624 * record and processing it one at a time starting from the @fcf_index
18625 * for initial FCF discovery or fast FCF failover rediscovery.
18627 * Return 0 if the mailbox command is submitted successfully, nonzero
18628 * otherwise.
18631 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18634 LPFC_MBOXQ_t *mboxq;
18636 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18637 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18638 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18641 "2000 Failed to allocate mbox for "
18644 goto fail_fcf_scan;
18646 /* Construct the read FCF record mailbox command */
18647 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18650 goto fail_fcf_scan;
18652 /* Issue the mailbox command asynchronously */
18653 mboxq->vport = phba->pport;
18654 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18656 spin_lock_irq(&phba->hbalock);
18657 phba->hba_flag |= FCF_TS_INPROG;
18658 spin_unlock_irq(&phba->hbalock);
18660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18661 if (rc == MBX_NOT_FINISHED)
18664 /* Reset eligible FCF count for new scan */
18665 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18666 phba->fcf.eligible_fcf_cnt = 0;
18672 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18673 /* FCF scan failed, clear FCF_TS_INPROG flag */
18674 spin_lock_irq(&phba->hbalock);
18675 phba->hba_flag &= ~FCF_TS_INPROG;
18676 spin_unlock_irq(&phba->hbalock);
18682 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18683 * @phba: pointer to lpfc hba data structure.
18684 * @fcf_index: FCF table entry offset.
18686 * This routine is invoked to read an FCF record indicated by @fcf_index
18687 * and to use it for FLOGI roundrobin FCF failover.
18689 * Return 0 if the mailbox command is submitted successfully, nonzero
18690 * otherwise.
18693 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18696 LPFC_MBOXQ_t *mboxq;
18698 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18700 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18701 "2763 Failed to allocate mbox for "
18704 goto fail_fcf_read;
18706 /* Construct the read FCF record mailbox command */
18707 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18710 goto fail_fcf_read;
18712 /* Issue the mailbox command asynchronously */
18713 mboxq->vport = phba->pport;
18714 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18715 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18716 if (rc == MBX_NOT_FINISHED)
18722 if (error && mboxq)
18723 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18728 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18729 * @phba: pointer to lpfc hba data structure.
18730 * @fcf_index: FCF table entry offset.
18732 * This routine is invoked to read an FCF record indicated by @fcf_index to
18733 * determine whether it's eligible for FLOGI roundrobin failover list.
18735 * Return 0 if the mailbox command is submitted successfully, nonzero
18736 * otherwise.
18739 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18742 LPFC_MBOXQ_t *mboxq;
18744 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18746 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18747 "2758 Failed to allocate mbox for "
18750 goto fail_fcf_read;
18752 /* Construct the read FCF record mailbox command */
18753 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18756 goto fail_fcf_read;
18758 /* Issue the mailbox command asynchronously */
18759 mboxq->vport = phba->pport;
18760 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18761 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18762 if (rc == MBX_NOT_FINISHED)
18768 if (error && mboxq)
18769 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18774 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask from the next priority
18775 * @phba: pointer to the lpfc_hba struct for this port.
18776 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18777 * routine when the rr_bmask is empty. The FCF indices are put into the
18778 * rr_bmask based on their priority level, starting from the highest priority
18779 * to the lowest. The most likely FCF candidate will be in the highest
18780 * priority group. When this routine is called it searches the fcf_pri list
18781 * for the next lowest priority group and repopulates the rr_bmask with
18782 * only those fcf records.
18784 * 1=success 0=failure
18787 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18789 uint16_t next_fcf_pri;
18790 uint16_t last_index;
18791 struct lpfc_fcf_pri *fcf_pri;
18795 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18796 LPFC_SLI4_FCF_TBL_INDX_MAX);
18797 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18798 "3060 Last IDX %d\n", last_index);
18800 /* Verify the priority list has 2 or more entries */
18801 spin_lock_irq(&phba->hbalock);
18802 if (list_empty(&phba->fcf.fcf_pri_list) ||
18803 list_is_singular(&phba->fcf.fcf_pri_list)) {
18804 spin_unlock_irq(&phba->hbalock);
18805 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18806 "3061 Last IDX %d\n", last_index);
18807 return 0; /* Empty rr list */
18809 spin_unlock_irq(&phba->hbalock);
18813 * Clear the rr_bmask and set all of the bits that are at this
18816 memset(phba->fcf.fcf_rr_bmask, 0,
18817 sizeof(*phba->fcf.fcf_rr_bmask));
18818 spin_lock_irq(&phba->hbalock);
18819 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18820 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18823 * the first priority that has not failed FLOGI
18824 * will be the highest.
18827 next_fcf_pri = fcf_pri->fcf_rec.priority;
18828 spin_unlock_irq(&phba->hbalock);
18829 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18830 rc = lpfc_sli4_fcf_rr_index_set(phba,
18831 fcf_pri->fcf_rec.fcf_index);
18835 spin_lock_irq(&phba->hbalock);
18838 * if next_fcf_pri was not set above and the list is not empty then
18839 * we have failed flogis on all of them. So reset flogi failed
18840 * and start at the beginning.
18842 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18843 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18844 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18846 * the first priority that has not failed FLOGI
18847 * will be the highest.
18850 next_fcf_pri = fcf_pri->fcf_rec.priority;
18851 spin_unlock_irq(&phba->hbalock);
18852 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18853 rc = lpfc_sli4_fcf_rr_index_set(phba,
18854 fcf_pri->fcf_rec.fcf_index);
18858 spin_lock_irq(&phba->hbalock);
18862 spin_unlock_irq(&phba->hbalock);
18867 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18868 * @phba: pointer to lpfc hba data structure.
18870 * This routine is to get the next eligible FCF record index in a round
18871 * robin fashion. If the next eligible FCF record index equals the
18872 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18873 * shall be returned, otherwise, the next eligible FCF record's index
18874 * shall be returned.
18877 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18879 uint16_t next_fcf_index;
18882 /* Search start from next bit of currently registered FCF index */
18883 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18886 /* Determine the next fcf index to check */
18887 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18888 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18889 LPFC_SLI4_FCF_TBL_INDX_MAX,
18892 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18893 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18895 * If we have wrapped then we need to clear the bits that
18896 * have been tested so that we can detect when we should
18897 * change the priority level.
18899 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18900 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18904 /* Check roundrobin failover list empty condition */
18905 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18906 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18908 * If next fcf index is not found check if there are lower
18909 * priority level fcf's in the fcf_priority list.
18910 * Set up the rr_bmask with all of the available fcf bits
18911 * at that level and continue the selection process.
18913 if (lpfc_check_next_fcf_pri_level(phba))
18914 goto initial_priority;
18915 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18916 "2844 No roundrobin failover FCF available\n");
18918 return LPFC_FCOE_FCF_NEXT_NONE;
18921 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18922 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18923 LPFC_FCF_FLOGI_FAILED) {
18924 if (list_is_singular(&phba->fcf.fcf_pri_list))
18925 return LPFC_FCOE_FCF_NEXT_NONE;
18927 goto next_priority;
18930 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18931 "2845 Get next roundrobin failover FCF (x%x)\n",
18934 return next_fcf_index;
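/*
 * Self-contained model of the roundrobin scan above: search upward
 * from the bit after @cur, wrap once to the start, and report "none"
 * when the only remaining candidate is @cur itself. rr_next() is a
 * hypothetical helper; 0xFFFF mirrors LPFC_FCOE_FCF_NEXT_NONE.
 */
static uint16_t rr_next(unsigned long *bmask, uint16_t max, uint16_t cur)
{
	unsigned long next = find_next_bit(bmask, max, (cur + 1) % max);

	if (next >= max)		/* wrapped: rescan from index 0 */
		next = find_next_bit(bmask, max, 0);
	if (next >= max || next == cur)
		return 0xFFFF;		/* no other eligible FCF */
	return (uint16_t)next;
}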
18938 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18939 * @phba: pointer to lpfc hba data structure.
18941 * This routine sets the FCF record index in to the eligible bmask for
18942 * roundrobin failover search. It checks to make sure that the index
18943 * does not go beyond the range of the driver allocated bmask dimension
18944 * before setting the bit.
18946 * Returns 0 if the index bit was successfully set, otherwise it returns
18947 * -EINVAL.
18950 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18952 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18953 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18954 "2610 FCF (x%x) reached driver's book "
18955 "keeping dimension:x%x\n",
18956 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18959 /* Set the eligible FCF record index bmask */
18960 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18962 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18963 "2790 Set FCF (x%x) to roundrobin FCF failover "
18964 "bmask\n", fcf_index);
18970 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18971 * @phba: pointer to lpfc hba data structure.
18973 * This routine clears the FCF record index from the eligible bmask for
18974 * roundrobin failover search. It checks to make sure that the index
18975 * does not go beyond the range of the driver allocated bmask dimension
18976 * before clearing the bit.
18979 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18981 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18982 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18983 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18984 "2762 FCF (x%x) reached driver's book "
18985 "keeping dimension:x%x\n",
18986 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18989 /* Clear the eligible FCF record index bmask */
18990 spin_lock_irq(&phba->hbalock);
18991 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18993 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18994 list_del_init(&fcf_pri->list);
18998 spin_unlock_irq(&phba->hbalock);
18999 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19001 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19002 "2791 Clear FCF (x%x) from roundrobin failover "
19003 "bmask\n", fcf_index);
19007 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19008 * @phba: pointer to lpfc hba data structure.
19010 * This routine is the completion routine for the rediscover FCF table mailbox
19011 * command. On failure it falls back to retrying the current FCF or failing
19012 * over as a link down; on success it starts the FCF rediscovery wait timer.
19015 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19017 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19018 uint32_t shdr_status, shdr_add_status;
19020 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19022 shdr_status = bf_get(lpfc_mbox_hdr_status,
19023 &redisc_fcf->header.cfg_shdr.response);
19024 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19025 &redisc_fcf->header.cfg_shdr.response);
19026 if (shdr_status || shdr_add_status) {
19027 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19028 "2746 Requesting for FCF rediscovery failed "
19029 "status x%x add_status x%x\n",
19030 shdr_status, shdr_add_status);
19031 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19032 spin_lock_irq(&phba->hbalock);
19033 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19034 spin_unlock_irq(&phba->hbalock);
19036 * CVL event triggered FCF rediscover request failed;
19037 * as a last resort, re-try the currently registered FCF entry.
19039 lpfc_retry_pport_discovery(phba);
19041 spin_lock_irq(&phba->hbalock);
19042 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19043 spin_unlock_irq(&phba->hbalock);
19045 * DEAD FCF event triggered FCF rediscover request
19046 * failed; as a last resort, fail over as a link down
19047 * to FCF registration.
19049 lpfc_sli4_fcf_dead_failthrough(phba);
19052 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19053 "2775 Start FCF rediscover quiescent timer\n");
19055 * Start the FCF rediscovery wait timer for the pending FCF
19056 * before rescanning the FCF record table.
19058 lpfc_fcf_redisc_wait_start_timer(phba);
19061 mempool_free(mbox, phba->mbox_mem_pool);
19065 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19066 * @phba: pointer to lpfc hba data structure.
19068 * This routine is invoked to request rediscovery of the entire FCF table
19072 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19074 LPFC_MBOXQ_t *mbox;
19075 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19078 /* Cancel retry delay timers on all vports before FCF rediscover */
19079 lpfc_cancel_all_vport_retry_delay_timer(phba);
19081 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19083 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19084 "2745 Failed to allocate mbox for "
19085 "requesting FCF rediscover.\n");
19089 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19090 sizeof(struct lpfc_sli4_cfg_mhdr));
19091 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19092 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19093 length, LPFC_SLI4_MBX_EMBED);
19095 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19096 /* Set count to 0 for invalidating the entire FCF database */
19097 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19099 /* Issue the mailbox command asynchronously */
19100 mbox->vport = phba->pport;
19101 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19102 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19104 if (rc == MBX_NOT_FINISHED) {
19105 mempool_free(mbox, phba->mbox_mem_pool);
19112 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19113 * @phba: pointer to lpfc hba data structure.
19115 * This function is the failover routine as a last resort to the FCF DEAD
19116 * event when the driver failed to perform fast FCF failover.
19119 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19121 uint32_t link_state;
19124 * Last resort as FCF DEAD event failover will treat this as
19125 * a link down, but save the link state because we don't want
19126 * it to be changed to Link Down unless it is already down.
19128 link_state = phba->link_state;
19129 lpfc_linkdown(phba);
19130 phba->link_state = link_state;
19132 /* Unregister FCF if no devices connected to it */
19133 lpfc_unregister_unused_fcf(phba);
19137 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19138 * @phba: pointer to lpfc hba data structure.
19139 * @rgn23_data: pointer to configure region 23 data.
19141 * This function gets SLI3 port configuration region 23 data through the
19142 * memory dump mailbox command. When it successfully retrieves data, the
19143 * size of the data will be returned; otherwise, 0 will be returned.
19146 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19148 LPFC_MBOXQ_t *pmb = NULL;
19150 uint32_t offset = 0;
19156 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19159 "2600 failed to allocate mailbox memory\n");
19165 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19166 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19168 if (rc != MBX_SUCCESS) {
19169 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19170 "2601 failed to read config "
19171 "region 23, rc 0x%x Status 0x%x\n",
19172 rc, mb->mbxStatus);
19173 mb->un.varDmp.word_cnt = 0;
19176 * dump mem may return a zero when finished or we got a
19177 * mailbox error; either way we are done.
19179 if (mb->un.varDmp.word_cnt == 0)
19181 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19182 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19184 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19185 rgn23_data + offset,
19186 mb->un.varDmp.word_cnt);
19187 offset += mb->un.varDmp.word_cnt;
19188 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19190 mempool_free(pmb, phba->mbox_mem_pool);
19195 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19196 * @phba: pointer to lpfc hba data structure.
19197 * @rgn23_data: pointer to configure region 23 data.
19199 * This function gets SLI4 port configuration region 23 data through the
19200 * memory dump mailbox command. When it successfully retrieves data, the
19201 * size of the data will be returned; otherwise, 0 will be returned.
19204 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19206 LPFC_MBOXQ_t *mboxq = NULL;
19207 struct lpfc_dmabuf *mp = NULL;
19208 struct lpfc_mqe *mqe;
19209 uint32_t data_length = 0;
19215 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19217 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19218 "3105 failed to allocate mailbox memory\n");
19222 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19224 mqe = &mboxq->u.mqe;
19225 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19226 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19229 data_length = mqe->un.mb_words[5];
19230 if (data_length == 0)
19232 if (data_length > DMP_RGN23_SIZE) {
19236 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19238 mempool_free(mboxq, phba->mbox_mem_pool);
19240 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19243 return data_length;
19247 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19248 * @phba: pointer to lpfc hba data structure.
19250 * This function reads region 23 and parses the TLV for port status to
19251 * decide if the user disabled the port. If the TLV indicates the
19252 * port is disabled, the hba_flag is set accordingly.
19255 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19257 uint8_t *rgn23_data = NULL;
19258 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19259 uint32_t offset = 0;
19261 /* Get adapter Region 23 data */
19262 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19266 if (phba->sli_rev < LPFC_SLI_REV4)
19267 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19269 if_type = bf_get(lpfc_sli_intf_if_type,
19270 &phba->sli4_hba.sli_intf);
19271 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19273 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19279 /* Check the region signature first */
19280 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19282 "2619 Config region 23 has bad signature\n");
19287 /* Check the data structure version */
19288 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19290 "2620 Config region 23 has bad version\n");
19295 /* Parse TLV entries in the region */
19296 while (offset < data_size) {
19297 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19300 * If the TLV is not driver specific TLV or driver id is
19301 * not linux driver id, skip the record.
19303 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19304 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19305 (rgn23_data[offset + 3] != 0)) {
19306 offset += rgn23_data[offset + 1] * 4 + 4;
19310 /* Driver found a driver specific TLV in the config region */
19311 sub_tlv_len = rgn23_data[offset + 1] * 4;
19316 * Search for configured port state sub-TLV.
19318 while ((offset < data_size) &&
19319 (tlv_offset < sub_tlv_len)) {
19320 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19325 if (rgn23_data[offset] != PORT_STE_TYPE) {
19326 offset += rgn23_data[offset + 1] * 4 + 4;
19327 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19331 /* This HBA contains PORT_STE configured */
19332 if (!rgn23_data[offset + 2])
19333 phba->hba_flag |= LINK_DISABLED;
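/*
 * Illustrative layout sketch only, inferred from the parser above.
 * Each TLV record is a 4-byte header { type, length-in-words, ... }
 * followed by (length * 4) bytes of data, so records advance by
 * rgn23_data[offset + 1] * 4 + 4 bytes:
 *
 *      [ LPFC_REGION23_SIGNATURE          4 bytes            ]
 *      [ LPFC_REGION23_VERSION            1 byte             ]
 *      [ type | len | data[len * 4] ]     generic record
 *      [ DRIVER_SPECIFIC_TYPE | len | LINUX_DRIVER_ID | 0 ]  sub-TLVs follow
 *              [ PORT_STE_TYPE | len | state | ... ]  state == 0 -> disabled
 *      [ LPFC_REGION23_LAST_REC ]         terminator
 */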
19345 * lpfc_wr_object - write an object to the firmware
19346 * @phba: HBA structure that indicates port to create a queue on.
19347 * @dmabuf_list: list of dmabufs to write to the port.
19348 * @size: the total byte value of the objects to write to the port.
19349 * @offset: the current offset to be used to start the transfer.
19351 * This routine will create a wr_object mailbox command to send to the port.
19352 * The mailbox command will be constructed using the dma buffers described in
19353 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19354 * BDEs as the embedded mailbox can support. The @offset variable will be
19355 * used to indicate the starting offset of the transfer and will also return
19356 * the offset after the write object mailbox has completed. @size is used to
19357 * determine the end of the object and whether the eof bit should be set.
19359 * Returns 0 if successful, and @offset will contain the new offset to use
19360 * for the next write.
19361 * Return negative value for error cases.
19364 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19365 uint32_t size, uint32_t *offset)
19367 struct lpfc_mbx_wr_object *wr_object;
19368 LPFC_MBOXQ_t *mbox;
19370 uint32_t shdr_status, shdr_add_status, shdr_change_status;
19372 struct lpfc_dmabuf *dmabuf;
19373 uint32_t written = 0;
19374 bool check_change_status = false;
19376 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19380 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19381 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19382 sizeof(struct lpfc_mbx_wr_object) -
19383 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19385 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19386 wr_object->u.request.write_offset = *offset;
19387 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19388 wr_object->u.request.object_name[0] =
19389 cpu_to_le32(wr_object->u.request.object_name[0]);
19390 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19391 list_for_each_entry(dmabuf, dmabuf_list, list) {
19392 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19394 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19395 wr_object->u.request.bde[i].addrHigh =
19396 putPaddrHigh(dmabuf->phys);
19397 if (written + SLI4_PAGE_SIZE >= size) {
19398 wr_object->u.request.bde[i].tus.f.bdeSize =
19400 written += (size - written);
19401 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19402 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19403 check_change_status = true;
19405 wr_object->u.request.bde[i].tus.f.bdeSize =
19407 written += SLI4_PAGE_SIZE;
19411 wr_object->u.request.bde_count = i;
19412 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19413 if (!phba->sli4_hba.intr_enable)
19414 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19416 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19417 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19419 /* The IOCTL status is embedded in the mailbox subheader. */
19420 shdr_status = bf_get(lpfc_mbox_hdr_status,
19421 &wr_object->header.cfg_shdr.response);
19422 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19423 &wr_object->header.cfg_shdr.response);
19424 if (check_change_status) {
19425 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19426 &wr_object->u.response);
19427 switch (shdr_change_status) {
19428 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19429 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19430 "3198 Firmware write complete: System "
19431 "reboot required to instantiate\n");
19433 case (LPFC_CHANGE_STATUS_FW_RESET):
19434 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19435 "3199 Firmware write complete: Firmware"
19436 " reset required to instantiate\n");
19438 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19439 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19440 "3200 Firmware write complete: Port "
19441 "Migration or PCI Reset required to "
19444 case (LPFC_CHANGE_STATUS_PCI_RESET):
19445 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19446 "3201 Firmware write complete: PCI "
19447 "Reset required to instantiate\n");
19453 if (rc != MBX_TIMEOUT)
19454 mempool_free(mbox, phba->mbox_mem_pool);
19455 if (shdr_status || shdr_add_status || rc) {
19456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19457 "3025 Write Object mailbox failed with "
19458 "status x%x add_status x%x, mbx status x%x\n",
19459 shdr_status, shdr_add_status, rc);
19461 *offset = shdr_add_status;
19463 *offset += wr_object->u.response.actual_write_length;
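/*
 * Illustrative sketch only (not from the driver): lpfc_wr_object() is
 * meant to be called in a loop, carrying @offset forward until the
 * whole image is written. example_fill_dmabufs() is hypothetical; the
 * caller is assumed to refill @dmabuf_list with the next chunk of
 * image data on each pass. struct firmware is from <linux/firmware.h>.
 */
#if 0
static int example_write_fw(struct lpfc_hba *phba, const struct firmware *fw)
{
        LIST_HEAD(dmabuf_list);
        uint32_t offset = 0;
        int rc = 0;

        while (offset < fw->size) {
                example_fill_dmabufs(phba, &dmabuf_list, fw, offset);
                rc = lpfc_wr_object(phba, &dmabuf_list,
                                    fw->size - offset, &offset);
                if (rc)
                        break;  /* on failure, offset holds add_status */
        }
        return rc;
}
#endif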
19468 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19469 * @vport: pointer to vport data structure.
19471 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19472 * and REG_VPI mailbox commands associated with the vport. This function
19473 * is called when the driver wants to restart discovery of the vport due to
19474 * a Clear Virtual Link event.
19477 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19479 struct lpfc_hba *phba = vport->phba;
19480 LPFC_MBOXQ_t *mb, *nextmb;
19481 struct lpfc_dmabuf *mp;
19482 struct lpfc_nodelist *ndlp;
19483 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19484 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19485 LIST_HEAD(mbox_cmd_list);
19486 uint8_t restart_loop;
19488 /* Clean up internally queued mailbox commands with the vport */
19489 spin_lock_irq(&phba->hbalock);
19490 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19491 if (mb->vport != vport)
19494 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19495 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19498 list_del(&mb->list);
19499 list_add_tail(&mb->list, &mbox_cmd_list);
19501 /* Clean up active mailbox command with the vport */
19502 mb = phba->sli.mbox_active;
19503 if (mb && (mb->vport == vport)) {
19504 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19505 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19506 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19507 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19508 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19509 /* Put reference count for delayed processing */
19510 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19511 /* Unregister the RPI when mailbox complete */
19512 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19515 /* Cleanup any mailbox completions which are not yet processed */
19518 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19520 * If this mailbox has already been processed or it is
19521 * for another vport, ignore it.
19523 if ((mb->vport != vport) ||
19524 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19527 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19528 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19531 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19532 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19533 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19534 /* Unregister the RPI when mailbox complete */
19535 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19537 spin_unlock_irq(&phba->hbalock);
19538 spin_lock(shost->host_lock);
19539 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19540 spin_unlock(shost->host_lock);
19541 spin_lock_irq(&phba->hbalock);
19545 } while (restart_loop);
19547 spin_unlock_irq(&phba->hbalock);
19549 /* Release the cleaned-up mailbox commands */
19550 while (!list_empty(&mbox_cmd_list)) {
19551 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19552 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19553 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19555 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19558 mb->ctx_buf = NULL;
19559 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19560 mb->ctx_ndlp = NULL;
19562 spin_lock(shost->host_lock);
19563 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19564 spin_unlock(shost->host_lock);
19565 lpfc_nlp_put(ndlp);
19568 mempool_free(mb, phba->mbox_mem_pool);
19571 /* Release the ndlp with the cleaned-up active mailbox command */
19572 if (act_mbx_ndlp) {
19573 spin_lock(shost->host_lock);
19574 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19575 spin_unlock(shost->host_lock);
19576 lpfc_nlp_put(act_mbx_ndlp);
19581 * lpfc_drain_txq - Drain the txq
19582 * @phba: Pointer to HBA context object.
19584 * This function attempts to submit IOCBs on the txq
19585 * to the adapter. For SLI4 adapters, the txq contains
19586 * ELS IOCBs that have been deferred because there
19587 * are no SGLs available. This congestion can occur with large
19588 * vport counts during node discovery.
19592 lpfc_drain_txq(struct lpfc_hba *phba)
19594 LIST_HEAD(completions);
19595 struct lpfc_sli_ring *pring;
19596 struct lpfc_iocbq *piocbq = NULL;
19597 unsigned long iflags = 0;
19598 char *fail_msg = NULL;
19599 struct lpfc_sglq *sglq;
19600 union lpfc_wqe128 wqe;
19601 uint32_t txq_cnt = 0;
19602 struct lpfc_queue *wq;
19604 if (phba->link_flag & LS_MDS_LOOPBACK) {
19605 /* MDS WQEs are posted only to the first WQ */
19606 wq = phba->sli4_hba.hdwq[0].io_wq;
19611 wq = phba->sli4_hba.els_wq;
19614 pring = lpfc_phba_elsring(phba);
19617 if (unlikely(!pring) || list_empty(&pring->txq))
19620 spin_lock_irqsave(&pring->ring_lock, iflags);
19621 list_for_each_entry(piocbq, &pring->txq, list) {
19625 if (txq_cnt > pring->txq_max)
19626 pring->txq_max = txq_cnt;
19628 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19630 while (!list_empty(&pring->txq)) {
19631 spin_lock_irqsave(&pring->ring_lock, iflags);
19633 piocbq = lpfc_sli_ringtx_get(phba, pring);
19635 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19637 "2823 txq empty and txq_cnt is %d\n ",
19641 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19643 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19644 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19649 /* The xri and iocb resources are secured;
19650 * attempt to issue the request
19652 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19653 piocbq->sli4_xritag = sglq->sli4_xritag;
19654 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19655 fail_msg = "to convert bpl to sgl";
19656 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19657 fail_msg = "to convert iocb to wqe";
19658 else if (lpfc_sli4_wq_put(wq, &wqe))
19659 fail_msg = " - Wq is full";
19661 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19664 /* Failed means we can't issue and need to cancel */
19665 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19666 "2822 IOCB failed %s iotag 0x%x "
19669 piocbq->iotag, piocbq->sli4_xritag);
19670 list_add_tail(&piocbq->list, &completions);
19672 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19675 /* Cancel all the IOCBs that cannot be issued */
19676 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19677 IOERR_SLI_ABORTED);
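/*
 * Illustrative sketch only: lpfc_drain_txq() is the retry half of the
 * deferral described above. Once ELS sglqs are freed, a caller can
 * resubmit whatever was parked on the txq:
 */
#if 0
static void example_retry_deferred_els(struct lpfc_hba *phba)
{
        /* submit as many deferred ELS IOCBs as free sglqs now allow */
        lpfc_drain_txq(phba);
}
#endif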
19683 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19684 * @phba: Pointer to HBA context object.
19685 * @pwqeq: Pointer to command WQE.
19686 * @sglq: Pointer to the scatter gather queue object.
19688 * This routine converts the bpl or bde that is in the WQE
19689 * to a sgl list for the sli4 hardware. The physical address
19690 * of the bpl/bde is converted back to a virtual address.
19691 * If the WQE contains a BPL then the list of BDEs is
19692 * converted to sli4_sges. If the WQE contains a single
19693 * BDE then it is converted to a single sli4_sge.
19694 * The WQE is still in cpu endianness so the contents of
19695 * the bpl can be used without byte swapping.
19697 * Returns valid XRI = Success, NO_XRI = Failure.
19700 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19701 struct lpfc_sglq *sglq)
19703 uint16_t xritag = NO_XRI;
19704 struct ulp_bde64 *bpl = NULL;
19705 struct ulp_bde64 bde;
19706 struct sli4_sge *sgl = NULL;
19707 struct lpfc_dmabuf *dmabuf;
19708 union lpfc_wqe128 *wqe;
19711 uint32_t offset = 0; /* accumulated offset in the sg request list */
19712 int inbound = 0; /* number of sg reply entries inbound from firmware */
19715 if (!pwqeq || !sglq)
19718 sgl = (struct sli4_sge *)sglq->sgl;
19720 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19722 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19723 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19724 return sglq->sli4_xritag;
19725 numBdes = pwqeq->rsvd2;
19727 /* The addrHigh and addrLow fields within the WQE
19728 * have not been byteswapped yet so there is no
19729 * need to swap them back.
19731 if (pwqeq->context3)
19732 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19736 bpl = (struct ulp_bde64 *)dmabuf->virt;
19740 for (i = 0; i < numBdes; i++) {
19741 /* Should already be byte swapped. */
19742 sgl->addr_hi = bpl->addrHigh;
19743 sgl->addr_lo = bpl->addrLow;
19745 sgl->word2 = le32_to_cpu(sgl->word2);
19746 if ((i+1) == numBdes)
19747 bf_set(lpfc_sli4_sge_last, sgl, 1);
19749 bf_set(lpfc_sli4_sge_last, sgl, 0);
19750 /* swap the size field back to the cpu so we
19751 * can assign it to the sgl.
19753 bde.tus.w = le32_to_cpu(bpl->tus.w);
19754 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19755 /* The offsets in the sgl need to be accumulated
19756 * separately for the request and reply lists.
19757 * The request is always first, the reply follows.
19760 case CMD_GEN_REQUEST64_WQE:
19761 /* add up the reply sg entries */
19762 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19764 /* first inbound? reset the offset */
19767 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19768 bf_set(lpfc_sli4_sge_type, sgl,
19769 LPFC_SGE_TYPE_DATA);
19770 offset += bde.tus.f.bdeSize;
19772 case CMD_FCP_TRSP64_WQE:
19773 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19774 bf_set(lpfc_sli4_sge_type, sgl,
19775 LPFC_SGE_TYPE_DATA);
19777 case CMD_FCP_TSEND64_WQE:
19778 case CMD_FCP_TRECEIVE64_WQE:
19779 bf_set(lpfc_sli4_sge_type, sgl,
19780 bpl->tus.f.bdeFlags);
19784 offset += bde.tus.f.bdeSize;
19785 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19788 sgl->word2 = cpu_to_le32(sgl->word2);
19792 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19793 /* The addrHigh and addrLow fields of the BDE have not
19794 * been byteswapped yet so they need to be swapped
19795 * before putting them in the sgl.
19797 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19798 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19799 sgl->word2 = le32_to_cpu(sgl->word2);
19800 bf_set(lpfc_sli4_sge_last, sgl, 1);
19801 sgl->word2 = cpu_to_le32(sgl->word2);
19802 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19804 return sglq->sli4_xritag;
19808 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19809 * @phba: Pointer to HBA context object.
19810 * @qp: Pointer to the hardware queue on which to post the WQE.
19811 * @pwqe: Pointer to command WQE.
19814 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19815 struct lpfc_iocbq *pwqe)
19817 union lpfc_wqe128 *wqe = &pwqe->wqe;
19818 struct lpfc_nvmet_rcv_ctx *ctxp;
19819 struct lpfc_queue *wq;
19820 struct lpfc_sglq *sglq;
19821 struct lpfc_sli_ring *pring;
19822 unsigned long iflags;
19825 /* NVME_LS and NVME_LS ABTS requests. */
19826 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19827 pring = phba->sli4_hba.nvmels_wq->pring;
19828 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19830 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19832 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19835 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19836 pwqe->sli4_xritag = sglq->sli4_xritag;
19837 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19838 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19841 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19842 pwqe->sli4_xritag);
19843 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19845 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19849 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19850 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19852 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
19856 /* NVME_FCREQ and NVME_ABTS requests */
19857 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19858 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19862 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19864 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19866 ret = lpfc_sli4_wq_put(wq, wqe);
19868 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19871 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19872 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19874 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
19878 /* NVMET requests */
19879 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19880 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19884 ctxp = pwqe->context2;
19885 sglq = ctxp->ctxbuf->sglq;
19886 if (pwqe->sli4_xritag == NO_XRI) {
19887 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19888 pwqe->sli4_xritag = sglq->sli4_xritag;
19890 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19891 pwqe->sli4_xritag);
19892 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19894 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19896 ret = lpfc_sli4_wq_put(wq, wqe);
19898 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19901 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19902 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19904 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
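/*
 * Illustrative sketch only: callers select the hardware queue and tag
 * the request type in iocb_flag before handing the WQE to the
 * dispatcher above:
 */
#if 0
static int example_issue_nvme_cmd(struct lpfc_hba *phba, u32 hwqid,
                                  struct lpfc_iocbq *pwqe)
{
        struct lpfc_sli4_hdw_queue *qp = &phba->sli4_hba.hdwq[hwqid];

        pwqe->iocb_flag |= LPFC_IO_NVME;        /* fast-path NVME FCREQ */
        return lpfc_sli4_issue_wqe(phba, qp, pwqe);
}
#endif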
19910 #ifdef LPFC_MXP_STAT
19912 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
19913 * @phba: pointer to lpfc hba data structure.
19914 * @hwqid: id of the HWQ the snapshot belongs to.
19916 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
19917 * once a test case has been running for 15 seconds.
19919 * The user should call lpfc_debugfs_multixripools_write before running a test
19920 * case to clear stat_snapshot_taken, then start the test case. While the test
19921 * case is running, stat_snapshot_taken is incremented by 1 every time this
19922 * routine is called from the heartbeat timer. When stat_snapshot_taken is
19923 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
19925 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19927 struct lpfc_sli4_hdw_queue *qp;
19928 struct lpfc_multixri_pool *multixri_pool;
19929 struct lpfc_pvt_pool *pvt_pool;
19930 struct lpfc_pbl_pool *pbl_pool;
19933 qp = &phba->sli4_hba.hdwq[hwqid];
19934 multixri_pool = qp->p_multixri_pool;
19935 if (!multixri_pool)
19938 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19939 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19940 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19941 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
19943 multixri_pool->stat_pbl_count = pbl_pool->count;
19944 multixri_pool->stat_pvt_count = pvt_pool->count;
19945 multixri_pool->stat_busy_count = txcmplq_cnt;
19948 multixri_pool->stat_snapshot_taken++;
19953 * lpfc_adjust_pvt_pool_count - Adjust private pool count
19954 * @phba: pointer to lpfc hba data structure.
19955 * @hwqid: id of the HWQ to adjust.
19957 * This routine moves some XRIs from private to public pool when private pool
19960 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
19962 struct lpfc_multixri_pool *multixri_pool;
19964 u32 prev_io_req_count;
19966 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
19967 if (!multixri_pool)
19969 io_req_count = multixri_pool->io_req_count;
19970 prev_io_req_count = multixri_pool->prev_io_req_count;
19972 if (prev_io_req_count != io_req_count) {
19973 /* Private pool is busy */
19974 multixri_pool->prev_io_req_count = io_req_count;
19976 /* Private pool is not busy.
19977 * Move XRIs from private to public pool.
19979 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19984 * lpfc_adjust_high_watermark - Adjust high watermark
19985 * @phba: pointer to lpfc hba data structure.
19986 * @hwqid: id of the HWQ to adjust.
19988 * This routine sets the high watermark to the number of outstanding XRIs,
19989 * but makes sure the new value is between xri_limit/2 and xri_limit.
19991 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19999 struct lpfc_multixri_pool *multixri_pool;
20000 struct lpfc_sli4_hdw_queue *qp;
20002 qp = &phba->sli4_hba.hdwq[hwqid];
20003 multixri_pool = qp->p_multixri_pool;
20004 if (!multixri_pool)
20006 xri_limit = multixri_pool->xri_limit;
20008 watermark_max = xri_limit;
20009 watermark_min = xri_limit / 2;
20011 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20012 abts_io_bufs = qp->abts_scsi_io_bufs;
20013 abts_io_bufs += qp->abts_nvme_io_bufs;
20015 new_watermark = txcmplq_cnt + abts_io_bufs;
20016 new_watermark = min(watermark_max, new_watermark);
20017 new_watermark = max(watermark_min, new_watermark);
20018 multixri_pool->pvt_pool.high_watermark = new_watermark;
20020 #ifdef LPFC_MXP_STAT
20021 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20027 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20028 * @phba: pointer to lpfc hba data structure.
20029 * @hwqid: id of the HWQ whose pools are rebalanced.
20031 * This routine is called from the heartbeat timer when pvt_pool is idle.
20032 * All free XRIs are moved from private to public pool on hwqid in two steps.
20033 * The first step moves (all - low_watermark) XRIs.
20034 * The second step moves the rest of the XRIs.
20036 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20038 struct lpfc_pbl_pool *pbl_pool;
20039 struct lpfc_pvt_pool *pvt_pool;
20040 struct lpfc_sli4_hdw_queue *qp;
20041 struct lpfc_io_buf *lpfc_ncmd;
20042 struct lpfc_io_buf *lpfc_ncmd_next;
20043 unsigned long iflag;
20044 struct list_head tmp_list;
20047 qp = &phba->sli4_hba.hdwq[hwqid];
20048 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20049 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20052 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20053 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20055 if (pvt_pool->count > pvt_pool->low_watermark) {
20056 /* Step 1: move (all - low_watermark) from pvt_pool
20060 /* Move low watermark of bufs from pvt_pool to tmp_list */
20061 INIT_LIST_HEAD(&tmp_list);
20062 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20063 &pvt_pool->list, list) {
20064 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20066 if (tmp_count >= pvt_pool->low_watermark)
20070 /* Move all bufs from pvt_pool to pbl_pool */
20071 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20073 /* Move all bufs from tmp_list to pvt_pool */
20074 list_splice(&tmp_list, &pvt_pool->list);
20076 pbl_pool->count += (pvt_pool->count - tmp_count);
20077 pvt_pool->count = tmp_count;
20079 /* Step 2: move the rest from pvt_pool to pbl_pool */
20080 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20081 pbl_pool->count += pvt_pool->count;
20082 pvt_pool->count = 0;
20085 spin_unlock(&pvt_pool->lock);
20086 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20090 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20091 * @phba: pointer to lpfc hba data structure
20092 * @pbl_pool: specified public free XRI pool
20093 * @pvt_pool: specified private free XRI pool
20094 * @count: number of XRIs to move
20096 * This routine tries to move some free common bufs from the specified pbl_pool
20097 * to the specified pvt_pool. It might move fewer than count XRIs if there are
20098 * not enough in the public pool.
20101 * true - if XRIs are successfully moved from the specified pbl_pool to the
20102 * specified pvt_pool
20103 * false - if the specified pbl_pool is empty or locked by someone else
20106 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20107 struct lpfc_pbl_pool *pbl_pool,
20108 struct lpfc_pvt_pool *pvt_pool, u32 count)
20110 struct lpfc_io_buf *lpfc_ncmd;
20111 struct lpfc_io_buf *lpfc_ncmd_next;
20112 unsigned long iflag;
20115 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20117 if (pbl_pool->count) {
20118 /* Move a batch of XRIs from public to private pool */
20119 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20120 list_for_each_entry_safe(lpfc_ncmd,
20124 list_move_tail(&lpfc_ncmd->list,
20133 spin_unlock(&pvt_pool->lock);
20134 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20137 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20144 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20145 * @phba: pointer to lpfc hba data structure.
20146 * @hwqid: id of the HWQ whose private pool is refilled.
20147 * @count: number of XRIs to move
20149 * This routine tries to find some free common bufs in one of the public pools
20150 * in a round robin fashion. The search starts from the local hwqid, then the
20151 * HWQ found last time (rrb_next_hwqid). Once a public pool is found,
20152 * a batch of free common bufs is moved to the private pool on hwqid.
20153 * It might move fewer than count XRIs if there are not enough in the public pool.
20155 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20157 struct lpfc_multixri_pool *multixri_pool;
20158 struct lpfc_multixri_pool *next_multixri_pool;
20159 struct lpfc_pvt_pool *pvt_pool;
20160 struct lpfc_pbl_pool *pbl_pool;
20161 struct lpfc_sli4_hdw_queue *qp;
20166 qp = &phba->sli4_hba.hdwq[hwqid];
20167 multixri_pool = qp->p_multixri_pool;
20168 pvt_pool = &multixri_pool->pvt_pool;
20169 pbl_pool = &multixri_pool->pbl_pool;
20171 /* Check if local pbl_pool is available */
20172 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20174 #ifdef LPFC_MXP_STAT
20175 multixri_pool->local_pbl_hit_count++;
20180 hwq_count = phba->cfg_hdw_queue;
20182 /* Get the next hwqid which was found last time */
20183 next_hwqid = multixri_pool->rrb_next_hwqid;
20186 /* Go to next hwq */
20187 next_hwqid = (next_hwqid + 1) % hwq_count;
20189 next_multixri_pool =
20190 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20191 pbl_pool = &next_multixri_pool->pbl_pool;
20193 /* Check if the public free xri pool is available */
20194 ret = _lpfc_move_xri_pbl_to_pvt(
20195 phba, qp, pbl_pool, pvt_pool, count);
20197 /* Exit while-loop if success or all hwqid are checked */
20198 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20200 /* Starting point for the next time */
20201 multixri_pool->rrb_next_hwqid = next_hwqid;
20204 /* stats: all public pools are empty */
20205 multixri_pool->pbl_empty_count++;
20208 #ifdef LPFC_MXP_STAT
20210 if (next_hwqid == hwqid)
20211 multixri_pool->local_pbl_hit_count++;
20213 multixri_pool->other_pbl_hit_count++;
20219 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20220 * @phba: pointer to lpfc hba data structure.
20221 * @hwqid: id of the HWQ whose private pool is replenished.
20223 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
20226 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20228 struct lpfc_multixri_pool *multixri_pool;
20229 struct lpfc_pvt_pool *pvt_pool;
20231 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20232 pvt_pool = &multixri_pool->pvt_pool;
20234 if (pvt_pool->count < pvt_pool->low_watermark)
20235 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
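/*
 * Illustrative flow sketch only: the multi-XRI pool helpers above are
 * assumed to cooperate roughly as follows on each hardware queue --
 *
 *      heartbeat timer:
 *              lpfc_adjust_pvt_pool_count()   idle pvt_pool -> pbl_pool
 *              lpfc_adjust_high_watermark()   clamp hwm to [limit/2, limit]
 *      submit path:
 *              lpfc_get_io_buf()
 *                      -> lpfc_move_xri_pbl_to_pvt() when pvt_pool is empty
 *              lpfc_keep_pvt_pool_above_lowwm() tops up below low_watermark
 *      completion path:
 *              lpfc_release_io_buf()
 *                      -> pvt_pool while below high watermark, else pbl_pool
 */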
20239 * lpfc_release_io_buf - Return one IO buf back to free pool
20240 * @phba: pointer to lpfc hba data structure.
20241 * @lpfc_ncmd: IO buf to be returned.
20242 * @qp: pointer to the HWQ the buffer belongs to.
20244 * This routine returns one IO buf back to the free pool. If this is an urgent
20245 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
20246 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20247 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20248 * lpfc_io_buf_list_put.
20250 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20251 struct lpfc_sli4_hdw_queue *qp)
20253 unsigned long iflag;
20254 struct lpfc_pbl_pool *pbl_pool;
20255 struct lpfc_pvt_pool *pvt_pool;
20256 struct lpfc_epd_pool *epd_pool;
20262 /* MUST zero fields if buffer is reused by another protocol */
20263 lpfc_ncmd->nvmeCmd = NULL;
20264 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20265 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20267 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20268 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20269 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20271 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20272 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20274 if (phba->cfg_xri_rebalancing) {
20275 if (lpfc_ncmd->expedite) {
20276 /* Return to expedite pool */
20277 epd_pool = &phba->epd_pool;
20278 spin_lock_irqsave(&epd_pool->lock, iflag);
20279 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20281 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20285 /* Avoid invalid access if an IO sneaks in and is being rejected
20286 * just _after_ xri pools are destroyed in lpfc_offline.
20287 * Nothing much can be done at this point.
20289 if (!qp->p_multixri_pool)
20292 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20293 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20295 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20296 abts_io_bufs = qp->abts_scsi_io_bufs;
20297 abts_io_bufs += qp->abts_nvme_io_bufs;
20299 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20300 xri_limit = qp->p_multixri_pool->xri_limit;
20302 #ifdef LPFC_MXP_STAT
20303 if (xri_owned <= xri_limit)
20304 qp->p_multixri_pool->below_limit_count++;
20306 qp->p_multixri_pool->above_limit_count++;
20309 /* XRI goes to either public or private free xri pool
20310 * based on watermark and xri_limit
20312 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20313 (xri_owned < xri_limit &&
20314 pvt_pool->count < pvt_pool->high_watermark)) {
20315 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20316 qp, free_pvt_pool);
20317 list_add_tail(&lpfc_ncmd->list,
20320 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20322 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20323 qp, free_pub_pool);
20324 list_add_tail(&lpfc_ncmd->list,
20327 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20330 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20332 list_add_tail(&lpfc_ncmd->list,
20333 &qp->lpfc_io_buf_list_put);
20335 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20341 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20342 * @phba: pointer to lpfc hba data structure.
20343 * @pvt_pool: pointer to private pool data structure.
20344 * @ndlp: pointer to lpfc nodelist data structure.
20346 * This routine tries to get one free IO buf from private pool.
20349 * pointer to one free IO buf - if private pool is not empty
20350 * NULL - if private pool is empty
20352 static struct lpfc_io_buf *
20353 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20354 struct lpfc_sli4_hdw_queue *qp,
20355 struct lpfc_pvt_pool *pvt_pool,
20356 struct lpfc_nodelist *ndlp)
20358 struct lpfc_io_buf *lpfc_ncmd;
20359 struct lpfc_io_buf *lpfc_ncmd_next;
20360 unsigned long iflag;
20362 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20363 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20364 &pvt_pool->list, list) {
20365 if (lpfc_test_rrq_active(
20366 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20368 list_del(&lpfc_ncmd->list);
20370 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20373 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20379 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20380 * @phba: pointer to lpfc hba data structure.
20382 * This routine tries to get one free IO buf from expedite pool.
20385 * pointer to one free IO buf - if expedite pool is not empty
20386 * NULL - if expedite pool is empty
20388 static struct lpfc_io_buf *
20389 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20391 struct lpfc_io_buf *lpfc_ncmd;
20392 struct lpfc_io_buf *lpfc_ncmd_next;
20393 unsigned long iflag;
20394 struct lpfc_epd_pool *epd_pool;
20396 epd_pool = &phba->epd_pool;
20399 spin_lock_irqsave(&epd_pool->lock, iflag);
20400 if (epd_pool->count > 0) {
20401 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20402 &epd_pool->list, list) {
20403 list_del(&lpfc_ncmd->list);
20408 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20414 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
20415 * @phba: pointer to lpfc hba data structure.
20416 * @ndlp: pointer to lpfc nodelist data structure.
20417 * @hwqid: id of the HWQ to allocate from
20418 * @expedite: 1 means this request is urgent.
20420 * This routine will do the following actions and then return a pointer to
20423 * 1. If the private free xri pool is empty, move some XRIs from public to
20425 * 2. Get one XRI from private free xri pool.
20426 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20427 * get one free xri from expedite pool.
20429 * Note: ndlp is only used on SCSI side for RRQ testing.
20430 * The caller should pass NULL for ndlp on NVME side.
20433 * pointer to one free IO buf - if private pool is not empty
20434 * NULL - if private pool is empty
20436 static struct lpfc_io_buf *
20437 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20438 struct lpfc_nodelist *ndlp,
20439 int hwqid, int expedite)
20441 struct lpfc_sli4_hdw_queue *qp;
20442 struct lpfc_multixri_pool *multixri_pool;
20443 struct lpfc_pvt_pool *pvt_pool;
20444 struct lpfc_io_buf *lpfc_ncmd;
20446 qp = &phba->sli4_hba.hdwq[hwqid];
20448 multixri_pool = qp->p_multixri_pool;
20449 pvt_pool = &multixri_pool->pvt_pool;
20450 multixri_pool->io_req_count++;
20452 /* If pvt_pool is empty, move some XRIs from public to private pool */
20453 if (pvt_pool->count == 0)
20454 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20456 /* Get one XRI from private free xri pool */
20457 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20460 lpfc_ncmd->hdwq = qp;
20461 lpfc_ncmd->hdwq_no = hwqid;
20462 } else if (expedite) {
20463 /* If we fail to get one from pvt_pool and this is an expedite
20464 * request, get one free xri from expedite pool.
20466 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20472 static inline struct lpfc_io_buf *
20473 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20475 struct lpfc_sli4_hdw_queue *qp;
20476 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20478 qp = &phba->sli4_hba.hdwq[idx];
20479 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20480 &qp->lpfc_io_buf_list_get, list) {
20481 if (lpfc_test_rrq_active(phba, ndlp,
20482 lpfc_cmd->cur_iocbq.sli4_lxritag))
20485 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20488 list_del_init(&lpfc_cmd->list);
20490 lpfc_cmd->hdwq = qp;
20491 lpfc_cmd->hdwq_no = idx;
20498 * lpfc_get_io_buf - Get one IO buffer from free pool
20499 * @phba: The HBA for which this call is being executed.
20500 * @ndlp: pointer to lpfc nodelist data structure.
20501 * @hwqid: id of the HWQ to allocate from
20502 * @expedite: 1 means this request is urgent.
20504 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
20505 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
20506 * it removes an IO buffer from the head of @hdwq io_buf_list and returns it to the caller.
20508 * Note: ndlp is only used on SCSI side for RRQ testing.
20509 * The caller should pass NULL for ndlp on NVME side.
20513 * Pointer to lpfc_io_buf - Success
20515 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20516 struct lpfc_nodelist *ndlp,
20517 u32 hwqid, int expedite)
20519 struct lpfc_sli4_hdw_queue *qp;
20520 unsigned long iflag;
20521 struct lpfc_io_buf *lpfc_cmd;
20523 qp = &phba->sli4_hba.hdwq[hwqid];
20526 if (phba->cfg_xri_rebalancing)
20527 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20528 phba, ndlp, hwqid, expedite);
20530 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20531 qp, alloc_xri_get);
20532 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20533 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20535 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20536 qp, alloc_xri_put);
20537 list_splice(&qp->lpfc_io_buf_list_put,
20538 &qp->lpfc_io_buf_list_get);
20539 qp->get_io_bufs += qp->put_io_bufs;
20540 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20541 qp->put_io_bufs = 0;
20542 spin_unlock(&qp->io_buf_list_put_lock);
20543 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20545 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20547 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
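/*
 * Illustrative sketch only: a typical IO path pairs lpfc_get_io_buf()
 * with lpfc_release_io_buf() on the same hardware queue. ndlp is only
 * needed on the SCSI side for RRQ testing; NVME callers pass NULL.
 */
#if 0
static void example_io_buf_cycle(struct lpfc_hba *phba, u32 hwqid)
{
        struct lpfc_io_buf *buf;

        buf = lpfc_get_io_buf(phba, NULL, hwqid, 0);
        if (!buf)
                return; /* pools exhausted and not an expedite request */
        /* ... build and issue the command using buf->cur_iocbq ... */
        lpfc_release_io_buf(phba, buf, buf->hdwq);
}
#endif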
20554 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
20555 * @phba: The HBA for which this call is being executed.
20556 * @lpfc_buf: IO buf structure to append the SGL chunk
20558 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
20559 * and will allocate an SGL chunk if the pool is empty.
20563 * Pointer to sli4_hybrid_sgl - Success
20565 struct sli4_hybrid_sgl *
20566 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20568 struct sli4_hybrid_sgl *list_entry = NULL;
20569 struct sli4_hybrid_sgl *tmp = NULL;
20570 struct sli4_hybrid_sgl *allocated_sgl = NULL;
20571 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20572 struct list_head *buf_list = &hdwq->sgl_list;
20573 unsigned long iflags;
20575 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20577 if (likely(!list_empty(buf_list))) {
20578 /* break off 1 chunk from the sgl_list */
20579 list_for_each_entry_safe(list_entry, tmp,
20580 buf_list, list_node) {
20581 list_move_tail(&list_entry->list_node,
20582 &lpfc_buf->dma_sgl_xtra_list);
20586 /* allocate more */
20587 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20588 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20589 cpu_to_node(hdwq->io_wq->chann));
20591 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20592 "8353 error kmalloc memory for HDWQ "
20594 lpfc_buf->hdwq_no, __func__);
20598 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20599 GFP_ATOMIC, &tmp->dma_phys_sgl);
20600 if (!tmp->dma_sgl) {
20601 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20602 "8354 error pool_alloc memory for HDWQ "
20604 lpfc_buf->hdwq_no, __func__);
20609 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20610 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20613 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20614 struct sli4_hybrid_sgl,
20617 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20619 return allocated_sgl;
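/*
 * Illustrative sketch only: an IO whose scatter list overflows the
 * embedded SGL can borrow an extra chunk from the hdwq pool above.
 * The chunk rides on lpfc_buf->dma_sgl_xtra_list and is handed back by
 * lpfc_put_sgl_per_hdwq() (the release path does this automatically
 * when the list is non-empty).
 */
#if 0
static int example_extend_sgl(struct lpfc_hba *phba,
                              struct lpfc_io_buf *lpfc_buf)
{
        struct sli4_hybrid_sgl *sgl_xtra;

        sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
        if (!sgl_xtra)
                return -ENOMEM;
        /* ... chain sgl_xtra->dma_sgl / sgl_xtra->dma_phys_sgl ... */
        return 0;
}
#endif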
20623 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
20624 * @phba: The HBA for which this call is being executed.
20625 * @lpfc_buf: IO buf structure with the SGL chunk
20627 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
20634 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20637 struct sli4_hybrid_sgl *list_entry = NULL;
20638 struct sli4_hybrid_sgl *tmp = NULL;
20639 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20640 struct list_head *buf_list = &hdwq->sgl_list;
20641 unsigned long iflags;
20643 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20645 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
20646 list_for_each_entry_safe(list_entry, tmp,
20647 &lpfc_buf->dma_sgl_xtra_list,
20649 list_move_tail(&list_entry->list_node,
20656 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20661 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
20662 * @phba: phba object
20663 * @hdwq: hdwq to cleanup sgl buff resources on
20665 * This routine frees all SGL chunks of hdwq SGL chunk pool.
20671 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
20672 struct lpfc_sli4_hdw_queue *hdwq)
20674 struct list_head *buf_list = &hdwq->sgl_list;
20675 struct sli4_hybrid_sgl *list_entry = NULL;
20676 struct sli4_hybrid_sgl *tmp = NULL;
20677 unsigned long iflags;
20679 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20681 /* Free sgl pool */
20682 list_for_each_entry_safe(list_entry, tmp,
20683 buf_list, list_node) {
20684 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
20685 list_entry->dma_sgl,
20686 list_entry->dma_phys_sgl);
20687 list_del(&list_entry->list_node);
20691 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20695 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
20696 * @phba: The HBA for which this call is being executed.
20697 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
20699 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
20700 * and will allocate a CMD/RSP buffer if the pool is empty.
20704 * Pointer to fcp_cmd_rsp_buf - Success
20706 struct fcp_cmd_rsp_buf *
20707 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20708 struct lpfc_io_buf *lpfc_buf)
20710 struct fcp_cmd_rsp_buf *list_entry = NULL;
20711 struct fcp_cmd_rsp_buf *tmp = NULL;
20712 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
20713 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20714 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20715 unsigned long iflags;
20717 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20719 if (likely(!list_empty(buf_list))) {
20720 /* break off 1 chunk from the list */
20721 list_for_each_entry_safe(list_entry, tmp,
20724 list_move_tail(&list_entry->list_node,
20725 &lpfc_buf->dma_cmd_rsp_list);
20729 /* allocate more */
20730 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20731 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20732 cpu_to_node(hdwq->io_wq->chann));
20734 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20735 "8355 error kmalloc memory for HDWQ "
20737 lpfc_buf->hdwq_no, __func__);
20741 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
20743 &tmp->fcp_cmd_rsp_dma_handle);
20745 if (!tmp->fcp_cmnd) {
20746 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20747 "8356 error pool_alloc memory for HDWQ "
20749 lpfc_buf->hdwq_no, __func__);
20754 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
20755 sizeof(struct fcp_cmnd));
20757 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20758 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
20761 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
20762 struct fcp_cmd_rsp_buf,
20765 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20767 return allocated_buf;
20771 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
20772 * @phba: The HBA for which this call is being executed.
20773 * @lpfc_buf: IO buf structure with the CMD/RSP buf
20775 * This routine puts one CMD/RSP buffer into hdwq's CMD/RSP buffer pool.
20782 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20783 struct lpfc_io_buf *lpfc_buf)
20786 struct fcp_cmd_rsp_buf *list_entry = NULL;
20787 struct fcp_cmd_rsp_buf *tmp = NULL;
20788 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20789 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20790 unsigned long iflags;
20792 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20794 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
20795 list_for_each_entry_safe(list_entry, tmp,
20796 &lpfc_buf->dma_cmd_rsp_list,
20798 list_move_tail(&list_entry->list_node,
20805 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20810 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
20811 * @phba: phba object
20812 * @hdwq: hdwq to cleanup cmd rsp buff resources on
20814 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
20820 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20821 struct lpfc_sli4_hdw_queue *hdwq)
20823 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20824 struct fcp_cmd_rsp_buf *list_entry = NULL;
20825 struct fcp_cmd_rsp_buf *tmp = NULL;
20826 unsigned long iflags;
20828 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20830 /* Free cmd_rsp buf pool */
20831 list_for_each_entry_safe(list_entry, tmp,
20834 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
20835 list_entry->fcp_cmnd,
20836 list_entry->fcp_cmd_rsp_dma_handle);
20837 list_del(&list_entry->list_node);
20841 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);