/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
				 struct lpfc_iocbq *pwqeq,
				 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
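
/*
 * Example: a minimal, hypothetical sketch of how an I/O prep path
 * consumes these templates. The whole 128-byte template is copied into
 * the command's WQE and only the words marked "variable" above are then
 * filled in (the surrounding context below is illustrative, not the
 * driver's actual prep routine):
 *
 *	union lpfc_wqe128 *wqe = &pwqeq->wqe;
 *
 *	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *	bf_set(wqe_xri_tag, &wqe->fcp_iread.wqe_com, pwqeq->sli4_xritag);
 *	bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, pwqeq->iotag);
 *	wqe->fcp_iread.total_xfer_len = xfer_len;	(Word 4)
 *
 * The fixed words (command, class, lenloc, CQ id, ...) are already
 * correct from the template, so the memset()/bf_set() work above only
 * has to run once at init time instead of once per I/O.
 */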
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
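
/*
 * Worked example for the copy above: @cnt is a byte count stepped
 * sizeof(uint64_t) at a time, so one 128-byte WQE takes 128 / 8 = 16
 * iterations. The fast path is only built on 64-bit little-endian
 * kernels, where host byte order already matches SLI byte order; every
 * other configuration falls back to lpfc_sli_pcimem_bcopy(), which
 * byte-swaps each 32-bit word as needed while copying.
 */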
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
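
/*
 * Worked example of the full-queue test in lpfc_sli4_wq_put(), assuming
 * entry_count = 4:
 *
 *	host_index = 3, hba_index = 0
 *	idx = (3 + 1) % 4 = 0 == hba_index  ->  WQ_overflow++, -EBUSY
 *
 * One slot is deliberately left unused so that host_index == hba_index
 * always means "empty" and never "full"; the WQ therefore holds at most
 * entry_count - 1 unconsumed WQEs at any time.
 */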
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
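
/*
 * The MQ put/release routines above are the two halves of a
 * single-producer, single-consumer ring. A condensed, hypothetical
 * caller (locking per the kernel-doc above):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	rc = lpfc_sli4_mq_put(mq, mqe);		// host_index advances
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *	...
 *	// later, from mailbox completion processing:
 *	lpfc_sli4_mq_release(mq);		// hba_index advances
 *
 * The queue is full when advancing host_index would collide with
 * hba_index, which is why lpfc_sli4_mq_put() returns -ENOMEM in that
 * case.
 */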
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	uint16_t cqid;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
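
/*
 * The two modulo tests in lpfc_sli4_process_eq() implement a work
 * budget and a batched doorbell. For example, with max_proc_limit = 64
 * and notify_interval = 16, consumed EQEs are released to the HBA
 * (without rearming) every 16 entries, and the loop bails out after 64
 * so one busy EQ cannot monopolize the CPU; the final
 * sli4_write_eq_db() call then releases whatever remains and rearms the
 * EQ if @rearm requests it.
 */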
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entry.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
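
/*
 * RQ doorbells are batched: with notify_interval = 8, for example, only
 * every 8th successful put rings the doorbell, reporting 8 newly posted
 * buffers at once (num_posted = hq->notify_interval). The header and
 * data rings advance in lock-step, which is why mismatched hq/dq put
 * indexes fail the put with -EINVAL.
 */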
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);

	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns   0 - rrq activated for this xri
 *         < 0 - no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
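
/*
 * A condensed sketch of the RRQ lifecycle implemented by the routines
 * above (illustrative flow, not literal driver code):
 *
 *	lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
 *		// bit set in ndlp->active_rrqs_xri_bitmap; rrq queued on
 *		// phba->active_rrq_list with stop_time = now + RATOV + 1
 *	lpfc_test_rrq_active(phba, ndlp, xri);	// 1 while RRQ pending
 *	...
 *	lpfc_handle_rrq_active(phba);
 *		// on timer expiry: issues the RRQ ELS (send_rrq set) or
 *		// calls lpfc_clr_rrq_active() to clear the bit and free
 *		// the rrq back to phba->rrq_pool
 */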
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->cmd_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->cmd_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
		    sglq->state != SGL_XRI_ABORTED) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
			     LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		/*
		 * Note: the original code tested piocb->cmd_cmpl twice
		 * (if / else if on the same condition), leaving the second
		 * branch unreachable; restructured so IOCBs without a
		 * completion handler are simply released.
		 */
		if (piocb->cmd_cmpl) {
			if (piocb->cmd_flag & LPFC_IO_NVME) {
				lpfc_nvme_cancel_iocb(phba, piocb,
						      ulpstatus, ulpWord4);
			} else {
				piocb->iocb.ulpStatus = ulpstatus;
				piocb->iocb.un.ulpWord[4] = ulpWord4;
				(piocb->cmd_cmpl) (phba, piocb, piocb);
			}
		} else {
			lpfc_sli_release_iocbq(phba, piocb);
		}
	}
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
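
/*
 * A minimal sketch of how a ring event handler dispatches on the
 * returned type (hypothetical caller; CMD_IOCB_MASK is assumed from
 * lpfc_hw.h):
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:	// match against txcmplq and complete it
 *	case LPFC_UNSOL_IOCB:	// hand to the unsolicited event handlers
 *	case LPFC_ABORT_IOCB:	// resolve the aborted exchange
 *	case LPFC_UNKNOWN_IOCB:	// log and drop
 *		break;
 *	}
 */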
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
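
/*
 * The ELS timeout armed above is twice R_A_TOV: fc_ratov is kept in
 * seconds, so with the default R_A_TOV of 10s the timer is set to
 * jiffies + msecs_to_jiffies(1000 * (10 << 1)), i.e. 20 seconds out,
 * giving the ELS command a full 2 * R_A_TOV before the els_tmofunc
 * handler fires.
 */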
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This routine will inform the driver of any BW adjustments we need
 * to make. These changes will be picked up during the next CMF
 * timer interrupt. In addition, any BW changes will be logged
 * with LOG_CGN_MGMT.
 **/
static void
lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	union lpfc_wqe128 *wqe;
	uint32_t status, info;
	struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
	uint64_t bw, bwdif, slop;
	uint64_t pcent, bwpcent;
	int asig, afpin, sigcnt, fpincnt;
	int wsigmax, wfpinmax, cg, tdp;
	char *s;

	/* First check for error */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	if (status) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6211 CMF_SYNC_WQE Error "
				"req_tag x%x status x%x hwstatus x%x "
				"tdatap x%x parm x%x\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe),
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed,
				wcqe->parameter);
		goto out;
	}

	/* Gather congestion information on a successful cmpl */
	info = wcqe->parameter;
	phba->cmf_active_info = info;

	/* See if firmware info count is valid or has changed */
	if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
		info = 0;
	else
		phba->cmf_info_per_interval = info;

	tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
	cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);

	/* Get BW requirement from firmware */
	bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
	if (!bw) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6212 CMF_SYNC_WQE x%x: NULL bw\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		goto out;
	}

	/* Gather information needed for logging if a BW change is required */
	wqe = &cmdiocb->wqe;
	asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
	afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
	fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
	sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
	if (phba->cmf_max_bytes_per_interval != bw ||
	    (asig || afpin || sigcnt || fpincnt)) {
		/* Are we increasing or decreasing BW */
		if (phba->cmf_max_bytes_per_interval < bw) {
			bwdif = bw - phba->cmf_max_bytes_per_interval;
			s = "Increase";
		} else {
			bwdif = phba->cmf_max_bytes_per_interval - bw;
			s = "Decrease";
		}

		/* What is the change percentage */
		slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
		pcent = div64_u64(bwdif * 100 + slop,
				  phba->cmf_link_byte_count);
		bwpcent = div64_u64(bw * 100 + slop,
				    phba->cmf_link_byte_count);
		if (asig) {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6237 BW Threshold %lld%% (%lld): "
					"%lld%% %s: Signal Alarm: cg:%d "
					"Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		} else if (afpin) {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6238 BW Threshold %lld%% (%lld): "
					"%lld%% %s: FPIN Alarm: cg:%d "
					"Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		} else if (sigcnt) {
			wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6239 BW Threshold %lld%% (%lld): "
					"%lld%% %s: Signal Warning: "
					"Cnt %d Max %d: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, sigcnt,
					wsigmax, cg, phba->cmf_active_info);
		} else if (fpincnt) {
			wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6240 BW Threshold %lld%% (%lld): "
					"%lld%% %s: FPIN Warning: "
					"Cnt %d Max %d: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, fpincnt,
					wfpinmax, cg, phba->cmf_active_info);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6241 BW Threshold %lld%% (%lld): "
					"CMF %lld%% %s: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		}
	} else if (info) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6246 Info Threshold %u\n", info);
	}

	/* Save BW change to be picked up during next timer interrupt */
	phba->cmf_last_sync_bw = bw;
out:
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
1909 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1910 * @phba: Pointer to HBA context object.
1911 * @ms: ms to set in WQE interval, 0 means use init op
1912 * @total: Total rcv bytes for this interval
1914 * This routine is called every CMF timer interrupt. Its purpose is
1915 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1916 * that may indicate we have congestion (FPINs or Signals). Upon
1917 * completion, the firmware will indicate any BW restrictions the
1918 * driver may need to enforce.
1921 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1923 union lpfc_wqe128 *wqe;
1924 struct lpfc_iocbq *sync_buf;
1925 unsigned long iflags;
1927 u32 atot, wtot, max;
1929 /* First address any alarm / warning activity */
1930 atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1931 wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1933 /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1934 if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1935 phba->link_state == LPFC_LINK_DOWN)
1938 spin_lock_irqsave(&phba->hbalock, iflags);
1939 sync_buf = __lpfc_sli_get_iocbq(phba);
1941 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1942 "6244 No available WQEs for CMF_SYNC_WQE\n");
1947 wqe = &sync_buf->wqe;
1949 /* WQEs are reused. Clear stale data and set key fields to zero */
1950 memset(wqe, 0, sizeof(*wqe));
1952 /* If this is the very first CMF_SYNC_WQE, issue an init operation */
1954 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1955 "6441 CMF Init %d - CMF_SYNC_WQE\n",
1957 bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1958 bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1962 bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1963 bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1965 /* Check for alarms / warnings */
1967 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1968 /* We hit a Signal alarm condition */
1969 bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1971 /* We hit a FPIN alarm condition */
1972 bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1975 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1976 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1977 /* We hit a Signal warning condition */
1978 max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1979 lpfc_acqe_cgn_frequency;
1980 bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1981 bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
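/*
 * Editor's note (assumed semantics, not driver code): max is the
 * largest number of warning signals the fabric could have sent in one
 * second. If, hypothetically, the fabric signal period were 100 ms and
 * each congestion ACQE aggregated 10 signals:
 *
 *     u32 max = 1000 / 100 * 10;    LPFC_SEC_TO_MSEC = 1000, max = 100
 *
 * wtot, the warning signals actually counted this interval, is then
 * reported against that ceiling.
 */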
1983 /* We hit a FPIN warning condition */
1984 bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1985 bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1989 /* Update total read blocks during previous timer interval */
1990 wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
1993 bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
1994 wqe->cmf_sync.event_tag = phba->fc_eventTag;
1995 bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
1997 /* Setup reqtag to match the wqe completion. */
1998 bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
2000 bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
2002 bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
2003 bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
2004 bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
2006 sync_buf->vport = phba->pport;
2007 sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
2008 sync_buf->context1 = NULL;
2009 sync_buf->context2 = NULL;
2010 sync_buf->context3 = NULL;
2011 sync_buf->sli4_xritag = NO_XRI;
2013 sync_buf->cmd_flag |= LPFC_IO_CMF;
2014 ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2016 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2017 "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2019 __lpfc_sli_release_iocbq(phba, sync_buf);
2022 spin_unlock_irqrestore(&phba->hbalock, iflags);
2027 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2028 * @phba: Pointer to HBA context object.
2029 * @pring: Pointer to driver SLI ring object.
2031 * This function is called with hbalock held and the caller must post the
2032 * iocb without releasing the lock. If the caller releases the lock, the
2033 * iocb slot returned by the function is not guaranteed to be available.
2034 * The function returns a pointer to the next available iocb slot if there
2035 * is an available slot in the ring; otherwise it returns NULL.
2036 * If the get index of the ring is ahead of the put index, the function
2037 * will post an error attention event to the worker thread to take the
2038 * HBA to offline state.
2041 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2043 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2044 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
2046 lockdep_assert_held(&phba->hbalock);
2048 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2049 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2050 pring->sli.sli3.next_cmdidx = 0;
2052 if (unlikely(pring->sli.sli3.local_getidx ==
2053 pring->sli.sli3.next_cmdidx)) {
2055 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2057 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2058 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2059 "0315 Ring %d issue: portCmdGet %d "
2060 "is bigger than cmd ring %d\n",
2062 pring->sli.sli3.local_getidx,
2065 phba->link_state = LPFC_HBA_ERROR;
2067 * All error attention handlers are posted to
2068 * worker thread
2069 */
2070 phba->work_ha |= HA_ERATT;
2071 phba->work_hs = HS_FFER3;
2073 lpfc_worker_wake_up(phba);
2078 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2082 return lpfc_cmd_iocb(phba, pring);
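/*
 * Editor's sketch (illustrative, not driver code): the command ring is
 * a circular buffer. The next command index wraps at the ring size and
 * the ring is full when advancing the put index would collide with the
 * port's get index:
 *
 *     u32 next = cmdidx;
 *     if (++next >= max_cmd_idx)
 *             next = 0;
 *     ring_full = (next == local_getidx);
 */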
2086 * lpfc_sli_next_iotag - Get an iotag for the iocb
2087 * @phba: Pointer to HBA context object.
2088 * @iocbq: Pointer to driver iocb object.
2090 * This function gets an iotag for the iocb. If there is no unused iotag and
2091 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
2092 * array and assigns a new iotag.
2093 * The function returns the allocated iotag if successful, else returns zero.
2094 * Zero is not a valid iotag.
2095 * The caller is not required to hold any lock.
2098 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2100 struct lpfc_iocbq **new_arr;
2101 struct lpfc_iocbq **old_arr;
2103 struct lpfc_sli *psli = &phba->sli;
2106 spin_lock_irq(&phba->hbalock);
2107 iotag = psli->last_iotag;
2108 if (++iotag < psli->iocbq_lookup_len) {
2109 psli->last_iotag = iotag;
2110 psli->iocbq_lookup[iotag] = iocbq;
2111 spin_unlock_irq(&phba->hbalock);
2112 iocbq->iotag = iotag;
2114 } else if (psli->iocbq_lookup_len < (0xffff
2115 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2116 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2117 spin_unlock_irq(&phba->hbalock);
2118 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2121 spin_lock_irq(&phba->hbalock);
2122 old_arr = psli->iocbq_lookup;
2123 if (new_len <= psli->iocbq_lookup_len) {
2124 /* highly improbable case */
2126 iotag = psli->last_iotag;
2127 if (++iotag < psli->iocbq_lookup_len) {
2128 psli->last_iotag = iotag;
2129 psli->iocbq_lookup[iotag] = iocbq;
2130 spin_unlock_irq(&phba->hbalock);
2131 iocbq->iotag = iotag;
2134 spin_unlock_irq(&phba->hbalock);
2137 if (psli->iocbq_lookup)
2138 memcpy(new_arr, old_arr,
2139 ((psli->last_iotag + 1) *
2140 sizeof(struct lpfc_iocbq *)));
2141 psli->iocbq_lookup = new_arr;
2142 psli->iocbq_lookup_len = new_len;
2143 psli->last_iotag = iotag;
2144 psli->iocbq_lookup[iotag] = iocbq;
2145 spin_unlock_irq(&phba->hbalock);
2146 iocbq->iotag = iotag;
2151 spin_unlock_irq(&phba->hbalock);
2153 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2154 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
2161 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2162 * @phba: Pointer to HBA context object.
2163 * @pring: Pointer to driver SLI ring object.
2164 * @iocb: Pointer to iocb slot in the ring.
2165 * @nextiocb: Pointer to driver iocb object which need to be
2166 * posted to firmware.
2168 * This function is called to post a new iocb to the firmware. This
2169 * function copies the new iocb to the ring iocb slot and updates the
2170 * ring pointers. It adds the new iocb to the txcmplq if there is
2171 * a completion callback for this iocb; otherwise the function will free the
2172 * iocb object. The hbalock is asserted held in the code path calling
2173 * this function.
2176 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2177 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2182 nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2185 if (pring->ringno == LPFC_ELS_RING) {
2186 lpfc_debugfs_slow_ring_trc(phba,
2187 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
2188 *(((uint32_t *) &nextiocb->iocb) + 4),
2189 *(((uint32_t *) &nextiocb->iocb) + 6),
2190 *(((uint32_t *) &nextiocb->iocb) + 7));
2194 * Issue iocb command to adapter
2196 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2198 pring->stats.iocb_cmd++;
2201 * If there is no completion routine to call, we can release the
2202 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2203 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2205 if (nextiocb->cmd_cmpl)
2206 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2208 __lpfc_sli_release_iocbq(phba, nextiocb);
2211 * Let the HBA know what IOCB slot will be the next one the
2212 * driver will put a command into.
2214 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2215 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2219 * lpfc_sli_update_full_ring - Update the chip attention register
2220 * @phba: Pointer to HBA context object.
2221 * @pring: Pointer to driver SLI ring object.
2223 * The caller is not required to hold any lock for calling this function.
2224 * This function updates the chip attention bits for the ring to inform the
2225 * firmware that there is pending work to be done for this ring and requests an
2226 * interrupt when there is space available in the ring. This function is
2227 * called when the driver is unable to post more iocbs to the ring due
2228 * to unavailability of space in the ring.
2231 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2233 int ringno = pring->ringno;
2235 pring->flag |= LPFC_CALL_RING_AVAILABLE;
2240 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2241 * The HBA will tell us when an IOCB entry is available.
2243 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2244 readl(phba->CAregaddr); /* flush */
2246 pring->stats.iocb_cmd_full++;
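/*
 * Editor's note on the writel()/readl() pair above: PCI memory writes
 * are posted, so a write alone may linger in a bridge buffer. Reading
 * the same register back forces the write out to the adapter before
 * the CPU proceeds, the usual doorbell idiom:
 *
 *     writel(val, regaddr);
 *     readl(regaddr);        flush the posted write
 */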
2250 * lpfc_sli_update_ring - Update chip attention register
2251 * @phba: Pointer to HBA context object.
2252 * @pring: Pointer to driver SLI ring object.
2254 * This function updates the chip attention register bit for the
2255 * given ring to inform HBA that there is more work to be done
2256 * in this ring. The caller is not required to hold any lock.
2259 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2261 int ringno = pring->ringno;
2264 * Tell the HBA that there is work to do in this ring.
2266 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2268 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2269 readl(phba->CAregaddr); /* flush */
2274 * lpfc_sli_resume_iocb - Process iocbs in the txq
2275 * @phba: Pointer to HBA context object.
2276 * @pring: Pointer to driver SLI ring object.
2278 * This function is called with hbalock held to post pending iocbs
2279 * in the txq to the firmware. This function is called when the driver
2280 * detects space available in the ring.
2283 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2286 struct lpfc_iocbq *nextiocb;
2288 lockdep_assert_held(&phba->hbalock);
2292 * (a) there is anything on the txq to send
2293 * (b) the link is up
2294 * (c) link attention events can be processed (fcp ring only)
2295 * (d) IOCB processing is not blocked by the outstanding mbox command.
2298 if (lpfc_is_link_up(phba) &&
2299 (!list_empty(&pring->txq)) &&
2300 (pring->ringno != LPFC_FCP_RING ||
2301 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2303 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2304 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2305 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2308 lpfc_sli_update_ring(phba, pring);
2310 lpfc_sli_update_full_ring(phba, pring);
2317 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2318 * @phba: Pointer to HBA context object.
2319 * @hbqno: HBQ number.
2321 * This function is called with hbalock held to get the next
2322 * available slot for the given HBQ. If there is a free slot
2323 * available for the HBQ, it will return a pointer to the next available
2324 * HBQ entry; otherwise it will return NULL.
2326 static struct lpfc_hbq_entry *
2327 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2329 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2331 lockdep_assert_held(&phba->hbalock);
2333 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2334 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2335 hbqp->next_hbqPutIdx = 0;
2337 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2338 uint32_t raw_index = phba->hbq_get[hbqno];
2339 uint32_t getidx = le32_to_cpu(raw_index);
2341 hbqp->local_hbqGetIdx = getidx;
2343 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2344 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2345 "1802 HBQ %d: local_hbqGetIdx "
2346 "%u is > than hbqp->entry_count %u\n",
2347 hbqno, hbqp->local_hbqGetIdx,
2350 phba->link_state = LPFC_HBA_ERROR;
2354 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2358 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2363 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2364 * @phba: Pointer to HBA context object.
2366 * This function is called with no lock held to free all the
2367 * hbq buffers while uninitializing the SLI interface. It also
2368 * frees the HBQ buffers returned by the firmware but not yet
2369 * processed by the upper layers.
2372 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2374 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2375 struct hbq_dmabuf *hbq_buf;
2376 unsigned long flags;
2379 hbq_count = lpfc_sli_hbq_count();
2380 /* Return all memory used by all HBQs */
2381 spin_lock_irqsave(&phba->hbalock, flags);
2382 for (i = 0; i < hbq_count; ++i) {
2383 list_for_each_entry_safe(dmabuf, next_dmabuf,
2384 &phba->hbqs[i].hbq_buffer_list, list) {
2385 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2386 list_del(&hbq_buf->dbuf.list);
2387 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2389 phba->hbqs[i].buffer_count = 0;
2392 /* Mark the HBQs not in use */
2393 phba->hbq_in_use = 0;
2394 spin_unlock_irqrestore(&phba->hbalock, flags);
2398 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2399 * @phba: Pointer to HBA context object.
2400 * @hbqno: HBQ number.
2401 * @hbq_buf: Pointer to HBQ buffer.
2403 * This function is called with the hbalock held to post an
2404 * hbq buffer to the firmware. If the function finds an empty
2405 * slot in the HBQ, it will post the buffer. The function returns
2406 * zero if it successfully posts the buffer; otherwise it returns
2407 * an error.
2410 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2411 struct hbq_dmabuf *hbq_buf)
2413 lockdep_assert_held(&phba->hbalock);
2414 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2418 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2419 * @phba: Pointer to HBA context object.
2420 * @hbqno: HBQ number.
2421 * @hbq_buf: Pointer to HBQ buffer.
2423 * This function is called with the hbalock held to post an hbq buffer to the
2424 * firmware. If the function finds an empty slot in the HBQ, it will post the
2425 * buffer and place it on the hbq_buffer_list. The function returns zero if
2426 * it successfully posts the buffer; otherwise it returns an error.
2429 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2430 struct hbq_dmabuf *hbq_buf)
2432 struct lpfc_hbq_entry *hbqe;
2433 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2435 lockdep_assert_held(&phba->hbalock);
2436 /* Get next HBQ entry slot to use */
2437 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2439 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2441 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2442 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2443 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2444 hbqe->bde.tus.f.bdeFlags = 0;
2445 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2446 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2448 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2449 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2451 readl(phba->hbq_put + hbqno);
2452 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
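/*
 * Editor's sketch (conceptual, not driver code): putPaddrHigh() and
 * putPaddrLow() split the 64-bit DMA address into the two 32-bit BDE
 * address words, roughly:
 *
 *     u32 hi = (u32)(physaddr >> 32);           0 on 32-bit DMA
 *     u32 lo = (u32)(physaddr & 0xffffffffULL);
 */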
2459 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2460 * @phba: Pointer to HBA context object.
2461 * @hbqno: HBQ number.
2462 * @hbq_buf: Pointer to HBQ buffer.
2464 * This function is called with the hbalock held to post an RQE to the SLI4
2465 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2466 * the hbq_buffer_list and return zero, otherwise it will return an error.
2469 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2470 struct hbq_dmabuf *hbq_buf)
2473 struct lpfc_rqe hrqe;
2474 struct lpfc_rqe drqe;
2475 struct lpfc_queue *hrq;
2476 struct lpfc_queue *drq;
2478 if (hbqno != LPFC_ELS_HBQ)
2480 hrq = phba->sli4_hba.hdr_rq;
2481 drq = phba->sli4_hba.dat_rq;
2483 lockdep_assert_held(&phba->hbalock);
2484 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2485 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2486 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2487 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2488 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2491 hbq_buf->tag = (rc | (hbqno << 16));
2492 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
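/*
 * Editor's note (illustrative, not driver code): the tag packs the HBQ
 * number into the upper 16 bits and the RQ put index (rc) into the
 * lower 16 bits, which lpfc_sli_hbqbuf_find() later unpacks:
 *
 *     u32 tag   = rc | (hbqno << 16);
 *     u32 hbqno = tag >> 16;
 *     u32 index = tag & 0xffff;
 */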
2496 /* HBQ for ELS and CT traffic. */
2497 static struct lpfc_hbq_init lpfc_els_hbq = {
2502 .ring_mask = (1 << LPFC_ELS_RING),
2509 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2514 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2515 * @phba: Pointer to HBA context object.
2516 * @hbqno: HBQ number.
2517 * @count: Number of HBQ buffers to be posted.
2519 * This function is called with no lock held to post more hbq buffers to the
2520 * given HBQ. The function returns the number of HBQ buffers successfully
2521 * posted.
2524 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2526 uint32_t i, posted = 0;
2527 unsigned long flags;
2528 struct hbq_dmabuf *hbq_buffer;
2529 LIST_HEAD(hbq_buf_list);
2530 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2533 if ((phba->hbqs[hbqno].buffer_count + count) >
2534 lpfc_hbq_defs[hbqno]->entry_count)
2535 count = lpfc_hbq_defs[hbqno]->entry_count -
2536 phba->hbqs[hbqno].buffer_count;
2539 /* Allocate HBQ entries */
2540 for (i = 0; i < count; i++) {
2541 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2544 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2546 /* Check whether HBQ is still in use */
2547 spin_lock_irqsave(&phba->hbalock, flags);
2548 if (!phba->hbq_in_use)
2550 while (!list_empty(&hbq_buf_list)) {
2551 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2553 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2555 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2556 phba->hbqs[hbqno].buffer_count++;
2559 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2561 spin_unlock_irqrestore(&phba->hbalock, flags);
2564 spin_unlock_irqrestore(&phba->hbalock, flags);
2565 while (!list_empty(&hbq_buf_list)) {
2566 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2568 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2574 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2575 * @phba: Pointer to HBA context object.
2578 * This function posts more buffers to the HBQ. This function
2579 * is called with no lock held. The function returns the number of HBQ entries
2580 * successfully allocated.
2583 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2585 if (phba->sli_rev == LPFC_SLI_REV4)
2588 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2589 lpfc_hbq_defs[qno]->add_count);
2593 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2594 * @phba: Pointer to HBA context object.
2595 * @qno: HBQ queue number.
2597 * This function is called from SLI initialization code path with
2598 * no lock held to post initial HBQ buffers to firmware. The
2599 * function returns the number of HBQ entries successfully allocated.
2602 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2604 if (phba->sli_rev == LPFC_SLI_REV4)
2605 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2606 lpfc_hbq_defs[qno]->entry_count);
2608 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2609 lpfc_hbq_defs[qno]->init_count);
2613 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer from an hbq list
2615 * This function removes the first hbq buffer on an hbq list and returns a
2616 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2618 static struct hbq_dmabuf *
2619 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2621 struct lpfc_dmabuf *d_buf;
2623 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2626 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2630 * lpfc_sli_rqbuf_get - Remove the first dma buffer from an RQ list
2631 * @phba: Pointer to HBA context object.
2634 * This function removes the first RQ buffer on an RQ buffer list and returns a
2635 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2637 static struct rqb_dmabuf *
2638 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2640 struct lpfc_dmabuf *h_buf;
2641 struct lpfc_rqb *rqbp;
2644 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2645 struct lpfc_dmabuf, list);
2648 rqbp->buffer_count--;
2649 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2653 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2654 * @phba: Pointer to HBA context object.
2655 * @tag: Tag of the hbq buffer.
2657 * This function searches for the hbq buffer associated with the given tag in
2658 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2659 * otherwise it returns NULL.
2661 static struct hbq_dmabuf *
2662 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2664 struct lpfc_dmabuf *d_buf;
2665 struct hbq_dmabuf *hbq_buf;
2669 if (hbqno >= LPFC_MAX_HBQS)
2672 spin_lock_irq(&phba->hbalock);
2673 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2674 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2675 if (hbq_buf->tag == tag) {
2676 spin_unlock_irq(&phba->hbalock);
2680 spin_unlock_irq(&phba->hbalock);
2681 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2682 "1803 Bad hbq tag. Data: x%x x%x\n",
2683 tag, phba->hbqs[tag >> 16].buffer_count);
2688 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2689 * @phba: Pointer to HBA context object.
2690 * @hbq_buffer: Pointer to HBQ buffer.
2692 * This function is called with the hbalock held. This function gives back
2693 * the hbq buffer to firmware. If the HBQ does not have space to
2694 * post the buffer, it will free the buffer.
2697 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2702 hbqno = hbq_buffer->tag >> 16;
2703 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2704 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2709 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2710 * @mbxCommand: mailbox command code.
2712 * This function is called by the mailbox event handler function to verify
2713 * that the completed mailbox command is a legitimate mailbox command. If the
2714 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2715 * and the mailbox event handler will take the HBA offline.
2718 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2722 switch (mbxCommand) {
2726 case MBX_WRITE_VPARMS:
2727 case MBX_RUN_BIU_DIAG:
2730 case MBX_CONFIG_LINK:
2731 case MBX_CONFIG_RING:
2732 case MBX_RESET_RING:
2733 case MBX_READ_CONFIG:
2734 case MBX_READ_RCONFIG:
2735 case MBX_READ_SPARM:
2736 case MBX_READ_STATUS:
2740 case MBX_READ_LNK_STAT:
2742 case MBX_UNREG_LOGIN:
2744 case MBX_DUMP_MEMORY:
2745 case MBX_DUMP_CONTEXT:
2748 case MBX_UPDATE_CFG:
2750 case MBX_DEL_LD_ENTRY:
2751 case MBX_RUN_PROGRAM:
2753 case MBX_SET_VARIABLE:
2754 case MBX_UNREG_D_ID:
2755 case MBX_KILL_BOARD:
2756 case MBX_CONFIG_FARP:
2759 case MBX_RUN_BIU_DIAG64:
2760 case MBX_CONFIG_PORT:
2761 case MBX_READ_SPARM64:
2762 case MBX_READ_RPI64:
2763 case MBX_REG_LOGIN64:
2764 case MBX_READ_TOPOLOGY:
2767 case MBX_LOAD_EXP_ROM:
2768 case MBX_ASYNCEVT_ENABLE:
2772 case MBX_PORT_CAPABILITIES:
2773 case MBX_PORT_IOV_CONTROL:
2774 case MBX_SLI4_CONFIG:
2775 case MBX_SLI4_REQ_FTRS:
2777 case MBX_UNREG_FCFI:
2782 case MBX_RESUME_RPI:
2783 case MBX_READ_EVENT_LOG_STATUS:
2784 case MBX_READ_EVENT_LOG:
2785 case MBX_SECURITY_MGMT:
2787 case MBX_ACCESS_VDATA:
2798 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2799 * @phba: Pointer to HBA context object.
2800 * @pmboxq: Pointer to mailbox command.
2802 * This is the completion handler function for mailbox commands issued from
2803 * the lpfc_sli_issue_mbox_wait function. This function is called by the
2804 * mailbox event handler function with no lock held. This function
2805 * will wake up the thread waiting on the completion pointed to by the
2806 * context3 field of the mailbox.
2809 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2811 unsigned long drvr_flag;
2812 struct completion *pmbox_done;
2815 * If pmbox_done is NULL, the driver thread gave up waiting and
2816 * continued running.
2818 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2819 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2820 pmbox_done = (struct completion *)pmboxq->context3;
2822 complete(pmbox_done);
2823 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
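/*
 * Editor's sketch of the wait side this handler pairs with
 * (simplified, not the exact driver code): the issuer parks a
 * completion in context3 and clears it under the hbalock if it gives
 * up, which is why this handler re-reads context3 under the same lock:
 *
 *     DECLARE_COMPLETION_ONSTACK(mbox_done);
 *     pmboxq->context3 = &mbox_done;
 *     ... issue the mailbox ...
 *     wait_for_completion_timeout(&mbox_done, timeout);
 *     spin_lock_irqsave(&phba->hbalock, flag);
 *     pmboxq->context3 = NULL;
 *     spin_unlock_irqrestore(&phba->hbalock, flag);
 */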
2828 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2830 unsigned long iflags;
2832 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2833 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2834 spin_lock_irqsave(&ndlp->lock, iflags);
2835 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2836 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2837 spin_unlock_irqrestore(&ndlp->lock, iflags);
2839 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2843 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2844 * @phba: Pointer to HBA context object.
2845 * @pmb: Pointer to mailbox object.
2847 * This function is the default mailbox completion handler. It
2848 * frees the memory resources associated with the completed mailbox
2849 * command. If the completed command is a REG_LOGIN mailbox command,
2850 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2853 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2855 struct lpfc_vport *vport = pmb->vport;
2856 struct lpfc_dmabuf *mp;
2857 struct lpfc_nodelist *ndlp;
2858 struct Scsi_Host *shost;
2862 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2865 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2870 * If a REG_LOGIN succeeded after the node was destroyed or the node
2871 * is in re-discovery, the driver needs to clean up the RPI.
2873 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2874 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2875 !pmb->u.mb.mbxStatus) {
2876 rpi = pmb->u.mb.un.varWords[0];
2877 vpi = pmb->u.mb.un.varRegLogin.vpi;
2878 if (phba->sli_rev == LPFC_SLI_REV4)
2879 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2880 lpfc_unreg_login(phba, vpi, rpi, pmb);
2882 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2883 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2884 if (rc != MBX_NOT_FINISHED)
2888 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2889 !(phba->pport->load_flag & FC_UNLOADING) &&
2890 !pmb->u.mb.mbxStatus) {
2891 shost = lpfc_shost_from_vport(vport);
2892 spin_lock_irq(shost->host_lock);
2893 vport->vpi_state |= LPFC_VPI_REGISTERED;
2894 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2895 spin_unlock_irq(shost->host_lock);
2898 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2899 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2901 pmb->ctx_buf = NULL;
2902 pmb->ctx_ndlp = NULL;
2905 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2906 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2908 /* Check to see if there are any deferred events to process */
2912 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2913 "1438 UNREG cmpl deferred mbox x%x "
2914 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2915 ndlp->nlp_rpi, ndlp->nlp_DID,
2916 ndlp->nlp_flag, ndlp->nlp_defer_did,
2917 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2919 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2920 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2921 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2922 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2923 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2925 __lpfc_sli_rpi_release(vport, ndlp);
2928 /* The unreg_login mailbox is complete and had a
2929 * reference that has to be released. The PLOGI
2930 * got its own ref.
2931 */
2932 lpfc_nlp_put(ndlp);
2933 pmb->ctx_ndlp = NULL;
2937 /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2938 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2939 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2943 /* Check security permission status on INIT_LINK mailbox command */
2944 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2945 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2946 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2947 "2860 SLI authentication is required "
2948 "for INIT_LINK but has not done yet\n");
2950 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2951 lpfc_sli4_mbox_cmd_free(phba, pmb);
2953 mempool_free(pmb, phba->mbox_mem_pool);
2956 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2957 * @phba: Pointer to HBA context object.
2958 * @pmb: Pointer to mailbox object.
2960 * This function is the unreg rpi mailbox completion handler. It
2961 * frees the memory resources associated with the completed mailbox
2962 * command. An additional reference is put on the ndlp to prevent
2963 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2964 * the unreg mailbox command completes; this routine puts the
2965 * reference back.
2969 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2971 struct lpfc_vport *vport = pmb->vport;
2972 struct lpfc_nodelist *ndlp;
2974 ndlp = pmb->ctx_ndlp;
2975 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2976 if (phba->sli_rev == LPFC_SLI_REV4 &&
2977 (bf_get(lpfc_sli_intf_if_type,
2978 &phba->sli4_hba.sli_intf) >=
2979 LPFC_SLI_INTF_IF_TYPE_2)) {
2982 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2983 "0010 UNREG_LOGIN vpi:%x "
2984 "rpi:%x DID:%x defer x%x flg x%x "
2986 vport->vpi, ndlp->nlp_rpi,
2987 ndlp->nlp_DID, ndlp->nlp_defer_did,
2990 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2992 /* Check to see if there are any deferred
2993 * events that need to be processed
2994 */
2995 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2996 (ndlp->nlp_defer_did !=
2997 NLP_EVT_NOTHING_PENDING)) {
2999 vport, KERN_INFO, LOG_DISCOVERY,
3000 "4111 UNREG cmpl deferred "
3002 "NPort x%x Data: x%x x%px\n",
3003 ndlp->nlp_rpi, ndlp->nlp_DID,
3004 ndlp->nlp_defer_did, ndlp);
3005 ndlp->nlp_flag &= ~NLP_UNREG_INP;
3006 ndlp->nlp_defer_did =
3007 NLP_EVT_NOTHING_PENDING;
3008 lpfc_issue_els_plogi(
3009 vport, ndlp->nlp_DID, 0);
3011 __lpfc_sli_rpi_release(vport, ndlp);
3018 mempool_free(pmb, phba->mbox_mem_pool);
3022 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3023 * @phba: Pointer to HBA context object.
3025 * This function is called with no lock held. This function processes all
3026 * the completed mailbox commands and gives them to the upper layers. The interrupt
3027 * service routine processes the mailbox completion interrupt and adds completed
3028 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
3029 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
3030 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
3031 * function returns the mailbox commands to the upper layer by calling the
3032 * completion handler function of each mailbox.
3035 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3042 phba->sli.slistat.mbox_event++;
3044 /* Get all completed mailbox buffers into the cmplq */
3045 spin_lock_irq(&phba->hbalock);
3046 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3047 spin_unlock_irq(&phba->hbalock);
3049 /* Get a Mailbox buffer to setup mailbox commands for callback */
3051 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3057 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3059 lpfc_debugfs_disc_trc(pmb->vport,
3060 LPFC_DISC_TRC_MBOX_VPORT,
3061 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3062 (uint32_t)pmbox->mbxCommand,
3063 pmbox->un.varWords[0],
3064 pmbox->un.varWords[1]);
3067 lpfc_debugfs_disc_trc(phba->pport,
3069 "MBOX cmpl: cmd:x%x mb:x%x x%x",
3070 (uint32_t)pmbox->mbxCommand,
3071 pmbox->un.varWords[0],
3072 pmbox->un.varWords[1]);
3077 * It is a fatal error if an unknown mbox command completes.
3079 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3081 /* Unknown mailbox command compl */
3082 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3083 "(%d):0323 Unknown Mailbox command "
3084 "x%x (x%x/x%x) Cmpl\n",
3085 pmb->vport ? pmb->vport->vpi :
3088 lpfc_sli_config_mbox_subsys_get(phba,
3090 lpfc_sli_config_mbox_opcode_get(phba,
3092 phba->link_state = LPFC_HBA_ERROR;
3093 phba->work_hs = HS_FFER3;
3094 lpfc_handle_eratt(phba);
3098 if (pmbox->mbxStatus) {
3099 phba->sli.slistat.mbox_stat_err++;
3100 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3101 /* Mbox cmd cmpl error - RETRYing */
3102 lpfc_printf_log(phba, KERN_INFO,
3104 "(%d):0305 Mbox cmd cmpl "
3105 "error - RETRYing Data: x%x "
3106 "(x%x/x%x) x%x x%x x%x\n",
3107 pmb->vport ? pmb->vport->vpi :
3110 lpfc_sli_config_mbox_subsys_get(phba,
3112 lpfc_sli_config_mbox_opcode_get(phba,
3115 pmbox->un.varWords[0],
3116 pmb->vport ? pmb->vport->port_state :
3117 LPFC_VPORT_UNKNOWN);
3118 pmbox->mbxStatus = 0;
3119 pmbox->mbxOwner = OWN_HOST;
3120 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3121 if (rc != MBX_NOT_FINISHED)
3126 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3127 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3128 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3129 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3131 pmb->vport ? pmb->vport->vpi : 0,
3133 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3134 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3136 *((uint32_t *) pmbox),
3137 pmbox->un.varWords[0],
3138 pmbox->un.varWords[1],
3139 pmbox->un.varWords[2],
3140 pmbox->un.varWords[3],
3141 pmbox->un.varWords[4],
3142 pmbox->un.varWords[5],
3143 pmbox->un.varWords[6],
3144 pmbox->un.varWords[7],
3145 pmbox->un.varWords[8],
3146 pmbox->un.varWords[9],
3147 pmbox->un.varWords[10]);
3150 pmb->mbox_cmpl(phba, pmb);
3156 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3157 * @phba: Pointer to HBA context object.
3158 * @pring: Pointer to driver SLI ring object.
3161 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
3162 * is set in the tag, the buffer was posted for a particular exchange and
3163 * the function will return the buffer without replacing it.
3164 * If the buffer is for unsolicited ELS or CT traffic, this function
3165 * returns the buffer and also posts another buffer to the firmware.
3167 static struct lpfc_dmabuf *
3168 lpfc_sli_get_buff(struct lpfc_hba *phba,
3169 struct lpfc_sli_ring *pring,
3172 struct hbq_dmabuf *hbq_entry;
3174 if (tag & QUE_BUFTAG_BIT)
3175 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3176 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3179 return &hbq_entry->dbuf;
3183 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3184 * containing an NVME LS request.
3185 * @phba: pointer to lpfc hba data structure.
3186 * @piocb: pointer to the iocbq struct representing the sequence starting
3189 * This routine initially validates the NVME LS, validates there is a login
3190 * with the port that sent the LS, and then calls the appropriate nvme host
3191 * or target LS request handler.
3194 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3196 struct lpfc_nodelist *ndlp;
3197 struct lpfc_dmabuf *d_buf;
3198 struct hbq_dmabuf *nvmebuf;
3199 struct fc_frame_header *fc_hdr;
3200 struct lpfc_async_xchg_ctx *axchg = NULL;
3201 char *failwhy = NULL;
3202 uint32_t oxid, sid, did, fctl, size;
3205 d_buf = piocb->context2;
3207 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3208 fc_hdr = nvmebuf->hbuf.virt;
3209 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3210 sid = sli4_sid_from_fc_hdr(fc_hdr);
3211 did = sli4_did_from_fc_hdr(fc_hdr);
3212 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3213 fc_hdr->fh_f_ctl[1] << 8 |
3214 fc_hdr->fh_f_ctl[2]);
3215 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
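/*
 * Editor's note (assumed interpretation, not driver code): fh_f_ctl is
 * a 3-byte big-endian field, so the bytes are shifted into one 24-bit
 * value. The F_CTL check below then requires exactly
 * FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT within the
 * 0x00FF0000 byte, i.e. the LS must arrive as a complete single
 * sequence with sequence initiative transferred:
 *
 *     ok = (fctl & 0x00FF0000) ==
 *          (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT);
 */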
3217 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
3220 if (phba->pport->load_flag & FC_UNLOADING) {
3221 failwhy = "Driver Unloading";
3222 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3223 failwhy = "NVME FC4 Disabled";
3224 } else if (!phba->nvmet_support && !phba->pport->localport) {
3225 failwhy = "No Localport";
3226 } else if (phba->nvmet_support && !phba->targetport) {
3227 failwhy = "No Targetport";
3228 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3229 failwhy = "Bad NVME LS R_CTL";
3230 } else if (unlikely((fctl & 0x00FF0000) !=
3231 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3232 failwhy = "Bad NVME LS F_CTL";
3234 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3236 failwhy = "No CTX memory";
3239 if (unlikely(failwhy)) {
3240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3241 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3242 sid, oxid, failwhy);
3246 /* validate the source of the LS is logged in */
3247 ndlp = lpfc_findnode_did(phba->pport, sid);
3249 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3250 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3251 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3252 "6216 NVME Unsol rcv: No ndlp: "
3253 "NPort_ID x%x oxid x%x\n",
3264 axchg->state = LPFC_NVME_STE_LS_RCV;
3265 axchg->entry_cnt = 1;
3266 axchg->rqb_buffer = (void *)nvmebuf;
3267 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3268 axchg->payload = nvmebuf->dbuf.virt;
3269 INIT_LIST_HEAD(&axchg->list);
3271 if (phba->nvmet_support) {
3272 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3273 spin_lock_irq(&ndlp->lock);
3274 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3275 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3276 spin_unlock_irq(&ndlp->lock);
3278 /* This reference is a single occurrence to hold the
3279 * node valid until the nvmet transport calls
3280 * host_release.
3281 */
3282 if (!lpfc_nlp_get(ndlp))
3285 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3286 "6206 NVMET unsol ls_req ndlp x%px "
3287 "DID x%x xflags x%x refcnt %d\n",
3288 ndlp, ndlp->nlp_DID,
3289 ndlp->fc4_xpt_flags,
3290 kref_read(&ndlp->kref));
3292 spin_unlock_irq(&ndlp->lock);
3295 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3298 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3304 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3305 "NVMe%s handler failed %d\n",
3307 (phba->nvmet_support) ? "T" : "I", ret);
3309 /* recycle receive buffer */
3310 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3312 /* If start of new exchange, abort it */
3313 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3314 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3321 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3322 * @phba: Pointer to HBA context object.
3323 * @pring: Pointer to driver SLI ring object.
3324 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3325 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3326 * @fch_type: the type for the first frame of the sequence.
3328 * This function is called with no lock held. This function uses the r_ctl and
3329 * type of the received sequence to find the correct callback function to call
3330 * to process the sequence.
3333 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3334 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3341 lpfc_nvme_unsol_ls_handler(phba, saveq);
3347 /* unSolicited Responses */
3348 if (pring->prt[0].profile) {
3349 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3350 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3354 /* We must search, based on rctl / type
3355 for the right routine */
3356 for (i = 0; i < pring->num_mask; i++) {
3357 if ((pring->prt[i].rctl == fch_r_ctl) &&
3358 (pring->prt[i].type == fch_type)) {
3359 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3360 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3361 (phba, pring, saveq);
3369 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3370 * @phba: Pointer to HBA context object.
3371 * @pring: Pointer to driver SLI ring object.
3372 * @saveq: Pointer to the unsolicited iocb.
3374 * This function is called with no lock held by the ring event handler
3375 * when there is an unsolicited iocb posted to the response ring by the
3376 * firmware. This function gets the buffer associated with the iocbs
3377 * and calls the event handler for the ring. This function handles both
3378 * qring buffers and hbq buffers.
3379 * When the function returns 1, the caller can free the iocb object; otherwise
3380 * upper layer functions will free the iocb objects.
3383 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3384 struct lpfc_iocbq *saveq)
3388 uint32_t Rctl, Type;
3389 struct lpfc_iocbq *iocbq;
3390 struct lpfc_dmabuf *dmzbuf;
3392 irsp = &(saveq->iocb);
3394 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3395 if (pring->lpfc_sli_rcv_async_status)
3396 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3398 lpfc_printf_log(phba,
3401 "0316 Ring %d handler: unexpected "
3402 "ASYNC_STATUS iocb received evt_code "
3405 irsp->un.asyncstat.evt_code);
3409 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3410 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3411 if (irsp->ulpBdeCount > 0) {
3412 dmzbuf = lpfc_sli_get_buff(phba, pring,
3413 irsp->un.ulpWord[3]);
3414 lpfc_in_buf_free(phba, dmzbuf);
3417 if (irsp->ulpBdeCount > 1) {
3418 dmzbuf = lpfc_sli_get_buff(phba, pring,
3419 irsp->unsli3.sli3Words[3]);
3420 lpfc_in_buf_free(phba, dmzbuf);
3423 if (irsp->ulpBdeCount > 2) {
3424 dmzbuf = lpfc_sli_get_buff(phba, pring,
3425 irsp->unsli3.sli3Words[7]);
3426 lpfc_in_buf_free(phba, dmzbuf);
3432 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3433 if (irsp->ulpBdeCount != 0) {
3434 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3435 irsp->un.ulpWord[3]);
3436 if (!saveq->context2)
3437 lpfc_printf_log(phba,
3440 "0341 Ring %d Cannot find buffer for "
3441 "an unsolicited iocb. tag 0x%x\n",
3443 irsp->un.ulpWord[3]);
3445 if (irsp->ulpBdeCount == 2) {
3446 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3447 irsp->unsli3.sli3Words[7]);
3448 if (!saveq->context3)
3449 lpfc_printf_log(phba,
3452 "0342 Ring %d Cannot find buffer for an"
3453 " unsolicited iocb. tag 0x%x\n",
3455 irsp->unsli3.sli3Words[7]);
3457 list_for_each_entry(iocbq, &saveq->list, list) {
3458 irsp = &(iocbq->iocb);
3459 if (irsp->ulpBdeCount != 0) {
3460 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3461 irsp->un.ulpWord[3]);
3462 if (!iocbq->context2)
3463 lpfc_printf_log(phba,
3466 "0343 Ring %d Cannot find "
3467 "buffer for an unsolicited iocb"
3468 ". tag 0x%x\n", pring->ringno,
3469 irsp->un.ulpWord[3]);
3471 if (irsp->ulpBdeCount == 2) {
3472 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3473 irsp->unsli3.sli3Words[7]);
3474 if (!iocbq->context3)
3475 lpfc_printf_log(phba,
3478 "0344 Ring %d Cannot find "
3479 "buffer for an unsolicited "
3482 irsp->unsli3.sli3Words[7]);
3486 if (irsp->ulpBdeCount != 0 &&
3487 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3488 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3491 /* search continue save q for same XRI */
3492 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3493 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3494 saveq->iocb.unsli3.rcvsli3.ox_id) {
3495 list_add_tail(&saveq->list, &iocbq->list);
3501 list_add_tail(&saveq->clist,
3502 &pring->iocb_continue_saveq);
3503 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3504 list_del_init(&iocbq->clist);
3506 irsp = &(saveq->iocb);
3510 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3511 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3512 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3513 Rctl = FC_RCTL_ELS_REQ;
3516 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3517 Rctl = w5p->hcsw.Rctl;
3518 Type = w5p->hcsw.Type;
3520 /* Firmware Workaround */
3521 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3522 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3523 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3524 Rctl = FC_RCTL_ELS_REQ;
3526 w5p->hcsw.Rctl = Rctl;
3527 w5p->hcsw.Type = Type;
3531 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3532 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3533 "0313 Ring %d handler: unexpected Rctl x%x "
3534 "Type x%x received\n",
3535 pring->ringno, Rctl, Type);
3541 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3542 * @phba: Pointer to HBA context object.
3543 * @pring: Pointer to driver SLI ring object.
3544 * @prspiocb: Pointer to response iocb object.
3546 * This function looks up the iocb_lookup table to get the command iocb
3547 * corresponding to the given response iocb using the iotag of the
3548 * response iocb. The driver calls this function with the hbalock held
3549 * for SLI3 ports or the ring lock held for SLI4 ports.
3550 * This function returns the command iocb object if it finds the command
3551 * iocb else returns NULL.
3553 static struct lpfc_iocbq *
3554 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3555 struct lpfc_sli_ring *pring,
3556 struct lpfc_iocbq *prspiocb)
3558 struct lpfc_iocbq *cmd_iocb = NULL;
3561 if (phba->sli_rev == LPFC_SLI_REV4)
3562 iotag = get_wqe_reqtag(prspiocb);
3564 iotag = prspiocb->iocb.ulpIoTag;
3566 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3567 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3568 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3569 /* remove from txcmpl queue list */
3570 list_del_init(&cmd_iocb->list);
3571 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3572 pring->txcmplq_cnt--;
3577 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3578 "0317 iotag x%x is out of "
3579 "range: max iotag x%x\n",
3580 iotag, phba->sli.last_iotag);
3585 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3586 * @phba: Pointer to HBA context object.
3587 * @pring: Pointer to driver SLI ring object.
3590 * This function looks up the iocb_lookup table to get the command iocb
3591 * corresponding to the given iotag. The driver calls this function with
3592 * the ring lock held because this function is an SLI4 port only helper.
3593 * This function returns the command iocb object if it finds the command
3594 * iocb else returns NULL.
3596 static struct lpfc_iocbq *
3597 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3598 struct lpfc_sli_ring *pring, uint16_t iotag)
3600 struct lpfc_iocbq *cmd_iocb = NULL;
3602 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3603 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3604 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3605 /* remove from txcmpl queue list */
3606 list_del_init(&cmd_iocb->list);
3607 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3608 pring->txcmplq_cnt--;
3613 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3614 "0372 iotag x%x lookup error: max iotag (x%x) "
3616 iotag, phba->sli.last_iotag,
3617 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3622 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3623 * @phba: Pointer to HBA context object.
3624 * @pring: Pointer to driver SLI ring object.
3625 * @saveq: Pointer to the response iocb to be processed.
3627 * This function is called by the ring event handler for non-fcp
3628 * rings when there is a new response iocb in the response ring.
3629 * The caller is not required to hold any locks. This function
3630 * gets the command iocb associated with the response iocb and
3631 * calls the completion handler for the command iocb. If there
3632 * is no completion handler, the function will free the resources
3633 * associated with command iocb. If the response iocb is for
3634 * an already aborted command iocb, the status of the completion
3635 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3636 * This function always returns 1.
3639 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3640 struct lpfc_iocbq *saveq)
3642 struct lpfc_iocbq *cmdiocbp;
3644 unsigned long iflag;
3645 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3647 if (phba->sli_rev == LPFC_SLI_REV4)
3648 spin_lock_irqsave(&pring->ring_lock, iflag);
3650 spin_lock_irqsave(&phba->hbalock, iflag);
3651 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3652 if (phba->sli_rev == LPFC_SLI_REV4)
3653 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3655 spin_unlock_irqrestore(&phba->hbalock, iflag);
3657 ulp_command = get_job_cmnd(phba, saveq);
3658 ulp_status = get_job_ulpstatus(phba, saveq);
3659 ulp_word4 = get_job_word4(phba, saveq);
3660 ulp_context = get_job_ulpcontext(phba, saveq);
3661 if (phba->sli_rev == LPFC_SLI_REV4)
3662 iotag = get_wqe_reqtag(saveq);
3664 iotag = saveq->iocb.ulpIoTag;
3667 ulp_command = get_job_cmnd(phba, cmdiocbp);
3668 if (cmdiocbp->cmd_cmpl) {
3670 * If an ELS command failed send an event to mgmt
3674 (pring->ringno == LPFC_ELS_RING) &&
3675 (ulp_command == CMD_ELS_REQUEST64_CR))
3676 lpfc_send_els_failure_event(phba,
3680 * Post all ELS completions to the worker thread.
3681 * All other are passed to the completion callback.
3683 if (pring->ringno == LPFC_ELS_RING) {
3684 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3685 (cmdiocbp->cmd_flag &
3686 LPFC_DRIVER_ABORTED)) {
3687 spin_lock_irqsave(&phba->hbalock,
3689 cmdiocbp->cmd_flag &=
3690 ~LPFC_DRIVER_ABORTED;
3691 spin_unlock_irqrestore(&phba->hbalock,
3693 saveq->iocb.ulpStatus =
3694 IOSTAT_LOCAL_REJECT;
3695 saveq->iocb.un.ulpWord[4] =
3698 /* Firmware could still be in progress
3699 * of DMAing payload, so don't free data
3700 * buffer till after a hbeat.
3702 spin_lock_irqsave(&phba->hbalock,
3704 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3705 spin_unlock_irqrestore(&phba->hbalock,
3708 if (phba->sli_rev == LPFC_SLI_REV4) {
3709 if (saveq->cmd_flag &
3710 LPFC_EXCHANGE_BUSY) {
3711 /* Set cmdiocb flag for the
3712 * exchange busy so sgl (xri)
3713 * will not be released until
3714 * the abort xri is received
3718 &phba->hbalock, iflag);
3719 cmdiocbp->cmd_flag |=
3721 spin_unlock_irqrestore(
3722 &phba->hbalock, iflag);
3724 if (cmdiocbp->cmd_flag &
3725 LPFC_DRIVER_ABORTED) {
3727 * Clear LPFC_DRIVER_ABORTED
3728 * bit in case it was driver
3732 &phba->hbalock, iflag);
3733 cmdiocbp->cmd_flag &=
3734 ~LPFC_DRIVER_ABORTED;
3735 spin_unlock_irqrestore(
3736 &phba->hbalock, iflag);
3737 set_job_ulpstatus(cmdiocbp,
3738 IOSTAT_LOCAL_REJECT);
3739 set_job_ulpword4(cmdiocbp,
3740 IOERR_ABORT_REQUESTED);
3742 * For SLI4, irspiocb contains
3743 * NO_XRI in sli_xritag, it
3744 * shall not affect releasing
3745 * sgl (xri) process.
3747 set_job_ulpstatus(saveq,
3748 IOSTAT_LOCAL_REJECT);
3749 set_job_ulpword4(saveq,
3752 &phba->hbalock, iflag);
3754 LPFC_DELAY_MEM_FREE;
3755 spin_unlock_irqrestore(
3756 &phba->hbalock, iflag);
3760 cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3762 lpfc_sli_release_iocbq(phba, cmdiocbp);
3765 * Unknown initiating command based on the response iotag.
3766 * This could be the case on the ELS ring because of
3769 if (pring->ringno != LPFC_ELS_RING) {
3771 * Ring <ringno> handler: unexpected completion IoTag
3774 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3775 "0322 Ring %d handler: "
3776 "unexpected completion IoTag x%x "
3777 "Data: x%x x%x x%x x%x\n",
3778 pring->ringno, iotag, ulp_status,
3779 ulp_word4, ulp_command, ulp_context);
3787 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3788 * @phba: Pointer to HBA context object.
3789 * @pring: Pointer to driver SLI ring object.
3791 * This function is called from the iocb ring event handlers when the
3792 * put pointer is ahead of the get pointer for a ring. This function signals
3793 * an error attention condition to the worker thread and the worker
3794 * thread will transition the HBA to the offline state.
3797 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3799 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3801 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3802 * rsp ring <portRspMax>
3804 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3805 "0312 Ring %d handler: portRspPut %d "
3806 "is bigger than rsp ring %d\n",
3807 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3808 pring->sli.sli3.numRiocb);
3810 phba->link_state = LPFC_HBA_ERROR;
3813 * All error attention handlers are posted to
3814 * worker thread
3815 */
3816 phba->work_ha |= HA_ERATT;
3817 phba->work_hs = HS_FFER3;
3819 lpfc_worker_wake_up(phba);
3825 * lpfc_poll_eratt - Error attention polling timer timeout handler
3826 * @t: Context to fetch pointer to address of HBA context object from.
3828 * This function is invoked by the Error Attention polling timer when the
3829 * timer times out. It will check the SLI Error Attention register for
3830 * possible attention events. If so, it will post an Error Attention event
3831 * and wake up worker thread to process it. Otherwise, it will set up the
3832 * Error Attention polling timer for the next poll.
3834 void lpfc_poll_eratt(struct timer_list *t)
3836 struct lpfc_hba *phba;
3838 uint64_t sli_intr, cnt;
3840 phba = from_timer(phba, t, eratt_poll);
3842 /* Here we will also keep track of interrupts per sec of the hba */
3843 sli_intr = phba->sli.slistat.sli_intr;
3845 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3846 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3849 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3851 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3852 do_div(cnt, phba->eratt_poll_interval);
3853 phba->sli.slistat.sli_ips = cnt;
3855 phba->sli.slistat.sli_prev_intr = sli_intr;
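/*
 * Editor's sketch (illustrative, not driver code) of the wrap-safe
 * delta above: if the running interrupt counter wrapped past the top
 * of the u64 range, the distance from the previous sample to that top
 * is added to the new value; do_div() then performs the 64-by-32
 * division that a plain '/' cannot do on 32-bit builds:
 *
 *     u64 cnt = (prev > curr) ? ((u64)(-1) - prev) + curr
 *                             : curr - prev;
 *     do_div(cnt, interval_secs);    quotient lands back in cnt
 */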
3857 /* Check chip HA register for error event */
3858 eratt = lpfc_sli_check_eratt(phba);
3861 /* Tell the worker thread there is work to do */
3862 lpfc_worker_wake_up(phba);
3864 /* Restart the timer for next eratt poll */
3865 mod_timer(&phba->eratt_poll,
3867 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3873 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3874 * @phba: Pointer to HBA context object.
3875 * @pring: Pointer to driver SLI ring object.
3876 * @mask: Host attention register mask for this ring.
3878 * This function is called from the interrupt context when there is a ring
3879 * event for the fcp ring. The caller does not hold any lock.
3880 * The function processes each response iocb in the response ring until it
3881 * finds an iocb with the LE bit set, chaining all the iocbs up to the iocb
3882 * with the LE bit set. The function will call the completion handler of the command iocb
3883 * if the response iocb indicates a completion for a command iocb or it is
3884 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3885 * function if this is an unsolicited iocb.
3886 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3887 * to check it explicitly.
3890 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3891 struct lpfc_sli_ring *pring, uint32_t mask)
3893 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3894 IOCB_t *irsp = NULL;
3895 IOCB_t *entry = NULL;
3896 struct lpfc_iocbq *cmdiocbq = NULL;
3897 struct lpfc_iocbq rspiocbq;
3899 uint32_t portRspPut, portRspMax;
3901 lpfc_iocb_type type;
3902 unsigned long iflag;
3903 uint32_t rsp_cmpl = 0;
3905 spin_lock_irqsave(&phba->hbalock, iflag);
3906 pring->stats.iocb_event++;
3909 * The next available response entry should never exceed the maximum
3910 * entries. If it does, treat it as an adapter hardware error.
3912 portRspMax = pring->sli.sli3.numRiocb;
3913 portRspPut = le32_to_cpu(pgp->rspPutInx);
3914 if (unlikely(portRspPut >= portRspMax)) {
3915 lpfc_sli_rsp_pointers_error(phba, pring);
3916 spin_unlock_irqrestore(&phba->hbalock, iflag);
3919 if (phba->fcp_ring_in_use) {
3920 spin_unlock_irqrestore(&phba->hbalock, iflag);
3923 phba->fcp_ring_in_use = 1;
3926 while (pring->sli.sli3.rspidx != portRspPut) {
3928 * Fetch an entry off the ring and copy it into a local data
3929 * structure. The copy involves a byte-swap since the
3930 * network byte order and PCI byte order are different.
3932 entry = lpfc_resp_iocb(phba, pring);
3933 phba->last_completion_time = jiffies;
3935 if (++pring->sli.sli3.rspidx >= portRspMax)
3936 pring->sli.sli3.rspidx = 0;
3938 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3939 (uint32_t *) &rspiocbq.iocb,
3940 phba->iocb_rsp_size);
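/*
 * lpfc_sli_pcimem_bcopy() copies the entry one 32-bit word at a time,
 * fixing up endianness along the way, which is why a plain memcpy is
 * not used for ring entries.
 */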
3941 INIT_LIST_HEAD(&(rspiocbq.list));
3942 irsp = &rspiocbq.iocb;
3944 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3945 pring->stats.iocb_rsp++;
3948 if (unlikely(irsp->ulpStatus)) {
3950 * If the HBA reports resource errors, reduce the queue
3951 * depths of the SCSI devices.
3953 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3954 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3955 IOERR_NO_RESOURCES)) {
3956 spin_unlock_irqrestore(&phba->hbalock, iflag);
3957 phba->lpfc_rampdown_queue_depth(phba);
3958 spin_lock_irqsave(&phba->hbalock, iflag);
3961 /* Rsp ring <ringno> error: IOCB */
3962 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3963 "0336 Rsp Ring %d error: IOCB Data: "
3964 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3965 pring->ringno,
3966 irsp->un.ulpWord[0],
3967 irsp->un.ulpWord[1],
3968 irsp->un.ulpWord[2],
3969 irsp->un.ulpWord[3],
3970 irsp->un.ulpWord[4],
3971 irsp->un.ulpWord[5],
3972 *(uint32_t *)&irsp->un1,
3973 *((uint32_t *)&irsp->un1 + 1));
3974 }
3976 switch (type) {
3977 case LPFC_ABORT_IOCB:
3978 case LPFC_SOL_IOCB:
3979 /*
3980 * Idle exchange closed via ABTS from port. No iocb
3981 * resources need to be recovered.
3983 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3984 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3985 "0333 IOCB cmd 0x%x"
3986 " processed. Skipping"
3987 " completion",
3988 irsp->ulpCommand);
3989 break;
3990 }
3992 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3993 &rspiocbq);
3994 if (unlikely(!cmdiocbq))
3995 break;
3996 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
3997 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
3998 if (cmdiocbq->cmd_cmpl) {
3999 spin_unlock_irqrestore(&phba->hbalock, iflag);
4000 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4001 spin_lock_irqsave(&phba->hbalock, iflag);
4002 }
4003 break;
4004 case LPFC_UNSOL_IOCB:
4005 spin_unlock_irqrestore(&phba->hbalock, iflag);
4006 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4007 spin_lock_irqsave(&phba->hbalock, iflag);
4008 break;
4009 default:
4010 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4011 char adaptermsg[LPFC_MAX_ADPTMSG];
4012 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4013 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4014 MAX_MSG_DATA);
4015 dev_warn(&((phba->pcidev)->dev),
4016 "lpfc%d: %s\n",
4017 phba->brd_no, adaptermsg);
4018 } else {
4019 /* Unknown IOCB command */
4020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4021 "0334 Unknown IOCB command "
4022 "Data: x%x, x%x x%x x%x x%x\n",
4023 type, irsp->ulpCommand,
4024 irsp->ulpStatus,
4025 irsp->ulpIoTag,
4026 irsp->ulpContext);
4027 }
4028 break;
4029 }
4032 * The response IOCB has been processed. Update the ring
4033 * pointer in SLIM. If the port response put pointer has not
4034 * been updated, sync the pgp->rspPutInx and fetch the new port
4035 * response put pointer.
4037 writel(pring->sli.sli3.rspidx,
4038 &phba->host_gp[pring->ringno].rspGetInx);
4040 if (pring->sli.sli3.rspidx == portRspPut)
4041 portRspPut = le32_to_cpu(pgp->rspPutInx);
4044 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4045 pring->stats.iocb_rsp_full++;
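/*
 * Each ring owns a 4-bit attention field in the Chip Attention
 * register; the shift below moves the R0 bits into this ring's slot.
 */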
4046 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4047 writel(status, phba->CAregaddr);
4048 readl(phba->CAregaddr);
4050 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4051 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4052 pring->stats.iocb_cmd_empty++;
4054 /* Force update of the local copy of cmdGetInx */
4055 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4056 lpfc_sli_resume_iocb(phba, pring);
4058 if (pring->lpfc_sli_cmd_available)
4059 pring->lpfc_sli_cmd_available(phba, pring);
4063 phba->fcp_ring_in_use = 0;
4064 spin_unlock_irqrestore(&phba->hbalock, iflag);
4065 return rc;
4066 }
4069 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4070 * @phba: Pointer to HBA context object.
4071 * @pring: Pointer to driver SLI ring object.
4072 * @rspiocbp: Pointer to driver response IOCB object.
4074 * This function is called from the worker thread when there is a slow-path
4075 * response IOCB to process. This function chains all the response iocbs until
4076 * seeing the iocb with the LE bit set. The function will call
4077 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4078 * completion of a command iocb. The function will call the
4079 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4080 * The function frees the resources or calls the completion handler if this
4081 * iocb is an abort completion. The function returns NULL when the response
4082 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4083 * this function chains the iocb onto the iocb_continueq and returns the
4084 * response iocb passed in.
4086 static struct lpfc_iocbq *
4087 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4088 struct lpfc_iocbq *rspiocbp)
4090 struct lpfc_iocbq *saveq;
4091 struct lpfc_iocbq *cmdiocb;
4092 struct lpfc_iocbq *next_iocb;
4093 IOCB_t *irsp;
4094 uint32_t free_saveq;
4095 u8 cmd_type;
4096 lpfc_iocb_type type;
4097 unsigned long iflag;
4098 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4099 u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4100 u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4101 int rc;
4103 spin_lock_irqsave(&phba->hbalock, iflag);
4104 /* First add the response iocb to the continueq list */
4105 list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4106 pring->iocb_continueq_cnt++;
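/*
 * Response entries for one exchange can arrive as a chain; everything
 * is parked on iocb_continueq until the entry carrying the LE (list
 * end) bit shows up, at which point the whole chain is processed.
 */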
4109 * By default, the driver expects to free all resources
4110 * associated with this iocb completion.
4111 */
4112 free_saveq = 1;
4113 saveq = list_get_first(&pring->iocb_continueq,
4114 struct lpfc_iocbq, list);
4115 list_del_init(&pring->iocb_continueq);
4116 pring->iocb_continueq_cnt = 0;
4118 pring->stats.iocb_rsp++;
4121 * If the HBA reports resource errors, reduce the queue
4122 * depths of the SCSI devices.
4124 if (ulp_status == IOSTAT_LOCAL_REJECT &&
4125 ((ulp_word4 & IOERR_PARAM_MASK) ==
4126 IOERR_NO_RESOURCES)) {
4127 spin_unlock_irqrestore(&phba->hbalock, iflag);
4128 phba->lpfc_rampdown_queue_depth(phba);
4129 spin_lock_irqsave(&phba->hbalock, iflag);
4133 /* Rsp ring <ringno> error: IOCB */
4134 if (phba->sli_rev < LPFC_SLI_REV4) {
4135 irsp = &rspiocbp->iocb;
4136 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4137 "0328 Rsp Ring %d error: ulp_status x%x "
4138 "IOCB Data: "
4139 "x%08x x%08x x%08x x%08x "
4140 "x%08x x%08x x%08x x%08x "
4141 "x%08x x%08x x%08x x%08x "
4142 "x%08x x%08x x%08x x%08x\n",
4143 pring->ringno, ulp_status,
4144 get_job_ulpword(rspiocbp, 0),
4145 get_job_ulpword(rspiocbp, 1),
4146 get_job_ulpword(rspiocbp, 2),
4147 get_job_ulpword(rspiocbp, 3),
4148 get_job_ulpword(rspiocbp, 4),
4149 get_job_ulpword(rspiocbp, 5),
4150 *(((uint32_t *)irsp) + 6),
4151 *(((uint32_t *)irsp) + 7),
4152 *(((uint32_t *)irsp) + 8),
4153 *(((uint32_t *)irsp) + 9),
4154 *(((uint32_t *)irsp) + 10),
4155 *(((uint32_t *)irsp) + 11),
4156 *(((uint32_t *)irsp) + 12),
4157 *(((uint32_t *)irsp) + 13),
4158 *(((uint32_t *)irsp) + 14),
4159 *(((uint32_t *)irsp) + 15));
4160 } else {
4161 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4162 "0321 Rsp Ring %d error: "
4163 "IOCB Data: "
4164 "x%x x%x x%x x%x\n",
4165 pring->ringno,
4166 rspiocbp->wcqe_cmpl.word0,
4167 rspiocbp->wcqe_cmpl.total_data_placed,
4168 rspiocbp->wcqe_cmpl.parameter,
4169 rspiocbp->wcqe_cmpl.word3);
4175 * Fetch the iocb command type and call the correct completion
4176 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4177 * get freed back to the lpfc_iocb_list by the discovery
4178 * kernel thread.
4179 */
4180 cmd_type = ulp_command & CMD_IOCB_MASK;
4181 type = lpfc_sli_iocb_cmd_type(cmd_type);
4182 switch (type) {
4183 case LPFC_SOL_IOCB:
4184 spin_unlock_irqrestore(&phba->hbalock, iflag);
4185 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4186 spin_lock_irqsave(&phba->hbalock, iflag);
4187 break;
4188 case LPFC_UNSOL_IOCB:
4189 spin_unlock_irqrestore(&phba->hbalock, iflag);
4190 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4191 spin_lock_irqsave(&phba->hbalock, iflag);
4192 if (!rc)
4193 free_saveq = 0;
4194 break;
4195 case LPFC_ABORT_IOCB:
4196 cmdiocb = NULL;
4197 if (ulp_command != CMD_XRI_ABORTED_CX)
4198 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4199 saveq);
4200 if (cmdiocb) {
4201 /* Call the specified completion routine */
4202 if (cmdiocb->cmd_cmpl) {
4203 spin_unlock_irqrestore(&phba->hbalock, iflag);
4204 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4205 spin_lock_irqsave(&phba->hbalock, iflag);
4206 } else {
4207 __lpfc_sli_release_iocbq(phba, cmdiocb);
4208 }
4209 }
4210 break;
4211 case LPFC_UNKNOWN_IOCB:
4212 if (ulp_command == CMD_ADAPTER_MSG) {
4213 char adaptermsg[LPFC_MAX_ADPTMSG];
4215 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4216 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4217 LPFC_MAX_ADPTMSG);
4218 dev_warn(&((phba->pcidev)->dev),
4219 "lpfc%d: %s\n",
4220 phba->brd_no, adaptermsg);
4221 } else {
4222 /* Unknown command */
4223 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4224 "0335 Unknown IOCB "
4225 "command Data: x%x "
4226 "x%x x%x x%x\n",
4227 ulp_command,
4228 ulp_status,
4229 get_wqe_reqtag(rspiocbp),
4230 get_job_ulpcontext(phba, rspiocbp));
4231 }
4232 break;
4233 }
4235 if (free_saveq) {
4236 list_for_each_entry_safe(rspiocbp, next_iocb,
4237 &saveq->list, list) {
4238 list_del_init(&rspiocbp->list);
4239 __lpfc_sli_release_iocbq(phba, rspiocbp);
4240 }
4241 __lpfc_sli_release_iocbq(phba, saveq);
4242 }
4243 rspiocbp = NULL;
4244 spin_unlock_irqrestore(&phba->hbalock, iflag);
4245 return rspiocbp;
4246 }
4249 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4250 * @phba: Pointer to HBA context object.
4251 * @pring: Pointer to driver SLI ring object.
4252 * @mask: Host attention register mask for this ring.
4254 * This routine wraps the actual slow_ring event process routine from the
4255 * API jump table function pointer from the lpfc_hba struct.
4258 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4259 struct lpfc_sli_ring *pring, uint32_t mask)
4261 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4265 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4266 * @phba: Pointer to HBA context object.
4267 * @pring: Pointer to driver SLI ring object.
4268 * @mask: Host attention register mask for this ring.
4270 * This function is called from the worker thread when there is a ring event
4271 * for non-fcp rings. The caller does not hold any lock. The function will
4272 * remove each response iocb from the response ring and call the handle
4273 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4276 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4277 struct lpfc_sli_ring *pring, uint32_t mask)
4279 struct lpfc_pgp *pgp;
4280 IOCB_t *entry;
4281 IOCB_t *irsp = NULL;
4282 struct lpfc_iocbq *rspiocbp = NULL;
4283 uint32_t portRspPut, portRspMax;
4284 unsigned long iflag;
4285 uint32_t status;
4287 pgp = &phba->port_gp[pring->ringno];
4288 spin_lock_irqsave(&phba->hbalock, iflag);
4289 pring->stats.iocb_event++;
4292 * The next available response entry should never exceed the maximum
4293 * entries. If it does, treat it as an adapter hardware error.
4295 portRspMax = pring->sli.sli3.numRiocb;
4296 portRspPut = le32_to_cpu(pgp->rspPutInx);
4297 if (portRspPut >= portRspMax) {
4299 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4300 * rsp ring <portRspMax>
4302 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4303 "0303 Ring %d handler: portRspPut %d "
4304 "is bigger than rsp ring %d\n",
4305 pring->ringno, portRspPut, portRspMax);
4307 phba->link_state = LPFC_HBA_ERROR;
4308 spin_unlock_irqrestore(&phba->hbalock, iflag);
4310 phba->work_hs = HS_FFER3;
4311 lpfc_handle_eratt(phba);
4313 return;
4314 }
4317 while (pring->sli.sli3.rspidx != portRspPut) {
4319 * Build a completion list and call the appropriate handler.
4320 * The process is to get the next available response iocb, get
4321 * a free iocb from the list, copy the response data into the
4322 * free iocb, insert to the continuation list, and update the
4323 * next response index to slim. This process makes response
4324 * iocb's in the ring available to DMA as fast as possible but
4325 * pays a penalty for a copy operation. Since the iocb is
4326 * only 32 bytes, this penalty is considered small relative to
4327 * the PCI reads for register values and a slim write. When
4328 * the ulpLe field is set, the entire Command has been
4329 * received.
4330 */
4331 entry = lpfc_resp_iocb(phba, pring);
4333 phba->last_completion_time = jiffies;
4334 rspiocbp = __lpfc_sli_get_iocbq(phba);
4335 if (rspiocbp == NULL) {
4336 printk(KERN_ERR "%s: out of buffers! Failing "
4337 "completion.\n", __func__);
4338 break;
4339 }
4341 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4342 phba->iocb_rsp_size);
4343 irsp = &rspiocbp->iocb;
4345 if (++pring->sli.sli3.rspidx >= portRspMax)
4346 pring->sli.sli3.rspidx = 0;
4348 if (pring->ringno == LPFC_ELS_RING) {
4349 lpfc_debugfs_slow_ring_trc(phba,
4350 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4351 *(((uint32_t *) irsp) + 4),
4352 *(((uint32_t *) irsp) + 6),
4353 *(((uint32_t *) irsp) + 7));
4356 writel(pring->sli.sli3.rspidx,
4357 &phba->host_gp[pring->ringno].rspGetInx);
4359 spin_unlock_irqrestore(&phba->hbalock, iflag);
4360 /* Handle the response IOCB */
4361 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4362 spin_lock_irqsave(&phba->hbalock, iflag);
4365 * If the port response put pointer has not been updated, sync
4366 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4367 * response put pointer.
4369 if (pring->sli.sli3.rspidx == portRspPut) {
4370 portRspPut = le32_to_cpu(pgp->rspPutInx);
4372 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4374 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4375 /* At least one response entry has been freed */
4376 pring->stats.iocb_rsp_full++;
4377 /* SET RxRE_RSP in Chip Att register */
4378 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4379 writel(status, phba->CAregaddr);
4380 readl(phba->CAregaddr); /* flush */
4382 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4383 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4384 pring->stats.iocb_cmd_empty++;
4386 /* Force update of the local copy of cmdGetInx */
4387 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4388 lpfc_sli_resume_iocb(phba, pring);
4390 if (pring->lpfc_sli_cmd_available)
4391 pring->lpfc_sli_cmd_available(phba, pring);
4395 spin_unlock_irqrestore(&phba->hbalock, iflag);
4400 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4401 * @phba: Pointer to HBA context object.
4402 * @pring: Pointer to driver SLI ring object.
4403 * @mask: Host attention register mask for this ring.
4405 * This function is called from the worker thread when there is a pending
4406 * ELS response iocb on the driver internal slow-path response iocb worker
4407 * queue. The caller does not hold any lock. The function will remove each
4408 * response iocb from the response worker queue and calls the handle
4409 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4412 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4413 struct lpfc_sli_ring *pring, uint32_t mask)
4415 struct lpfc_iocbq *irspiocbq;
4416 struct hbq_dmabuf *dmabuf;
4417 struct lpfc_cq_event *cq_event;
4418 unsigned long iflag;
4421 spin_lock_irqsave(&phba->hbalock, iflag);
4422 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4423 spin_unlock_irqrestore(&phba->hbalock, iflag);
4424 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4425 /* Get the response iocb from the head of work queue */
4426 spin_lock_irqsave(&phba->hbalock, iflag);
4427 list_remove_head(&phba->sli4_hba.sp_queue_event,
4428 cq_event, struct lpfc_cq_event, list);
4429 spin_unlock_irqrestore(&phba->hbalock, iflag);
4431 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4432 case CQE_CODE_COMPL_WQE:
4433 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4434 cq_event);
4435 /* Translate ELS WCQE to response IOCBQ */
4436 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4437 irspiocbq);
4438 if (irspiocbq)
4439 lpfc_sli_sp_handle_rspiocb(phba, pring,
4440 irspiocbq);
4441 break;
4443 case CQE_CODE_RECEIVE:
4444 case CQE_CODE_RECEIVE_V1:
4445 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4446 cq_event);
4447 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4448 break;
4449 default:
4450 break;
4451 }
4454 /* Limit the number of events to 64 to avoid soft lockups */
4461 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4462 * @phba: Pointer to HBA context object.
4463 * @pring: Pointer to driver SLI ring object.
4465 * This function aborts all iocbs in the given ring and frees all the iocb
4466 * objects in txq. This function issues an abort iocb for all the iocb commands
4467 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4468 * the return of this function. The caller is not required to hold any locks.
4471 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4473 LIST_HEAD(completions);
4474 struct lpfc_iocbq *iocb, *next_iocb;
4476 if (pring->ringno == LPFC_ELS_RING) {
4477 lpfc_fabric_abort_hba(phba);
4480 /* Error everything on txq and txcmplq
4481 * Also issue ABTS for everything on the txcmplq
4482 */
4483 if (phba->sli_rev >= LPFC_SLI_REV4) {
4484 spin_lock_irq(&pring->ring_lock);
4485 list_splice_init(&pring->txq, &completions);
4486 pring->txq_cnt = 0;
4487 spin_unlock_irq(&pring->ring_lock);
4489 spin_lock_irq(&phba->hbalock);
4490 /* Next issue ABTS for everything on the txcmplq */
4491 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4492 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4493 spin_unlock_irq(&phba->hbalock);
4494 } else {
4495 spin_lock_irq(&phba->hbalock);
4496 list_splice_init(&pring->txq, &completions);
4497 pring->txq_cnt = 0;
4499 /* Next issue ABTS for everything on the txcmplq */
4500 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4501 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4502 spin_unlock_irq(&phba->hbalock);
4504 /* Make sure HBA is alive */
4505 lpfc_issue_hb_tmo(phba);
4507 /* Cancel all the IOCBs from the completions list */
4508 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4509 IOERR_SLI_ABORTED);
4510 }
4513 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4514 * @phba: Pointer to HBA context object.
4516 * This function aborts all iocbs in FCP rings and frees all the iocb
4517 * objects in txq. This function issues an abort iocb for all the iocb commands
4518 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4519 * the return of this function. The caller is not required to hold any locks.
4522 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4524 struct lpfc_sli *psli = &phba->sli;
4525 struct lpfc_sli_ring *pring;
4526 uint32_t i;
4528 /* Look on all the FCP Rings for the iotag */
4529 if (phba->sli_rev >= LPFC_SLI_REV4) {
4530 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4531 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4532 lpfc_sli_abort_iocb_ring(phba, pring);
4533 }
4534 } else {
4535 pring = &psli->sli3_ring[LPFC_FCP_RING];
4536 lpfc_sli_abort_iocb_ring(phba, pring);
4537 }
4538 }
4541 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4542 * @phba: Pointer to HBA context object.
4544 * This function flushes all iocbs in the IO ring and frees all the iocb
4545 * objects in txq and txcmplq. This function will not issue abort iocbs
4546 * for the iocb commands in txcmplq; they are simply returned with
4547 * IOERR_SLI_DOWN. This function is invoked via EEH when the device's PCI
4548 * slot has been permanently disabled.
4551 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4552 {
4553 LIST_HEAD(txq);
4554 LIST_HEAD(txcmplq);
4555 struct lpfc_sli *psli = &phba->sli;
4556 struct lpfc_sli_ring *pring;
4557 uint32_t i;
4558 struct lpfc_iocbq *piocb, *next_iocb;
4560 spin_lock_irq(&phba->hbalock);
4561 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4562 !phba->sli4_hba.hdwq) {
4563 spin_unlock_irq(&phba->hbalock);
4564 return;
4565 }
4566 /* Indicate the I/O queues are flushed */
4567 phba->hba_flag |= HBA_IOQ_FLUSH;
4568 spin_unlock_irq(&phba->hbalock);
4570 /* Look on all the FCP Rings for the iotag */
4571 if (phba->sli_rev >= LPFC_SLI_REV4) {
4572 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4573 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4575 spin_lock_irq(&pring->ring_lock);
4576 /* Retrieve everything on txq */
4577 list_splice_init(&pring->txq, &txq);
4578 list_for_each_entry_safe(piocb, next_iocb,
4579 &pring->txcmplq, list)
4580 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4581 /* Retrieve everything on the txcmplq */
4582 list_splice_init(&pring->txcmplq, &txcmplq);
4583 pring->txq_cnt = 0;
4584 pring->txcmplq_cnt = 0;
4585 spin_unlock_irq(&pring->ring_lock);
4587 /* Flush the txq */
4588 lpfc_sli_cancel_iocbs(phba, &txq,
4589 IOSTAT_LOCAL_REJECT,
4590 IOERR_SLI_DOWN);
4591 /* Flush the txcmplq */
4592 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4593 IOSTAT_LOCAL_REJECT,
4594 IOERR_SLI_DOWN);
4595 if (unlikely(pci_channel_offline(phba->pcidev)))
4596 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4597 }
4598 } else {
4599 pring = &psli->sli3_ring[LPFC_FCP_RING];
4601 spin_lock_irq(&phba->hbalock);
4602 /* Retrieve everything on txq */
4603 list_splice_init(&pring->txq, &txq);
4604 list_for_each_entry_safe(piocb, next_iocb,
4605 &pring->txcmplq, list)
4606 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4607 /* Retrieve everything on the txcmplq */
4608 list_splice_init(&pring->txcmplq, &txcmplq);
4609 pring->txq_cnt = 0;
4610 pring->txcmplq_cnt = 0;
4611 spin_unlock_irq(&phba->hbalock);
4614 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4615 IOERR_SLI_DOWN);
4616 /* Flush the txcmplq */
4617 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4618 IOERR_SLI_DOWN);
4619 }
4620 }
4623 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4624 * @phba: Pointer to HBA context object.
4625 * @mask: Bit mask to be checked.
4627 * This function reads the host status register and compares
4628 * with the provided bit mask to check if HBA completed
4629 * the restart. This function will wait in a loop for the
4630 * HBA to complete restart. If the HBA does not restart within
4631 * 15 iterations, the function will reset the HBA again. The
4632 * function returns 1 when the HBA fails to restart; otherwise it returns
4633 * zero.
4634 **/
4635 static int
4636 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4642 /* Read the HBA Host Status Register */
4643 if (lpfc_readl(phba->HSregaddr, &status))
4646 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4649 * Check status register every 100ms for 5 retries, then every
4650 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4651 * every 2.5 sec for 4.
4652 * Break out of the loop if errors occurred during init.
4654 while (((status & mask) != mask) &&
4655 !(status & HS_FFERM) &&
4656 i++ < 20) {
4667 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4668 lpfc_sli_brdrestart(phba);
4670 /* Read the HBA Host Status Register */
4671 if (lpfc_readl(phba->HSregaddr, &status)) {
4677 /* Check to see if any errors occurred during init */
4678 if ((status & HS_FFERM) || (i >= 20)) {
4679 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4680 "2751 Adapter failed to restart, "
4681 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4682 status,
4683 readl(phba->MBslimaddr + 0xa8),
4684 readl(phba->MBslimaddr + 0xac));
4685 phba->link_state = LPFC_HBA_ERROR;
4693 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4694 * @phba: Pointer to HBA context object.
4695 * @mask: Bit mask to be checked.
4697 * This function checks the host status register to check if HBA is
4698 * ready. This function will wait in a loop for the HBA to become ready.
4699 * If the HBA is not ready, the function will reset the HBA PCI
4700 * function again. The function returns 1 when the HBA fails to become
4701 * ready; otherwise it returns zero.
4704 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4709 /* Read the HBA Host Status Register */
4710 status = lpfc_sli4_post_status_check(phba);
4713 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4714 lpfc_sli_brdrestart(phba);
4715 status = lpfc_sli4_post_status_check(phba);
4718 /* Check to see if any errors occurred during init */
4720 phba->link_state = LPFC_HBA_ERROR;
4723 phba->sli4_hba.intr_enable = 0;
4725 phba->hba_flag &= ~HBA_SETUP;
4730 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4731 * @phba: Pointer to HBA context object.
4732 * @mask: Bit mask to be checked.
4734 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4735 * from the API jump table function pointer from the lpfc_hba struct.
4738 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4740 return phba->lpfc_sli_brdready(phba, mask);
4741 }
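/*
 * A minimal sketch of how the _s3/_s4 entry points are assumed to be
 * bound once, elsewhere in the driver, at API table setup time:
 *
 *	phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;	// SLI3 device
 *	phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;	// SLI4 device
 *
 * so this wrapper dispatches without rechecking sli_rev on every call.
 */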
4743 #define BARRIER_TEST_PATTERN (0xdeadbeef)
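/*
 * The reset barrier below writes BARRIER_TEST_PATTERN into SLIM and then
 * polls until the port echoes back its one's complement
 * (~0xdeadbeef == 0x21524110), which indicates the chip has quiesced DMA
 * before the actual reset proceeds.
 */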
4746 * lpfc_reset_barrier - Make HBA ready for HBA reset
4747 * @phba: Pointer to HBA context object.
4749 * This function is called before resetting an HBA. This function is called
4750 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4752 void lpfc_reset_barrier(struct lpfc_hba *phba)
4753 {
4754 uint32_t __iomem *resp_buf;
4755 uint32_t __iomem *mbox_buf;
4756 volatile uint32_t mbox;
4757 uint32_t hc_copy, ha_copy, resp_data;
4758 int i;
4759 uint8_t hdrtype;
4761 lockdep_assert_held(&phba->hbalock);
4763 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4764 if (hdrtype != 0x80 ||
4765 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4766 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4767 return;
4770 * Tell the other part of the chip to suspend temporarily all
4771 * its DMA operations.
4772 */
4773 resp_buf = phba->MBslimaddr;
4775 /* Disable the error attention */
4776 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4777 return;
4778 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4779 readl(phba->HCregaddr); /* flush */
4780 phba->link_flag |= LS_IGNORE_ERATT;
4782 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4783 return;
4784 if (ha_copy & HA_ERATT) {
4785 /* Clear Chip error bit */
4786 writel(HA_ERATT, phba->HAregaddr);
4787 phba->pport->stopped = 1;
4788 }
4790 mbox = 0;
4791 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4792 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4794 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4795 mbox_buf = phba->MBslimaddr;
4796 writel(mbox, mbox_buf);
4798 for (i = 0; i < 50; i++) {
4799 if (lpfc_readl((resp_buf + 1), &resp_data))
4800 return;
4801 if (resp_data != ~(BARRIER_TEST_PATTERN))
4802 mdelay(1);
4803 else
4804 break;
4805 }
4807 if (lpfc_readl((resp_buf + 1), &resp_data))
4808 return;
4809 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4810 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4811 phba->pport->stopped)
4812 goto restore_hc;
4813 else
4814 goto clear_errat;
4815 }
4817 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4819 for (i = 0; i < 500; i++) {
4820 if (lpfc_readl(resp_buf, &resp_data))
4821 return;
4822 if (resp_data != mbox)
4823 mdelay(1);
4824 else
4825 break;
4826 }
4828 clear_errat:
4831 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4832 return;
4833 if (!(ha_copy & HA_ERATT))
4834 mdelay(1);
4835 else
4836 break;
4837 }
4839 if (readl(phba->HAregaddr) & HA_ERATT) {
4840 writel(HA_ERATT, phba->HAregaddr);
4841 phba->pport->stopped = 1;
4842 }
4844 restore_hc:
4845 phba->link_flag &= ~LS_IGNORE_ERATT;
4846 writel(hc_copy, phba->HCregaddr);
4847 readl(phba->HCregaddr); /* flush */
4848 }
4851 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4852 * @phba: Pointer to HBA context object.
4854 * This function issues a kill_board mailbox command and waits for
4855 * the error attention interrupt. This function is called for stopping
4856 * the firmware processing. The caller is not required to hold any
4857 * locks. This function calls lpfc_hba_down_post function to free
4858 * any pending commands after the kill. The function returns 1 if it
4859 * fails to kill the board; otherwise it returns 0.
4862 lpfc_sli_brdkill(struct lpfc_hba *phba)
4863 {
4864 struct lpfc_sli *psli;
4865 LPFC_MBOXQ_t *pmb;
4866 uint32_t status;
4867 uint32_t ha_copy;
4868 int retval;
4869 int i = 0;
4871 psli = &phba->sli;
4874 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4875 "0329 Kill HBA Data: x%x x%x\n",
4876 phba->pport->port_state, psli->sli_flag);
4878 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4879 if (!pmb)
4880 return 1;
4882 /* Disable the error attention */
4883 spin_lock_irq(&phba->hbalock);
4884 if (lpfc_readl(phba->HCregaddr, &status)) {
4885 spin_unlock_irq(&phba->hbalock);
4886 mempool_free(pmb, phba->mbox_mem_pool);
4887 return 1;
4888 }
4889 status &= ~HC_ERINT_ENA;
4890 writel(status, phba->HCregaddr);
4891 readl(phba->HCregaddr); /* flush */
4892 phba->link_flag |= LS_IGNORE_ERATT;
4893 spin_unlock_irq(&phba->hbalock);
4895 lpfc_kill_board(phba, pmb);
4896 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4897 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4899 if (retval != MBX_SUCCESS) {
4900 if (retval != MBX_BUSY)
4901 mempool_free(pmb, phba->mbox_mem_pool);
4902 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4903 "2752 KILL_BOARD command failed retval %d\n",
4904 retval);
4905 spin_lock_irq(&phba->hbalock);
4906 phba->link_flag &= ~LS_IGNORE_ERATT;
4907 spin_unlock_irq(&phba->hbalock);
4908 return 1;
4909 }
4911 spin_lock_irq(&phba->hbalock);
4912 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4913 spin_unlock_irq(&phba->hbalock);
4915 mempool_free(pmb, phba->mbox_mem_pool);
4917 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4918 * attention every 100ms for 3 seconds. If we don't get ERATT after
4919 * 3 seconds we still set HBA_ERROR state because the status of the
4920 * board is now undefined.
4922 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4923 return 1;
4924 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4925 mdelay(100);
4926 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4927 return 1;
4928 }
4930 del_timer_sync(&psli->mbox_tmo);
4931 if (ha_copy & HA_ERATT) {
4932 writel(HA_ERATT, phba->HAregaddr);
4933 phba->pport->stopped = 1;
4934 }
4935 spin_lock_irq(&phba->hbalock);
4936 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4937 psli->mbox_active = NULL;
4938 phba->link_flag &= ~LS_IGNORE_ERATT;
4939 spin_unlock_irq(&phba->hbalock);
4941 lpfc_hba_down_post(phba);
4942 phba->link_state = LPFC_HBA_ERROR;
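/* Success (0) only if the expected error attention actually fired */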
4944 return ha_copy & HA_ERATT ? 0 : 1;
4945 }
4948 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4949 * @phba: Pointer to HBA context object.
4951 * This function resets the HBA by writing HC_INITFF to the control
4952 * register. After the HBA resets, this function resets all the iocb ring
4953 * indices. This function disables PCI layer parity checking during
4954 * the reset.
4955 * This function returns 0 always.
4956 * The caller is not required to hold any locks.
4959 lpfc_sli_brdreset(struct lpfc_hba *phba)
4960 {
4961 struct lpfc_sli *psli;
4962 struct lpfc_sli_ring *pring;
4963 uint16_t cfg_value;
4964 int i;
4966 psli = &phba->sli;
4969 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4970 "0325 Reset HBA Data: x%x x%x\n",
4971 (phba->pport) ? phba->pport->port_state : 0,
4972 psli->sli_flag);
4974 /* perform board reset */
4975 phba->fc_eventTag = 0;
4976 phba->link_events = 0;
4977 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4978 if (phba->pport) {
4979 phba->pport->fc_myDID = 0;
4980 phba->pport->fc_prevDID = 0;
4981 }
4983 /* Turn off parity checking and serr during the physical reset */
4984 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4985 return -EIO;
4987 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4988 (cfg_value &
4989 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4991 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4993 /* Now toggle INITFF bit in the Host Control Register */
4994 writel(HC_INITFF, phba->HCregaddr);
4996 readl(phba->HCregaddr); /* flush */
4997 writel(0, phba->HCregaddr);
4998 readl(phba->HCregaddr); /* flush */
5000 /* Restore PCI cmd register */
5001 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5003 /* Initialize relevant SLI info */
5004 for (i = 0; i < psli->num_rings; i++) {
5005 pring = &psli->sli3_ring[i];
5007 pring->sli.sli3.rspidx = 0;
5008 pring->sli.sli3.next_cmdidx = 0;
5009 pring->sli.sli3.local_getidx = 0;
5010 pring->sli.sli3.cmdidx = 0;
5011 pring->missbufcnt = 0;
5012 }
5014 phba->link_state = LPFC_WARM_START;
5015 return 0;
5016 }
5019 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5020 * @phba: Pointer to HBA context object.
5022 * This function resets a SLI4 HBA. This function disables PCI layer parity
5023 * checking while it resets the device. The caller is not required to hold
5024 * any locks.
5026 * This function returns 0 on success else returns negative error code.
5029 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5030 {
5031 struct lpfc_sli *psli = &phba->sli;
5032 uint16_t cfg_value;
5033 int rc = 0;
5036 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5037 "0295 Reset HBA Data: x%x x%x x%x\n",
5038 phba->pport->port_state, psli->sli_flag,
5039 phba->hba_flag);
5041 /* perform board reset */
5042 phba->fc_eventTag = 0;
5043 phba->link_events = 0;
5044 phba->pport->fc_myDID = 0;
5045 phba->pport->fc_prevDID = 0;
5046 phba->hba_flag &= ~HBA_SETUP;
5048 spin_lock_irq(&phba->hbalock);
5049 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5050 phba->fcf.fcf_flag = 0;
5051 spin_unlock_irq(&phba->hbalock);
5053 /* Now physically reset the device */
5054 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5055 "0389 Performing PCI function reset!\n");
5057 /* Turn off parity checking and serr during the physical reset */
5058 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5059 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5060 "3205 PCI read Config failed\n");
5061 return -EIO;
5062 }
5064 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5065 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5067 /* Perform FCoE PCI function reset before freeing queue memory */
5068 rc = lpfc_pci_function_reset(phba);
5070 /* Restore PCI cmd register */
5071 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5073 return rc;
5074 }
5077 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5078 * @phba: Pointer to HBA context object.
5080 * This function is called in the SLI initialization code path to
5081 * restart the HBA. The caller is not required to hold any lock.
5082 * This function writes MBX_RESTART mailbox command to the SLIM and
5083 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5084 * function to free any pending commands. The function enables
5085 * POST only during the first initialization. The function returns zero.
5086 * The function does not guarantee completion of MBX_RESTART mailbox
5087 * command before the return of this function.
5090 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5091 {
5092 MAILBOX_t *mb;
5093 struct lpfc_sli *psli;
5094 volatile uint32_t word0;
5095 void __iomem *to_slim;
5096 uint32_t hba_aer_enabled;
5098 spin_lock_irq(&phba->hbalock);
5100 /* Take PCIe device Advanced Error Reporting (AER) state */
5101 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5103 psli = &phba->sli;
5105 /* Restart HBA */
5106 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5107 "0337 Restart HBA Data: x%x x%x\n",
5108 (phba->pport) ? phba->pport->port_state : 0,
5109 psli->sli_flag);
5111 word0 = 0;
5112 mb = (MAILBOX_t *) &word0;
5113 mb->mbxCommand = MBX_RESTART;
5114 mb->mbxHc = 1;
5116 lpfc_reset_barrier(phba);
5118 to_slim = phba->MBslimaddr;
5119 writel(*(uint32_t *) mb, to_slim);
5120 readl(to_slim); /* flush */
5122 /* Only skip post after fc_ffinit is completed */
5123 if (phba->pport && phba->pport->port_state)
5124 word0 = 1; /* This is really setting up word1 */
5125 else
5126 word0 = 0; /* This is really setting up word1 */
5127 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5128 writel(*(uint32_t *) mb, to_slim);
5129 readl(to_slim); /* flush */
5131 lpfc_sli_brdreset(phba);
5132 if (phba->pport) {
5133 phba->pport->stopped = 0;
5134 phba->link_state = LPFC_INIT_START;
5135 }
5136 spin_unlock_irq(&phba->hbalock);
5138 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5139 psli->stats_start = ktime_get_seconds();
5141 /* Give the INITFF and Post time to settle. */
5142 mdelay(100);
5144 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5145 if (hba_aer_enabled)
5146 pci_disable_pcie_error_reporting(phba->pcidev);
5148 lpfc_hba_down_post(phba);
5150 return 0;
5151 }
5154 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5155 * @phba: Pointer to HBA context object.
5157 * This function is called in the SLI initialization code path to restart
5158 * a SLI4 HBA. The caller is not required to hold any lock.
5159 * At the end of the function, it calls lpfc_hba_down_post function to
5160 * free any pending commands.
5163 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5164 {
5165 struct lpfc_sli *psli = &phba->sli;
5166 uint32_t hba_aer_enabled;
5167 int rc;
5169 /* Restart HBA */
5170 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5171 "0296 Restart HBA Data: x%x x%x\n",
5172 phba->pport->port_state, psli->sli_flag);
5174 /* Take PCIe device Advanced Error Reporting (AER) state */
5175 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5177 rc = lpfc_sli4_brdreset(phba);
5178 if (rc) {
5179 phba->link_state = LPFC_HBA_ERROR;
5180 goto hba_down_queue;
5181 }
5183 spin_lock_irq(&phba->hbalock);
5184 phba->pport->stopped = 0;
5185 phba->link_state = LPFC_INIT_START;
5186 phba->hba_flag = 0;
5187 spin_unlock_irq(&phba->hbalock);
5189 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5190 psli->stats_start = ktime_get_seconds();
5192 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5193 if (hba_aer_enabled)
5194 pci_disable_pcie_error_reporting(phba->pcidev);
5196 hba_down_queue:
5197 lpfc_hba_down_post(phba);
5198 lpfc_sli4_queue_destroy(phba);
5200 return rc;
5201 }
5204 * lpfc_sli_brdrestart - Wrapper func for restarting hba
5205 * @phba: Pointer to HBA context object.
5207 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
5208 * API jump table function pointer from the lpfc_hba struct.
5211 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5212 {
5213 return phba->lpfc_sli_brdrestart(phba);
5214 }
5217 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5218 * @phba: Pointer to HBA context object.
5220 * This function is called after a HBA restart to wait for successful
5221 * restart of the HBA. Successful restart of the HBA is indicated by
5222 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
5223 * iterations, the function will restart the HBA again. The function returns
5224 * zero if HBA successfully restarted else returns negative error code.
5227 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5228 {
5229 uint32_t status, i = 0;
5231 /* Read the HBA Host Status Register */
5232 if (lpfc_readl(phba->HSregaddr, &status))
5233 return -EIO;
5235 /* Check status register to see what current state is */
5237 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5239 /* Check every 10ms for 10 retries, then every 100ms for 90
5240 * retries, then every 1 sec for 50 retries, for a total of
5241 * ~60 seconds before resetting the board again and checking
5242 * every 1 sec for 50 more retries. The up-to-60-second wait
5243 * before board ready is required for the Falcon FIPS
5244 * zeroization to complete; any board reset in between would
5245 * restart zeroization and further delay board readiness.
5246 */
5247 if (i++ >= 1000) {
5248 /* Adapter failed to init, timeout, status reg
5249 <status> */
5250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5251 "0436 Adapter failed to init, "
5252 "timeout, status reg x%x, "
5253 "FW Data: A8 x%x AC x%x\n", status,
5254 readl(phba->MBslimaddr + 0xa8),
5255 readl(phba->MBslimaddr + 0xac));
5256 phba->link_state = LPFC_HBA_ERROR;
5257 return -ETIMEDOUT;
5258 }
5260 /* Check to see if any errors occurred during init */
5261 if (status & HS_FFERM) {
5262 /* ERROR: During chipset initialization */
5263 /* Adapter failed to init, chipset, status reg
5264 <status> */
5265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5266 "0437 Adapter failed to init, "
5267 "chipset, status reg x%x, "
5268 "FW Data: A8 x%x AC x%x\n", status,
5269 readl(phba->MBslimaddr + 0xa8),
5270 readl(phba->MBslimaddr + 0xac));
5271 phba->link_state = LPFC_HBA_ERROR;
5272 return -EIO;
5273 }
5275 if (i <= 10)
5276 msleep(10);
5277 else if (i <= 100)
5278 msleep(100);
5279 else
5280 msleep(1000);
5282 if (i == 150) {
5283 /* Do post */
5284 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5285 lpfc_sli_brdrestart(phba);
5286 }
5287 /* Read the HBA Host Status Register */
5288 if (lpfc_readl(phba->HSregaddr, &status))
5289 return -EIO;
5290 }
5292 /* Check to see if any errors occurred during init */
5293 if (status & HS_FFERM) {
5294 /* ERROR: During chipset initialization */
5295 /* Adapter failed to init, chipset, status reg <status> */
5296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5297 "0438 Adapter failed to init, chipset, "
5298 "status reg x%x, "
5299 "FW Data: A8 x%x AC x%x\n", status,
5300 readl(phba->MBslimaddr + 0xa8),
5301 readl(phba->MBslimaddr + 0xac));
5302 phba->link_state = LPFC_HBA_ERROR;
5303 return -EIO;
5304 }
5306 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5308 /* Clear all interrupt enable conditions */
5309 writel(0, phba->HCregaddr);
5310 readl(phba->HCregaddr); /* flush */
5312 /* setup host attn register */
5313 writel(0xffffffff, phba->HAregaddr);
5314 readl(phba->HAregaddr); /* flush */
5316 return 0;
5317 }
5319 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5321 * This function calculates and returns the number of HBQs required to be
5322 * configured.
5323 **/
5324 static int
5325 lpfc_sli_hbq_count(void)
5326 {
5327 return ARRAY_SIZE(lpfc_hbq_defs);
5328 }
5331 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5333 * This function adds the number of hbq entries in every HBQ to get
5334 * the total number of hbq entries required for the HBA and returns
5335 * the result.
5336 **/
5337 static int
5338 lpfc_sli_hbq_entry_count(void)
5339 {
5340 int hbq_count = lpfc_sli_hbq_count();
5341 int count = 0;
5342 int i;
5344 for (i = 0; i < hbq_count; ++i)
5345 count += lpfc_hbq_defs[i]->entry_count;
5347 return count;
5348 }
5350 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5352 * This function calculates amount of memory required for all hbq entries
5353 * to be configured and returns the total memory required.
5354 **/
5355 static int
5356 lpfc_sli_hbq_size(void)
5357 {
5358 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5359 }
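/*
 * Sketch of how these helpers compose: the total HBQ memory footprint is
 * the per-HBQ entry counts summed by lpfc_sli_hbq_entry_count(), times
 * the fixed sizeof(struct lpfc_hbq_entry), suggesting a single
 * contiguous allocation can back every configured HBQ.
 */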
5362 * lpfc_sli_hbq_setup - configure and initialize HBQs
5363 * @phba: Pointer to HBA context object.
5365 * This function is called during the SLI initialization to configure
5366 * all the HBQs and post buffers to the HBQ. The caller is not
5367 * required to hold any locks. This function will return zero if successful
5368 * else it will return negative error code.
5371 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5372 {
5373 int hbq_count = lpfc_sli_hbq_count();
5374 LPFC_MBOXQ_t *pmb;
5375 MAILBOX_t *pmbox;
5376 uint32_t hbqno;
5377 uint32_t hbq_entry_index;
5379 /* Get a Mailbox buffer to setup mailbox
5380 * commands for HBA initialization
5382 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5384 if (!pmb)
5385 return -ENOMEM;
5387 pmbox = &pmb->u.mb;
5389 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5390 phba->link_state = LPFC_INIT_MBX_CMDS;
5391 phba->hbq_in_use = 1;
5393 hbq_entry_index = 0;
5394 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5395 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5396 phba->hbqs[hbqno].hbqPutIdx = 0;
5397 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5398 phba->hbqs[hbqno].entry_count =
5399 lpfc_hbq_defs[hbqno]->entry_count;
5400 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5401 hbq_entry_index, pmb);
5402 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5404 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5405 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5406 mbxStatus <status>, ring <num> */
5408 lpfc_printf_log(phba, KERN_ERR,
5409 LOG_SLI | LOG_VPORT,
5410 "1805 Adapter failed to init. "
5411 "Data: x%x x%x x%x\n",
5412 pmbox->mbxCommand,
5413 pmbox->mbxStatus, hbqno);
5415 phba->link_state = LPFC_HBA_ERROR;
5416 mempool_free(pmb, phba->mbox_mem_pool);
5417 return -ENXIO;
5418 }
5419 }
5420 phba->hbq_count = hbq_count;
5422 mempool_free(pmb, phba->mbox_mem_pool);
5424 /* Initially populate or replenish the HBQs */
5425 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5426 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5428 return 0;
5429 }
5431 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5432 * @phba: Pointer to HBA context object.
5434 * This function is called during the SLI initialization to configure
5435 * all the HBQs and post buffers to the HBQ. The caller is not
5436 * required to hold any locks. This function will return zero if successful
5437 * else it will return negative error code.
5440 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5441 {
5442 phba->hbq_in_use = 1;
5443 /*
5444 * Specific case when the MDS diagnostics is enabled and supported.
5445 * The receive buffer count is truncated to manage the incoming
5446 * traffic.
5447 */
5448 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5449 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5450 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5451 else
5452 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5453 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5454 phba->hbq_count = 1;
5455 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5456 /* Initially populate or replenish the HBQs */
5457 return 0;
5458 }
5461 * lpfc_sli_config_port - Issue config port mailbox command
5462 * @phba: Pointer to HBA context object.
5463 * @sli_mode: sli mode - 2/3
5465 * This function is called by the sli initialization code path
5466 * to issue config_port mailbox command. This function restarts the
5467 * HBA firmware and issues a config_port mailbox command to configure
5468 * the SLI interface in the sli mode specified by sli_mode
5469 * variable. The caller is not required to hold any locks.
5470 * The function returns 0 if successful, else returns negative error
5471 * code.
5472 **/
5473 int
5474 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5475 {
5476 LPFC_MBOXQ_t *pmb;
5477 uint32_t resetcount = 0, rc = 0, done = 0;
5479 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5480 if (!pmb) {
5481 phba->link_state = LPFC_HBA_ERROR;
5482 return -ENOMEM;
5483 }
5485 phba->sli_rev = sli_mode;
5486 while (resetcount < 2 && !done) {
5487 spin_lock_irq(&phba->hbalock);
5488 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5489 spin_unlock_irq(&phba->hbalock);
5490 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5491 lpfc_sli_brdrestart(phba);
5492 rc = lpfc_sli_chipset_init(phba);
5493 if (rc)
5494 break;
5496 spin_lock_irq(&phba->hbalock);
5497 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5498 spin_unlock_irq(&phba->hbalock);
5499 resetcount++;
5501 /* Call pre CONFIG_PORT mailbox command initialization. A
5502 * value of 0 means the call was successful. Any other
5503 * nonzero value is a failure, but if ERESTART is returned,
5504 * the driver may reset the HBA and try again.
5506 rc = lpfc_config_port_prep(phba);
5507 if (rc == -ERESTART) {
5508 phba->link_state = LPFC_LINK_UNKNOWN;
5509 continue;
5510 } else if (rc)
5511 break;
5513 phba->link_state = LPFC_INIT_MBX_CMDS;
5514 lpfc_config_port(phba, pmb);
5515 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5516 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5517 LPFC_SLI3_HBQ_ENABLED |
5518 LPFC_SLI3_CRP_ENABLED |
5519 LPFC_SLI3_DSS_ENABLED);
5520 if (rc != MBX_SUCCESS) {
5521 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5522 "0442 Adapter failed to init, mbxCmd x%x "
5523 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5524 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5525 spin_lock_irq(&phba->hbalock);
5526 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5527 spin_unlock_irq(&phba->hbalock);
5528 rc = -ENXIO;
5529 } else {
5530 /* Allow asynchronous mailbox command to go through */
5531 spin_lock_irq(&phba->hbalock);
5532 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5533 spin_unlock_irq(&phba->hbalock);
5534 done = 1;
5536 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5537 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5538 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5539 "3110 Port did not grant ASABT\n");
5540 }
5541 }
5542 if (!done) {
5543 rc = -EINVAL;
5544 goto do_prep_failed;
5545 }
5546 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5547 if (!pmb->u.mb.un.varCfgPort.cMA) {
5548 rc = -ENXIO;
5549 goto do_prep_failed;
5550 }
5551 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5552 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5553 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5554 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5555 phba->max_vpi : phba->max_vports;
5556 } else
5557 phba->max_vpi = 0;
5559 if (pmb->u.mb.un.varCfgPort.gerbm)
5560 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5561 if (pmb->u.mb.un.varCfgPort.gcrp)
5562 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5564 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5565 phba->port_gp = phba->mbox->us.s3_pgp.port;
5567 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5568 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5569 phba->cfg_enable_bg = 0;
5570 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5571 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5572 "0443 Adapter did not grant "
5573 "BlockGuard\n");
5574 }
5575 }
5576 } else {
5577 phba->hbq_get = NULL;
5578 phba->port_gp = phba->mbox->us.s2.port;
5579 phba->max_vpi = 0;
5580 }
5581 do_prep_failed:
5582 mempool_free(pmb, phba->mbox_mem_pool);
5583 return rc;
5584 }
5588 * lpfc_sli_hba_setup - SLI initialization function
5589 * @phba: Pointer to HBA context object.
5591 * This function is the main SLI initialization function. This function
5592 * is called by the HBA initialization code, HBA reset code and HBA
5593 * error attention handler code. Caller is not required to hold any
5594 * locks. This function issues config_port mailbox command to configure
5595 * the SLI, setup iocb rings and HBQ rings. In the end the function
5596 * calls the config_port_post function to issue init_link mailbox
5597 * command and to start the discovery. The function will return zero
5598 * if successful, else it will return negative error code.
5601 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5602 {
5603 uint32_t rc;
5604 int i;
5605 int longs;
5607 /* Enable ISR already does config_port because of config_msi mbx */
5608 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5609 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5610 if (rc)
5611 return -ENXIO;
5612 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5613 }
5614 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5616 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5617 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5618 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5619 if (!rc) {
5620 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5621 "2709 This device supports "
5622 "Advanced Error Reporting (AER)\n");
5623 spin_lock_irq(&phba->hbalock);
5624 phba->hba_flag |= HBA_AER_ENABLED;
5625 spin_unlock_irq(&phba->hbalock);
5626 } else {
5627 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5628 "2708 This device does not support "
5629 "Advanced Error Reporting (AER): %d\n",
5630 rc);
5631 phba->cfg_aer_support = 0;
5632 }
5633 }
5635 if (phba->sli_rev == 3) {
5636 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5637 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5638 } else {
5639 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5640 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5641 phba->sli3_options = 0;
5642 }
5644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5645 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5646 phba->sli_rev, phba->max_vpi);
5647 rc = lpfc_sli_ring_map(phba);
5649 if (rc)
5650 goto lpfc_sli_hba_setup_error;
5652 /* Initialize VPIs. */
5653 if (phba->sli_rev == LPFC_SLI_REV3) {
5655 * The VPI bitmask and physical ID array are allocated
5656 * and initialized once only - at driver load. A port
5657 * reset doesn't need to reinitialize this memory.
5659 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
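/*
 * One bit per VPI, rounded up to whole longs; e.g. with max_vpi = 100
 * on a 64-bit host this yields (100 + 64) / 64 = 2 longs (128 bits).
 */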
5660 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5661 phba->vpi_bmask = kcalloc(longs,
5662 sizeof(unsigned long),
5663 GFP_KERNEL);
5664 if (!phba->vpi_bmask) {
5665 rc = -ENOMEM;
5666 goto lpfc_sli_hba_setup_error;
5667 }
5669 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5670 sizeof(uint16_t),
5671 GFP_KERNEL);
5672 if (!phba->vpi_ids) {
5673 kfree(phba->vpi_bmask);
5674 rc = -ENOMEM;
5675 goto lpfc_sli_hba_setup_error;
5676 }
5677 for (i = 0; i < phba->max_vpi; i++)
5678 phba->vpi_ids[i] = i;
5683 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5684 rc = lpfc_sli_hbq_setup(phba);
5685 if (rc)
5686 goto lpfc_sli_hba_setup_error;
5687 }
5688 spin_lock_irq(&phba->hbalock);
5689 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5690 spin_unlock_irq(&phba->hbalock);
5692 rc = lpfc_config_port_post(phba);
5693 if (rc)
5694 goto lpfc_sli_hba_setup_error;
5696 return rc;
5698 lpfc_sli_hba_setup_error:
5699 phba->link_state = LPFC_HBA_ERROR;
5700 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5701 "0445 Firmware initialization failed\n");
5702 return rc;
5703 }
5706 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5707 * @phba: Pointer to HBA context object.
5709 * This function issues a dump mailbox command to read config region
5710 * 23, parses the records in the region, and populates the driver
5711 * data structures with the FCoE parameters.
5712 **/
5713 static int
5714 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5715 {
5716 LPFC_MBOXQ_t *mboxq;
5717 struct lpfc_dmabuf *mp;
5718 struct lpfc_mqe *mqe;
5719 uint32_t data_length;
5720 int rc;
5722 /* Program the default value of vlan_id and fc_map */
5723 phba->valid_vlan = 0;
5724 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5725 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5726 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5728 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5729 if (!mboxq)
5730 return -ENOMEM;
5732 mqe = &mboxq->u.mqe;
5733 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5734 rc = -ENOMEM;
5735 goto out_free_mboxq;
5736 }
5738 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5739 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5740 if (rc) {
5741 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5742 "(%d):2571 Mailbox cmd x%x Status x%x "
5743 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5744 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5745 "CQ: x%x x%x x%x x%x\n",
5746 mboxq->vport ? mboxq->vport->vpi : 0,
5747 bf_get(lpfc_mqe_command, mqe),
5748 bf_get(lpfc_mqe_status, mqe),
5749 mqe->un.mb_words[0], mqe->un.mb_words[1],
5750 mqe->un.mb_words[2], mqe->un.mb_words[3],
5751 mqe->un.mb_words[4], mqe->un.mb_words[5],
5752 mqe->un.mb_words[6], mqe->un.mb_words[7],
5753 mqe->un.mb_words[8], mqe->un.mb_words[9],
5754 mqe->un.mb_words[10], mqe->un.mb_words[11],
5755 mqe->un.mb_words[12], mqe->un.mb_words[13],
5756 mqe->un.mb_words[14], mqe->un.mb_words[15],
5757 mqe->un.mb_words[16], mqe->un.mb_words[50],
5758 mboxq->mcqe.word0,
5759 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5760 mboxq->mcqe.trailer);
5763 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5764 kfree(mp);
5765 rc = -EIO;
5766 goto out_free_mboxq;
5767 }
5768 data_length = mqe->un.mb_words[5];
5769 if (data_length > DMP_RGN23_SIZE) {
5770 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5771 kfree(mp);
5772 rc = -EIO;
5773 goto out_free_mboxq;
5774 }
5776 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5777 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5778 kfree(mp);
5779 rc = 0;
5781 out_free_mboxq:
5782 mempool_free(mboxq, phba->mbox_mem_pool);
5783 return rc;
5784 }
5787 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5788 * @phba: pointer to lpfc hba data structure.
5789 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5790 * @vpd: pointer to the memory to hold resulting port vpd data.
5791 * @vpd_size: On input, the number of bytes allocated to @vpd.
5792 * On output, the number of data bytes in @vpd.
5794 * This routine executes a READ_REV SLI4 mailbox command. In
5795 * addition, this routine gets the port vpd data.
5796 *
5797 * Return codes
5798 * 0 - successful
5799 * -ENOMEM - could not allocate memory.
5802 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5803 uint8_t *vpd, uint32_t *vpd_size)
5807 struct lpfc_dmabuf *dmabuf;
5808 struct lpfc_mqe *mqe;
5810 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5811 if (!dmabuf)
5812 return -ENOMEM;
5814 /*
5815 * Get a DMA buffer for the vpd data resulting from the READ_REV
5816 * mailbox command.
5817 */
5818 dma_size = *vpd_size;
5819 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5820 &dmabuf->phys, GFP_KERNEL);
5821 if (!dmabuf->virt) {
5822 kfree(dmabuf);
5823 return -ENOMEM;
5824 }
5826 /*
5827 * The SLI4 implementation of READ_REV conflicts at word1,
5828 * bits 31:16 and SLI4 adds vpd functionality not present
5829 * in SLI3. This code corrects the conflicts.
5831 lpfc_read_rev(phba, mboxq);
5832 mqe = &mboxq->u.mqe;
5833 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5834 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5835 mqe->un.read_rev.word1 &= 0x0000FFFF;
5836 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5837 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
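/*
 * The VPD buffer is handed to the port as a split 64-bit DMA address
 * (putPaddrHigh/putPaddrLow), with the available length advertised so
 * the port cannot DMA past the end of the buffer.
 */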
5839 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5840 if (rc) {
5841 dma_free_coherent(&phba->pcidev->dev, dma_size,
5842 dmabuf->virt, dmabuf->phys);
5843 kfree(dmabuf);
5844 return -EIO;
5845 }
5847 /*
5848 * The available vpd length cannot be bigger than the
5849 * DMA buffer passed to the port. Catch the less than
5850 * case and update the caller's size.
5852 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5853 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5855 memcpy(vpd, dmabuf->virt, *vpd_size);
5857 dma_free_coherent(&phba->pcidev->dev, dma_size,
5858 dmabuf->virt, dmabuf->phys);
5859 kfree(dmabuf);
5861 return 0;
5862 }
5864 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5865 * @phba: pointer to lpfc hba data structure.
5867 * This routine retrieves the SLI4 device controller attributes for the
5868 * PCI function it is attached to.
5870 * Return codes
5871 * 0 - successful
5872 * otherwise - failed to retrieve controller attributes
5875 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5876 {
5877 LPFC_MBOXQ_t *mboxq;
5878 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5879 struct lpfc_controller_attribute *cntl_attr;
5880 void *virtaddr = NULL;
5881 uint32_t alloclen, reqlen;
5882 uint32_t shdr_status, shdr_add_status;
5883 union lpfc_sli4_cfg_shdr *shdr;
5884 int rc;
5886 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5887 if (!mboxq)
5888 return -ENOMEM;
5890 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5891 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5892 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5893 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5894 LPFC_SLI4_MBX_NEMBED);
5896 if (alloclen < reqlen) {
5897 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5898 "3084 Allocated DMA memory size (%d) is "
5899 "less than the requested DMA memory size "
5900 "(%d)\n", alloclen, reqlen);
5901 rc = -ENOMEM;
5902 goto out_free_mboxq;
5903 }
5904 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5905 virtaddr = mboxq->sge_array->addr[0];
5906 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5907 shdr = &mbx_cntl_attr->cfg_shdr;
5908 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5909 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5910 if (shdr_status || shdr_add_status || rc) {
5911 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5912 "3085 Mailbox x%x (x%x/x%x) failed, "
5913 "rc:x%x, status:x%x, add_status:x%x\n",
5914 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5915 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5916 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5917 rc, shdr_status, shdr_add_status);
5918 rc = -ENXIO;
5919 goto out_free_mboxq;
5920 }
5922 cntl_attr = &mbx_cntl_attr->cntl_attr;
5923 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5924 phba->sli4_hba.lnk_info.lnk_tp =
5925 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5926 phba->sli4_hba.lnk_info.lnk_no =
5927 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5928 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
5929 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
5931 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5932 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5933 sizeof(phba->BIOSVersion));
5935 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5936 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
5937 "flash_id: x%02x, asic_rev: x%02x\n",
5938 phba->sli4_hba.lnk_info.lnk_tp,
5939 phba->sli4_hba.lnk_info.lnk_no,
5940 phba->BIOSVersion, phba->sli4_hba.flash_id,
5941 phba->sli4_hba.asic_rev);
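/*
 * Non-embedded SLI4_CONFIG mailboxes carry externally allocated SGE
 * pages, so they must be released through the SLI4-specific free
 * routine rather than straight back to the mempool.
 */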
5942 out_free_mboxq:
5943 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5944 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5945 else
5946 mempool_free(mboxq, phba->mbox_mem_pool);
5947 return rc;
5948 }
5951 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5952 * @phba: pointer to lpfc hba data structure.
5954 * This routine retrieves the SLI4 device physical port name the PCI function
5955 * is attached to.
5957 * Return codes
5958 * 0 - successful
5959 * otherwise - failed to retrieve physical port name
5960 **/
5961 static int
5962 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5963 {
5964 LPFC_MBOXQ_t *mboxq;
5965 struct lpfc_mbx_get_port_name *get_port_name;
5966 uint32_t shdr_status, shdr_add_status;
5967 union lpfc_sli4_cfg_shdr *shdr;
5968 char cport_name = 0;
5971 /* We assume nothing at this point */
5972 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5973 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5975 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5978 /* obtain link type and link number via READ_CONFIG */
5979 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5980 lpfc_sli4_read_config(phba);
5981 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5982 goto retrieve_ppname;
5984 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5985 rc = lpfc_sli4_get_ctl_attr(phba);
5987 goto out_free_mboxq;
5990 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5991 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5992 sizeof(struct lpfc_mbx_get_port_name) -
5993 sizeof(struct lpfc_sli4_cfg_mhdr),
5994 LPFC_SLI4_MBX_EMBED);
5995 get_port_name = &mboxq->u.mqe.un.get_port_name;
5996 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5997 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5998 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5999 phba->sli4_hba.lnk_info.lnk_tp);
6000 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6001 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6002 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6003 if (shdr_status || shdr_add_status || rc) {
6004 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6005 "3087 Mailbox x%x (x%x/x%x) failed: "
6006 "rc:x%x, status:x%x, add_status:x%x\n",
6007 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6008 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6009 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6010 rc, shdr_status, shdr_add_status);
6012 goto out_free_mboxq;
6014 switch (phba->sli4_hba.lnk_info.lnk_no) {
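/* The GET_PORT_NAME response carries one name character per link; pick
 * the one matching this function's link number.
 */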
6015 case LPFC_LINK_NUMBER_0:
6016 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6017 &get_port_name->u.response);
6018 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6020 case LPFC_LINK_NUMBER_1:
6021 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6022 &get_port_name->u.response);
6023 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6025 case LPFC_LINK_NUMBER_2:
6026 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6027 &get_port_name->u.response);
6028 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6030 case LPFC_LINK_NUMBER_3:
6031 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6032 &get_port_name->u.response);
6033 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6039 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6040 phba->Port[0] = cport_name;
6041 phba->Port[1] = '\0';
6042 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6043 "3091 SLI get port name: %s\n", phba->Port);
6047 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6048 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6050 mempool_free(mboxq, phba->mbox_mem_pool);
6055 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6056 * @phba: pointer to lpfc hba data structure.
6058 * This routine is called to explicitly arm the SLI4 device's completion and event queues.
6062 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6065 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6066 struct lpfc_sli4_hdw_queue *qp;
6067 struct lpfc_queue *eq;
6069 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6070 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
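/* Rearming a queue re-enables its interrupt and signals the port that the
 * host has consumed the entries posted so far.
 */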
6071 if (sli4_hba->nvmels_cq)
6072 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6075 if (sli4_hba->hdwq) {
6076 /* Loop thru all Hardware Queues */
6077 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6078 qp = &sli4_hba->hdwq[qidx];
6079 /* ARM the corresponding CQ */
6080 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6084 /* Loop thru all IRQ vectors */
6085 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6086 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6087 /* ARM the corresponding EQ */
6088 sli4_hba->sli4_write_eq_db(phba, eq,
6089 0, LPFC_QUEUE_REARM);
6093 if (phba->nvmet_support) {
6094 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6095 sli4_hba->sli4_write_cq_db(phba,
6096 sli4_hba->nvmet_cqset[qidx], 0,
6103 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6104 * @phba: Pointer to HBA context object.
6105 * @type: The resource extent type.
6106 * @extnt_count: buffer to hold port available extent count.
6107 * @extnt_size: buffer to hold element count per extent.
6109 * This function calls the port and retrieves the number of available
6110 * extents and their size for a particular extent type.
6112 * Returns: 0 if successful. Nonzero otherwise.
6115 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6116 uint16_t *extnt_count, uint16_t *extnt_size)
6121 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6124 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6128 /* Find out how many extents are available for this resource type */
6129 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6130 sizeof(struct lpfc_sli4_cfg_mhdr));
6131 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6132 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6133 length, LPFC_SLI4_MBX_EMBED);
6135 /* Send an extents count of 0 - the GET doesn't use it. */
6136 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6137 LPFC_SLI4_MBX_EMBED);
6143 if (!phba->sli4_hba.intr_enable)
6144 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6146 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6147 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6154 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6155 if (bf_get(lpfc_mbox_hdr_status,
6156 &rsrc_info->header.cfg_shdr.response)) {
6157 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6158 "2930 Failed to get resource extents "
6159 "Status 0x%x Add'l Status 0x%x\n",
6160 bf_get(lpfc_mbox_hdr_status,
6161 &rsrc_info->header.cfg_shdr.response),
6162 bf_get(lpfc_mbox_hdr_add_status,
6163 &rsrc_info->header.cfg_shdr.response));
6168 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6170 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6173 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6174 "3162 Retrieved extents type-%d from port: count:%d, "
6175 "size:%d\n", type, *extnt_count, *extnt_size);
6178 mempool_free(mbox, phba->mbox_mem_pool);
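/*
 * Example (hypothetical) caller, querying how many XRI extents the port
 * can provide:
 *
 *	uint16_t cnt, size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size))
 *		pr_info("port offers %u extents of %u XRIs each\n",
 *			cnt, size);
 */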
6183 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6184 * @phba: Pointer to HBA context object.
6185 * @type: The extent type to check.
6187 * This function reads the current available extents from the port and checks
6188 * if the extent count or extent size has changed since the last access.
6189 * Callers use this routine after a port reset to determine whether an
6190 * extent reprovisioning requirement exists.
6193 * -Error: error indicates problem.
6194 * 1: Extent count or size has changed.
6198 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6200 uint16_t curr_ext_cnt, rsrc_ext_cnt;
6201 uint16_t size_diff, rsrc_ext_size;
6203 struct lpfc_rsrc_blks *rsrc_entry;
6204 struct list_head *rsrc_blk_list = NULL;
6208 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6215 case LPFC_RSC_TYPE_FCOE_RPI:
6216 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6218 case LPFC_RSC_TYPE_FCOE_VPI:
6219 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6221 case LPFC_RSC_TYPE_FCOE_XRI:
6222 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6224 case LPFC_RSC_TYPE_FCOE_VFI:
6225 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6231 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6233 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6237 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6244 * lpfc_sli4_cfg_post_extnts - Post a request to allocate resource extents.
6245 * @phba: Pointer to HBA context object.
6246 * @extnt_cnt: number of available extents.
6247 * @type: the extent type (rpi, xri, vfi, vpi).
6248 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6249 * @mbox: pointer to the caller's allocated mailbox structure.
6251 * This function executes the extents allocation request. It also
6252 * takes care of the amount of memory needed to allocate or get the
6253 * allocated extents. It is the caller's responsibility to evaluate the response.
6257 * -Error: Error value describes the condition found.
6261 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6262 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6267 uint32_t alloc_len, mbox_tmo;
6269 /* Calculate the total requested length of the dma memory */
6270 req_len = extnt_cnt * sizeof(uint16_t);
6273 * Calculate the size of an embedded mailbox. The uint32_t
6274 * accounts for the extents-specific word.
6276 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6280 * Presume the allocation and response will fit into an embedded
6281 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6283 *emb = LPFC_SLI4_MBX_EMBED;
6284 if (req_len > emb_len) {
6285 req_len = extnt_cnt * sizeof(uint16_t) +
6286 sizeof(union lpfc_sli4_cfg_shdr) +
6288 *emb = LPFC_SLI4_MBX_NEMBED;
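/* A non-embedded mailbox returns the extent ids in external pages, so
 * the request length must also cover the config shdr that travels with
 * the payload.
 */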
6291 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6292 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6294 if (alloc_len < req_len) {
6295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6296 "2982 Allocated DMA memory size (x%x) is "
6297 "less than the requested DMA memory "
6298 "size (x%x)\n", alloc_len, req_len);
6301 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6305 if (!phba->sli4_hba.intr_enable)
6306 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6308 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6309 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6318 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6319 * @phba: Pointer to HBA context object.
6320 * @type: The resource extent type to allocate.
6322 * This function allocates the number of elements for the specified
6326 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6329 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6330 uint16_t rsrc_id, rsrc_start, j, k;
6333 unsigned long longs;
6334 unsigned long *bmask;
6335 struct lpfc_rsrc_blks *rsrc_blks;
6338 struct lpfc_id_range *id_array = NULL;
6339 void *virtaddr = NULL;
6340 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6341 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6342 struct list_head *ext_blk_list;
6344 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6350 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6352 "3009 No available Resource Extents "
6353 "for resource type 0x%x: Count: 0x%x, "
6354 "Size 0x%x\n", type, rsrc_cnt,
6359 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6360 "2903 Post resource extents type-0x%x: "
6361 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6363 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6367 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6374 * Figure out where the response is located. Then get local pointers
6375 * to the response data. The port does not guarantee to respond to
6376 * all extent count requests, so update the local variable with the
6377 * allocated count from the port.
6379 if (emb == LPFC_SLI4_MBX_EMBED) {
6380 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6381 id_array = &rsrc_ext->u.rsp.id[0];
6382 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6384 virtaddr = mbox->sge_array->addr[0];
6385 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6386 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6387 id_array = &n_rsrc->id;
6390 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6391 rsrc_id_cnt = rsrc_cnt * rsrc_size;
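/* One bit per resource id: the bitmask is sized in whole unsigned longs
 * and the ids[] array holds every id across all allocated extents.
 */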
6394 * Based on the resource size and count, correct the base and max resource values.
6397 length = sizeof(struct lpfc_rsrc_blks);
6399 case LPFC_RSC_TYPE_FCOE_RPI:
6400 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6401 sizeof(unsigned long),
6403 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6407 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6410 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6411 kfree(phba->sli4_hba.rpi_bmask);
6417 * The next_rpi was initialized with the maximum available
6418 * count but the port may allocate a smaller number. Catch
6419 * that case and update the next_rpi.
6421 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6423 /* Initialize local ptrs for common extent processing later. */
6424 bmask = phba->sli4_hba.rpi_bmask;
6425 ids = phba->sli4_hba.rpi_ids;
6426 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6428 case LPFC_RSC_TYPE_FCOE_VPI:
6429 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6431 if (unlikely(!phba->vpi_bmask)) {
6435 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6437 if (unlikely(!phba->vpi_ids)) {
6438 kfree(phba->vpi_bmask);
6443 /* Initialize local ptrs for common extent processing later. */
6444 bmask = phba->vpi_bmask;
6445 ids = phba->vpi_ids;
6446 ext_blk_list = &phba->lpfc_vpi_blk_list;
6448 case LPFC_RSC_TYPE_FCOE_XRI:
6449 phba->sli4_hba.xri_bmask = kcalloc(longs,
6450 sizeof(unsigned long),
6452 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6456 phba->sli4_hba.max_cfg_param.xri_used = 0;
6457 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6460 if (unlikely(!phba->sli4_hba.xri_ids)) {
6461 kfree(phba->sli4_hba.xri_bmask);
6466 /* Initialize local ptrs for common extent processing later. */
6467 bmask = phba->sli4_hba.xri_bmask;
6468 ids = phba->sli4_hba.xri_ids;
6469 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6471 case LPFC_RSC_TYPE_FCOE_VFI:
6472 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6473 sizeof(unsigned long),
6475 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6479 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6482 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6483 kfree(phba->sli4_hba.vfi_bmask);
6488 /* Initialize local ptrs for common extent processing later. */
6489 bmask = phba->sli4_hba.vfi_bmask;
6490 ids = phba->sli4_hba.vfi_ids;
6491 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6494 /* Unsupported Opcode. Fail call. */
6498 ext_blk_list = NULL;
6503 * Complete initializing the extent configuration with the
6504 * allocated ids assigned to this function. The bitmask serves
6505 * as an index into the array and manages the available ids. The
6506 * array just stores the ids communicated to the port via the wqes.
6508 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6510 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6513 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6516 rsrc_blks = kzalloc(length, GFP_KERNEL);
6517 if (unlikely(!rsrc_blks)) {
6523 rsrc_blks->rsrc_start = rsrc_id;
6524 rsrc_blks->rsrc_size = rsrc_size;
6525 list_add_tail(&rsrc_blks->list, ext_blk_list);
6526 rsrc_start = rsrc_id;
6527 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6528 phba->sli4_hba.io_xri_start = rsrc_start +
6529 lpfc_sli4_get_iocb_cnt(phba);
6532 while (rsrc_id < (rsrc_start + rsrc_size)) {
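/* Walk every id in this extent, recording it in ids[]; the bitmask
 * position and the array index advance together.
 */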
6537 /* Entire word processed. Get next word. */
6542 lpfc_sli4_mbox_cmd_free(phba, mbox);
6549 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6550 * @phba: Pointer to HBA context object.
6551 * @type: the extent's type.
6553 * This function deallocates all extents of a particular resource type.
6554 * SLI4 does not allow for deallocating a particular extent range. It
6555 * is the caller's responsibility to release all kernel memory resources.
6558 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6561 uint32_t length, mbox_tmo = 0;
6563 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6564 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6566 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6571 * This function sends an embedded mailbox because it only sends the
6572 * resource type. All extents of this type are released by the port.
6575 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6576 sizeof(struct lpfc_sli4_cfg_mhdr));
6577 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6578 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6579 length, LPFC_SLI4_MBX_EMBED);
6581 /* Send an extents count of 0 - the dealloc doesn't use it. */
6582 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6583 LPFC_SLI4_MBX_EMBED);
6588 if (!phba->sli4_hba.intr_enable)
6589 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6591 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6592 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6599 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6600 if (bf_get(lpfc_mbox_hdr_status,
6601 &dealloc_rsrc->header.cfg_shdr.response)) {
6602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6603 "2919 Failed to release resource extents "
6604 "for type %d - Status 0x%x Add'l Status 0x%x. "
6605 "Resource memory not released.\n",
6607 bf_get(lpfc_mbox_hdr_status,
6608 &dealloc_rsrc->header.cfg_shdr.response),
6609 bf_get(lpfc_mbox_hdr_add_status,
6610 &dealloc_rsrc->header.cfg_shdr.response));
6615 /* Release kernel memory resources for the specific type. */
6617 case LPFC_RSC_TYPE_FCOE_VPI:
6618 kfree(phba->vpi_bmask);
6619 kfree(phba->vpi_ids);
6620 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6621 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6622 &phba->lpfc_vpi_blk_list, list) {
6623 list_del_init(&rsrc_blk->list);
6626 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6628 case LPFC_RSC_TYPE_FCOE_XRI:
6629 kfree(phba->sli4_hba.xri_bmask);
6630 kfree(phba->sli4_hba.xri_ids);
6631 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6632 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6633 list_del_init(&rsrc_blk->list);
6637 case LPFC_RSC_TYPE_FCOE_VFI:
6638 kfree(phba->sli4_hba.vfi_bmask);
6639 kfree(phba->sli4_hba.vfi_ids);
6640 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6641 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6642 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6643 list_del_init(&rsrc_blk->list);
6647 case LPFC_RSC_TYPE_FCOE_RPI:
6648 /* RPI bitmask and physical id array are cleaned up earlier. */
6649 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6650 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6651 list_del_init(&rsrc_blk->list);
6659 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6662 mempool_free(mbox, phba->mbox_mem_pool);
6667 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6673 len = sizeof(struct lpfc_mbx_set_feature) -
6674 sizeof(struct lpfc_sli4_cfg_mhdr);
6675 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6676 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6677 LPFC_SLI4_MBX_EMBED);
6680 case LPFC_SET_UE_RECOVERY:
6681 bf_set(lpfc_mbx_set_feature_UER,
6682 &mbox->u.mqe.un.set_feature, 1);
6683 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6684 mbox->u.mqe.un.set_feature.param_len = 8;
6686 case LPFC_SET_MDS_DIAGS:
6687 bf_set(lpfc_mbx_set_feature_mds,
6688 &mbox->u.mqe.un.set_feature, 1);
6689 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6690 &mbox->u.mqe.un.set_feature, 1);
6691 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6692 mbox->u.mqe.un.set_feature.param_len = 8;
6694 case LPFC_SET_CGN_SIGNAL:
6695 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6698 sig_freq = phba->cgn_sig_freq;
6700 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6701 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6702 &mbox->u.mqe.un.set_feature, sig_freq);
6703 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6704 &mbox->u.mqe.un.set_feature, sig_freq);
6707 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6708 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6709 &mbox->u.mqe.un.set_feature, sig_freq);
6711 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6712 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6715 sig_freq = lpfc_acqe_cgn_frequency;
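/* The ACQE reporting frequency comes from the driver-wide
 * lpfc_acqe_cgn_frequency value rather than the per-HBA signal frequency.
 */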
6717 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6718 &mbox->u.mqe.un.set_feature, sig_freq);
6720 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6721 mbox->u.mqe.un.set_feature.param_len = 12;
6723 case LPFC_SET_DUAL_DUMP:
6724 bf_set(lpfc_mbx_set_feature_dd,
6725 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6726 bf_set(lpfc_mbx_set_feature_ddquery,
6727 &mbox->u.mqe.un.set_feature, 0);
6728 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6729 mbox->u.mqe.un.set_feature.param_len = 4;
6731 case LPFC_SET_ENABLE_MI:
6732 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6733 mbox->u.mqe.un.set_feature.param_len = 4;
6734 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6735 phba->pport->cfg_lun_queue_depth);
6736 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6737 phba->sli4_hba.pc_sli4_params.mi_ver);
6739 case LPFC_SET_ENABLE_CMF:
6740 bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
6741 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6742 mbox->u.mqe.un.set_feature.param_len = 4;
6743 bf_set(lpfc_mbx_set_feature_cmf,
6744 &mbox->u.mqe.un.set_feature, 1);
6751 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6752 * @phba: Pointer to HBA context object.
6754 * Disable FW logging into host memory on the adapter. To
6755 * be done before reading logs from the host memory.
6758 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6760 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6762 spin_lock_irq(&phba->hbalock);
6763 ras_fwlog->state = INACTIVE;
6764 spin_unlock_irq(&phba->hbalock);
6766 /* Disable FW logging to host memory */
6767 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6768 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6770 /* Wait 10ms for firmware to stop using DMA buffer */
6771 usleep_range(10 * 1000, 20 * 1000);
6775 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6776 * @phba: Pointer to HBA context object.
6778 * This function is called to free memory allocated for RAS FW logging
6779 * support in the driver.
6782 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6784 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6785 struct lpfc_dmabuf *dmabuf, *next;
6787 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6788 list_for_each_entry_safe(dmabuf, next,
6789 &ras_fwlog->fwlog_buff_list,
6791 list_del(&dmabuf->list);
6792 dma_free_coherent(&phba->pcidev->dev,
6793 LPFC_RAS_MAX_ENTRY_SIZE,
6794 dmabuf->virt, dmabuf->phys);
6799 if (ras_fwlog->lwpd.virt) {
6800 dma_free_coherent(&phba->pcidev->dev,
6801 sizeof(uint32_t) * 2,
6802 ras_fwlog->lwpd.virt,
6803 ras_fwlog->lwpd.phys);
6804 ras_fwlog->lwpd.virt = NULL;
6807 spin_lock_irq(&phba->hbalock);
6808 ras_fwlog->state = INACTIVE;
6809 spin_unlock_irq(&phba->hbalock);
6813 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6814 * @phba: Pointer to HBA context object.
6815 * @fwlog_buff_count: Count of buffers to be created.
6817 * This routine allocates DMA memory for the Log Write Position Data [LWPD]
6818 * and for the buffers into which the adapter writes the FW log, then posts
6819 * them to the adapter. The buffer count is calculated from the module
6820 * parameter ras_fwlog_buffsize; each buffer posted to FW is 64KB.
6824 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6825 uint32_t fwlog_buff_count)
6827 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6828 struct lpfc_dmabuf *dmabuf;
6831 /* Initialize List */
6832 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6834 /* Allocate memory for the LWPD */
6835 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6836 sizeof(uint32_t) * 2,
6837 &ras_fwlog->lwpd.phys,
6839 if (!ras_fwlog->lwpd.virt) {
6840 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6841 "6185 LWPD Memory Alloc Failed\n");
6846 ras_fwlog->fw_buffcount = fwlog_buff_count;
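/* Allocate one LPFC_RAS_MAX_ENTRY_SIZE (64KB) DMA buffer per requested
 * log buffer.
 */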
6847 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6848 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6852 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6853 "6186 Memory Alloc failed FW logging");
6857 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6858 LPFC_RAS_MAX_ENTRY_SIZE,
6859 &dmabuf->phys, GFP_KERNEL);
6860 if (!dmabuf->virt) {
6863 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6864 "6187 DMA Alloc Failed FW logging");
6867 dmabuf->buffer_tag = i;
6868 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6873 lpfc_sli4_ras_dma_free(phba);
6879 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6880 * @phba: pointer to lpfc hba data structure.
6881 * @pmb: pointer to the driver internal queue element for mailbox command.
6883 * Completion handler for driver's RAS MBX command to the device.
6886 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6889 union lpfc_sli4_cfg_shdr *shdr;
6890 uint32_t shdr_status, shdr_add_status;
6891 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6895 shdr = (union lpfc_sli4_cfg_shdr *)
6896 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6897 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6898 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6900 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6901 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6902 "6188 FW LOG mailbox "
6903 "completed with status x%x add_status x%x,"
6904 " mbx status x%x\n",
6905 shdr_status, shdr_add_status, mb->mbxStatus);
6907 ras_fwlog->ras_hwsupport = false;
6911 spin_lock_irq(&phba->hbalock);
6912 ras_fwlog->state = ACTIVE;
6913 spin_unlock_irq(&phba->hbalock);
6914 mempool_free(pmb, phba->mbox_mem_pool);
6919 /* Free RAS DMA memory */
6920 lpfc_sli4_ras_dma_free(phba);
6921 mempool_free(pmb, phba->mbox_mem_pool);
6925 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6926 * @phba: pointer to lpfc hba data structure.
6927 * @fwlog_level: Logging verbosity level.
6928 * @fwlog_enable: Enable/Disable logging.
6930 * Initialize memory and post a mailbox command to enable FW logging in host memory.
6934 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6935 uint32_t fwlog_level,
6936 uint32_t fwlog_enable)
6938 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6939 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6940 struct lpfc_dmabuf *dmabuf;
6942 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6945 spin_lock_irq(&phba->hbalock);
6946 ras_fwlog->state = INACTIVE;
6947 spin_unlock_irq(&phba->hbalock);
6949 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6950 phba->cfg_ras_fwlog_buffsize);
6951 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6954 * If re-enabling FW logging support, use the earlier allocated
6955 * DMA buffers while posting the MBX command.
6957 if (!ras_fwlog->lwpd.virt) {
6958 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6960 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6961 "6189 FW Log Memory Allocation Failed");
6966 /* Setup Mailbox command */
6967 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6969 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6970 "6190 RAS MBX Alloc Failed");
6975 ras_fwlog->fw_loglevel = fwlog_level;
6976 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6977 sizeof(struct lpfc_sli4_cfg_mhdr));
6979 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6980 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6981 len, LPFC_SLI4_MBX_EMBED);
6983 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6984 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6986 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6987 ras_fwlog->fw_loglevel);
6988 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6989 ras_fwlog->fw_buffcount);
6990 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6991 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6993 /* Update DMA buffer address */
6994 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6995 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6997 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6998 putPaddrLow(dmabuf->phys);
7000 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7001 putPaddrHigh(dmabuf->phys);
7004 /* Update LWPD address */
7005 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7006 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
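/* Firmware updates the LWPD as it logs, letting the host track how much
 * of the log has been written.
 */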
7008 spin_lock_irq(&phba->hbalock);
7009 ras_fwlog->state = REG_INPROGRESS;
7010 spin_unlock_irq(&phba->hbalock);
7011 mbox->vport = phba->pport;
7012 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7014 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7016 if (rc == MBX_NOT_FINISHED) {
7017 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7018 "6191 FW-Log Mailbox failed. "
7019 "status %d mbxStatus : x%x", rc,
7020 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7021 mempool_free(mbox, phba->mbox_mem_pool);
7028 lpfc_sli4_ras_dma_free(phba);
7034 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7035 * @phba: Pointer to HBA context object.
7037 * Check if RAS is supported on the adapter and initialize it.
7040 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7042 /* Check whether RAS FW logging needs to be enabled */
7043 if (lpfc_check_fwlog_support(phba))
7046 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7047 LPFC_RAS_ENABLE_LOGGING);
7051 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7052 * @phba: Pointer to HBA context object.
7054 * This function allocates all SLI4 resource identifiers.
7057 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7059 int i, rc, error = 0;
7060 uint16_t count, base;
7061 unsigned long longs;
7063 if (!phba->sli4_hba.rpi_hdrs_in_use)
7064 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7065 if (phba->sli4_hba.extents_in_use) {
7067 * The port supports resource extents. The XRI, VPI, VFI, RPI
7068 * resource extent count must be read and allocated before
7069 * provisioning the resource id arrays.
7071 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7072 LPFC_IDX_RSRC_RDY) {
7074 * Extent-based resources are set - the driver could
7075 * be in a port reset. Figure out if any corrective
7076 * actions need to be taken.
7078 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7079 LPFC_RSC_TYPE_FCOE_VFI);
7082 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7083 LPFC_RSC_TYPE_FCOE_VPI);
7086 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7087 LPFC_RSC_TYPE_FCOE_XRI);
7090 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7091 LPFC_RSC_TYPE_FCOE_RPI);
7096 * It's possible that the number of resources
7097 * provided to this port instance changed between
7098 * resets. Detect this condition and reallocate
7099 * resources. Otherwise, there is no action.
7102 lpfc_printf_log(phba, KERN_INFO,
7103 LOG_MBOX | LOG_INIT,
7104 "2931 Detected extent resource "
7105 "change. Reallocating all "
7107 rc = lpfc_sli4_dealloc_extent(phba,
7108 LPFC_RSC_TYPE_FCOE_VFI);
7109 rc = lpfc_sli4_dealloc_extent(phba,
7110 LPFC_RSC_TYPE_FCOE_VPI);
7111 rc = lpfc_sli4_dealloc_extent(phba,
7112 LPFC_RSC_TYPE_FCOE_XRI);
7113 rc = lpfc_sli4_dealloc_extent(phba,
7114 LPFC_RSC_TYPE_FCOE_RPI);
7119 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7123 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7127 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7131 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7134 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7139 * The port does not support resource extents. The XRI, VPI,
7140 * VFI, RPI resource ids were determined from READ_CONFIG.
7141 * Just allocate the bitmasks and provision the resource id
7142 * arrays. If a port reset is active, the resources don't
7143 * need any action - just exit.
7145 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7146 LPFC_IDX_RSRC_RDY) {
7147 lpfc_sli4_dealloc_resource_identifiers(phba);
7148 lpfc_sli4_remove_rpis(phba);
7151 count = phba->sli4_hba.max_cfg_param.max_rpi;
7153 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7154 "3279 Invalid provisioning of "
7159 base = phba->sli4_hba.max_cfg_param.rpi_base;
7160 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7161 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7162 sizeof(unsigned long),
7164 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7168 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7170 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7172 goto free_rpi_bmask;
7175 for (i = 0; i < count; i++)
7176 phba->sli4_hba.rpi_ids[i] = base + i;
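/* Without extents, ids are a contiguous run starting at the READ_CONFIG
 * base; the same base + index mapping is repeated below for VPIs, XRIs,
 * and VFIs.
 */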
7179 count = phba->sli4_hba.max_cfg_param.max_vpi;
7181 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7182 "3280 Invalid provisioning of "
7187 base = phba->sli4_hba.max_cfg_param.vpi_base;
7188 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7189 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7191 if (unlikely(!phba->vpi_bmask)) {
7195 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7197 if (unlikely(!phba->vpi_ids)) {
7199 goto free_vpi_bmask;
7202 for (i = 0; i < count; i++)
7203 phba->vpi_ids[i] = base + i;
7206 count = phba->sli4_hba.max_cfg_param.max_xri;
7208 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7209 "3281 Invalid provisioning of "
7214 base = phba->sli4_hba.max_cfg_param.xri_base;
7215 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7216 phba->sli4_hba.xri_bmask = kcalloc(longs,
7217 sizeof(unsigned long),
7219 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7223 phba->sli4_hba.max_cfg_param.xri_used = 0;
7224 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7226 if (unlikely(!phba->sli4_hba.xri_ids)) {
7228 goto free_xri_bmask;
7231 for (i = 0; i < count; i++)
7232 phba->sli4_hba.xri_ids[i] = base + i;
7235 count = phba->sli4_hba.max_cfg_param.max_vfi;
7237 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7238 "3282 Invalid provisioning of "
7243 base = phba->sli4_hba.max_cfg_param.vfi_base;
7244 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7245 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7246 sizeof(unsigned long),
7248 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7252 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7254 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7256 goto free_vfi_bmask;
7259 for (i = 0; i < count; i++)
7260 phba->sli4_hba.vfi_ids[i] = base + i;
7263 * Mark all resources ready. An HBA reset doesn't need
7264 * to reset the initialization.
7266 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7272 kfree(phba->sli4_hba.vfi_bmask);
7273 phba->sli4_hba.vfi_bmask = NULL;
7275 kfree(phba->sli4_hba.xri_ids);
7276 phba->sli4_hba.xri_ids = NULL;
7278 kfree(phba->sli4_hba.xri_bmask);
7279 phba->sli4_hba.xri_bmask = NULL;
7281 kfree(phba->vpi_ids);
7282 phba->vpi_ids = NULL;
7284 kfree(phba->vpi_bmask);
7285 phba->vpi_bmask = NULL;
7287 kfree(phba->sli4_hba.rpi_ids);
7288 phba->sli4_hba.rpi_ids = NULL;
7290 kfree(phba->sli4_hba.rpi_bmask);
7291 phba->sli4_hba.rpi_bmask = NULL;
7297 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7298 * @phba: Pointer to HBA context object.
7300 * This function releases all SLI4 resource identifiers allocated to this PCI function.
7304 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7306 if (phba->sli4_hba.extents_in_use) {
7307 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7308 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7309 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7310 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7312 kfree(phba->vpi_bmask);
7313 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7314 kfree(phba->vpi_ids);
7315 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7316 kfree(phba->sli4_hba.xri_bmask);
7317 kfree(phba->sli4_hba.xri_ids);
7318 kfree(phba->sli4_hba.vfi_bmask);
7319 kfree(phba->sli4_hba.vfi_ids);
7320 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7321 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7328 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7329 * @phba: Pointer to HBA context object.
7330 * @type: The resource extent type.
7331 * @extnt_cnt: buffer to hold port extent count response
7332 * @extnt_size: buffer to hold port extent size response.
7334 * This function calls the port to read the host allocated extents
7335 * for a particular type.
7338 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7339 uint16_t *extnt_cnt, uint16_t *extnt_size)
7343 uint16_t curr_blks = 0;
7344 uint32_t req_len, emb_len;
7345 uint32_t alloc_len, mbox_tmo;
7346 struct list_head *blk_list_head;
7347 struct lpfc_rsrc_blks *rsrc_blk;
7349 void *virtaddr = NULL;
7350 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7351 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7352 union lpfc_sli4_cfg_shdr *shdr;
7355 case LPFC_RSC_TYPE_FCOE_VPI:
7356 blk_list_head = &phba->lpfc_vpi_blk_list;
7358 case LPFC_RSC_TYPE_FCOE_XRI:
7359 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7361 case LPFC_RSC_TYPE_FCOE_VFI:
7362 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7364 case LPFC_RSC_TYPE_FCOE_RPI:
7365 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7371 /* Count the number of extents currently allocated for this type. */
7372 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7373 if (curr_blks == 0) {
7375 * The GET_ALLOCATED mailbox does not return the size,
7376 * just the count. The size should be just the size
7377 * stored in the current allocated block and all sizes
7378 * for an extent type are the same, so set the return value now.
7381 *extnt_size = rsrc_blk->rsrc_size;
7387 * Calculate the size of an embedded mailbox. The uint32_t
7388 * accounts for the extents-specific word.
7390 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7394 * Presume the allocation and response will fit into an embedded
7395 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7397 emb = LPFC_SLI4_MBX_EMBED;
7399 if (req_len > emb_len) {
7400 req_len = curr_blks * sizeof(uint16_t) +
7401 sizeof(union lpfc_sli4_cfg_shdr) +
7403 emb = LPFC_SLI4_MBX_NEMBED;
7406 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7409 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7411 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7412 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7414 if (alloc_len < req_len) {
7415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7416 "2983 Allocated DMA memory size (x%x) is "
7417 "less than the requested DMA memory "
7418 "size (x%x)\n", alloc_len, req_len);
7422 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7428 if (!phba->sli4_hba.intr_enable)
7429 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7431 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7432 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7441 * Figure out where the response is located. Then get local pointers
7442 * to the response data. The port does not guarantee to respond to
7443 * all extent count requests, so update the local variable with the
7444 * allocated count from the port.
7446 if (emb == LPFC_SLI4_MBX_EMBED) {
7447 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7448 shdr = &rsrc_ext->header.cfg_shdr;
7449 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7451 virtaddr = mbox->sge_array->addr[0];
7452 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7453 shdr = &n_rsrc->cfg_shdr;
7454 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7457 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7458 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7459 "2984 Failed to read allocated resources "
7460 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7462 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7463 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7468 lpfc_sli4_mbox_cmd_free(phba, mbox);
7473 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
7474 * @phba: pointer to lpfc hba data structure.
7475 * @sgl_list: linked list of sgl buffers to post
7476 * @cnt: number of linked list buffers
7478 * This routine walks the list of buffers that have been allocated and
7479 * reposts them to the port by using SGL block post. This is needed after a
7480 * pci_function_reset/warm_start or start. It attempts to construct blocks
7481 * of buffer sgls that contain contiguous xris and uses the non-embedded
7482 * SGL block post mailbox command to post them to the port. For a single
7483 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
7484 * mailbox command for posting.
7486 * Returns: 0 = success, non-zero failure.
7489 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7490 struct list_head *sgl_list, int cnt)
7492 struct lpfc_sglq *sglq_entry = NULL;
7493 struct lpfc_sglq *sglq_entry_next = NULL;
7494 struct lpfc_sglq *sglq_entry_first = NULL;
7495 int status, total_cnt;
7496 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7497 int last_xritag = NO_XRI;
7498 LIST_HEAD(prep_sgl_list);
7499 LIST_HEAD(blck_sgl_list);
7500 LIST_HEAD(allc_sgl_list);
7501 LIST_HEAD(post_sgl_list);
7502 LIST_HEAD(free_sgl_list);
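/*
 * Staging lists: prep_sgl_list accumulates a run of contiguous xris,
 * blck_sgl_list holds the block currently being posted, post_sgl_list
 * collects successfully posted sgls, and free_sgl_list collects failures.
 */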
7504 spin_lock_irq(&phba->hbalock);
7505 spin_lock(&phba->sli4_hba.sgl_list_lock);
7506 list_splice_init(sgl_list, &allc_sgl_list);
7507 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7508 spin_unlock_irq(&phba->hbalock);
7511 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7512 &allc_sgl_list, list) {
7513 list_del_init(&sglq_entry->list);
7515 if ((last_xritag != NO_XRI) &&
7516 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7517 /* a hole in xri block, form a sgl posting block */
7518 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7519 post_cnt = block_cnt - 1;
7520 /* prepare list for next posting block */
7521 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7524 /* prepare list for next posting block */
7525 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7526 /* enough sgls for non-embed sgl mbox command */
7527 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7528 list_splice_init(&prep_sgl_list,
7530 post_cnt = block_cnt;
7536 /* keep track of last sgl's xritag */
7537 last_xritag = sglq_entry->sli4_xritag;
7539 /* end of repost sgl list condition for buffers */
7540 if (num_posted == total_cnt) {
7541 if (post_cnt == 0) {
7542 list_splice_init(&prep_sgl_list,
7544 post_cnt = block_cnt;
7545 } else if (block_cnt == 1) {
7546 status = lpfc_sli4_post_sgl(phba,
7547 sglq_entry->phys, 0,
7548 sglq_entry->sli4_xritag);
7550 /* successful, put sgl to posted list */
7551 list_add_tail(&sglq_entry->list,
7554 /* Failure, put sgl to free list */
7555 lpfc_printf_log(phba, KERN_WARNING,
7557 "3159 Failed to post "
7558 "sgl, xritag:x%x\n",
7559 sglq_entry->sli4_xritag);
7560 list_add_tail(&sglq_entry->list,
7567 /* continue until a non-embedded page worth of sgls */
7571 /* post the buffer list sgls as a block */
7572 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7576 /* success, put sgl list to posted sgl list */
7577 list_splice_init(&blck_sgl_list, &post_sgl_list);
7579 /* Failure, put sgl list to free sgl list */
7580 sglq_entry_first = list_first_entry(&blck_sgl_list,
7583 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7584 "3160 Failed to post sgl-list, "
7586 sglq_entry_first->sli4_xritag,
7587 (sglq_entry_first->sli4_xritag +
7589 list_splice_init(&blck_sgl_list, &free_sgl_list);
7590 total_cnt -= post_cnt;
7593 /* don't reset xritag due to hole in xri block */
7595 last_xritag = NO_XRI;
7597 /* reset sgl post count for next round of posting */
7601 /* free the sgls that failed to post */
7602 lpfc_free_sgl_list(phba, &free_sgl_list);
7604 /* push sgls posted to the available list */
7605 if (!list_empty(&post_sgl_list)) {
7606 spin_lock_irq(&phba->hbalock);
7607 spin_lock(&phba->sli4_hba.sgl_list_lock);
7608 list_splice_init(&post_sgl_list, sgl_list);
7609 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7610 spin_unlock_irq(&phba->hbalock);
7612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7613 "3161 Failure to post sgl to port.\n");
7617 /* return the number of XRIs actually posted */
7622 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7623 * @phba: pointer to lpfc hba data structure.
7625 * This routine walks the list of nvme buffers that have been allocated and
7626 * reposts them to the port by using SGL block post. This is needed after a
7627 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7628 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7629 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7631 * Returns: 0 = success, non-zero failure.
7634 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7636 LIST_HEAD(post_nblist);
7637 int num_posted, rc = 0;
7639 /* gather all NVME buffers that need reposting onto a local list */
7640 lpfc_io_buf_flush(phba, &post_nblist);
7642 /* post the list of nvme buffer sgls to port if available */
7643 if (!list_empty(&post_nblist)) {
7644 num_posted = lpfc_sli4_post_io_sgl_list(
7645 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7646 /* failed to post any nvme buffer, return error */
7647 if (num_posted == 0)
7654 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7658 len = sizeof(struct lpfc_mbx_set_host_data) -
7659 sizeof(struct lpfc_sli4_cfg_mhdr);
7660 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7661 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7662 LPFC_SLI4_MBX_EMBED);
7664 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7665 mbox->u.mqe.un.set_host_data.param_len =
7666 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7667 snprintf(mbox->u.mqe.un.set_host_data.un.data,
7668 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7669 "Linux %s v"LPFC_DRIVER_VERSION,
7670 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7674 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7675 struct lpfc_queue *drq, int count, int idx)
7678 struct lpfc_rqe hrqe;
7679 struct lpfc_rqe drqe;
7680 struct lpfc_rqb *rqbp;
7681 unsigned long flags;
7682 struct rqb_dmabuf *rqb_buffer;
7683 LIST_HEAD(rqb_buf_list);
7686 for (i = 0; i < count; i++) {
7687 spin_lock_irqsave(&phba->hbalock, flags);
7688 /* If the RQ is already full, don't bother */
7689 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7690 spin_unlock_irqrestore(&phba->hbalock, flags);
7693 spin_unlock_irqrestore(&phba->hbalock, flags);
7695 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7698 rqb_buffer->hrq = hrq;
7699 rqb_buffer->drq = drq;
7700 rqb_buffer->idx = idx;
7701 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7704 spin_lock_irqsave(&phba->hbalock, flags);
7705 while (!list_empty(&rqb_buf_list)) {
7706 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7709 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7710 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7711 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7712 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
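/* Post the header/data RQE pair; the HRQ and DRQ are advanced in lockstep
 * so each header buffer stays matched to its data buffer.
 */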
7713 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7715 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7716 "6421 Cannot post to HRQ %d: %x %x %x "
7724 rqbp->rqb_free_buffer(phba, rqb_buffer);
7726 list_add_tail(&rqb_buffer->hbuf.list,
7727 &rqbp->rqb_buffer_list);
7728 rqbp->buffer_count++;
7731 spin_unlock_irqrestore(&phba->hbalock, flags);
7736 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7738 struct lpfc_vport *vport = pmb->vport;
7739 union lpfc_sli4_cfg_shdr *shdr;
7740 u32 shdr_status, shdr_add_status;
7743 /* Two outcomes. (1) Set features was successful and EDC negotiation
7744 * is done. (2) The mailbox failed, so fall back to FPIN support only.
7746 shdr = (union lpfc_sli4_cfg_shdr *)
7747 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7748 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7749 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7750 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7752 "2516 CGN SET_FEATURE mbox failed with "
7753 "status x%x add_status x%x, mbx status x%x "
7754 "Reset Congestion to FPINs only\n",
7755 shdr_status, shdr_add_status,
7756 pmb->u.mb.mbxStatus);
7757 /* If there is a mbox error, move on to RDF */
7758 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7759 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7763 /* Zero out Congestion Signal ACQE counter */
7764 phba->cgn_acqe_cnt = 0;
7765 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
7766 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
7768 acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7769 &pmb->u.mqe.un.set_feature);
7770 sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7771 &pmb->u.mqe.un.set_feature);
7772 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7773 "4620 SET_FEATURES Success: Freq: %ds %dms "
7774 " Reg: x%x x%x\n", acqe, sig,
7775 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7777 mempool_free(pmb, phba->mbox_mem_pool);
7779 /* Register for FPIN events from the fabric now that the
7780 * EDC common_set_features has completed.
7782 lpfc_issue_els_rdf(vport, 0);
7786 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7788 LPFC_MBOXQ_t *mboxq;
7791 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7795 lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7796 mboxq->vport = phba->pport;
7797 mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7799 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7800 "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7802 phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7803 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7805 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7806 if (rc == MBX_NOT_FINISHED)
7811 mempool_free(mboxq, phba->mbox_mem_pool);
7813 /* If there is a mbox error, move on to RDF */
7814 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7815 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7816 lpfc_issue_els_rdf(phba->pport, 0);
7821 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7822 * @phba: pointer to lpfc hba data structure.
7824 * This routine initializes the per-cq idle_stat to dynamically dictate
7825 * polling decisions.
7830 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7833 struct lpfc_sli4_hdw_queue *hdwq;
7834 struct lpfc_queue *cq;
7835 struct lpfc_idle_stat *idle_stat;
7838 for_each_present_cpu(i) {
7839 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7842 /* Skip if we've already handled this cq's primary CPU */
7846 idle_stat = &phba->sli4_hba.idle_stat[i];
7848 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7849 idle_stat->prev_wall = wall;
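/* Snapshot idle and wall time now; the delayed work computes deltas from
 * these to estimate per-CPU busyness for later polling decisions.
 */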
7851 if (phba->nvmet_support ||
7852 phba->cmf_active_mode != LPFC_CFG_OFF)
7853 cq->poll_mode = LPFC_QUEUE_WORK;
7855 cq->poll_mode = LPFC_IRQ_POLL;
7858 if (!phba->nvmet_support)
7859 schedule_delayed_work(&phba->idle_stat_delay_work,
7860 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7863 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7867 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7868 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7869 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7870 struct lpfc_register reg_data;
7872 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7876 if (bf_get(lpfc_sliport_status_dip, &reg_data))
7877 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7878 "2904 Firmware Dump Image Present"
7884 * lpfc_cmf_setup - Initialize CMF and MI support
7885 * @phba: Pointer to HBA context object.
7887 * This is called from HBA setup during driver load or when the HBA
7888 * comes online. This does all the initialization needed to support CMF and MI.
7891 lpfc_cmf_setup(struct lpfc_hba *phba)
7893 LPFC_MBOXQ_t *mboxq;
7894 struct lpfc_mqe *mqe;
7895 struct lpfc_dmabuf *mp;
7896 struct lpfc_pc_sli4_params *sli4_params;
7897 struct lpfc_sli4_parameters *mbx_sli4_parameters;
7899 int rc, cmf, mi_ver;
7901 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7904 mqe = &mboxq->u.mqe;
7906 /* Read the port's SLI4 Config Parameters */
7907 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
7908 sizeof(struct lpfc_sli4_cfg_mhdr));
7909 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7910 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
7911 length, LPFC_SLI4_MBX_EMBED);
7913 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7915 mempool_free(mboxq, phba->mbox_mem_pool);
7919 /* Gather info on CMF and MI support */
7920 sli4_params = &phba->sli4_hba.pc_sli4_params;
7921 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
7922 sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
7923 sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters);
7925 /* Are we forcing MI off via module parameter? */
7926 if (!phba->cfg_enable_mi)
7927 sli4_params->mi_ver = 0;
7929 /* Always try to enable MI feature if we can */
7930 if (sli4_params->mi_ver) {
7931 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
7932 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7933 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
7934 &mboxq->u.mqe.un.set_feature);
7936 if (rc == MBX_SUCCESS) {
7938 lpfc_printf_log(phba,
7939 KERN_WARNING, LOG_CGN_MGMT,
7940 "6215 MI is enabled\n");
7941 sli4_params->mi_ver = mi_ver;
7943 lpfc_printf_log(phba,
7944 KERN_WARNING, LOG_CGN_MGMT,
7945 "6338 MI is disabled\n");
7946 sli4_params->mi_ver = 0;
7949 /* mi_ver is already set from GET_SLI4_PARAMETERS */
7950 lpfc_printf_log(phba, KERN_INFO,
7951 LOG_CGN_MGMT | LOG_INIT,
7952 "6245 Enable MI Mailbox x%x (x%x/x%x) "
7953 "failed, rc:x%x mi:x%x\n",
7954 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7955 lpfc_sli_config_mbox_subsys_get
7957 lpfc_sli_config_mbox_opcode_get
7959 rc, sli4_params->mi_ver);
7962 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
7963 "6217 MI is disabled\n");
7966 /* Ensure FDMI is enabled for MI if enable_mi is set */
7967 if (sli4_params->mi_ver)
7968 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
7970 /* Always try to enable CMF feature if we can */
7971 if (sli4_params->cmf) {
7972 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
7973 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7974 cmf = bf_get(lpfc_mbx_set_feature_cmf,
7975 &mboxq->u.mqe.un.set_feature);
7976 if (rc == MBX_SUCCESS && cmf) {
7977 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
7978 "6218 CMF is enabled: mode %d\n",
7979 phba->cmf_active_mode);
7981 lpfc_printf_log(phba, KERN_WARNING,
7982 LOG_CGN_MGMT | LOG_INIT,
7983 "6219 Enable CMF Mailbox x%x (x%x/x%x) "
7984 "failed, rc:x%x dd:x%x\n",
7985 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7986 lpfc_sli_config_mbox_subsys_get
7988 lpfc_sli_config_mbox_opcode_get
7991 sli4_params->cmf = 0;
7992 phba->cmf_active_mode = LPFC_CFG_OFF;
7996 /* Allocate Congestion Information Buffer */
7998 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8000 mp->virt = dma_alloc_coherent
8001 (&phba->pcidev->dev,
8002 sizeof(struct lpfc_cgn_info),
8003 &mp->phys, GFP_KERNEL);
8004 if (!mp || !mp->virt) {
8005 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8006 "2640 Failed to alloc memory "
8007 "for Congestion Info\n");
8009 sli4_params->cmf = 0;
8010 phba->cmf_active_mode = LPFC_CFG_OFF;
8015 /* initialize congestion buffer info */
8016 lpfc_init_congestion_buf(phba);
8017 lpfc_init_congestion_stat(phba);
8020 rc = lpfc_sli4_cgn_params_read(phba);
8022 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8023 "6242 Error reading Cgn Params (%d)\n",
8025 /* Ensure CGN Mode is off */
8026 sli4_params->cmf = 0;
8028 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8029 "6243 CGN Event empty object.\n");
8030 /* Ensure CGN Mode is off */
8031 sli4_params->cmf = 0;
8035 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8036 "6220 CMF is disabled\n");
8039 /* Only register congestion buffer with firmware if BOTH
8040 * CMF and E2E are enabled.
8042 if (sli4_params->cmf && sli4_params->mi_ver) {
8043 rc = lpfc_reg_congestion_buf(phba);
8045 dma_free_coherent(&phba->pcidev->dev,
8046 sizeof(struct lpfc_cgn_info),
8047 phba->cgn_i->virt, phba->cgn_i->phys);
8050 /* Ensure CGN Mode is off */
8051 phba->cmf_active_mode = LPFC_CFG_OFF;
8055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8056 "6470 Setup MI version %d CMF %d mode %d\n",
8057 sli4_params->mi_ver, sli4_params->cmf,
8058 phba->cmf_active_mode);
8060 mempool_free(mboxq, phba->mbox_mem_pool);
8062 /* Initialize atomic counters */
8063 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8064 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8065 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8066 atomic_set(&phba->cgn_sync_warn_cnt, 0);
8067 atomic_set(&phba->cgn_driver_evt_cnt, 0);
8068 atomic_set(&phba->cgn_latency_evt_cnt, 0);
8069 atomic64_set(&phba->cgn_latency_evt, 0);
8071 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8073 /* Allocate RX Monitor Buffer */
8074 if (!phba->rxtable) {
8075 phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
8076 sizeof(struct rxtable_entry),
8078 if (!phba->rxtable) {
8079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8080 "2644 Failed to alloc memory "
8081 "for RX Monitor Buffer\n");
8085 atomic_set(&phba->rxtable_idx_head, 0);
8086 atomic_set(&phba->rxtable_idx_tail, 0);
8090 static int
8091 lpfc_set_host_tm(struct lpfc_hba *phba)
8092 {
8093 LPFC_MBOXQ_t *mboxq;
8094 uint32_t len, rc;
8095 struct timespec64 cur_time;
8096 struct tm broken;
8097 uint32_t month, day, year;
8098 uint32_t hour, minute, second;
8099 struct lpfc_mbx_set_host_date_time *tm;
8101 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8102 if (!mboxq)
8103 return 1;
8105 len = sizeof(struct lpfc_mbx_set_host_data) -
8106 sizeof(struct lpfc_sli4_cfg_mhdr);
8107 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8108 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8109 LPFC_SLI4_MBX_EMBED);
8111 mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8112 mboxq->u.mqe.un.set_host_data.param_len =
8113 sizeof(struct lpfc_mbx_set_host_date_time);
8114 tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8115 ktime_get_real_ts64(&cur_time);
8116 time64_to_tm(cur_time.tv_sec, 0, &broken);
8117 month = broken.tm_mon + 1;
8118 day = broken.tm_mday;
8119 year = broken.tm_year - 100;
8120 hour = broken.tm_hour;
8121 minute = broken.tm_min;
8122 second = broken.tm_sec;
8123 bf_set(lpfc_mbx_set_host_month, tm, month);
8124 bf_set(lpfc_mbx_set_host_day, tm, day);
8125 bf_set(lpfc_mbx_set_host_year, tm, year);
8126 bf_set(lpfc_mbx_set_host_hour, tm, hour);
8127 bf_set(lpfc_mbx_set_host_min, tm, minute);
8128 bf_set(lpfc_mbx_set_host_sec, tm, second);
8130 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8131 mempool_free(mboxq, phba->mbox_mem_pool);
8132 return rc;
8133 }
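/*
 * Editor's note: a minimal userspace sketch (not driver code) of the
 * broken-down-time arithmetic used above: struct tm carries a 0-based month
 * and years since 1900, so the driver sends tm_mon + 1 and tm_year - 100
 * (a two-digit year relative to 2000).
 *
 *   #include <stdio.h>
 *   #include <time.h>
 *
 *   int main(void)
 *   {
 *           time_t now = time(NULL);
 *           struct tm broken;
 *
 *           gmtime_r(&now, &broken);
 *           printf("%02d/%02d/%02d %02d:%02d:%02d\n",
 *                  broken.tm_mon + 1, broken.tm_mday, broken.tm_year - 100,
 *                  broken.tm_hour, broken.tm_min, broken.tm_sec);
 *           return 0;
 *   }
 */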
8135 /**
8136 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8137 * @phba: Pointer to HBA context object.
8139 * This function is the main SLI4 device initialization PCI function. This
8140 * function is called by the HBA initialization code, HBA reset code and
8141 * HBA error attention handler code. Caller is not required to hold any
8142 * locks.
8143 **/
8144 int
8145 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8146 {
8147 int rc, i, cnt, len, dd;
8148 LPFC_MBOXQ_t *mboxq;
8149 struct lpfc_mqe *mqe;
8150 uint8_t *vpd;
8151 uint32_t vpd_size;
8152 uint32_t ftr_rsp = 0;
8153 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8154 struct lpfc_vport *vport = phba->pport;
8155 struct lpfc_dmabuf *mp;
8156 struct lpfc_rqb *rqbp;
8157 u32 flg;
8159 /* Perform a PCI function reset to start from clean */
8160 rc = lpfc_pci_function_reset(phba);
8164 /* Check the HBA Host Status Register for readiness */
8165 rc = lpfc_sli4_post_status_check(phba);
8169 spin_lock_irq(&phba->hbalock);
8170 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8171 flg = phba->sli.sli_flag;
8172 spin_unlock_irq(&phba->hbalock);
8173 /* Allow a little time after setting SLI_ACTIVE for any polled
8174 * MBX commands to complete via BSG.
8176 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8177 msleep(20);
8178 spin_lock_irq(&phba->hbalock);
8179 flg = phba->sli.sli_flag;
8180 spin_unlock_irq(&phba->hbalock);
8181 }
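/*
 * Editor's note: the loop above is a bounded-poll idiom: re-sample the flag
 * under the lock, sleep briefly between samples, and give up after a fixed
 * budget (50 iterations x 20ms here, roughly one second). A generic sketch,
 * with read_flag_locked() standing in for the locked read:
 *
 *   static bool wait_until_clear(bool (*read_flag_locked)(void))
 *   {
 *           int i;
 *
 *           for (i = 0; i < 50 && read_flag_locked(); i++)
 *                   msleep(20);
 *           return !read_flag_locked();
 *   }
 */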
8184 lpfc_sli4_dip(phba);
8186 /*
8187 * Allocate a single mailbox container for initializing the
8188 * port.
8189 */
8190 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8191 if (!mboxq)
8192 return -ENOMEM;
8194 /* Issue READ_REV to collect vpd and FW information. */
8195 vpd_size = SLI4_PAGE_SIZE;
8196 vpd = kzalloc(vpd_size, GFP_KERNEL);
8202 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8208 mqe = &mboxq->u.mqe;
8209 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8210 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8211 phba->hba_flag |= HBA_FCOE_MODE;
8212 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8214 phba->hba_flag &= ~HBA_FCOE_MODE;
8217 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8219 phba->hba_flag |= HBA_FIP_SUPPORT;
8221 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8223 phba->hba_flag &= ~HBA_IOQ_FLUSH;
8225 if (phba->sli_rev != LPFC_SLI_REV4) {
8226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8227 "0376 READ_REV Error. SLI Level %d "
8228 "FCoE enabled %d\n",
8229 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8235 rc = lpfc_set_host_tm(phba);
8236 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8237 "6468 Set host date / time: Status x%x:\n", rc);
8239 /*
8240 * Continue initialization with default values even if driver failed
8241 * to read FCoE param config regions, only read parameters if the
8242 * board is FCoE
8243 */
8244 if (phba->hba_flag & HBA_FCOE_MODE &&
8245 lpfc_sli4_read_fcoe_params(phba))
8246 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8247 "2570 Failed to read FCoE parameters\n");
8249 /*
8250 * Retrieve the SLI4 device physical port name; failure to do so
8251 * is considered non-fatal.
8252 */
8253 rc = lpfc_sli4_retrieve_pport_name(phba);
8254 if (!rc)
8255 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8256 "3080 Successful retrieving SLI4 device "
8257 "physical port name: %s.\n", phba->Port);
8259 rc = lpfc_sli4_get_ctl_attr(phba);
8260 if (!rc)
8261 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8262 "8351 Successful retrieving SLI4 device "
8263 "CTL ATTR\n");
8265 /*
8266 * Evaluate the read rev and vpd data. Populate the driver
8267 * state with the results. If this routine fails, the failure
8268 * is not fatal as the driver will use generic values.
8269 */
8270 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8271 if (unlikely(!rc)) {
8272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8273 "0377 Error %d parsing vpd. "
8274 "Using defaults.\n", rc);
8275 rc = 0;
8276 }
8277 kfree(vpd);
8279 /* Save information as VPD data */
8280 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8281 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8283 /*
8284 * This is because first G7 ASIC doesn't support the standard
8285 * 0x5a NVME cmd descriptor type/subtype
8286 */
8287 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8288 LPFC_SLI_INTF_IF_TYPE_6) &&
8289 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8290 (phba->vpd.rev.smRev == 0) &&
8291 (phba->cfg_nvme_embed_cmd == 1))
8292 phba->cfg_nvme_embed_cmd = 0;
8294 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8295 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8296 &mqe->un.read_rev);
8297 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8298 &mqe->un.read_rev);
8299 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8300 &mqe->un.read_rev);
8301 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8302 &mqe->un.read_rev);
8303 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8304 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8305 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8306 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8307 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8308 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8309 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8310 "(%d):0380 READ_REV Status x%x "
8311 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8312 mboxq->vport ? mboxq->vport->vpi : 0,
8313 bf_get(lpfc_mqe_status, mqe),
8314 phba->vpd.rev.opFwName,
8315 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8316 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8318 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8319 LPFC_SLI_INTF_IF_TYPE_0) {
8320 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8321 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8322 if (rc == MBX_SUCCESS) {
8323 phba->hba_flag |= HBA_RECOVERABLE_UE;
8324 /* Set 1Sec interval to detect UE */
8325 phba->eratt_poll_interval = 1;
8326 phba->sli4_hba.ue_to_sr = bf_get(
8327 lpfc_mbx_set_feature_UESR,
8328 &mboxq->u.mqe.un.set_feature);
8329 phba->sli4_hba.ue_to_rp = bf_get(
8330 lpfc_mbx_set_feature_UERP,
8331 &mboxq->u.mqe.un.set_feature);
8335 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8336 /* Enable MDS Diagnostics only if the SLI Port supports it */
8337 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8338 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8339 if (rc != MBX_SUCCESS)
8340 phba->mds_diags_support = 0;
8343 /*
8344 * Discover the port's supported feature set and match it against the
8345 * host's requests.
8346 */
8347 lpfc_request_features(phba, mboxq);
8348 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8349 if (unlikely(rc)) {
8350 rc = -EIO;
8351 goto out_free_mbox;
8352 }
8354 /* Disable VMID if app header is not supported */
8355 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8356 &mqe->un.req_ftrs))) {
8357 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8358 phba->cfg_vmid_app_header = 0;
8359 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8360 "1242 vmid feature not supported\n");
8363 /*
8364 * The port must support FCP initiator mode as this is the
8365 * only mode running in the host.
8366 */
8367 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8368 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8369 "0378 No support for fcpi mode.\n");
8370 ftr_rsp++;
8371 }
8373 /* Performance Hints are ONLY for FCoE */
8374 if (phba->hba_flag & HBA_FCOE_MODE) {
8375 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8376 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8377 else
8378 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8379 }
8381 /*
8382 * If the port cannot support the host's requested features
8383 * then turn off the global config parameters to disable the
8384 * feature in the driver. This is not a fatal error.
8385 */
8386 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8387 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8388 phba->cfg_enable_bg = 0;
8389 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8390 ftr_rsp++;
8391 }
8392 }
8394 if (phba->max_vpi && phba->cfg_enable_npiv &&
8395 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8396 ftr_rsp++;
8398 if (ftr_rsp) {
8399 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8400 "0379 Feature Mismatch Data: x%08x %08x "
8401 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8402 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8403 phba->cfg_enable_npiv, phba->max_vpi);
8404 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8405 phba->cfg_enable_bg = 0;
8406 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8407 phba->cfg_enable_npiv = 0;
8408 }
8410 /* These SLI3 features are assumed in SLI4 */
8411 spin_lock_irq(&phba->hbalock);
8412 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8413 spin_unlock_irq(&phba->hbalock);
8415 /* Always try to enable dual dump feature if we can */
8416 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8417 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8418 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8419 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8420 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8421 "6448 Dual Dump is enabled\n");
8422 else
8423 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8424 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8425 "rc:x%x dd:x%x\n",
8426 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8427 lpfc_sli_config_mbox_subsys_get(
8428 phba, mboxq),
8429 lpfc_sli_config_mbox_opcode_get(
8430 phba, mboxq),
8431 rc, dd);
8433 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
8434 * calls depends on these resources to complete port setup.
8436 rc = lpfc_sli4_alloc_resource_identifiers(phba);
8437 if (rc) {
8438 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8439 "2920 Failed to alloc Resource IDs "
8440 "rc = x%x\n", rc);
8441 goto out_free_mbox;
8442 }
8444 lpfc_set_host_data(phba, mboxq);
8446 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8447 if (rc) {
8448 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8449 "2134 Failed to set host os driver version %x",
8450 rc);
8451 }
8453 /* Read the port's service parameters. */
8454 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8455 if (rc) {
8456 phba->link_state = LPFC_HBA_ERROR;
8457 rc = -ENOMEM;
8458 goto out_free_mbox;
8459 }
8461 mboxq->vport = vport;
8462 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8463 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8464 if (rc == MBX_SUCCESS) {
8465 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8466 rc = 0;
8467 }
8469 /*
8470 * This memory was allocated by the lpfc_read_sparam routine. Release
8471 * it to the mbuf pool.
8472 */
8473 lpfc_mbuf_free(phba, mp->virt, mp->phys);
8474 kfree(mp);
8475 mboxq->ctx_buf = NULL;
8476 if (unlikely(rc)) {
8477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8478 "0382 READ_SPARAM command failed "
8479 "status %d, mbxStatus x%x\n",
8480 rc, bf_get(lpfc_mqe_status, mqe));
8481 phba->link_state = LPFC_HBA_ERROR;
8482 rc = -EIO;
8483 goto out_free_mbox;
8484 }
8486 lpfc_update_vport_wwn(vport);
8488 /* Update the fc_host data structures with new wwn. */
8489 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8490 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
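/*
 * Editor's note: wwn_to_u64() folds the 8-byte World Wide Name into a u64 in
 * big-endian order for the fc_host attributes above. A sketch of the
 * equivalent packing (pack_wwn is a hypothetical name):
 *
 *   static inline unsigned long long pack_wwn(const unsigned char wwn[8])
 *   {
 *           unsigned long long v = 0;
 *           int i;
 *
 *           for (i = 0; i < 8; i++)
 *                   v = (v << 8) | wwn[i];
 *           return v;
 *   }
 */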
8492 /* Create all the SLI4 queues */
8493 rc = lpfc_sli4_queue_create(phba);
8494 if (rc) {
8495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8496 "3089 Failed to allocate queues\n");
8497 rc = -ENODEV;
8498 goto out_free_mbox;
8499 }
8500 /* Set up all the queues to the device */
8501 rc = lpfc_sli4_queue_setup(phba);
8502 if (rc) {
8503 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8504 "0381 Error %d during queue setup.\n", rc);
8505 goto out_stop_timers;
8506 }
8507 /* Initialize the driver internal SLI layer lists. */
8508 lpfc_sli4_setup(phba);
8509 lpfc_sli4_queue_init(phba);
8511 /* update host els xri-sgl sizes and mappings */
8512 rc = lpfc_sli4_els_sgl_update(phba);
8513 if (unlikely(rc)) {
8514 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8515 "1400 Failed to update xri-sgl size and "
8516 "mapping: %d\n", rc);
8517 goto out_destroy_queue;
8518 }
8520 /* register the els sgl pool to the port */
8521 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8522 phba->sli4_hba.els_xri_cnt);
8523 if (unlikely(rc < 0)) {
8524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8525 "0582 Error %d during els sgl post "
8526 "operation\n", rc);
8527 rc = -ENODEV;
8528 goto out_destroy_queue;
8529 }
8530 phba->sli4_hba.els_xri_cnt = rc;
8532 if (phba->nvmet_support) {
8533 /* update host nvmet xri-sgl sizes and mappings */
8534 rc = lpfc_sli4_nvmet_sgl_update(phba);
8535 if (unlikely(rc)) {
8536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8537 "6308 Failed to update nvmet-sgl size "
8538 "and mapping: %d\n", rc);
8539 goto out_destroy_queue;
8540 }
8542 /* register the nvmet sgl pool to the port */
8543 rc = lpfc_sli4_repost_sgl_list(
8545 &phba->sli4_hba.lpfc_nvmet_sgl_list,
8546 phba->sli4_hba.nvmet_xri_cnt);
8547 if (unlikely(rc < 0)) {
8548 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8549 "3117 Error %d during nvmet "
8550 "sgl post\n", rc);
8551 rc = -ENODEV;
8552 goto out_destroy_queue;
8553 }
8554 phba->sli4_hba.nvmet_xri_cnt = rc;
8556 /* We allocate an iocbq for every receive context SGL.
8557 * The additional allocation is for abort and ls handling.
8559 cnt = phba->sli4_hba.nvmet_xri_cnt +
8560 phba->sli4_hba.max_cfg_param.max_xri;
8562 /* update host common xri-sgl sizes and mappings */
8563 rc = lpfc_sli4_io_sgl_update(phba);
8564 if (unlikely(rc)) {
8565 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8566 "6082 Failed to update nvme-sgl size "
8567 "and mapping: %d\n", rc);
8568 goto out_destroy_queue;
8569 }
8571 /* register the allocated common sgl pool to the port */
8572 rc = lpfc_sli4_repost_io_sgl_list(phba);
8573 if (unlikely(rc < 0)) {
8574 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8575 "6116 Error %d during nvme sgl post "
8576 "operation\n", rc);
8577 /* Some NVME buffers were moved to abort nvme list */
8578 /* A pci function reset will repost them */
8579 rc = -ENODEV;
8580 goto out_destroy_queue;
8581 }
8582 /* Each lpfc_io_buf job structure has an iocbq element.
8583 * This cnt provides for abort, els, ct and ls requests.
8584 */
8585 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8588 if (!phba->sli.iocbq_lookup) {
8589 /* Initialize and populate the iocb list per host */
8590 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8591 "2821 initialize iocb list with %d entries\n",
8592 cnt);
8593 rc = lpfc_init_iocb_list(phba, cnt);
8594 if (rc) {
8595 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8596 "1413 Failed to init iocb list.\n");
8597 goto out_destroy_queue;
8598 }
8599 }
8601 if (phba->nvmet_support)
8602 lpfc_nvmet_create_targetport(phba);
8604 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8605 /* Post initial buffers to all RQs created */
8606 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8607 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8608 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8609 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8610 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8611 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8612 rqbp->buffer_count = 0;
8614 lpfc_post_rq_buffer(
8615 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8616 phba->sli4_hba.nvmet_mrq_data[i],
8617 phba->cfg_nvmet_mrq_post, i);
8621 /* Post the rpi header region to the device. */
8622 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8623 if (unlikely(rc)) {
8624 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8625 "0393 Error %d during rpi post operation\n",
8626 rc);
8627 rc = -ENODEV;
8628 goto out_free_iocblist;
8629 }
8630 lpfc_sli4_node_prep(phba);
8632 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8633 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8634 /*
8635 * The FC Port needs to register FCFI (index 0)
8636 */
8637 lpfc_reg_fcfi(phba, mboxq);
8638 mboxq->vport = phba->pport;
8639 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8640 if (rc != MBX_SUCCESS)
8641 goto out_unset_queue;
8643 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8644 &mboxq->u.mqe.un.reg_fcfi);
8646 /* We are a NVME Target mode with MRQ > 1 */
8648 /* First register the FCFI */
8649 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8650 mboxq->vport = phba->pport;
8651 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8652 if (rc != MBX_SUCCESS)
8653 goto out_unset_queue;
8655 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8656 &mboxq->u.mqe.un.reg_fcfi_mrq);
8658 /* Next register the MRQs */
8659 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8660 mboxq->vport = phba->pport;
8661 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8662 if (rc != MBX_SUCCESS)
8663 goto out_unset_queue;
8666 /* Check if the port is configured to be disabled */
8667 lpfc_sli_read_link_ste(phba);
8670 /* Don't post more new bufs if repost already recovered
8671 * buffers
8672 */
8673 if (phba->nvmet_support == 0) {
8674 if (phba->sli4_hba.io_xri_cnt == 0) {
8675 len = lpfc_new_io_buf(
8676 phba, phba->sli4_hba.io_xri_max);
8677 if (len == 0) {
8678 rc = -ENOMEM;
8679 goto out_unset_queue;
8680 }
8682 if (phba->cfg_xri_rebalancing)
8683 lpfc_create_multixri_pools(phba);
8684 }
8685 } else {
8686 phba->cfg_xri_rebalancing = 0;
8687 }
8689 /* Allow asynchronous mailbox command to go through */
8690 spin_lock_irq(&phba->hbalock);
8691 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8692 spin_unlock_irq(&phba->hbalock);
8694 /* Post receive buffers to the device */
8695 lpfc_sli4_rb_setup(phba);
8697 /* Reset HBA FCF states after HBA reset */
8698 phba->fcf.fcf_flag = 0;
8699 phba->fcf.current_rec.flag = 0;
8701 /* Start the ELS watchdog timer */
8702 mod_timer(&vport->els_tmofunc,
8703 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8705 /* Start heart beat timer */
8706 mod_timer(&phba->hb_tmofunc,
8707 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8708 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8709 phba->last_completion_time = jiffies;
8711 /* start eq_delay heartbeat */
8712 if (phba->cfg_auto_imax)
8713 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8714 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8716 /* start per phba idle_stat_delay heartbeat */
8717 lpfc_init_idle_stat_hb(phba);
8719 /* Start error attention (ERATT) polling timer */
8720 mod_timer(&phba->eratt_poll,
8721 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8723 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8724 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8725 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8726 if (!rc) {
8727 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8728 "2829 This device supports "
8729 "Advanced Error Reporting (AER)\n");
8730 spin_lock_irq(&phba->hbalock);
8731 phba->hba_flag |= HBA_AER_ENABLED;
8732 spin_unlock_irq(&phba->hbalock);
8733 } else {
8734 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8735 "2830 This device does not support "
8736 "Advanced Error Reporting (AER)\n");
8737 phba->cfg_aer_support = 0;
8738 }
8739 rc = 0;
8740 }
8743 * The port is ready, set the host's link state to LINK_DOWN
8744 * in preparation for link interrupts.
8746 spin_lock_irq(&phba->hbalock);
8747 phba->link_state = LPFC_LINK_DOWN;
8749 /* Check if physical ports are trunked */
8750 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8751 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8752 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8753 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8754 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8755 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8756 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8757 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8758 spin_unlock_irq(&phba->hbalock);
8760 /* Arm the CQs and then EQs on device */
8761 lpfc_sli4_arm_cqeq_intr(phba);
8763 /* Indicate device interrupt mode */
8764 phba->sli4_hba.intr_enable = 1;
8766 /* Setup CMF after HBA is initialized */
8767 lpfc_cmf_setup(phba);
8769 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8770 (phba->hba_flag & LINK_DISABLED)) {
8771 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8772 "3103 Adapter Link is disabled.\n");
8773 lpfc_down_link(phba, mboxq);
8774 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8775 if (rc != MBX_SUCCESS) {
8776 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8777 "3104 Adapter failed to issue "
8778 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8779 goto out_io_buff_free;
8781 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8782 /* don't perform init_link on SLI4 FC port loopback test */
8783 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8784 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8785 if (rc)
8786 goto out_io_buff_free;
8787 }
8788 }
8789 mempool_free(mboxq, phba->mbox_mem_pool);
8791 phba->hba_flag |= HBA_SETUP;
8792 return rc;
8794 out_io_buff_free:
8795 /* Free allocated IO Buffers */
8796 lpfc_io_free(phba);
8797 out_unset_queue:
8798 /* Unset all the queues set up in this routine when error out */
8799 lpfc_sli4_queue_unset(phba);
8800 out_free_iocblist:
8801 lpfc_free_iocb_list(phba);
8802 out_destroy_queue:
8803 lpfc_sli4_queue_destroy(phba);
8804 out_stop_timers:
8805 lpfc_stop_hba_timers(phba);
8806 out_free_mbox:
8807 mempool_free(mboxq, phba->mbox_mem_pool);
8808 return rc;
8809 }
8811 /**
8812 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8813 * @t: Context to fetch pointer to hba structure from.
8815 * This is the callback function for mailbox timer. The mailbox
8816 * timer is armed when a new mailbox command is issued and the timer
8817 * is deleted when the mailbox completes. The function is called by
8818 * the kernel timer code when a mailbox does not complete within
8819 * expected time. This function wakes up the worker thread to
8820 * process the mailbox timeout and returns. All the processing is
8821 * done by the worker thread function lpfc_mbox_timeout_handler.
8822 **/
8823 void
8824 lpfc_mbox_timeout(struct timer_list *t)
8825 {
8826 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8827 unsigned long iflag;
8828 uint32_t tmo_posted;
8830 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8831 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8832 if (!tmo_posted)
8833 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8834 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8836 if (!tmo_posted)
8837 lpfc_worker_wake_up(phba);
8838 return;
8839 }
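/*
 * Editor's note: from_timer() above is the container_of() idiom for timer
 * callbacks: the timer_list is embedded in a larger structure and the
 * callback recovers the owning object from the member pointer. A minimal
 * sketch with a hypothetical structure:
 *
 *   struct my_dev {
 *           int timed_out;
 *           struct timer_list tmo;
 *   };
 *
 *   static void my_tmo_fn(struct timer_list *t)
 *   {
 *           struct my_dev *dev = from_timer(dev, t, tmo);
 *
 *           dev->timed_out = 1;     // just flag it; defer real work
 *   }
 *
 * Armed with timer_setup(&dev->tmo, my_tmo_fn, 0) and
 * mod_timer(&dev->tmo, jiffies + msecs_to_jiffies(100)).
 */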
8841 /**
8842 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8843 * are pending
8844 * @phba: Pointer to HBA context object.
8846 * This function checks if any mailbox completions are present on the mailbox
8847 * completion queue.
8848 **/
8849 static bool
8850 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8851 {
8852 uint32_t idx;
8854 struct lpfc_queue *mcq;
8855 struct lpfc_mcqe *mcqe;
8856 bool pending_completions = false;
8857 uint8_t qe_valid;
8859 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8860 return false;
8862 /* Check for completions on mailbox completion queue */
8864 mcq = phba->sli4_hba.mbx_cq;
8865 idx = mcq->hba_index;
8866 qe_valid = mcq->qe_valid;
8867 while (bf_get_le32(lpfc_cqe_valid,
8868 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8869 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8870 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8871 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8872 pending_completions = true;
8875 idx = (idx + 1) % mcq->entry_count;
8876 if (mcq->hba_index == idx)
8877 break;
8879 /* if the index wrapped around, toggle the valid bit */
8880 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8881 qe_valid = (qe_valid) ? 0 : 1;
8882 }
8883 return pending_completions;
8884 }
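/*
 * Editor's note: the scan above relies on the SLI4 valid-bit convention: an
 * entry is new while its valid bit matches the expected phase, and on queues
 * with auto-valid (cqav) the expected phase flips each time the index wraps.
 * A simplified sketch over a plain array, where is_async() is hypothetical:
 *
 *   static bool queue_has_work(const unsigned int *q, unsigned int count,
 *                              unsigned int start, unsigned int phase)
 *   {
 *           unsigned int idx = start;
 *
 *           while ((q[idx] & 1) == phase) {
 *                   if (!is_async(q[idx]))
 *                           return true;            // real completion found
 *                   idx = (idx + 1) % count;
 *                   if (idx == start)
 *                           break;
 *                   if (idx == 0)                   // wrapped: flip phase
 *                           phase ^= 1;
 *           }
 *           return false;
 *   }
 */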
8887 /**
8888 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8889 * from firmware
8890 * @phba: Pointer to HBA context object.
8892 * For sli4, it is possible to miss an interrupt. As such, mbox completions
8893 * may be missed, causing erroneous mailbox timeouts to occur. This function
8894 * checks to see if mbox completions are on the mailbox completion queue
8895 * and will process all the completions associated with the eq for the
8896 * mailbox completion queue.
8897 **/
8898 static bool
8899 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8900 {
8901 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8902 uint32_t eqidx;
8903 struct lpfc_queue *fpeq = NULL;
8904 struct lpfc_queue *eq;
8905 bool mbox_pending;
8907 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8908 return false;
8910 /* Find the EQ associated with the mbox CQ */
8911 if (sli4_hba->hdwq) {
8912 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8913 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8914 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8915 fpeq = eq;
8916 break;
8917 }
8918 }
8919 }
8920 if (!fpeq)
8921 return false;
8923 /* Turn off interrupts from this EQ */
8925 sli4_hba->sli4_eq_clr_intr(fpeq);
8927 /* Check to see if a mbox completion is pending */
8929 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8931 /*
8932 * If a mbox completion is pending, process all the events on EQ
8933 * associated with the mbox completion queue (this could include
8934 * mailbox commands, async events, els commands, receive queue data
8935 * and fcp commands)
8936 */
8938 if (mbox_pending)
8939 /* process and rearm the EQ */
8940 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8941 else
8942 /* Always clear and re-arm the EQ */
8943 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8945 return mbox_pending;
8946 }
8949 /**
8950 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8951 * @phba: Pointer to HBA context object.
8953 * This function is called from worker thread when a mailbox command times out.
8954 * The caller is not required to hold any locks. This function will reset the
8955 * HBA and recover all the pending commands.
8956 **/
8957 void
8958 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8959 {
8960 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8961 MAILBOX_t *mb = NULL;
8963 struct lpfc_sli *psli = &phba->sli;
8965 /* If the mailbox completed, process the completion */
8966 lpfc_sli4_process_missed_mbox_completions(phba);
8968 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
8969 return;
8971 if (pmbox)
8972 mb = &pmbox->u.mb;
8973 /* Check the pmbox pointer first. There is a race condition
8974 * between the mbox timeout handler getting executed in the
8975 * worklist and the mailbox actually completing. When this
8976 * race condition occurs, the mbox_active will be NULL.
8977 */
8978 spin_lock_irq(&phba->hbalock);
8979 if (pmbox == NULL) {
8980 lpfc_printf_log(phba, KERN_WARNING,
8982 "0353 Active Mailbox cleared - mailbox timeout "
8983 "exiting\n");
8984 spin_unlock_irq(&phba->hbalock);
8985 return;
8986 }
8988 /* Mbox cmd <mbxCommand> timeout */
8989 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8990 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8991 mb->mbxCommand,
8992 phba->pport->port_state,
8993 phba->sli.sli_flag,
8994 phba->sli.mbox_active);
8995 spin_unlock_irq(&phba->hbalock);
8997 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8998 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8999 * it to fail all outstanding SCSI IO.
9000 */
9001 spin_lock_irq(&phba->pport->work_port_lock);
9002 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9003 spin_unlock_irq(&phba->pport->work_port_lock);
9004 spin_lock_irq(&phba->hbalock);
9005 phba->link_state = LPFC_LINK_UNKNOWN;
9006 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9007 spin_unlock_irq(&phba->hbalock);
9009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9010 "0345 Resetting board due to mailbox timeout\n");
9012 /* Reset the HBA device */
9013 lpfc_reset_hba(phba);
9014 }
9016 /**
9017 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9018 * @phba: Pointer to HBA context object.
9019 * @pmbox: Pointer to mailbox object.
9020 * @flag: Flag indicating how the mailbox need to be processed.
9022 * This function is called by discovery code and HBA management code
9023 * to submit a mailbox command to firmware with SLI-3 interface spec. This
9024 * function gets the hbalock to protect the data structures.
9025 * The mailbox command can be submitted in polling mode, in which case
9026 * this function will wait in a polling loop for the completion of the
9028 * If the mailbox is submitted in no_wait mode (not polling) the
9029 * function will submit the command and return immediately without waiting
9030 * for the mailbox completion. The no_wait is supported only when HBA
9031 * is in SLI2/SLI3 mode - interrupts are enabled.
9032 * The SLI interface allows only one mailbox pending at a time. If the
9033 * mailbox is issued in polling mode and there is already a mailbox
9034 * pending, then the function will return an error. If the mailbox is issued
9035 * in NO_WAIT mode and there is a mailbox pending already, the function
9036 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9037 * The sli layer owns the mailbox object until the completion of mailbox
9038 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9039 * return codes the caller owns the mailbox command after the return of
9040 * the function.
9041 **/
9042 static int
9043 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9044 uint32_t flag)
9045 {
9046 MAILBOX_t *mbx;
9047 struct lpfc_sli *psli = &phba->sli;
9048 uint32_t status, evtctr;
9049 uint32_t ha_copy, hc_copy;
9050 int i;
9051 unsigned long timeout;
9052 unsigned long drvr_flag = 0;
9053 uint32_t word0, ldata;
9054 void __iomem *to_slim;
9055 int processing_queue = 0;
9057 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9058 if (!pmbox) {
9059 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9060 /* processing mbox queue from intr_handler */
9061 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9062 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9063 return MBX_SUCCESS;
9064 }
9065 processing_queue = 1;
9066 pmbox = lpfc_mbox_get(phba);
9067 if (!pmbox) {
9068 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9069 return MBX_SUCCESS;
9070 }
9071 }
9073 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9074 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9075 if (!pmbox->vport) {
9076 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9077 lpfc_printf_log(phba, KERN_ERR,
9078 LOG_MBOX | LOG_VPORT,
9079 "1806 Mbox x%x failed. No vport\n",
9080 pmbox->u.mb.mbxCommand);
9081 dump_stack();
9082 goto out_not_finished;
9083 }
9084 }
9086 /* If the PCI channel is in offline state, do not post mbox. */
9087 if (unlikely(pci_channel_offline(phba->pcidev))) {
9088 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9089 goto out_not_finished;
9092 /* If HBA has a deferred error attention, fail the iocb. */
9093 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9094 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9095 goto out_not_finished;
9096 }
9098 psli = &phba->sli;
9100 mbx = &pmbox->u.mb;
9101 status = MBX_SUCCESS;
9103 if (phba->link_state == LPFC_HBA_ERROR) {
9104 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9106 /* Mbox command <mbxCommand> cannot issue */
9107 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9108 "(%d):0311 Mailbox command x%x cannot "
9109 "issue Data: x%x x%x\n",
9110 pmbox->vport ? pmbox->vport->vpi : 0,
9111 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9112 goto out_not_finished;
9115 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9116 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9117 !(hc_copy & HC_MBINT_ENA)) {
9118 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9119 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9120 "(%d):2528 Mailbox command x%x cannot "
9121 "issue Data: x%x x%x\n",
9122 pmbox->vport ? pmbox->vport->vpi : 0,
9123 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9124 goto out_not_finished;
9128 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9129 /* Polling for a mbox command when another one is already active
9130 * is not allowed in SLI. Also, the driver must have established
9131 * SLI2 mode to queue and process multiple mbox commands.
9134 if (flag & MBX_POLL) {
9135 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9137 /* Mbox command <mbxCommand> cannot issue */
9138 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9139 "(%d):2529 Mailbox command x%x "
9140 "cannot issue Data: x%x x%x\n",
9141 pmbox->vport ? pmbox->vport->vpi : 0,
9142 pmbox->u.mb.mbxCommand,
9143 psli->sli_flag, flag);
9144 goto out_not_finished;
9147 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9148 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9149 /* Mbox command <mbxCommand> cannot issue */
9150 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9151 "(%d):2530 Mailbox command x%x "
9152 "cannot issue Data: x%x x%x\n",
9153 pmbox->vport ? pmbox->vport->vpi : 0,
9154 pmbox->u.mb.mbxCommand,
9155 psli->sli_flag, flag);
9156 goto out_not_finished;
9159 /* Another mailbox command is still being processed, queue this
9160 * command to be processed later.
9162 lpfc_mbox_put(phba, pmbox);
9164 /* Mbox cmd issue - BUSY */
9165 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9166 "(%d):0308 Mbox cmd issue - BUSY Data: "
9167 "x%x x%x x%x x%x\n",
9168 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9170 phba->pport ? phba->pport->port_state : 0xff,
9171 psli->sli_flag, flag);
9173 psli->slistat.mbox_busy++;
9174 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9176 if (pmbox->vport) {
9177 lpfc_debugfs_disc_trc(pmbox->vport,
9178 LPFC_DISC_TRC_MBOX_VPORT,
9179 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
9180 (uint32_t)mbx->mbxCommand,
9181 mbx->un.varWords[0], mbx->un.varWords[1]);
9183 } else {
9184 lpfc_debugfs_disc_trc(phba->pport,
9185 LPFC_DISC_TRC_MBOX,
9186 "MBOX Bsy: cmd:x%x mb:x%x x%x",
9187 (uint32_t)mbx->mbxCommand,
9188 mbx->un.varWords[0], mbx->un.varWords[1]);
9189 }
9191 return MBX_BUSY;
9192 }
9194 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9196 /* If we are not polling, we MUST be in SLI2 mode */
9197 if (flag != MBX_POLL) {
9198 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9199 (mbx->mbxCommand != MBX_KILL_BOARD)) {
9200 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9201 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9202 /* Mbox command <mbxCommand> cannot issue */
9203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9204 "(%d):2531 Mailbox command x%x "
9205 "cannot issue Data: x%x x%x\n",
9206 pmbox->vport ? pmbox->vport->vpi : 0,
9207 pmbox->u.mb.mbxCommand,
9208 psli->sli_flag, flag);
9209 goto out_not_finished;
9211 /* timeout active mbox command */
9212 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9213 1000);
9214 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9215 }
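/*
 * Editor's note: the timeout above is the standard jiffies pattern: convert
 * a millisecond budget with msecs_to_jiffies(), add the current jiffies for
 * a deadline, and compare later with time_after(), which stays correct
 * across jiffies wraparound. Sketch:
 *
 *   unsigned long deadline = jiffies + msecs_to_jiffies(30 * 1000);
 *
 *   if (time_after(jiffies, deadline))
 *           ;       // deadline passed, give up
 */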
9217 /* Mailbox cmd <cmd> issue */
9218 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9219 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9221 pmbox->vport ? pmbox->vport->vpi : 0,
9223 phba->pport ? phba->pport->port_state : 0xff,
9224 psli->sli_flag, flag);
9226 if (mbx->mbxCommand != MBX_HEARTBEAT) {
9227 if (pmbox->vport) {
9228 lpfc_debugfs_disc_trc(pmbox->vport,
9229 LPFC_DISC_TRC_MBOX_VPORT,
9230 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9231 (uint32_t)mbx->mbxCommand,
9232 mbx->un.varWords[0], mbx->un.varWords[1]);
9234 } else {
9235 lpfc_debugfs_disc_trc(phba->pport,
9236 LPFC_DISC_TRC_MBOX,
9237 "MBOX Send: cmd:x%x mb:x%x x%x",
9238 (uint32_t)mbx->mbxCommand,
9239 mbx->un.varWords[0], mbx->un.varWords[1]);
9240 }
9241 }
9243 psli->slistat.mbox_cmd++;
9244 evtctr = psli->slistat.mbox_event;
9246 /* next set own bit for the adapter and copy over command word */
9247 mbx->mbxOwner = OWN_CHIP;
9249 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9250 /* Populate mbox extension offset word. */
9251 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9252 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9253 = (uint8_t *)phba->mbox_ext
9254 - (uint8_t *)phba->mbox;
9257 /* Copy the mailbox extension data */
9258 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9259 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9260 (uint8_t *)phba->mbox_ext,
9261 pmbox->in_ext_byte_len);
9263 /* Copy command data to host SLIM area */
9264 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9266 /* Populate mbox extension offset word. */
9267 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9268 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9269 = MAILBOX_HBA_EXT_OFFSET;
9271 /* Copy the mailbox extension data */
9272 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9273 lpfc_memcpy_to_slim(phba->MBslimaddr +
9274 MAILBOX_HBA_EXT_OFFSET,
9275 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9277 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9278 /* copy command data into host mbox for cmpl */
9279 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9280 MAILBOX_CMD_SIZE);
9282 /* First copy mbox command data to HBA SLIM, skip past first
9283 word */
9284 to_slim = phba->MBslimaddr + sizeof (uint32_t);
9285 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9286 MAILBOX_CMD_SIZE - sizeof (uint32_t));
9288 /* Next copy over first word, with mbxOwner set */
9289 ldata = *((uint32_t *)mbx);
9290 to_slim = phba->MBslimaddr;
9291 writel(ldata, to_slim);
9292 readl(to_slim); /* flush */
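/*
 * Editor's note: the readl() right after writel() above is a posted-write
 * flush: PCI memory writes may be buffered, and a read from the same device
 * forces them to reach the adapter before execution continues. The general
 * pattern, with regs a hypothetical ioremap()ed register block:
 *
 *   writel(val, regs + DOORBELL);
 *   (void)readl(regs + DOORBELL);   // flush the posted write
 */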
9294 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9295 /* switch over to host mailbox */
9296 psli->sli_flag |= LPFC_SLI_ACTIVE;
9303 /* Set up reference to mailbox command */
9304 psli->mbox_active = pmbox;
9305 /* Interrupt board to do it */
9306 writel(CA_MBATT, phba->CAregaddr);
9307 readl(phba->CAregaddr); /* flush */
9308 /* Don't wait for it to finish, just return */
9312 /* Set up null reference to mailbox command */
9313 psli->mbox_active = NULL;
9314 /* Interrupt board to do it */
9315 writel(CA_MBATT, phba->CAregaddr);
9316 readl(phba->CAregaddr); /* flush */
9318 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9319 /* First read mbox status word */
9320 word0 = *((uint32_t *)phba->mbox);
9321 word0 = le32_to_cpu(word0);
9322 } else {
9323 /* First read mbox status word */
9324 if (lpfc_readl(phba->MBslimaddr, &word0)) {
9325 spin_unlock_irqrestore(&phba->hbalock,
9326 drvr_flag);
9327 goto out_not_finished;
9328 }
9329 }
9331 /* Read the HBA Host Attention Register */
9332 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9333 spin_unlock_irqrestore(&phba->hbalock,
9334 drvr_flag);
9335 goto out_not_finished;
9336 }
9337 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9338 1000) + jiffies;
9339 i = 0;
9340 /* Wait for command to complete */
9341 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9342 (!(ha_copy & HA_MBATT) &&
9343 (phba->link_state > LPFC_WARM_START))) {
9344 if (time_after(jiffies, timeout)) {
9345 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9346 spin_unlock_irqrestore(&phba->hbalock,
9347 drvr_flag);
9348 goto out_not_finished;
9349 }
9351 /* Check if we took a mbox interrupt while we were
9352 polling */
9353 if (((word0 & OWN_CHIP) != OWN_CHIP)
9354 && (evtctr != psli->slistat.mbox_event))
9355 break;
9357 if (i++ > 10) {
9358 spin_unlock_irqrestore(&phba->hbalock,
9359 drvr_flag);
9360 msleep(1);
9361 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9362 }
9364 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9365 /* First copy command data */
9366 word0 = *((uint32_t *)phba->mbox);
9367 word0 = le32_to_cpu(word0);
9368 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9369 MAILBOX_t *slimmb;
9370 uint32_t slimword0;
9371 /* Check real SLIM for any errors */
9372 slimword0 = readl(phba->MBslimaddr);
9373 slimmb = (MAILBOX_t *) & slimword0;
9374 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9375 && slimmb->mbxStatus) {
9376 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9377 word0 = slimword0;
9378 }
9379 }
9380 } else {
9382 /* First copy command data */
9383 word0 = readl(phba->MBslimaddr);
9384 }
9385 /* Read the HBA Host Attention Register */
9386 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9387 spin_unlock_irqrestore(&phba->hbalock,
9388 drvr_flag);
9389 goto out_not_finished;
9390 }
9391 }
9393 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9394 /* copy results back to user */
9395 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9396 MAILBOX_CMD_SIZE);
9397 /* Copy the mailbox extension data */
9398 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9399 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9400 pmbox->ctx_buf,
9401 pmbox->out_ext_byte_len);
9402 }
9403 } else {
9404 /* First copy command data */
9405 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9406 MAILBOX_CMD_SIZE);
9407 /* Copy the mailbox extension data */
9408 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9409 lpfc_memcpy_from_slim(
9410 pmbox->ctx_buf,
9411 phba->MBslimaddr +
9412 MAILBOX_HBA_EXT_OFFSET,
9413 pmbox->out_ext_byte_len);
9414 }
9415 }
9417 writel(HA_MBATT, phba->HAregaddr);
9418 readl(phba->HAregaddr); /* flush */
9420 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9421 status = mbx->mbxStatus;
9424 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9425 return status;
9427 out_not_finished:
9428 if (processing_queue) {
9429 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9430 lpfc_mbox_cmpl_put(phba, pmbox);
9432 return MBX_NOT_FINISHED;
9433 }
9436 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9437 * @phba: Pointer to HBA context object.
9439 * The function blocks the posting of SLI4 asynchronous mailbox commands from
9440 * the driver's internal pending mailbox queue. It will then try to wait out
9441 * the possible outstanding mailbox command before returning.
9444 * 0 - the outstanding mailbox command completed; otherwise, the wait for
9445 * the outstanding mailbox command timed out.
9448 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9450 struct lpfc_sli *psli = &phba->sli;
9451 LPFC_MBOXQ_t *mboxq;
9453 unsigned long timeout = 0;
9455 u8 cmd, subsys, opcode;
9457 /* Mark the asynchronous mailbox command posting as blocked */
9458 spin_lock_irq(&phba->hbalock);
9459 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9460 /* Determine how long we might wait for the active mailbox
9461 * command to be gracefully completed by firmware.
9463 if (phba->sli.mbox_active)
9464 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9465 phba->sli.mbox_active) *
9467 spin_unlock_irq(&phba->hbalock);
9469 /* Make sure the mailbox is really active */
9471 lpfc_sli4_process_missed_mbox_completions(phba);
9473 /* Wait for the outstanding mailbox command to complete */
9474 while (phba->sli.mbox_active) {
9475 /* Check active mailbox complete status every 2ms */
9477 if (time_after(jiffies, timeout)) {
9478 /* Timeout, mark the outstanding cmd not complete */
9480 /* Sanity check sli.mbox_active has not completed or
9481 * cancelled from another context during last 2ms sleep,
9482 * so take hbalock to be sure before logging.
9484 spin_lock_irq(&phba->hbalock);
9485 if (phba->sli.mbox_active) {
9486 mboxq = phba->sli.mbox_active;
9487 cmd = mboxq->u.mb.mbxCommand;
9488 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9490 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9492 sli_flag = psli->sli_flag;
9493 spin_unlock_irq(&phba->hbalock);
9494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9495 "2352 Mailbox command x%x "
9496 "(x%x/x%x) sli_flag x%x could "
9498 cmd, subsys, opcode,
9501 spin_unlock_irq(&phba->hbalock);
9509 /* Can not cleanly block async mailbox command, fails it */
9511 spin_lock_irq(&phba->hbalock);
9512 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9513 spin_unlock_irq(&phba->hbalock);
9519 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9520 * @phba: Pointer to HBA context object.
9522 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9523 * commands from the driver internal pending mailbox queue. It makes sure
9524 * that there is no outstanding mailbox command before resuming posting
9525 * asynchronous mailbox commands. If, for any reason, there is outstanding
9526 * mailbox command, it will try to wait it out before resuming asynchronous
9527 * mailbox command posting.
9530 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9532 struct lpfc_sli *psli = &phba->sli;
9534 spin_lock_irq(&phba->hbalock);
9535 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9536 /* Asynchronous mailbox posting is not blocked, do nothing */
9537 spin_unlock_irq(&phba->hbalock);
9541 /* Outstanding synchronous mailbox command is guaranteed to be done,
9542 * successful or timeout, after timing-out the outstanding mailbox
9543 * command shall always be removed, so just unblock posting async
9544 * mailbox command and resume
9546 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9547 spin_unlock_irq(&phba->hbalock);
9549 /* wake up worker thread to post asynchronous mailbox command */
9550 lpfc_worker_wake_up(phba);
9551 }
9554 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9555 * @phba: Pointer to HBA context object.
9556 * @mboxq: Pointer to mailbox object.
9558 * The function waits for the bootstrap mailbox register ready bit from the
9559 * port for twice the regular mailbox command timeout value.
9561 * 0 - no timeout on waiting for bootstrap mailbox register ready.
9562 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
9563 **/
9564 static int
9565 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9566 {
9567 uint32_t db_ready;
9568 unsigned long timeout;
9569 struct lpfc_register bmbx_reg;
9571 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9572 * 1000) + jiffies;
9574 do {
9575 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9576 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9577 if (!db_ready)
9578 mdelay(2);
9580 if (time_after(jiffies, timeout))
9581 return MBXERR_ERROR;
9582 } while (!db_ready);
9584 return 0;
9585 }
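/*
 * Editor's note: a condensed sketch of the ready-bit poll above: re-read the
 * doorbell register, back off briefly while it is not ready, and bound the
 * whole wait with a jiffies deadline. BMBX_READY, bmbx_addr and tmo_ms are
 * placeholders, not driver symbols:
 *
 *   unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);
 *   u32 word;
 *
 *   do {
 *           word = readl(bmbx_addr);
 *           if (!(word & BMBX_READY))
 *                   mdelay(2);
 *           if (time_after(jiffies, deadline))
 *                   return MBXERR_ERROR;
 *   } while (!(word & BMBX_READY));
 */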
9587 /**
9588 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9589 * @phba: Pointer to HBA context object.
9590 * @mboxq: Pointer to mailbox object.
9592 * The function posts a mailbox to the port. The mailbox is expected
9593 * to be completely filled in and ready for the port to operate on it.
9594 * This routine executes a synchronous completion operation on the
9595 * mailbox by polling for its completion.
9597 * The caller must not be holding any locks when calling this routine.
9600 * MBX_SUCCESS - mailbox posted successfully
9601 * Any of the MBX error values.
9602 **/
9603 static int
9604 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9605 {
9606 int rc = MBX_SUCCESS;
9607 unsigned long iflag;
9608 uint32_t mcqe_status;
9609 uint32_t mbx_cmnd;
9610 struct lpfc_sli *psli = &phba->sli;
9611 struct lpfc_mqe *mb = &mboxq->u.mqe;
9612 struct lpfc_bmbx_create *mbox_rgn;
9613 struct dma_address *dma_address;
9616 * Only one mailbox can be active to the bootstrap mailbox region
9617 * at a time and there is no queueing provided.
9619 spin_lock_irqsave(&phba->hbalock, iflag);
9620 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9621 spin_unlock_irqrestore(&phba->hbalock, iflag);
9622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9623 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9624 "cannot issue Data: x%x x%x\n",
9625 mboxq->vport ? mboxq->vport->vpi : 0,
9626 mboxq->u.mb.mbxCommand,
9627 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9628 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9629 psli->sli_flag, MBX_POLL);
9630 return MBXERR_ERROR;
9632 /* The server grabs the token and owns it until release */
9633 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9634 phba->sli.mbox_active = mboxq;
9635 spin_unlock_irqrestore(&phba->hbalock, iflag);
9637 /* wait for bootstrap mbox register for readiness */
9638 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9639 if (rc)
9640 goto exit;
9642 * Initialize the bootstrap memory region to avoid stale data areas
9643 * in the mailbox post. Then copy the caller's mailbox contents to
9644 * the bmbx mailbox region.
9646 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9647 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9648 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9649 sizeof(struct lpfc_mqe));
9651 /* Post the high mailbox dma address to the port and wait for ready. */
9652 dma_address = &phba->sli4_hba.bmbx.dma_address;
9653 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9655 /* wait for bootstrap mbox register for hi-address write done */
9656 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9657 if (rc)
9658 goto exit;
9660 /* Post the low mailbox dma address to the port. */
9661 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9663 /* wait for bootstrap mbox register for low address write done */
9664 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9665 if (rc)
9666 goto exit;
9668 /*
9669 * Read the CQ to ensure the mailbox has completed.
9670 * If so, update the mailbox status so that the upper layers
9671 * can complete the request normally.
9673 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9674 sizeof(struct lpfc_mqe));
9675 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9676 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9677 sizeof(struct lpfc_mcqe));
9678 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9679 /*
9680 * When the CQE status indicates a failure and the mailbox status
9681 * indicates success then copy the CQE status into the mailbox status
9682 * (and prefix it with x4000).
9684 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9685 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9686 bf_set(lpfc_mqe_status, mb,
9687 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9688 rc = MBXERR_ERROR;
9689 } else
9690 lpfc_sli4_swap_str(phba, mboxq);
9692 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9693 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9694 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9695 " x%x x%x CQ: x%x x%x x%x x%x\n",
9696 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9697 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9698 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9699 bf_get(lpfc_mqe_status, mb),
9700 mb->un.mb_words[0], mb->un.mb_words[1],
9701 mb->un.mb_words[2], mb->un.mb_words[3],
9702 mb->un.mb_words[4], mb->un.mb_words[5],
9703 mb->un.mb_words[6], mb->un.mb_words[7],
9704 mb->un.mb_words[8], mb->un.mb_words[9],
9705 mb->un.mb_words[10], mb->un.mb_words[11],
9706 mb->un.mb_words[12], mboxq->mcqe.word0,
9707 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
9708 mboxq->mcqe.trailer);
9709 exit:
9710 /* We are holding the token; no lock is needed on release */
9711 spin_lock_irqsave(&phba->hbalock, iflag);
9712 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9713 phba->sli.mbox_active = NULL;
9714 spin_unlock_irqrestore(&phba->hbalock, iflag);
9715 return rc;
9716 }
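/*
 * Editor's note: in outline, the bootstrap mailbox handshake above is a
 * two-beat address post: write the high half of the mailbox DMA address and
 * wait for ready, then write the low half (which starts execution) and wait
 * again before copying results back out of the same DMA region:
 *
 *   writel(dma_hi, bmbx_reg);       // beat 1: high address
 *   wait_bmbx_ready();
 *   writel(dma_lo, bmbx_reg);       // beat 2: low address, kicks it off
 *   wait_bmbx_ready();
 *   memcpy(result, bmbx_virt, result_len);
 *
 * (bmbx_reg, bmbx_virt and wait_bmbx_ready() stand in for the driver's own
 * register, DMA buffer and lpfc_sli4_wait_bmbx_ready().)
 */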
9718 /**
9719 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9720 * @phba: Pointer to HBA context object.
9721 * @mboxq: Pointer to mailbox object.
9722 * @flag: Flag indicating how the mailbox need to be processed.
9724 * This function is called by discovery code and HBA management code to submit
9725 * a mailbox command to firmware with SLI-4 interface spec.
9727 * Return codes: the caller owns the mailbox command after the return of the
9728 * function.
9729 **/
9730 static int
9731 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9732 uint32_t flag)
9733 {
9734 struct lpfc_sli *psli = &phba->sli;
9735 unsigned long iflags;
9736 int rc;
9738 /* dump from issue mailbox command if setup */
9739 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9741 rc = lpfc_mbox_dev_check(phba);
9742 if (unlikely(rc)) {
9743 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9744 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9745 "cannot issue Data: x%x x%x\n",
9746 mboxq->vport ? mboxq->vport->vpi : 0,
9747 mboxq->u.mb.mbxCommand,
9748 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9749 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9750 psli->sli_flag, flag);
9751 goto out_not_finished;
9752 }
9754 /* Detect polling mode and jump to a handler */
9755 if (!phba->sli4_hba.intr_enable) {
9756 if (flag == MBX_POLL)
9757 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9758 else
9759 rc = -EIO;
9760 if (rc != MBX_SUCCESS)
9761 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9762 "(%d):2541 Mailbox command x%x "
9763 "(x%x/x%x) failure: "
9764 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9766 mboxq->vport ? mboxq->vport->vpi : 0,
9767 mboxq->u.mb.mbxCommand,
9768 lpfc_sli_config_mbox_subsys_get(phba,
9770 lpfc_sli_config_mbox_opcode_get(phba,
9772 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9773 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9774 bf_get(lpfc_mcqe_ext_status,
9776 psli->sli_flag, flag);
9777 return rc;
9778 } else if (flag == MBX_POLL) {
9779 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9780 "(%d):2542 Try to issue mailbox command "
9781 "x%x (x%x/x%x) synchronously ahead of async "
9782 "mailbox command queue: x%x x%x\n",
9783 mboxq->vport ? mboxq->vport->vpi : 0,
9784 mboxq->u.mb.mbxCommand,
9785 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9786 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9787 psli->sli_flag, flag);
9788 /* Try to block the asynchronous mailbox posting */
9789 rc = lpfc_sli4_async_mbox_block(phba);
9790 if (!rc) {
9791 /* Successfully blocked, now issue sync mbox cmd */
9792 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9793 if (rc != MBX_SUCCESS)
9794 lpfc_printf_log(phba, KERN_WARNING,
9796 "(%d):2597 Sync Mailbox command "
9797 "x%x (x%x/x%x) failure: "
9798 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9800 mboxq->vport ? mboxq->vport->vpi : 0,
9801 mboxq->u.mb.mbxCommand,
9802 lpfc_sli_config_mbox_subsys_get(phba,
9804 lpfc_sli_config_mbox_opcode_get(phba,
9806 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9807 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9808 bf_get(lpfc_mcqe_ext_status,
9810 psli->sli_flag, flag);
9811 /* Unblock the async mailbox posting afterward */
9812 lpfc_sli4_async_mbox_unblock(phba);
9813 }
9814 return rc;
9815 }
9817 /* Now, interrupt mode asynchronous mailbox command */
9818 rc = lpfc_mbox_cmd_check(phba, mboxq);
9819 if (rc) {
9820 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9821 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9822 "cannot issue Data: x%x x%x\n",
9823 mboxq->vport ? mboxq->vport->vpi : 0,
9824 mboxq->u.mb.mbxCommand,
9825 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9826 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9827 psli->sli_flag, flag);
9828 goto out_not_finished;
9829 }
9831 /* Put the mailbox command to the driver internal FIFO */
9832 psli->slistat.mbox_busy++;
9833 spin_lock_irqsave(&phba->hbalock, iflags);
9834 lpfc_mbox_put(phba, mboxq);
9835 spin_unlock_irqrestore(&phba->hbalock, iflags);
9836 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9837 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9838 "x%x (x%x/x%x) x%x x%x x%x\n",
9839 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9840 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9841 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9842 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9843 phba->pport->port_state,
9844 psli->sli_flag, MBX_NOWAIT);
9845 /* Wake up worker thread to transport mailbox command from head */
9846 lpfc_worker_wake_up(phba);
9848 return MBX_BUSY;
9850 out_not_finished:
9851 return MBX_NOT_FINISHED;
9852 }
9854 /**
9855 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9856 * @phba: Pointer to HBA context object.
9858 * This function is called by worker thread to send a mailbox command to
9859 * SLI4 HBA firmware.
9860 **/
9862 int
9863 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9864 {
9865 struct lpfc_sli *psli = &phba->sli;
9866 LPFC_MBOXQ_t *mboxq;
9867 int rc = MBX_SUCCESS;
9868 unsigned long iflags;
9869 struct lpfc_mqe *mqe;
9870 uint32_t mbx_cmnd;
9872 /* Check interrupt mode before post async mailbox command */
9873 if (unlikely(!phba->sli4_hba.intr_enable))
9874 return MBX_NOT_FINISHED;
9876 /* Check for mailbox command service token */
9877 spin_lock_irqsave(&phba->hbalock, iflags);
9878 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9879 spin_unlock_irqrestore(&phba->hbalock, iflags);
9880 return MBX_NOT_FINISHED;
9882 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9883 spin_unlock_irqrestore(&phba->hbalock, iflags);
9884 return MBX_NOT_FINISHED;
9886 if (unlikely(phba->sli.mbox_active)) {
9887 spin_unlock_irqrestore(&phba->hbalock, iflags);
9888 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9889 "0384 There is pending active mailbox cmd\n");
9890 return MBX_NOT_FINISHED;
9892 /* Take the mailbox command service token */
9893 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9895 /* Get the next mailbox command from head of queue */
9896 mboxq = lpfc_mbox_get(phba);
9898 /* If no more mailbox command waiting for post, we're done */
9899 if (!mboxq) {
9900 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9901 spin_unlock_irqrestore(&phba->hbalock, iflags);
9902 return MBX_SUCCESS;
9903 }
9904 phba->sli.mbox_active = mboxq;
9905 spin_unlock_irqrestore(&phba->hbalock, iflags);
9907 /* Check device readiness for posting mailbox command */
9908 rc = lpfc_mbox_dev_check(phba);
9909 if (unlikely(rc)) {
9910 /* Driver clean routine will clean up pending mailbox */
9911 goto out_not_finished;
9912 }
9913 /* Prepare the mbox command to be posted */
9914 mqe = &mboxq->u.mqe;
9915 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9917 /* Start timer for the mbox_tmo and log some mailbox post messages */
9918 mod_timer(&psli->mbox_tmo, (jiffies +
9919 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9921 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9922 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9924 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9925 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9926 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9927 phba->pport->port_state, psli->sli_flag);
9929 if (mbx_cmnd != MBX_HEARTBEAT) {
9930 if (mboxq->vport) {
9931 lpfc_debugfs_disc_trc(mboxq->vport,
9932 LPFC_DISC_TRC_MBOX_VPORT,
9933 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9934 mbx_cmnd, mqe->un.mb_words[0],
9935 mqe->un.mb_words[1]);
9936 } else {
9937 lpfc_debugfs_disc_trc(phba->pport,
9938 LPFC_DISC_TRC_MBOX,
9939 "MBOX Send: cmd:x%x mb:x%x x%x",
9940 mbx_cmnd, mqe->un.mb_words[0],
9941 mqe->un.mb_words[1]);
9942 }
9943 }
9944 psli->slistat.mbox_cmd++;
9946 /* Post the mailbox command to the port */
9947 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9948 if (rc != MBX_SUCCESS) {
9949 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9950 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9951 "cannot issue Data: x%x x%x\n",
9952 mboxq->vport ? mboxq->vport->vpi : 0,
9953 mboxq->u.mb.mbxCommand,
9954 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9955 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9956 psli->sli_flag, MBX_NOWAIT);
9957 goto out_not_finished;
9958 }
9960 return MBX_SUCCESS;
9962 out_not_finished:
9963 spin_lock_irqsave(&phba->hbalock, iflags);
9964 if (phba->sli.mbox_active) {
9965 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9966 __lpfc_mbox_cmpl_put(phba, mboxq);
9967 /* Release the token */
9968 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9969 phba->sli.mbox_active = NULL;
9970 }
9971 spin_unlock_irqrestore(&phba->hbalock, iflags);
9973 return MBX_NOT_FINISHED;
9974 }
9976 /**
9977 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9978 * @phba: Pointer to HBA context object.
9979 * @pmbox: Pointer to mailbox object.
9980 * @flag: Flag indicating how the mailbox need to be processed.
9982 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
9983 * the API jump table function pointer from the lpfc_hba struct.
9985 * Return codes: the caller owns the mailbox command after the return of the
9986 * function.
9987 **/
9988 int
9989 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9990 {
9991 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9992 }
9994 /**
9995 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9996 * @phba: The hba struct for which this call is being executed.
9997 * @dev_grp: The HBA PCI-Device group number.
9999 * This routine sets up the mbox interface API function jump table in @phba
10000 * struct.
10001 * Returns: 0 - success, -ENODEV - failure.
10002 **/
10003 int
10004 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10005 {
10007 switch (dev_grp) {
10008 case LPFC_PCI_DEV_LP:
10009 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10010 phba->lpfc_sli_handle_slow_ring_event =
10011 lpfc_sli_handle_slow_ring_event_s3;
10012 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10013 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10014 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10016 case LPFC_PCI_DEV_OC:
10017 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10018 phba->lpfc_sli_handle_slow_ring_event =
10019 lpfc_sli_handle_slow_ring_event_s4;
10020 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10021 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10022 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10025 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10026 "1420 Invalid HBA PCI-device group: 0x%x\n",
10034 * __lpfc_sli_ringtx_put - Add an iocb to the txq
10035 * @phba: Pointer to HBA context object.
10036 * @pring: Pointer to driver SLI ring object.
10037 * @piocb: Pointer to address of newly added command iocb.
10039 * This function is called with hbalock held for SLI3 ports or
10040 * the ring lock held for SLI4 ports to add a command
10041 * iocb to the txq when the SLI layer cannot submit the command iocb to the firmware.
10045 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10046 struct lpfc_iocbq *piocb)
10048 if (phba->sli_rev == LPFC_SLI_REV4)
10049 lockdep_assert_held(&pring->ring_lock);
10051 lockdep_assert_held(&phba->hbalock);
10052 /* Insert the caller's iocb in the txq tail for later processing. */
10053 list_add_tail(&piocb->list, &pring->txq);
10057 * lpfc_sli_next_iocb - Get the next iocb in the txq
10058 * @phba: Pointer to HBA context object.
10059 * @pring: Pointer to driver SLI ring object.
10060 * @piocb: Pointer to address of newly added command iocb.
10062 * This function is called with hbalock held before a new
10063 * iocb is submitted to the firmware. This function drains
10064 * any iocbs pending in the txq to the firmware before
10065 * the new iocb is submitted.
10066 * If there are iocbs in the txq which need to be submitted
10067 * to firmware, lpfc_sli_next_iocb returns the first element
10068 * of the txq after dequeuing it from txq.
10069 * If there is no iocb in the txq then the function returns
10070 * *piocb and sets *piocb to NULL. The caller needs to check
10071 * *piocb to find out whether more commands remain in the txq.
10073 static struct lpfc_iocbq *
10074 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10075 struct lpfc_iocbq **piocb)
10077 struct lpfc_iocbq *nextiocb;
10079 lockdep_assert_held(&phba->hbalock);
10081 nextiocb = lpfc_sli_ringtx_get(phba, pring);
10091 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10092 * @phba: Pointer to HBA context object.
10093 * @ring_number: SLI ring number to issue iocb on.
10094 * @piocb: Pointer to command iocb.
10095 * @flag: Flag indicating if this command can be put into txq.
10097 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10098 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10099 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10100 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10101 * this function allows only iocbs for posting buffers. This function finds
10102 * next available slot in the command ring and posts the command to the
10103 * available slot and writes the port attention register to request HBA start
10104 * processing new iocb. If there is no slot available in the ring and
10105 * flag & SLI_IOCB_RET_IOCB is not set, the new iocb is added to the txq;
10106 * otherwise the function returns IOCB_BUSY.
10108 * This function is called with hbalock held. The function will return success
10109 * after it successfully submits the iocb to the firmware or after adding it to the txq.
10113 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10114 struct lpfc_iocbq *piocb, uint32_t flag)
10116 struct lpfc_iocbq *nextiocb;
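10117 IOCB_t *iocb;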
10118 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10120 lockdep_assert_held(&phba->hbalock);
10122 if (piocb->cmd_cmpl && (!piocb->vport) &&
10123 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10124 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10125 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10126 "1807 IOCB x%x failed. No vport\n",
10127 piocb->iocb.ulpCommand);
10133 /* If the PCI channel is in offline state, do not post iocbs. */
10134 if (unlikely(pci_channel_offline(phba->pcidev)))
10137 /* If HBA has a deferred error attention, fail the iocb. */
10138 if (unlikely(phba->hba_flag & DEFER_ERATT))
10142 * We should never get an IOCB if we are in a < LINK_DOWN state
10144 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10148 * Check to see if we are blocking IOCB processing because of an
10149 * outstanding event.
10151 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10154 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10156 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10157 * can be issued if the link is not up.
10159 switch (piocb->iocb.ulpCommand) {
10160 case CMD_GEN_REQUEST64_CR:
10161 case CMD_GEN_REQUEST64_CX:
10162 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
10163 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
10164 FC_RCTL_DD_UNSOL_CMD) ||
10165 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
10166 MENLO_TRANSPORT_TYPE))
10170 case CMD_QUE_RING_BUF_CN:
10171 case CMD_QUE_RING_BUF64_CN:
10173 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10174 * completion, cmd_cmpl MUST be 0.
10176 if (piocb->cmd_cmpl)
10177 piocb->cmd_cmpl = NULL;
10179 case CMD_CREATE_XRI_CR:
10180 case CMD_CLOSE_XRI_CN:
10181 case CMD_CLOSE_XRI_CX:
10188 * For FCP commands, we must be in a state where we can process link
10189 * attention events.
10191 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10192 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10196 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10197 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10198 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10201 lpfc_sli_update_ring(phba, pring);
10203 lpfc_sli_update_full_ring(phba, pring);
10206 return IOCB_SUCCESS;
10211 pring->stats.iocb_cmd_delay++;
10215 if (!(flag & SLI_IOCB_RET_IOCB)) {
10216 __lpfc_sli_ringtx_put(phba, pring, piocb);
10217 return IOCB_SUCCESS;
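/* Summary of the out_busy handling above: SLI_IOCB_RET_IOCB selects who
 * keeps a busied iocb. With the flag clear the iocb is parked on
 * pring->txq and IOCB_SUCCESS is returned; with the flag set the iocb
 * is handed back to the caller with IOCB_BUSY.
 */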
10224 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
10225 * @phba: Pointer to HBA context object.
10226 * @piocbq: Pointer to command iocb.
10227 * @sglq: Pointer to the scatter gather queue object.
10229 * This routine converts the bpl or bde that is in the IOCB
10230 * to a sgl list for the sli4 hardware. The physical address
10231 * of the bpl/bde is converted back to a virtual address.
10232 * If the IOCB contains a BPL then the list of BDE's is
10233 * converted to sli4_sge's. If the IOCB contains a single
10234 * BDE then it is converted to a single sli_sge.
10235 * The IOCB is still in CPU endianness so the contents of
10236 * the bpl can be used without byte swapping.
10238 * Returns valid XRI = Success, NO_XRI = Failure.
10241 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
10242 struct lpfc_sglq *sglq)
10244 uint16_t xritag = NO_XRI;
10245 struct ulp_bde64 *bpl = NULL;
10246 struct ulp_bde64 bde;
10247 struct sli4_sge *sgl = NULL;
10248 struct lpfc_dmabuf *dmabuf;
10252 uint32_t offset = 0; /* accumulated offset in the sg request list */
10253 int inbound = 0; /* number of sg reply entries inbound from firmware */
10255 if (!piocbq || !sglq)
10258 sgl = (struct sli4_sge *)sglq->sgl;
10259 icmd = &piocbq->iocb;
10260 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
10261 return sglq->sli4_xritag;
10262 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
10263 numBdes = icmd->un.genreq64.bdl.bdeSize /
10264 sizeof(struct ulp_bde64);
10265 /* The addrHigh and addrLow fields within the IOCB
10266 * have not been byteswapped yet so there is no
10267 * need to swap them back.
10269 if (piocbq->context3)
10270 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
10274 bpl = (struct ulp_bde64 *)dmabuf->virt;
10278 for (i = 0; i < numBdes; i++) {
10279 /* Should already be byte swapped. */
10280 sgl->addr_hi = bpl->addrHigh;
10281 sgl->addr_lo = bpl->addrLow;
10283 sgl->word2 = le32_to_cpu(sgl->word2);
10284 if ((i+1) == numBdes)
10285 bf_set(lpfc_sli4_sge_last, sgl, 1);
10287 bf_set(lpfc_sli4_sge_last, sgl, 0);
10288 /* swap the size field back to the cpu so we
10289 * can assign it to the sgl.
10291 bde.tus.w = le32_to_cpu(bpl->tus.w);
10292 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
10293 /* The offsets in the sgl need to be accumulated
10294 * separately for the request and reply lists.
10295 * The request is always first, the reply follows.
10297 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
10298 /* add up the reply sg entries */
10299 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
10301 /* first inbound? reset the offset */
10304 bf_set(lpfc_sli4_sge_offset, sgl, offset);
10305 bf_set(lpfc_sli4_sge_type, sgl,
10306 LPFC_SGE_TYPE_DATA);
10307 offset += bde.tus.f.bdeSize;
10309 sgl->word2 = cpu_to_le32(sgl->word2);
10313 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
10314 /* The addrHigh and addrLow fields of the BDE have not
10315 * been byteswapped yet so they need to be swapped
10316 * before putting them in the sgl.
10319 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
10321 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
10322 sgl->word2 = le32_to_cpu(sgl->word2);
10323 bf_set(lpfc_sli4_sge_last, sgl, 1);
10324 sgl->word2 = cpu_to_le32(sgl->word2);
10326 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
10328 return sglq->sli4_xritag;
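/*
 * Shape of the conversion above, one BPL entry per SGE (illustrative
 * pseudo-code, field handling simplified):
 *
 *	for (i = 0; i < numBdes; i++) {
 *		sgl[i].addr_hi = bpl[i].addrHigh;   // already little-endian
 *		sgl[i].addr_lo = bpl[i].addrLow;
 *		sgl[i].sge_len = cpu_to_le32(bpl[i].bdeSize);
 *		// "last" bit only on entry numBdes - 1; offsets accumulate
 *		// separately for the request and reply legs
 *	}
 */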
10332 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
10333 * @phba: Pointer to HBA context object.
10334 * @iocbq: Pointer to command iocb.
10335 * @wqe: Pointer to the work queue entry.
10337 * This routine converts the iocb command to its Work Queue Entry
10338 * equivalent. The wqe pointer should not have any fields set when
10339 * this routine is called because it will memcpy over them.
10340 * This routine does not set the CQ_ID or the WQEC bits in the wqe.
10343 * Returns: 0 = Success, IOCB_ERROR = Failure.
10346 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
10347 union lpfc_wqe128 *wqe)
10349 uint32_t xmit_len = 0, total_len = 0;
10352 uint32_t abort_tag;
10353 uint8_t command_type = ELS_COMMAND_NON_FIP;
10356 uint16_t abrt_iotag;
10357 struct lpfc_iocbq *abrtiocbq;
10358 struct ulp_bde64 *bpl = NULL;
10359 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
10361 struct ulp_bde64 bde;
10362 struct lpfc_nodelist *ndlp;
10366 fip = phba->hba_flag & HBA_FIP_SUPPORT;
10367 /* The fcp commands will set command type */
10368 if (iocbq->cmd_flag & LPFC_IO_FCP)
10369 command_type = FCP_COMMAND;
10370 else if (fip && (iocbq->cmd_flag & LPFC_FIP_ELS_ID_MASK))
10371 command_type = ELS_COMMAND_FIP;
10373 command_type = ELS_COMMAND_NON_FIP;
10375 if (phba->fcp_embed_io)
10376 memset(wqe, 0, sizeof(union lpfc_wqe128));
10377 /* Some of the fields are in the right position already */
10378 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
10379 /* The ct field has moved so reset */
10380 wqe->generic.wqe_com.word7 = 0;
10381 wqe->generic.wqe_com.word10 = 0;
10383 abort_tag = (uint32_t) iocbq->iotag;
10384 xritag = iocbq->sli4_xritag;
10385 /* words0-2 bpl convert bde */
10386 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
10387 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10388 sizeof(struct ulp_bde64);
10389 bpl = (struct ulp_bde64 *)
10390 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
10394 /* Should already be byte swapped. */
10395 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
10396 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
10397 /* swap the size field back to the cpu so we
10398 * can assign it to the sgl.
10400 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
10401 xmit_len = wqe->generic.bde.tus.f.bdeSize;
10403 for (i = 0; i < numBdes; i++) {
10404 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10405 total_len += bde.tus.f.bdeSize;
10408 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
10410 iocbq->iocb.ulpIoTag = iocbq->iotag;
10411 cmnd = iocbq->iocb.ulpCommand;
10413 switch (iocbq->iocb.ulpCommand) {
10414 case CMD_ELS_REQUEST64_CR:
10415 if (iocbq->cmd_flag & LPFC_IO_LIBDFC)
10416 ndlp = iocbq->context_un.ndlp;
10418 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10419 if (!iocbq->iocb.ulpLe) {
10420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10421 "2007 Only Limited Edition cmd Format"
10422 " supported 0x%x\n",
10423 iocbq->iocb.ulpCommand);
10427 wqe->els_req.payload_len = xmit_len;
10428 /* Els_request64 has a TMO */
10429 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
10430 iocbq->iocb.ulpTimeout);
10431 /* Need a VF for word 4 set the vf bit*/
10432 bf_set(els_req64_vf, &wqe->els_req, 0);
10433 /* And a VFID for word 12 */
10434 bf_set(els_req64_vfid, &wqe->els_req, 0);
10435 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10436 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10437 iocbq->iocb.ulpContext);
10438 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
10439 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
10440 /* CCP CCPE PV PRI in word10 were set in the memcpy */
10441 if (command_type == ELS_COMMAND_FIP)
10442 els_id = ((iocbq->cmd_flag & LPFC_FIP_ELS_ID_MASK)
10443 >> LPFC_FIP_ELS_ID_SHIFT);
10444 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
10445 iocbq->context2)->virt);
10446 if_type = bf_get(lpfc_sli_intf_if_type,
10447 &phba->sli4_hba.sli_intf);
10448 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10449 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
10450 *pcmd == ELS_CMD_SCR ||
10451 *pcmd == ELS_CMD_RDF ||
10452 *pcmd == ELS_CMD_EDC ||
10453 *pcmd == ELS_CMD_RSCN_XMT ||
10454 *pcmd == ELS_CMD_FDISC ||
10455 *pcmd == ELS_CMD_LOGO ||
10456 *pcmd == ELS_CMD_QFPA ||
10457 *pcmd == ELS_CMD_UVEM ||
10458 *pcmd == ELS_CMD_PLOGI)) {
10459 bf_set(els_req64_sp, &wqe->els_req, 1);
10460 bf_set(els_req64_sid, &wqe->els_req,
10461 iocbq->vport->fc_myDID);
10462 if ((*pcmd == ELS_CMD_FLOGI) &&
10463 !(phba->fc_topology ==
10464 LPFC_TOPOLOGY_LOOP))
10465 bf_set(els_req64_sid, &wqe->els_req, 0);
10466 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
10467 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10468 phba->vpi_ids[iocbq->vport->vpi]);
10469 } else if (pcmd && iocbq->context1) {
10470 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
10471 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10472 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10475 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
10476 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10477 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10478 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
10479 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
10480 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
10481 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10482 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
10483 wqe->els_req.max_response_payload_len = total_len - xmit_len;
10485 case CMD_XMIT_SEQUENCE64_CX:
10486 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
10487 iocbq->iocb.un.ulpWord[3]);
10488 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
10489 iocbq->iocb.unsli3.rcvsli3.ox_id);
10490 /* The entire sequence is transmitted for this IOCB */
10491 xmit_len = total_len;
10492 cmnd = CMD_XMIT_SEQUENCE64_CR;
10493 if (phba->link_flag & LS_LOOPBACK_MODE)
10494 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
10496 case CMD_XMIT_SEQUENCE64_CR:
10497 /* word3 iocb=io_tag32 wqe=reserved */
10498 wqe->xmit_sequence.rsvd3 = 0;
10499 /* word4 relative_offset memcpy */
10500 /* word5 r_ctl/df_ctl memcpy */
10501 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
10502 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
10503 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
10504 LPFC_WQE_IOD_WRITE);
10505 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
10506 LPFC_WQE_LENLOC_WORD12);
10507 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
10508 wqe->xmit_sequence.xmit_len = xmit_len;
10509 command_type = OTHER_COMMAND;
10511 case CMD_XMIT_BCAST64_CN:
10512 /* word3 iocb=iotag32 wqe=seq_payload_len */
10513 wqe->xmit_bcast64.seq_payload_len = xmit_len;
10514 /* word4 iocb=rsvd wqe=rsvd */
10515 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
10516 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
10517 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
10518 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10519 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
10520 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
10521 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
10522 LPFC_WQE_LENLOC_WORD3);
10523 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
10525 case CMD_FCP_IWRITE64_CR:
10526 command_type = FCP_COMMAND_DATA_OUT;
10527 /* word3 iocb=iotag wqe=payload_offset_len */
10528 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10529 bf_set(payload_offset_len, &wqe->fcp_iwrite,
10530 xmit_len + sizeof(struct fcp_rsp));
10531 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
10533 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
10534 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
10535 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
10536 iocbq->iocb.ulpFCP2Rcvy);
10537 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
10538 /* Always open the exchange */
10539 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
10540 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
10541 LPFC_WQE_LENLOC_WORD4);
10542 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
10543 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
10544 if (iocbq->cmd_flag & LPFC_IO_OAS) {
10545 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
10546 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10547 if (iocbq->priority) {
10548 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10549 (iocbq->priority << 1));
10551 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10552 (phba->cfg_XLanePriority << 1));
10555 /* Note, word 10 is already initialized to 0 */
10557 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
10558 if (phba->cfg_enable_pbde)
10559 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
10561 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
10563 if (phba->fcp_embed_io) {
10564 struct lpfc_io_buf *lpfc_cmd;
10565 struct sli4_sge *sgl;
10566 struct fcp_cmnd *fcp_cmnd;
10569 /* 128 byte wqe support here */
10571 lpfc_cmd = iocbq->context1;
10572 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10573 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10575 /* Word 0-2 - FCP_CMND */
10576 wqe->generic.bde.tus.f.bdeFlags =
10577 BUFF_TYPE_BDE_IMMED;
10578 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10579 wqe->generic.bde.addrHigh = 0;
10580 wqe->generic.bde.addrLow = 88; /* Word 22 */
10582 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10583 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10585 /* Word 22-29 FCP CMND Payload */
10586 ptr = &wqe->words[22];
10587 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
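/* The immediate BDE above points at byte offset 88, which is 4-byte
 * word 22 (22 * 4 == 88) of the 128-byte WQE, exactly where the
 * FCP_CMND payload (words 22-29) was just copied. The IREAD and ICMND
 * cases below repeat the same embedded layout.
 */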
10590 case CMD_FCP_IREAD64_CR:
10591 /* word3 iocb=iotag wqe=payload_offset_len */
10592 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10593 bf_set(payload_offset_len, &wqe->fcp_iread,
10594 xmit_len + sizeof(struct fcp_rsp));
10595 bf_set(cmd_buff_len, &wqe->fcp_iread,
10597 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
10598 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
10599 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
10600 iocbq->iocb.ulpFCP2Rcvy);
10601 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
10602 /* Always open the exchange */
10603 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
10604 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
10605 LPFC_WQE_LENLOC_WORD4);
10606 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
10607 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
10608 if (iocbq->cmd_flag & LPFC_IO_OAS) {
10609 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
10610 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
10611 if (iocbq->priority) {
10612 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
10613 (iocbq->priority << 1));
10615 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
10616 (phba->cfg_XLanePriority << 1));
10619 /* Note, word 10 is already initialized to 0 */
10621 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
10622 if (phba->cfg_enable_pbde)
10623 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
10625 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
10627 if (phba->fcp_embed_io) {
10628 struct lpfc_io_buf *lpfc_cmd;
10629 struct sli4_sge *sgl;
10630 struct fcp_cmnd *fcp_cmnd;
10633 /* 128 byte wqe support here */
10635 lpfc_cmd = iocbq->context1;
10636 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10637 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10639 /* Word 0-2 - FCP_CMND */
10640 wqe->generic.bde.tus.f.bdeFlags =
10641 BUFF_TYPE_BDE_IMMED;
10642 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10643 wqe->generic.bde.addrHigh = 0;
10644 wqe->generic.bde.addrLow = 88; /* Word 22 */
10646 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
10647 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
10649 /* Word 22-29 FCP CMND Payload */
10650 ptr = &wqe->words[22];
10651 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10654 case CMD_FCP_ICMND64_CR:
10655 /* word3 iocb=iotag wqe=payload_offset_len */
10656 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10657 bf_set(payload_offset_len, &wqe->fcp_icmd,
10658 xmit_len + sizeof(struct fcp_rsp));
10659 bf_set(cmd_buff_len, &wqe->fcp_icmd,
10661 /* word3 iocb=IO_TAG wqe=reserved */
10662 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
10663 /* Always open the exchange */
10664 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
10665 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
10666 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
10667 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
10668 LPFC_WQE_LENLOC_NONE);
10669 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
10670 iocbq->iocb.ulpFCP2Rcvy);
10671 if (iocbq->cmd_flag & LPFC_IO_OAS) {
10672 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
10673 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
10674 if (iocbq->priority) {
10675 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
10676 (iocbq->priority << 1));
10678 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
10679 (phba->cfg_XLanePriority << 1));
10682 /* Note, word 10 is already initialized to 0 */
10684 if (phba->fcp_embed_io) {
10685 struct lpfc_io_buf *lpfc_cmd;
10686 struct sli4_sge *sgl;
10687 struct fcp_cmnd *fcp_cmnd;
10690 /* 128 byte wqe support here */
10692 lpfc_cmd = iocbq->context1;
10693 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10694 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10696 /* Word 0-2 - FCP_CMND */
10697 wqe->generic.bde.tus.f.bdeFlags =
10698 BUFF_TYPE_BDE_IMMED;
10699 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10700 wqe->generic.bde.addrHigh = 0;
10701 wqe->generic.bde.addrLow = 88; /* Word 22 */
10703 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
10704 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
10706 /* Word 22-29 FCP CMND Payload */
10707 ptr = &wqe->words[22];
10708 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10711 case CMD_GEN_REQUEST64_CR:
10712 /* For this command calculate the xmit length of the request bde. */
10716 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10717 sizeof(struct ulp_bde64);
10718 for (i = 0; i < numBdes; i++) {
10719 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10720 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
10722 xmit_len += bde.tus.f.bdeSize;
10724 /* word3 iocb=IO_TAG wqe=request_payload_len */
10725 wqe->gen_req.request_payload_len = xmit_len;
10726 /* word4 iocb=parameter wqe=relative_offset memcpy */
10727 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
10728 /* word6 context tag copied in memcpy */
10729 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
10730 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10732 "2015 Invalid CT %x command 0x%x\n",
10733 ct, iocbq->iocb.ulpCommand);
10736 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
10737 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
10738 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
10739 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
10740 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
10741 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
10742 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10743 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
10744 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
10745 command_type = OTHER_COMMAND;
10747 case CMD_XMIT_ELS_RSP64_CX:
10748 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10749 /* words0-2 BDE memcpy */
10750 /* word3 iocb=iotag32 wqe=response_payload_len */
10751 wqe->xmit_els_rsp.response_payload_len = xmit_len;
10753 wqe->xmit_els_rsp.word4 = 0;
10754 /* word5 iocb=rsvd wqe=did */
10755 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
10756 iocbq->iocb.un.xseq64.xmit_els_remoteID);
10758 if_type = bf_get(lpfc_sli_intf_if_type,
10759 &phba->sli4_hba.sli_intf);
10760 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10761 if (iocbq->vport->fc_flag & FC_PT2PT) {
10762 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10763 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10764 iocbq->vport->fc_myDID);
10765 if (iocbq->vport->fc_myDID == Fabric_DID) {
10766 bf_set(wqe_els_did,
10767 &wqe->xmit_els_rsp.wqe_dest, 0);
10771 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
10772 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10773 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
10774 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
10775 iocbq->iocb.unsli3.rcvsli3.ox_id);
10776 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
10777 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10778 phba->vpi_ids[iocbq->vport->vpi]);
10779 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
10780 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
10781 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
10782 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
10783 LPFC_WQE_LENLOC_WORD3);
10784 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
10785 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
10786 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10787 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10788 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10789 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10790 iocbq->vport->fc_myDID);
10791 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
10792 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10793 phba->vpi_ids[phba->pport->vpi]);
10795 command_type = OTHER_COMMAND;
10797 case CMD_CLOSE_XRI_CN:
10798 case CMD_ABORT_XRI_CN:
10799 case CMD_ABORT_XRI_CX:
10800 /* words 0-2 of the memcpy should be 0 (reserved) */
10801 /* port will send abts */
10802 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
10803 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
10804 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
10805 fip = abrtiocbq->cmd_flag & LPFC_FIP_ELS_ID_MASK;
10809 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
10811 * The link is down, or the command was ELS_FIP
10812 * so the fw does not need to send abts
10815 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10817 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10818 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10819 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
10820 wqe->abort_cmd.rsrvd5 = 0;
10821 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
10822 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10823 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
10825 * The abort handler will send us CMD_ABORT_XRI_CN or
10826 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
10828 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10829 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10830 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10831 LPFC_WQE_LENLOC_NONE);
10832 cmnd = CMD_ABORT_XRI_CX;
10833 command_type = OTHER_COMMAND;
10836 case CMD_XMIT_BLS_RSP64_CX:
10837 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10838 /* As BLS ABTS RSP WQE is very different from other WQEs,
10839 * we re-construct this WQE here based on information in
10840 * iocbq from scratch.
10842 memset(wqe, 0, sizeof(*wqe));
10843 /* The OX_ID is the same no matter who sent the ABTS to the CT exchange */
10844 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10845 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10846 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10847 LPFC_ABTS_UNSOL_INT) {
10848 /* ABTS sent by initiator to CT exchange, the
10849 * RX_ID field will be filled with the newly
10850 * allocated responder XRI.
10852 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10853 iocbq->sli4_xritag);
10855 /* ABTS sent by responder to CT exchange, the
10856 * RX_ID field will be filled with the responder
10859 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10860 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10862 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10863 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10866 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10868 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10869 iocbq->iocb.ulpContext);
10870 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10871 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10872 phba->vpi_ids[phba->pport->vpi]);
10873 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10874 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10875 LPFC_WQE_LENLOC_NONE);
10876 /* Overwrite the pre-set command type with OTHER_COMMAND */
10877 command_type = OTHER_COMMAND;
10878 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10879 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10880 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10881 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10882 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10883 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10884 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10888 case CMD_SEND_FRAME:
10889 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10890 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10891 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10892 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10893 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10894 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10895 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10896 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10897 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10898 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10899 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10901 case CMD_XRI_ABORTED_CX:
10902 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10903 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10904 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10905 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10906 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10908 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10909 "2014 Invalid command 0x%x\n",
10910 iocbq->iocb.ulpCommand);
10914 if (iocbq->cmd_flag & LPFC_IO_DIF_PASS)
10915 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10916 else if (iocbq->cmd_flag & LPFC_IO_DIF_STRIP)
10917 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10918 else if (iocbq->cmd_flag & LPFC_IO_DIF_INSERT)
10919 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10920 iocbq->cmd_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10921 LPFC_IO_DIF_INSERT);
10922 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10923 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10924 wqe->generic.wqe_com.abort_tag = abort_tag;
10925 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10926 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10927 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10928 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
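/* Common epilogue: every WQE built by this routine leaves the switch
 * with the same generic fields set here: XRI tag, request tag, abort
 * tag, command type, command opcode, class, and the default CQ ID.
 */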
10933 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10934 * @phba: Pointer to HBA context object.
10935 * @ring_number: SLI ring number to issue wqe on.
10936 * @piocb: Pointer to command iocb.
10937 * @flag: Flag indicating if this command can be put into txq.
10939 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10940 * function to send an iocb command to an HBA with SLI-3 interface spec.
10942 * This function takes the hbalock before invoking the lockless version.
10943 * The function will return success after it successfully submits the iocb to
10944 * the firmware or after adding it to the txq.
10947 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10948 struct lpfc_iocbq *piocb, uint32_t flag)
10950 unsigned long iflags;
10953 spin_lock_irqsave(&phba->hbalock, iflags);
10954 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10955 spin_unlock_irqrestore(&phba->hbalock, iflags);
10961 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10962 * @phba: Pointer to HBA context object.
10963 * @ring_number: SLI ring number to issue wqe on.
10964 * @piocb: Pointer to command iocb.
10965 * @flag: Flag indicating if this command can be put into txq.
10967 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10968 * a wqe command to an HBA with SLI-4 interface spec.
10970 * This function is a lockless version. The function will return success
10971 * after it successfully submits the wqe to the firmware or after adding it to the txq.
10975 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10976 struct lpfc_iocbq *piocb, uint32_t flag)
10979 struct lpfc_io_buf *lpfc_cmd =
10980 (struct lpfc_io_buf *)piocb->context1;
10982 lpfc_prep_embed_io(phba, lpfc_cmd);
10983 rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10988 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10990 struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10991 union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10992 struct sli4_sge *sgl;
10994 /* 128 byte wqe support here */
10995 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10997 if (phba->fcp_embed_io) {
10998 struct fcp_cmnd *fcp_cmnd;
11001 fcp_cmnd = lpfc_cmd->fcp_cmnd;
11003 /* Word 0-2 - FCP_CMND */
11004 wqe->generic.bde.tus.f.bdeFlags =
11005 BUFF_TYPE_BDE_IMMED;
11006 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
11007 wqe->generic.bde.addrHigh = 0;
11008 wqe->generic.bde.addrLow = 88; /* Word 22 */
11010 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
11011 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
11013 /* Word 22-29 FCP CMND Payload */
11014 ptr = &wqe->words[22];
11015 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
11017 /* Word 0-2 - Inline BDE */
11018 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
11019 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
11020 wqe->generic.bde.addrHigh = sgl->addr_hi;
11021 wqe->generic.bde.addrLow = sgl->addr_lo;
11024 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
11025 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
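/* The two branches above differ only in how the FCP_CMND reaches the
 * port: with fcp_embed_io it travels inside the WQE itself as an
 * immediate BDE (words 22-29); otherwise words 0-2 carry an inline
 * 64-bit BDE pointing at the command's DMA address taken from the
 * first SGE.
 */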
11028 /* add the VMID tags as per switch response */
11029 if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
11030 if (phba->pport->vmid_priority_tagging) {
11031 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
11032 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
11033 (piocb->vmid_tag.cs_ctl_vmid));
11035 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
11036 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
11037 wqe->words[31] = piocb->vmid_tag.app_id;
11043 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
11044 * @phba: Pointer to HBA context object.
11045 * @ring_number: SLI ring number to issue iocb on.
11046 * @piocb: Pointer to command iocb.
11047 * @flag: Flag indicating if this command can be put into txq.
11049 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
11050 * an iocb command to an HBA with SLI-4 interface spec.
11052 * This function is called with ringlock held. The function will return success
11053 * after it successfully submits the iocb to the firmware or after adding it to the txq.
11057 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
11058 struct lpfc_iocbq *piocb, uint32_t flag)
11060 struct lpfc_sglq *sglq;
11061 union lpfc_wqe128 *wqe;
11062 struct lpfc_queue *wq;
11063 struct lpfc_sli_ring *pring;
11064 u32 ulp_command = get_job_cmnd(phba, piocb);
11067 if ((piocb->cmd_flag & LPFC_IO_FCP) ||
11068 (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11069 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
11071 wq = phba->sli4_hba.els_wq;
11074 /* Get corresponding ring */
11078 * The WQE can be either 64 or 128 bytes,
11081 lockdep_assert_held(&pring->ring_lock);
11083 if (piocb->sli4_xritag == NO_XRI) {
11084 if (ulp_command == CMD_ABORT_XRI_WQE)
11087 if (!list_empty(&pring->txq)) {
11088 if (!(flag & SLI_IOCB_RET_IOCB)) {
11089 __lpfc_sli_ringtx_put(phba,
11091 return IOCB_SUCCESS;
11096 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
11098 if (!(flag & SLI_IOCB_RET_IOCB)) {
11099 __lpfc_sli_ringtx_put(phba,
11102 return IOCB_SUCCESS;
11108 } else if (piocb->cmd_flag & LPFC_IO_FCP) {
11109 /* These IOs already have an XRI and a mapped sgl. */
11114 * This is a continuation of a command (CX), so this
11115 * sglq is on the active list
11117 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
11123 piocb->sli4_lxritag = sglq->sli4_lxritag;
11124 piocb->sli4_xritag = sglq->sli4_xritag;
11126 /* ABTS sent by initiator to CT exchange, the
11127 * RX_ID field will be filled with the newly
11128 * allocated responder XRI.
11130 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
11131 piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
11132 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
11133 piocb->sli4_xritag);
11135 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
11136 piocb->sli4_xritag);
11138 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
11142 if (lpfc_sli4_wq_put(wq, wqe))
11144 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
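/*
 * Shape of the s4 submit path above (error exits elided, illustrative
 * pseudo-code only):
 *
 *	wq = FCP io ? hdwq[piocb->hba_wqidx].io_wq : els_wq;
 *	resolve an sglq/XRI, or park the iocb on pring->txq when allowed;
 *	lpfc_wqe_bpl2sgl(phba, piocb, sglq);	// fix up the data SGL
 *	lpfc_sli4_wq_put(wq, wqe);		// post to the work queue
 *	lpfc_sli_ringtxcmpl_put(phba, pring, piocb); // track completion
 */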
11150 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
11152 * This routine wraps the actual fcp i/o function for issuing a WQE (SLI-4)
11153 * or an IOCB (SLI-3) via the API jump table function
11154 * pointer from the lpfc_hba struct.
11157 * IOCB_ERROR - Error
11158 * IOCB_SUCCESS - Success
11162 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
11163 struct lpfc_iocbq *piocb, uint32_t flag)
11165 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
11169 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
11171 * This routine wraps the actual lockless IOCB issuing function via the function
11172 * pointer from the lpfc_hba struct.
11175 * IOCB_ERROR - Error
11176 * IOCB_SUCCESS - Success
11180 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11181 struct lpfc_iocbq *piocb, uint32_t flag)
11183 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11187 * lpfc_sli_api_table_setup - Set up sli api function jump table
11188 * @phba: The hba struct for which this call is being executed.
11189 * @dev_grp: The HBA PCI-Device group number.
11191 * This routine sets up the SLI interface API function jump table in the @phba struct.
11193 * Returns: 0 - success, -ENODEV - failure.
11196 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11200 case LPFC_PCI_DEV_LP:
11201 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11202 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11203 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11205 case LPFC_PCI_DEV_OC:
11206 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11207 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11208 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11212 "1419 Invalid HBA PCI-device group: 0x%x\n",
11216 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
11221 * lpfc_sli4_calc_ring - Calculates which ring to use
11222 * @phba: Pointer to HBA context object.
11223 * @piocb: Pointer to command iocb.
11225 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11226 * hba_wqidx, thus we need to calculate the corresponding ring.
11227 * Since ABORTS must go on the same WQ of the command they are
11228 * aborting, we use command's hba_wqidx.
11230 struct lpfc_sli_ring *
11231 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11233 struct lpfc_io_buf *lpfc_cmd;
11235 if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11236 if (unlikely(!phba->sli4_hba.hdwq))
11239 * for an abort iocb, hba_wqidx should already
11240 * be set up based on what work queue we used.
11242 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11243 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
11244 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11246 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11248 if (unlikely(!phba->sli4_hba.els_wq))
11250 piocb->hba_wqidx = 0;
11251 return phba->sli4_hba.els_wq->pring;
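/* Callers such as lpfc_sli_issue_iocb() below use this helper to find
 * the pring whose ring_lock must be held around the lockless issue
 * routine; aborts inherit hba_wqidx so they land on the same WQ as the
 * command being aborted.
 */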
11256 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11257 * @phba: Pointer to HBA context object.
11258 * @ring_number: Ring number
11259 * @piocb: Pointer to command iocb.
11260 * @flag: Flag indicating if this command can be put into txq.
11262 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
11263 * function. This function gets the hbalock and calls
11264 * __lpfc_sli_issue_iocb function and will return the error returned
11265 * by __lpfc_sli_issue_iocb function. This wrapper is used by
11266 * functions which do not hold hbalock.
11269 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11270 struct lpfc_iocbq *piocb, uint32_t flag)
11272 struct lpfc_sli_ring *pring;
11273 struct lpfc_queue *eq;
11274 unsigned long iflags;
11277 if (phba->sli_rev == LPFC_SLI_REV4) {
11278 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11280 pring = lpfc_sli4_calc_ring(phba, piocb);
11281 if (unlikely(pring == NULL))
11284 spin_lock_irqsave(&pring->ring_lock, iflags);
11285 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11286 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11288 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
11290 /* For now, SLI2/3 will still use hbalock */
11291 spin_lock_irqsave(&phba->hbalock, iflags);
11292 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11293 spin_unlock_irqrestore(&phba->hbalock, iflags);
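/* Locking summary for the wrapper above: SLI4 serializes per work
 * queue with pring->ring_lock and then polls the matching EQ on the
 * fast path, while SLI2/3 still funnels every issue through the
 * global hbalock.
 */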
11299 * lpfc_extra_ring_setup - Extra ring setup function
11300 * @phba: Pointer to HBA context object.
11302 * This function is called while the driver attaches to the
11303 * HBA to set up the extra ring. The extra ring is used
11304 * only when the driver needs to support target mode functionality
11305 * or IP over FC functionality.
11307 * This function is called with no lock held. SLI3 only.
11310 lpfc_extra_ring_setup( struct lpfc_hba *phba)
11312 struct lpfc_sli *psli;
11313 struct lpfc_sli_ring *pring;
11317 /* Adjust cmd/rsp ring iocb entries more evenly */
11319 /* Take some away from the FCP ring */
11320 pring = &psli->sli3_ring[LPFC_FCP_RING];
11321 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11322 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11323 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11324 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11326 /* and give them to the extra ring */
11327 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11329 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11330 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11331 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11332 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11334 /* Setup default profile for this ring */
11335 pring->iotag_max = 4096;
11336 pring->num_mask = 1;
11337 pring->prt[0].profile = 0; /* Mask 0 */
11338 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11339 pring->prt[0].type = phba->cfg_multi_ring_type;
11340 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11345 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11346 struct lpfc_nodelist *ndlp)
11348 unsigned long iflags;
11349 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
11351 spin_lock_irqsave(&phba->hbalock, iflags);
11352 if (!list_empty(&evtp->evt_listp)) {
11353 spin_unlock_irqrestore(&phba->hbalock, iflags);
11357 /* Incrementing the reference count until the queued work is done. */
11358 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
11359 if (!evtp->evt_arg1) {
11360 spin_unlock_irqrestore(&phba->hbalock, iflags);
11363 evtp->evt = LPFC_EVT_RECOVER_PORT;
11364 list_add_tail(&evtp->evt_listp, &phba->work_list);
11365 spin_unlock_irqrestore(&phba->hbalock, iflags);
11367 lpfc_worker_wake_up(phba);
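/* Note on the reference protocol above: lpfc_nlp_get() pins the ndlp
 * for as long as the recovery event sits on phba->work_list; if the
 * reference cannot be taken the event is simply not queued, and the
 * worker that consumes evt_arg1 is expected to drop the reference.
 */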
11370 /**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11371 * @phba: Pointer to HBA context object.
11372 * @iocbq: Pointer to iocb object.
11374 * The async_event handler calls this routine when it receives
11375 * an ASYNC_STATUS_CN event from the port. The port generates
11376 * this event when an Abort Sequence request to an rport fails
11377 * twice in succession. The abort could be originated by the
11378 * driver or by the port. The ABTS could have been for an ELS
11379 * or FCP IO. The port only generates this event when an ABTS
11380 * fails to complete after one retry.
11383 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11384 struct lpfc_iocbq *iocbq)
11386 struct lpfc_nodelist *ndlp = NULL;
11387 uint16_t rpi = 0, vpi = 0;
11388 struct lpfc_vport *vport = NULL;
11390 /* The rpi in the ulpContext is vport-sensitive. */
11391 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11392 rpi = iocbq->iocb.ulpContext;
11394 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11395 "3092 Port generated ABTS async event "
11396 "on vpi %d rpi %d status 0x%x\n",
11397 vpi, rpi, iocbq->iocb.ulpStatus);
11399 vport = lpfc_find_vport_by_vpid(phba, vpi);
11402 ndlp = lpfc_findnode_rpi(vport, rpi);
11406 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11407 lpfc_sli_abts_recover_port(vport, ndlp);
11411 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11412 "3095 Event Context not found, no "
11413 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11414 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
11418 /**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11419 * @phba: pointer to HBA context object.
11420 * @ndlp: nodelist pointer for the impacted rport.
11421 * @axri: pointer to the wcqe containing the failed exchange.
11423 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11424 * port. The port generates this event when an abort exchange request to an
11425 * rport fails twice in succession with no reply. The abort could be originated
11426 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
11429 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11430 struct lpfc_nodelist *ndlp,
11431 struct sli4_wcqe_xri_aborted *axri)
11433 uint32_t ext_status = 0;
11436 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11437 "3115 Node Context not found, driver "
11438 "ignoring abts err event\n");
11442 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11443 "3116 Port generated FCP XRI ABORT event on "
11444 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11445 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11446 bf_get(lpfc_wcqe_xa_xri, axri),
11447 bf_get(lpfc_wcqe_xa_status, axri),
11451 * Catch the ABTS protocol failure case. Older OCe FW releases returned
11452 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11453 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11455 ext_status = axri->parameter & IOERR_PARAM_MASK;
11456 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11457 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11458 lpfc_sli_post_recovery_event(phba, ndlp);
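/* The recovery event is therefore posted only when
 *	status == IOSTAT_LOCAL_REJECT &&
 *	(ext_status == 0 || ext_status == IOERR_SEQUENCE_TIMEOUT)
 * which covers both the older and the newer FW encodings of a failed
 * ABTS exchange described above.
 */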
11462 * lpfc_sli_async_event_handler - ASYNC iocb handler function
11463 * @phba: Pointer to HBA context object.
11464 * @pring: Pointer to driver SLI ring object.
11465 * @iocbq: Pointer to iocb object.
11467 * This function is called by the slow ring event handler
11468 * function when there is an ASYNC event iocb in the ring.
11469 * This function is called with no lock held.
11470 * Currently this function handles only temperature related
11471 * ASYNC events. The function decodes the temperature sensor
11472 * event message and posts events for the management applications.
11475 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11476 struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11480 struct temp_event temp_event_data;
11481 struct Scsi_Host *shost;
11484 icmd = &iocbq->iocb;
11485 evt_code = icmd->un.asyncstat.evt_code;
11487 switch (evt_code) {
11488 case ASYNC_TEMP_WARN:
11489 case ASYNC_TEMP_SAFE:
11490 temp_event_data.data = (uint32_t) icmd->ulpContext;
11491 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11492 if (evt_code == ASYNC_TEMP_WARN) {
11493 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11495 "0347 Adapter is very hot, please take "
11496 "corrective action. temperature : %d Celsius\n",
11497 (uint32_t) icmd->ulpContext);
11499 temp_event_data.event_code = LPFC_NORMAL_TEMP;
11500 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11501 "0340 Adapter temperature is OK now. "
11502 "temperature : %d Celsius\n",
11503 (uint32_t) icmd->ulpContext);
11506 /* Send temperature change event to applications */
11507 shost = lpfc_shost_from_vport(phba->pport);
11508 fc_host_post_vendor_event(shost, fc_get_event_number(),
11509 sizeof(temp_event_data), (char *) &temp_event_data,
11510 LPFC_NL_VENDOR_ID);
11512 case ASYNC_STATUS_CN:
11513 lpfc_sli_abts_err_handler(phba, iocbq);
11516 iocb_w = (uint32_t *) icmd;
11517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11518 "0346 Ring %d handler: unexpected ASYNC_STATUS"
11520 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
11521 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
11522 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
11523 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11524 pring->ringno, icmd->un.asyncstat.evt_code,
11525 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11526 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11527 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11528 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11536 * lpfc_sli4_setup - SLI ring setup function
11537 * @phba: Pointer to HBA context object.
11539 * lpfc_sli4_setup sets up the rings of the SLI interface with
11540 * the number of iocbs per ring and iotags. This function is
11541 * called while the driver attaches to the HBA and before the
11542 * interrupts are enabled. So there is no need for locking.
11544 * This function always returns 0.
11547 lpfc_sli4_setup(struct lpfc_hba *phba)
11549 struct lpfc_sli_ring *pring;
11551 pring = phba->sli4_hba.els_wq->pring;
11552 pring->num_mask = LPFC_MAX_RING_MASK;
11553 pring->prt[0].profile = 0; /* Mask 0 */
11554 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11555 pring->prt[0].type = FC_TYPE_ELS;
11556 pring->prt[0].lpfc_sli_rcv_unsol_event =
11557 lpfc_els_unsol_event;
11558 pring->prt[1].profile = 0; /* Mask 1 */
11559 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11560 pring->prt[1].type = FC_TYPE_ELS;
11561 pring->prt[1].lpfc_sli_rcv_unsol_event =
11562 lpfc_els_unsol_event;
11563 pring->prt[2].profile = 0; /* Mask 2 */
11564 /* NameServer Inquiry */
11565 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11567 pring->prt[2].type = FC_TYPE_CT;
11568 pring->prt[2].lpfc_sli_rcv_unsol_event =
11569 lpfc_ct_unsol_event;
11570 pring->prt[3].profile = 0; /* Mask 3 */
11571 /* NameServer response */
11572 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11574 pring->prt[3].type = FC_TYPE_CT;
11575 pring->prt[3].lpfc_sli_rcv_unsol_event =
11576 lpfc_ct_unsol_event;
11581 * lpfc_sli_setup - SLI ring setup function
11582 * @phba: Pointer to HBA context object.
11584 * lpfc_sli_setup sets up the rings of the SLI interface with
11585 * the number of iocbs per ring and iotags. This function is
11586 * called while the driver attaches to the HBA and before the
11587 * interrupts are enabled. So there is no need for locking.
11589 * This function always returns 0. SLI3 only.
11592 lpfc_sli_setup(struct lpfc_hba *phba)
11594 int i, totiocbsize = 0;
11595 struct lpfc_sli *psli = &phba->sli;
11596 struct lpfc_sli_ring *pring;
11598 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11599 psli->sli_flag = 0;
11601 psli->iocbq_lookup = NULL;
11602 psli->iocbq_lookup_len = 0;
11603 psli->last_iotag = 0;
11605 for (i = 0; i < psli->num_rings; i++) {
11606 pring = &psli->sli3_ring[i];
11608 case LPFC_FCP_RING: /* ring 0 - FCP */
11609 /* numCiocb and numRiocb are used in config_port */
11610 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11611 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11612 pring->sli.sli3.numCiocb +=
11613 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11614 pring->sli.sli3.numRiocb +=
11615 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11616 pring->sli.sli3.numCiocb +=
11617 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11618 pring->sli.sli3.numRiocb +=
11619 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11620 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11621 SLI3_IOCB_CMD_SIZE :
11622 SLI2_IOCB_CMD_SIZE;
11623 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11624 SLI3_IOCB_RSP_SIZE :
11625 SLI2_IOCB_RSP_SIZE;
11626 pring->iotag_ctr = 0;
11628 (phba->cfg_hba_queue_depth * 2);
11629 pring->fast_iotag = pring->iotag_max;
11630 pring->num_mask = 0;
11632 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
11633 /* numCiocb and numRiocb are used in config_port */
11634 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11635 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11636 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11637 SLI3_IOCB_CMD_SIZE :
11638 SLI2_IOCB_CMD_SIZE;
11639 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11640 SLI3_IOCB_RSP_SIZE :
11641 SLI2_IOCB_RSP_SIZE;
11642 pring->iotag_max = phba->cfg_hba_queue_depth;
11643 pring->num_mask = 0;
11645 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
11646 /* numCiocb and numRiocb are used in config_port */
11647 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11648 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11649 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11650 SLI3_IOCB_CMD_SIZE :
11651 SLI2_IOCB_CMD_SIZE;
11652 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11653 SLI3_IOCB_RSP_SIZE :
11654 SLI2_IOCB_RSP_SIZE;
11655 pring->fast_iotag = 0;
11656 pring->iotag_ctr = 0;
11657 pring->iotag_max = 4096;
11658 pring->lpfc_sli_rcv_async_status =
11659 lpfc_sli_async_event_handler;
11660 pring->num_mask = LPFC_MAX_RING_MASK;
11661 pring->prt[0].profile = 0; /* Mask 0 */
11662 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11663 pring->prt[0].type = FC_TYPE_ELS;
11664 pring->prt[0].lpfc_sli_rcv_unsol_event =
11665 lpfc_els_unsol_event;
11666 pring->prt[1].profile = 0; /* Mask 1 */
11667 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11668 pring->prt[1].type = FC_TYPE_ELS;
11669 pring->prt[1].lpfc_sli_rcv_unsol_event =
11670 lpfc_els_unsol_event;
11671 pring->prt[2].profile = 0; /* Mask 2 */
11672 /* NameServer Inquiry */
11673 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11675 pring->prt[2].type = FC_TYPE_CT;
11676 pring->prt[2].lpfc_sli_rcv_unsol_event =
11677 lpfc_ct_unsol_event;
11678 pring->prt[3].profile = 0; /* Mask 3 */
11679 /* NameServer response */
11680 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11682 pring->prt[3].type = FC_TYPE_CT;
11683 pring->prt[3].lpfc_sli_rcv_unsol_event =
11684 lpfc_ct_unsol_event;
11687 totiocbsize += (pring->sli.sli3.numCiocb *
11688 pring->sli.sli3.sizeCiocb) +
11689 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11691 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11692 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11693 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11694 "SLI2 SLIM Data: x%x x%lx\n",
11695 phba->brd_no, totiocbsize,
11696 (unsigned long) MAX_SLIM_IOCB_SIZE);
11698 if (phba->cfg_multi_ring_support == 2)
11699 lpfc_extra_ring_setup(phba);
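/*
 * Sizing rule enforced above (illustrative arithmetic):
 *
 *	totiocbsize = sum over rings of
 *		(numCiocb * sizeCiocb) + (numRiocb * sizeRiocb)
 *
 * and the total must fit in MAX_SLIM_IOCB_SIZE, since all SLI2/3 cmd
 * and rsp ring entries share the single SLIM region; otherwise the
 * 0462 message above is logged.
 */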
11705 * lpfc_sli4_queue_init - Queue initialization function
11706 * @phba: Pointer to HBA context object.
11708 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11709 * ring. This function also initializes ring indices of each ring.
11710 * This function is called during the initialization of the SLI
11711 * interface of an HBA.
11712 * This function is called with no lock held.
11716 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11718 struct lpfc_sli *psli;
11719 struct lpfc_sli_ring *pring;
11723 spin_lock_irq(&phba->hbalock);
11724 INIT_LIST_HEAD(&psli->mboxq);
11725 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11726 /* Initialize list headers for txq and txcmplq as doubly linked lists */
11727 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11728 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11730 pring->ringno = LPFC_FCP_RING;
11731 pring->txcmplq_cnt = 0;
11732 INIT_LIST_HEAD(&pring->txq);
11733 INIT_LIST_HEAD(&pring->txcmplq);
11734 INIT_LIST_HEAD(&pring->iocb_continueq);
11735 spin_lock_init(&pring->ring_lock);
11737 pring = phba->sli4_hba.els_wq->pring;
11739 pring->ringno = LPFC_ELS_RING;
11740 pring->txcmplq_cnt = 0;
11741 INIT_LIST_HEAD(&pring->txq);
11742 INIT_LIST_HEAD(&pring->txcmplq);
11743 INIT_LIST_HEAD(&pring->iocb_continueq);
11744 spin_lock_init(&pring->ring_lock);
11746 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11747 pring = phba->sli4_hba.nvmels_wq->pring;
11749 pring->ringno = LPFC_ELS_RING;
11750 pring->txcmplq_cnt = 0;
11751 INIT_LIST_HEAD(&pring->txq);
11752 INIT_LIST_HEAD(&pring->txcmplq);
11753 INIT_LIST_HEAD(&pring->iocb_continueq);
11754 spin_lock_init(&pring->ring_lock);
11757 spin_unlock_irq(&phba->hbalock);
11761 * lpfc_sli_queue_init - Queue initialization function
11762 * @phba: Pointer to HBA context object.
11764 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11765 * ring. This function also initializes ring indices of each ring.
11766 * This function is called during the initialization of the SLI
11767 * interface of an HBA.
11768 * This function is called with no lock held. It has no return value.
11772 lpfc_sli_queue_init(struct lpfc_hba *phba)
11774 struct lpfc_sli *psli;
11775 struct lpfc_sli_ring *pring;
11776 int i;
11778 psli = &phba->sli;
11779 spin_lock_irq(&phba->hbalock);
11780 INIT_LIST_HEAD(&psli->mboxq);
11781 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11782 /* Initialize list headers for txq and txcmplq as doubly linked lists */
11783 for (i = 0; i < psli->num_rings; i++) {
11784 pring = &psli->sli3_ring[i];
11786 pring->sli.sli3.next_cmdidx = 0;
11787 pring->sli.sli3.local_getidx = 0;
11788 pring->sli.sli3.cmdidx = 0;
11789 INIT_LIST_HEAD(&pring->iocb_continueq);
11790 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11791 INIT_LIST_HEAD(&pring->postbufq);
11793 INIT_LIST_HEAD(&pring->txq);
11794 INIT_LIST_HEAD(&pring->txcmplq);
11795 spin_lock_init(&pring->ring_lock);
11797 spin_unlock_irq(&phba->hbalock);
11801 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11802 * @phba: Pointer to HBA context object.
11804 * This routine flushes the mailbox command subsystem. It will unconditionally
11805 * flush all the mailbox commands in the three possible stages in the mailbox
11806 * command sub-system: pending mailbox command queue; the outstanding mailbox
11807 * command; and the completed mailbox command queue. It is the caller's responsibility
11808 * to make sure that the driver is in the proper state to flush the mailbox
11809 * command sub-system. Namely, the posting of mailbox commands into the
11810 * pending mailbox command queue from the various clients must be stopped;
11811 * either the HBA is in a state in which it will never work on the outstanding
11812 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11813 * mailbox command has been completed.
11816 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11818 LIST_HEAD(completions);
11819 struct lpfc_sli *psli = &phba->sli;
11820 LPFC_MBOXQ_t *pmb;
11821 unsigned long iflag;
11823 /* Disable softirqs, including timers from obtaining phba->hbalock */
11824 local_bh_disable();
11826 /* Flush all the mailbox commands in the mbox system */
11827 spin_lock_irqsave(&phba->hbalock, iflag);
11829 /* The pending mailbox command queue */
11830 list_splice_init(&phba->sli.mboxq, &completions);
11831 /* The outstanding active mailbox command */
11832 if (psli->mbox_active) {
11833 list_add_tail(&psli->mbox_active->list, &completions);
11834 psli->mbox_active = NULL;
11835 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11837 /* The completed mailbox command queue */
11838 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11839 spin_unlock_irqrestore(&phba->hbalock, iflag);
11841 /* Enable softirqs again, done with phba->hbalock */
11842 local_bh_enable();
11844 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11845 while (!list_empty(&completions)) {
11846 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11847 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11848 if (pmb->mbox_cmpl)
11849 pmb->mbox_cmpl(phba, pmb);
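/* Illustrative sketch (hypothetical handler, not part of the driver):
 * after a flush, every queued mailbox command completes with
 * MBX_NOT_FINISHED, so a completion handler can detect the flush and
 * skip its normal post-processing:
 *
 *	static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *			return;
 *		}
 *		... normal completion processing ...
 *	}
 */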
11854 * lpfc_sli_host_down - Vport cleanup function
11855 * @vport: Pointer to virtual port object.
11857 * lpfc_sli_host_down is called to clean up the resources
11858 * associated with a vport before destroying virtual
11859 * port data structures.
11860 * This function does the following operations:
11861 * - Free discovery resources associated with this virtual port.
11863 * - Free iocbs associated with this virtual port in the txq.
11865 * - Send abort for all iocb commands associated with this
11866 *   vport in txcmplq.
11868 * This function is called with no lock held and always returns 1.
11871 lpfc_sli_host_down(struct lpfc_vport *vport)
11873 LIST_HEAD(completions);
11874 struct lpfc_hba *phba = vport->phba;
11875 struct lpfc_sli *psli = &phba->sli;
11876 struct lpfc_queue *qp = NULL;
11877 struct lpfc_sli_ring *pring;
11878 struct lpfc_iocbq *iocb, *next_iocb;
11880 unsigned long flags = 0;
11881 uint16_t prev_pring_flag;
11883 lpfc_cleanup_discovery_resources(vport);
11885 spin_lock_irqsave(&phba->hbalock, flags);
11888 * Error everything on the txq since these iocbs
11889 * have not been given to the FW yet.
11890 * Also issue ABTS for everything on the txcmplq
11892 if (phba->sli_rev != LPFC_SLI_REV4) {
11893 for (i = 0; i < psli->num_rings; i++) {
11894 pring = &psli->sli3_ring[i];
11895 prev_pring_flag = pring->flag;
11896 /* Only slow rings */
11897 if (pring->ringno == LPFC_ELS_RING) {
11898 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11899 /* Set the lpfc data pending flag */
11900 set_bit(LPFC_DATA_READY, &phba->data_flags);
11902 list_for_each_entry_safe(iocb, next_iocb,
11903 &pring->txq, list) {
11904 if (iocb->vport != vport)
11905 continue;
11906 list_move_tail(&iocb->list, &completions);
11908 list_for_each_entry_safe(iocb, next_iocb,
11909 &pring->txcmplq, list) {
11910 if (iocb->vport != vport)
11911 continue;
11912 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11915 pring->flag = prev_pring_flag;
11918 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11919 pring = qp->pring;
11920 if (!pring)
11921 continue;
11922 if (pring == phba->sli4_hba.els_wq->pring) {
11923 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11924 /* Set the lpfc data pending flag */
11925 set_bit(LPFC_DATA_READY, &phba->data_flags);
11927 prev_pring_flag = pring->flag;
11928 spin_lock(&pring->ring_lock);
11929 list_for_each_entry_safe(iocb, next_iocb,
11930 &pring->txq, list) {
11931 if (iocb->vport != vport)
11932 continue;
11933 list_move_tail(&iocb->list, &completions);
11935 spin_unlock(&pring->ring_lock);
11936 list_for_each_entry_safe(iocb, next_iocb,
11937 &pring->txcmplq, list) {
11938 if (iocb->vport != vport)
11939 continue;
11940 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11943 pring->flag = prev_pring_flag;
11946 spin_unlock_irqrestore(&phba->hbalock, flags);
11948 /* Make sure HBA is alive */
11949 lpfc_issue_hb_tmo(phba);
11951 /* Cancel all the IOCBs from the completions list */
11952 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11953 IOERR_SLI_DOWN);
11954 return 1;
11958 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11959 * @phba: Pointer to HBA context object.
11961 * This function cleans up all iocb, buffers, mailbox commands
11962 * while shutting down the HBA. This function is called with no
11963 * lock held and always returns 1.
11964 * This function does the following to cleanup driver resources:
11965 * - Free discovery resources for each virtual port
11966 * - Cleanup any pending fabric iocbs
11967 * - Iterate through the iocb txq and free each entry
11969 * - Free up any buffer posted to the HBA
11970 * - Free mailbox commands in the mailbox queue.
11973 lpfc_sli_hba_down(struct lpfc_hba *phba)
11975 LIST_HEAD(completions);
11976 struct lpfc_sli *psli = &phba->sli;
11977 struct lpfc_queue *qp = NULL;
11978 struct lpfc_sli_ring *pring;
11979 struct lpfc_dmabuf *buf_ptr;
11980 unsigned long flags = 0;
11983 /* Shutdown the mailbox command sub-system */
11984 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11986 lpfc_hba_down_prep(phba);
11988 /* Disable softirqs, including timers from obtaining phba->hbalock */
11989 local_bh_disable();
11991 lpfc_fabric_abort_hba(phba);
11993 spin_lock_irqsave(&phba->hbalock, flags);
11996 * Error everything on the txq since these iocbs
11997 * have not been given to the FW yet.
11999 if (phba->sli_rev != LPFC_SLI_REV4) {
12000 for (i = 0; i < psli->num_rings; i++) {
12001 pring = &psli->sli3_ring[i];
12002 /* Only slow rings */
12003 if (pring->ringno == LPFC_ELS_RING) {
12004 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12005 /* Set the lpfc data pending flag */
12006 set_bit(LPFC_DATA_READY, &phba->data_flags);
12008 list_splice_init(&pring->txq, &completions);
12011 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12012 pring = qp->pring;
12013 if (!pring)
12014 continue;
12015 spin_lock(&pring->ring_lock);
12016 list_splice_init(&pring->txq, &completions);
12017 spin_unlock(&pring->ring_lock);
12018 if (pring == phba->sli4_hba.els_wq->pring) {
12019 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12020 /* Set the lpfc data pending flag */
12021 set_bit(LPFC_DATA_READY, &phba->data_flags);
12025 spin_unlock_irqrestore(&phba->hbalock, flags);
12027 /* Cancel all the IOCBs from the completions list */
12028 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12029 IOERR_SLI_DOWN);
12031 spin_lock_irqsave(&phba->hbalock, flags);
12032 list_splice_init(&phba->elsbuf, &completions);
12033 phba->elsbuf_cnt = 0;
12034 phba->elsbuf_prev_cnt = 0;
12035 spin_unlock_irqrestore(&phba->hbalock, flags);
12037 while (!list_empty(&completions)) {
12038 list_remove_head(&completions, buf_ptr,
12039 struct lpfc_dmabuf, list);
12040 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12041 kfree(buf_ptr);
12044 /* Enable softirqs again, done with phba->hbalock */
12045 local_bh_enable();
12047 /* Return any active mbox cmds */
12048 del_timer_sync(&psli->mbox_tmo);
12050 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12051 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12052 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12058 * lpfc_sli_pcimem_bcopy - SLI memory copy function
12059 * @srcp: Source memory pointer.
12060 * @destp: Destination memory pointer.
12061 * @cnt: Number of bytes to be copied (the copy advances one 32-bit word at a time).
12063 * This function is used for copying data between driver memory
12064 * and the SLI memory. This function also changes the endianness
12065 * of each word if native endianness is different from SLI
12066 * endianness. This function can be called with or without a lock held.
12070 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12072 uint32_t *src = srcp;
12073 uint32_t *dest = destp;
12074 uint32_t ldata;
12075 int i;
12077 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
12078 ldata = *src;
12079 ldata = le32_to_cpu(ldata);
12080 *dest = ldata;
12081 src++;
12082 dest++;
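/* Illustrative sketch (not part of the driver): since @cnt is a byte
 * count, copying four 32-bit words of a mailbox image with endian
 * conversion would look like:
 *
 *	lpfc_sli_pcimem_bcopy(mbox, pmbox, 4 * sizeof(uint32_t));
 */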
12088 * lpfc_sli_bemem_bcopy - SLI memory copy function
12089 * @srcp: Source memory pointer.
12090 * @destp: Destination memory pointer.
12091 * @cnt: Number of bytes to be copied (the copy advances one 32-bit word at a time).
12093 * This function is used for copying data from a data structure
12094 * with big endian representation to local endianness.
12095 * This function can be called with or without lock.
12098 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12100 uint32_t *src = srcp;
12101 uint32_t *dest = destp;
12102 uint32_t ldata;
12103 int i;
12105 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12106 ldata = *src;
12107 ldata = be32_to_cpu(ldata);
12108 *dest = ldata;
12109 src++;
12110 dest++;
12115 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12116 * @phba: Pointer to HBA context object.
12117 * @pring: Pointer to driver SLI ring object.
12118 * @mp: Pointer to driver buffer object.
12120 * This function is called with no lock held.
12121 * It always returns zero after adding the buffer to the postbufq and
12122 * bumping the postbufq counter.
12125 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12126 struct lpfc_dmabuf *mp)
12128 /* Stick struct lpfc_dmabuf at end of postbufq so driver can
12129 * look it up later */
12130 spin_lock_irq(&phba->hbalock);
12131 list_add_tail(&mp->list, &pring->postbufq);
12132 pring->postbufq_cnt++;
12133 spin_unlock_irq(&phba->hbalock);
12134 return 0;
12138 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12139 * @phba: Pointer to HBA context object.
12141 * When HBQ is enabled, buffers are searched based on tags. This function
12142 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
12143 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
12144 * does not conflict with tags of buffers posted for unsolicited events.
12145 * The function returns the allocated tag. The function is called with
12146 * no lock held.
12149 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12151 spin_lock_irq(&phba->hbalock);
12152 phba->buffer_tag_count++;
12154 * Always set the QUE_BUFTAG_BIT to distinguish this tag
12155 * from a tag assigned by HBQ.
12157 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12158 spin_unlock_irq(&phba->hbalock);
12159 return phba->buffer_tag_count;
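/* Illustrative sketch (not part of the driver): a buffer posted with a
 * driver-allocated tag can later be reclaimed by that same tag when the
 * CMD_IOCB_RET_XRI64_CX completion arrives:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	... post mp with a CMD_QUE_XRI64_CX iocb ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
 */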
12163 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12164 * @phba: Pointer to HBA context object.
12165 * @pring: Pointer to driver SLI ring object.
12166 * @tag: Buffer tag.
12168 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12169 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
12170 * iocb is posted to the response ring with the tag of the buffer.
12171 * This function searches the pring->postbufq list using the tag
12172 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
12173 * iocb. If the buffer is found, then the lpfc_dmabuf object of the
12174 * buffer is returned to the caller, else NULL is returned.
12175 * This function is called with no lock held.
12177 struct lpfc_dmabuf *
12178 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12181 struct lpfc_dmabuf *mp, *next_mp;
12182 struct list_head *slp = &pring->postbufq;
12184 /* Search postbufq, from the beginning, looking for a match on tag */
12185 spin_lock_irq(&phba->hbalock);
12186 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12187 if (mp->buffer_tag == tag) {
12188 list_del_init(&mp->list);
12189 pring->postbufq_cnt--;
12190 spin_unlock_irq(&phba->hbalock);
12191 return mp;
12192 }
12195 spin_unlock_irq(&phba->hbalock);
12196 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12197 "0402 Cannot find virtual addr for buffer tag on "
12198 "ring %d Data x%lx x%px x%px x%x\n",
12199 pring->ringno, (unsigned long) tag,
12200 slp->next, slp->prev, pring->postbufq_cnt);
12202 return NULL;
12206 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12207 * @phba: Pointer to HBA context object.
12208 * @pring: Pointer to driver SLI ring object.
12209 * @phys: DMA address of the buffer.
12211 * This function searches the buffer list using the dma_address
12212 * of the unsolicited event to find the driver's lpfc_dmabuf object
12213 * corresponding to the dma_address. The function returns the
12214 * lpfc_dmabuf object if a buffer is found else it returns NULL.
12215 * This function is called by the ct and els unsolicited event
12216 * handlers to get the buffer associated with the unsolicited
12217 * event.
12219 * This function is called with no lock held.
12221 struct lpfc_dmabuf *
12222 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12225 struct lpfc_dmabuf *mp, *next_mp;
12226 struct list_head *slp = &pring->postbufq;
12228 /* Search postbufq, from the beginning, looking for a match on phys */
12229 spin_lock_irq(&phba->hbalock);
12230 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12231 if (mp->phys == phys) {
12232 list_del_init(&mp->list);
12233 pring->postbufq_cnt--;
12234 spin_unlock_irq(&phba->hbalock);
12235 return mp;
12236 }
12239 spin_unlock_irq(&phba->hbalock);
12240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12241 "0410 Cannot find virtual addr for mapped buf on "
12242 "ring %d Data x%llx x%px x%px x%x\n",
12243 pring->ringno, (unsigned long long)phys,
12244 slp->next, slp->prev, pring->postbufq_cnt);
12246 return NULL;
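/* Illustrative sketch (not part of the driver): the postbufq pairing in
 * this file is put-by-list, get-by-DMA-address:
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... an unsolicited event later reports mp->phys ...
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
 */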
12249 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12250 * @phba: Pointer to HBA context object.
12251 * @cmdiocb: Pointer to driver command iocb object.
12252 * @rspiocb: Pointer to driver response iocb object.
12254 * This function is the completion handler for the abort iocbs for
12255 * ELS commands. This function is called from the ELS ring event
12256 * handler with no lock held. This function frees memory resources
12257 * associated with the abort iocb.
12260 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12261 struct lpfc_iocbq *rspiocb)
12263 IOCB_t *irsp = &rspiocb->iocb;
12264 uint16_t abort_iotag, abort_context;
12265 struct lpfc_iocbq *abort_iocb = NULL;
12267 if (irsp->ulpStatus) {
12270 * Assume that the port already completed and returned, or
12271 * will return the iocb. Just log the message.
12273 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
12274 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
12276 spin_lock_irq(&phba->hbalock);
12277 if (phba->sli_rev < LPFC_SLI_REV4) {
12278 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
12279 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
12280 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
12281 spin_unlock_irq(&phba->hbalock);
12282 goto release_iocb;
12283 }
12284 if (abort_iotag != 0 &&
12285 abort_iotag <= phba->sli.last_iotag)
12286 abort_iocb =
12287 phba->sli.iocbq_lookup[abort_iotag];
12289 /* For sli4 the abort_tag is the XRI,
12290 * so the abort routine puts the iotag of the iocb
12291 * being aborted in the context field of the abort
12292 * IOCB.
12293 */
12294 abort_iocb = phba->sli.iocbq_lookup[abort_context];
12296 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12297 "0327 Cannot abort els iocb x%px "
12298 "with tag %x context %x, abort status %x, "
12300 abort_iocb, abort_iotag, abort_context,
12301 irsp->ulpStatus, irsp->un.ulpWord[4]);
12303 spin_unlock_irq(&phba->hbalock);
12305 release_iocb:
12306 lpfc_sli_release_iocbq(phba, cmdiocb);
12311 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12312 * @phba: Pointer to HBA context object.
12313 * @cmdiocb: Pointer to driver command iocb object.
12314 * @rspiocb: Pointer to driver response iocb object.
12316 * The function is called from SLI ring event handler with no
12317 * lock held. This function is the completion handler for ELS commands
12318 * which are aborted. The function frees memory resources used for
12319 * the aborted ELS commands.
12322 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12323 struct lpfc_iocbq *rspiocb)
12325 struct lpfc_nodelist *ndlp = NULL;
12326 IOCB_t *irsp = &rspiocb->iocb;
12328 /* ELS cmd tag <ulpIoTag> completes */
12329 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12330 "0139 Ignoring ELS cmd code x%x completion Data: "
12332 irsp->ulpIoTag, irsp->ulpStatus,
12333 irsp->un.ulpWord[4], irsp->ulpTimeout);
12335 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12336 * if exchange is busy.
12338 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
12339 ndlp = cmdiocb->context_un.ndlp;
12340 lpfc_ct_free_iocb(phba, cmdiocb);
12341 } else {
12342 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
12343 lpfc_els_free_iocb(phba, cmdiocb);
12346 lpfc_nlp_put(ndlp);
12350 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12351 * @phba: Pointer to HBA context object.
12352 * @pring: Pointer to driver SLI ring object.
12353 * @cmdiocb: Pointer to driver command iocb object.
12354 * @cmpl: completion function.
12356 * This function issues an abort iocb for the provided command iocb. In case
12357 * of unloading, the abort iocb will not be issued to commands on the ELS
12358 * ring. Instead, the callback function of those commands shall be changed
12359 * so that nothing happens when they finish. This function is called with
12360 * hbalock held and no ring_lock held (SLI4). The function returns IOCB_SUCCESS
12361 * when the command iocb is an abort request.
12365 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12366 struct lpfc_iocbq *cmdiocb, void *cmpl)
12368 struct lpfc_vport *vport = cmdiocb->vport;
12369 struct lpfc_iocbq *abtsiocbp;
12370 IOCB_t *icmd = NULL;
12371 IOCB_t *iabt = NULL;
12372 int retval = IOCB_ERROR;
12373 unsigned long iflags;
12374 struct lpfc_nodelist *ndlp;
12377 * There are certain command types we don't want to abort. And we
12378 * don't want to abort commands that are already in the process of
12379 * being aborted.
12380 */
12381 icmd = &cmdiocb->iocb;
12382 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
12383 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
12384 cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12385 return IOCB_ABORTING;
12387 if (!pring) {
12388 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12389 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12390 else
12391 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12392 return retval;
12393 }
12396 * If we're unloading, don't abort iocb on the ELS ring, but change
12397 * the callback so that nothing happens when it finishes.
12399 if ((vport->load_flag & FC_UNLOADING) &&
12400 pring->ringno == LPFC_ELS_RING) {
12401 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12402 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12403 else
12404 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12405 return retval;
12406 }
12408 /* issue ABTS for this IOCB based on iotag */
12409 abtsiocbp = __lpfc_sli_get_iocbq(phba);
12410 if (abtsiocbp == NULL)
12411 return IOCB_NORESOURCE;
12413 /* This signals the response to set the correct status
12414 * before calling the completion handler
12416 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12418 iabt = &abtsiocbp->iocb;
12419 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
12420 iabt->un.acxri.abortContextTag = icmd->ulpContext;
12421 if (phba->sli_rev == LPFC_SLI_REV4) {
12422 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
12423 if (pring->ringno == LPFC_ELS_RING)
12424 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
12425 } else {
12426 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
12427 if (pring->ringno == LPFC_ELS_RING) {
12428 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
12429 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
12433 iabt->ulpClass = icmd->ulpClass;
12435 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12436 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12437 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12438 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12439 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12440 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12442 if (phba->link_state < LPFC_LINK_UP ||
12443 (phba->sli_rev == LPFC_SLI_REV4 &&
12444 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
12445 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
12446 else
12447 iabt->ulpCommand = CMD_ABORT_XRI_CN;
12449 if (cmpl)
12450 abtsiocbp->cmd_cmpl = cmpl;
12451 else
12452 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12453 abtsiocbp->vport = vport;
12455 if (phba->sli_rev == LPFC_SLI_REV4) {
12456 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12457 if (unlikely(pring == NULL))
12458 goto abort_iotag_exit;
12459 /* Note: both hbalock and ring_lock need to be held here */
12460 spin_lock_irqsave(&pring->ring_lock, iflags);
12461 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12462 abtsiocbp, 0);
12463 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12465 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12466 abtsiocbp, 0);
12469 abort_iotag_exit:
12471 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12472 "0339 Abort xri x%x, original iotag x%x, "
12473 "abort cmd iotag x%x retval x%x\n",
12474 iabt->un.acxri.abortIoTag,
12475 iabt->un.acxri.abortContextTag,
12476 abtsiocbp->iotag, retval);
12478 if (retval) {
12479 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12480 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12484 * Caller to this routine should check for IOCB_ERROR
12485 * and handle it properly. This routine no longer removes
12486 * iocb off txcmplq and call compl in case of IOCB_ERROR.
12487 */
12488 return retval;
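/* Illustrative sketch (not part of the driver): callers issue the abort
 * with hbalock held and may pass a NULL completion to get the default
 * lpfc_sli_abort_els_cmpl handler:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */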
12492 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12493 * @phba: pointer to lpfc HBA data structure.
12495 * This routine will abort all pending and outstanding iocbs to an HBA.
12498 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12500 struct lpfc_sli *psli = &phba->sli;
12501 struct lpfc_sli_ring *pring;
12502 struct lpfc_queue *qp = NULL;
12505 if (phba->sli_rev != LPFC_SLI_REV4) {
12506 for (i = 0; i < psli->num_rings; i++) {
12507 pring = &psli->sli3_ring[i];
12508 lpfc_sli_abort_iocb_ring(phba, pring);
12512 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12513 pring = qp->pring;
12514 if (!pring)
12515 continue;
12516 lpfc_sli_abort_iocb_ring(phba, pring);
12521 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12522 * @iocbq: Pointer to iocb object.
12523 * @vport: Pointer to driver virtual port object.
12525 * This function acts as an iocb filter for functions which abort FCP iocbs.
12528 * -ENODEV, if a null iocb or vport ptr is encountered
12529 * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12530 * premarked as aborted (the abort was already started), or is an abort iocb itself
12531 * 0, passes criteria for aborting the FCP I/O iocb
12534 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12535 struct lpfc_vport *vport)
12537 IOCB_t *icmd = NULL;
12539 /* No null ptr vports */
12540 if (!iocbq || iocbq->vport != vport)
12541 return -ENODEV;
12543 /* iocb must be for FCP IO, already exists on the TX cmpl queue,
12544 * can't be premarked as driver aborted, nor be an ABORT iocb itself
12546 icmd = &iocbq->iocb;
12547 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12548 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12549 (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12550 (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
12551 icmd->ulpCommand == CMD_CLOSE_XRI_CN))
12552 return -EINVAL;
12554 return 0;
12558 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12559 * @iocbq: Pointer to driver iocb object.
12560 * @vport: Pointer to driver virtual port object.
12561 * @tgt_id: SCSI ID of the target.
12562 * @lun_id: LUN ID of the scsi device.
12563 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12565 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12569 * host. It returns 0 if the filtering criteria are met for the given iocb,
12570 * and 1 if the filtering criteria are not met.
12571 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12572 * given iocb is for the SCSI device specified by vport, tgt_id and
12573 * lun_id parameter.
12574 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
12575 * given iocb is for the SCSI target specified by vport and tgt_id
12577 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12578 * given iocb is for the SCSI host associated with the given vport.
12579 * This function is called with no locks held.
12582 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12583 uint16_t tgt_id, uint64_t lun_id,
12584 lpfc_ctx_cmd ctx_cmd)
12586 struct lpfc_io_buf *lpfc_cmd;
12587 int rc = 1;
12589 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12591 if (lpfc_cmd->pCmd == NULL)
12592 return rc;
12594 switch (ctx_cmd) {
12595 case LPFC_CTX_LUN:
12596 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12597 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12598 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12599 rc = 0;
12600 break;
12601 case LPFC_CTX_TGT:
12602 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12603 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12604 rc = 0;
12605 break;
12606 case LPFC_CTX_HOST:
12607 rc = 0;
12608 break;
12609 default:
12610 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12611 __func__, ctx_cmd);
12612 break;
12615 return rc;
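/* Illustrative sketch (not part of the driver): the three context codes
 * form nested scopes. For an iocb bound to tgt_id 1 / lun_id 0:
 *
 *	lpfc_sli_validate_fcp_iocb(iocbq, vport, 1, 0, LPFC_CTX_LUN)  -> 0
 *	lpfc_sli_validate_fcp_iocb(iocbq, vport, 1, 9, LPFC_CTX_TGT)  -> 0  (lun ignored)
 *	lpfc_sli_validate_fcp_iocb(iocbq, vport, 9, 9, LPFC_CTX_HOST) -> 0  (tgt/lun ignored)
 */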
12619 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12620 * @vport: Pointer to virtual port.
12621 * @tgt_id: SCSI ID of the target.
12622 * @lun_id: LUN ID of the scsi device.
12623 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12625 * This function returns number of FCP commands pending for the vport.
12626 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
12627 * commands pending on the vport associated with SCSI device specified
12628 * by tgt_id and lun_id parameters.
12629 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
12630 * commands pending on the vport associated with SCSI target specified
12631 * by tgt_id parameter.
12632 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
12633 * commands pending on the vport.
12634 * This function returns the number of iocbs which satisfy the filter.
12635 * This function is called without any lock held.
12638 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12639 lpfc_ctx_cmd ctx_cmd)
12641 struct lpfc_hba *phba = vport->phba;
12642 struct lpfc_iocbq *iocbq;
12643 IOCB_t *icmd = NULL;
12645 unsigned long iflags;
12647 spin_lock_irqsave(&phba->hbalock, iflags);
12648 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12649 iocbq = phba->sli.iocbq_lookup[i];
12651 if (!iocbq || iocbq->vport != vport)
12652 continue;
12653 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12654 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12655 continue;
12657 /* Include counting outstanding aborts */
12658 icmd = &iocbq->iocb;
12659 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
12660 icmd->ulpCommand == CMD_CLOSE_XRI_CN) {
12661 sum++;
12662 continue;
12663 }
12665 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12666 ctx_cmd) == 0)
12667 sum++;
12669 spin_unlock_irqrestore(&phba->hbalock, iflags);
12671 return sum;
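/* Illustrative sketch (not part of the driver): a target-reset path
 * could poll until outstanding FCP I/O to the target drains:
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT))
 *		msleep(20);
 */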
12675 * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12676 * @phba: Pointer to HBA context object
12677 * @cmdiocb: Pointer to command iocb object.
12678 * @wcqe: pointer to the complete wcqe
12680 * This function is called when an aborted FCP iocb completes. This
12681 * function is called by the ring event handler with no lock held.
12682 * This function frees the iocb. It is called for sli-4 adapters.
12685 lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12686 struct lpfc_wcqe_complete *wcqe)
12688 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12689 "3017 ABORT_XRI_CN completing on rpi x%x "
12690 "original iotag x%x, abort cmd iotag x%x "
12691 "status 0x%x, reason 0x%x\n",
12692 cmdiocb->iocb.un.acxri.abortContextTag,
12693 cmdiocb->iocb.un.acxri.abortIoTag,
12694 cmdiocb->iotag,
12695 (bf_get(lpfc_wcqe_c_status, wcqe)
12696 & LPFC_IOCB_STATUS_MASK),
12698 lpfc_sli_release_iocbq(phba, cmdiocb);
12702 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12703 * @phba: Pointer to HBA context object
12704 * @cmdiocb: Pointer to command iocb object.
12705 * @rspiocb: Pointer to response iocb object.
12707 * This function is called when an aborted FCP iocb completes. This
12708 * function is called by the ring event handler with no lock held.
12709 * This function frees the iocb.
12712 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12713 struct lpfc_iocbq *rspiocb)
12715 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12716 "3096 ABORT_XRI_CN completing on rpi x%x "
12717 "original iotag x%x, abort cmd iotag x%x "
12718 "status 0x%x, reason 0x%x\n",
12719 cmdiocb->iocb.un.acxri.abortContextTag,
12720 cmdiocb->iocb.un.acxri.abortIoTag,
12721 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
12722 rspiocb->iocb.un.ulpWord[4]);
12723 lpfc_sli_release_iocbq(phba, cmdiocb);
12728 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12729 * @vport: Pointer to virtual port.
12730 * @tgt_id: SCSI ID of the target.
12731 * @lun_id: LUN ID of the scsi device.
12732 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12734 * This function sends an abort command for every SCSI command
12735 * associated with the given virtual port pending on the ring
12736 * filtered first by lpfc_sli_validate_fcp_iocb_for_abort and then by
12737 * lpfc_sli_validate_fcp_iocb; validation must occur in that order
12738 * before an abort iocb is submitted.
12741 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12742 * FCP iocbs associated with lun specified by tgt_id and lun_id
12744 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12745 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12746 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12747 * FCP iocbs associated with virtual port.
12748 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12749 * lpfc_sli4_calc_ring is used.
12750 * This function returns number of iocbs it failed to abort.
12751 * This function is called with no locks held.
12754 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12755 lpfc_ctx_cmd abort_cmd)
12757 struct lpfc_hba *phba = vport->phba;
12758 struct lpfc_sli_ring *pring = NULL;
12759 struct lpfc_iocbq *iocbq;
12760 int errcnt = 0, ret_val = 0;
12761 unsigned long iflags;
12763 void *fcp_cmpl = NULL;
12765 /* all I/Os are in the process of being flushed */
12766 if (phba->hba_flag & HBA_IOQ_FLUSH)
12767 return errcnt;
12769 for (i = 1; i <= phba->sli.last_iotag; i++) {
12770 iocbq = phba->sli.iocbq_lookup[i];
12772 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12773 continue;
12775 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12776 abort_cmd) != 0)
12777 continue;
12779 spin_lock_irqsave(&phba->hbalock, iflags);
12780 if (phba->sli_rev == LPFC_SLI_REV3) {
12781 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12782 fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
12783 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12784 pring = lpfc_sli4_calc_ring(phba, iocbq);
12785 fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
12787 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12788 fcp_cmpl);
12789 spin_unlock_irqrestore(&phba->hbalock, iflags);
12790 if (ret_val != IOCB_SUCCESS)
12791 errcnt++;
12794 return errcnt;
12798 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12799 * @vport: Pointer to virtual port.
12800 * @pring: Pointer to driver SLI ring object.
12801 * @tgt_id: SCSI ID of the target.
12802 * @lun_id: LUN ID of the scsi device.
12803 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12805 * This function sends an abort command for every SCSI command
12806 * associated with the given virtual port pending on the ring
12807 * filtered first by lpfc_sli_validate_fcp_iocb_for_abort and then by
12808 * lpfc_sli_validate_fcp_iocb; validation must occur in that order
12809 * before an abort iocb is submitted.
12812 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12813 * FCP iocbs associated with lun specified by tgt_id and lun_id
12815 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12816 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12817 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12818 * FCP iocbs associated with virtual port.
12819 * This function returns the number of iocbs it aborted.
12820 * This function is called with no locks held, right after a taskmgmt
12821 * command is sent.
12824 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12825 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12827 struct lpfc_hba *phba = vport->phba;
12828 struct lpfc_io_buf *lpfc_cmd;
12829 struct lpfc_iocbq *abtsiocbq;
12830 struct lpfc_nodelist *ndlp;
12831 struct lpfc_iocbq *iocbq;
12833 int sum, i, ret_val;
12834 unsigned long iflags;
12835 struct lpfc_sli_ring *pring_s4 = NULL;
12837 spin_lock_irqsave(&phba->hbalock, iflags);
12839 /* all I/Os are in the process of being flushed */
12840 if (phba->hba_flag & HBA_IOQ_FLUSH) {
12841 spin_unlock_irqrestore(&phba->hbalock, iflags);
12842 return 0;
12844 sum = 0;
12846 for (i = 1; i <= phba->sli.last_iotag; i++) {
12847 iocbq = phba->sli.iocbq_lookup[i];
12849 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12850 continue;
12852 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12853 cmd) != 0)
12854 continue;
12856 /* Guard against IO completion being called at same time */
12857 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12858 spin_lock(&lpfc_cmd->buf_lock);
12860 if (!lpfc_cmd->pCmd) {
12861 spin_unlock(&lpfc_cmd->buf_lock);
12862 continue;
12863 }
12865 if (phba->sli_rev == LPFC_SLI_REV4) {
12866 pring_s4 =
12867 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12868 if (!pring_s4) {
12869 spin_unlock(&lpfc_cmd->buf_lock);
12870 continue;
12871 }
12872 /* Note: both hbalock and ring_lock must be held here */
12873 spin_lock(&pring_s4->ring_lock);
12877 * If the iocbq is already being aborted, don't take a second
12878 * action, but do count it.
12880 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12881 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12882 if (phba->sli_rev == LPFC_SLI_REV4)
12883 spin_unlock(&pring_s4->ring_lock);
12884 spin_unlock(&lpfc_cmd->buf_lock);
12888 /* issue ABTS for this IOCB based on iotag */
12889 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12890 if (!abtsiocbq) {
12891 if (phba->sli_rev == LPFC_SLI_REV4)
12892 spin_unlock(&pring_s4->ring_lock);
12893 spin_unlock(&lpfc_cmd->buf_lock);
12894 continue;
12895 }
12897 icmd = &iocbq->iocb;
12898 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
12899 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
12900 if (phba->sli_rev == LPFC_SLI_REV4)
12901 abtsiocbq->iocb.un.acxri.abortIoTag =
12902 iocbq->sli4_xritag;
12904 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
12905 abtsiocbq->iocb.ulpLe = 1;
12906 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
12907 abtsiocbq->vport = vport;
12909 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12910 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12911 if (iocbq->cmd_flag & LPFC_IO_FCP)
12912 abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12913 if (iocbq->cmd_flag & LPFC_IO_FOF)
12914 abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12916 ndlp = lpfc_cmd->rdata->pnode;
12918 if (lpfc_is_link_up(phba) &&
12919 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
12920 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
12921 else
12922 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
12924 /* Setup callback routine and issue the command. */
12925 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12928 * Indicate the IO is being aborted by the driver and set
12929 * the caller's flag into the aborted IO.
12931 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12933 if (phba->sli_rev == LPFC_SLI_REV4) {
12934 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12935 abtsiocbq, 0);
12936 spin_unlock(&pring_s4->ring_lock);
12938 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12939 abtsiocbq, 0);
12942 spin_unlock(&lpfc_cmd->buf_lock);
12944 if (ret_val == IOCB_ERROR)
12945 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12946 else
12947 sum++;
12949 spin_unlock_irqrestore(&phba->hbalock, iflags);
12950 return sum;
12954 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12955 * @phba: Pointer to HBA context object.
12956 * @cmdiocbq: Pointer to command iocb.
12957 * @rspiocbq: Pointer to response iocb.
12959 * This function is the completion handler for iocbs issued using
12960 * lpfc_sli_issue_iocb_wait function. This function is called by the
12961 * ring event handler function without any lock held. This function
12962 * can be called from both worker thread context and interrupt
12963 * context. This function also can be called from other thread which
12964 * cleans up the SLI layer objects.
12965 * This function copies the contents of the response iocb to the
12966 * response iocb memory object provided by the caller of
12967 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12968 * sleeps for the iocb completion.
12971 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12972 struct lpfc_iocbq *cmdiocbq,
12973 struct lpfc_iocbq *rspiocbq)
12975 wait_queue_head_t *pdone_q;
12976 unsigned long iflags;
12977 struct lpfc_io_buf *lpfc_cmd;
12978 size_t offset = offsetof(struct lpfc_iocbq, wqe);
12980 spin_lock_irqsave(&phba->hbalock, iflags);
12981 if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
12984 * A time out has occurred for the iocb. If a time out
12985 * completion handler has been supplied, call it. Otherwise,
12986 * just free the iocbq.
12989 spin_unlock_irqrestore(&phba->hbalock, iflags);
12990 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
12991 cmdiocbq->wait_cmd_cmpl = NULL;
12992 if (cmdiocbq->cmd_cmpl)
12993 cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
12994 else
12995 lpfc_sli_release_iocbq(phba, cmdiocbq);
12996 return;
12999 /* Copy the contents of the local rspiocb into the caller's buffer. */
13000 cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13001 if (cmdiocbq->context2 && rspiocbq)
13002 memcpy((char *)cmdiocbq->context2 + offset,
13003 (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13005 /* Set the exchange busy flag for task management commands */
13006 if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13007 !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13008 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13010 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13011 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13012 else
13013 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13014 }
13016 pdone_q = cmdiocbq->context_un.wait_queue;
13017 if (pdone_q)
13018 wake_up(pdone_q);
13019 spin_unlock_irqrestore(&phba->hbalock, iflags);
13024 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
13025 * @phba: Pointer to HBA context object..
13026 * @piocbq: Pointer to command iocb.
13027 * @flag: Flag to test.
13029 * This routine grabs the hbalock and then tests the cmd_flag to
13030 * see if the passed in flag is set.
13032 * 1 if flag is set.
13033 * 0 if flag is not set.
13036 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13037 struct lpfc_iocbq *piocbq, uint32_t flag)
13039 unsigned long iflags;
13040 int ret;
13042 spin_lock_irqsave(&phba->hbalock, iflags);
13043 ret = piocbq->cmd_flag & flag;
13044 spin_unlock_irqrestore(&phba->hbalock, iflags);
13046 return ret;
13050 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13051 * @phba: Pointer to HBA context object..
13052 * @ring_number: Ring number
13053 * @piocb: Pointer to command iocb.
13054 * @prspiocbq: Pointer to response iocb.
13055 * @timeout: Timeout in number of seconds.
13057 * This function issues the iocb to firmware and waits for the
13058 * iocb to complete. The cmd_cmpl field of the iocb shall be used
13059 * to handle iocbs which time out. If the field is NULL, the
13060 * function shall free the iocbq structure. If more clean up is
13061 * needed, the caller is expected to provide a completion function
13062 * that will provide the needed clean up. If the iocb command is
13063 * not completed within timeout seconds, the function will either
13064 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
13065 * completion function set in the cmd_cmpl field and then return
13066 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
13067 * resources if this function returns IOCB_TIMEDOUT.
13068 * The function waits for the iocb completion using a
13069 * non-interruptible wait.
13070 * This function will sleep while waiting for iocb completion.
13071 * So, this function should not be called from any context which
13072 * does not allow sleeping. Due to the same reason, this function
13073 * cannot be called with interrupt disabled.
13074 * This function assumes that the iocb completions occur while
13075 * this function sleeps. So, this function cannot be called from
13076 * the thread which processes iocb completion for this ring.
13077 * This function clears the cmd_flag of the iocb object before
13078 * issuing the iocb and the iocb completion handler sets this
13079 * flag and wakes this thread when the iocb completes.
13080 * The contents of the response iocb will be copied to prspiocbq
13081 * by the completion handler when the command completes.
13082 * This function returns IOCB_SUCCESS when success.
13083 * This function is called with no lock held.
13086 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13087 uint32_t ring_number,
13088 struct lpfc_iocbq *piocb,
13089 struct lpfc_iocbq *prspiocbq,
13092 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13093 long timeleft, timeout_req = 0;
13094 int retval = IOCB_SUCCESS;
13096 struct lpfc_iocbq *iocb;
13098 int txcmplq_cnt = 0;
13099 struct lpfc_sli_ring *pring;
13100 unsigned long iflags;
13101 bool iocb_completed = true;
13103 if (phba->sli_rev >= LPFC_SLI_REV4)
13104 pring = lpfc_sli4_calc_ring(phba, piocb);
13105 else
13106 pring = &phba->sli.sli3_ring[ring_number];
13108 * If the caller has provided a response iocbq buffer, then context2
13109 * must be NULL or it is an error.
13111 if (prspiocbq) {
13112 if (piocb->context2)
13113 return IOCB_ERROR;
13114 piocb->context2 = prspiocbq;
13117 piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13118 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13119 piocb->context_un.wait_queue = &done_q;
13120 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13122 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13123 if (lpfc_readl(phba->HCregaddr, &creg_val))
13124 return IOCB_ERROR;
13125 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13126 writel(creg_val, phba->HCregaddr);
13127 readl(phba->HCregaddr); /* flush */
13130 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13131 SLI_IOCB_RET_IOCB);
13132 if (retval == IOCB_SUCCESS) {
13133 timeout_req = msecs_to_jiffies(timeout * 1000);
13134 timeleft = wait_event_timeout(done_q,
13135 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13137 spin_lock_irqsave(&phba->hbalock, iflags);
13138 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13141 * IOCB timed out. Inform the wake iocb wait
13142 * completion function and set local status
13145 iocb_completed = false;
13146 piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13148 spin_unlock_irqrestore(&phba->hbalock, iflags);
13149 if (iocb_completed) {
13150 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13151 "0331 IOCB wake signaled\n");
13152 /* Note: we are not indicating if the IOCB has a success
13153 * status or not - that's for the caller to check.
13154 * IOCB_SUCCESS means just that the command was sent and
13155 * completed. Not that it completed successfully.
13157 } else if (timeleft == 0) {
13158 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13159 "0338 IOCB wait timeout error - no "
13160 "wake response Data x%x\n", timeout);
13161 retval = IOCB_TIMEDOUT;
13163 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13164 "0330 IOCB wake NOT set, "
13166 timeout, (timeleft / jiffies));
13167 retval = IOCB_TIMEDOUT;
13169 } else if (retval == IOCB_BUSY) {
13170 if (phba->cfg_log_verbose & LOG_SLI) {
13171 list_for_each_entry(iocb, &pring->txq, list) {
13172 txq_cnt++;
13173 }
13174 list_for_each_entry(iocb, &pring->txcmplq, list) {
13175 txcmplq_cnt++;
13176 }
13177 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13178 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13179 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13183 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13184 "0332 IOCB wait issue failed, Data x%x\n",
13186 retval = IOCB_ERROR;
13189 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13190 if (lpfc_readl(phba->HCregaddr, &creg_val))
13191 return IOCB_ERROR;
13192 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13193 writel(creg_val, phba->HCregaddr);
13194 readl(phba->HCregaddr); /* flush */
13198 piocb->context2 = NULL;
13200 piocb->context_un.wait_queue = NULL;
13201 piocb->cmd_cmpl = NULL;
13203 return retval;
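/* Illustrative sketch (not part of the driver): a synchronous ELS-ring
 * submission with a caller-owned response iocb. On IOCB_TIMEDOUT the
 * caller must not free the command iocb:
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, prsp, 30);
 *	if (rc != IOCB_TIMEDOUT)
 *		lpfc_sli_release_iocbq(phba, piocb);
 */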
13206 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13207 * @phba: Pointer to HBA context object.
13208 * @pmboxq: Pointer to driver mailbox object.
13209 * @timeout: Timeout in number of seconds.
13211 * This function issues the mailbox to firmware and waits for the
13212 * mailbox command to complete. If the mailbox command is not
13213 * completed within timeout seconds, it returns MBX_TIMEOUT.
13214 * The function waits for the mailbox completion using an
13215 * interruptible wait. If the thread is woken up due to a
13216 * signal, MBX_TIMEOUT error is returned to the caller. Caller
13217 * should not free the mailbox resources if this function returns
13218 * MBX_TIMEOUT.
13219 * This function will sleep while waiting for mailbox completion.
13220 * So, this function should not be called from any context which
13221 * does not allow sleeping. Due to the same reason, this function
13222 * cannot be called with interrupt disabled.
13223 * This function assumes that the mailbox completion occurs while
13224 * this function sleeps. So, this function cannot be called from
13225 * the worker thread which processes mailbox completion.
13226 * This function is called in the context of HBA management
13227 * applications.
13228 * This function returns MBX_SUCCESS when successful.
13229 * This function is called with no lock held.
13232 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13235 struct completion mbox_done;
13237 unsigned long flag;
13239 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13240 /* setup wake call as the mailbox completion callback */
13241 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13243 /* setup context3 field to pass the completion to the wake function */
13244 init_completion(&mbox_done);
13245 pmboxq->context3 = &mbox_done;
13246 /* now issue the command */
13247 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13248 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13249 wait_for_completion_timeout(&mbox_done,
13250 msecs_to_jiffies(timeout * 1000));
13252 spin_lock_irqsave(&phba->hbalock, flag);
13253 pmboxq->context3 = NULL;
13255 * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
13256 * otherwise, do not free the resources.
13258 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13259 retval = MBX_SUCCESS;
13261 retval = MBX_TIMEOUT;
13262 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13264 spin_unlock_irqrestore(&phba->hbalock, flag);
13266 return retval;
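/* Illustrative sketch (not part of the driver; lpfc_read_rev is used
 * only as an example mailbox setup helper): a synchronous mailbox
 * round-trip. On MBX_TIMEOUT, ownership stays with the mailbox system:
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */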
13270 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13271 * @phba: Pointer to HBA context.
13272 * @mbx_action: Mailbox shutdown options.
13274 * This function is called to shutdown the driver's mailbox sub-system.
13275 * It first marks the mailbox sub-system as blocked to prevent
13276 * asynchronous mailbox commands from being issued off the pending mailbox
13277 * command queue. If the mailbox command sub-system shutdown is due to
13278 * HBA error conditions such as EEH or ERATT, this routine shall invoke
13279 * the mailbox sub-system flush routine to forcefully bring down the
13280 * mailbox sub-system. Otherwise, if it is due to normal condition (such
13281 * as with offline or HBA function reset), this routine will wait for the
13282 * outstanding mailbox command to complete before invoking the mailbox
13283 * sub-system flush routine to gracefully bring down mailbox sub-system.
13286 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13288 struct lpfc_sli *psli = &phba->sli;
13289 unsigned long timeout;
13291 if (mbx_action == LPFC_MBX_NO_WAIT) {
13292 /* delay 100ms for port state */
13293 msleep(100);
13294 lpfc_sli_mbox_sys_flush(phba);
13295 return;
13297 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13299 /* Disable softirqs, including timers from obtaining phba->hbalock */
13300 local_bh_disable();
13302 spin_lock_irq(&phba->hbalock);
13303 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13305 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13306 /* Determine how long we might wait for the active mailbox
13307 * command to be gracefully completed by firmware.
13309 if (phba->sli.mbox_active)
13310 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13311 phba->sli.mbox_active) *
13312 1000) + jiffies;
13313 spin_unlock_irq(&phba->hbalock);
13315 /* Enable softirqs again, done with phba->hbalock */
13316 local_bh_enable();
13318 while (phba->sli.mbox_active) {
13319 /* Check active mailbox complete status every 2ms */
13320 msleep(2);
13321 if (time_after(jiffies, timeout))
13322 /* Timeout, let the mailbox flush routine
13323 * forcefully release the active mailbox command
13324 */
13325 break;
13328 spin_unlock_irq(&phba->hbalock);
13330 /* Enable softirqs again, done with phba->hbalock */
13331 local_bh_enable();
13334 lpfc_sli_mbox_sys_flush(phba);
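/* Illustrative sketch (not part of the driver): error paths that cannot
 * wait (EEH, ERATT) shut down with LPFC_MBX_NO_WAIT, while a graceful
 * offline uses LPFC_MBX_WAIT so an active mailbox can finish first:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
 */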
13338 * lpfc_sli_eratt_read - read sli-3 error attention events
13339 * @phba: Pointer to HBA context.
13341 * This function is called to read the SLI3 device error attention registers
13342 * for possible error attention events. The caller must hold the hostlock
13343 * with spin_lock_irq().
13345 * This function returns 1 when there is Error Attention in the Host Attention
13346 * Register and returns 0 otherwise.
13349 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13353 /* Read chip Host Attention (HA) register */
13354 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13355 goto unplug_err;
13357 if (ha_copy & HA_ERATT) {
13358 /* Read host status register to retrieve error event */
13359 if (lpfc_sli_read_hs(phba))
13360 goto unplug_err;
13362 /* Check if a deferred error condition is active */
13363 if ((HS_FFER1 & phba->work_hs) &&
13364 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13365 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13366 phba->hba_flag |= DEFER_ERATT;
13367 /* Clear all interrupt enable conditions */
13368 writel(0, phba->HCregaddr);
13369 readl(phba->HCregaddr);
13372 /* Set the driver HA work bitmap */
13373 phba->work_ha |= HA_ERATT;
13374 /* Indicate polling handles this ERATT */
13375 phba->hba_flag |= HBA_ERATT_HANDLED;
13376 return 1;
13377 }
13378 return 0;
13380 unplug_err:
13381 /* Set the driver HS work bitmap */
13382 phba->work_hs |= UNPLUG_ERR;
13383 /* Set the driver HA work bitmap */
13384 phba->work_ha |= HA_ERATT;
13385 /* Indicate polling handles this ERATT */
13386 phba->hba_flag |= HBA_ERATT_HANDLED;
13387 return 1;
13391 * lpfc_sli4_eratt_read - read sli-4 error attention events
13392 * @phba: Pointer to HBA context.
13394 * This function is called to read the SLI4 device error attention registers
13395 * for possible error attention events. The caller must hold the hostlock
13396 * with spin_lock_irq().
13398 * This function returns 1 when there is Error Attention in the Host Attention
13399 * Register and returns 0 otherwise.
13402 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13404 uint32_t uerr_sta_hi, uerr_sta_lo;
13405 uint32_t if_type, portsmphr;
13406 struct lpfc_register portstat_reg;
13407 uint32_t logmask;
13410 * For now, use the SLI4 device internal unrecoverable error
13411 * registers for error attention. This can be changed later.
13413 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13415 case LPFC_SLI_INTF_IF_TYPE_0:
13416 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13417 &uerr_sta_lo) ||
13418 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13419 &uerr_sta_hi)) {
13420 phba->work_hs |= UNPLUG_ERR;
13421 phba->work_ha |= HA_ERATT;
13422 phba->hba_flag |= HBA_ERATT_HANDLED;
13425 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13426 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13428 "1423 HBA Unrecoverable error: "
13429 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13430 "ue_mask_lo_reg=0x%x, "
13431 "ue_mask_hi_reg=0x%x\n",
13432 uerr_sta_lo, uerr_sta_hi,
13433 phba->sli4_hba.ue_mask_lo,
13434 phba->sli4_hba.ue_mask_hi);
13435 phba->work_status[0] = uerr_sta_lo;
13436 phba->work_status[1] = uerr_sta_hi;
13437 phba->work_ha |= HA_ERATT;
13438 phba->hba_flag |= HBA_ERATT_HANDLED;
13442 case LPFC_SLI_INTF_IF_TYPE_2:
13443 case LPFC_SLI_INTF_IF_TYPE_6:
13444 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13445 &portstat_reg.word0) ||
13446 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13447 &portsmphr)) {
13448 phba->work_hs |= UNPLUG_ERR;
13449 phba->work_ha |= HA_ERATT;
13450 phba->hba_flag |= HBA_ERATT_HANDLED;
13453 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13454 phba->work_status[0] =
13455 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13456 phba->work_status[1] =
13457 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13458 logmask = LOG_TRACE_EVENT;
13459 if (phba->work_status[0] ==
13460 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13461 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13462 logmask = LOG_SLI;
13463 lpfc_printf_log(phba, KERN_ERR, logmask,
13464 "2885 Port Status Event: "
13465 "port status reg 0x%x, "
13466 "port smphr reg 0x%x, "
13467 "error 1=0x%x, error 2=0x%x\n",
13468 portstat_reg.word0,
13470 phba->work_status[0],
13471 phba->work_status[1]);
13472 phba->work_ha |= HA_ERATT;
13473 phba->hba_flag |= HBA_ERATT_HANDLED;
13477 case LPFC_SLI_INTF_IF_TYPE_1:
13478 default:
13479 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13480 "2886 HBA Error Attention on unsupported "
13481 "if type %d.", if_type);
13489 * lpfc_sli_check_eratt - check error attention events
13490 * @phba: Pointer to HBA context.
13492 * This function is called from timer soft interrupt context to check HBA's
13493 * error attention register bit for error attention events.
13495 * This function returns 1 when there is Error Attention in the Host Attention
13496 * Register and returns 0 otherwise.
13499 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13503 /* If somebody is waiting to handle an eratt, don't process it
13504 * here. The brdkill function will do this.
13506 if (phba->link_flag & LS_IGNORE_ERATT)
13509 /* Check if interrupt handler handles this ERATT */
13510 spin_lock_irq(&phba->hbalock);
13511 if (phba->hba_flag & HBA_ERATT_HANDLED) {
13512 /* Interrupt handler has handled ERATT */
13513 spin_unlock_irq(&phba->hbalock);
13514 return 0;
13518 * If there is deferred error attention, do not check for error
13519 * attention.
13521 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13522 spin_unlock_irq(&phba->hbalock);
13523 return 0;
13526 /* If PCI channel is offline, don't process it */
13527 if (unlikely(pci_channel_offline(phba->pcidev))) {
13528 spin_unlock_irq(&phba->hbalock);
13529 return 0;
13532 switch (phba->sli_rev) {
13533 case LPFC_SLI_REV2:
13534 case LPFC_SLI_REV3:
13535 /* Read chip Host Attention (HA) register */
13536 ha_copy = lpfc_sli_eratt_read(phba);
13537 break;
13538 case LPFC_SLI_REV4:
13539 /* Read device Uncoverable Error (UERR) registers */
13540 ha_copy = lpfc_sli4_eratt_read(phba);
13541 break;
13543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13544 "0299 Invalid SLI revision (%d)\n",
13549 spin_unlock_irq(&phba->hbalock);
13550 return ha_copy;
13555 * lpfc_intr_state_check - Check device state for interrupt handling
13556 * @phba: Pointer to HBA context.
13558 * This inline routine checks whether a device or its PCI slot is in a state
13559 * in which the interrupt should be handled.
13561 * This function returns 0 if the device or the PCI slot is in a state in which
13562 * the interrupt should be handled; otherwise -EIO.
13565 lpfc_intr_state_check(struct lpfc_hba *phba)
13567 /* If the pci channel is offline, ignore all the interrupts */
13568 if (unlikely(pci_channel_offline(phba->pcidev)))
13571 /* Update device level interrupt statistics */
13572 phba->sli.slistat.sli_intr++;
13574 /* Ignore all interrupts during initialization. */
13575 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13582 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13583 * @irq: Interrupt number.
13584 * @dev_id: The device context pointer.
13586 * This function is directly called from the PCI layer as an interrupt
13587 * service routine when device with SLI-3 interface spec is enabled with
13588 * MSI-X multi-message interrupt mode and there are slow-path events in
13589 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13590 * interrupt mode, this function is called as part of the device-level
13591 * interrupt handler. When the PCI slot is in error recovery or the HBA
13592 * is undergoing initialization, the interrupt handler will not process
13593 * the interrupt. The link attention and ELS ring attention events are
13594 * handled by the worker thread. The interrupt handler signals the worker
13595 * thread and returns for these events. This function is called without
13596 * any lock held. It gets the hbalock to access and update SLI data
13599 * This function returns IRQ_HANDLED when interrupt is handled else it
13600 * returns IRQ_NONE.
13603 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13605 struct lpfc_hba *phba;
13606 uint32_t ha_copy, hc_copy;
13607 uint32_t work_ha_copy;
13608 unsigned long status;
13609 unsigned long iflag;
13612 MAILBOX_t *mbox, *pmbox;
13613 struct lpfc_vport *vport;
13614 struct lpfc_nodelist *ndlp;
13615 struct lpfc_dmabuf *mp;
13620 * Get the driver's phba structure from the dev_id and
13621 * assume the HBA is not interrupting.
13623 phba = (struct lpfc_hba *)dev_id;
13625 if (unlikely(!phba))
13629 * Special handling is needed when this function is invoked as an
13630 * individual interrupt handler in MSI-X multi-message interrupt mode
13632 if (phba->intr_type == MSIX) {
13633 /* Check device state for handling interrupt */
13634 if (lpfc_intr_state_check(phba))
13636 /* Need to read HA REG for slow-path events */
13637 spin_lock_irqsave(&phba->hbalock, iflag);
13638 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13640 /* If somebody is waiting to handle an eratt don't process it
13641 * here. The brdkill function will do this.
13643 if (phba->link_flag & LS_IGNORE_ERATT)
13644 ha_copy &= ~HA_ERATT;
13645 /* Check the need for handling ERATT in interrupt handler */
13646 if (ha_copy & HA_ERATT) {
13647 if (phba->hba_flag & HBA_ERATT_HANDLED)
13648 /* ERATT polling has handled ERATT */
13649 ha_copy &= ~HA_ERATT;
13651 /* Indicate interrupt handler handles ERATT */
13652 phba->hba_flag |= HBA_ERATT_HANDLED;
13656 * If there is deferred error attention, do not check for any
13659 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13660 spin_unlock_irqrestore(&phba->hbalock, iflag);
13664 /* Clear up only attention source related to slow-path */
13665 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13668 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13669 HC_LAINT_ENA | HC_ERINT_ENA),
13671 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13673 writel(hc_copy, phba->HCregaddr);
13674 readl(phba->HAregaddr); /* flush */
13675 spin_unlock_irqrestore(&phba->hbalock, iflag);
13677 ha_copy = phba->ha_copy;
13679 work_ha_copy = ha_copy & phba->work_ha_mask;
13681 if (work_ha_copy) {
13682 if (work_ha_copy & HA_LATT) {
13683 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13685 * Turn off Link Attention interrupts
13686 * until CLEAR_LA done
13688 spin_lock_irqsave(&phba->hbalock, iflag);
13689 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13690 if (lpfc_readl(phba->HCregaddr, &control))
13692 control &= ~HC_LAINT_ENA;
13693 writel(control, phba->HCregaddr);
13694 readl(phba->HCregaddr); /* flush */
13695 spin_unlock_irqrestore(&phba->hbalock, iflag);
13698 work_ha_copy &= ~HA_LATT;
13701 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13703 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13704 * the only slow ring.
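 * Each ring owns a 4-bit attention nibble in the HA register, so a
 * ring's receive bits are isolated by shifting HA_RXMASK by
 * (4 * ring). For example, assuming LPFC_ELS_RING == 2 and
 * HA_RXMASK == 0xf (the values defined in lpfc_hw.h), the mask
 * below selects HA register bits 8-11.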
13706 status = (work_ha_copy &
13707 (HA_RXMASK << (4*LPFC_ELS_RING)));
13708 status >>= (4*LPFC_ELS_RING);
13709 if (status & HA_RXMASK) {
13710 spin_lock_irqsave(&phba->hbalock, iflag);
13711 if (lpfc_readl(phba->HCregaddr, &control))
13714 lpfc_debugfs_slow_ring_trc(phba,
13715 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
13717 (uint32_t)phba->sli.slistat.sli_intr);
13719 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13720 lpfc_debugfs_slow_ring_trc(phba,
13721 "ISR Disable ring:"
13722 "pwork:x%x hawork:x%x wait:x%x",
13723 phba->work_ha, work_ha_copy,
13724 (uint32_t)((unsigned long)
13725 &phba->work_waitq));
13728 ~(HC_R0INT_ENA << LPFC_ELS_RING);
13729 writel(control, phba->HCregaddr);
13730 readl(phba->HCregaddr); /* flush */
13733 lpfc_debugfs_slow_ring_trc(phba,
13734 "ISR slow ring: pwork:"
13735 "x%x hawork:x%x wait:x%x",
13736 phba->work_ha, work_ha_copy,
13737 (uint32_t)((unsigned long)
13738 &phba->work_waitq));
13740 spin_unlock_irqrestore(&phba->hbalock, iflag);
13743 spin_lock_irqsave(&phba->hbalock, iflag);
13744 if (work_ha_copy & HA_ERATT) {
13745 if (lpfc_sli_read_hs(phba))
13748 * Check if there is a deferred error condition
13751 if ((HS_FFER1 & phba->work_hs) &&
13752 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13753 HS_FFER6 | HS_FFER7 | HS_FFER8) &
13755 phba->hba_flag |= DEFER_ERATT;
13756 /* Clear all interrupt enable conditions */
13757 writel(0, phba->HCregaddr);
13758 readl(phba->HCregaddr);
13762 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13763 pmb = phba->sli.mbox_active;
13764 pmbox = &pmb->u.mb;
13766 vport = pmb->vport;
13768 /* First check out the status word */
13769 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13770 if (pmbox->mbxOwner != OWN_HOST) {
13771 spin_unlock_irqrestore(&phba->hbalock, iflag);
13773 * Stray Mailbox Interrupt, mbxCommand <cmd>
13774 * mbxStatus <status>
13776 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13777 "(%d):0304 Stray Mailbox "
13778 "Interrupt mbxCommand x%x "
13780 (vport ? vport->vpi : 0),
13783 /* clear mailbox attention bit */
13784 work_ha_copy &= ~HA_MBATT;
13786 phba->sli.mbox_active = NULL;
13787 spin_unlock_irqrestore(&phba->hbalock, iflag);
13788 phba->last_completion_time = jiffies;
13789 del_timer(&phba->sli.mbox_tmo);
13790 if (pmb->mbox_cmpl) {
13791 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13793 if (pmb->out_ext_byte_len &&
13795 lpfc_sli_pcimem_bcopy(
13798 pmb->out_ext_byte_len);
13800 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13801 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13803 lpfc_debugfs_disc_trc(vport,
13804 LPFC_DISC_TRC_MBOX_VPORT,
13805 "MBOX dflt rpi: : "
13806 "status:x%x rpi:x%x",
13807 (uint32_t)pmbox->mbxStatus,
13808 pmbox->un.varWords[0], 0);
13810 if (!pmbox->mbxStatus) {
13811 mp = (struct lpfc_dmabuf *)
13813 ndlp = (struct lpfc_nodelist *)
13816 /* Reg_LOGIN of dflt RPI was
13817 * successful. Now let's get
13818 * rid of the RPI using the
13819 * same mbox buffer.
13821 lpfc_unreg_login(phba,
13823 pmbox->un.varWords[0],
13826 lpfc_mbx_cmpl_dflt_rpi;
13828 pmb->ctx_ndlp = ndlp;
13829 pmb->vport = vport;
13830 rc = lpfc_sli_issue_mbox(phba,
13833 if (rc != MBX_BUSY)
13834 lpfc_printf_log(phba,
13837 "0350 rc should have"
13838 "been MBX_BUSY\n");
13839 if (rc != MBX_NOT_FINISHED)
13840 goto send_current_mbox;
13844 &phba->pport->work_port_lock,
13846 phba->pport->work_port_events &=
13848 spin_unlock_irqrestore(
13849 &phba->pport->work_port_lock,
13852 /* Do NOT queue MBX_HEARTBEAT to the worker
13853 * thread for processing.
13855 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13856 /* Process mbox now */
13857 phba->sli.mbox_active = NULL;
13858 phba->sli.sli_flag &=
13859 ~LPFC_SLI_MBOX_ACTIVE;
13860 if (pmb->mbox_cmpl)
13861 pmb->mbox_cmpl(phba, pmb);
13863 /* Queue to worker thread to process */
13864 lpfc_mbox_cmpl_put(phba, pmb);
13868 spin_unlock_irqrestore(&phba->hbalock, iflag);
13870 if ((work_ha_copy & HA_MBATT) &&
13871 (phba->sli.mbox_active == NULL)) {
13873 /* Process next mailbox command if there is one */
13875 rc = lpfc_sli_issue_mbox(phba, NULL,
13877 } while (rc == MBX_NOT_FINISHED);
13878 if (rc != MBX_SUCCESS)
13879 lpfc_printf_log(phba, KERN_ERR,
13881 "0349 rc should be "
13885 spin_lock_irqsave(&phba->hbalock, iflag);
13886 phba->work_ha |= work_ha_copy;
13887 spin_unlock_irqrestore(&phba->hbalock, iflag);
13888 lpfc_worker_wake_up(phba);
13890 return IRQ_HANDLED;
13892 spin_unlock_irqrestore(&phba->hbalock, iflag);
13893 return IRQ_HANDLED;
13895 } /* lpfc_sli_sp_intr_handler */
13898 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13899 * @irq: Interrupt number.
13900 * @dev_id: The device context pointer.
13902 * This function is directly called from the PCI layer as an interrupt
13903 * service routine when device with SLI-3 interface spec is enabled with
13904 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13905 * ring event in the HBA. However, when the device is enabled with either
13906 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13907 * device-level interrupt handler. When the PCI slot is in error recovery
13908 * or the HBA is undergoing initialization, the interrupt handler will not
13909 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13910 * the interrupt context. This function is called without any lock held.
13911 * It gets the hbalock to access and update SLI data structures.
13913 * This function returns IRQ_HANDLED when interrupt is handled else it
13914 * returns IRQ_NONE.
13917 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13919 struct lpfc_hba *phba;
13921 unsigned long status;
13922 unsigned long iflag;
13923 struct lpfc_sli_ring *pring;
13925 /* Get the driver's phba structure from the dev_id and
13926 * assume the HBA is not interrupting.
13928 phba = (struct lpfc_hba *) dev_id;
13930 if (unlikely(!phba))
13934 * Special handling is needed when this function is invoked as an
13935 * individual interrupt handler in MSI-X multi-message interrupt mode
13937 if (phba->intr_type == MSIX) {
13938 /* Check device state for handling interrupt */
13939 if (lpfc_intr_state_check(phba))
13941 /* Need to read HA REG for FCP ring and other ring events */
13942 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13943 return IRQ_HANDLED;
13944 /* Clear up only attention source related to fast-path */
13945 spin_lock_irqsave(&phba->hbalock, iflag);
13947 * If there is deferred error attention, do not check for
13950 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13951 spin_unlock_irqrestore(&phba->hbalock, iflag);
13954 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13956 readl(phba->HAregaddr); /* flush */
13957 spin_unlock_irqrestore(&phba->hbalock, iflag);
13959 ha_copy = phba->ha_copy;
13962 * Process all events on FCP ring. Take the optimized path for FCP IO.
13964 ha_copy &= ~(phba->work_ha_mask);
13966 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13967 status >>= (4*LPFC_FCP_RING);
13968 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13969 if (status & HA_RXMASK)
13970 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13972 if (phba->cfg_multi_ring_support == 2) {
13974 * Process all events on extra ring. Take the optimized path
13975 * for extra ring IO.
13977 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13978 status >>= (4*LPFC_EXTRA_RING);
13979 if (status & HA_RXMASK) {
13980 lpfc_sli_handle_fast_ring_event(phba,
13981 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13985 return IRQ_HANDLED;
13986 } /* lpfc_sli_fp_intr_handler */
13989 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13990 * @irq: Interrupt number.
13991 * @dev_id: The device context pointer.
13993 * This function is the HBA device-level interrupt handler to device with
13994 * SLI-3 interface spec, called from the PCI layer when either MSI or
13995 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
13996 * requires driver attention. This function invokes the slow-path interrupt
13997 * attention handling function and fast-path interrupt attention handling
13998 * function in turn to process the relevant HBA attention events. This
13999 * function is called without any lock held. It gets the hbalock to access
14000 * and update SLI data structures.
14002 * This function returns IRQ_HANDLED when interrupt is handled, else it
14003 * returns IRQ_NONE.
14006 lpfc_sli_intr_handler(int irq, void *dev_id)
14008 struct lpfc_hba *phba;
14009 irqreturn_t sp_irq_rc, fp_irq_rc;
14010 unsigned long status1, status2;
14014 * Get the driver's phba structure from the dev_id and
14015 * assume the HBA is not interrupting.
14017 phba = (struct lpfc_hba *) dev_id;
14019 if (unlikely(!phba))
14022 /* Check device state for handling interrupt */
14023 if (lpfc_intr_state_check(phba))
14026 spin_lock(&phba->hbalock);
14027 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14028 spin_unlock(&phba->hbalock);
14029 return IRQ_HANDLED;
14032 if (unlikely(!phba->ha_copy)) {
14033 spin_unlock(&phba->hbalock);
14035 } else if (phba->ha_copy & HA_ERATT) {
14036 if (phba->hba_flag & HBA_ERATT_HANDLED)
14037 /* ERATT polling has handled ERATT */
14038 phba->ha_copy &= ~HA_ERATT;
14040 /* Indicate interrupt handler handles ERATT */
14041 phba->hba_flag |= HBA_ERATT_HANDLED;
14045 * If there is deferred error attention, do not check for any interrupt.
14047 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
14048 spin_unlock(&phba->hbalock);
14052 /* Clear attention sources except link and error attentions */
14053 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14054 spin_unlock(&phba->hbalock);
14055 return IRQ_HANDLED;
14057 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14058 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14060 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14061 writel(hc_copy, phba->HCregaddr);
14062 readl(phba->HAregaddr); /* flush */
14063 spin_unlock(&phba->hbalock);
14066 * Invokes slow-path host attention interrupt handling as appropriate.
14069 /* status of events with mailbox and link attention */
14070 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14072 /* status of events with ELS ring */
14073 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
14074 status2 >>= (4*LPFC_ELS_RING);
14076 if (status1 || (status2 & HA_RXMASK))
14077 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14079 sp_irq_rc = IRQ_NONE;
14082 * Invoke fast-path host attention interrupt handling as appropriate.
14085 /* status of events with FCP ring */
14086 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14087 status1 >>= (4*LPFC_FCP_RING);
14089 /* status of events with extra ring */
14090 if (phba->cfg_multi_ring_support == 2) {
14091 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14092 status2 >>= (4*LPFC_EXTRA_RING);
14096 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14097 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14099 fp_irq_rc = IRQ_NONE;
14101 /* Return device-level interrupt handling status */
14102 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14103 } /* lpfc_sli_intr_handler */
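
/*
 * Registration sketch (illustrative only, not part of this file): with
 * MSI-X, lpfc_sli_sp_intr_handler() and lpfc_sli_fp_intr_handler() are
 * attached to separate vectors, while MSI or INTx attaches this
 * combined handler to the single vector, along the lines of:
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *			 lpfc_sli_intr_handler, IRQF_SHARED,
 *			 "lpfc", phba);
 *
 * The "lpfc" name string above is a placeholder, not the driver's
 * actual handler name.
 */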
14106 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14107 * @phba: pointer to lpfc hba data structure.
14109 * This routine is invoked by the worker thread to process all the pending
14110 * SLI4 els abort xri events.
14112 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14114 struct lpfc_cq_event *cq_event;
14115 unsigned long iflags;
14117 /* First, declare the els xri abort event has been handled */
14118 spin_lock_irqsave(&phba->hbalock, iflags);
14119 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
14120 spin_unlock_irqrestore(&phba->hbalock, iflags);
14122 /* Now, handle all the els xri abort events */
14123 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14124 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14125 /* Get the first event from the head of the event queue */
14126 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14127 cq_event, struct lpfc_cq_event, list);
14128 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14130 /* Notify aborted XRI for ELS work queue */
14131 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14133 /* Free the event processed back to the free pool */
14134 lpfc_sli4_cq_event_release(phba, cq_event);
14135 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14138 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
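	/* The abort-list lock is dropped around each
	 * lpfc_sli4_els_xri_aborted() callout above so that the handler
	 * can take other locks without nesting under the list lock.
	 */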
14142 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14143 * @phba: Pointer to HBA context object.
14144 * @irspiocbq: Pointer to work-queue completion queue entry.
14146 * This routine handles an ELS work-queue completion event and constructs
14147 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14148 * discovery engine to handle.
14150 * Return: Pointer to the receive IOCBQ, NULL otherwise.
14152 static struct lpfc_iocbq *
14153 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14154 struct lpfc_iocbq *irspiocbq)
14156 struct lpfc_sli_ring *pring;
14157 struct lpfc_iocbq *cmdiocbq;
14158 struct lpfc_wcqe_complete *wcqe;
14159 unsigned long iflags;
14161 pring = lpfc_phba_elsring(phba);
14162 if (unlikely(!pring))
14165 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14166 spin_lock_irqsave(&pring->ring_lock, iflags);
14167 pring->stats.iocb_event++;
14168 /* Look up the ELS command IOCB and create pseudo response IOCB */
14169 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14170 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14171 if (unlikely(!cmdiocbq)) {
14172 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14173 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14174 "0386 ELS complete with no corresponding "
14175 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14176 wcqe->word0, wcqe->total_data_placed,
14177 wcqe->parameter, wcqe->word3);
14178 lpfc_sli_release_iocbq(phba, irspiocbq);
14182 memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14183 memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14185 /* Put the iocb back on the txcmplq */
14186 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14187 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14189 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14190 spin_lock_irqsave(&phba->hbalock, iflags);
14191 irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14192 spin_unlock_irqrestore(&phba->hbalock, iflags);
14198 inline struct lpfc_cq_event *
14199 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14201 struct lpfc_cq_event *cq_event;
14203 /* Allocate a new internal CQ_EVENT entry */
14204 cq_event = lpfc_sli4_cq_event_alloc(phba);
14206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14207 "0602 Failed to alloc CQ_EVENT entry\n");
14211 /* Move the CQE into the event */
14212 memcpy(&cq_event->cqe, entry, size);
14217 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14218 * @phba: Pointer to HBA context object.
14219 * @mcqe: Pointer to mailbox completion queue entry.
14221 * This routine processes a mailbox completion queue entry with an asynchronous
14224 * Return: true if work posted to worker thread, otherwise false.
14227 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14229 struct lpfc_cq_event *cq_event;
14230 unsigned long iflags;
14232 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14233 "0392 Async Event: word0:x%x, word1:x%x, "
14234 "word2:x%x, word3:x%x\n", mcqe->word0,
14235 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14237 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14241 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14242 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14243 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14245 /* Set the async event flag */
14246 spin_lock_irqsave(&phba->hbalock, iflags);
14247 phba->hba_flag |= ASYNC_EVENT;
14248 spin_unlock_irqrestore(&phba->hbalock, iflags);
14254 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14255 * @phba: Pointer to HBA context object.
14256 * @mcqe: Pointer to mailbox completion queue entry.
14258 * This routine processes a mailbox completion queue entry with a mailbox
14259 * completion event.
14261 * Return: true if work posted to worker thread, otherwise false.
14264 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14266 uint32_t mcqe_status;
14267 MAILBOX_t *mbox, *pmbox;
14268 struct lpfc_mqe *mqe;
14269 struct lpfc_vport *vport;
14270 struct lpfc_nodelist *ndlp;
14271 struct lpfc_dmabuf *mp;
14272 unsigned long iflags;
14274 bool workposted = false;
14277 /* If not a mailbox-complete MCQE, bail out after checking the consumed bit */
14278 if (!bf_get(lpfc_trailer_completed, mcqe))
14279 goto out_no_mqe_complete;
14281 /* Get the reference to the active mbox command */
14282 spin_lock_irqsave(&phba->hbalock, iflags);
14283 pmb = phba->sli.mbox_active;
14284 if (unlikely(!pmb)) {
14285 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14286 "1832 No pending MBOX command to handle\n");
14287 spin_unlock_irqrestore(&phba->hbalock, iflags);
14288 goto out_no_mqe_complete;
14290 spin_unlock_irqrestore(&phba->hbalock, iflags);
14292 pmbox = (MAILBOX_t *)&pmb->u.mqe;
14294 vport = pmb->vport;
14296 /* Reset heartbeat timer */
14297 phba->last_completion_time = jiffies;
14298 del_timer(&phba->sli.mbox_tmo);
14300 /* Move mbox data to caller's mailbox region, do endian swapping */
14301 if (pmb->mbox_cmpl && mbox)
14302 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14305 * For mcqe errors, conditionally move a modified error code to
14306 * the mbox so that the error will not be missed.
14308 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14309 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14310 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14311 bf_set(lpfc_mqe_status, mqe,
14312 (LPFC_MBX_ERROR_RANGE | mcqe_status));
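		/* LPFC_MBX_ERROR_RANGE tags the substituted status so a
		 * caller can distinguish an MCQE-derived error from a
		 * native mailbox status.
		 */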
14314 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14315 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14316 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14317 "MBOX dflt rpi: status:x%x rpi:x%x",
14319 pmbox->un.varWords[0], 0);
14320 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14321 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14322 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14324 /* Reg_LOGIN of dflt RPI was successful. Mark the
14325 * node as having an UNREG_LOGIN in progress to stop
14326 * an unsolicited PLOGI from the same NPortId from
14327 * starting another mailbox transaction.
14329 spin_lock_irqsave(&ndlp->lock, iflags);
14330 ndlp->nlp_flag |= NLP_UNREG_INP;
14331 spin_unlock_irqrestore(&ndlp->lock, iflags);
14332 lpfc_unreg_login(phba, vport->vpi,
14333 pmbox->un.varWords[0], pmb);
14334 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14337 /* No reference taken here. This is a default
14338 * RPI reg/immediate unreg cycle. The reference was
14339 * taken in the reg rpi path and is released when
14340 * this mailbox completes.
14342 pmb->ctx_ndlp = ndlp;
14343 pmb->vport = vport;
14344 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14345 if (rc != MBX_BUSY)
14346 lpfc_printf_log(phba, KERN_ERR,
14349 "have been MBX_BUSY\n");
14350 if (rc != MBX_NOT_FINISHED)
14351 goto send_current_mbox;
14354 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14355 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14356 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14358 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14359 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14360 spin_lock_irqsave(&phba->hbalock, iflags);
14361 /* Release the mailbox command posting token */
14362 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14363 phba->sli.mbox_active = NULL;
14364 if (bf_get(lpfc_trailer_consumed, mcqe))
14365 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14366 spin_unlock_irqrestore(&phba->hbalock, iflags);
14368 /* Post the next mbox command, if there is one */
14369 lpfc_sli4_post_async_mbox(phba);
14371 /* Process cmpl now */
14372 if (pmb->mbox_cmpl)
14373 pmb->mbox_cmpl(phba, pmb);
14377 /* There is mailbox completion work to queue to the worker thread */
14378 spin_lock_irqsave(&phba->hbalock, iflags);
14379 __lpfc_mbox_cmpl_put(phba, pmb);
14380 phba->work_ha |= HA_MBATT;
14381 spin_unlock_irqrestore(&phba->hbalock, iflags);
14385 spin_lock_irqsave(&phba->hbalock, iflags);
14386 /* Release the mailbox command posting token */
14387 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14388 /* Setting the active mailbox pointer needs to be in sync with the flag clear */
14389 phba->sli.mbox_active = NULL;
14390 if (bf_get(lpfc_trailer_consumed, mcqe))
14391 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14392 spin_unlock_irqrestore(&phba->hbalock, iflags);
14393 /* Wake up worker thread to post the next pending mailbox command */
14394 lpfc_worker_wake_up(phba);
14397 out_no_mqe_complete:
14398 spin_lock_irqsave(&phba->hbalock, iflags);
14399 if (bf_get(lpfc_trailer_consumed, mcqe))
14400 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14401 spin_unlock_irqrestore(&phba->hbalock, iflags);
14406 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14407 * @phba: Pointer to HBA context object.
14408 * @cq: Pointer to associated CQ
14409 * @cqe: Pointer to mailbox completion queue entry.
14411 * This routine processes a mailbox completion queue entry; it invokes the
14412 * proper mailbox completion handling or asynchronous event handling routine
14413 * according to the MCQE's async bit.
14415 * Return: true if work posted to worker thread, otherwise false.
14418 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14419 struct lpfc_cqe *cqe)
14421 struct lpfc_mcqe mcqe;
14426 /* Copy the mailbox MCQE and convert endian order as needed */
14427 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14429 /* Invoke the proper event handling routine */
14430 if (!bf_get(lpfc_trailer_async, &mcqe))
14431 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14433 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14438 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14439 * @phba: Pointer to HBA context object.
14440 * @cq: Pointer to associated CQ
14441 * @wcqe: Pointer to work-queue completion queue entry.
14443 * This routine handles an ELS work-queue completion event.
14445 * Return: true if work posted to worker thread, otherwise false.
14448 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14449 struct lpfc_wcqe_complete *wcqe)
14451 struct lpfc_iocbq *irspiocbq;
14452 unsigned long iflags;
14453 struct lpfc_sli_ring *pring = cq->pring;
14455 int txcmplq_cnt = 0;
14457 /* Check for response status */
14458 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14459 /* Log the error status */
14460 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14461 "0357 ELS CQE error: status=x%x: "
14462 "CQE: %08x %08x %08x %08x\n",
14463 bf_get(lpfc_wcqe_c_status, wcqe),
14464 wcqe->word0, wcqe->total_data_placed,
14465 wcqe->parameter, wcqe->word3);
14468 /* Get an irspiocbq for later ELS response processing use */
14469 irspiocbq = lpfc_sli_get_iocbq(phba);
14471 if (!list_empty(&pring->txq))
14473 if (!list_empty(&pring->txcmplq))
14475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14476 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14477 "els_txcmplq_cnt=%d\n",
14478 txq_cnt, phba->iocb_cnt,
14483 /* Save off the slow-path queue event for work thread to process */
14484 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14485 spin_lock_irqsave(&phba->hbalock, iflags);
14486 list_add_tail(&irspiocbq->cq_event.list,
14487 &phba->sli4_hba.sp_queue_event);
14488 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14489 spin_unlock_irqrestore(&phba->hbalock, iflags);
14495 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14496 * @phba: Pointer to HBA context object.
14497 * @wcqe: Pointer to work-queue completion queue entry.
14499 * This routine handles a slow-path WQ entry consumed event by invoking the
14500 * proper WQ release routine to the slow-path WQ.
14503 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14504 struct lpfc_wcqe_release *wcqe)
14506 /* sanity check on queue memory */
14507 if (unlikely(!phba->sli4_hba.els_wq))
14509 /* Check for the slow-path ELS work queue */
14510 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14511 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14512 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14514 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14515 "2579 Slow-path wqe consume event carries "
14516 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14517 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14518 phba->sli4_hba.els_wq->queue_id);
14522 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14523 * @phba: Pointer to HBA context object.
14524 * @cq: Pointer to a WQ completion queue.
14525 * @wcqe: Pointer to work-queue completion queue entry.
14527 * This routine handles an XRI abort event.
14529 * Return: true if work posted to worker thread, otherwise false.
14532 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14533 struct lpfc_queue *cq,
14534 struct sli4_wcqe_xri_aborted *wcqe)
14536 bool workposted = false;
14537 struct lpfc_cq_event *cq_event;
14538 unsigned long iflags;
14540 switch (cq->subtype) {
14542 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14543 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14544 /* Notify aborted XRI for NVME work queue */
14545 if (phba->nvmet_support)
14546 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14548 workposted = false;
14550 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14552 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14554 workposted = false;
14557 cq_event->hdwq = cq->hdwq;
14558 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14560 list_add_tail(&cq_event->list,
14561 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14562 /* Set the els xri abort event flag */
14563 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14564 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14569 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14570 "0603 Invalid CQ subtype %d: "
14571 "%08x %08x %08x %08x\n",
14572 cq->subtype, wcqe->word0, wcqe->parameter,
14573 wcqe->word2, wcqe->word3);
14574 workposted = false;
14580 #define FC_RCTL_MDS_DIAGS 0xF4
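/* FC_RCTL_MDS_DIAGS is a vendor-specific R_CTL value carried by MDS
 * diagnostic frames; it is matched below to steer such frames through
 * the MDS loopback path.
 */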
14583 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14584 * @phba: Pointer to HBA context object.
14585 * @rcqe: Pointer to receive-queue completion queue entry.
14587 * This routine processes a receive-queue completion queue entry.
14589 * Return: true if work posted to worker thread, otherwise false.
14592 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14594 bool workposted = false;
14595 struct fc_frame_header *fc_hdr;
14596 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14597 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14598 struct lpfc_nvmet_tgtport *tgtp;
14599 struct hbq_dmabuf *dma_buf;
14600 uint32_t status, rq_id;
14601 unsigned long iflags;
14603 /* sanity check on queue memory */
14604 if (unlikely(!hrq) || unlikely(!drq))
14607 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14608 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14610 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14611 if (rq_id != hrq->queue_id)
14614 status = bf_get(lpfc_rcqe_status, rcqe);
14616 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14617 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14618 "2537 Receive Frame Truncated!!\n");
14620 case FC_STATUS_RQ_SUCCESS:
14621 spin_lock_irqsave(&phba->hbalock, iflags);
14622 lpfc_sli4_rq_release(hrq, drq);
14623 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14625 hrq->RQ_no_buf_found++;
14626 spin_unlock_irqrestore(&phba->hbalock, iflags);
14630 hrq->RQ_buf_posted--;
14631 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14633 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14635 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14636 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14637 spin_unlock_irqrestore(&phba->hbalock, iflags);
14638 /* Handle MDS Loopback frames */
14639 if (!(phba->pport->load_flag & FC_UNLOADING))
14640 lpfc_sli4_handle_mds_loopback(phba->pport,
14643 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14647 /* save off the frame for the work thread to process */
14648 list_add_tail(&dma_buf->cq_event.list,
14649 &phba->sli4_hba.sp_queue_event);
14650 /* Frame received */
14651 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14652 spin_unlock_irqrestore(&phba->hbalock, iflags);
14655 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14656 if (phba->nvmet_support) {
14657 tgtp = phba->targetport->private;
14658 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14659 "6402 RQE Error x%x, posted %d err_cnt "
14661 status, hrq->RQ_buf_posted,
14662 hrq->RQ_no_posted_buf,
14663 atomic_read(&tgtp->rcv_fcp_cmd_in),
14664 atomic_read(&tgtp->rcv_fcp_cmd_out),
14665 atomic_read(&tgtp->xmt_fcp_release));
14669 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14670 hrq->RQ_no_posted_buf++;
14671 /* Post more buffers if possible */
14672 spin_lock_irqsave(&phba->hbalock, iflags);
14673 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14674 spin_unlock_irqrestore(&phba->hbalock, iflags);
14683 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14684 * @phba: Pointer to HBA context object.
14685 * @cq: Pointer to the completion queue.
14686 * @cqe: Pointer to a completion queue entry.
14688 * This routine processes a slow-path work-queue or receive-queue completion queue
14691 * Return: true if work posted to worker thread, otherwise false.
14694 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14695 struct lpfc_cqe *cqe)
14697 struct lpfc_cqe cqevt;
14698 bool workposted = false;
14700 /* Copy the work queue CQE and convert endian order if needed */
14701 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14703 /* Check and process for different type of WCQE and dispatch */
14704 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14705 case CQE_CODE_COMPL_WQE:
14706 /* Process the WQ/RQ complete event */
14707 phba->last_completion_time = jiffies;
14708 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14709 (struct lpfc_wcqe_complete *)&cqevt);
14711 case CQE_CODE_RELEASE_WQE:
14712 /* Process the WQ release event */
14713 lpfc_sli4_sp_handle_rel_wcqe(phba,
14714 (struct lpfc_wcqe_release *)&cqevt);
14716 case CQE_CODE_XRI_ABORTED:
14717 /* Process the WQ XRI abort event */
14718 phba->last_completion_time = jiffies;
14719 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14720 (struct sli4_wcqe_xri_aborted *)&cqevt);
14722 case CQE_CODE_RECEIVE:
14723 case CQE_CODE_RECEIVE_V1:
14724 /* Process the RQ event */
14725 phba->last_completion_time = jiffies;
14726 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14727 (struct lpfc_rcqe *)&cqevt);
14730 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14731 "0388 Not a valid WCQE code: x%x\n",
14732 bf_get(lpfc_cqe_code, &cqevt));
14739 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14740 * @phba: Pointer to HBA context object.
14741 * @eqe: Pointer to fast-path event queue entry.
14742 * @speq: Pointer to slow-path event queue.
14744 * This routine processes an event queue entry from the slow-path event queue.
14745 * It checks the MajorCode and MinorCode to determine whether this is a
14746 * completion event on a completion queue; if not, an error is logged and the
14747 * routine returns. Otherwise, it finds the corresponding completion queue,
14748 * processes all the entries on that completion queue, rearms the
14749 * completion queue, and then returns.
14753 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14754 struct lpfc_queue *speq)
14756 struct lpfc_queue *cq = NULL, *childq;
14760 /* Get the reference to the corresponding CQ */
14761 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14763 list_for_each_entry(childq, &speq->child_list, list) {
14764 if (childq->queue_id == cqid) {
14769 if (unlikely(!cq)) {
14770 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14771 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14772 "0365 Slow-path CQ identifier "
14773 "(%d) does not exist\n", cqid);
14777 /* Save EQ associated with this CQ */
14778 cq->assoc_qp = speq;
14780 if (is_kdump_kernel())
14781 ret = queue_work(phba->wq, &cq->spwork);
14783 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14786 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14787 "0390 Cannot schedule queue work "
14788 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14789 cqid, cq->queue_id, raw_smp_processor_id());
14793 * __lpfc_sli4_process_cq - Process elements of a CQ
14794 * @phba: Pointer to HBA context object.
14795 * @cq: Pointer to CQ to be processed
14796 * @handler: Routine to process each cqe
14797 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14798 * @poll_mode: Polling mode we were called from
14800 * This routine processes completion queue entries in a CQ. While a valid
14801 * queue element is found, the handler is called. During processing, checks
14802 * are made for periodic doorbell writes to let the hardware know of
14803 * element consumption.
14805 * If the max limit on cqes to process is hit, or there are no more valid
14806 * entries, the loop stops. If we processed a sufficient number of elements,
14807 * meaning there is sufficient load, rather than rearming and generating
14808 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14809 * indicates no rescheduling.
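 *
 * For example, the slow-path wrapper (__lpfc_sli4_sp_process_cq below)
 * drains a mailbox CQ with:
 *
 *	workposted |= __lpfc_sli4_process_cq(phba, cq,
 *					     lpfc_sli4_sp_handle_mcqe,
 *					     &delay, LPFC_QUEUE_WORK);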
14811 * Returns true if work was scheduled, false otherwise.
14814 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14815 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14816 struct lpfc_cqe *), unsigned long *delay,
14817 enum lpfc_poll_mode poll_mode)
14819 struct lpfc_cqe *cqe;
14820 bool workposted = false;
14821 int count = 0, consumed = 0;
14824 /* default - no reschedule */
14827 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14828 goto rearm_and_exit;
14830 /* Process all the entries to the CQ */
14832 cqe = lpfc_sli4_cq_get(cq);
14834 workposted |= handler(phba, cq, cqe);
14835 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14838 if (!(++count % cq->max_proc_limit))
14841 if (!(count % cq->notify_interval)) {
14842 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14845 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14848 if (count == LPFC_NVMET_CQ_NOTIFY)
14849 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14851 cqe = lpfc_sli4_cq_get(cq);
14853 if (count >= phba->cfg_cq_poll_threshold) {
14858 /* Note: complete the irq_poll softirq before rearming CQ */
14859 if (poll_mode == LPFC_IRQ_POLL)
14860 irq_poll_complete(&cq->iop);
14862 /* Track the max number of CQEs processed in 1 EQ */
14863 if (count > cq->CQ_max_cqe)
14864 cq->CQ_max_cqe = count;
14866 cq->assoc_qp->EQ_cqe_cnt += count;
14868 /* Catch the no cq entry condition */
14869 if (unlikely(count == 0))
14870 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14871 "0369 No entry from completion queue "
14872 "qid=%d\n", cq->queue_id);
14874 xchg(&cq->queue_claimed, 0);
14877 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14878 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14884 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14885 * @cq: pointer to CQ to process
14887 * This routine calls the cq processing routine with a handler specific
14888 * to the type of queue bound to it.
14890 * The CQ routine returns two values: the first is the calling status,
14891 * which indicates whether work was queued to the background discovery
14892 * thread. If true, the routine should wake up the discovery thread;
14893 * the second is the delay parameter. If non-zero, rather than rearming
14894 * the CQ and yet another interrupt, the CQ handler should be queued so
14895 * that it is processed in a subsequent polling action. The value of
14896 * the delay indicates when to reschedule it.
14899 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14901 struct lpfc_hba *phba = cq->phba;
14902 unsigned long delay;
14903 bool workposted = false;
14906 /* Process and rearm the CQ */
14907 switch (cq->type) {
14909 workposted |= __lpfc_sli4_process_cq(phba, cq,
14910 lpfc_sli4_sp_handle_mcqe,
14911 &delay, LPFC_QUEUE_WORK);
14914 if (cq->subtype == LPFC_IO)
14915 workposted |= __lpfc_sli4_process_cq(phba, cq,
14916 lpfc_sli4_fp_handle_cqe,
14917 &delay, LPFC_QUEUE_WORK);
14919 workposted |= __lpfc_sli4_process_cq(phba, cq,
14920 lpfc_sli4_sp_handle_cqe,
14921 &delay, LPFC_QUEUE_WORK);
14924 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14925 "0370 Invalid completion queue type (%d)\n",
14931 if (is_kdump_kernel())
14932 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14935 ret = queue_delayed_work_on(cq->chann, phba->wq,
14936 &cq->sched_spwork, delay);
14938 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14939 "0394 Cannot schedule queue work "
14940 "for cqid=%d on CPU %d\n",
14941 cq->queue_id, cq->chann);
14944 /* Wake up the worker thread if there is work to be done */
14946 lpfc_worker_wake_up(phba);
14950 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14952 * @work: pointer to work element
14954 * translates from the work handler and calls the slow-path handler.
14957 lpfc_sli4_sp_process_cq(struct work_struct *work)
14959 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14961 __lpfc_sli4_sp_process_cq(cq);
14965 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14966 * @work: pointer to work element
14968 * translates from the work handler and calls the slow-path handler.
14971 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14973 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14974 struct lpfc_queue, sched_spwork);
14976 __lpfc_sli4_sp_process_cq(cq);
14980 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14981 * @phba: Pointer to HBA context object.
14982 * @cq: Pointer to associated CQ
14983 * @wcqe: Pointer to work-queue completion queue entry.
14985 * This routine processes a fast-path work-queue completion entry from the
14986 * fast-path event queue for FCP command response completion.
14989 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14990 struct lpfc_wcqe_complete *wcqe)
14992 struct lpfc_sli_ring *pring = cq->pring;
14993 struct lpfc_iocbq *cmdiocbq;
14994 unsigned long iflags;
14996 /* Check for response status */
14997 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14998 /* If resource errors reported from HBA, reduce queue
14999 * depth of the SCSI device.
15001 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15002 IOSTAT_LOCAL_REJECT)) &&
15003 ((wcqe->parameter & IOERR_PARAM_MASK) ==
15004 IOERR_NO_RESOURCES))
15005 phba->lpfc_rampdown_queue_depth(phba);
15007 /* Log the cmpl status */
15008 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15009 "0373 FCP CQE cmpl: status=x%x: "
15010 "CQE: %08x %08x %08x %08x\n",
15011 bf_get(lpfc_wcqe_c_status, wcqe),
15012 wcqe->word0, wcqe->total_data_placed,
15013 wcqe->parameter, wcqe->word3);
15016 /* Look up the FCP command IOCB and create pseudo response IOCB */
15017 spin_lock_irqsave(&pring->ring_lock, iflags);
15018 pring->stats.iocb_event++;
15019 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15020 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15021 spin_unlock_irqrestore(&pring->ring_lock, iflags);
15022 if (unlikely(!cmdiocbq)) {
15023 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15024 "0374 FCP complete with no corresponding "
15025 "cmdiocb: iotag (%d)\n",
15026 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15029 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15030 cmdiocbq->isr_timestamp = cq->isr_timestamp;
15032 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15033 spin_lock_irqsave(&phba->hbalock, iflags);
15034 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15035 spin_unlock_irqrestore(&phba->hbalock, iflags);
15038 if (cmdiocbq->cmd_cmpl) {
15039 /* For FCP the flag is cleared in cmd_cmpl */
15040 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15041 cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15042 spin_lock_irqsave(&phba->hbalock, iflags);
15043 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15044 spin_unlock_irqrestore(&phba->hbalock, iflags);
15047 /* Pass the cmd_iocb and the wcqe to the upper layer */
15048 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15049 sizeof(struct lpfc_wcqe_complete));
15050 cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15052 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15053 "0375 FCP cmdiocb not callback function "
15055 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15060 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15061 * @phba: Pointer to HBA context object.
15062 * @cq: Pointer to completion queue.
15063 * @wcqe: Pointer to work-queue completion queue entry.
15065 * This routine handles a fast-path WQ entry consumed event by invoking the
15066 * proper WQ release routine on the matching fast-path WQ.
15069 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15070 struct lpfc_wcqe_release *wcqe)
15072 struct lpfc_queue *childwq;
15073 bool wqid_matched = false;
15076 /* Check for fast-path FCP work queue release */
15077 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15078 list_for_each_entry(childwq, &cq->child_list, list) {
15079 if (childwq->queue_id == hba_wqid) {
15080 lpfc_sli4_wq_release(childwq,
15081 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15082 if (childwq->q_flag & HBA_NVMET_WQFULL)
15083 lpfc_nvmet_wqfull_process(phba, childwq);
15084 wqid_matched = true;
15088 /* Report warning log message if no match found */
15089 if (!wqid_matched)
15090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15091 "2580 Fast-path wqe consume event carries "
15092 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
15096 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15097 * @phba: Pointer to HBA context object.
15098 * @cq: Pointer to completion queue.
15099 * @rcqe: Pointer to receive-queue completion queue entry.
15101 * This routine processes a receive-queue completion queue entry.
15103 * Return: true if work posted to worker thread, otherwise false.
15106 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15107 struct lpfc_rcqe *rcqe)
15109 bool workposted = false;
15110 struct lpfc_queue *hrq;
15111 struct lpfc_queue *drq;
15112 struct rqb_dmabuf *dma_buf;
15113 struct fc_frame_header *fc_hdr;
15114 struct lpfc_nvmet_tgtport *tgtp;
15115 uint32_t status, rq_id;
15116 unsigned long iflags;
15117 uint32_t fctl, idx;
15119 if ((phba->nvmet_support == 0) ||
15120 (phba->sli4_hba.nvmet_cqset == NULL))
15123 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15124 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15125 drq = phba->sli4_hba.nvmet_mrq_data[idx];
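	/* The driver relies on the NVMET CQ set being created with
	 * contiguous queue ids, so the offset from the first CQ id
	 * (computed above) selects the matching MRQ header/data pair.
	 */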
15127 /* sanity check on queue memory */
15128 if (unlikely(!hrq) || unlikely(!drq))
15131 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15132 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15134 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15136 if ((phba->nvmet_support == 0) ||
15137 (rq_id != hrq->queue_id))
15140 status = bf_get(lpfc_rcqe_status, rcqe);
15142 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15143 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15144 "6126 Receive Frame Truncated!!\n");
15146 case FC_STATUS_RQ_SUCCESS:
15147 spin_lock_irqsave(&phba->hbalock, iflags);
15148 lpfc_sli4_rq_release(hrq, drq);
15149 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15151 hrq->RQ_no_buf_found++;
15152 spin_unlock_irqrestore(&phba->hbalock, iflags);
15155 spin_unlock_irqrestore(&phba->hbalock, iflags);
15157 hrq->RQ_buf_posted--;
15158 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15160 /* Just some basic sanity checks on FCP Command frame */
15161 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15162 fc_hdr->fh_f_ctl[1] << 8 |
15163 fc_hdr->fh_f_ctl[2]);
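	/* F_CTL is a 24-bit field; for example, F_CTL bytes 0x29 0x00
	 * 0x00 assemble to fctl == 0x290000, i.e. FC_FC_FIRST_SEQ |
	 * FC_FC_END_SEQ | FC_FC_SEQ_INIT, which satisfies the F_CTL
	 * portion of the sanity check below.
	 */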
15165 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15166 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15167 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15170 if (fc_hdr->fh_type == FC_TYPE_FCP) {
15171 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15172 lpfc_nvmet_unsol_fcp_event(
15173 phba, idx, dma_buf, cq->isr_timestamp,
15174 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15178 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15180 case FC_STATUS_INSUFF_BUF_FRM_DISC:
15181 if (phba->nvmet_support) {
15182 tgtp = phba->targetport->private;
15183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15184 "6401 RQE Error x%x, posted %d err_cnt "
15186 status, hrq->RQ_buf_posted,
15187 hrq->RQ_no_posted_buf,
15188 atomic_read(&tgtp->rcv_fcp_cmd_in),
15189 atomic_read(&tgtp->rcv_fcp_cmd_out),
15190 atomic_read(&tgtp->xmt_fcp_release));
15194 case FC_STATUS_INSUFF_BUF_NEED_BUF:
15195 hrq->RQ_no_posted_buf++;
15196 /* Post more buffers if possible */
15204 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15205 * @phba: adapter with cq
15206 * @cq: Pointer to the completion queue.
15207 * @cqe: Pointer to fast-path completion queue entry.
15209 * This routine processes a fast-path work-queue completion entry from the
15210 * fast-path event queue for FCP command response completion.
15212 * Return: true if work posted to worker thread, otherwise false.
15215 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15216 struct lpfc_cqe *cqe)
15218 struct lpfc_wcqe_release wcqe;
15219 bool workposted = false;
15221 /* Copy the work queue CQE and convert endian order if needed */
15222 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15224 /* Check and process for different type of WCQE and dispatch */
15225 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15226 case CQE_CODE_COMPL_WQE:
15227 case CQE_CODE_NVME_ERSP:
15229 /* Process the WQ complete event */
15230 phba->last_completion_time = jiffies;
15231 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15232 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15233 (struct lpfc_wcqe_complete *)&wcqe);
15235 case CQE_CODE_RELEASE_WQE:
15236 cq->CQ_release_wqe++;
15237 /* Process the WQ release event */
15238 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15239 (struct lpfc_wcqe_release *)&wcqe);
15241 case CQE_CODE_XRI_ABORTED:
15242 cq->CQ_xri_aborted++;
15243 /* Process the WQ XRI abort event */
15244 phba->last_completion_time = jiffies;
15245 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15246 (struct sli4_wcqe_xri_aborted *)&wcqe);
15248 case CQE_CODE_RECEIVE_V1:
15249 case CQE_CODE_RECEIVE:
15250 phba->last_completion_time = jiffies;
15251 if (cq->subtype == LPFC_NVMET) {
15252 workposted = lpfc_sli4_nvmet_handle_rcqe(
15253 phba, cq, (struct lpfc_rcqe *)&wcqe);
15257 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15258 "0144 Not a valid CQE code: x%x\n",
15259 bf_get(lpfc_wcqe_c_code, &wcqe));
15266 * lpfc_sli4_sched_cq_work - Schedules cq work
15267 * @phba: Pointer to HBA context object.
15268 * @cq: Pointer to CQ
15271 * This routine checks the poll mode of the CQ corresponding to
15272 * cq->chann, then either schedules a softirq or queue_work to complete
15275 * The queue_work path is taken in NVMET mode, or when poll_mode is
15276 * LPFC_QUEUE_WORK. Otherwise, the softirq path is taken.
15279 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
15280 struct lpfc_queue *cq, uint16_t cqid)
15284 switch (cq->poll_mode) {
15285 case LPFC_IRQ_POLL:
15286 /* CGN mgmt is mutually exclusive from softirq processing */
15287 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
15288 irq_poll_sched(&cq->iop);
15292 case LPFC_QUEUE_WORK:
15294 if (is_kdump_kernel())
15295 ret = queue_work(phba->wq, &cq->irqwork);
15297 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15300 "0383 Cannot schedule queue work "
15301 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15302 cqid, cq->queue_id,
15303 raw_smp_processor_id());
15308 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15309 * @phba: Pointer to HBA context object.
15310 * @eq: Pointer to the queue structure.
15311 * @eqe: Pointer to fast-path event queue entry.
15313 * This routine processes an event queue entry from the fast-path event queue.
15314 * It checks the MajorCode and MinorCode to determine whether this is a
15315 * completion event on a completion queue; if not, an error is logged and the
15316 * routine returns. Otherwise, it finds the corresponding completion queue,
15317 * processes all the entries on the completion queue, rearms the
15318 * completion queue, and then returns.
15321 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15322 struct lpfc_eqe *eqe)
15324 struct lpfc_queue *cq = NULL;
15325 uint32_t qidx = eq->hdwq;
15328 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15330 "0366 Not a valid completion "
15331 "event: majorcode=x%x, minorcode=x%x\n",
15332 bf_get_le32(lpfc_eqe_major_code, eqe),
15333 bf_get_le32(lpfc_eqe_minor_code, eqe));
15337 /* Get the reference to the corresponding CQ */
15338 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15340 /* Use the fast lookup method first */
15341 if (cqid <= phba->sli4_hba.cq_max) {
15342 cq = phba->sli4_hba.cq_lookup[cqid];
15347 /* Next check for NVMET completion */
15348 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15349 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15350 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15351 /* Process NVMET unsol rcv */
15352 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15357 if (phba->sli4_hba.nvmels_cq &&
15358 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15359 /* Process NVME unsol rcv */
15360 cq = phba->sli4_hba.nvmels_cq;
15363 /* Otherwise this is a Slow path event */
15365 lpfc_sli4_sp_handle_eqe(phba, eqe,
15366 phba->sli4_hba.hdwq[qidx].hba_eq);
15371 if (unlikely(cqid != cq->queue_id)) {
15372 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15373 "0368 Miss-matched fast-path completion "
15374 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15375 cqid, cq->queue_id);
15380 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15381 if (phba->ktime_on)
15382 cq->isr_timestamp = ktime_get_ns();
15384 cq->isr_timestamp = 0;
15386 lpfc_sli4_sched_cq_work(phba, cq, cqid);
15390 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
15391 * @cq: Pointer to CQ to be processed
15392 * @poll_mode: Enum lpfc_poll_state to determine poll mode
15394 * This routine calls the cq processing routine with the handler for
15397 * The CQ routine returns two values: the first is the calling status,
15398 * which indicates whether work was queued to the background discovery
15399 * thread. If true, the routine should wake up the discovery thread;
15400 * the second is the delay parameter. If non-zero, rather than rearming
15401 * the CQ and yet another interrupt, the CQ handler should be queued so
15402 * that it is processed in a subsequent polling action. The value of
15403 * the delay indicates when to reschedule it.
15406 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
15407 enum lpfc_poll_mode poll_mode)
15409 struct lpfc_hba *phba = cq->phba;
15410 unsigned long delay;
15411 bool workposted = false;
15414 /* process and rearm the CQ */
15415 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15416 &delay, poll_mode);
15419 if (is_kdump_kernel())
15420 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15423 ret = queue_delayed_work_on(cq->chann, phba->wq,
15424 &cq->sched_irqwork, delay);
15426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15427 "0367 Cannot schedule queue work "
15428 "for cqid=%d on CPU %d\n",
15429 cq->queue_id, cq->chann);
15432 /* Wake up the worker thread if there is work to be done */
15434 lpfc_worker_wake_up(phba);
15438 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15440 * @work: pointer to work element
15442 * translates from the work handler and calls the fast-path handler.
15445 lpfc_sli4_hba_process_cq(struct work_struct *work)
15447 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15449 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15453 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15454 * @work: pointer to work element
15456 * translates from the work handler and calls the fast-path handler.
15459 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15461 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15462 struct lpfc_queue, sched_irqwork);
15464 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15468 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15469 * @irq: Interrupt number.
15470 * @dev_id: The device context pointer.
15472 * This function is directly called from the PCI layer as an interrupt
15473 * service routine when device with SLI-4 interface spec is enabled with
15474 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15475 * ring event in the HBA. However, when the device is enabled with either
15476 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15477 * device-level interrupt handler. When the PCI slot is in error recovery
15478 * or the HBA is undergoing initialization, the interrupt handler will not
15479 * process the interrupt. The SCSI FCP fast-path ring events are handled in
15480 * the interrupt context. This function is called without any lock held.
15481 * It gets the hbalock to access and update SLI data structures. Note that
15482 * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
15483 * equal to the FCP CQ index.
15485 * The link attention and ELS ring attention events are handled
15486 * by the worker thread. The interrupt handler signals the worker thread
15487 * and returns for these events. This function is called without any lock
15488 * held. It gets the hbalock to access and update SLI data structures.
15490 * This function returns IRQ_HANDLED when the interrupt is handled, else it
15491 * returns IRQ_NONE.
15494 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15496 struct lpfc_hba *phba;
15497 struct lpfc_hba_eq_hdl *hba_eq_hdl;
15498 struct lpfc_queue *fpeq;
15499 unsigned long iflag;
15502 struct lpfc_eq_intr_info *eqi;
15504 /* Get the driver's phba structure from the dev_id */
15505 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15506 phba = hba_eq_hdl->phba;
15507 hba_eqidx = hba_eq_hdl->idx;
15509 if (unlikely(!phba))
15510 return IRQ_NONE;
15511 if (unlikely(!phba->sli4_hba.hdwq))
15512 return IRQ_NONE;
15514 /* Get to the EQ struct associated with this vector */
15515 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15516 if (unlikely(!fpeq))
15517 return IRQ_NONE;
15519 /* Check device state for handling interrupt */
15520 if (unlikely(lpfc_intr_state_check(phba))) {
15521 /* Check again for link_state with lock held */
15522 spin_lock_irqsave(&phba->hbalock, iflag);
15523 if (phba->link_state < LPFC_LINK_DOWN)
15524 /* Flush, clear interrupt, and rearm the EQ */
15525 lpfc_sli4_eqcq_flush(phba, fpeq);
15526 spin_unlock_irqrestore(&phba->hbalock, iflag);
15527 return IRQ_NONE;
15530 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15533 fpeq->last_cpu = raw_smp_processor_id();
15535 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15536 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15537 phba->cfg_auto_imax &&
15538 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15539 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15540 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
15542 /* process and rearm the EQ */
15543 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
15545 if (unlikely(ecount == 0)) {
15546 fpeq->EQ_no_entry++;
15547 if (phba->intr_type == MSIX)
15548 /* MSI-X vectors are not shared, so an interrupt with no EQE is unexpected */
15549 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15550 "0358 MSI-X interrupt with no EQE\n");
15551 else
15552 /* Non MSI-X interrupts may be shared; treat as an EQ share INT */
15553 return IRQ_NONE;
15556 return IRQ_HANDLED;
15557 } /* lpfc_sli4_hba_intr_handler */
15560 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15561 * @irq: Interrupt number.
15562 * @dev_id: The device context pointer.
15564 * This function is the device-level interrupt handler to device with SLI-4
15565 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15566 * interrupt mode is enabled and there is an event in the HBA which requires
15567 * driver attention. This function invokes the slow-path interrupt attention
15568 * handling function and fast-path interrupt attention handling function in
15569 * turn to process the relevant HBA attention events. This function is called
15570 * without any lock held. It gets the hbalock to access and update SLI data
15571 * structures.
15573 * This function returns IRQ_HANDLED when the interrupt is handled, else it
15574 * returns IRQ_NONE.
15577 lpfc_sli4_intr_handler(int irq, void *dev_id)
15579 struct lpfc_hba *phba;
15580 irqreturn_t hba_irq_rc;
15581 bool hba_handled = false;
15584 /* Get the driver's phba structure from the dev_id */
15585 phba = (struct lpfc_hba *)dev_id;
15587 if (unlikely(!phba))
15591 * Invoke fast-path host attention interrupt handling as appropriate.
15593 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15594 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15595 &phba->sli4_hba.hba_eq_hdl[qidx]);
15596 if (hba_irq_rc == IRQ_HANDLED)
15597 hba_handled |= true;
15600 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
15601 } /* lpfc_sli4_intr_handler */
15603 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15605 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15606 struct lpfc_queue *eq;
15611 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15612 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
15613 if (!list_empty(&phba->poll_list))
15614 mod_timer(&phba->cpuhp_poll_timer,
15615 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15620 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
15622 struct lpfc_hba *phba = eq->phba;
15626 * Unlocking an irq is one of the entry points to check
15627 * for re-schedule, but we are good for the io submission
15628 * path as the midlayer does a get_cpu to glue us in. Flush
15629 * out the invalidate queue so we can see the updated
15630 * value.
15631 */
15634 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
15635 /* We will not likely get the completion for the caller
15636 * during this iteration, but that's fine.
15637 * Future io's coming on this eq should be able to
15638 * pick it up. As for the case of single io's, they
15639 * will be handled through a sched from the polling timer
15640 * function, which is currently triggered every 1 msec.
15642 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
15647 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15649 struct lpfc_hba *phba = eq->phba;
15651 /* kickstart slowpath processing if needed */
15652 if (list_empty(&phba->poll_list))
15653 mod_timer(&phba->cpuhp_poll_timer,
15654 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15656 list_add_rcu(&eq->_poll_list, &phba->poll_list);
15660 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15662 struct lpfc_hba *phba = eq->phba;
15664 /* Disable slowpath processing for this eq. Kick-start the eq
15665 * by re-arming it ASAP.
15667 list_del_rcu(&eq->_poll_list);
15670 if (list_empty(&phba->poll_list))
15671 del_timer_sync(&phba->cpuhp_poll_timer);
15674 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15676 struct lpfc_queue *eq, *next;
15678 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15679 list_del(&eq->_poll_list);
15681 INIT_LIST_HEAD(&phba->poll_list);
15686 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15688 if (mode == eq->mode)
15689 return;
15690 /*
15691 * currently this function is only called during a hotplug
15692 * event and the cpu on which this function is executing
15693 * is going offline. By now the hotplug has instructed
15694 * the scheduler to remove this cpu from cpu active mask.
15695 * So we don't need to worry about being put aside by the
15696 * scheduler for a high priority process. Yes, interrupts
15697 * could still come in, but they are known to retire ASAP.
15700 /* Disable polling in the fastpath */
15701 WRITE_ONCE(eq->mode, mode);
15702 /* flush out the store buffer */
15703 smp_wmb();
15706 * Add this eq to the polling list and start polling. For
15707 * a grace period both interrupt handler and poller will
15708 * try to process the eq _but_ that's fine. We have a
15709 * synchronization mechanism in place (queue_claimed) to
15710 * deal with it. This is just a draining phase for the
15711 * interrupt handler (not the eq's), as we have guaranteed
15712 * through the barrier that all the CPUs have seen the new
15713 * CQ_POLLED state, which effectively disables re-arming of
15714 * the EQ. The whole idea is that eq's die off eventually, as
15715 * we are not re-arming EQ's anymore.
15717 mode ? lpfc_sli4_add_to_poll_list(eq) :
15718 lpfc_sli4_remove_from_poll_list(eq);
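/*
 * Editorial sketch (illustrative only, not driver code): the mode switch
 * above is a publish-with-barrier pattern - store the new mode, make the
 * store visible to all CPUs, then change polling-list membership. In
 * portable C11 the same ordering can be expressed with a release store
 * (all names here are hypothetical):
 */
#include <stdatomic.h>

enum example_eq_mode { EXAMPLE_EQ_INTERRUPT, EXAMPLE_EQ_POLL };

static void example_switch_eqmode(_Atomic int *mode, int new_mode,
				  void (*add_to_poll_list)(void),
				  void (*remove_from_poll_list)(void))
{
	if (atomic_load_explicit(mode, memory_order_relaxed) == new_mode)
		return;

	/* The release store plays the role of WRITE_ONCE() + smp_wmb(). */
	atomic_store_explicit(mode, new_mode, memory_order_release);

	if (new_mode == EXAMPLE_EQ_POLL)
		add_to_poll_list();
	else
		remove_from_poll_list();
}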
15721 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15723 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15726 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15728 struct lpfc_hba *phba = eq->phba;
15730 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15732 /* Kick-start the pending io's in h/w.
15733 * Once we switch back to interrupt processing on an eq,
15734 * the io path will only arm the eq when it receives a
15735 * completion. But since the eq is in the disarmed state,
15736 * it doesn't receive a completion. This creates a
15737 * deadlock scenario.
15739 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15743 * lpfc_sli4_queue_free - free a queue structure and associated memory
15744 * @queue: The queue structure to free.
15746 * This function frees a queue structure and the DMAable memory used for
15747 * the host resident queue. This function must be called after destroying the
15748 * queue on the HBA.
15751 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15753 struct lpfc_dmabuf *dmabuf;
15758 if (!list_empty(&queue->wq_list))
15759 list_del(&queue->wq_list);
15761 while (!list_empty(&queue->page_list)) {
15762 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15763 list);
15764 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15765 dmabuf->virt, dmabuf->phys);
15766 kfree(dmabuf);
15769 lpfc_free_rq_buffer(queue->phba, queue);
15770 kfree(queue->rqbp);
15773 if (!list_empty(&queue->cpu_list))
15774 list_del(&queue->cpu_list);
15781 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15782 * @phba: The HBA that this queue is being created on.
15783 * @page_size: The size of a queue page
15784 * @entry_size: The size of each queue entry for this queue.
15785 * @entry_count: The number of entries that this queue will handle.
15786 * @cpu: The cpu that will primarily utilize this queue.
15788 * This function allocates a queue structure and the DMAable memory used for
15789 * the host resident queue. This function must be called before creating the
15790 * queue on the HBA.
15792 struct lpfc_queue *
15793 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15794 uint32_t entry_size, uint32_t entry_count, int cpu)
15796 struct lpfc_queue *queue;
15797 struct lpfc_dmabuf *dmabuf;
15798 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15801 if (!phba->sli4_hba.pc_sli4_params.supported)
15802 hw_page_size = page_size;
15804 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15806 /* If needed, adjust page count to match the max the adapter supports */
15807 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15808 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15810 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15811 GFP_KERNEL, cpu_to_node(cpu));
15815 INIT_LIST_HEAD(&queue->list);
15816 INIT_LIST_HEAD(&queue->_poll_list);
15817 INIT_LIST_HEAD(&queue->wq_list);
15818 INIT_LIST_HEAD(&queue->wqfull_list);
15819 INIT_LIST_HEAD(&queue->page_list);
15820 INIT_LIST_HEAD(&queue->child_list);
15821 INIT_LIST_HEAD(&queue->cpu_list);
15823 /* Set queue parameters now. If the system cannot provide memory
15824 * resources, the free routine needs to know what was allocated.
15826 queue->page_count = pgcnt;
15827 queue->q_pgs = (void **)&queue[1];
15828 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15829 queue->entry_size = entry_size;
15830 queue->entry_count = entry_count;
15831 queue->page_size = hw_page_size;
15832 queue->phba = phba;
15834 for (x = 0; x < queue->page_count; x++) {
15835 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15836 dev_to_node(&phba->pcidev->dev));
15839 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15840 hw_page_size, &dmabuf->phys,
15842 if (!dmabuf->virt) {
15846 dmabuf->buffer_tag = x;
15847 list_add_tail(&dmabuf->list, &queue->page_list);
15848 /* use lpfc_sli4_qe to index a particular entry in this page */
15849 queue->q_pgs[x] = dmabuf->virt;
15851 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15852 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15853 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15854 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15856 /* notify_interval will be set during q creation */
15860 lpfc_sli4_queue_free(queue);
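/*
 * Editorial sketch (illustrative only, not driver code): the page count
 * used by lpfc_sli4_queue_alloc() above is the total entry footprint
 * rounded up to whole adapter pages, then clamped to the adapter maximum
 * (pc_sli4_params.wqpcnt). Plain C with hypothetical parameter names:
 */
static unsigned int example_queue_page_count(unsigned int entry_size,
					     unsigned int entry_count,
					     unsigned int hw_page_size,
					     unsigned int max_pages)
{
	/* Round up: mirrors ALIGN(entry_size * entry_count, hw_page_size). */
	unsigned int pgcnt = (entry_size * entry_count + hw_page_size - 1) /
			     hw_page_size;

	return pgcnt > max_pages ? max_pages : pgcnt;
}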
15865 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15866 * @phba: HBA structure that indicates port to create a queue on.
15867 * @pci_barset: PCI BAR set flag.
15869 * This function returns the host memory address to which the specified
15870 * PCI BAR set has been iomapped. The returned host memory address can be
15871 * NULL if the BAR set is not recognized.
15873 static void __iomem *
15874 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15879 switch (pci_barset) {
15880 case WQ_PCI_BAR_0_AND_1:
15881 return phba->pci_bar0_memmap_p;
15882 case WQ_PCI_BAR_2_AND_3:
15883 return phba->pci_bar2_memmap_p;
15884 case WQ_PCI_BAR_4_AND_5:
15885 return phba->pci_bar4_memmap_p;
15893 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15894 * @phba: HBA structure that EQs are on.
15895 * @startq: The starting EQ index to modify
15896 * @numq: The number of EQs (consecutive indexes) to modify
15897 * @usdelay: amount of delay
15899 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15900 * is set either by writing to a register (if supported by the SLI Port)
15901 * or by mailbox command. The mailbox command allows several EQs to be
15904 * The @phba struct is used to send a mailbox command to HBA. The @startq
15905 * is used to get the starting EQ index to change. The @numq value is
15906 * used to specify how many consecutive EQ indexes, starting at EQ index,
15907 * are to be changed. This function is synchronous and waits for any
15908 * mailbox commands to finish before returning.
15910 * On success this function will return a zero. If unable to allocate
15911 * enough memory this function will return -ENOMEM. If a mailbox command
15912 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
15913 * have had their delay multiplier changed.
15916 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15917 uint32_t numq, uint32_t usdelay)
15919 struct lpfc_mbx_modify_eq_delay *eq_delay;
15920 LPFC_MBOXQ_t *mbox;
15921 struct lpfc_queue *eq;
15922 int cnt = 0, rc, length;
15923 uint32_t shdr_status, shdr_add_status;
15926 union lpfc_sli4_cfg_shdr *shdr;
15928 if (startq >= phba->cfg_irq_chann)
15931 if (usdelay > 0xFFFF) {
15932 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15933 "6429 usdelay %d too large. Scaled down to "
15934 "0xFFFF.\n", usdelay);
15938 /* set values by EQ_DELAY register if supported */
15939 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15940 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15941 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15945 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15953 /* Otherwise, set values by mailbox cmd */
15955 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15956 if (!mbox) {
15957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15958 "6428 Failed allocating mailbox cmd buffer."
15959 " EQ delay was not set.\n");
15960 return;
15961 }
15962 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15963 sizeof(struct lpfc_sli4_cfg_mhdr));
15964 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15965 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15966 length, LPFC_SLI4_MBX_EMBED);
15967 eq_delay = &mbox->u.mqe.un.eq_delay;
15969 /* Calculate delay multiplier from maximum interrupts per second */
15970 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15973 if (dmult > LPFC_DMULT_MAX)
15974 dmult = LPFC_DMULT_MAX;
15976 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15977 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15980 eq->q_mode = usdelay;
15981 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15982 eq_delay->u.request.eq[cnt].phase = 0;
15983 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15988 eq_delay->u.request.num_eq = cnt;
15990 mbox->vport = phba->pport;
15991 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15992 mbox->ctx_buf = NULL;
15993 mbox->ctx_ndlp = NULL;
15994 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15995 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15996 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15997 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15998 if (shdr_status || shdr_add_status || rc) {
15999 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16000 "2512 MODIFY_EQ_DELAY mailbox failed with "
16001 "status x%x add_status x%x, mbx status x%x\n",
16002 shdr_status, shdr_add_status, rc);
16004 mempool_free(mbox, phba->mbox_mem_pool);
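/*
 * Editorial sketch (illustrative only, not driver code): the delay
 * multiplier math used by lpfc_modify_hba_eq_delay() above, scaling a
 * microsecond delay by a constant and clamping to the maximum. The macro
 * values below are stand-ins, not the driver's real constants:
 */
#define EXAMPLE_DMULT_CONST	651042UL	/* hypothetical value */
#define EXAMPLE_SEC_TO_USEC	1000000UL
#define EXAMPLE_DMULT_MAX	0xFFUL		/* hypothetical value */

static unsigned long example_usdelay_to_dmult(unsigned long usdelay)
{
	unsigned long dmult = (usdelay * EXAMPLE_DMULT_CONST) /
			      EXAMPLE_SEC_TO_USEC;

	return dmult > EXAMPLE_DMULT_MAX ? EXAMPLE_DMULT_MAX : dmult;
}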
16009 * lpfc_eq_create - Create an Event Queue on the HBA
16010 * @phba: HBA structure that indicates port to create a queue on.
16011 * @eq: The queue structure to use to create the event queue.
16012 * @imax: The maximum interrupt per second limit.
16014 * This function creates an event queue, as detailed in @eq, on a port,
16015 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16017 * The @phba struct is used to send mailbox command to HBA. The @eq struct
16018 * is used to get the entry count and entry size that are necessary to
16019 * determine the number of pages to allocate and use for this queue. This
16020 * function will send the EQ_CREATE mailbox command to the HBA to setup the
16021 * event queue. This function is synchronous and waits for the mailbox
16022 * command to finish before continuing.
16024 * On success this function will return a zero. If unable to allocate enough
16025 * memory this function will return -ENOMEM. If the queue create mailbox command
16026 * fails this function will return -ENXIO.
16029 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16031 struct lpfc_mbx_eq_create *eq_create;
16032 LPFC_MBOXQ_t *mbox;
16033 int rc, length, status = 0;
16034 struct lpfc_dmabuf *dmabuf;
16035 uint32_t shdr_status, shdr_add_status;
16036 union lpfc_sli4_cfg_shdr *shdr;
16038 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16040 /* sanity check on queue memory */
16043 if (!phba->sli4_hba.pc_sli4_params.supported)
16044 hw_page_size = SLI4_PAGE_SIZE;
16046 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16049 length = (sizeof(struct lpfc_mbx_eq_create) -
16050 sizeof(struct lpfc_sli4_cfg_mhdr));
16051 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16052 LPFC_MBOX_OPCODE_EQ_CREATE,
16053 length, LPFC_SLI4_MBX_EMBED);
16054 eq_create = &mbox->u.mqe.un.eq_create;
16055 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16056 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16058 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16060 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16062 /* Use version 2 of CREATE_EQ if eqav is set */
16063 if (phba->sli4_hba.pc_sli4_params.eqav) {
16064 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16065 LPFC_Q_CREATE_VERSION_2);
16066 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16067 phba->sli4_hba.pc_sli4_params.eqav);
16070 /* don't setup delay multiplier using EQ_CREATE */
16072 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16074 switch (eq->entry_count) {
16076 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16077 "0360 Unsupported EQ count. (%d)\n",
16079 if (eq->entry_count < 256) {
16083 fallthrough; /* otherwise default to smallest count */
16085 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16089 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16093 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16097 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16101 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16105 list_for_each_entry(dmabuf, &eq->page_list, list) {
16106 memset(dmabuf->virt, 0, hw_page_size);
16107 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16108 putPaddrLow(dmabuf->phys);
16109 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16110 putPaddrHigh(dmabuf->phys);
16112 mbox->vport = phba->pport;
16113 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16114 mbox->ctx_buf = NULL;
16115 mbox->ctx_ndlp = NULL;
16116 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16117 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16118 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16119 if (shdr_status || shdr_add_status || rc) {
16120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16121 "2500 EQ_CREATE mailbox failed with "
16122 "status x%x add_status x%x, mbx status x%x\n",
16123 shdr_status, shdr_add_status, rc);
16126 eq->type = LPFC_EQ;
16127 eq->subtype = LPFC_NONE;
16128 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16129 if (eq->queue_id == 0xFFFF)
16131 eq->host_index = 0;
16132 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16133 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16135 mempool_free(mbox, phba->mbox_mem_pool);
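/*
 * Editorial sketch (illustrative only, not driver code): the entry-count
 * switch in lpfc_eq_create() above only accepts a fixed set of EQ sizes,
 * rejects counts below the minimum, and otherwise falls back to the
 * smallest supported size. Modeled as a plain lookup (assumed sizes
 * 256..4096, matching the cases above):
 */
static int example_eq_round_count(unsigned int requested)
{
	static const unsigned int supported[] = { 256, 512, 1024, 2048, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
		if (requested == supported[i])
			return (int)requested;

	/* Below the minimum there is nothing to fall back to. */
	if (requested < supported[0])
		return -1;

	/* Otherwise default to the smallest supported size. */
	return (int)supported[0];
}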
16139 static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
16141 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
16143 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
16145 return 1;
16149 * lpfc_cq_create - Create a Completion Queue on the HBA
16150 * @phba: HBA structure that indicates port to create a queue on.
16151 * @cq: The queue structure to use to create the completion queue.
16152 * @eq: The event queue to bind this completion queue to.
16153 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16154 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16156 * This function creates a completion queue, as detailed in @cq, on a port,
16157 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16159 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16160 * is used to get the entry count and entry size that are necessary to
16161 * determine the number of pages to allocate and use for this queue. The @eq
16162 * is used to indicate which event queue to bind this completion queue to. This
16163 * function will send the CQ_CREATE mailbox command to the HBA to setup the
16164 * completion queue. This function is synchronous and waits for the mailbox
16165 * command to finish before continuing.
16167 * On success this function will return a zero. If unable to allocate enough
16168 * memory this function will return -ENOMEM. If the queue create mailbox command
16169 * fails this function will return -ENXIO.
16172 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16173 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16175 struct lpfc_mbx_cq_create *cq_create;
16176 struct lpfc_dmabuf *dmabuf;
16177 LPFC_MBOXQ_t *mbox;
16178 int rc, length, status = 0;
16179 uint32_t shdr_status, shdr_add_status;
16180 union lpfc_sli4_cfg_shdr *shdr;
16182 /* sanity check on queue memory */
16186 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16189 length = (sizeof(struct lpfc_mbx_cq_create) -
16190 sizeof(struct lpfc_sli4_cfg_mhdr));
16191 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16192 LPFC_MBOX_OPCODE_CQ_CREATE,
16193 length, LPFC_SLI4_MBX_EMBED);
16194 cq_create = &mbox->u.mqe.un.cq_create;
16195 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16196 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16198 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16199 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16200 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16201 phba->sli4_hba.pc_sli4_params.cqv);
16202 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16203 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16204 (cq->page_size / SLI4_PAGE_SIZE));
16205 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16207 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16208 phba->sli4_hba.pc_sli4_params.cqav);
16210 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16213 switch (cq->entry_count) {
16216 if (phba->sli4_hba.pc_sli4_params.cqv ==
16217 LPFC_Q_CREATE_VERSION_2) {
16218 cq_create->u.request.context.lpfc_cq_context_count =
16220 bf_set(lpfc_cq_context_count,
16221 &cq_create->u.request.context,
16222 LPFC_CQ_CNT_WORD7);
16227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16228 "0361 Unsupported CQ count: "
16229 "entry cnt %d sz %d pg cnt %d\n",
16230 cq->entry_count, cq->entry_size,
16232 if (cq->entry_count < 256) {
16236 fallthrough; /* otherwise default to smallest count */
16238 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16242 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16246 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16250 list_for_each_entry(dmabuf, &cq->page_list, list) {
16251 memset(dmabuf->virt, 0, cq->page_size);
16252 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16253 putPaddrLow(dmabuf->phys);
16254 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16255 putPaddrHigh(dmabuf->phys);
16257 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16259 /* The IOCTL status is embedded in the mailbox subheader. */
16260 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16261 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16262 if (shdr_status || shdr_add_status || rc) {
16263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16264 "2501 CQ_CREATE mailbox failed with "
16265 "status x%x add_status x%x, mbx status x%x\n",
16266 shdr_status, shdr_add_status, rc);
16270 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16271 if (cq->queue_id == 0xFFFF) {
16275 /* link the cq onto the parent eq child list */
16276 list_add_tail(&cq->list, &eq->child_list);
16277 /* Set up completion queue's type and subtype */
16279 cq->subtype = subtype;
16280 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16281 cq->assoc_qid = eq->queue_id;
16283 cq->host_index = 0;
16284 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16285 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16287 if (cq->queue_id > phba->sli4_hba.cq_max)
16288 phba->sli4_hba.cq_max = cq->queue_id;
16290 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
16292 mempool_free(mbox, phba->mbox_mem_pool);
16297 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16298 * @phba: HBA structure that indicates port to create a queue on.
16299 * @cqp: The queue structure array to use to create the completion queues.
16300 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16301 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16302 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16304 * This function creates a set of completion queues to support MRQ,
16305 * as detailed in @cqp, on a port,
16306 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
16308 * The @phba struct is used to send mailbox command to HBA. The @cqp array
16309 * is used to get the entry count and entry size that are necessary to
16310 * determine the number of pages to allocate and use for these queues. The
16311 * EQs in @hdwq indicate which event queue to bind each completion queue to. This
16312 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
16313 * completion queue. This function is synchronous and waits for the mailbox
16314 * command to finish before continuing.
16316 * On success this function will return a zero. If unable to allocate enough
16317 * memory this function will return -ENOMEM. If the queue create mailbox command
16318 * fails this function will return -ENXIO.
16321 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16322 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16325 struct lpfc_queue *cq;
16326 struct lpfc_queue *eq;
16327 struct lpfc_mbx_cq_create_set *cq_set;
16328 struct lpfc_dmabuf *dmabuf;
16329 LPFC_MBOXQ_t *mbox;
16330 int rc, length, alloclen, status = 0;
16331 int cnt, idx, numcq, page_idx = 0;
16332 uint32_t shdr_status, shdr_add_status;
16333 union lpfc_sli4_cfg_shdr *shdr;
16334 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16336 /* sanity check on queue memory */
16337 numcq = phba->cfg_nvmet_mrq;
16338 if (!cqp || !hdwq || !numcq)
16341 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16345 length = sizeof(struct lpfc_mbx_cq_create_set);
16346 length += ((numcq * cqp[0]->page_count) *
16347 sizeof(struct dma_address));
16348 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16349 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16350 LPFC_SLI4_MBX_NEMBED);
16351 if (alloclen < length) {
16352 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16353 "3098 Allocated DMA memory size (%d) is "
16354 "less than the requested DMA memory size "
16355 "(%d)\n", alloclen, length);
16359 cq_set = mbox->sge_array->addr[0];
16360 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16361 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16363 for (idx = 0; idx < numcq; idx++) {
16365 eq = hdwq[idx].hba_eq;
16370 if (!phba->sli4_hba.pc_sli4_params.supported)
16371 hw_page_size = cq->page_size;
16375 bf_set(lpfc_mbx_cq_create_set_page_size,
16376 &cq_set->u.request,
16377 (hw_page_size / SLI4_PAGE_SIZE));
16378 bf_set(lpfc_mbx_cq_create_set_num_pages,
16379 &cq_set->u.request, cq->page_count);
16380 bf_set(lpfc_mbx_cq_create_set_evt,
16381 &cq_set->u.request, 1);
16382 bf_set(lpfc_mbx_cq_create_set_valid,
16383 &cq_set->u.request, 1);
16384 bf_set(lpfc_mbx_cq_create_set_cqe_size,
16385 &cq_set->u.request, 0);
16386 bf_set(lpfc_mbx_cq_create_set_num_cq,
16387 &cq_set->u.request, numcq);
16388 bf_set(lpfc_mbx_cq_create_set_autovalid,
16389 &cq_set->u.request,
16390 phba->sli4_hba.pc_sli4_params.cqav);
16391 switch (cq->entry_count) {
16394 if (phba->sli4_hba.pc_sli4_params.cqv ==
16395 LPFC_Q_CREATE_VERSION_2) {
16396 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16397 &cq_set->u.request,
16399 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16400 &cq_set->u.request,
16401 LPFC_CQ_CNT_WORD7);
16406 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16407 "3118 Bad CQ count. (%d)\n",
16409 if (cq->entry_count < 256) {
16413 fallthrough; /* otherwise default to smallest */
16415 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16416 &cq_set->u.request, LPFC_CQ_CNT_256);
16419 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16420 &cq_set->u.request, LPFC_CQ_CNT_512);
16423 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16424 &cq_set->u.request, LPFC_CQ_CNT_1024);
16427 bf_set(lpfc_mbx_cq_create_set_eq_id0,
16428 &cq_set->u.request, eq->queue_id);
16431 bf_set(lpfc_mbx_cq_create_set_eq_id1,
16432 &cq_set->u.request, eq->queue_id);
16435 bf_set(lpfc_mbx_cq_create_set_eq_id2,
16436 &cq_set->u.request, eq->queue_id);
16439 bf_set(lpfc_mbx_cq_create_set_eq_id3,
16440 &cq_set->u.request, eq->queue_id);
16443 bf_set(lpfc_mbx_cq_create_set_eq_id4,
16444 &cq_set->u.request, eq->queue_id);
16447 bf_set(lpfc_mbx_cq_create_set_eq_id5,
16448 &cq_set->u.request, eq->queue_id);
16451 bf_set(lpfc_mbx_cq_create_set_eq_id6,
16452 &cq_set->u.request, eq->queue_id);
16455 bf_set(lpfc_mbx_cq_create_set_eq_id7,
16456 &cq_set->u.request, eq->queue_id);
16459 bf_set(lpfc_mbx_cq_create_set_eq_id8,
16460 &cq_set->u.request, eq->queue_id);
16463 bf_set(lpfc_mbx_cq_create_set_eq_id9,
16464 &cq_set->u.request, eq->queue_id);
16467 bf_set(lpfc_mbx_cq_create_set_eq_id10,
16468 &cq_set->u.request, eq->queue_id);
16471 bf_set(lpfc_mbx_cq_create_set_eq_id11,
16472 &cq_set->u.request, eq->queue_id);
16475 bf_set(lpfc_mbx_cq_create_set_eq_id12,
16476 &cq_set->u.request, eq->queue_id);
16479 bf_set(lpfc_mbx_cq_create_set_eq_id13,
16480 &cq_set->u.request, eq->queue_id);
16483 bf_set(lpfc_mbx_cq_create_set_eq_id14,
16484 &cq_set->u.request, eq->queue_id);
16487 bf_set(lpfc_mbx_cq_create_set_eq_id15,
16488 &cq_set->u.request, eq->queue_id);
16492 /* link the cq onto the parent eq child list */
16493 list_add_tail(&cq->list, &eq->child_list);
16494 /* Set up completion queue's type and subtype */
16496 cq->subtype = subtype;
16497 cq->assoc_qid = eq->queue_id;
16499 cq->host_index = 0;
16500 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16501 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16506 list_for_each_entry(dmabuf, &cq->page_list, list) {
16507 memset(dmabuf->virt, 0, hw_page_size);
16508 cnt = page_idx + dmabuf->buffer_tag;
16509 cq_set->u.request.page[cnt].addr_lo =
16510 putPaddrLow(dmabuf->phys);
16511 cq_set->u.request.page[cnt].addr_hi =
16512 putPaddrHigh(dmabuf->phys);
16518 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16520 /* The IOCTL status is embedded in the mailbox subheader. */
16521 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16522 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16523 if (shdr_status || shdr_add_status || rc) {
16524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16525 "3119 CQ_CREATE_SET mailbox failed with "
16526 "status x%x add_status x%x, mbx status x%x\n",
16527 shdr_status, shdr_add_status, rc);
16531 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16532 if (rc == 0xFFFF) {
16537 for (idx = 0; idx < numcq; idx++) {
16539 cq->queue_id = rc + idx;
16540 if (cq->queue_id > phba->sli4_hba.cq_max)
16541 phba->sli4_hba.cq_max = cq->queue_id;
16545 lpfc_sli4_mbox_cmd_free(phba, mbox);
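/*
 * Editorial sketch (illustrative only, not driver code): CREATE_CQ_SET is
 * a non-embedded mailbox command, so its payload length is the fixed
 * request structure plus one DMA address per page per CQ, as computed at
 * the top of lpfc_cq_create_set() above. The address struct here is a
 * stand-in for the driver's struct dma_address:
 */
struct example_dma_address {
	unsigned int addr_lo;
	unsigned int addr_hi;
};

static unsigned long example_cq_set_length(unsigned long fixed_len,
					   unsigned int numcq,
					   unsigned int pages_per_cq)
{
	return fixed_len + (unsigned long)numcq * pages_per_cq *
	       sizeof(struct example_dma_address);
}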
16550 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
16551 * @phba: HBA structure that indicates port to create a queue on.
16552 * @mq: The queue structure to use to create the mailbox queue.
16553 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16554 * @cq: The completion queue to associate with this mq.
16556 * This function provides failback (fb) functionality when the
16557 * mq_create_ext fails on older FW generations. It's purpose is identical
16558 * to mq_create_ext otherwise.
16560 * This routine cannot fail as all attributes were previously accessed and
16561 * initialized in mq_create_ext.
16564 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16565 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16567 struct lpfc_mbx_mq_create *mq_create;
16568 struct lpfc_dmabuf *dmabuf;
16571 length = (sizeof(struct lpfc_mbx_mq_create) -
16572 sizeof(struct lpfc_sli4_cfg_mhdr));
16573 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16574 LPFC_MBOX_OPCODE_MQ_CREATE,
16575 length, LPFC_SLI4_MBX_EMBED);
16576 mq_create = &mbox->u.mqe.un.mq_create;
16577 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16579 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16581 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16582 switch (mq->entry_count) {
16584 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16585 LPFC_MQ_RING_SIZE_16);
16588 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16589 LPFC_MQ_RING_SIZE_32);
16592 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16593 LPFC_MQ_RING_SIZE_64);
16596 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16597 LPFC_MQ_RING_SIZE_128);
16600 list_for_each_entry(dmabuf, &mq->page_list, list) {
16601 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16602 putPaddrLow(dmabuf->phys);
16603 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16604 putPaddrHigh(dmabuf->phys);
16609 * lpfc_mq_create - Create a mailbox Queue on the HBA
16610 * @phba: HBA structure that indicates port to create a queue on.
16611 * @mq: The queue structure to use to create the mailbox queue.
16612 * @cq: The completion queue to associate with this mq.
16613 * @subtype: The queue's subtype.
16615 * This function creates a mailbox queue, as detailed in @mq, on a port,
16616 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16618 * The @phba struct is used to send mailbox command to HBA. The @mq struct
16619 * is used to get the entry count and entry size that are necessary to
16620 * determine the number of pages to allocate and use for this queue. This
16621 * function will send the MQ_CREATE mailbox command to the HBA to setup the
16622 * mailbox queue. This function is synchronous and waits for the mailbox
16623 * command to finish before continuing.
16625 * On success this function will return a zero. If unable to allocate enough
16626 * memory this function will return -ENOMEM. If the queue create mailbox command
16627 * fails this function will return -ENXIO.
16630 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16631 struct lpfc_queue *cq, uint32_t subtype)
16633 struct lpfc_mbx_mq_create *mq_create;
16634 struct lpfc_mbx_mq_create_ext *mq_create_ext;
16635 struct lpfc_dmabuf *dmabuf;
16636 LPFC_MBOXQ_t *mbox;
16637 int rc, length, status = 0;
16638 uint32_t shdr_status, shdr_add_status;
16639 union lpfc_sli4_cfg_shdr *shdr;
16640 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16642 /* sanity check on queue memory */
16645 if (!phba->sli4_hba.pc_sli4_params.supported)
16646 hw_page_size = SLI4_PAGE_SIZE;
16648 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16651 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16652 sizeof(struct lpfc_sli4_cfg_mhdr));
16653 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16654 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16655 length, LPFC_SLI4_MBX_EMBED);
16657 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16658 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16659 bf_set(lpfc_mbx_mq_create_ext_num_pages,
16660 &mq_create_ext->u.request, mq->page_count);
16661 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16662 &mq_create_ext->u.request, 1);
16663 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16664 &mq_create_ext->u.request, 1);
16665 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16666 &mq_create_ext->u.request, 1);
16667 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16668 &mq_create_ext->u.request, 1);
16669 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16670 &mq_create_ext->u.request, 1);
16671 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16672 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16673 phba->sli4_hba.pc_sli4_params.mqv);
16674 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16675 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16678 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16680 switch (mq->entry_count) {
16682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16683 "0362 Unsupported MQ count. (%d)\n",
16685 if (mq->entry_count < 16) {
16689 fallthrough; /* otherwise default to smallest count */
16691 bf_set(lpfc_mq_context_ring_size,
16692 &mq_create_ext->u.request.context,
16693 LPFC_MQ_RING_SIZE_16);
16696 bf_set(lpfc_mq_context_ring_size,
16697 &mq_create_ext->u.request.context,
16698 LPFC_MQ_RING_SIZE_32);
16701 bf_set(lpfc_mq_context_ring_size,
16702 &mq_create_ext->u.request.context,
16703 LPFC_MQ_RING_SIZE_64);
16706 bf_set(lpfc_mq_context_ring_size,
16707 &mq_create_ext->u.request.context,
16708 LPFC_MQ_RING_SIZE_128);
16711 list_for_each_entry(dmabuf, &mq->page_list, list) {
16712 memset(dmabuf->virt, 0, hw_page_size);
16713 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16714 putPaddrLow(dmabuf->phys);
16715 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16716 putPaddrHigh(dmabuf->phys);
16718 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16719 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16720 &mq_create_ext->u.response);
16721 if (rc != MBX_SUCCESS) {
16722 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16723 "2795 MQ_CREATE_EXT failed with "
16724 "status x%x. Failback to MQ_CREATE.\n",
16726 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16727 mq_create = &mbox->u.mqe.un.mq_create;
16728 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16729 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16730 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16731 &mq_create->u.response);
16734 /* The IOCTL status is embedded in the mailbox subheader. */
16735 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16736 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16737 if (shdr_status || shdr_add_status || rc) {
16738 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16739 "2502 MQ_CREATE mailbox failed with "
16740 "status x%x add_status x%x, mbx status x%x\n",
16741 shdr_status, shdr_add_status, rc);
16745 if (mq->queue_id == 0xFFFF) {
16749 mq->type = LPFC_MQ;
16750 mq->assoc_qid = cq->queue_id;
16751 mq->subtype = subtype;
16752 mq->host_index = 0;
16755 /* link the mq onto the parent cq child list */
16756 list_add_tail(&mq->list, &cq->child_list);
16758 mempool_free(mbox, phba->mbox_mem_pool);
16763 * lpfc_wq_create - Create a Work Queue on the HBA
16764 * @phba: HBA structure that indicates port to create a queue on.
16765 * @wq: The queue structure to use to create the work queue.
16766 * @cq: The completion queue to bind this work queue to.
16767 * @subtype: The subtype of the work queue indicating its functionality.
16769 * This function creates a work queue, as detailed in @wq, on a port, described
16770 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16772 * The @phba struct is used to send mailbox command to HBA. The @wq struct
16773 * is used to get the entry count and entry size that are necessary to
16774 * determine the number of pages to allocate and use for this queue. The @cq
16775 * is used to indicate which completion queue to bind this work queue to. This
16776 * function will send the WQ_CREATE mailbox command to the HBA to setup the
16777 * work queue. This function is synchronous and waits for the mailbox
16778 * command to finish before continuing.
16780 * On success this function will return a zero. If unable to allocate enough
16781 * memory this function will return -ENOMEM. If the queue create mailbox command
16782 * fails this function will return -ENXIO.
16785 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16786 struct lpfc_queue *cq, uint32_t subtype)
16788 struct lpfc_mbx_wq_create *wq_create;
16789 struct lpfc_dmabuf *dmabuf;
16790 LPFC_MBOXQ_t *mbox;
16791 int rc, length, status = 0;
16792 uint32_t shdr_status, shdr_add_status;
16793 union lpfc_sli4_cfg_shdr *shdr;
16794 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16795 struct dma_address *page;
16796 void __iomem *bar_memmap_p;
16797 uint32_t db_offset;
16798 uint16_t pci_barset;
16799 uint8_t dpp_barset;
16800 uint32_t dpp_offset;
16801 uint8_t wq_create_version;
16803 unsigned long pg_addr;
16806 /* sanity check on queue memory */
16809 if (!phba->sli4_hba.pc_sli4_params.supported)
16810 hw_page_size = wq->page_size;
16812 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16815 length = (sizeof(struct lpfc_mbx_wq_create) -
16816 sizeof(struct lpfc_sli4_cfg_mhdr));
16817 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16818 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16819 length, LPFC_SLI4_MBX_EMBED);
16820 wq_create = &mbox->u.mqe.un.wq_create;
16821 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16822 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16824 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16827 /* wqv is the earliest version supported, NOT the latest */
16828 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16829 phba->sli4_hba.pc_sli4_params.wqv);
16831 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16832 (wq->page_size > SLI4_PAGE_SIZE))
16833 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16835 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16837 switch (wq_create_version) {
16838 case LPFC_Q_CREATE_VERSION_1:
16839 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16841 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16842 LPFC_Q_CREATE_VERSION_1);
16844 switch (wq->entry_size) {
16847 bf_set(lpfc_mbx_wq_create_wqe_size,
16848 &wq_create->u.request_1,
16849 LPFC_WQ_WQE_SIZE_64);
16852 bf_set(lpfc_mbx_wq_create_wqe_size,
16853 &wq_create->u.request_1,
16854 LPFC_WQ_WQE_SIZE_128);
16857 /* Request DPP by default */
16858 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16859 bf_set(lpfc_mbx_wq_create_page_size,
16860 &wq_create->u.request_1,
16861 (wq->page_size / SLI4_PAGE_SIZE));
16862 page = wq_create->u.request_1.page;
16865 page = wq_create->u.request.page;
16869 list_for_each_entry(dmabuf, &wq->page_list, list) {
16870 memset(dmabuf->virt, 0, hw_page_size);
16871 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16872 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16875 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16876 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16878 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16879 /* The IOCTL status is embedded in the mailbox subheader. */
16880 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16881 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16882 if (shdr_status || shdr_add_status || rc) {
16883 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16884 "2503 WQ_CREATE mailbox failed with "
16885 "status x%x add_status x%x, mbx status x%x\n",
16886 shdr_status, shdr_add_status, rc);
16891 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16892 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16893 &wq_create->u.response);
16895 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16896 &wq_create->u.response_1);
16898 if (wq->queue_id == 0xFFFF) {
16903 wq->db_format = LPFC_DB_LIST_FORMAT;
16904 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16905 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16906 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16907 &wq_create->u.response);
16908 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16909 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16910 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16911 "3265 WQ[%d] doorbell format "
16912 "not supported: x%x\n",
16913 wq->queue_id, wq->db_format);
16917 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16918 &wq_create->u.response);
16919 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16921 if (!bar_memmap_p) {
16922 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16923 "3263 WQ[%d] failed to memmap "
16924 "pci barset:x%x\n",
16925 wq->queue_id, pci_barset);
16929 db_offset = wq_create->u.response.doorbell_offset;
16930 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16931 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16932 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16933 "3252 WQ[%d] doorbell offset "
16934 "not supported: x%x\n",
16935 wq->queue_id, db_offset);
16939 wq->db_regaddr = bar_memmap_p + db_offset;
16940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16941 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16942 "format:x%x\n", wq->queue_id,
16943 pci_barset, db_offset, wq->db_format);
16945 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16947 /* Check if DPP was honored by the firmware */
16948 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16949 &wq_create->u.response_1);
16950 if (wq->dpp_enable) {
16951 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16952 &wq_create->u.response_1);
16953 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16955 if (!bar_memmap_p) {
16956 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16957 "3267 WQ[%d] failed to memmap "
16958 "pci barset:x%x\n",
16959 wq->queue_id, pci_barset);
16963 db_offset = wq_create->u.response_1.doorbell_offset;
16964 wq->db_regaddr = bar_memmap_p + db_offset;
16965 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16966 &wq_create->u.response_1);
16967 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16968 &wq_create->u.response_1);
16969 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16971 if (!bar_memmap_p) {
16972 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16973 "3268 WQ[%d] failed to memmap "
16974 "pci barset:x%x\n",
16975 wq->queue_id, dpp_barset);
16979 dpp_offset = wq_create->u.response_1.dpp_offset;
16980 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16981 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16982 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16983 "dpp_id:x%x dpp_barset:x%x "
16984 "dpp_offset:x%x\n",
16985 wq->queue_id, pci_barset, db_offset,
16986 wq->dpp_id, dpp_barset, dpp_offset);
16989 /* Enable combined writes for DPP aperture */
16990 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16991 rc = set_memory_wc(pg_addr, 1);
16993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16994 "3272 Cannot setup Combined "
16995 "Write on WQ[%d] - disable DPP\n",
16997 phba->cfg_enable_dpp = 0;
17000 phba->cfg_enable_dpp = 0;
17003 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17005 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
17006 if (wq->pring == NULL) {
17010 wq->type = LPFC_WQ;
17011 wq->assoc_qid = cq->queue_id;
17012 wq->subtype = subtype;
17013 wq->host_index = 0;
17015 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17017 /* link the wq onto the parent cq child list */
17018 list_add_tail(&wq->list, &cq->child_list);
17020 mempool_free(mbox, phba->mbox_mem_pool);
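/*
 * Editorial sketch (illustrative only, not driver code): lpfc_wq_create()
 * above selects the WQ_CREATE mailbox version from two conditions -
 * 128-byte WQE support, or a WQ page size larger than the default SLI4
 * page. The flag bit and page size below are hypothetical stand-ins:
 */
#define EXAMPLE_WQ_SZ128_SUPPORT	0x2	/* hypothetical flag bit */
#define EXAMPLE_SLI4_PAGE_SIZE		4096U

static int example_wq_create_version(unsigned int wqsize_flags,
				     unsigned int wq_page_size)
{
	if ((wqsize_flags & EXAMPLE_WQ_SZ128_SUPPORT) ||
	    wq_page_size > EXAMPLE_SLI4_PAGE_SIZE)
		return 1;	/* version 1 request layout */
	return 0;	/* version 0 request layout */
}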
17025 * lpfc_rq_create - Create a Receive Queue on the HBA
17026 * @phba: HBA structure that indicates port to create a queue on.
17027 * @hrq: The queue structure to use to create the header receive queue.
17028 * @drq: The queue structure to use to create the data receive queue.
17029 * @cq: The completion queue to bind this work queue to.
17030 * @subtype: The subtype of the work queue indicating its functionality.
17032 * This function creates a receive buffer queue pair, as detailed in @hrq and
17033 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
17036 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
17037 * structs are used to get the entry count that is necessary to determine the
17038 * number of pages to use for this queue. The @cq is used to indicate which
17039 * completion queue to bind received buffers that are posted to these queues to.
17040 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
17041 * receive queue pair. This function is synchronous and waits for the
17042 * mailbox command to finish before continuing.
17044 * On success this function will return a zero. If unable to allocate enough
17045 * memory this function will return -ENOMEM. If the queue create mailbox command
17046 * fails this function will return -ENXIO.
17049 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17050 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17052 struct lpfc_mbx_rq_create *rq_create;
17053 struct lpfc_dmabuf *dmabuf;
17054 LPFC_MBOXQ_t *mbox;
17055 int rc, length, status = 0;
17056 uint32_t shdr_status, shdr_add_status;
17057 union lpfc_sli4_cfg_shdr *shdr;
17058 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17059 void __iomem *bar_memmap_p;
17060 uint32_t db_offset;
17061 uint16_t pci_barset;
17063 /* sanity check on queue memory */
17064 if (!hrq || !drq || !cq)
17066 if (!phba->sli4_hba.pc_sli4_params.supported)
17067 hw_page_size = SLI4_PAGE_SIZE;
17069 if (hrq->entry_count != drq->entry_count)
17070 return -EINVAL;
17071 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17074 length = (sizeof(struct lpfc_mbx_rq_create) -
17075 sizeof(struct lpfc_sli4_cfg_mhdr));
17076 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17077 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17078 length, LPFC_SLI4_MBX_EMBED);
17079 rq_create = &mbox->u.mqe.un.rq_create;
17080 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17081 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17082 phba->sli4_hba.pc_sli4_params.rqv);
17083 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17084 bf_set(lpfc_rq_context_rqe_count_1,
17085 &rq_create->u.request.context,
17087 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17088 bf_set(lpfc_rq_context_rqe_size,
17089 &rq_create->u.request.context,
17091 bf_set(lpfc_rq_context_page_size,
17092 &rq_create->u.request.context,
17093 LPFC_RQ_PAGE_SIZE_4096);
17095 switch (hrq->entry_count) {
17097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17098 "2535 Unsupported RQ count. (%d)\n",
17100 if (hrq->entry_count < 512) {
17104 fallthrough; /* otherwise default to smallest count */
17106 bf_set(lpfc_rq_context_rqe_count,
17107 &rq_create->u.request.context,
17108 LPFC_RQ_RING_SIZE_512);
17111 bf_set(lpfc_rq_context_rqe_count,
17112 &rq_create->u.request.context,
17113 LPFC_RQ_RING_SIZE_1024);
17116 bf_set(lpfc_rq_context_rqe_count,
17117 &rq_create->u.request.context,
17118 LPFC_RQ_RING_SIZE_2048);
17121 bf_set(lpfc_rq_context_rqe_count,
17122 &rq_create->u.request.context,
17123 LPFC_RQ_RING_SIZE_4096);
17126 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17127 LPFC_HDR_BUF_SIZE);
17129 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17131 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17133 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17134 memset(dmabuf->virt, 0, hw_page_size);
17135 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17136 putPaddrLow(dmabuf->phys);
17137 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17138 putPaddrHigh(dmabuf->phys);
17140 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17141 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17143 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17144 /* The IOCTL status is embedded in the mailbox subheader. */
17145 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17146 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17147 if (shdr_status || shdr_add_status || rc) {
17148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17149 "2504 RQ_CREATE mailbox failed with "
17150 "status x%x add_status x%x, mbx status x%x\n",
17151 shdr_status, shdr_add_status, rc);
17155 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17156 if (hrq->queue_id == 0xFFFF) {
17161 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17162 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17163 &rq_create->u.response);
17164 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17165 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17166 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17167 "3262 RQ [%d] doorbell format not "
17168 "supported: x%x\n", hrq->queue_id,
17174 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17175 &rq_create->u.response);
17176 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17177 if (!bar_memmap_p) {
17178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17179 "3269 RQ[%d] failed to memmap pci "
17180 "barset:x%x\n", hrq->queue_id,
17186 db_offset = rq_create->u.response.doorbell_offset;
17187 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17188 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17189 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17190 "3270 RQ[%d] doorbell offset not "
17191 "supported: x%x\n", hrq->queue_id,
17196 hrq->db_regaddr = bar_memmap_p + db_offset;
17197 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17198 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17199 "format:x%x\n", hrq->queue_id, pci_barset,
17200 db_offset, hrq->db_format);
17202 hrq->db_format = LPFC_DB_RING_FORMAT;
17203 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17205 hrq->type = LPFC_HRQ;
17206 hrq->assoc_qid = cq->queue_id;
17207 hrq->subtype = subtype;
17208 hrq->host_index = 0;
17209 hrq->hba_index = 0;
17210 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17212 /* now create the data queue */
17213 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17214 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17215 length, LPFC_SLI4_MBX_EMBED);
17216 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17217 phba->sli4_hba.pc_sli4_params.rqv);
17218 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17219 bf_set(lpfc_rq_context_rqe_count_1,
17220 &rq_create->u.request.context, hrq->entry_count);
17221 if (subtype == LPFC_NVMET)
17222 rq_create->u.request.context.buffer_size =
17223 LPFC_NVMET_DATA_BUF_SIZE;
17225 rq_create->u.request.context.buffer_size =
17226 LPFC_DATA_BUF_SIZE;
17227 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17229 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17230 (PAGE_SIZE/SLI4_PAGE_SIZE));
17232 switch (drq->entry_count) {
17234 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17235 "2536 Unsupported RQ count. (%d)\n",
17237 if (drq->entry_count < 512) {
17241 fallthrough; /* otherwise default to smallest count */
17243 bf_set(lpfc_rq_context_rqe_count,
17244 &rq_create->u.request.context,
17245 LPFC_RQ_RING_SIZE_512);
17248 bf_set(lpfc_rq_context_rqe_count,
17249 &rq_create->u.request.context,
17250 LPFC_RQ_RING_SIZE_1024);
17253 bf_set(lpfc_rq_context_rqe_count,
17254 &rq_create->u.request.context,
17255 LPFC_RQ_RING_SIZE_2048);
17258 bf_set(lpfc_rq_context_rqe_count,
17259 &rq_create->u.request.context,
17260 LPFC_RQ_RING_SIZE_4096);
17263 if (subtype == LPFC_NVMET)
17264 bf_set(lpfc_rq_context_buf_size,
17265 &rq_create->u.request.context,
17266 LPFC_NVMET_DATA_BUF_SIZE);
17268 bf_set(lpfc_rq_context_buf_size,
17269 &rq_create->u.request.context,
17270 LPFC_DATA_BUF_SIZE);
17272 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17274 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17276 list_for_each_entry(dmabuf, &drq->page_list, list) {
17277 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17278 putPaddrLow(dmabuf->phys);
17279 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17280 putPaddrHigh(dmabuf->phys);
17282 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17283 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17284 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17285 /* The IOCTL status is embedded in the mailbox subheader. */
17286 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17287 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17288 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17289 if (shdr_status || shdr_add_status || rc) {
17293 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17294 if (drq->queue_id == 0xFFFF) {
17298 drq->type = LPFC_DRQ;
17299 drq->assoc_qid = cq->queue_id;
17300 drq->subtype = subtype;
17301 drq->host_index = 0;
17302 drq->hba_index = 0;
17303 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17305 /* link the header and data RQs onto the parent cq child list */
17306 list_add_tail(&hrq->list, &cq->child_list);
17307 list_add_tail(&drq->list, &cq->child_list);
17310 mempool_free(mbox, phba->mbox_mem_pool);
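/*
 * Illustrative usage sketch (not driver code): a caller that has already
 * created the unsolicited-receive CQ pairs the header and data RQs in a
 * single call. The queue pointers and subtype below are assumptions
 * about the caller's context:
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_destroy;
 */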
17315 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17316 * @phba: HBA structure that indicates port to create a queue on.
17317 * @hrqp: The queue structure array to use to create the header receive queues.
17318 * @drqp: The queue structure array to use to create the data receive queues.
17319 * @cqp: The completion queue array to bind these receive queues to.
17320 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17322 * This function creates receive buffer queue pairs, as detailed in @hrqp and
17323 * @drqp, on a port described by @phba, by sending an RQ_CREATE mailbox command
17326 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
17327 * and @hrqp structs are used to get the entry counts that are necessary to
17328 * determine the number of pages to use for each queue. The @cqp array
17329 * indicates which completion queues to bind the received buffers posted to
17330 * these queues to. This function sends the RQ_CREATE mailbox command to the
17331 * HBA to set up the receive queue pairs. It is synchronous and waits for the
17332 * mailbox command to finish before returning.
17334 * On success this function will return a zero. If unable to allocate enough
17335 * memory this function will return -ENOMEM. If the queue create mailbox command
17336 * fails this function will return -ENXIO.
17339 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17340 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17343 struct lpfc_queue *hrq, *drq, *cq;
17344 struct lpfc_mbx_rq_create_v2 *rq_create;
17345 struct lpfc_dmabuf *dmabuf;
17346 LPFC_MBOXQ_t *mbox;
17347 int rc, length, alloclen, status = 0;
17348 int cnt, idx, numrq, page_idx = 0;
17349 uint32_t shdr_status, shdr_add_status;
17350 union lpfc_sli4_cfg_shdr *shdr;
17351 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17353 numrq = phba->cfg_nvmet_mrq;
17354 /* sanity check on array memory */
17355 if (!hrqp || !drqp || !cqp || !numrq)
17357 if (!phba->sli4_hba.pc_sli4_params.supported)
17358 hw_page_size = SLI4_PAGE_SIZE;
17360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17364 length = sizeof(struct lpfc_mbx_rq_create_v2);
17365 length += ((2 * numrq * hrqp[0]->page_count) *
17366 sizeof(struct dma_address));
17368 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17369 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17370 LPFC_SLI4_MBX_NEMBED);
17371 if (alloclen < length) {
17372 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17373 "3099 Allocated DMA memory size (%d) is "
17374 "less than the requested DMA memory size "
17375 "(%d)\n", alloclen, length);
17382 rq_create = mbox->sge_array->addr[0];
17383 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17385 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17388 for (idx = 0; idx < numrq; idx++) {
17393 /* sanity check on queue memory */
17394 if (!hrq || !drq || !cq) {
17399 if (hrq->entry_count != drq->entry_count) {
17405 bf_set(lpfc_mbx_rq_create_num_pages,
17406 &rq_create->u.request,
17408 bf_set(lpfc_mbx_rq_create_rq_cnt,
17409 &rq_create->u.request, (numrq * 2));
17410 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17412 bf_set(lpfc_rq_context_base_cq,
17413 &rq_create->u.request.context,
17415 bf_set(lpfc_rq_context_data_size,
17416 &rq_create->u.request.context,
17417 LPFC_NVMET_DATA_BUF_SIZE);
17418 bf_set(lpfc_rq_context_hdr_size,
17419 &rq_create->u.request.context,
17420 LPFC_HDR_BUF_SIZE);
17421 bf_set(lpfc_rq_context_rqe_count_1,
17422 &rq_create->u.request.context,
17424 bf_set(lpfc_rq_context_rqe_size,
17425 &rq_create->u.request.context,
17427 bf_set(lpfc_rq_context_page_size,
17428 &rq_create->u.request.context,
17429 (PAGE_SIZE/SLI4_PAGE_SIZE));
17432 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17433 memset(dmabuf->virt, 0, hw_page_size);
17434 cnt = page_idx + dmabuf->buffer_tag;
17435 rq_create->u.request.page[cnt].addr_lo =
17436 putPaddrLow(dmabuf->phys);
17437 rq_create->u.request.page[cnt].addr_hi =
17438 putPaddrHigh(dmabuf->phys);
17444 list_for_each_entry(dmabuf, &drq->page_list, list) {
17445 memset(dmabuf->virt, 0, hw_page_size);
17446 cnt = page_idx + dmabuf->buffer_tag;
17447 rq_create->u.request.page[cnt].addr_lo =
17448 putPaddrLow(dmabuf->phys);
17449 rq_create->u.request.page[cnt].addr_hi =
17450 putPaddrHigh(dmabuf->phys);
17455 hrq->db_format = LPFC_DB_RING_FORMAT;
17456 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17457 hrq->type = LPFC_HRQ;
17458 hrq->assoc_qid = cq->queue_id;
17459 hrq->subtype = subtype;
17460 hrq->host_index = 0;
17461 hrq->hba_index = 0;
17462 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17464 drq->db_format = LPFC_DB_RING_FORMAT;
17465 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17466 drq->type = LPFC_DRQ;
17467 drq->assoc_qid = cq->queue_id;
17468 drq->subtype = subtype;
17469 drq->host_index = 0;
17470 drq->hba_index = 0;
17471 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17473 list_add_tail(&hrq->list, &cq->child_list);
17474 list_add_tail(&drq->list, &cq->child_list);
17477 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17478 /* The IOCTL status is embedded in the mailbox subheader. */
17479 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17480 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17481 if (shdr_status || shdr_add_status || rc) {
17482 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17483 "3120 RQ_CREATE mailbox failed with "
17484 "status x%x add_status x%x, mbx status x%x\n",
17485 shdr_status, shdr_add_status, rc);
17489 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17490 if (rc == 0xFFFF) {
17495 /* Initialize all RQs with associated queue id */
17496 for (idx = 0; idx < numrq; idx++) {
17498 hrq->queue_id = rc + (2 * idx);
17500 drq->queue_id = rc + (2 * idx) + 1;
17504 lpfc_sli4_mbox_cmd_free(phba, mbox);
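/*
 * Illustrative usage sketch (not driver code): NVMET targets create all
 * cfg_nvmet_mrq header/data pairs in one shot; the array names below are
 * assumptions about the caller's context:
 *
 *	rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
 *			     phba->sli4_hba.nvmet_mrq_data,
 *			     phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
 *
 * On success, HRQ idx gets queue id base + 2 * idx and its DRQ gets
 * base + 2 * idx + 1, matching the assignment loop above.
 */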
17509 * lpfc_eq_destroy - Destroy an event Queue on the HBA
17510 * @phba: HBA structure that indicates port to destroy a queue on.
17511 * @eq: The queue structure associated with the queue to destroy.
17513 * This function destroys a queue, as detailed in @eq, by sending a mailbox
17514 * command, specific to the type of queue, to the HBA.
17516 * The @eq struct is used to get the queue ID of the queue to destroy.
17518 * On success this function will return a zero. If the queue destroy mailbox
17519 * command fails this function will return -ENXIO.
17522 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17524 LPFC_MBOXQ_t *mbox;
17525 int rc, length, status = 0;
17526 uint32_t shdr_status, shdr_add_status;
17527 union lpfc_sli4_cfg_shdr *shdr;
17529 /* sanity check on queue memory */
17533 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17536 length = (sizeof(struct lpfc_mbx_eq_destroy) -
17537 sizeof(struct lpfc_sli4_cfg_mhdr));
17538 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17539 LPFC_MBOX_OPCODE_EQ_DESTROY,
17540 length, LPFC_SLI4_MBX_EMBED);
17541 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17543 mbox->vport = eq->phba->pport;
17544 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17546 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17547 /* The IOCTL status is embedded in the mailbox subheader. */
17548 shdr = (union lpfc_sli4_cfg_shdr *)
17549 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17550 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17551 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17552 if (shdr_status || shdr_add_status || rc) {
17553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17554 "2505 EQ_DESTROY mailbox failed with "
17555 "status x%x add_status x%x, mbx status x%x\n",
17556 shdr_status, shdr_add_status, rc);
17560 /* Remove eq from any list */
17561 list_del_init(&eq->list);
17562 mempool_free(mbox, eq->phba->mbox_mem_pool);
17567 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17568 * @phba: HBA structure that indicates port to destroy a queue on.
17569 * @cq: The queue structure associated with the queue to destroy.
17571 * This function destroys a queue, as detailed in @cq, by sending a mailbox
17572 * command, specific to the type of queue, to the HBA.
17574 * The @cq struct is used to get the queue ID of the queue to destroy.
17576 * On success this function will return a zero. If the queue destroy mailbox
17577 * command fails this function will return -ENXIO.
17580 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17582 LPFC_MBOXQ_t *mbox;
17583 int rc, length, status = 0;
17584 uint32_t shdr_status, shdr_add_status;
17585 union lpfc_sli4_cfg_shdr *shdr;
17587 /* sanity check on queue memory */
17590 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17593 length = (sizeof(struct lpfc_mbx_cq_destroy) -
17594 sizeof(struct lpfc_sli4_cfg_mhdr));
17595 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17596 LPFC_MBOX_OPCODE_CQ_DESTROY,
17597 length, LPFC_SLI4_MBX_EMBED);
17598 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17600 mbox->vport = cq->phba->pport;
17601 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17602 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17603 /* The IOCTL status is embedded in the mailbox subheader. */
17604 shdr = (union lpfc_sli4_cfg_shdr *)
17605 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
17606 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17607 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17608 if (shdr_status || shdr_add_status || rc) {
17609 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17610 "2506 CQ_DESTROY mailbox failed with "
17611 "status x%x add_status x%x, mbx status x%x\n",
17612 shdr_status, shdr_add_status, rc);
17615 /* Remove cq from any list */
17616 list_del_init(&cq->list);
17617 mempool_free(mbox, cq->phba->mbox_mem_pool);
17622 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17623 * @phba: HBA structure that indicates port to destroy a queue on.
17624 * @mq: The queue structure associated with the queue to destroy.
17626 * This function destroys a queue, as detailed in @mq, by sending a mailbox
17627 * command, specific to the type of queue, to the HBA.
17629 * The @mq struct is used to get the queue ID of the queue to destroy.
17631 * On success this function will return a zero. If the queue destroy mailbox
17632 * command fails this function will return -ENXIO.
17635 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17637 LPFC_MBOXQ_t *mbox;
17638 int rc, length, status = 0;
17639 uint32_t shdr_status, shdr_add_status;
17640 union lpfc_sli4_cfg_shdr *shdr;
17642 /* sanity check on queue memory */
17645 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17648 length = (sizeof(struct lpfc_mbx_mq_destroy) -
17649 sizeof(struct lpfc_sli4_cfg_mhdr));
17650 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17651 LPFC_MBOX_OPCODE_MQ_DESTROY,
17652 length, LPFC_SLI4_MBX_EMBED);
17653 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17655 mbox->vport = mq->phba->pport;
17656 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17657 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17658 /* The IOCTL status is embedded in the mailbox subheader. */
17659 shdr = (union lpfc_sli4_cfg_shdr *)
17660 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17661 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17662 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17663 if (shdr_status || shdr_add_status || rc) {
17664 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17665 "2507 MQ_DESTROY mailbox failed with "
17666 "status x%x add_status x%x, mbx status x%x\n",
17667 shdr_status, shdr_add_status, rc);
17670 /* Remove mq from any list */
17671 list_del_init(&mq->list);
17672 mempool_free(mbox, mq->phba->mbox_mem_pool);
17677 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17678 * @phba: HBA structure that indicates port to destroy a queue on.
17679 * @wq: The queue structure associated with the queue to destroy.
17681 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17682 * command, specific to the type of queue, to the HBA.
17684 * The @wq struct is used to get the queue ID of the queue to destroy.
17686 * On success this function will return a zero. If the queue destroy mailbox
17687 * command fails this function will return -ENXIO.
17690 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17692 LPFC_MBOXQ_t *mbox;
17693 int rc, length, status = 0;
17694 uint32_t shdr_status, shdr_add_status;
17695 union lpfc_sli4_cfg_shdr *shdr;
17697 /* sanity check on queue memory */
17700 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17703 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17704 sizeof(struct lpfc_sli4_cfg_mhdr));
17705 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17706 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17707 length, LPFC_SLI4_MBX_EMBED);
17708 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17710 mbox->vport = wq->phba->pport;
17711 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17712 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17713 shdr = (union lpfc_sli4_cfg_shdr *)
17714 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17715 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17716 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17717 if (shdr_status || shdr_add_status || rc) {
17718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17719 "2508 WQ_DESTROY mailbox failed with "
17720 "status x%x add_status x%x, mbx status x%x\n",
17721 shdr_status, shdr_add_status, rc);
17724 /* Remove wq from any list */
17725 list_del_init(&wq->list);
17728 mempool_free(mbox, wq->phba->mbox_mem_pool);
17733 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17734 * @phba: HBA structure that indicates port to destroy a queue on.
17735 * @hrq: The queue structure associated with the queue to destroy.
17736 * @drq: The queue structure associated with the queue to destroy.
17738 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
17739 * by sending a mailbox command, specific to the type of queue, to the HBA.
17741 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
17743 * On success this function will return a zero. If the queue destroy mailbox
17744 * command fails this function will return -ENXIO.
17747 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17748 struct lpfc_queue *drq)
17750 LPFC_MBOXQ_t *mbox;
17751 int rc, length, status = 0;
17752 uint32_t shdr_status, shdr_add_status;
17753 union lpfc_sli4_cfg_shdr *shdr;
17755 /* sanity check on queue memory */
17758 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17761 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17762 sizeof(struct lpfc_sli4_cfg_mhdr));
17763 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17764 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17765 length, LPFC_SLI4_MBX_EMBED);
17766 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17768 mbox->vport = hrq->phba->pport;
17769 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17770 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17771 /* The IOCTL status is embedded in the mailbox subheader. */
17772 shdr = (union lpfc_sli4_cfg_shdr *)
17773 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17774 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17775 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17776 if (shdr_status || shdr_add_status || rc) {
17777 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17778 "2509 RQ_DESTROY mailbox failed with "
17779 "status x%x add_status x%x, mbx status x%x\n",
17780 shdr_status, shdr_add_status, rc);
17781 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17784 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17786 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17787 shdr = (union lpfc_sli4_cfg_shdr *)
17788 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17789 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17790 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17791 if (shdr_status || shdr_add_status || rc) {
17792 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17793 "2510 RQ_DESTROY mailbox failed with "
17794 "status x%x add_status x%x, mbx status x%x\n",
17795 shdr_status, shdr_add_status, rc);
17798 list_del_init(&hrq->list);
17799 list_del_init(&drq->list);
17800 mempool_free(mbox, hrq->phba->mbox_mem_pool);
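/*
 * Teardown-ordering note (illustrative sketch; the queue names are
 * assumptions about the caller's context): child queues are destroyed
 * before the completion queue they feed, and CQs before their EQ, e.g.:
 *
 *	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
 *	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 *	lpfc_eq_destroy(phba, eq);
 */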
17805 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17806 * @phba: pointer to lpfc hba data structure.
17807 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17808 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17809 * @xritag: the xritag that ties this io to the SGL pages.
17811 * This routine will post the sgl pages for the IO that has the xritag
17812 * that is in the iocbq structure. The xritag is assigned during iocbq
17813 * creation and persists for as long as the driver is loaded.
17814 * If the caller has fewer than 256 scatter gather segments to map, then
17815 * pdma_phys_addr1 should be 0.
17816 * If the caller needs to map more than 256 scatter gather segments, then
17817 * pdma_phys_addr1 should be a valid physical address.
17818 * Physical addresses for SGLs must be 64-byte aligned.
17819 * If two SGL pages are mapped, the first must have 256 entries and the
17820 * second can have between 1 and 256 entries.
17824 * -ENXIO, -ENOMEM - Failure
17827 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17828 dma_addr_t pdma_phys_addr0,
17829 dma_addr_t pdma_phys_addr1,
17832 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17833 LPFC_MBOXQ_t *mbox;
17835 uint32_t shdr_status, shdr_add_status;
17837 union lpfc_sli4_cfg_shdr *shdr;
17839 if (xritag == NO_XRI) {
17840 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17841 "0364 Invalid param:\n");
17845 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17849 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17850 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17851 sizeof(struct lpfc_mbx_post_sgl_pages) -
17852 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17854 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17855 &mbox->u.mqe.un.post_sgl_pages;
17856 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17857 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17859 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17860 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17861 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17862 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17864 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17865 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17866 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17867 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17868 if (!phba->sli4_hba.intr_enable)
17869 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17871 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17872 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17874 /* The IOCTL status is embedded in the mailbox subheader. */
17875 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17876 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17877 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17878 if (!phba->sli4_hba.intr_enable)
17879 mempool_free(mbox, phba->mbox_mem_pool);
17880 else if (rc != MBX_TIMEOUT)
17881 mempool_free(mbox, phba->mbox_mem_pool);
17882 if (shdr_status || shdr_add_status || rc) {
17883 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17884 "2511 POST_SGL mailbox failed with "
17885 "status x%x add_status x%x, mbx status x%x\n",
17886 shdr_status, shdr_add_status, rc);
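/*
 * Illustrative usage sketch (not driver code): posting a single-page SGL
 * for an sglq entry; pdma_phys_addr1 is 0 because 256 or fewer scatter
 * gather segments are mapped:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *	if (rc)
 *		return rc;
 */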
17892 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17893 * @phba: pointer to lpfc hba data structure.
17895 * This routine is invoked to allocate the next available xri from the
17896 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
17897 * index is logical, so the driver searches the bitmask from 0 each time
17898 * and marks the allocated entry as in use.
17901 * Return: an available xri (0 <= xri < max_xri) if successful;
17902 * NO_XRI if no xris are available.
17905 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17910 * Fetch the next logical xri. Because this index is logical,
17911 * the driver starts at 0 each time.
17913 spin_lock_irq(&phba->hbalock);
17914 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
17915 phba->sli4_hba.max_cfg_param.max_xri, 0);
17916 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17917 spin_unlock_irq(&phba->hbalock);
17920 set_bit(xri, phba->sli4_hba.xri_bmask);
17921 phba->sli4_hba.max_cfg_param.xri_used++;
17923 spin_unlock_irq(&phba->hbalock);
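/*
 * Illustrative pairing sketch: a successful allocation is balanced by a
 * release once the exchange completes:
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		return -ENOMEM;
 *	(use the exchange)
 *	lpfc_sli4_free_xri(phba, xri);
 */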
17928 * __lpfc_sli4_free_xri - Release an xri for reuse.
17929 * @phba: pointer to lpfc hba data structure.
17930 * @xri: xri to release.
17932 * This routine is invoked to release an xri to the pool of
17933 * available xris maintained by the driver.
17936 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17938 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17939 phba->sli4_hba.max_cfg_param.xri_used--;
17944 * lpfc_sli4_free_xri - Release an xri for reuse.
17945 * @phba: pointer to lpfc hba data structure.
17946 * @xri: xri to release.
17948 * This routine is invoked to release an xri to the pool of
17949 * available xris maintained by the driver.
17952 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17954 spin_lock_irq(&phba->hbalock);
17955 __lpfc_sli4_free_xri(phba, xri);
17956 spin_unlock_irq(&phba->hbalock);
17960 * lpfc_sli4_next_xritag - Get an xritag for the io
17961 * @phba: Pointer to HBA context object.
17963 * This function gets an xritag for the iocb. If there is no unused xritag
17964 * it will return NO_XRI (0xffff).
17965 * The function returns the allocated xritag if successful, else NO_XRI.
17966 * NO_XRI is not a valid xritag.
17967 * The caller is not required to hold any lock.
17970 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17972 uint16_t xri_index;
17974 xri_index = lpfc_sli4_alloc_xri(phba);
17975 if (xri_index == NO_XRI)
17976 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17977 "2004 Failed to allocate XRI.last XRITAG is %d"
17978 " Max XRI is %d, Used XRI is %d\n",
17980 phba->sli4_hba.max_cfg_param.max_xri,
17981 phba->sli4_hba.max_cfg_param.xri_used);
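/*
 * Usage sketch: callers treat NO_XRI as allocation failure, e.g.
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag == NO_XRI)
 *		return NULL;
 */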
17986 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17987 * @phba: pointer to lpfc hba data structure.
17988 * @post_sgl_list: pointer to els sgl entry list.
17989 * @post_cnt: number of els sgl entries on the list.
17991 * This routine is invoked to post a block of driver's sgl pages to the
17992 * HBA using non-embedded mailbox command. No Lock is held. This routine
17993 * is only called when the driver is loading and after all IO has been
17997 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17998 struct list_head *post_sgl_list,
18001 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
18002 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18003 struct sgl_page_pairs *sgl_pg_pairs;
18005 LPFC_MBOXQ_t *mbox;
18006 uint32_t reqlen, alloclen, pg_pairs;
18008 uint16_t xritag_start = 0;
18010 uint32_t shdr_status, shdr_add_status;
18011 union lpfc_sli4_cfg_shdr *shdr;
18013 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
18014 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18015 if (reqlen > SLI4_PAGE_SIZE) {
18016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18017 "2559 Block sgl registration required DMA "
18018 "size (%d) great than a page\n", reqlen);
18022 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18026 /* Allocate DMA memory and set up the non-embedded mailbox command */
18027 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18028 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
18029 LPFC_SLI4_MBX_NEMBED);
18031 if (alloclen < reqlen) {
18032 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18033 "0285 Allocated DMA memory size (%d) is "
18034 "less than the requested DMA memory "
18035 "size (%d)\n", alloclen, reqlen);
18036 lpfc_sli4_mbox_cmd_free(phba, mbox);
18039 /* Set up the SGL pages in the non-embedded DMA pages */
18040 viraddr = mbox->sge_array->addr[0];
18041 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18042 sgl_pg_pairs = &sgl->sgl_pg_pairs;
18045 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
18046 /* Set up the sge entry */
18047 sgl_pg_pairs->sgl_pg0_addr_lo =
18048 cpu_to_le32(putPaddrLow(sglq_entry->phys));
18049 sgl_pg_pairs->sgl_pg0_addr_hi =
18050 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
18051 sgl_pg_pairs->sgl_pg1_addr_lo =
18052 cpu_to_le32(putPaddrLow(0));
18053 sgl_pg_pairs->sgl_pg1_addr_hi =
18054 cpu_to_le32(putPaddrHigh(0));
18056 /* Keep the first xritag on the list */
18058 xritag_start = sglq_entry->sli4_xritag;
18063 /* Complete initialization and perform endian conversion. */
18064 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18065 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
18066 sgl->word0 = cpu_to_le32(sgl->word0);
18068 if (!phba->sli4_hba.intr_enable)
18069 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18071 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18072 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18074 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
18075 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18076 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18077 if (!phba->sli4_hba.intr_enable)
18078 lpfc_sli4_mbox_cmd_free(phba, mbox);
18079 else if (rc != MBX_TIMEOUT)
18080 lpfc_sli4_mbox_cmd_free(phba, mbox);
18081 if (shdr_status || shdr_add_status || rc) {
18082 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18083 "2513 POST_SGL_BLOCK mailbox command failed "
18084 "status x%x add_status x%x mbx status x%x\n",
18085 shdr_status, shdr_add_status, rc);
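/*
 * Sizing note (worked example; the 16-byte size of struct sgl_page_pairs
 * is an assumption stated only for illustration): the request must fit
 * in one SLI4 page, so
 *
 *	post_cnt * sizeof(struct sgl_page_pairs)
 *		+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t)
 *		<= SLI4_PAGE_SIZE
 *
 * bounds post_cnt to roughly (4096 - header) / 16 pairs, which is why
 * non-embedded block posts are limited to LPFC_NEMBED_MBOX_SGL_CNT
 * entries per mailbox command.
 */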
18092 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
18093 * @phba: pointer to lpfc hba data structure.
18094 * @nblist: pointer to nvme buffer list.
18095 * @count: number of IO buffers on the list.
18097 * This routine is invoked to post a block of @count IO buffer sgl pages from
18098 * the buffer list @nblist to the HBA using a non-embedded mailbox command.
18103 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
18106 struct lpfc_io_buf *lpfc_ncmd;
18107 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18108 struct sgl_page_pairs *sgl_pg_pairs;
18110 LPFC_MBOXQ_t *mbox;
18111 uint32_t reqlen, alloclen, pg_pairs;
18113 uint16_t xritag_start = 0;
18115 uint32_t shdr_status, shdr_add_status;
18116 dma_addr_t pdma_phys_bpl1;
18117 union lpfc_sli4_cfg_shdr *shdr;
18119 /* Calculate the requested length of the dma memory */
18120 reqlen = count * sizeof(struct sgl_page_pairs) +
18121 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18122 if (reqlen > SLI4_PAGE_SIZE) {
18123 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
18124 "6118 Block sgl registration required DMA "
18125 "size (%d) great than a page\n", reqlen);
18128 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18130 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18131 "6119 Failed to allocate mbox cmd memory\n");
18135 /* Allocate DMA memory and set up the non-embedded mailbox command */
18136 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18137 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18138 reqlen, LPFC_SLI4_MBX_NEMBED);
18140 if (alloclen < reqlen) {
18141 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18142 "6120 Allocated DMA memory size (%d) is "
18143 "less than the requested DMA memory "
18144 "size (%d)\n", alloclen, reqlen);
18145 lpfc_sli4_mbox_cmd_free(phba, mbox);
18149 /* Get the first SGE entry from the non-embedded DMA memory */
18150 viraddr = mbox->sge_array->addr[0];
18152 /* Set up the SGL pages in the non-embedded DMA pages */
18153 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18154 sgl_pg_pairs = &sgl->sgl_pg_pairs;
18157 list_for_each_entry(lpfc_ncmd, nblist, list) {
18158 /* Set up the sge entry */
18159 sgl_pg_pairs->sgl_pg0_addr_lo =
18160 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
18161 sgl_pg_pairs->sgl_pg0_addr_hi =
18162 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
18163 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
18164 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
18167 pdma_phys_bpl1 = 0;
18168 sgl_pg_pairs->sgl_pg1_addr_lo =
18169 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
18170 sgl_pg_pairs->sgl_pg1_addr_hi =
18171 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
18172 /* Keep the first xritag on the list */
18174 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
18178 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18179 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
18180 /* Perform endian conversion if necessary */
18181 sgl->word0 = cpu_to_le32(sgl->word0);
18183 if (!phba->sli4_hba.intr_enable) {
18184 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18186 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18187 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18189 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
18190 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18191 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18192 if (!phba->sli4_hba.intr_enable)
18193 lpfc_sli4_mbox_cmd_free(phba, mbox);
18194 else if (rc != MBX_TIMEOUT)
18195 lpfc_sli4_mbox_cmd_free(phba, mbox);
18196 if (shdr_status || shdr_add_status || rc) {
18197 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18198 "6125 POST_SGL_BLOCK mailbox command failed "
18199 "status x%x add_status x%x mbx status x%x\n",
18200 shdr_status, shdr_add_status, rc);
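/*
 * Second-page math (sketch mirroring the loop above): when the per-IO
 * SGL spans more than one SGL page, the pg1 address is the first page
 * plus one SGL page:
 *
 *	pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + SGL_PAGE_SIZE;
 *
 * otherwise pg1 is posted as 0. SGL_PAGE_SIZE is the driver's SGL page
 * constant, not the CPU PAGE_SIZE.
 */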
18207 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
18208 * @phba: pointer to lpfc hba data structure.
18209 * @post_nblist: pointer to the nvme buffer list.
18210 * @sb_count: number of nvme buffers.
18212 * This routine walks a list of nvme buffers that was passed in. It attempts
18213 * to construct blocks of nvme buffer sgls which contain contiguous xris and
18214 * uses the non-embedded SGL block post mailbox commands to post to the port.
18215 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
18216 * embedded SGL post mailbox command for posting. The @post_nblist passed in
18217 * must be a local list, thus no lock is needed when manipulating the list.
18219 * Returns: the number of successfully posted buffers, or 0 on failure.
18222 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
18223 struct list_head *post_nblist, int sb_count)
18225 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
18226 int status, sgl_size;
18227 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
18228 dma_addr_t pdma_phys_sgl1;
18229 int last_xritag = NO_XRI;
18231 LIST_HEAD(prep_nblist);
18232 LIST_HEAD(blck_nblist);
18233 LIST_HEAD(nvme_nblist);
18239 sgl_size = phba->cfg_sg_dma_buf_size;
18240 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
18241 list_del_init(&lpfc_ncmd->list);
18243 if ((last_xritag != NO_XRI) &&
18244 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
18245 /* a hole in xri block, form a sgl posting block */
18246 list_splice_init(&prep_nblist, &blck_nblist);
18247 post_cnt = block_cnt - 1;
18248 /* prepare list for next posting block */
18249 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18252 /* prepare list for next posting block */
18253 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18254 /* enough sgls for non-embed sgl mbox command */
18255 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18256 list_splice_init(&prep_nblist, &blck_nblist);
18257 post_cnt = block_cnt;
18262 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18264 /* end of repost sgl list condition for NVME buffers */
18265 if (num_posting == sb_count) {
18266 if (post_cnt == 0) {
18267 /* last sgl posting block */
18268 list_splice_init(&prep_nblist, &blck_nblist);
18269 post_cnt = block_cnt;
18270 } else if (block_cnt == 1) {
18271 /* last single sgl with non-contiguous xri */
18272 if (sgl_size > SGL_PAGE_SIZE)
18274 lpfc_ncmd->dma_phys_sgl +
18277 pdma_phys_sgl1 = 0;
18278 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18279 status = lpfc_sli4_post_sgl(
18280 phba, lpfc_ncmd->dma_phys_sgl,
18281 pdma_phys_sgl1, cur_xritag);
18283 /* Post error. Buffer unavailable. */
18284 lpfc_ncmd->flags |=
18285 LPFC_SBUF_NOT_POSTED;
18287 /* Post success. Buffer available. */
18288 lpfc_ncmd->flags &=
18289 ~LPFC_SBUF_NOT_POSTED;
18290 lpfc_ncmd->status = IOSTAT_SUCCESS;
18293 /* success, put on NVME buffer sgl list */
18294 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18298 /* continue until a nembed page worth of sgls */
18302 /* post block of NVME buffer list sgls */
18303 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18306 /* don't reset xritag due to hole in xri block */
18307 if (block_cnt == 0)
18308 last_xritag = NO_XRI;
18310 /* reset NVME buffer post count for next round of posting */
18313 /* put NVME buffers with posted sgls on the NVME buffer sgl list */
18314 while (!list_empty(&blck_nblist)) {
18315 list_remove_head(&blck_nblist, lpfc_ncmd,
18316 struct lpfc_io_buf, list);
18318 /* Post error. Mark buffer unavailable. */
18319 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18321 /* Post success. Mark buffer available. */
18322 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18323 lpfc_ncmd->status = IOSTAT_SUCCESS;
18326 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18329 /* Push NVME buffers with sgl posted to the available list */
18330 lpfc_io_buf_replenish(phba, &nvme_nblist);
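/*
 * Blocking sketch: contiguous xris are batched and a hole starts a new
 * block. For example, buffers with xris 100, 101, 102, 200 post as one
 * non-embedded block for 100-102, while 200 (a single buffer with a
 * non-contiguous xri at the end of the list) goes out via the embedded
 * lpfc_sli4_post_sgl() path above.
 */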
18336 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18337 * @phba: pointer to lpfc_hba struct that the frame was received on
18338 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18340 * This function checks the fields in the @fc_hdr to see if the FC frame is a
18341 * valid type of frame that the LPFC driver will handle. This function will
18342 * return zero if the frame is a valid frame, or a nonzero value when the
18343 * frame does not pass the check.
18346 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18348 /* make rctl_names static to save stack space */
18349 struct fc_vft_header *fc_vft_hdr;
18350 uint32_t *header = (uint32_t *) fc_hdr;
18352 #define FC_RCTL_MDS_DIAGS 0xF4
18354 switch (fc_hdr->fh_r_ctl) {
18355 case FC_RCTL_DD_UNCAT: /* uncategorized information */
18356 case FC_RCTL_DD_SOL_DATA: /* solicited data */
18357 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
18358 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
18359 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
18360 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
18361 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
18362 case FC_RCTL_DD_CMD_STATUS: /* command status */
18363 case FC_RCTL_ELS_REQ: /* extended link services request */
18364 case FC_RCTL_ELS_REP: /* extended link services reply */
18365 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
18366 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
18367 case FC_RCTL_BA_ABTS: /* basic link service abort */
18368 case FC_RCTL_BA_RMC: /* remove connection */
18369 case FC_RCTL_BA_ACC: /* basic accept */
18370 case FC_RCTL_BA_RJT: /* basic reject */
18371 case FC_RCTL_BA_PRMT:
18372 case FC_RCTL_ACK_1: /* acknowledge_1 */
18373 case FC_RCTL_ACK_0: /* acknowledge_0 */
18374 case FC_RCTL_P_RJT: /* port reject */
18375 case FC_RCTL_F_RJT: /* fabric reject */
18376 case FC_RCTL_P_BSY: /* port busy */
18377 case FC_RCTL_F_BSY: /* fabric busy to data frame */
18378 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
18379 case FC_RCTL_LCR: /* link credit reset */
18380 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18381 case FC_RCTL_END: /* end */
18383 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
18384 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18385 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18386 return lpfc_fc_frame_check(phba, fc_hdr);
18387 case FC_RCTL_BA_NOP: /* basic link service NOP */
18392 switch (fc_hdr->fh_type) {
18405 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18406 "2538 Received frame rctl:x%x, type:x%x, "
18407 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18408 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18409 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18410 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18411 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18412 be32_to_cpu(header[6]));
18415 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18416 "2539 Dropped frame rctl:x%x type:x%x\n",
18417 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18422 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18423 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18425 * This function processes the FC header to retrieve the VFI from the VF
18426 * header, if one exists. This function will return the VFI if one exists
18427 * or 0 if no VF Tagging Header exists.
18430 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18432 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18434 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18436 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18440 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18441 * @phba: Pointer to the HBA structure to search for the vport on
18442 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18443 * @fcfi: The FC Fabric ID that the frame came from
18444 * @did: Destination ID to match against
18446 * This function searches the @phba for a vport that matches the content of the
18447 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18448 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18449 * returns the matching vport pointer or NULL if unable to match frame to a
18452 static struct lpfc_vport *
18453 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18454 uint16_t fcfi, uint32_t did)
18456 struct lpfc_vport **vports;
18457 struct lpfc_vport *vport = NULL;
18460 if (did == Fabric_DID)
18461 return phba->pport;
18462 if ((phba->pport->fc_flag & FC_PT2PT) &&
18463 !(phba->link_state == LPFC_HBA_READY))
18464 return phba->pport;
18466 vports = lpfc_create_vport_work_array(phba);
18467 if (vports != NULL) {
18468 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18469 if (phba->fcf.fcfi == fcfi &&
18470 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18471 vports[i]->fc_myDID == did) {
18477 lpfc_destroy_vport_work_array(phba, vports);
18482 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18483 * @vport: The vport to work on.
18485 * This function updates the receive sequence time stamp for this vport. The
18486 * receive sequence time stamp indicates the time that the last frame of the
18487 * sequence that has been idle for the longest amount of time was received.
18488 * The driver uses this time stamp to indicate if any received sequences have
18492 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18494 struct lpfc_dmabuf *h_buf;
18495 struct hbq_dmabuf *dmabuf = NULL;
18497 /* get the oldest sequence on the rcv list */
18498 h_buf = list_get_first(&vport->rcv_buffer_list,
18499 struct lpfc_dmabuf, list);
18502 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18503 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18507 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18508 * @vport: The vport that the received sequences were sent to.
18510 * This function cleans up all outstanding received sequences. This is called
18511 * by the driver when a link event or user action invalidates all the received
18515 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18517 struct lpfc_dmabuf *h_buf, *hnext;
18518 struct lpfc_dmabuf *d_buf, *dnext;
18519 struct hbq_dmabuf *dmabuf = NULL;
18521 /* start with the oldest sequence on the rcv list */
18522 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18523 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18524 list_del_init(&dmabuf->hbuf.list);
18525 list_for_each_entry_safe(d_buf, dnext,
18526 &dmabuf->dbuf.list, list) {
18527 list_del_init(&d_buf->list);
18528 lpfc_in_buf_free(vport->phba, d_buf);
18530 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18535 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18536 * @vport: The vport that the received sequences were sent to.
18538 * This function determines whether any received sequences have timed out by
18539 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18540 * indicates that there is at least one timed out sequence this routine will
18541 * go through the received sequences one at a time from most inactive to most
18542 * active to determine which ones need to be cleaned up. Once it has determined
18543 * that a sequence needs to be cleaned up it will simply free up the resources
18544 * without sending an abort.
18547 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18549 struct lpfc_dmabuf *h_buf, *hnext;
18550 struct lpfc_dmabuf *d_buf, *dnext;
18551 struct hbq_dmabuf *dmabuf = NULL;
18552 unsigned long timeout;
18553 int abort_count = 0;
18555 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18556 vport->rcv_buffer_time_stamp);
18557 if (list_empty(&vport->rcv_buffer_list) ||
18558 time_before(jiffies, timeout))
18560 /* start with the oldest sequence on the rcv list */
18561 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18562 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18563 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18564 dmabuf->time_stamp);
18565 if (time_before(jiffies, timeout))
18568 list_del_init(&dmabuf->hbuf.list);
18569 list_for_each_entry_safe(d_buf, dnext,
18570 &dmabuf->dbuf.list, list) {
18571 list_del_init(&d_buf->list);
18572 lpfc_in_buf_free(vport->phba, d_buf);
18574 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18577 lpfc_update_rcv_time_stamp(vport);
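/*
 * Timeout math (worked example; the 2000 ms value is only a typical
 * E_D_TOV): a sequence whose newest frame arrived at time stamp T has
 * expired once
 *
 *	time_before(jiffies, msecs_to_jiffies(2000) + T)
 *
 * is false. Since young sequences are kept at the tail of the list, the
 * scan can stop at the first sequence that has not yet timed out.
 */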
18581 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18582 * @vport: pointer to a virtual port
18583 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18585 * This function searches through the existing incomplete sequences that have
18586 * been sent to this @vport. If the frame matches one of the incomplete
18587 * sequences then the dbuf in the @dmabuf is added to the list of frames that
18588 * make up that sequence. If no sequence is found that matches this frame then
18589 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
18590 * This function returns a pointer to the first dmabuf in the sequence list that
18591 * the frame was linked to.
18593 static struct hbq_dmabuf *
18594 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18596 struct fc_frame_header *new_hdr;
18597 struct fc_frame_header *temp_hdr;
18598 struct lpfc_dmabuf *d_buf;
18599 struct lpfc_dmabuf *h_buf;
18600 struct hbq_dmabuf *seq_dmabuf = NULL;
18601 struct hbq_dmabuf *temp_dmabuf = NULL;
18604 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18605 dmabuf->time_stamp = jiffies;
18606 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18608 /* Use the hdr_buf to find the sequence that this frame belongs to */
18609 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18610 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18611 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18612 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18613 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18615 /* found a pending sequence that matches this frame */
18616 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18621 * This indicates first frame received for this sequence.
18622 * Queue the buffer on the vport's rcv_buffer_list.
18624 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18625 lpfc_update_rcv_time_stamp(vport);
18628 temp_hdr = seq_dmabuf->hbuf.virt;
18629 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18630 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18631 list_del_init(&seq_dmabuf->hbuf.list);
18632 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18633 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18634 lpfc_update_rcv_time_stamp(vport);
18637 /* move this sequence to the tail to indicate a young sequence */
18638 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18639 seq_dmabuf->time_stamp = jiffies;
18640 lpfc_update_rcv_time_stamp(vport);
18641 if (list_empty(&seq_dmabuf->dbuf.list)) {
18642 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18645 /* find the correct place in the sequence to insert this frame */
18646 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18648 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18649 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18651 * If the frame's sequence count is greater than the frame on
18652 * the list then insert the frame right after this frame
18654 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18655 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18656 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18661 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18663 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18672 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18673 * @vport: pointer to a virtual port
18674 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18676 * This function tries to abort the partially assembled sequence described
18677 * by the information from the basic abort @dmabuf. It checks whether such a
18678 * partially assembled sequence is held by the driver. If so, it shall free up
18679 * all the frames from the partially assembled sequence.
18682 * true -- if a matching partially assembled sequence is present and all
18683 * the frames were freed with the sequence;
18684 * false -- if no matching partially assembled sequence is present, so
18685 * nothing was aborted in the lower layer driver
18688 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18689 struct hbq_dmabuf *dmabuf)
18691 struct fc_frame_header *new_hdr;
18692 struct fc_frame_header *temp_hdr;
18693 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18694 struct hbq_dmabuf *seq_dmabuf = NULL;
18696 /* Use the hdr_buf to find the sequence that matches this frame */
18697 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18698 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18699 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18700 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18701 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18702 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18703 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18704 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18706 /* found a pending sequence that matches this frame */
18707 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18711 /* Free up all the frames from the partially assembled sequence */
18713 list_for_each_entry_safe(d_buf, n_buf,
18714 &seq_dmabuf->dbuf.list, list) {
18715 list_del_init(&d_buf->list);
18716 lpfc_in_buf_free(vport->phba, d_buf);
18724 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18725 * @vport: pointer to a virtual port
18726 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18728 * This function tries to abort the assembled sequence at the upper level
18729 * protocol, described by the information from the basic abort @dmabuf. It
18730 * checks whether such a pending context exists at the upper level protocol.
18731 * If so, it shall clean up the pending context.
18734 * true -- if a matching pending context of the sequence was cleaned
18736 * false -- if no matching pending context of the sequence is present
18740 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18742 struct lpfc_hba *phba = vport->phba;
18745 /* Accepting abort at ulp with SLI4 only */
18746 if (phba->sli_rev < LPFC_SLI_REV4)
18749 /* Give all interested upper level protocols a chance to handle the abort */
18750 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18758 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18759 * @phba: Pointer to HBA context object.
18760 * @cmd_iocbq: pointer to the command iocbq structure.
18761 * @rsp_iocbq: pointer to the response iocbq structure.
18763 * This function handles the sequence abort response iocb command complete
18764 * event. It properly releases the memory allocated to the sequence abort
18768 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18769 struct lpfc_iocbq *cmd_iocbq,
18770 struct lpfc_iocbq *rsp_iocbq)
18772 struct lpfc_nodelist *ndlp;
18775 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18776 lpfc_nlp_put(ndlp);
18777 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18780 /* Failure means BLS ABORT RSP did not get delivered to remote node */
18781 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18783 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18784 rsp_iocbq->iocb.ulpStatus,
18785 rsp_iocbq->iocb.un.ulpWord[4]);
18789 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18790 * @phba: Pointer to HBA context object.
18791 * @xri: xri id in transaction.
18793 * This function validates that the xri maps to the known range of XRIs allocated and
18794 * used by the driver.
18797 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18802 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18803 if (xri == phba->sli4_hba.xri_ids[i])
18810 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18811 * @vport: pointer to a virtual port.
18812 * @fc_hdr: pointer to a FC frame header.
18813 * @aborted: was the partially assembled receive sequence successfully aborted
18815 * This function sends a basic response to a previous unsol sequence abort
18816 * event after aborting the sequence handling.
18819 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18820 struct fc_frame_header *fc_hdr, bool aborted)
18822 struct lpfc_hba *phba = vport->phba;
18823 struct lpfc_iocbq *ctiocb = NULL;
18824 struct lpfc_nodelist *ndlp;
18825 uint16_t oxid, rxid, xri, lxri;
18826 uint32_t sid, fctl;
18830 if (!lpfc_is_link_up(phba))
18833 sid = sli4_sid_from_fc_hdr(fc_hdr);
18834 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18835 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18837 ndlp = lpfc_findnode_did(vport, sid);
18839 ndlp = lpfc_nlp_init(vport, sid);
18841 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18842 "1268 Failed to allocate ndlp for "
18843 "oxid:x%x SID:x%x\n", oxid, sid);
18846 /* Put ndlp onto pport node list */
18847 lpfc_enqueue_node(vport, ndlp);
18850 /* Allocate buffer for rsp iocb */
18851 ctiocb = lpfc_sli_get_iocbq(phba);
18855 /* Extract the F_CTL field from FC_HDR */
18856 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18858 icmd = &ctiocb->iocb;
18859 icmd->un.xseq64.bdl.bdeSize = 0;
18860 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
18861 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
18862 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
18863 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
18865 /* Fill in the rest of iocb fields */
18866 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
18867 icmd->ulpBdeCount = 0;
18869 icmd->ulpClass = CLASS3;
18870 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
18871 ctiocb->context1 = lpfc_nlp_get(ndlp);
18872 if (!ctiocb->context1) {
18873 lpfc_sli_release_iocbq(phba, ctiocb);
18877 ctiocb->vport = phba->pport;
18878 ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18879 ctiocb->sli4_lxritag = NO_XRI;
18880 ctiocb->sli4_xritag = NO_XRI;
18882 if (fctl & FC_FC_EX_CTX) {
18883 /* Exchange responder sent the abort so we
18886 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18889 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18892 lxri = lpfc_sli4_xri_inrange(phba, xri);
18893 if (lxri != NO_XRI)
18894 lpfc_set_rrq_active(phba, ndlp, lxri,
18895 (xri == oxid) ? rxid : oxid, 0);
18896 /* For BA_ABTS from exchange responder, if the logical xri with
18897 * the oxid maps to the FCP XRI range, the port no longer has
18898 * that exchange context, send a BLS_RJT. Override the IOCB for
18901 if ((fctl & FC_FC_EX_CTX) &&
18902 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18903 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18904 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18905 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18906 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18909 /* If BA_ABTS failed to abort a partially assembled receive sequence,
18910 * the driver no longer has that exchange, send a BLS_RJT. Override
18911 * the IOCB for a BA_RJT.
18913 if (aborted == false) {
18914 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18915 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18916 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18917 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18920 if (fctl & FC_FC_EX_CTX) {
18921 /* ABTS sent by responder to CT exchange, construction
18922 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18923 * field and RX_ID from ABTS for RX_ID field.
18925 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
18927 /* ABTS sent by initiator to CT exchange, construction
18928 * of BA_ACC will need to allocate a new XRI as for the
18931 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
18933 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
18934 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
18936 /* Xmit CT abts response on exchange <xid> */
18937 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18938 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18939 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
18941 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18942 if (rc == IOCB_ERROR) {
18943 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18944 "2925 Failed to issue CT ABTS RSP x%x on "
18945 "xri x%x, Data x%x\n",
18946 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
18948 lpfc_nlp_put(ndlp);
18949 ctiocb->context1 = NULL;
18950 lpfc_sli_release_iocbq(phba, ctiocb);
18955 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18956 * @vport: Pointer to the vport on which this sequence was received
18957 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18959 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18960 * receive sequence is only partially assembled by the driver, it shall abort
18961 * the partially assembled frames for the sequence. Otherwise, if the
18962 * unsolicited receive sequence has been completely assembled and passed to
18963 * the Upper Layer Protocol (ULP), it marks the per-oxid status to indicate
18964 * that the unsolicited sequence has been aborted. After that, it will issue
18965 * a basic accept to accept the abort.
18968 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18969 struct hbq_dmabuf *dmabuf)
18971 struct lpfc_hba *phba = vport->phba;
18972 struct fc_frame_header fc_hdr;
18976 /* Make a copy of fc_hdr before the dmabuf being released */
18977 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18978 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18980 if (fctl & FC_FC_EX_CTX) {
18981 /* ABTS by responder to exchange, no cleanup needed */
18984 /* ABTS by initiator to exchange, need to do cleanup */
18985 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18986 if (aborted == false)
18987 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18989 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18991 if (phba->nvmet_support) {
18992 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18996 /* Respond with BA_ACC or BA_RJT accordingly */
18997 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
19001 * lpfc_seq_complete - Indicates if a sequence is complete
19002 * @dmabuf: pointer to a dmabuf that describes the FC sequence
19004 * This function checks the sequence, starting with the frame described by
19005 * @dmabuf, to see if all the frames associated with this sequence are present.
19006 * The frames associated with this sequence are linked to the @dmabuf using the
19007 * dbuf list. This function looks for three major things: 1) the first frame
19008 * has a sequence count of zero; 2) there is a frame with the last frame of
19009 * sequence bit set; and 3) there are no holes in the sequence count. The
19010 * function returns 1 when the sequence is complete, otherwise it returns 0.
19013 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
19015 struct fc_frame_header *hdr;
19016 struct lpfc_dmabuf *d_buf;
19017 struct hbq_dmabuf *seq_dmabuf;
19021 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19022 /* make sure first frame of sequence has a sequence count of zero */
19023 if (hdr->fh_seq_cnt != seq_count)
19025 fctl = (hdr->fh_f_ctl[0] << 16 |
19026 hdr->fh_f_ctl[1] << 8 |
19028 /* If last frame of sequence we can return success. */
19029 if (fctl & FC_FC_END_SEQ)
19031 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
19032 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19033 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19034 /* If there is a hole in the sequence count then fail. */
19035 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
19037 fctl = (hdr->fh_f_ctl[0] << 16 |
19038 hdr->fh_f_ctl[1] << 8 |
19040 /* If last frame of sequence we can return success. */
19041 if (fctl & FC_FC_END_SEQ)
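/*
 * Editor's sketch (illustrative only, not driver code): the completeness
 * rule applied by lpfc_seq_complete() above, restated over a plain array of
 * received SEQ_CNT values. The helper name and the array form are
 * hypothetical; the driver walks the dmabuf list instead.
 */
static inline bool lpfc_example_seq_complete(const u16 *seq_cnt, int nr_frames,
					     bool saw_end_of_seq)
{
	int i;

	/* the first frame of the sequence must carry SEQ_CNT 0 */
	if (nr_frames == 0 || seq_cnt[0] != 0)
		return false;
	/* no holes: counts must increase by exactly one per frame */
	for (i = 1; i < nr_frames; i++)
		if (seq_cnt[i] != seq_cnt[i - 1] + 1)
			return false;
	/* some frame must have had the F_CTL end-of-sequence bit set */
	return saw_end_of_seq;
}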
19048 * lpfc_prep_seq - Prep sequence for ULP processing
19049 * @vport: Pointer to the vport on which this sequence was received
19050 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
19052 * This function takes a sequence, described by a list of frames, and creates
19053 * a list of iocbq structures to describe the sequence. This iocbq list will be
19054 * used to issue to the generic unsolicited sequence handler. This routine
19055 * returns a pointer to the first iocbq in the list. If the function is unable
19056 * to allocate an iocbq then it throws out the received frames that could not
19057 * be described and returns a pointer to the first iocbq. If unable to
19058 * allocate any iocbqs (including the first) this function will return NULL.
19060 static struct lpfc_iocbq *
19061 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
19063 struct hbq_dmabuf *hbq_buf;
19064 struct lpfc_dmabuf *d_buf, *n_buf;
19065 struct lpfc_iocbq *first_iocbq, *iocbq;
19066 struct fc_frame_header *fc_hdr;
19068 uint32_t len, tot_len;
19069 struct ulp_bde64 *pbde;
19071 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19072 /* remove from receive buffer list */
19073 list_del_init(&seq_dmabuf->hbuf.list);
19074 lpfc_update_rcv_time_stamp(vport);
19075 /* get the Remote Port's SID */
19076 sid = sli4_sid_from_fc_hdr(fc_hdr);
19078 /* Get an iocbq struct to fill in. */
19079 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
19081 /* Initialize the first IOCB. */
19082 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
19083 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
19084 first_iocbq->vport = vport;
19086 /* Check FC Header to see what TYPE of frame we are rcv'ing */
19087 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
19088 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
19089 first_iocbq->iocb.un.rcvels.parmRo =
19090 sli4_did_from_fc_hdr(fc_hdr);
19091 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
19093 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
19094 first_iocbq->iocb.ulpContext = NO_XRI;
19095 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
19096 be16_to_cpu(fc_hdr->fh_ox_id);
19097 /* iocbq is prepped for internal consumption. Physical vpi. */
19098 first_iocbq->iocb.unsli3.rcvsli3.vpi =
19099 vport->phba->vpi_ids[vport->vpi];
19100 /* put the first buffer into the first IOCBq */
19101 tot_len = bf_get(lpfc_rcqe_length,
19102 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
19104 first_iocbq->context2 = &seq_dmabuf->dbuf;
19105 first_iocbq->context3 = NULL;
19106 first_iocbq->iocb.ulpBdeCount = 1;
19107 if (tot_len > LPFC_DATA_BUF_SIZE)
19108 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
19109 LPFC_DATA_BUF_SIZE;
19111 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
19113 first_iocbq->iocb.un.rcvels.remoteID = sid;
19115 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
19117 iocbq = first_iocbq;
19119 * Each IOCBq can have two Buffers assigned, so go through the list
19120 * of buffers for this sequence and save two buffers in each IOCBq
19122 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
19124 lpfc_in_buf_free(vport->phba, d_buf);
19127 if (!iocbq->context3) {
19128 iocbq->context3 = d_buf;
19129 iocbq->iocb.ulpBdeCount++;
19130 /* We need to get the size out of the right CQE */
19131 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19132 len = bf_get(lpfc_rcqe_length,
19133 &hbq_buf->cq_event.cqe.rcqe_cmpl);
19134 pbde = (struct ulp_bde64 *)
19135 &iocbq->iocb.unsli3.sli3Words[4];
19136 if (len > LPFC_DATA_BUF_SIZE)
19137 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
19139 pbde->tus.f.bdeSize = len;
19141 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
19144 iocbq = lpfc_sli_get_iocbq(vport->phba);
19147 first_iocbq->iocb.ulpStatus =
19148 IOSTAT_FCP_RSP_ERROR;
19149 first_iocbq->iocb.un.ulpWord[4] =
19150 IOERR_NO_RESOURCES;
19152 lpfc_in_buf_free(vport->phba, d_buf);
19155 /* We need to get the size out of the right CQE */
19156 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19157 len = bf_get(lpfc_rcqe_length,
19158 &hbq_buf->cq_event.cqe.rcqe_cmpl);
19159 iocbq->context2 = d_buf;
19160 iocbq->context3 = NULL;
19161 iocbq->iocb.ulpBdeCount = 1;
19162 if (len > LPFC_DATA_BUF_SIZE)
19163 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
19164 LPFC_DATA_BUF_SIZE;
19166 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
19169 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
19171 iocbq->iocb.un.rcvels.remoteID = sid;
19172 list_add_tail(&iocbq->list, &first_iocbq->list);
19175 /* Free the sequence's header buffer */
19177 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
19179 return first_iocbq;
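/*
 * Editor's sketch (illustrative only, not driver code): the buffer-packing
 * arithmetic used by lpfc_prep_seq() above. Each iocbq describes at most two
 * receive buffers (context2 and context3), and each BDE is clamped to
 * LPFC_DATA_BUF_SIZE. Helper names are hypothetical.
 */
static inline u32 lpfc_example_bde_size(u32 len)
{
	/* a single BDE never claims more than one buffer's worth of data */
	return min_t(u32, len, LPFC_DATA_BUF_SIZE);
}

static inline u32 lpfc_example_iocbqs_needed(u32 nr_frames)
{
	/* two buffers fit in each iocbq, so round the frame count up */
	return DIV_ROUND_UP(nr_frames, 2);
}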
19183 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
19184 struct hbq_dmabuf *seq_dmabuf)
19186 struct fc_frame_header *fc_hdr;
19187 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
19188 struct lpfc_hba *phba = vport->phba;
19190 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19191 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
19193 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19194 "2707 Ring %d handler: Failed to allocate "
19195 "iocb Rctl x%x Type x%x received\n",
19197 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19200 if (!lpfc_complete_unsol_iocb(phba,
19201 phba->sli4_hba.els_wq->pring,
19202 iocbq, fc_hdr->fh_r_ctl,
19203 fc_hdr->fh_type)) {
19204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19205 "2540 Ring %d handler: unexpected Rctl "
19206 "x%x Type x%x received\n",
19208 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19209 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
19212 /* Free iocb created in lpfc_prep_seq */
19213 list_for_each_entry_safe(curr_iocb, next_iocb,
19214 &iocbq->list, list) {
19215 list_del_init(&curr_iocb->list);
19216 lpfc_sli_release_iocbq(phba, curr_iocb);
19218 lpfc_sli_release_iocbq(phba, iocbq);
19222 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19223 struct lpfc_iocbq *rspiocb)
19225 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
19227 if (pcmd && pcmd->virt)
19228 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19230 lpfc_sli_release_iocbq(phba, cmdiocb);
19231 lpfc_drain_txq(phba);
19235 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19236 struct hbq_dmabuf *dmabuf)
19238 struct fc_frame_header *fc_hdr;
19239 struct lpfc_hba *phba = vport->phba;
19240 struct lpfc_iocbq *iocbq = NULL;
19241 union lpfc_wqe *wqe;
19242 struct lpfc_dmabuf *pcmd = NULL;
19243 uint32_t frame_len;
19245 unsigned long iflags;
19247 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19248 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19250 /* Send the received frame back */
19251 iocbq = lpfc_sli_get_iocbq(phba);
19253 /* Queue cq event and wakeup worker thread to process it */
19254 spin_lock_irqsave(&phba->hbalock, iflags);
19255 list_add_tail(&dmabuf->cq_event.list,
19256 &phba->sli4_hba.sp_queue_event);
19257 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19258 spin_unlock_irqrestore(&phba->hbalock, iflags);
19259 lpfc_worker_wake_up(phba);
19263 /* Allocate buffer for command payload */
19264 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19266 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19268 if (!pcmd || !pcmd->virt)
19271 INIT_LIST_HEAD(&pcmd->list);
19273 /* copyin the payload */
19274 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19276 /* fill in BDE's for command */
19277 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
19278 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
19279 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
19280 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
19282 iocbq->context2 = pcmd;
19283 iocbq->vport = vport;
19284 iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19285 iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19288 * Setup rest of the iocb as though it were a WQE
19289 * Build the SEND_FRAME WQE
19291 wqe = (union lpfc_wqe *)&iocbq->iocb;
19293 wqe->send_frame.frame_len = frame_len;
19294 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
19295 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
19296 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
19297 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
19298 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
19299 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
19301 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
19302 iocbq->iocb.ulpLe = 1;
19303 iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19304 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19305 if (rc == IOCB_ERROR)
19308 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19312 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19313 "2023 Unable to process MDS loopback frame\n");
19314 if (pcmd && pcmd->virt)
19315 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19318 lpfc_sli_release_iocbq(phba, iocbq);
19319 lpfc_in_buf_free(phba, &dmabuf->dbuf);
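/*
 * Editor's sketch (illustrative only, not driver code): how the SEND_FRAME
 * WQE above carries the 24-byte FC header as six host-order words
 * (fc_hdr_wd0..fc_hdr_wd5). The helper name is hypothetical.
 */
static inline void lpfc_example_fc_hdr_to_words(u32 wd[6],
						const struct fc_frame_header *fc_hdr)
{
	const __be32 *src = (const __be32 *)fc_hdr;
	int i;

	/* the header arrives big-endian off the wire; the WQE wants host order */
	for (i = 0; i < 6; i++)
		wd[i] = be32_to_cpu(src[i]);
}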
19323 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19324 * @phba: Pointer to HBA context object.
19325 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19327 * This function is called with no lock held. It processes all the
19328 * received buffers and gives them to the upper layer when a received buffer
19329 * indicates that it is the final frame in the sequence. The interrupt
19330 * service routine processes received buffers in interrupt context. The
19331 * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
19332 * appropriate receive function when the final frame in a sequence is received.
19335 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19336 struct hbq_dmabuf *dmabuf)
19338 struct hbq_dmabuf *seq_dmabuf;
19339 struct fc_frame_header *fc_hdr;
19340 struct lpfc_vport *vport;
19344 /* Process each received buffer */
19345 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19347 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19348 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19349 vport = phba->pport;
19350 /* Handle MDS Loopback frames */
19351 if (!(phba->pport->load_flag & FC_UNLOADING))
19352 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19354 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19358 /* check to see if this a valid type of frame */
19359 if (lpfc_fc_frame_check(phba, fc_hdr)) {
19360 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19364 if ((bf_get(lpfc_cqe_code,
19365 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19366 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19367 &dmabuf->cq_event.cqe.rcqe_cmpl);
19369 fcfi = bf_get(lpfc_rcqe_fcf_id,
19370 &dmabuf->cq_event.cqe.rcqe_cmpl);
19372 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS && fc_hdr->fh_type == 0xFF) {
19373 vport = phba->pport;
19374 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19375 "2023 MDS Loopback %d bytes\n",
19376 bf_get(lpfc_rcqe_length,
19377 &dmabuf->cq_event.cqe.rcqe_cmpl));
19378 /* Handle MDS Loopback frames */
19379 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19383 /* d_id this frame is directed to */
19384 did = sli4_did_from_fc_hdr(fc_hdr);
19386 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19388 /* throw out the frame */
19389 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19393 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19394 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19395 (did != Fabric_DID)) {
19397 * Throw out the frame if we are not pt2pt.
19398 * The pt2pt protocol allows for discovery frames
19399 * to be received without a registered VPI.
19401 if (!(vport->fc_flag & FC_PT2PT) ||
19402 (phba->link_state == LPFC_HBA_READY)) {
19403 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19408 /* Handle the basic abort sequence (BA_ABTS) event */
19409 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19410 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19414 /* Link this frame */
19415 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19417 /* unable to add frame to vport - throw it out */
19418 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19421 /* If not last frame in sequence continue processing frames. */
19422 if (!lpfc_seq_complete(seq_dmabuf))
19425 /* Send the complete sequence to the upper layer protocol */
19426 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19430 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19431 * @phba: pointer to lpfc hba data structure.
19433 * This routine is invoked to post rpi header templates to the
19434 * HBA consistent with the SLI-4 interface spec. This routine
19435 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19436 * SLI4_PAGE_SIZE / 64 rpi context headers.
19438 * This routine does not require any locks. Its usage is expected
19439 * to be driver load or reset recovery when the driver is active.
19444 * -EIO - The mailbox failed to complete successfully.
19445 * When this error occurs, the driver is not guaranteed
19446 * to have any rpi regions posted to the device and
19447 * must either attempt to repost the regions or take a fatal error.
19451 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19453 struct lpfc_rpi_hdr *rpi_page;
19457 /* SLI4 ports that support extents do not require RPI headers. */
19458 if (!phba->sli4_hba.rpi_hdrs_in_use)
19460 if (phba->sli4_hba.extents_in_use)
19463 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19465 * Assign the rpi headers a physical rpi only if the driver
19466 * has not initialized those resources. A port reset only
19467 * needs the headers posted.
19469 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19471 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19473 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19474 if (rc != MBX_SUCCESS) {
19475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19476 "2008 Error %d posting all rpi "
19484 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19485 LPFC_RPI_RSRC_RDY);
19490 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19491 * @phba: pointer to lpfc hba data structure.
19492 * @rpi_page: pointer to the rpi memory region.
19494 * This routine is invoked to post a single rpi header to the
19495 * HBA consistent with the SLI-4 interface spec. This memory region
19496 * maps up to 64 rpi context regions.
19500 * -ENOMEM - No available memory
19501 * -EIO - The mailbox failed to complete successfully.
19504 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19506 LPFC_MBOXQ_t *mboxq;
19507 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19509 uint32_t shdr_status, shdr_add_status;
19510 union lpfc_sli4_cfg_shdr *shdr;
19512 /* SLI4 ports that support extents do not require RPI headers. */
19513 if (!phba->sli4_hba.rpi_hdrs_in_use)
19515 if (phba->sli4_hba.extents_in_use)
19518 /* The port is notified of the header region via a mailbox command. */
19519 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19521 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19522 "2001 Unable to allocate memory for issuing "
19523 "SLI_CONFIG_SPECIAL mailbox command\n");
19527 /* Post all rpi memory regions to the port. */
19528 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19529 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19530 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19531 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19532 sizeof(struct lpfc_sli4_cfg_mhdr),
19533 LPFC_SLI4_MBX_EMBED);
19536 /* Post the physical rpi to the port for this rpi header. */
19537 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19538 rpi_page->start_rpi);
19539 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19540 hdr_tmpl, rpi_page->page_count);
19542 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19543 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19544 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19545 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19546 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19547 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19548 mempool_free(mboxq, phba->mbox_mem_pool);
19549 if (shdr_status || shdr_add_status || rc) {
19550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19551 "2514 POST_RPI_HDR mailbox failed with "
19552 "status x%x add_status x%x, mbx status x%x\n",
19553 shdr_status, shdr_add_status, rc);
19557 * The next_rpi stores the next logical modulo-64 rpi value used
19558 * to post physical rpis in subsequent rpi postings.
19560 spin_lock_irq(&phba->hbalock);
19561 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19562 spin_unlock_irq(&phba->hbalock);
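/*
 * Editor's sketch (illustrative only, not driver code): the pass/fail test
 * applied to an SLI_CONFIG sub-header after a polled mailbox completes, as
 * done above. The helper name is hypothetical.
 */
static inline bool lpfc_example_cfg_shdr_failed(union lpfc_sli4_cfg_shdr *shdr,
						int mbx_rc)
{
	/* success requires a clean mailbox rc and zero status and
	 * add_status fields in the response sub-header
	 */
	return mbx_rc || bf_get(lpfc_mbox_hdr_status, &shdr->response) ||
	       bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
}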
19568 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19569 * @phba: pointer to lpfc hba data structure.
19571 * This routine is invoked to allocate the next available rpi from the
19572 * driver's rpi bitmask. If the allocation leaves the driver running
19573 * low on rpi resources, it will also attempt to grow the pool by
19574 * posting another rpi header page to the port.
19577 * Returns a nonzero rpi, defined as rpi_base <= rpi < max_rpi, on success,
19578 * or LPFC_RPI_ALLOC_ERROR if no rpis are available.
19581 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19584 uint16_t max_rpi, rpi_limit;
19585 uint16_t rpi_remaining, lrpi = 0;
19586 struct lpfc_rpi_hdr *rpi_hdr;
19587 unsigned long iflag;
19590 * Fetch the next logical rpi. Because this index is logical,
19591 * the driver starts at 0 each time.
19593 spin_lock_irqsave(&phba->hbalock, iflag);
19594 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19595 rpi_limit = phba->sli4_hba.next_rpi;
19597 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
19598 if (rpi >= rpi_limit)
19599 rpi = LPFC_RPI_ALLOC_ERROR;
19601 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19602 phba->sli4_hba.max_cfg_param.rpi_used++;
19603 phba->sli4_hba.rpi_count++;
19605 lpfc_printf_log(phba, KERN_INFO,
19606 LOG_NODE | LOG_DISCOVERY,
19607 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19608 (int) rpi, max_rpi, rpi_limit);
19611 * Don't try to allocate more rpi header regions if the device limit
19612 * has been exhausted.
19614 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19615 (phba->sli4_hba.rpi_count >= max_rpi)) {
19616 spin_unlock_irqrestore(&phba->hbalock, iflag);
19621 * RPI header postings are not required for SLI4 ports capable of
19624 if (!phba->sli4_hba.rpi_hdrs_in_use) {
19625 spin_unlock_irqrestore(&phba->hbalock, iflag);
19630 * If the driver is running low on rpi resources, allocate another
19631 * page now. Note that the next_rpi value is used because
19632 * it represents how many are actually in use whereas max_rpi notes
19633 * the maximum number supported by the device.
19635 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19636 spin_unlock_irqrestore(&phba->hbalock, iflag);
19637 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19638 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19640 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19641 "2002 Error Could not grow rpi "
19644 lrpi = rpi_hdr->start_rpi;
19645 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19646 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
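/*
 * Editor's sketch (illustrative only, not driver code): the bitmap-based id
 * allocation pattern used by lpfc_sli4_alloc_rpi() above. The caller is
 * assumed to hold the lock protecting @bmask; the helper name is
 * hypothetical.
 */
static inline long lpfc_example_alloc_id(unsigned long *bmask,
					 unsigned long limit)
{
	unsigned long id = find_next_zero_bit(bmask, limit, 0);

	if (id >= limit)
		return -1;		/* pool exhausted */
	set_bit(id, bmask);		/* mark the id as in use */
	return id;
}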
19654 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19655 * @phba: pointer to lpfc hba data structure.
19656 * @rpi: rpi to free
19658 * This routine is invoked to release an rpi to the pool of
19659 * available rpis maintained by the driver.
19662 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19665 * if the rpi value indicates a prior unreg has already
19666 * been done, skip the unreg.
19668 if (rpi == LPFC_RPI_ALLOC_ERROR)
19671 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19672 phba->sli4_hba.rpi_count--;
19673 phba->sli4_hba.max_cfg_param.rpi_used--;
19675 lpfc_printf_log(phba, KERN_INFO,
19676 LOG_NODE | LOG_DISCOVERY,
19677 "2016 rpi %x not inuse\n",
19683 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19684 * @phba: pointer to lpfc hba data structure.
19685 * @rpi: rpi to free
19687 * This routine is invoked to release an rpi to the pool of
19688 * available rpis maintained by the driver.
19691 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19693 spin_lock_irq(&phba->hbalock);
19694 __lpfc_sli4_free_rpi(phba, rpi);
19695 spin_unlock_irq(&phba->hbalock);
19699 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19700 * @phba: pointer to lpfc hba data structure.
19702 * This routine is invoked to free the memory regions (rpi bitmask and
19703 * rpi id array) that provided rpis via a bitmask.
19706 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19708 kfree(phba->sli4_hba.rpi_bmask);
19709 kfree(phba->sli4_hba.rpi_ids);
19710 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19714 * lpfc_sli4_resume_rpi - Resume an rpi with the port
19715 * @ndlp: pointer to lpfc nodelist data structure.
19716 * @cmpl: completion call-back.
19717 * @arg: data to load as MBox 'caller buffer information'
19719 * This routine is invoked to issue a RESUME_RPI mailbox command to
19720 * resume the rpi associated with @ndlp on the port.
19723 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19724 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19726 LPFC_MBOXQ_t *mboxq;
19727 struct lpfc_hba *phba = ndlp->phba;
19730 /* The port is notified of the header region via a mailbox command. */
19731 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19735 /* If cmpl assigned, then this nlp_get pairs with
19736 * lpfc_mbx_cmpl_resume_rpi.
19738 * Else cmpl is NULL, then this nlp_get pairs with
19739 * lpfc_sli_def_mbox_cmpl.
19741 if (!lpfc_nlp_get(ndlp)) {
19742 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19743 "2122 %s: Failed to get nlp ref\n",
19745 mempool_free(mboxq, phba->mbox_mem_pool);
19749 /* Post all rpi memory regions to the port. */
19750 lpfc_resume_rpi(mboxq, ndlp);
19752 mboxq->mbox_cmpl = cmpl;
19753 mboxq->ctx_buf = arg;
19755 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19756 mboxq->ctx_ndlp = ndlp;
19757 mboxq->vport = ndlp->vport;
19758 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19759 if (rc == MBX_NOT_FINISHED) {
19760 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19761 "2010 Resume RPI Mailbox failed "
19762 "status %d, mbxStatus x%x\n", rc,
19763 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19764 lpfc_nlp_put(ndlp);
19765 mempool_free(mboxq, phba->mbox_mem_pool);
19772 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19773 * @vport: Pointer to the vport for which the vpi is being initialized
19775 * This routine is invoked to activate a vpi with the port.
19779 * -Evalue (a negative error code) otherwise
19782 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19784 LPFC_MBOXQ_t *mboxq;
19786 int retval = MBX_SUCCESS;
19788 struct lpfc_hba *phba = vport->phba;
19789 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19792 lpfc_init_vpi(phba, mboxq, vport->vpi);
19793 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19794 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19795 if (rc != MBX_SUCCESS) {
19796 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19797 "2022 INIT VPI Mailbox failed "
19798 "status %d, mbxStatus x%x\n", rc,
19799 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19802 if (rc != MBX_TIMEOUT)
19803 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19809 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19810 * @phba: pointer to lpfc hba data structure.
19811 * @mboxq: Pointer to mailbox object.
19813 * This routine is the completion handler for the manual ADD_FCF_RECORD
19814 * mailbox command. It checks the mailbox completion status and takes
19815 * care of freeing the nonembedded mailbox resources.
19818 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19821 union lpfc_sli4_cfg_shdr *shdr;
19822 uint32_t shdr_status, shdr_add_status;
19824 virt_addr = mboxq->sge_array->addr[0];
19825 /* The IOCTL status is embedded in the mailbox subheader. */
19826 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19827 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19828 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19830 if ((shdr_status || shdr_add_status) &&
19831 (shdr_status != STATUS_FCF_IN_USE))
19832 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19833 "2558 ADD_FCF_RECORD mailbox failed with "
19834 "status x%x add_status x%x\n",
19835 shdr_status, shdr_add_status);
19837 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19841 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19842 * @phba: pointer to lpfc hba data structure.
19843 * @fcf_record: pointer to the initialized fcf record to add.
19845 * This routine is invoked to manually add a single FCF record. The caller
19846 * must pass a completely initialized FCF_Record. This routine takes
19847 * care of the nonembedded mailbox operations.
19850 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19853 LPFC_MBOXQ_t *mboxq;
19856 struct lpfc_mbx_sge sge;
19857 uint32_t alloc_len, req_len;
19860 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19862 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19863 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19867 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19870 /* Allocate DMA memory and set up the non-embedded mailbox command */
19871 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19872 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19873 req_len, LPFC_SLI4_MBX_NEMBED);
19874 if (alloc_len < req_len) {
19875 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19876 "2523 Allocated DMA memory size (x%x) is "
19877 "less than the requested DMA memory "
19878 "size (x%x)\n", alloc_len, req_len);
19879 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19884 * Get the first SGE entry from the non-embedded DMA memory. This
19885 * routine only uses a single SGE.
19887 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19888 virt_addr = mboxq->sge_array->addr[0];
19890 * Configure the FCF record for FCFI 0. This is the driver's
19891 * hardcoded default and gets used in nonFIP mode.
19893 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19894 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19895 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19898 * Copy the fcf_index and the FCF Record Data. The data starts after
19899 * the FCoE header plus word10. The data copy needs to be endian correct.
19902 bytep += sizeof(uint32_t);
19903 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19904 mboxq->vport = phba->pport;
19905 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19906 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19907 if (rc == MBX_NOT_FINISHED) {
19908 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19909 "2515 ADD_FCF_RECORD mailbox failed with "
19910 "status 0x%x\n", rc);
19911 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19920 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19921 * @phba: pointer to lpfc hba data structure.
19922 * @fcf_record: pointer to the fcf record to write the default data.
19923 * @fcf_index: FCF table entry index.
19925 * This routine is invoked to build the driver's default FCF record. The
19926 * values used are hardcoded. This routine handles memory initialization.
19930 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19931 struct fcf_record *fcf_record,
19932 uint16_t fcf_index)
19934 memset(fcf_record, 0, sizeof(struct fcf_record));
19935 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19936 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19937 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19938 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19939 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19940 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19941 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19942 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19943 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19944 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19945 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19946 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19947 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19948 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19949 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19950 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19951 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19952 /* Set the VLAN bit map */
19953 if (phba->valid_vlan) {
19954 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19955 = 1 << (phba->vlan_id % 8);
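/*
 * Editor's sketch (illustrative only, not driver code): the byte/bit
 * arithmetic behind the vlan_bitmap assignment above. For example,
 * vlan_id 100 lands in byte 100 / 8 = 12, bit 100 % 8 = 4. The helper name
 * is hypothetical; the driver assigns rather than ORs because it records a
 * single VLAN.
 */
static inline void lpfc_example_set_vlan_bit(u8 *bitmap, u16 vlan_id)
{
	bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}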
19960 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19961 * @phba: pointer to lpfc hba data structure.
19962 * @fcf_index: FCF table entry offset.
19964 * This routine is invoked to scan the entire FCF table by reading FCF
19965 * record and processing it one at a time starting from the @fcf_index
19966 * for initial FCF discovery or fast FCF failover rediscovery.
19968 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
19972 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19975 LPFC_MBOXQ_t *mboxq;
19977 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19978 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19979 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19981 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19982 "2000 Failed to allocate mbox for "
19985 goto fail_fcf_scan;
19987 /* Construct the read FCF record mailbox command */
19988 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19991 goto fail_fcf_scan;
19993 /* Issue the mailbox command asynchronously */
19994 mboxq->vport = phba->pport;
19995 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19997 spin_lock_irq(&phba->hbalock);
19998 phba->hba_flag |= FCF_TS_INPROG;
19999 spin_unlock_irq(&phba->hbalock);
20001 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20002 if (rc == MBX_NOT_FINISHED)
20005 /* Reset eligible FCF count for new scan */
20006 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20007 phba->fcf.eligible_fcf_cnt = 0;
20013 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20014 /* FCF scan failed, clear FCF_TS_INPROG flag */
20015 spin_lock_irq(&phba->hbalock);
20016 phba->hba_flag &= ~FCF_TS_INPROG;
20017 spin_unlock_irq(&phba->hbalock);
20023 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20024 * @phba: pointer to lpfc hba data structure.
20025 * @fcf_index: FCF table entry offset.
20027 * This routine is invoked to read an FCF record indicated by @fcf_index
20028 * and to use it for FLOGI roundrobin FCF failover.
20030 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
20034 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20037 LPFC_MBOXQ_t *mboxq;
20039 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20041 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20042 "2763 Failed to allocate mbox for "
20045 goto fail_fcf_read;
20047 /* Construct the read FCF record mailbox command */
20048 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20051 goto fail_fcf_read;
20053 /* Issue the mailbox command asynchronously */
20054 mboxq->vport = phba->pport;
20055 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20056 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20057 if (rc == MBX_NOT_FINISHED)
20063 if (error && mboxq)
20064 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20069 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20070 * @phba: pointer to lpfc hba data structure.
20071 * @fcf_index: FCF table entry offset.
20073 * This routine is invoked to read an FCF record indicated by @fcf_index to
20074 * determine whether it's eligible for the FLOGI roundrobin failover list.
20076 * Return 0 if the mailbox command is submitted successfully, nonzero otherwise.
20080 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20083 LPFC_MBOXQ_t *mboxq;
20085 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20087 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20088 "2758 Failed to allocate mbox for "
20091 goto fail_fcf_read;
20093 /* Construct the read FCF record mailbox command */
20094 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20097 goto fail_fcf_read;
20099 /* Issue the mailbox command asynchronously */
20100 mboxq->vport = phba->pport;
20101 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20102 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20103 if (rc == MBX_NOT_FINISHED)
20109 if (error && mboxq)
20110 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20115 * lpfc_check_next_fcf_pri_level - Move to the next FCF priority level
20116 * @phba: pointer to the lpfc_hba struct for this port.
20117 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
20118 * routine when the rr_bmask is empty. The FCF indices are put into the
20119 * rr_bmask based on their priority level, starting from the highest
20120 * priority down to the lowest. The most likely FCF candidate will be in
20121 * the highest priority group. When this routine is called it searches the
20122 * fcf_pri list for the next lowest priority group and repopulates the
20125 * rr_bmask with only those entries. Returns 1 on success, 0 on failure.
20128 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20130 uint16_t next_fcf_pri;
20131 uint16_t last_index;
20132 struct lpfc_fcf_pri *fcf_pri;
20136 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20137 LPFC_SLI4_FCF_TBL_INDX_MAX);
20138 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20139 "3060 Last IDX %d\n", last_index);
20141 /* Verify the priority list has 2 or more entries */
20142 spin_lock_irq(&phba->hbalock);
20143 if (list_empty(&phba->fcf.fcf_pri_list) ||
20144 list_is_singular(&phba->fcf.fcf_pri_list)) {
20145 spin_unlock_irq(&phba->hbalock);
20146 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20147 "3061 Last IDX %d\n", last_index);
20148 return 0; /* Empty rr list */
20150 spin_unlock_irq(&phba->hbalock);
20154 * Clear the rr_bmask and set all of the bits that are at this
20157 memset(phba->fcf.fcf_rr_bmask, 0,
20158 sizeof(*phba->fcf.fcf_rr_bmask));
20159 spin_lock_irq(&phba->hbalock);
20160 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20161 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20164 * the 1st priority that has not FLOGI failed
20165 * will be the highest.
20168 next_fcf_pri = fcf_pri->fcf_rec.priority;
20169 spin_unlock_irq(&phba->hbalock);
20170 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20171 rc = lpfc_sli4_fcf_rr_index_set(phba,
20172 fcf_pri->fcf_rec.fcf_index);
20176 spin_lock_irq(&phba->hbalock);
20179 * If next_fcf_pri was not set above and the list is not empty, then
20180 * we have failed FLOGIs on all of the entries. Reset the FLOGI-failed
20181 * flags and start at the beginning.
20183 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20184 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20185 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20187 * the 1st priority that has not FLOGI failed
20188 * will be the highest.
20191 next_fcf_pri = fcf_pri->fcf_rec.priority;
20192 spin_unlock_irq(&phba->hbalock);
20193 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20194 rc = lpfc_sli4_fcf_rr_index_set(phba,
20195 fcf_pri->fcf_rec.fcf_index);
20199 spin_lock_irq(&phba->hbalock);
20203 spin_unlock_irq(&phba->hbalock);
20208 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20209 * @phba: pointer to lpfc hba data structure.
20211 * This routine is to get the next eligible FCF record index in a round
20212 * robin fashion. If the next eligible FCF record index equals the
20213 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20214 * shall be returned, otherwise, the next eligible FCF record's index
20215 * shall be returned.
20218 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20220 uint16_t next_fcf_index;
20223 /* Search start from next bit of currently registered FCF index */
20224 next_fcf_index = phba->fcf.current_rec.fcf_indx;
20227 /* Determine the next fcf index to check */
20228 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20229 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20230 LPFC_SLI4_FCF_TBL_INDX_MAX,
20233 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
20234 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20236 * If we have wrapped then we need to clear the bits that
20237 * have been tested so that we can detect when we should
20238 * change the priority level.
20240 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20241 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
20245 /* Check roundrobin failover list empty condition */
20246 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20247 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20249 * If the next fcf index is not found, check if there are lower
20250 * priority level fcf's in the fcf_priority list.
20251 * Set up the rr_bmask with all of the available fcf bits
20252 * at that level and continue the selection process.
20254 if (lpfc_check_next_fcf_pri_level(phba))
20255 goto initial_priority;
20256 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20257 "2844 No roundrobin failover FCF available\n");
20259 return LPFC_FCOE_FCF_NEXT_NONE;
20262 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20263 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20264 LPFC_FCF_FLOGI_FAILED) {
20265 if (list_is_singular(&phba->fcf.fcf_pri_list))
20266 return LPFC_FCOE_FCF_NEXT_NONE;
20268 goto next_priority;
20271 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20272 "2845 Get next roundrobin failover FCF (x%x)\n",
20275 return next_fcf_index;
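/*
 * Editor's sketch (illustrative only, not driver code): the wrap-around
 * search performed by lpfc_sli4_fcf_rr_next_index_get() above. A return
 * value >= @max means the bmask is empty. The helper name is hypothetical.
 */
static inline u16 lpfc_example_rr_next(const unsigned long *bmask, u16 max,
				       u16 cur)
{
	/* start one past the current index, modulo the table size */
	u16 next = find_next_bit(bmask, max, (cur + 1) % max);

	/* ran past the end: wrap and rescan from bit 0 */
	if (next >= max)
		next = find_next_bit(bmask, max, 0);
	return next;
}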
20279 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20280 * @phba: pointer to lpfc hba data structure.
20281 * @fcf_index: index into the FCF table to 'set'
20283 * This routine sets the FCF record index in to the eligible bmask for
20284 * roundrobin failover search. It checks to make sure that the index
20285 * does not go beyond the range of the driver allocated bmask dimension
20286 * before setting the bit.
20288 * Returns 0 if the index bit is successfully set; otherwise, it returns -EINVAL.
20292 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20294 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20295 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20296 "2610 FCF (x%x) reached driver's book "
20297 "keeping dimension:x%x\n",
20298 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20301 /* Set the eligible FCF record index bmask */
20302 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20304 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20305 "2790 Set FCF (x%x) to roundrobin FCF failover "
20306 "bmask\n", fcf_index);
20312 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20313 * @phba: pointer to lpfc hba data structure.
20314 * @fcf_index: index into the FCF table to 'clear'
20316 * This routine clears the FCF record index from the eligible bmask for
20317 * roundrobin failover search. It checks to make sure that the index
20318 * does not go beyond the range of the driver allocated bmask dimension
20319 * before clearing the bit.
20322 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20324 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20325 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20326 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20327 "2762 FCF (x%x) reached driver's book "
20328 "keeping dimension:x%x\n",
20329 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20332 /* Clear the eligible FCF record index bmask */
20333 spin_lock_irq(&phba->hbalock);
20334 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20336 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20337 list_del_init(&fcf_pri->list);
20341 spin_unlock_irq(&phba->hbalock);
20342 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20344 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20345 "2791 Clear FCF (x%x) from roundrobin failover "
20346 "bmask\n", fcf_index);
20350 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20351 * @phba: pointer to lpfc hba data structure.
20352 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20354 * This routine is the completion routine for the rediscover FCF table mailbox
20355 * command. On failure it falls back to discovery retry or the FCF-dead
20356 * failthrough; on success it starts the FCF rediscovery wait timer.
20359 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20361 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20362 uint32_t shdr_status, shdr_add_status;
20364 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20366 shdr_status = bf_get(lpfc_mbox_hdr_status,
20367 &redisc_fcf->header.cfg_shdr.response);
20368 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20369 &redisc_fcf->header.cfg_shdr.response);
20370 if (shdr_status || shdr_add_status) {
20371 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20372 "2746 Requesting for FCF rediscovery failed "
20373 "status x%x add_status x%x\n",
20374 shdr_status, shdr_add_status);
20375 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20376 spin_lock_irq(&phba->hbalock);
20377 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20378 spin_unlock_irq(&phba->hbalock);
20380 * CVL event triggered FCF rediscover request failed,
20381 * last resort to re-try current registered FCF entry.
20383 lpfc_retry_pport_discovery(phba);
20385 spin_lock_irq(&phba->hbalock);
20386 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20387 spin_unlock_irq(&phba->hbalock);
20389 * DEAD FCF event triggered FCF rediscover request
20390 * failed, last resort to fail over as a link down
20391 * to FCF registration.
20393 lpfc_sli4_fcf_dead_failthrough(phba);
20396 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20397 "2775 Start FCF rediscover quiescent timer\n");
20399 * Start the FCF rediscovery wait timer for the pending FCF
20400 * before rescanning the FCF record table.
20402 lpfc_fcf_redisc_wait_start_timer(phba);
20405 mempool_free(mbox, phba->mbox_mem_pool);
20409 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20410 * @phba: pointer to lpfc hba data structure.
20412 * This routine is invoked to request rediscovery of the entire FCF table by the port.
20416 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20418 LPFC_MBOXQ_t *mbox;
20419 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20422 /* Cancel retry delay timers to all vports before FCF rediscover */
20423 lpfc_cancel_all_vport_retry_delay_timer(phba);
20425 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20428 "2745 Failed to allocate mbox for "
20429 "requesting FCF rediscover.\n");
20433 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20434 sizeof(struct lpfc_sli4_cfg_mhdr));
20435 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20436 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20437 length, LPFC_SLI4_MBX_EMBED);
20439 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20440 /* Set count to 0 for invalidating the entire FCF database */
20441 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20443 /* Issue the mailbox command asynchronously */
20444 mbox->vport = phba->pport;
20445 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20446 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20448 if (rc == MBX_NOT_FINISHED) {
20449 mempool_free(mbox, phba->mbox_mem_pool);
20456 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20457 * @phba: pointer to lpfc hba data structure.
20459 * This function is the failover routine as a last resort to the FCF DEAD
20460 * event when the driver failed to perform fast FCF failover.
20463 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20465 uint32_t link_state;
20468 * Last resort as FCF DEAD event failover will treat this as
20469 * a link down, but save the link state because we don't want
20470 * it to be changed to Link Down unless it is already down.
20472 link_state = phba->link_state;
20473 lpfc_linkdown(phba);
20474 phba->link_state = link_state;
20476 /* Unregister FCF if no devices connected to it */
20477 lpfc_unregister_unused_fcf(phba);
20481 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20482 * @phba: pointer to lpfc hba data structure.
20483 * @rgn23_data: pointer to configure region 23 data.
20485 * This function gets the SLI3 port configuration region 23 data through a
20486 * memory dump mailbox command. When it successfully retrieves data, the size
20487 * of the data is returned; otherwise, 0 is returned.
20490 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20492 LPFC_MBOXQ_t *pmb = NULL;
20494 uint32_t offset = 0;
20500 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20502 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20503 "2600 failed to allocate mailbox memory\n");
20509 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20510 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20512 if (rc != MBX_SUCCESS) {
20513 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20514 "2601 failed to read config "
20515 "region 23, rc 0x%x Status 0x%x\n",
20516 rc, mb->mbxStatus);
20517 mb->un.varDmp.word_cnt = 0;
20520 * dump mem may return a zero when finished or we got a
20521 * mailbox error, either way we are done.
20523 if (mb->un.varDmp.word_cnt == 0)
20526 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20527 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20529 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20530 rgn23_data + offset,
20531 mb->un.varDmp.word_cnt);
20532 offset += mb->un.varDmp.word_cnt;
20533 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20535 mempool_free(pmb, phba->mbox_mem_pool);
20540 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20541 * @phba: pointer to lpfc hba data structure.
20542 * @rgn23_data: pointer to configure region 23 data.
20544 * This function gets the SLI4 port configuration region 23 data through a
20545 * memory dump mailbox command. When it successfully retrieves data, the size
20546 * of the data is returned; otherwise, 0 is returned.
20549 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20551 LPFC_MBOXQ_t *mboxq = NULL;
20552 struct lpfc_dmabuf *mp = NULL;
20553 struct lpfc_mqe *mqe;
20554 uint32_t data_length = 0;
20560 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20562 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20563 "3105 failed to allocate mailbox memory\n");
20567 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20569 mqe = &mboxq->u.mqe;
20570 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20571 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20574 data_length = mqe->un.mb_words[5];
20575 if (data_length == 0)
20577 if (data_length > DMP_RGN23_SIZE) {
20581 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20583 mempool_free(mboxq, phba->mbox_mem_pool);
20585 lpfc_mbuf_free(phba, mp->virt, mp->phys);
20588 return data_length;
20592 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20593 * @phba: pointer to lpfc hba data structure.
20595 * This function reads region 23 and parses the TLV for port status to
20596 * decide if the user disabled the port. If the TLV indicates the
20597 * port is disabled, the hba_flag is set accordingly.
20600 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20602 uint8_t *rgn23_data = NULL;
20603 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20604 uint32_t offset = 0;
20606 /* Get adapter Region 23 data */
20607 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20611 if (phba->sli_rev < LPFC_SLI_REV4)
20612 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20614 if_type = bf_get(lpfc_sli_intf_if_type,
20615 &phba->sli4_hba.sli_intf);
20616 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20618 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20624 /* Check the region signature first */
20625 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20626 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20627 "2619 Config region 23 has bad signature\n");
20632 /* Check the data structure version */
20633 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20634 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20635 "2620 Config region 23 has bad version\n");
20640 /* Parse TLV entries in the region */
20641 while (offset < data_size) {
20642 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20645 * If the TLV is not a driver-specific TLV or the driver id is
20646 * not the Linux driver id, skip the record.
20648 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20649 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20650 (rgn23_data[offset + 3] != 0)) {
20651 offset += rgn23_data[offset + 1] * 4 + 4;
20655 /* Driver found a driver specific TLV in the config region */
20656 sub_tlv_len = rgn23_data[offset + 1] * 4;
20661 * Search for configured port state sub-TLV.
20663 while ((offset < data_size) &&
20664 (tlv_offset < sub_tlv_len)) {
20665 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20670 if (rgn23_data[offset] != PORT_STE_TYPE) {
20671 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20672 offset += rgn23_data[offset + 1] * 4 + 4;
20676 /* This HBA contains PORT_STE configured */
20677 if (!rgn23_data[offset + 2])
20678 phba->hba_flag |= LINK_DISABLED;
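/*
 * Editor's sketch (illustrative only, not driver code): the TLV-walk
 * arithmetic used above. rgn[offset] is the record type, rgn[offset + 1] is
 * the record length in 32-bit words, and the trailing + 4 skips the TLV
 * header itself. The helper name is hypothetical.
 */
static inline u32 lpfc_example_next_tlv(const u8 *rgn, u32 offset)
{
	return offset + rgn[offset + 1] * 4 + 4;
}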
20690 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20691 * @phba: pointer to lpfc hba data structure
20692 * @shdr_status: wr_object rsp's status field
20693 * @shdr_add_status: wr_object rsp's add_status field
20694 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20695 * @shdr_change_status: wr_object rsp's change_status field
20696 * @shdr_csf: wr_object rsp's csf bit
20698 * This routine is intended to be called after a firmware write completes.
20699 * It will log the next action items to be performed by the user to instantiate
20700 * the newly downloaded firmware, or the reason for incompatibility.
20703 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20704 u32 shdr_add_status, u32 shdr_add_status_2,
20705 u32 shdr_change_status, u32 shdr_csf)
20707 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20708 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20709 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20710 "change_status x%02x, csf %01x\n", __func__,
20711 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20712 shdr_status, shdr_add_status, shdr_add_status_2,
20713 shdr_change_status, shdr_csf);
20715 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20716 switch (shdr_add_status_2) {
20717 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20718 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20719 "4199 Firmware write failed: "
20720 "image incompatible with flash x%02x\n",
20721 phba->sli4_hba.flash_id);
20723 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20724 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20725 "4200 Firmware write failed: "
20726 "image incompatible with ASIC "
20727 "architecture x%02x\n",
20728 phba->sli4_hba.asic_rev);
20731 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20732 "4210 Firmware write failed: "
20733 "add_status_2 x%02x\n",
20734 shdr_add_status_2);
20737 } else if (!shdr_status && !shdr_add_status) {
20738 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20739 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20741 shdr_change_status =
20742 LPFC_CHANGE_STATUS_PCI_RESET;
20745 switch (shdr_change_status) {
20746 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20747 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20748 "3198 Firmware write complete: System "
20749 "reboot required to instantiate\n");
20751 case (LPFC_CHANGE_STATUS_FW_RESET):
20752 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20753 "3199 Firmware write complete: "
20754 "Firmware reset required to "
20757 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20758 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20759 "3200 Firmware write complete: Port "
20760 "Migration or PCI Reset required to "
20763 case (LPFC_CHANGE_STATUS_PCI_RESET):
20764 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20765 "3201 Firmware write complete: PCI "
20766 "Reset required to instantiate\n");
20775 * lpfc_wr_object - write an object to the firmware
20776 * @phba: HBA structure that indicates port to create a queue on.
20777 * @dmabuf_list: list of dmabufs to write to the port.
20778 * @size: the total byte value of the objects to write to the port.
20779 * @offset: the current offset to be used to start the transfer.
20781 * This routine will create a wr_object mailbox command to send to the port.
20782 * The mailbox command will be constructed using the dma buffers described in
20783 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20784 * BDEs as the embedded mailbox can support. The @offset variable will be
20785 * used to indicate the starting offset of the transfer and will also return
20786 * the offset after the write object mailbox has completed. @size is used to
20787 * determine the end of the object and whether the eof bit should be set.
20789 * Return 0 if successful; @offset will contain the new offset to use
20790 * for the next write.
20791 * Return a negative value for error cases.
20794 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20795 uint32_t size, uint32_t *offset)
20797 struct lpfc_mbx_wr_object *wr_object;
20798 LPFC_MBOXQ_t *mbox;
20800 uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20801 uint32_t shdr_change_status = 0, shdr_csf = 0;
20803 struct lpfc_dmabuf *dmabuf;
20804 uint32_t written = 0;
20805 bool check_change_status = false;
20807 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20811 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20812 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20813 sizeof(struct lpfc_mbx_wr_object) -
20814 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20816 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20817 wr_object->u.request.write_offset = *offset;
20818 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20819 wr_object->u.request.object_name[0] =
20820 cpu_to_le32(wr_object->u.request.object_name[0]);
20821 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20822 list_for_each_entry(dmabuf, dmabuf_list, list) {
20823 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20825 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20826 wr_object->u.request.bde[i].addrHigh =
20827 putPaddrHigh(dmabuf->phys);
20828 if (written + SLI4_PAGE_SIZE >= size) {
20829 wr_object->u.request.bde[i].tus.f.bdeSize =
20831 written += (size - written);
20832 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20833 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20834 check_change_status = true;
20836 wr_object->u.request.bde[i].tus.f.bdeSize =
20838 written += SLI4_PAGE_SIZE;
20842 wr_object->u.request.bde_count = i;
20843 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20844 if (!phba->sli4_hba.intr_enable)
20845 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20847 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20848 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20850 /* The IOCTL status is embedded in the mailbox subheader. */
20851 shdr_status = bf_get(lpfc_mbox_hdr_status,
20852 &wr_object->header.cfg_shdr.response);
20853 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20854 &wr_object->header.cfg_shdr.response);
20855 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20856 &wr_object->header.cfg_shdr.response);
20857 if (check_change_status) {
20858 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20859 &wr_object->u.response);
20860 shdr_csf = bf_get(lpfc_wr_object_csf,
20861 &wr_object->u.response);
20864 if (!phba->sli4_hba.intr_enable)
20865 mempool_free(mbox, phba->mbox_mem_pool);
20866 else if (rc != MBX_TIMEOUT)
20867 mempool_free(mbox, phba->mbox_mem_pool);
20868 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20869 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20870 "3025 Write Object mailbox failed with "
20871 "status x%x add_status x%x, add_status_2 x%x, "
20872 "mbx status x%x\n",
20873 shdr_status, shdr_add_status, shdr_add_status_2,
20876 *offset = shdr_add_status;
20878 *offset += wr_object->u.response.actual_write_length;
20881 if (rc || check_change_status)
20882 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20883 shdr_add_status_2, shdr_change_status, shdr_csf);
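/*
 * Illustrative usage sketch (not part of the driver): a firmware download
 * path could drive lpfc_wr_object() in a loop, letting the routine advance
 * @offset until the whole image has been written. The names fw_size and
 * dmabuf_list below are assumptions for the example only.
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &dmabuf_list, fw_size, &offset);
 */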
20889 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20890 * @vport: pointer to vport data structure.
20892 * This function iterates through the mailboxq and cleans up all REG_LOGIN
20893 * and REG_VPI mailbox commands associated with the vport. This function
20894 * is called when the driver wants to restart discovery of the vport due to
20895 * a Clear Virtual Link event.
20898 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20900 struct lpfc_hba *phba = vport->phba;
20901 LPFC_MBOXQ_t *mb, *nextmb;
20902 struct lpfc_dmabuf *mp;
20903 struct lpfc_nodelist *ndlp;
20904 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20905 LIST_HEAD(mbox_cmd_list);
20906 uint8_t restart_loop;
20908 /* Clean up internally queued mailbox commands with the vport */
20909 spin_lock_irq(&phba->hbalock);
20910 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20911 if (mb->vport != vport)
20914 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20915 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20918 list_move_tail(&mb->list, &mbox_cmd_list);
20920 /* Clean up active mailbox command with the vport */
20921 mb = phba->sli.mbox_active;
20922 if (mb && (mb->vport == vport)) {
20923 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20924 (mb->u.mb.mbxCommand == MBX_REG_VPI))
20925 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20926 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20927 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20928 /* Put reference count for delayed processing */
20929 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20930 /* Unregister the RPI when mailbox complete */
20931 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20934 /* Cleanup any mailbox completions which are not yet processed */
20937 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20939 * If this mailbox is already processed or it is
20940 * for another vport, ignore it.
20942 if ((mb->vport != vport) ||
20943 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20946 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20947 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20950 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20951 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20952 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20953 /* Unregister the RPI when mailbox complete */
20954 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20956 spin_unlock_irq(&phba->hbalock);
20957 spin_lock(&ndlp->lock);
20958 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20959 spin_unlock(&ndlp->lock);
20960 spin_lock_irq(&phba->hbalock);
20964 } while (restart_loop);
20966 spin_unlock_irq(&phba->hbalock);
20968 /* Release the cleaned-up mailbox commands */
20969 while (!list_empty(&mbox_cmd_list)) {
20970 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20971 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20972 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
20974 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
20977 mb->ctx_buf = NULL;
20978 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20979 mb->ctx_ndlp = NULL;
20981 spin_lock(&ndlp->lock);
20982 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20983 spin_unlock(&ndlp->lock);
20984 lpfc_nlp_put(ndlp);
20987 mempool_free(mb, phba->mbox_mem_pool);
20990 /* Release the ndlp with the cleaned-up active mailbox command */
20991 if (act_mbx_ndlp) {
20992 spin_lock(&act_mbx_ndlp->lock);
20993 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20994 spin_unlock(&act_mbx_ndlp->lock);
20995 lpfc_nlp_put(act_mbx_ndlp);
21000 * lpfc_drain_txq - Drain the txq
21001 * @phba: Pointer to HBA context object.
21003 * This function attempts to submit IOCBs on the txq
21004 * to the adapter. For SLI4 adapters, the txq contains
21005 * ELS IOCBs that have been deferred because there
21006 * are no SGLs available. This congestion can occur with
21007 * large vport counts during node discovery.
21011 lpfc_drain_txq(struct lpfc_hba *phba)
21013 LIST_HEAD(completions);
21014 struct lpfc_sli_ring *pring;
21015 struct lpfc_iocbq *piocbq = NULL;
21016 unsigned long iflags = 0;
21017 char *fail_msg = NULL;
21018 struct lpfc_sglq *sglq;
21019 union lpfc_wqe128 wqe;
21020 uint32_t txq_cnt = 0;
21021 struct lpfc_queue *wq;
21023 if (phba->link_flag & LS_MDS_LOOPBACK) {
21024 /* MDS WQEs are posted only to the first WQ */
21025 wq = phba->sli4_hba.hdwq[0].io_wq;
21030 wq = phba->sli4_hba.els_wq;
21033 pring = lpfc_phba_elsring(phba);
21036 if (unlikely(!pring) || list_empty(&pring->txq))
21039 spin_lock_irqsave(&pring->ring_lock, iflags);
21040 list_for_each_entry(piocbq, &pring->txq, list) {
21044 if (txq_cnt > pring->txq_max)
21045 pring->txq_max = txq_cnt;
21047 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21049 while (!list_empty(&pring->txq)) {
21050 spin_lock_irqsave(&pring->ring_lock, iflags);
21052 piocbq = lpfc_sli_ringtx_get(phba, pring);
21054 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21055 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21056 "2823 txq empty and txq_cnt is %d\n ",
21060 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
21062 __lpfc_sli_ringtx_put(phba, pring, piocbq);
21063 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21068 /* The xri and iocb resources secured,
21069 * attempt to issue request
21071 piocbq->sli4_lxritag = sglq->sli4_lxritag;
21072 piocbq->sli4_xritag = sglq->sli4_xritag;
21073 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
21074 fail_msg = "to convert bpl to sgl";
21075 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
21076 fail_msg = "to convert iocb to wqe";
21077 else if (lpfc_sli4_wq_put(wq, &wqe))
21078 fail_msg = " - Wq is full";
21080 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
21083 /* Failed means we can't issue and need to cancel */
21084 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21085 "2822 IOCB failed %s iotag 0x%x "
21088 piocbq->iotag, piocbq->sli4_xritag);
21089 list_add_tail(&piocbq->list, &completions);
21092 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21095 /* Cancel all the IOCBs that cannot be issued */
21096 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21097 IOERR_SLI_ABORTED);
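/*
 * Illustrative note (assumption about the call site): a path that returns
 * an ELS SGL to the free pool could immediately retry the deferred work:
 *
 *	... free an els sglq back to the els sgl list ...
 *	lpfc_drain_txq(phba);
 */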
21103 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21104 * @phba: Pointer to HBA context object.
21105 * @pwqeq: Pointer to command WQE.
21106 * @sglq: Pointer to the scatter gather queue object.
21108 * This routine converts the bpl or bde that is in the WQE
21109 * to an sgl list for the sli4 hardware. The physical address
21110 * of the bpl/bde is converted back to a virtual address.
21111 * If the WQE contains a BPL then the list of BDEs is
21112 * converted to sli4_sges. If the WQE contains a single
21113 * BDE then it is converted to a single sli4_sge.
21114 * The WQE is still in cpu endianness so the contents of
21115 * the bpl can be used without byte swapping.
21117 * Returns valid XRI = Success, NO_XRI = Failure.
21120 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21121 struct lpfc_sglq *sglq)
21123 uint16_t xritag = NO_XRI;
21124 struct ulp_bde64 *bpl = NULL;
21125 struct ulp_bde64 bde;
21126 struct sli4_sge *sgl = NULL;
21127 struct lpfc_dmabuf *dmabuf;
21128 union lpfc_wqe128 *wqe;
21131 uint32_t offset = 0; /* accumulated offset in the sg request list */
21132 int inbound = 0; /* number of sg reply entries inbound from firmware */
21135 if (!pwqeq || !sglq)
21138 sgl = (struct sli4_sge *)sglq->sgl;
21140 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21142 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21143 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21144 return sglq->sli4_xritag;
21145 numBdes = pwqeq->num_bdes;
21147 /* The addrHigh and addrLow fields within the WQE
21148 * have not been byteswapped yet so there is no
21149 * need to swap them back.
21151 if (pwqeq->context3)
21152 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
21156 bpl = (struct ulp_bde64 *)dmabuf->virt;
21160 for (i = 0; i < numBdes; i++) {
21161 /* Should already be byte swapped. */
21162 sgl->addr_hi = bpl->addrHigh;
21163 sgl->addr_lo = bpl->addrLow;
21165 sgl->word2 = le32_to_cpu(sgl->word2);
21166 if ((i+1) == numBdes)
21167 bf_set(lpfc_sli4_sge_last, sgl, 1);
21169 bf_set(lpfc_sli4_sge_last, sgl, 0);
21170 /* swap the size field back to the cpu so we
21171 * can assign it to the sgl.
21173 bde.tus.w = le32_to_cpu(bpl->tus.w);
21174 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21175 /* The offsets in the sgl need to be accumulated
21176 * separately for the request and reply lists.
21177 * The request is always first, the reply follows.
21180 case CMD_GEN_REQUEST64_WQE:
21181 /* add up the reply sg entries */
21182 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21184 /* first inbound? reset the offset */
21187 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21188 bf_set(lpfc_sli4_sge_type, sgl,
21189 LPFC_SGE_TYPE_DATA);
21190 offset += bde.tus.f.bdeSize;
21192 case CMD_FCP_TRSP64_WQE:
21193 bf_set(lpfc_sli4_sge_offset, sgl, 0);
21194 bf_set(lpfc_sli4_sge_type, sgl,
21195 LPFC_SGE_TYPE_DATA);
21197 case CMD_FCP_TSEND64_WQE:
21198 case CMD_FCP_TRECEIVE64_WQE:
21199 bf_set(lpfc_sli4_sge_type, sgl,
21200 bpl->tus.f.bdeFlags);
21204 offset += bde.tus.f.bdeSize;
21205 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21208 sgl->word2 = cpu_to_le32(sgl->word2);
21212 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21213 /* The addrHigh and addrLow fields of the BDE have not
21214 * been byteswapped yet so they need to be swapped
21215 * before putting them in the sgl.
21217 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21218 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21219 sgl->word2 = le32_to_cpu(sgl->word2);
21220 bf_set(lpfc_sli4_sge_last, sgl, 1);
21221 sgl->word2 = cpu_to_le32(sgl->word2);
21222 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21224 return sglq->sli4_xritag;
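/*
 * Note on the endian handling above: the bit fields of an sli4_sge must be
 * manipulated in CPU byte order, so word2 is swapped to CPU order, updated
 * with bf_set(), and swapped back before the SGE reaches the hardware:
 *
 *	sgl->word2 = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);
 *	sgl->word2 = cpu_to_le32(sgl->word2);
 */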
21228 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21229 * @phba: Pointer to HBA context object.
21230 * @qp: Pointer to HDW queue.
21231 * @pwqe: Pointer to command WQE.
21234 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21235 struct lpfc_iocbq *pwqe)
21237 union lpfc_wqe128 *wqe = &pwqe->wqe;
21238 struct lpfc_async_xchg_ctx *ctxp;
21239 struct lpfc_queue *wq;
21240 struct lpfc_sglq *sglq;
21241 struct lpfc_sli_ring *pring;
21242 unsigned long iflags;
21245 /* NVME_LS and NVME_LS ABTS requests. */
21246 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21247 pring = phba->sli4_hba.nvmels_wq->pring;
21248 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21250 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21252 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21255 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21256 pwqe->sli4_xritag = sglq->sli4_xritag;
21257 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21258 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21261 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21262 pwqe->sli4_xritag);
21263 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21265 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21269 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21270 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21272 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21276 /* NVME_FCREQ and NVME_ABTS requests */
21277 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21278 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21282 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21284 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21286 ret = lpfc_sli4_wq_put(wq, wqe);
21288 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21291 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21292 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21294 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21298 /* NVMET requests */
21299 if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21300 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21304 ctxp = pwqe->context2;
21305 sglq = ctxp->ctxbuf->sglq;
21306 if (pwqe->sli4_xritag == NO_XRI) {
21307 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21308 pwqe->sli4_xritag = sglq->sli4_xritag;
21310 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21311 pwqe->sli4_xritag);
21312 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21314 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21316 ret = lpfc_sli4_wq_put(wq, wqe);
21318 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21321 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21322 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21324 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
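/*
 * Illustrative caller sketch (assumption): a fast-path submitter selects
 * the hardware queue from the IO's hba_wqidx and checks the WQE_* status:
 *
 *	qp = &phba->sli4_hba.hdwq[pwqe->hba_wqidx];
 *	ret = lpfc_sli4_issue_wqe(phba, qp, pwqe);
 *	if (ret)
 *		return ret;   (e.g. WQE_BUSY or WQE_NORESOURCE)
 */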
21331 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21332 * @phba: Pointer to HBA context object.
21333 * @cmdiocb: Pointer to driver command iocb object.
21334 * @cmpl: completion function.
21336 * Fill the appropriate fields for the abort WQE and call
21337 * internal routine lpfc_sli4_issue_wqe to send the WQE
21338 * This function is called with hbalock held and no ring_lock held.
21340 * RETURNS 0 - SUCCESS
21344 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21347 struct lpfc_vport *vport = cmdiocb->vport;
21348 struct lpfc_iocbq *abtsiocb = NULL;
21349 union lpfc_wqe128 *abtswqe;
21350 struct lpfc_io_buf *lpfc_cmd;
21351 int retval = IOCB_ERROR;
21352 u16 xritag = cmdiocb->sli4_xritag;
21355 * The SCSI command cannot be in the txq; it is in flight, because
21356 * pCmd still points at the SCSI command we have to abort. There
21357 * is no need to search the txcmplq. Just send an abort to the FW.
21360 abtsiocb = __lpfc_sli_get_iocbq(phba);
21362 return WQE_NORESOURCE;
21364 /* Indicate the IO is being aborted by the driver. */
21365 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21367 abtswqe = &abtsiocb->wqe;
21368 memset(abtswqe, 0, sizeof(*abtswqe));
21370 if (!lpfc_is_link_up(phba))
21371 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21372 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21373 abtswqe->abort_cmd.rsrvd5 = 0;
21374 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21375 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21376 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21377 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21378 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21379 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21380 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21382 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21383 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21384 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21385 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21386 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21387 if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21388 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21389 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21390 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21391 abtsiocb->vport = vport;
21392 abtsiocb->cmd_cmpl = cmpl;
21394 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21395 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21397 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21398 "0359 Abort xri x%x, original iotag x%x, "
21399 "abort cmd iotag x%x retval x%x\n",
21400 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21403 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21404 __lpfc_sli_release_iocbq(phba, abtsiocb);
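/*
 * Illustrative caller sketch (assumption; cmpl_fn is a placeholder for the
 * caller's completion handler): an error handler holding the hbalock issues
 * the abort and treats a nonzero return as failure to start the abort
 * exchange:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli4_issue_abort_iotag(phba, iocb, cmpl_fn);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */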
21410 #ifdef LPFC_MXP_STAT
21412 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21413 * @phba: pointer to lpfc hba data structure.
21414 * @hwqid: index of the owning HWQ.
21416 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21417 * 15 seconds after a test case has started running.
21419 * The user should call lpfc_debugfs_multixripools_write before running a test
21420 * case to clear stat_snapshot_taken. Then the user starts the test case. While
21421 * the test case is running, stat_snapshot_taken is incremented by 1 each time
21422 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21423 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21425 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21427 struct lpfc_sli4_hdw_queue *qp;
21428 struct lpfc_multixri_pool *multixri_pool;
21429 struct lpfc_pvt_pool *pvt_pool;
21430 struct lpfc_pbl_pool *pbl_pool;
21433 qp = &phba->sli4_hba.hdwq[hwqid];
21434 multixri_pool = qp->p_multixri_pool;
21435 if (!multixri_pool)
21438 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21439 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21440 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21441 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21443 multixri_pool->stat_pbl_count = pbl_pool->count;
21444 multixri_pool->stat_pvt_count = pvt_pool->count;
21445 multixri_pool->stat_busy_count = txcmplq_cnt;
21448 multixri_pool->stat_snapshot_taken++;
21453 * lpfc_adjust_pvt_pool_count - Adjust private pool count
21454 * @phba: pointer to lpfc hba data structure.
21455 * @hwqid: index of the owning HWQ.
21457 * This routine moves some XRIs from the private to the public pool when the private pool is not busy.
21460 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21462 struct lpfc_multixri_pool *multixri_pool;
21464 u32 prev_io_req_count;
21466 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21467 if (!multixri_pool)
21469 io_req_count = multixri_pool->io_req_count;
21470 prev_io_req_count = multixri_pool->prev_io_req_count;
21472 if (prev_io_req_count != io_req_count) {
21473 /* Private pool is busy */
21474 multixri_pool->prev_io_req_count = io_req_count;
21476 /* Private pool is not busy.
21477 * Move XRIs from private to public pool.
21479 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21484 * lpfc_adjust_high_watermark - Adjust high watermark
21485 * @phba: pointer to lpfc hba data structure.
21486 * @hwqid: index of the owning HWQ.
21488 * This routine sets the high watermark to the number of outstanding XRIs,
21489 * while making sure the new value stays between xri_limit/2 and xri_limit.
21491 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21499 struct lpfc_multixri_pool *multixri_pool;
21500 struct lpfc_sli4_hdw_queue *qp;
21502 qp = &phba->sli4_hba.hdwq[hwqid];
21503 multixri_pool = qp->p_multixri_pool;
21504 if (!multixri_pool)
21506 xri_limit = multixri_pool->xri_limit;
21508 watermark_max = xri_limit;
21509 watermark_min = xri_limit / 2;
21511 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21512 abts_io_bufs = qp->abts_scsi_io_bufs;
21513 abts_io_bufs += qp->abts_nvme_io_bufs;
21515 new_watermark = txcmplq_cnt + abts_io_bufs;
21516 new_watermark = min(watermark_max, new_watermark);
21517 new_watermark = max(watermark_min, new_watermark);
21518 multixri_pool->pvt_pool.high_watermark = new_watermark;
21520 #ifdef LPFC_MXP_STAT
21521 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, new_watermark);
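/*
 * Worked example for the clamp above: with xri_limit = 512 the watermark
 * is kept in [256, 512]; if txcmplq_cnt + abts_io_bufs = 100, the new
 * high watermark is max(256, min(512, 100)) = 256.
 */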
21527 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21528 * @phba: pointer to lpfc hba data structure.
21529 * @hwqid: index of the owning HWQ.
21531 * This routine is called from the heartbeat timer when pvt_pool is idle.
21532 * All free XRIs are moved from the private to the public pool on hwqid in
21533 * two steps: the first step moves (all - low_watermark) XRIs, and the
21534 * second step moves the rest of the XRIs.
21536 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21538 struct lpfc_pbl_pool *pbl_pool;
21539 struct lpfc_pvt_pool *pvt_pool;
21540 struct lpfc_sli4_hdw_queue *qp;
21541 struct lpfc_io_buf *lpfc_ncmd;
21542 struct lpfc_io_buf *lpfc_ncmd_next;
21543 unsigned long iflag;
21544 struct list_head tmp_list;
21547 qp = &phba->sli4_hba.hdwq[hwqid];
21548 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21549 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21552 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21553 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21555 if (pvt_pool->count > pvt_pool->low_watermark) {
21556 /* Step 1: move (all - low_watermark) from pvt_pool
21560 /* Move low watermark of bufs from pvt_pool to tmp_list */
21561 INIT_LIST_HEAD(&tmp_list);
21562 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21563 &pvt_pool->list, list) {
21564 list_move_tail(&lpfc_ncmd->list, &tmp_list);
21566 if (tmp_count >= pvt_pool->low_watermark)
21570 /* Move all bufs from pvt_pool to pbl_pool */
21571 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21573 /* Move all bufs from tmp_list to pvt_pool */
21574 list_splice(&tmp_list, &pvt_pool->list);
21576 pbl_pool->count += (pvt_pool->count - tmp_count);
21577 pvt_pool->count = tmp_count;
21579 /* Step 2: move the rest from pvt_pool to pbl_pool */
21580 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21581 pbl_pool->count += pvt_pool->count;
21582 pvt_pool->count = 0;
21585 spin_unlock(&pvt_pool->lock);
21586 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
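/*
 * Note on lock ordering: pbl_pool->lock is taken before pvt_pool->lock,
 * both here and in _lpfc_move_xri_pbl_to_pvt(), which is what keeps the
 * two-pool splices deadlock-free.
 */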
21590 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21591 * @phba: pointer to lpfc hba data structure
21592 * @qp: pointer to HDW queue
21593 * @pbl_pool: specified public free XRI pool
21594 * @pvt_pool: specified private free XRI pool
21595 * @count: number of XRIs to move
21597 * This routine tries to move some free common bufs from the specified pbl_pool
21598 * to the specified pvt_pool. It may move fewer than count XRIs if there are
21599 * not enough in the public pool.
21602 * true - if XRIs are successfully moved from the specified pbl_pool to the
21603 * specified pvt_pool
21604 * false - if the specified pbl_pool is empty or locked by someone else
21607 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21608 struct lpfc_pbl_pool *pbl_pool,
21609 struct lpfc_pvt_pool *pvt_pool, u32 count)
21611 struct lpfc_io_buf *lpfc_ncmd;
21612 struct lpfc_io_buf *lpfc_ncmd_next;
21613 unsigned long iflag;
21616 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21618 if (pbl_pool->count) {
21619 /* Move a batch of XRIs from public to private pool */
21620 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21621 list_for_each_entry_safe(lpfc_ncmd,
21625 list_move_tail(&lpfc_ncmd->list,
21634 spin_unlock(&pvt_pool->lock);
21635 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21638 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
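/*
 * The spin_trylock_irqsave() above is deliberate: if another CPU holds
 * this public pool's lock, the caller simply moves on to the next HWQ's
 * pool (see lpfc_move_xri_pbl_to_pvt) instead of spinning here.
 */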
21645 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21646 * @phba: pointer to lpfc hba data structure.
21647 * @hwqid: index of the owning HWQ.
21648 * @count: number of XRIs to move
21650 * This routine tries to find some free common bufs in one of the public
21651 * pools using a round-robin method. The search starts from the local hwqid,
21652 * then from the HWQ found last time (rrb_next_hwqid). Once a public pool is
21653 * found, a batch of free common bufs is moved to the private pool on hwqid.
21654 * It may move fewer than count XRIs if there are not enough in the public pool.
21656 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21658 struct lpfc_multixri_pool *multixri_pool;
21659 struct lpfc_multixri_pool *next_multixri_pool;
21660 struct lpfc_pvt_pool *pvt_pool;
21661 struct lpfc_pbl_pool *pbl_pool;
21662 struct lpfc_sli4_hdw_queue *qp;
21667 qp = &phba->sli4_hba.hdwq[hwqid];
21668 multixri_pool = qp->p_multixri_pool;
21669 pvt_pool = &multixri_pool->pvt_pool;
21670 pbl_pool = &multixri_pool->pbl_pool;
21672 /* Check if local pbl_pool is available */
21673 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21675 #ifdef LPFC_MXP_STAT
21676 multixri_pool->local_pbl_hit_count++;
21681 hwq_count = phba->cfg_hdw_queue;
21683 /* Get the next hwqid which was found last time */
21684 next_hwqid = multixri_pool->rrb_next_hwqid;
21687 /* Go to next hwq */
21688 next_hwqid = (next_hwqid + 1) % hwq_count;
21690 next_multixri_pool =
21691 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21692 pbl_pool = &next_multixri_pool->pbl_pool;
21694 /* Check if the public free xri pool is available */
21695 ret = _lpfc_move_xri_pbl_to_pvt(
21696 phba, qp, pbl_pool, pvt_pool, count);
21698 /* Exit while-loop if success or all hwqid are checked */
21699 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21701 /* Starting point for the next time */
21702 multixri_pool->rrb_next_hwqid = next_hwqid;
21705 /* stats: all public pools are empty */
21706 multixri_pool->pbl_empty_count++;
21709 #ifdef LPFC_MXP_STAT
21711 if (next_hwqid == hwqid)
21712 multixri_pool->local_pbl_hit_count++;
21714 multixri_pool->other_pbl_hit_count++;
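/*
 * Illustrative note: the visit order is the local hwqid first, then
 * rrb_next_hwqid + 1, rrb_next_hwqid + 2, ... modulo hwq_count, stopping
 * on the first pool that yields XRIs or once every HWQ has been tried.
 */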
21720 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21721 * @phba: pointer to lpfc hba data structure.
21722 * @hwqid: index of the owning HWQ.
21724 * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count falls below its low watermark.
21727 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21729 struct lpfc_multixri_pool *multixri_pool;
21730 struct lpfc_pvt_pool *pvt_pool;
21732 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21733 pvt_pool = &multixri_pool->pvt_pool;
21735 if (pvt_pool->count < pvt_pool->low_watermark)
21736 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21740 * lpfc_release_io_buf - Return one IO buf back to free pool
21741 * @phba: pointer to lpfc hba data structure.
21742 * @lpfc_ncmd: IO buf to be returned.
21743 * @qp: pointer to the owning HDW queue.
21745 * This routine returns one IO buf back to free pool. If this is an urgent IO,
21746 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
21747 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21748 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
21749 * lpfc_io_buf_list_put.
21751 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21752 struct lpfc_sli4_hdw_queue *qp)
21754 unsigned long iflag;
21755 struct lpfc_pbl_pool *pbl_pool;
21756 struct lpfc_pvt_pool *pvt_pool;
21757 struct lpfc_epd_pool *epd_pool;
21763 /* MUST zero fields if buffer is reused by another protocol */
21764 lpfc_ncmd->nvmeCmd = NULL;
21765 lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21767 if (phba->cfg_xpsgl && !phba->nvmet_support &&
21768 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21769 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21771 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21772 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21774 if (phba->cfg_xri_rebalancing) {
21775 if (lpfc_ncmd->expedite) {
21776 /* Return to expedite pool */
21777 epd_pool = &phba->epd_pool;
21778 spin_lock_irqsave(&epd_pool->lock, iflag);
21779 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21781 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21785 /* Avoid invalid access if an IO sneaks in and is being rejected
21786 * just _after_ xri pools are destroyed in lpfc_offline.
21787 * Nothing much can be done at this point.
21789 if (!qp->p_multixri_pool)
21792 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21793 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21795 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21796 abts_io_bufs = qp->abts_scsi_io_bufs;
21797 abts_io_bufs += qp->abts_nvme_io_bufs;
21799 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21800 xri_limit = qp->p_multixri_pool->xri_limit;
21802 #ifdef LPFC_MXP_STAT
21803 if (xri_owned <= xri_limit)
21804 qp->p_multixri_pool->below_limit_count++;
21806 qp->p_multixri_pool->above_limit_count++;
21809 /* XRI goes to either public or private free xri pool
21810 * based on watermark and xri_limit
21812 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21813 (xri_owned < xri_limit &&
21814 pvt_pool->count < pvt_pool->high_watermark)) {
21815 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21816 qp, free_pvt_pool);
21817 list_add_tail(&lpfc_ncmd->list,
21820 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21822 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21823 qp, free_pub_pool);
21824 list_add_tail(&lpfc_ncmd->list,
21827 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21830 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21832 list_add_tail(&lpfc_ncmd->list,
21833 &qp->lpfc_io_buf_list_put);
21835 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
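/*
 * Decision summary for the rebalancing path above (illustrative):
 *
 *	pvt_pool->count < low_watermark             -> return to pvt_pool
 *	else if xri_owned < xri_limit &&
 *	        pvt_pool->count < high_watermark    -> return to pvt_pool
 *	else                                        -> return to pbl_pool
 */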
21841 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21842 * @phba: pointer to lpfc hba data structure.
21843 * @qp: pointer to HDW queue
21844 * @pvt_pool: pointer to private pool data structure.
21845 * @ndlp: pointer to lpfc nodelist data structure.
21847 * This routine tries to get one free IO buf from private pool.
21850 * pointer to one free IO buf - if private pool is not empty
21851 * NULL - if private pool is empty
21853 static struct lpfc_io_buf *
21854 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21855 struct lpfc_sli4_hdw_queue *qp,
21856 struct lpfc_pvt_pool *pvt_pool,
21857 struct lpfc_nodelist *ndlp)
21859 struct lpfc_io_buf *lpfc_ncmd;
21860 struct lpfc_io_buf *lpfc_ncmd_next;
21861 unsigned long iflag;
21863 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21864 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21865 &pvt_pool->list, list) {
21866 if (lpfc_test_rrq_active(
21867 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21869 list_del(&lpfc_ncmd->list);
21871 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21874 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21880 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21881 * @phba: pointer to lpfc hba data structure.
21883 * This routine tries to get one free IO buf from expedite pool.
21886 * pointer to one free IO buf - if expedite pool is not empty
21887 * NULL - if expedite pool is empty
21889 static struct lpfc_io_buf *
21890 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21892 struct lpfc_io_buf *lpfc_ncmd;
21893 struct lpfc_io_buf *lpfc_ncmd_next;
21894 unsigned long iflag;
21895 struct lpfc_epd_pool *epd_pool;
21897 epd_pool = &phba->epd_pool;
21900 spin_lock_irqsave(&epd_pool->lock, iflag);
21901 if (epd_pool->count > 0) {
21902 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21903 &epd_pool->list, list) {
21904 list_del(&lpfc_ncmd->list);
21909 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21915 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
21916 * @phba: pointer to lpfc hba data structure.
21917 * @ndlp: pointer to lpfc nodelist data structure.
21918 * @hwqid: index of the owning HWQ
21919 * @expedite: 1 means this request is urgent.
21921 * This routine will do the following actions and then return a pointer to one free IO buf:
21924 * 1. If the private free xri pool is empty, move some XRIs from the public to the private pool.
21926 * 2. Get one XRI from private free xri pool.
21927 * 3. If we fail to get one from pvt_pool and this is an expedite request,
21928 * get one free xri from expedite pool.
21930 * Note: ndlp is only used on SCSI side for RRQ testing.
21931 * The caller should pass NULL for ndlp on NVME side.
21934 * pointer to one free IO buf - if private pool is not empty
21935 * NULL - if private pool is empty
21937 static struct lpfc_io_buf *
21938 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21939 struct lpfc_nodelist *ndlp,
21940 int hwqid, int expedite)
21942 struct lpfc_sli4_hdw_queue *qp;
21943 struct lpfc_multixri_pool *multixri_pool;
21944 struct lpfc_pvt_pool *pvt_pool;
21945 struct lpfc_io_buf *lpfc_ncmd;
21947 qp = &phba->sli4_hba.hdwq[hwqid];
21950 lpfc_printf_log(phba, KERN_INFO,
21951 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21952 "5556 NULL qp for hwqid x%x\n", hwqid);
21955 multixri_pool = qp->p_multixri_pool;
21956 if (!multixri_pool) {
21957 lpfc_printf_log(phba, KERN_INFO,
21958 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21959 "5557 NULL multixri for hwqid x%x\n", hwqid);
21962 pvt_pool = &multixri_pool->pvt_pool;
21964 lpfc_printf_log(phba, KERN_INFO,
21965 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21966 "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
21969 multixri_pool->io_req_count++;
21971 /* If pvt_pool is empty, move some XRIs from public to private pool */
21972 if (pvt_pool->count == 0)
21973 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21975 /* Get one XRI from private free xri pool */
21976 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21979 lpfc_ncmd->hdwq = qp;
21980 lpfc_ncmd->hdwq_no = hwqid;
21981 } else if (expedite) {
21982 /* If we fail to get one from pvt_pool and this is an expedite
21983 * request, get one free xri from expedite pool.
21985 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21991 static inline struct lpfc_io_buf *
21992 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21994 struct lpfc_sli4_hdw_queue *qp;
21995 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21997 qp = &phba->sli4_hba.hdwq[idx];
21998 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21999 &qp->lpfc_io_buf_list_get, list) {
22000 if (lpfc_test_rrq_active(phba, ndlp,
22001 lpfc_cmd->cur_iocbq.sli4_lxritag))
22004 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
22007 list_del_init(&lpfc_cmd->list);
22009 lpfc_cmd->hdwq = qp;
22010 lpfc_cmd->hdwq_no = idx;
22017 * lpfc_get_io_buf - Get one IO buffer from free pool
22018 * @phba: The HBA for which this call is being executed.
22019 * @ndlp: pointer to lpfc nodelist data structure.
22020 * @hwqid: index of the owning HWQ
22021 * @expedite: 1 means this request is urgent.
22023 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
22024 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
22025 * it removes an IO buffer from the head of @hdwq io_buf_list and returns it to the caller.
22027 * Note: ndlp is only used on SCSI side for RRQ testing.
22028 * The caller should pass NULL for ndlp on NVME side.
22032 * Pointer to lpfc_io_buf - Success
22034 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
22035 struct lpfc_nodelist *ndlp,
22036 u32 hwqid, int expedite)
22038 struct lpfc_sli4_hdw_queue *qp;
22039 unsigned long iflag;
22040 struct lpfc_io_buf *lpfc_cmd;
22042 qp = &phba->sli4_hba.hdwq[hwqid];
22045 lpfc_printf_log(phba, KERN_WARNING,
22046 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22047 "5555 NULL qp for hwqid x%x\n", hwqid);
22051 if (phba->cfg_xri_rebalancing)
22052 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22053 phba, ndlp, hwqid, expedite);
22055 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22056 qp, alloc_xri_get);
22057 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22058 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22060 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22061 qp, alloc_xri_put);
22062 list_splice(&qp->lpfc_io_buf_list_put,
22063 &qp->lpfc_io_buf_list_get);
22064 qp->get_io_bufs += qp->put_io_bufs;
22065 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22066 qp->put_io_bufs = 0;
22067 spin_unlock(&qp->io_buf_list_put_lock);
22068 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22070 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22072 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
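/*
 * Illustrative usage sketch (assumption): an IO submit path pairs
 * lpfc_get_io_buf() with lpfc_release_io_buf() on completion or error:
 *
 *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, expedite);
 *	if (!lpfc_cmd)
 *		return -ENOMEM;
 *	... build and issue the IO ...
 *	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 */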
22079 * lpfc_read_object - Retrieve object data from HBA
22080 * @phba: The HBA for which this call is being executed.
22081 * @rdobject: Pathname of object data we want to read.
22082 * @datap: Pointer to where data will be copied to.
22083 * @datasz: size of data area
22085 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22086 * The data will be truncated if datasz is not large enough.
22087 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
22088 * Returns the actual bytes read from the object.
22091 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
22094 struct lpfc_mbx_read_object *read_object;
22095 LPFC_MBOXQ_t *mbox;
22096 int rc, length, eof, j, byte_cnt = 0;
22097 uint32_t shdr_status, shdr_add_status;
22098 union lpfc_sli4_cfg_shdr *shdr;
22099 struct lpfc_dmabuf *pcmd;
22100 u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22102 /* sanity check on queue memory */
22106 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22109 length = (sizeof(struct lpfc_mbx_read_object) -
22110 sizeof(struct lpfc_sli4_cfg_mhdr));
22111 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22112 LPFC_MBOX_OPCODE_READ_OBJECT,
22113 length, LPFC_SLI4_MBX_EMBED);
22114 read_object = &mbox->u.mqe.un.read_object;
22115 shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22117 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22118 bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22119 read_object->u.request.rd_object_offset = 0;
22120 read_object->u.request.rd_object_cnt = 1;
22122 memset((void *)read_object->u.request.rd_object_name, 0,
22124 scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
22125 for (j = 0; j < strlen(rdobject); j++)
22126 read_object->u.request.rd_object_name[j] =
22127 cpu_to_le32(rd_object_name[j]);
22129 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
22131 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22132 if (!pcmd || !pcmd->virt) {
22134 mempool_free(mbox, phba->mbox_mem_pool);
22137 memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22138 read_object->u.request.rd_object_hbuf[0].pa_lo =
22139 putPaddrLow(pcmd->phys);
22140 read_object->u.request.rd_object_hbuf[0].pa_hi =
22141 putPaddrHigh(pcmd->phys);
22142 read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22144 mbox->vport = phba->pport;
22145 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22146 mbox->ctx_buf = NULL;
22147 mbox->ctx_ndlp = NULL;
22149 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22150 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22151 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22153 if (shdr_status == STATUS_FAILED &&
22154 shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22156 "4674 No port cfg file in FW.\n");
22157 byte_cnt = -ENOENT;
22158 } else if (shdr_status || shdr_add_status || rc) {
22159 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22160 "2625 READ_OBJECT mailbox failed with "
22161 "status x%x add_status x%x, mbx status x%x\n",
22162 shdr_status, shdr_add_status, rc);
22166 length = read_object->u.response.rd_object_actual_rlen;
22167 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22168 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22169 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22170 length, datasz, eof);
22172 /* Detect that the port config file exists but is empty */
22173 if (!length && eof) {
22179 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22183 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22185 mempool_free(mbox, phba->mbox_mem_pool);
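/*
 * Illustrative caller sketch (objname is a placeholder for a real object
 * path in FW): read a config object into a local buffer; a negative return
 * means the object is absent or the mailbox failed.
 *
 *	u32 data[LPFC_BPL_SIZE / sizeof(u32)];
 *	int len;
 *
 *	len = lpfc_read_object(phba, objname, data, sizeof(data));
 *	if (len == -ENOENT)
 *		... no such object in FW ...
 */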
22190 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22191 * @phba: The HBA for which this call is being executed.
22192 * @lpfc_buf: IO buf structure to append the SGL chunk
22194 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22195 * and will allocate an SGL chunk if the pool is empty.
22199 * Pointer to sli4_hybrid_sgl - Success
22201 struct sli4_hybrid_sgl *
22202 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22204 struct sli4_hybrid_sgl *list_entry = NULL;
22205 struct sli4_hybrid_sgl *tmp = NULL;
22206 struct sli4_hybrid_sgl *allocated_sgl = NULL;
22207 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22208 struct list_head *buf_list = &hdwq->sgl_list;
22209 unsigned long iflags;
22211 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22213 if (likely(!list_empty(buf_list))) {
22214 /* break off 1 chunk from the sgl_list */
22215 list_for_each_entry_safe(list_entry, tmp,
22216 buf_list, list_node) {
22217 list_move_tail(&list_entry->list_node,
22218 &lpfc_buf->dma_sgl_xtra_list);
22222 /* allocate more */
22223 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22224 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22225 cpu_to_node(hdwq->io_wq->chann));
22227 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22228 "8353 error kmalloc memory for HDWQ "
22230 lpfc_buf->hdwq_no, __func__);
22234 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22235 GFP_ATOMIC, &tmp->dma_phys_sgl);
22236 if (!tmp->dma_sgl) {
22237 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22238 "8354 error pool_alloc memory for HDWQ "
22240 lpfc_buf->hdwq_no, __func__);
22245 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22246 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22249 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22250 struct sli4_hybrid_sgl,
22253 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22255 return allocated_sgl;
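/*
 * Note: the allocation path above drops hdwq_lock before calling
 * kmalloc_node()/dma_pool_alloc() with GFP_ATOMIC, then retakes it to
 * link the new chunk, so the lock is never held across an allocation.
 */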
22259 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22260 * @phba: The HBA for which this call is being executed.
22261 * @lpfc_buf: IO buf structure with the SGL chunk
22263 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22270 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22273 struct sli4_hybrid_sgl *list_entry = NULL;
22274 struct sli4_hybrid_sgl *tmp = NULL;
22275 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22276 struct list_head *buf_list = &hdwq->sgl_list;
22277 unsigned long iflags;
22279 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22281 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22282 list_for_each_entry_safe(list_entry, tmp,
22283 &lpfc_buf->dma_sgl_xtra_list,
22285 list_move_tail(&list_entry->list_node,
22292 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22297 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22298 * @phba: phba object
22299 * @hdwq: hdwq to cleanup sgl buff resources on
22301 * This routine frees all SGL chunks of hdwq SGL chunk pool.
22307 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22308 struct lpfc_sli4_hdw_queue *hdwq)
22310 struct list_head *buf_list = &hdwq->sgl_list;
22311 struct sli4_hybrid_sgl *list_entry = NULL;
22312 struct sli4_hybrid_sgl *tmp = NULL;
22313 unsigned long iflags;
22315 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22317 /* Free sgl pool */
22318 list_for_each_entry_safe(list_entry, tmp,
22319 buf_list, list_node) {
22320 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22321 list_entry->dma_sgl,
22322 list_entry->dma_phys_sgl);
22323 list_del(&list_entry->list_node);
22327 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22331 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22332 * @phba: The HBA for which this call is being executed.
22333 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22335 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22336 * and will allocate a CMD/RSP buffer if the pool is empty.
22340 * Pointer to fcp_cmd_rsp_buf - Success
22342 struct fcp_cmd_rsp_buf *
22343 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22344 struct lpfc_io_buf *lpfc_buf)
22346 struct fcp_cmd_rsp_buf *list_entry = NULL;
22347 struct fcp_cmd_rsp_buf *tmp = NULL;
22348 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22349 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22350 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22351 unsigned long iflags;
22353 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22355 if (likely(!list_empty(buf_list))) {
22356 /* break off 1 chunk from the list */
22357 list_for_each_entry_safe(list_entry, tmp,
22360 list_move_tail(&list_entry->list_node,
22361 &lpfc_buf->dma_cmd_rsp_list);
22365 /* allocate more */
22366 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22367 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22368 cpu_to_node(hdwq->io_wq->chann));
22370 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22371 "8355 error kmalloc memory for HDWQ "
22373 lpfc_buf->hdwq_no, __func__);
22377 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
22379 &tmp->fcp_cmd_rsp_dma_handle);
22381 if (!tmp->fcp_cmnd) {
22382 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22383 "8356 error pool_alloc memory for HDWQ "
22385 lpfc_buf->hdwq_no, __func__);
22390 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22391 sizeof(struct fcp_cmnd));
22393 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22394 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22397 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22398 struct fcp_cmd_rsp_buf,
22401 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22403 return allocated_buf;
22407 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22408 * @phba: The HBA for which this call is being executed.
22409 * @lpfc_buf: IO buf structure with the CMD/RSP buf
22411 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
22418 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22419 struct lpfc_io_buf *lpfc_buf)
22422 struct fcp_cmd_rsp_buf *list_entry = NULL;
22423 struct fcp_cmd_rsp_buf *tmp = NULL;
22424 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22425 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22426 unsigned long iflags;
22428 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22430 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22431 list_for_each_entry_safe(list_entry, tmp,
22432 &lpfc_buf->dma_cmd_rsp_list,
22434 list_move_tail(&list_entry->list_node,
22441 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22446 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22447 * @phba: phba object
22448 * @hdwq: hdwq to cleanup cmd rsp buff resources on
22450 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22456 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22457 struct lpfc_sli4_hdw_queue *hdwq)
22459 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22460 struct fcp_cmd_rsp_buf *list_entry = NULL;
22461 struct fcp_cmd_rsp_buf *tmp = NULL;
22462 unsigned long iflags;
22464 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22466 /* Free cmd_rsp buf pool */
22467 list_for_each_entry_safe(list_entry, tmp,
22470 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22471 list_entry->fcp_cmnd,
22472 list_entry->fcp_cmd_rsp_dma_handle);
22473 list_del(&list_entry->list_node);
22477 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);