/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
static int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
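
	/*
	 * Note: on LC-family HBAs the license key below is handed to the
	 * adapter through the READ_NVPARM mailbox as big-endian 32-bit
	 * words, so the string is byte-swapped exactly once (guarded by
	 * the static init_key flag).
	 */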
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
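
	/*
	 * Pull the VPD region from the adapter in chunks via repeated
	 * DUMP_MEMORY mailbox commands; stop once the adapter reports no
	 * more data (word_cnt == 0) or lpfc_vpd_data is full.
	 */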
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option ROM version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option ROM version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];
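
	/*
	 * When dist is 3 and the build number is 0, the dist character
	 * and number are left out of the version string entirely.
	 */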
	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 * cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
static void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;
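
	/*
	 * Adopt the service-parameter port name when no port name is set
	 * yet, a soft WWPN is configured, or the fabric supplied a
	 * fabric-assigned WWPN (FA-WWPN vendor key); otherwise push the
	 * current name back into the service parameters.
	 */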
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;
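
		/*
		 * Each of the six low-order WWNN bytes expands to two hex
		 * characters: nibbles 0-9 map to '0'-'9' (0x30 base) and
		 * 10-15 map to 'a'-'f' (0x61 base), giving a 12-character
		 * serial number.
		 */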
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x30 + (uint8_t)j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x61 + (uint8_t)(j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x30 + (uint8_t)j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x61 + (uint8_t)(j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;
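
	/*
	 * When the FCP ring is serviced by polling, mask its ring-0
	 * attention interrupt so completions are only reaped by the
	 * polling path.
	 */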
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option ROM version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;
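
	/*
	 * Validate the requested fixed link speed against the adapter's
	 * link-speed capability mask (phba->lmt); fall back to
	 * auto-negotiation if the speed is not supported.
	 */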
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA.
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
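
	/*
	 * SLI4 path: walk every work queue's ring, clear the on-txcmplq
	 * marker on each outstanding iocb, then abort the ring and cancel
	 * the collected iocbs as locally rejected.
	 */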
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);
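
		/*
		 * Detach each aborted IO buffer from its command and mark
		 * it successful so it can be returned to the put list for
		 * reuse.
		 */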
		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event is posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event is posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions are detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}

/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the delayed work structure.
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;
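
		/*
		 * A CPU that was idle less than 15% of the period is
		 * considered busy: defer CQ processing to the workqueue.
		 * Otherwise service the CQ from irq-poll context.
		 */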
		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}
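
	/*
	 * For each CPU flagged above, derive a new coalescing delay from
	 * the per-CPU interrupt count: every 1024 interrupts seen in the
	 * sampling period adds LPFC_EQ_DELAY_STEP usec, capped at
	 * LPFC_MAX_AUTO_EQ_DELAY.
	 */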
	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine applies some heuristics to adjust the
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
static int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
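
	/*
	 * Rearm policy below: tmo is LPFC_HB_MBOX_TIMEOUT (30s) whenever
	 * a heartbeat mailbox is outstanding (just issued or still
	 * pending), and LPFC_HB_MBOX_INTERVAL (5s) when no heartbeat is
	 * needed or the issue attempt failed.
	 */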
	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
			       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			       jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still outstanding: "
					"last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {
				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"2857 Last completion time not "
						"updated in %d ms\n",
						jiffies_to_msecs(jiffies
							- phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state == LPFC_HBA_ERROR &&
	    phba->hba_flag & HBA_PCI_ERR) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers an error attention. That could
	 * cause I/Os to be dropped by the firmware. Error out the iocbs
	 * (I/Os) on the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers an error attention with
		 * HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and
		 * let the SCSI layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, the driver needs to wait for
		 * the port to become ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are in no-wait mode, the HBA has been reset and is not
	 * functional, thus we should clear the
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli_flush_io_rings(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");
1976 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1977 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1978 &portsmphr_reg.word0))
1981 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1983 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1984 LPFC_PORT_SEM_UE_RECOVERABLE)
1986 /* Sleep for 1 second before checking the SEMAPHORE */
1990 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1991 "4827 smphr_port_status x%x : Waited %dSec",
1992 smphr_port_status, i);
1994 /* Recoverable UE, reset the HBA device */
1995 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1996 LPFC_PORT_SEM_UE_RECOVERABLE) {
1997 for (i = 0; i < 20; i++) {
1999 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2000 &portsmphr_reg.word0) &&
2001 (LPFC_POST_STAGE_PORT_READY ==
2002 bf_get(lpfc_port_smphr_port_status,
2004 rc = lpfc_sli4_port_sta_fn_reset(phba,
2005 LPFC_MBX_NO_WAIT, en_rn_msg);
2008 lpfc_printf_log(phba, KERN_ERR,
2010 "4215 Failed to recover UE");
2015 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2016 "7624 Firmware not ready: Failing UE recovery,"
2017 " waited %dSec", i);
2018 phba->link_state = LPFC_HBA_ERROR;
2021 case LPFC_SLI_INTF_IF_TYPE_2:
2022 case LPFC_SLI_INTF_IF_TYPE_6:
2023 pci_rd_rc1 = lpfc_readl(
2024 phba->sli4_hba.u.if_type2.STATUSregaddr,
2025 &portstat_reg.word0);
2026 /* consider PCI bus read error as pci_channel_offline */
2027 if (pci_rd_rc1 == -EIO) {
2028 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2029 "3151 PCI bus read access failure: x%x\n",
2030 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2031 lpfc_sli4_offline_eratt(phba);
2034 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2035 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2036 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2037 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2038 "2889 Port Overtemperature event, "
2039 "taking port offline Data: x%x x%x\n",
2040 reg_err1, reg_err2);
2042 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2043 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2044 temp_event_data.event_code = LPFC_CRIT_TEMP;
2045 temp_event_data.data = 0xFFFFFFFF;
2047 shost = lpfc_shost_from_vport(phba->pport);
2048 fc_host_post_vendor_event(shost, fc_get_event_number(),
2049 sizeof(temp_event_data),
2050 (char *)&temp_event_data,
2051 SCSI_NL_VID_TYPE_PCI
2052 | PCI_VENDOR_ID_EMULEX);
2054 spin_lock_irq(&phba->hbalock);
2055 phba->over_temp_state = HBA_OVER_TEMP;
2056 spin_unlock_irq(&phba->hbalock);
2057 lpfc_sli4_offline_eratt(phba);
2060 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2061 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2062 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2063 "3143 Port Down: Firmware Update "
2066 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2067 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2069 "3144 Port Down: Debug Dump\n");
2070 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2071 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2072 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2073 "3145 Port Down: Provisioning\n");
2075 /* If resets are disabled then leave the HBA alone and return */
2076 if (!phba->cfg_enable_hba_reset)
2079 /* Check port status register for function reset */
2080 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2083 /* don't report event on forced debug dump */
2084 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2085 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2090 /* fall through for not able to recover */
2091 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2092 "3152 Unrecoverable error\n");
2093 phba->link_state = LPFC_HBA_ERROR;
2095 case LPFC_SLI_INTF_IF_TYPE_1:
2099 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2100 "3123 Report dump event to upper layer\n");
2101 /* Send an internal error event to mgmt application */
2102 lpfc_board_errevt_to_mgmt(phba);
2104 event_data = FC_REG_DUMP_EVENT;
2105 shost = lpfc_shost_from_vport(vport);
2106 fc_host_post_vendor_event(shost, fc_get_event_number(),
2107 sizeof(event_data), (char *) &event_data,
2108 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2112 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2113 * @phba: pointer to lpfc HBA data structure.
2115 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2116 * routine, invoked via the API jump table function pointer in the lpfc_hba struct.
2120 * Any other value - error.
2123 lpfc_handle_eratt(struct lpfc_hba *phba)
2125 (*phba->lpfc_handle_eratt)(phba);
2129 * lpfc_handle_latt - The HBA link event handler
2130 * @phba: pointer to lpfc hba data structure.
2132 * This routine is invoked from the worker thread to handle a HBA host
2133 * attention link event. SLI3 only.
2136 lpfc_handle_latt(struct lpfc_hba *phba)
2138 struct lpfc_vport *vport = phba->pport;
2139 struct lpfc_sli *psli = &phba->sli;
2141 volatile uint32_t control;
2142 struct lpfc_dmabuf *mp;
2145 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2148 goto lpfc_handle_latt_err_exit;
2151 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2154 goto lpfc_handle_latt_free_pmb;
2157 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2160 goto lpfc_handle_latt_free_mp;
2163 /* Cleanup any outstanding ELS commands */
2164 lpfc_els_flush_all_cmd(phba);
2166 psli->slistat.link_event++;
2167 lpfc_read_topology(phba, pmb, mp);
2168 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2170 /* Block ELS IOCBs until we have processed this mbox command */
2171 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2172 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2173 if (rc == MBX_NOT_FINISHED) {
2175 goto lpfc_handle_latt_free_mbuf;
2178 /* Clear Link Attention in HA REG */
2179 spin_lock_irq(&phba->hbalock);
2180 writel(HA_LATT, phba->HAregaddr);
2181 readl(phba->HAregaddr); /* flush */
2182 spin_unlock_irq(&phba->hbalock);
2186 lpfc_handle_latt_free_mbuf:
2187 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2188 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2189 lpfc_handle_latt_free_mp:
2191 lpfc_handle_latt_free_pmb:
2192 mempool_free(pmb, phba->mbox_mem_pool);
2193 lpfc_handle_latt_err_exit:
2194 /* Enable Link attention interrupts */
2195 spin_lock_irq(&phba->hbalock);
2196 psli->sli_flag |= LPFC_PROCESS_LA;
2197 control = readl(phba->HCregaddr);
2198 control |= HC_LAINT_ENA;
2199 writel(control, phba->HCregaddr);
2200 readl(phba->HCregaddr); /* flush */
2202 /* Clear Link Attention in HA REG */
2203 writel(HA_LATT, phba->HAregaddr);
2204 readl(phba->HAregaddr); /* flush */
2205 spin_unlock_irq(&phba->hbalock);
2206 lpfc_linkdown(phba);
2207 phba->link_state = LPFC_HBA_ERROR;
2209 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2210 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2216 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2217 * @phba: pointer to lpfc hba data structure.
2218 * @vpd: pointer to the vital product data.
2219 * @len: length of the vital product data in bytes.
2221 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2222 * an array of characters. This routine populates the ModelName,
2223 * ProgramType, ModelDesc, and related fields of the phba data structure.
2226 * 0 - pointer to the VPD passed in is NULL
2230 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2232 uint8_t lenlo, lenhi;
2242 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2243 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2244 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2246 while (!finished && (index < (len - 4))) {
2247 switch (vpd[index]) {
2255 i = ((((unsigned short)lenhi) << 8) + lenlo);
2264 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2265 if (Length > len - index)
2266 Length = len - index;
2267 while (Length > 0) {
2268 /* Look for Serial Number */
2269 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2276 phba->SerialNumber[j++] = vpd[index++];
2280 phba->SerialNumber[j] = 0;
2283 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2284 phba->vpd_flag |= VPD_MODEL_DESC;
2291 phba->ModelDesc[j++] = vpd[index++];
2295 phba->ModelDesc[j] = 0;
2298 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2299 phba->vpd_flag |= VPD_MODEL_NAME;
2306 phba->ModelName[j++] = vpd[index++];
2310 phba->ModelName[j] = 0;
2313 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2314 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2321 phba->ProgramType[j++] = vpd[index++];
2325 phba->ProgramType[j] = 0;
2328 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2329 phba->vpd_flag |= VPD_PORT;
2336 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2337 (phba->sli4_hba.pport_name_sta ==
2338 LPFC_SLI4_PPNAME_GET)) {
2342 phba->Port[j++] = vpd[index++];
2346 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2347 (phba->sli4_hba.pport_name_sta ==
2348 LPFC_SLI4_PPNAME_NON))
2375 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2376 * @phba: pointer to lpfc hba data structure.
2377 * @mdp: pointer to the data structure to hold the derived model name.
2378 * @descp: pointer to the data structure to hold the derived description.
2380 * This routine retrieves the HBA's description based on its registered PCI
2381 * device ID. The @descp passed into this function points to an array of 256
2382 * chars; it is returned with the model name, maximum speed, and host bus type.
2383 * The @mdp passed into this function points to an array of 80 chars. When the
2384 * function returns, the @mdp will be filled with the model name.
2387 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2390 uint16_t dev_id = phba->pcidev->device;
2393 int oneConnect = 0; /* default is not a oneConnect */
2398 } m = {"<Unknown>", "", ""};
2400 if (mdp && mdp[0] != '\0'
2401 && descp && descp[0] != '\0')
2404 if (phba->lmt & LMT_64Gb)
2406 else if (phba->lmt & LMT_32Gb)
2408 else if (phba->lmt & LMT_16Gb)
2410 else if (phba->lmt & LMT_10Gb)
2412 else if (phba->lmt & LMT_8Gb)
2414 else if (phba->lmt & LMT_4Gb)
2416 else if (phba->lmt & LMT_2Gb)
2418 else if (phba->lmt & LMT_1Gb)
2426 case PCI_DEVICE_ID_FIREFLY:
2427 m = (typeof(m)){"LP6000", "PCI",
2428 "Obsolete, Unsupported Fibre Channel Adapter"};
2430 case PCI_DEVICE_ID_SUPERFLY:
2431 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2432 m = (typeof(m)){"LP7000", "PCI", ""};
2434 m = (typeof(m)){"LP7000E", "PCI", ""};
2435 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2437 case PCI_DEVICE_ID_DRAGONFLY:
2438 m = (typeof(m)){"LP8000", "PCI",
2439 "Obsolete, Unsupported Fibre Channel Adapter"};
2441 case PCI_DEVICE_ID_CENTAUR:
2442 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2443 m = (typeof(m)){"LP9002", "PCI", ""};
2445 m = (typeof(m)){"LP9000", "PCI", ""};
2446 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2448 case PCI_DEVICE_ID_RFLY:
2449 m = (typeof(m)){"LP952", "PCI",
2450 "Obsolete, Unsupported Fibre Channel Adapter"};
2452 case PCI_DEVICE_ID_PEGASUS:
2453 m = (typeof(m)){"LP9802", "PCI-X",
2454 "Obsolete, Unsupported Fibre Channel Adapter"};
2456 case PCI_DEVICE_ID_THOR:
2457 m = (typeof(m)){"LP10000", "PCI-X",
2458 "Obsolete, Unsupported Fibre Channel Adapter"};
2460 case PCI_DEVICE_ID_VIPER:
2461 m = (typeof(m)){"LPX1000", "PCI-X",
2462 "Obsolete, Unsupported Fibre Channel Adapter"};
2464 case PCI_DEVICE_ID_PFLY:
2465 m = (typeof(m)){"LP982", "PCI-X",
2466 "Obsolete, Unsupported Fibre Channel Adapter"};
2468 case PCI_DEVICE_ID_TFLY:
2469 m = (typeof(m)){"LP1050", "PCI-X",
2470 "Obsolete, Unsupported Fibre Channel Adapter"};
2472 case PCI_DEVICE_ID_HELIOS:
2473 m = (typeof(m)){"LP11000", "PCI-X2",
2474 "Obsolete, Unsupported Fibre Channel Adapter"};
2476 case PCI_DEVICE_ID_HELIOS_SCSP:
2477 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2478 "Obsolete, Unsupported Fibre Channel Adapter"};
2480 case PCI_DEVICE_ID_HELIOS_DCSP:
2481 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2482 "Obsolete, Unsupported Fibre Channel Adapter"};
2484 case PCI_DEVICE_ID_NEPTUNE:
2485 m = (typeof(m)){"LPe1000", "PCIe",
2486 "Obsolete, Unsupported Fibre Channel Adapter"};
2488 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2489 m = (typeof(m)){"LPe1000-SP", "PCIe",
2490 "Obsolete, Unsupported Fibre Channel Adapter"};
2492 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2493 m = (typeof(m)){"LPe1002-SP", "PCIe",
2494 "Obsolete, Unsupported Fibre Channel Adapter"};
2496 case PCI_DEVICE_ID_BMID:
2497 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2499 case PCI_DEVICE_ID_BSMB:
2500 m = (typeof(m)){"LP111", "PCI-X2",
2501 "Obsolete, Unsupported Fibre Channel Adapter"};
2503 case PCI_DEVICE_ID_ZEPHYR:
2504 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2506 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2507 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2509 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2510 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2513 case PCI_DEVICE_ID_ZMID:
2514 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2516 case PCI_DEVICE_ID_ZSMB:
2517 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2519 case PCI_DEVICE_ID_LP101:
2520 m = (typeof(m)){"LP101", "PCI-X",
2521 "Obsolete, Unsupported Fibre Channel Adapter"};
2523 case PCI_DEVICE_ID_LP10000S:
2524 m = (typeof(m)){"LP10000-S", "PCI",
2525 "Obsolete, Unsupported Fibre Channel Adapter"};
2527 case PCI_DEVICE_ID_LP11000S:
2528 m = (typeof(m)){"LP11000-S", "PCI-X2",
2529 "Obsolete, Unsupported Fibre Channel Adapter"};
2531 case PCI_DEVICE_ID_LPE11000S:
2532 m = (typeof(m)){"LPe11000-S", "PCIe",
2533 "Obsolete, Unsupported Fibre Channel Adapter"};
2535 case PCI_DEVICE_ID_SAT:
2536 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2538 case PCI_DEVICE_ID_SAT_MID:
2539 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2541 case PCI_DEVICE_ID_SAT_SMB:
2542 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2544 case PCI_DEVICE_ID_SAT_DCSP:
2545 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2547 case PCI_DEVICE_ID_SAT_SCSP:
2548 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2550 case PCI_DEVICE_ID_SAT_S:
2551 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2553 case PCI_DEVICE_ID_HORNET:
2554 m = (typeof(m)){"LP21000", "PCIe",
2555 "Obsolete, Unsupported FCoE Adapter"};
2558 case PCI_DEVICE_ID_PROTEUS_VF:
2559 m = (typeof(m)){"LPev12000", "PCIe IOV",
2560 "Obsolete, Unsupported Fibre Channel Adapter"};
2562 case PCI_DEVICE_ID_PROTEUS_PF:
2563 m = (typeof(m)){"LPev12000", "PCIe IOV",
2564 "Obsolete, Unsupported Fibre Channel Adapter"};
2566 case PCI_DEVICE_ID_PROTEUS_S:
2567 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2568 "Obsolete, Unsupported Fibre Channel Adapter"};
2570 case PCI_DEVICE_ID_TIGERSHARK:
2572 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2574 case PCI_DEVICE_ID_TOMCAT:
2576 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2578 case PCI_DEVICE_ID_FALCON:
2579 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2580 "EmulexSecure Fibre"};
2582 case PCI_DEVICE_ID_BALIUS:
2583 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2584 "Obsolete, Unsupported Fibre Channel Adapter"};
2586 case PCI_DEVICE_ID_LANCER_FC:
2587 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2589 case PCI_DEVICE_ID_LANCER_FC_VF:
2590 m = (typeof(m)){"LPe16000", "PCIe",
2591 "Obsolete, Unsupported Fibre Channel Adapter"};
2593 case PCI_DEVICE_ID_LANCER_FCOE:
2595 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2597 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2599 m = (typeof(m)){"OCe15100", "PCIe",
2600 "Obsolete, Unsupported FCoE"};
2602 case PCI_DEVICE_ID_LANCER_G6_FC:
2603 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2605 case PCI_DEVICE_ID_LANCER_G7_FC:
2606 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2608 case PCI_DEVICE_ID_LANCER_G7P_FC:
2609 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2611 case PCI_DEVICE_ID_SKYHAWK:
2612 case PCI_DEVICE_ID_SKYHAWK_VF:
2614 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2617 m = (typeof(m)){"Unknown", "", ""};
2621 if (mdp && mdp[0] == '\0')
2622 snprintf(mdp, 79, "%s", m.name);
2624 * OneConnect HBAs require special processing; they are all initiators
2625 * and the port number is appended to the end
2627 if (descp && descp[0] == '\0') {
2629 snprintf(descp, 255,
2630 "Emulex OneConnect %s, %s Initiator %s",
2633 else if (max_speed == 0)
2634 snprintf(descp, 255,
2636 m.name, m.bus, m.function);
2638 snprintf(descp, 255,
2639 "Emulex %s %d%s %s %s",
2640 m.name, max_speed, (GE) ? "GE" : "Gb",
2646 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2647 * @phba: pointer to lpfc hba data structure.
2648 * @pring: pointer to an IOCB ring.
2649 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2651 * This routine posts a given number of IOCBs with the associated DMA buffer
2652 * descriptors specified by the cnt argument to the given IOCB ring.
2655 * The number of IOCBs NOT able to be posted to the IOCB ring.
2658 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2661 struct lpfc_iocbq *iocb;
2662 struct lpfc_dmabuf *mp1, *mp2;
2664 cnt += pring->missbufcnt;
2666 /* While there are buffers to post */
2668 /* Allocate buffer for command iocb */
2669 iocb = lpfc_sli_get_iocbq(phba);
2671 pring->missbufcnt = cnt;
2676 /* 2 buffers can be posted per command */
2677 /* Allocate buffer to post */
2678 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2680 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2681 if (!mp1 || !mp1->virt) {
2683 lpfc_sli_release_iocbq(phba, iocb);
2684 pring->missbufcnt = cnt;
2688 INIT_LIST_HEAD(&mp1->list);
2689 /* Allocate buffer to post */
2691 mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2693 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2695 if (!mp2 || !mp2->virt) {
2697 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2699 lpfc_sli_release_iocbq(phba, iocb);
2700 pring->missbufcnt = cnt;
2704 INIT_LIST_HEAD(&mp2->list);
2709 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2710 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2711 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2712 icmd->ulpBdeCount = 1;
2715 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2716 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2717 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2719 icmd->ulpBdeCount = 2;
2722 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2725 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2727 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2731 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2735 lpfc_sli_release_iocbq(phba, iocb);
2736 pring->missbufcnt = cnt;
2739 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2741 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2743 pring->missbufcnt = 0;
2748 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2749 * @phba: pointer to lpfc hba data structure.
2751 * This routine posts initial receive IOCB buffers to the ELS ring. The
2752 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2753 * set to 64 IOCBs. SLI3 only.
2756 * 0 - success (currently always success)
2759 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2761 struct lpfc_sli *psli = &phba->sli;
2763 /* Ring 0, ELS / CT buffers */
2764 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2765 /* Ring 2 - FCP no buffers needed */
2770 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
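/* S(N, V) rotates the 32-bit value V left by N bits, the ROTL operation of
 * SHA-1. The values loaded by lpfc_sha_init() below are the standard SHA-1
 * initial hash values H0-H4, and lpfc_sha_iterate() follows the 80-round
 * SHA-1 compression schedule (round constants 0x5A827999, 0x6ED9EBA1,
 * 0x8F1BBCDC, 0xCA62C1D6).
 */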
2773 * lpfc_sha_init - Set up initial array of hash table entries
2774 * @HashResultPointer: pointer to an array as hash table.
2776 * This routine sets up the initial values in the array of hash table entries
2780 lpfc_sha_init(uint32_t *HashResultPointer)
2782 HashResultPointer[0] = 0x67452301;
2783 HashResultPointer[1] = 0xEFCDAB89;
2784 HashResultPointer[2] = 0x98BADCFE;
2785 HashResultPointer[3] = 0x10325476;
2786 HashResultPointer[4] = 0xC3D2E1F0;
2790 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2791 * @HashResultPointer: pointer to an initial/result hash table.
2792 * @HashWorkingPointer: pointer to a working hash table.
2794 * This routine iterates an initial hash table pointed to by @HashResultPointer
2795 * with the values from the working hash table pointed to by @HashWorkingPointer.
2796 * The results are put back into the initial hash table and returned through
2797 * @HashResultPointer as the result hash table.
2800 lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2804 uint32_t A, B, C, D, E;
2807 HashWorkingPointer[t] =
2809 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2811 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2812 } while (++t <= 79);
2814 A = HashResultPointer[0];
2815 B = HashResultPointer[1];
2816 C = HashResultPointer[2];
2817 D = HashResultPointer[3];
2818 E = HashResultPointer[4];
2822 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2823 } else if (t < 40) {
2824 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2825 } else if (t < 60) {
2826 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2828 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2830 TEMP += S(5, A) + E + HashWorkingPointer[t];
2836 } while (++t <= 79);
2838 HashResultPointer[0] += A;
2839 HashResultPointer[1] += B;
2840 HashResultPointer[2] += C;
2841 HashResultPointer[3] += D;
2842 HashResultPointer[4] += E;
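/* The five additions above fold the working variables A-E back into the
 * running hash state, completing one SHA-1 block compression.
 */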
2847 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2848 * @RandomChallenge: pointer to the entry of host challenge random number array.
2849 * @HashWorking: pointer to the entry of the working hash array.
2851 * This routine calculates the working hash array referred to by @HashWorking
2852 * from the challenge random numbers associated with the host, referred to by
2853 * @RandomChallenge. The result is put into the entry of the working hash
2854 * array and returned by reference through @HashWorking.
2857 lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2859 *HashWorking = (*RandomChallenge ^ *HashWorking);
2863 * lpfc_hba_init - Perform special handling for LC HBA initialization
2864 * @phba: pointer to lpfc hba data structure.
2865 * @hbainit: pointer to an array of unsigned 32-bit integers.
2867 * This routine performs the special handling for LC HBA initialization.
2870 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2873 uint32_t *HashWorking;
2874 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2876 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2880 HashWorking[0] = HashWorking[78] = *pwwnn++;
2881 HashWorking[1] = HashWorking[79] = *pwwnn;
2883 for (t = 0; t < 7; t++)
2884 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
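/* HashWorking[] now holds the hash input: the WWNN words were stored at
 * [0]/[78] and [1]/[79], the loop above XOR-folded the 7-word RandomData
 * challenge into HashWorking[0..6], and the remaining entries are still
 * zero from kcalloc().
 */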
2886 lpfc_sha_init(hbainit);
2887 lpfc_sha_iterate(hbainit, HashWorking);
2892 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2893 * @vport: pointer to a virtual N_Port data structure.
2895 * This routine performs the necessary cleanups before deleting the @vport.
2896 * It invokes the discovery state machine to perform necessary state
2897 * transitions and to release the ndlps associated with the @vport. Note,
2898 * the physical port is treated as @vport 0.
2901 lpfc_cleanup(struct lpfc_vport *vport)
2903 struct lpfc_hba *phba = vport->phba;
2904 struct lpfc_nodelist *ndlp, *next_ndlp;
2907 if (phba->link_state > LPFC_LINK_DOWN)
2908 lpfc_port_link_failure(vport);
2910 /* Clean up VMID resources */
2911 if (lpfc_is_vmid_enabled(phba))
2912 lpfc_vmid_vport_cleanup(vport);
2914 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2915 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2916 ndlp->nlp_DID == Fabric_DID) {
2917 /* Just free up ndlp with Fabric_DID for vports */
2922 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2923 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2928 /* Fabric Ports not in UNMAPPED state are cleaned up in the
2931 if (ndlp->nlp_type & NLP_FABRIC &&
2932 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
2933 lpfc_disc_state_machine(vport, ndlp, NULL,
2934 NLP_EVT_DEVICE_RECOVERY);
2936 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2937 lpfc_disc_state_machine(vport, ndlp, NULL,
2941 /* At this point, ALL ndlp's should be gone
2942 * because of the previous NLP_EVT_DEVICE_RM.
2943 * Let's wait for this to happen, if needed.
2945 while (!list_empty(&vport->fc_nodes)) {
2947 lpfc_printf_vlog(vport, KERN_ERR,
2949 "0233 Nodelist not empty\n");
2950 list_for_each_entry_safe(ndlp, next_ndlp,
2951 &vport->fc_nodes, nlp_listp) {
2952 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2954 "0282 did:x%x ndlp:x%px "
2955 "refcnt:%d xflags x%x nflag x%x\n",
2956 ndlp->nlp_DID, (void *)ndlp,
2957 kref_read(&ndlp->kref),
2958 ndlp->fc4_xpt_flags,
2964 /* Wait for any activity on ndlps to settle */
2967 lpfc_cleanup_vports_rrqs(vport, NULL);
2971 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2972 * @vport: pointer to a virtual N_Port data structure.
2974 * This routine stops all the timers associated with a @vport. This function
2975 * is invoked before disabling or deleting a @vport. Note that the physical
2976 * port is treated as @vport 0.
2979 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2981 del_timer_sync(&vport->els_tmofunc);
2982 del_timer_sync(&vport->delayed_disc_tmo);
2983 lpfc_can_disctmo(vport);
2988 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2989 * @phba: pointer to lpfc hba data structure.
2991 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2992 * caller of this routine should already hold the host lock.
2995 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2997 /* Clear pending FCF rediscovery wait flag */
2998 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3000 /* Now, try to stop the timer */
3001 del_timer(&phba->fcf.redisc_wait);
3005 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3006 * @phba: pointer to lpfc hba data structure.
3008 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3009 * checks, with the host lock held, whether the FCF rediscovery wait timer
3010 * is pending before proceeding with disabling the timer and clearing the
3011 * wait timer pending flag.
3014 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3016 spin_lock_irq(&phba->hbalock);
3017 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3018 /* FCF rediscovery timer already fired or stopped */
3019 spin_unlock_irq(&phba->hbalock);
3022 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3023 /* Clear failover in progress flags */
3024 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3025 spin_unlock_irq(&phba->hbalock);
3029 * lpfc_cmf_stop - Stop CMF processing
3030 * @phba: pointer to lpfc hba data structure.
3032 * This is called when the link goes down or if CMF mode is turned OFF.
3033 * It is also called when going offline or unloading, just before the
3034 * congestion info buffer is unregistered.
3037 lpfc_cmf_stop(struct lpfc_hba *phba)
3040 struct lpfc_cgn_stat *cgs;
3042 /* We only do something if CMF is enabled */
3043 if (!phba->sli4_hba.pc_sli4_params.cmf)
3046 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3047 "6221 Stop CMF / Cancel Timer\n");
3049 /* Cancel the CMF timer */
3050 hrtimer_cancel(&phba->cmf_timer);
3052 /* Zero CMF counters */
3053 atomic_set(&phba->cmf_busy, 0);
3054 for_each_present_cpu(cpu) {
3055 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3056 atomic64_set(&cgs->total_bytes, 0);
3057 atomic64_set(&cgs->rcv_bytes, 0);
3058 atomic_set(&cgs->rx_io_cnt, 0);
3059 atomic64_set(&cgs->rx_latency, 0);
3061 atomic_set(&phba->cmf_bw_wait, 0);
3063 /* Resume any blocked IO - Queue unblock on workqueue */
3064 queue_work(phba->wq, &phba->unblock_request_work);
3067 static inline uint64_t
3068 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3070 uint64_t rate = lpfc_sli_port_speed_get(phba);
3072 return ((((unsigned long)rate) * 1024 * 1024) / 10);
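/* A hedged reading of the arithmetic above: lpfc_sli_port_speed_get()
 * appears to report link speed in megabits per second, so multiplying by
 * 1024 * 1024 gives bits per second and dividing by 10 approximates bytes
 * per second (10 bits per byte on an 8b/10b-encoded link). For example,
 * rate = 10240 works out to (10240 * 1048576) / 10 = 1073741824 bytes/sec.
 */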
3076 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3078 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3079 "6223 Signal CMF init\n");
3081 /* Use the new fc_linkspeed to recalculate */
3082 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3083 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3084 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3085 phba->cmf_interval_rate, 1000);
3086 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
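/* Assuming cmf_max_line_rate is in bytes/sec (see lpfc_get_max_line_rate())
 * and cmf_interval_rate is in milliseconds, the division by 1000 above
 * yields the byte budget per CMF interval, e.g. 1073741824 bytes/sec over a
 * 100 ms interval gives a cmf_link_byte_count of 107374182 bytes.
 */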
3088 /* This is a signal to firmware to sync up CMF BW with link speed */
3089 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3093 * lpfc_cmf_start - Start CMF processing
3094 * @phba: pointer to lpfc hba data structure.
3096 * This is called when the link comes up or if the CMF mode is changed
3097 * from OFF to Monitor or Managed.
3100 lpfc_cmf_start(struct lpfc_hba *phba)
3102 struct lpfc_cgn_stat *cgs;
3105 /* We only do something if CMF is enabled */
3106 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3107 phba->cmf_active_mode == LPFC_CFG_OFF)
3110 /* Reinitialize congestion buffer info */
3111 lpfc_init_congestion_buf(phba);
3113 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3114 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3115 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3116 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3118 atomic_set(&phba->cmf_busy, 0);
3119 for_each_present_cpu(cpu) {
3120 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3121 atomic64_set(&cgs->total_bytes, 0);
3122 atomic64_set(&cgs->rcv_bytes, 0);
3123 atomic_set(&cgs->rx_io_cnt, 0);
3124 atomic64_set(&cgs->rx_latency, 0);
3126 phba->cmf_latency.tv_sec = 0;
3127 phba->cmf_latency.tv_nsec = 0;
3129 lpfc_cmf_signal_init(phba);
3131 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3132 "6222 Start CMF / Timer\n");
3134 phba->cmf_timer_cnt = 0;
3135 hrtimer_start(&phba->cmf_timer,
3136 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3138 /* Setup for latency check in IO cmpl routines */
3139 ktime_get_real_ts64(&phba->cmf_latency);
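/* LPFC_CMF_INTERVAL is in milliseconds; the * 1000000 above converts the
 * hrtimer period to the nanoseconds that ktime_set() expects.
 */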
3141 atomic_set(&phba->cmf_bw_wait, 0);
3142 atomic_set(&phba->cmf_stop_io, 0);
3146 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3147 * @phba: pointer to lpfc hba data structure.
3149 * This routine stops all the timers associated with an HBA. This function is
3150 * invoked before either putting an HBA offline or unloading the driver.
3153 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3156 lpfc_stop_vport_timers(phba->pport);
3157 cancel_delayed_work_sync(&phba->eq_delay_work);
3158 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3159 del_timer_sync(&phba->sli.mbox_tmo);
3160 del_timer_sync(&phba->fabric_block_timer);
3161 del_timer_sync(&phba->eratt_poll);
3162 del_timer_sync(&phba->hb_tmofunc);
3163 if (phba->sli_rev == LPFC_SLI_REV4) {
3164 del_timer_sync(&phba->rrq_tmr);
3165 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3167 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3169 switch (phba->pci_dev_grp) {
3170 case LPFC_PCI_DEV_LP:
3171 /* Stop any LightPulse device specific driver timers */
3172 del_timer_sync(&phba->fcp_poll_timer);
3174 case LPFC_PCI_DEV_OC:
3175 /* Stop any OneConnect device specific driver timers */
3176 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3180 "0297 Invalid device group (x%x)\n",
3188 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3189 * @phba: pointer to lpfc hba data structure.
3190 * @mbx_action: flag for mailbox no wait action.
3192 * This routine marks a HBA's management interface as blocked. Once the HBA's
3193 * management interface is marked as blocked, all user space access to
3194 * the HBA, whether from the sysfs interface or the libdfc interface,
3195 * will be blocked. The HBA is set to block the management interface when the
3196 * driver prepares the HBA interface for online or offline.
3199 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3201 unsigned long iflag;
3202 uint8_t actcmd = MBX_HEARTBEAT;
3203 unsigned long timeout;
3205 spin_lock_irqsave(&phba->hbalock, iflag);
3206 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3207 spin_unlock_irqrestore(&phba->hbalock, iflag);
3208 if (mbx_action == LPFC_MBX_NO_WAIT)
3210 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3211 spin_lock_irqsave(&phba->hbalock, iflag);
3212 if (phba->sli.mbox_active) {
3213 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3214 /* Determine how long we might wait for the active mailbox
3215 * command to be gracefully completed by firmware.
3217 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3218 phba->sli.mbox_active) * 1000) + jiffies;
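/* This assumes lpfc_mbox_tmo_val() returns a timeout in seconds; the
 * * 1000 plus msecs_to_jiffies() above converts it into a jiffies
 * deadline relative to the current time.
 */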
3220 spin_unlock_irqrestore(&phba->hbalock, iflag);
3222 /* Wait for the outstanding mailbox command to complete */
3223 while (phba->sli.mbox_active) {
3224 /* Check active mailbox complete status every 2ms */
3226 if (time_after(jiffies, timeout)) {
3227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3228 "2813 Mgmt IO is Blocked %x "
3229 "- mbox cmd %x still active\n",
3230 phba->sli.sli_flag, actcmd);
3237 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3238 * @phba: pointer to lpfc hba data structure.
3240 * Allocate RPIs for all active remote nodes. This is needed whenever
3241 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3242 * is to fix up the temporary RPI assignments.
3245 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3247 struct lpfc_nodelist *ndlp, *next_ndlp;
3248 struct lpfc_vport **vports;
3251 if (phba->sli_rev != LPFC_SLI_REV4)
3254 vports = lpfc_create_vport_work_array(phba);
3258 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3259 if (vports[i]->load_flag & FC_UNLOADING)
3262 list_for_each_entry_safe(ndlp, next_ndlp,
3263 &vports[i]->fc_nodes,
3265 rpi = lpfc_sli4_alloc_rpi(phba);
3266 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3267 /* TODO print log? */
3270 ndlp->nlp_rpi = rpi;
3271 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3272 LOG_NODE | LOG_DISCOVERY,
3273 "0009 Assign RPI x%x to ndlp x%px "
3274 "DID:x%06x flg:x%x\n",
3275 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3279 lpfc_destroy_vport_work_array(phba, vports);
3283 * lpfc_create_expedite_pool - create expedite pool
3284 * @phba: pointer to lpfc hba data structure.
3286 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3287 * to the expedite pool and marks them as expedite.
3289 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3291 struct lpfc_sli4_hdw_queue *qp;
3292 struct lpfc_io_buf *lpfc_ncmd;
3293 struct lpfc_io_buf *lpfc_ncmd_next;
3294 struct lpfc_epd_pool *epd_pool;
3295 unsigned long iflag;
3297 epd_pool = &phba->epd_pool;
3298 qp = &phba->sli4_hba.hdwq[0];
3300 spin_lock_init(&epd_pool->lock);
3301 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3302 spin_lock(&epd_pool->lock);
3303 INIT_LIST_HEAD(&epd_pool->list);
3304 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3305 &qp->lpfc_io_buf_list_put, list) {
3306 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3307 lpfc_ncmd->expedite = true;
3310 if (epd_pool->count >= XRI_BATCH)
3313 spin_unlock(&epd_pool->lock);
3314 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
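/* The move loop above stops once epd_pool->count reaches XRI_BATCH, so the
 * expedite pool is capped at one XRI_BATCH worth of reserved buffers, each
 * tagged with lpfc_ncmd->expedite = true.
 */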
3318 * lpfc_destroy_expedite_pool - destroy expedite pool
3319 * @phba: pointer to lpfc hba data structure.
3321 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3322 * of HWQ 0 and clears the expedite mark.
3324 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3326 struct lpfc_sli4_hdw_queue *qp;
3327 struct lpfc_io_buf *lpfc_ncmd;
3328 struct lpfc_io_buf *lpfc_ncmd_next;
3329 struct lpfc_epd_pool *epd_pool;
3330 unsigned long iflag;
3332 epd_pool = &phba->epd_pool;
3333 qp = &phba->sli4_hba.hdwq[0];
3335 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3336 spin_lock(&epd_pool->lock);
3337 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3338 &epd_pool->list, list) {
3339 list_move_tail(&lpfc_ncmd->list,
3340 &qp->lpfc_io_buf_list_put);
3341 lpfc_ncmd->expedite = false; /* clear the mark set at pool creation */
3345 spin_unlock(&epd_pool->lock);
3346 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3350 * lpfc_create_multixri_pools - create multi-XRI pools
3351 * @phba: pointer to lpfc hba data structure.
3353 * This routine initializes the public and private pools for each HWQ, then
3354 * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low watermarks are also
3357 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3362 struct lpfc_io_buf *lpfc_ncmd;
3363 struct lpfc_io_buf *lpfc_ncmd_next;
3364 unsigned long iflag;
3365 struct lpfc_sli4_hdw_queue *qp;
3366 struct lpfc_multixri_pool *multixri_pool;
3367 struct lpfc_pbl_pool *pbl_pool;
3368 struct lpfc_pvt_pool *pvt_pool;
3370 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3371 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3372 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3373 phba->sli4_hba.io_xri_cnt);
3375 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3376 lpfc_create_expedite_pool(phba);
3378 hwq_count = phba->cfg_hdw_queue;
3379 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3381 for (i = 0; i < hwq_count; i++) {
3382 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3384 if (!multixri_pool) {
3385 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3386 "1238 Failed to allocate memory for "
3389 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3390 lpfc_destroy_expedite_pool(phba);
3394 qp = &phba->sli4_hba.hdwq[j];
3395 kfree(qp->p_multixri_pool);
3398 phba->cfg_xri_rebalancing = 0;
3402 qp = &phba->sli4_hba.hdwq[i];
3403 qp->p_multixri_pool = multixri_pool;
3405 multixri_pool->xri_limit = count_per_hwq;
3406 multixri_pool->rrb_next_hwqid = i;
3408 /* Deal with public free xri pool */
3409 pbl_pool = &multixri_pool->pbl_pool;
3410 spin_lock_init(&pbl_pool->lock);
3411 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3412 spin_lock(&pbl_pool->lock);
3413 INIT_LIST_HEAD(&pbl_pool->list);
3414 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3415 &qp->lpfc_io_buf_list_put, list) {
3416 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3420 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3421 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3422 pbl_pool->count, i);
3423 spin_unlock(&pbl_pool->lock);
3424 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3426 /* Deal with private free xri pool */
3427 pvt_pool = &multixri_pool->pvt_pool;
3428 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3429 pvt_pool->low_watermark = XRI_BATCH;
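/* Worked example: with io_xri_cnt = 2048 and cfg_hdw_queue = 8, each HWQ
 * gets an xri_limit (count_per_hwq) of 256, a private-pool high watermark
 * of 128, and a low watermark of XRI_BATCH.
 */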
3430 spin_lock_init(&pvt_pool->lock);
3431 spin_lock_irqsave(&pvt_pool->lock, iflag);
3432 INIT_LIST_HEAD(&pvt_pool->list);
3433 pvt_pool->count = 0;
3434 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3439 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3440 * @phba: pointer to lpfc hba data structure.
3442 * This routine returns XRIs from the public/private pools to lpfc_io_buf_list_put.
3444 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3448 struct lpfc_io_buf *lpfc_ncmd;
3449 struct lpfc_io_buf *lpfc_ncmd_next;
3450 unsigned long iflag;
3451 struct lpfc_sli4_hdw_queue *qp;
3452 struct lpfc_multixri_pool *multixri_pool;
3453 struct lpfc_pbl_pool *pbl_pool;
3454 struct lpfc_pvt_pool *pvt_pool;
3456 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3457 lpfc_destroy_expedite_pool(phba);
3459 if (!(phba->pport->load_flag & FC_UNLOADING))
3460 lpfc_sli_flush_io_rings(phba);
3462 hwq_count = phba->cfg_hdw_queue;
3464 for (i = 0; i < hwq_count; i++) {
3465 qp = &phba->sli4_hba.hdwq[i];
3466 multixri_pool = qp->p_multixri_pool;
3470 qp->p_multixri_pool = NULL;
3472 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3474 /* Deal with public free xri pool */
3475 pbl_pool = &multixri_pool->pbl_pool;
3476 spin_lock(&pbl_pool->lock);
3478 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3479 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3480 pbl_pool->count, i);
3482 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3483 &pbl_pool->list, list) {
3484 list_move_tail(&lpfc_ncmd->list,
3485 &qp->lpfc_io_buf_list_put);
3490 INIT_LIST_HEAD(&pbl_pool->list);
3491 pbl_pool->count = 0;
3493 spin_unlock(&pbl_pool->lock);
3495 /* Deal with private free xri pool */
3496 pvt_pool = &multixri_pool->pvt_pool;
3497 spin_lock(&pvt_pool->lock);
3499 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3500 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3501 pvt_pool->count, i);
3503 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3504 &pvt_pool->list, list) {
3505 list_move_tail(&lpfc_ncmd->list,
3506 &qp->lpfc_io_buf_list_put);
3511 INIT_LIST_HEAD(&pvt_pool->list);
3512 pvt_pool->count = 0;
3514 spin_unlock(&pvt_pool->lock);
3515 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3517 kfree(multixri_pool);
3522 * lpfc_online - Initialize and bring a HBA online
3523 * @phba: pointer to lpfc hba data structure.
3525 * This routine initializes the HBA and brings the HBA online. During this
3526 * process, the management interface is blocked to prevent user space access
3527 * to the HBA from interfering with the driver initialization.
3534 lpfc_online(struct lpfc_hba *phba)
3536 struct lpfc_vport *vport;
3537 struct lpfc_vport **vports;
3539 bool vpis_cleared = false;
3543 vport = phba->pport;
3545 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3548 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3549 "0458 Bring Adapter online\n");
3551 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3553 if (phba->sli_rev == LPFC_SLI_REV4) {
3554 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3555 lpfc_unblock_mgmt_io(phba);
3558 spin_lock_irq(&phba->hbalock);
3559 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3560 vpis_cleared = true;
3561 spin_unlock_irq(&phba->hbalock);
3563 /* Reestablish the local initiator port.
3564 * The offline process destroyed the previous lport.
3566 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3567 !phba->nvmet_support) {
3568 error = lpfc_nvme_create_localport(phba->pport);
3570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3571 "6132 NVME restore reg failed "
3572 "on nvmei error x%x\n", error);
3575 lpfc_sli_queue_init(phba);
3576 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3577 lpfc_unblock_mgmt_io(phba);
3582 vports = lpfc_create_vport_work_array(phba);
3583 if (vports != NULL) {
3584 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3585 struct Scsi_Host *shost;
3586 shost = lpfc_shost_from_vport(vports[i]);
3587 spin_lock_irq(shost->host_lock);
3588 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3589 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3590 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3591 if (phba->sli_rev == LPFC_SLI_REV4) {
3592 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3593 if ((vpis_cleared) &&
3594 (vports[i]->port_type !=
3595 LPFC_PHYSICAL_PORT))
3598 spin_unlock_irq(shost->host_lock);
3601 lpfc_destroy_vport_work_array(phba, vports);
3603 if (phba->cfg_xri_rebalancing)
3604 lpfc_create_multixri_pools(phba);
3606 lpfc_cpuhp_add(phba);
3608 lpfc_unblock_mgmt_io(phba);
3613 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3614 * @phba: pointer to lpfc hba data structure.
3616 * This routine marks a HBA's management interface as not blocked. Once the
3617 * HBA's management interface is marked as not blocked, all user space
3618 * access to the HBA, whether from the sysfs interface or the libdfc
3619 * interface, is allowed. The HBA is set to block the management interface
3620 * when the driver prepares the HBA interface for online or offline, and is
3621 * then set to unblock the management interface afterwards.
3624 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3626 unsigned long iflag;
3628 spin_lock_irqsave(&phba->hbalock, iflag);
3629 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3630 spin_unlock_irqrestore(&phba->hbalock, iflag);
3634 * lpfc_offline_prep - Prepare a HBA to be brought offline
3635 * @phba: pointer to lpfc hba data structure.
3636 * @mbx_action: flag for mailbox shutdown action.
3638 * This routine is invoked to prepare a HBA to be brought offline. It
3639 * unregisters the login for all the nodes on all vports and flushes the
3640 * mailbox queue to make it ready to be brought offline.
3643 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3645 struct lpfc_vport *vport = phba->pport;
3646 struct lpfc_nodelist *ndlp, *next_ndlp;
3647 struct lpfc_vport **vports;
3648 struct Scsi_Host *shost;
3652 if (vport->fc_flag & FC_OFFLINE_MODE)
3655 lpfc_block_mgmt_io(phba, mbx_action);
3657 lpfc_linkdown(phba);
3659 offline = pci_channel_offline(phba->pcidev);
3661 /* Issue an unreg_login to all nodes on all vports */
3662 vports = lpfc_create_vport_work_array(phba);
3663 if (vports != NULL) {
3664 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3665 if (vports[i]->load_flag & FC_UNLOADING)
3667 shost = lpfc_shost_from_vport(vports[i]);
3668 spin_lock_irq(shost->host_lock);
3669 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3670 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3671 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3672 spin_unlock_irq(shost->host_lock);
3674 shost = lpfc_shost_from_vport(vports[i]);
3675 list_for_each_entry_safe(ndlp, next_ndlp,
3676 &vports[i]->fc_nodes,
3679 spin_lock_irq(&ndlp->lock);
3680 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3681 spin_unlock_irq(&ndlp->lock);
3684 spin_lock_irq(&ndlp->lock);
3685 ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3686 NLP_RPI_REGISTERED);
3687 spin_unlock_irq(&ndlp->lock);
3689 lpfc_unreg_rpi(vports[i], ndlp);
3692 * Whenever an SLI4 port goes offline, free the
3693 * RPI. Get a new RPI when the adapter port
3694 * comes back online.
3696 if (phba->sli_rev == LPFC_SLI_REV4) {
3697 lpfc_printf_vlog(vports[i], KERN_INFO,
3698 LOG_NODE | LOG_DISCOVERY,
3699 "0011 Free RPI x%x on "
3700 "ndlp: x%px did x%x\n",
3701 ndlp->nlp_rpi, ndlp,
3703 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3704 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3707 if (ndlp->nlp_type & NLP_FABRIC) {
3708 lpfc_disc_state_machine(vports[i], ndlp,
3709 NULL, NLP_EVT_DEVICE_RECOVERY);
3711 /* Don't remove the node unless it
3712 * has been unregistered with the
3713 * transport, and we're not in recovery
3714 * before dev_loss_tmo triggers.
3715 * Otherwise, let dev_loss take care of
3718 if (!(ndlp->save_flags &
3719 NLP_IN_RECOV_POST_DEV_LOSS) &&
3720 !(ndlp->fc4_xpt_flags &
3721 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3722 lpfc_disc_state_machine
3730 lpfc_destroy_vport_work_array(phba, vports);
3732 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3735 flush_workqueue(phba->wq);
3739 * lpfc_offline - Bring a HBA offline
3740 * @phba: pointer to lpfc hba data structure.
3742 * This routine actually brings a HBA offline. It stops all the timers
3743 * associated with the HBA, brings down the SLI layer, and eventually
3744 * marks the HBA as in offline state for the upper layer protocol.
3747 lpfc_offline(struct lpfc_hba *phba)
3749 struct Scsi_Host *shost;
3750 struct lpfc_vport **vports;
3753 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3756 /* stop port and all timers associated with this hba */
3757 lpfc_stop_port(phba);
3759 /* Tear down the local and target port registrations. The
3760 * nvme transports need to clean up.
3762 lpfc_nvmet_destroy_targetport(phba);
3763 lpfc_nvme_destroy_localport(phba->pport);
3765 vports = lpfc_create_vport_work_array(phba);
3767 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3768 lpfc_stop_vport_timers(vports[i]);
3769 lpfc_destroy_vport_work_array(phba, vports);
3770 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3771 "0460 Bring Adapter offline\n");
3772 /* Bring down the SLI Layer and clean up. The HBA is offline
3774 lpfc_sli_hba_down(phba);
3775 spin_lock_irq(&phba->hbalock);
3777 spin_unlock_irq(&phba->hbalock);
3778 vports = lpfc_create_vport_work_array(phba);
3780 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3781 shost = lpfc_shost_from_vport(vports[i]);
3782 spin_lock_irq(shost->host_lock);
3783 vports[i]->work_port_events = 0;
3784 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3785 spin_unlock_irq(shost->host_lock);
3787 lpfc_destroy_vport_work_array(phba, vports);
3788 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3791 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3792 __lpfc_cpuhp_remove(phba);
3794 if (phba->cfg_xri_rebalancing)
3795 lpfc_destroy_multixri_pools(phba);
3799 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3800 * @phba: pointer to lpfc hba data structure.
3802 * This routine frees all the SCSI buffers and IOCBs from the driver
3803 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3804 * the internal resources before the device is removed from the system.
3807 lpfc_scsi_free(struct lpfc_hba *phba)
3809 struct lpfc_io_buf *sb, *sb_next;
3811 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3814 spin_lock_irq(&phba->hbalock);
3816 /* Release all the lpfc_scsi_bufs maintained by this host. */
3818 spin_lock(&phba->scsi_buf_list_put_lock);
3819 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3821 list_del(&sb->list);
3822 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3825 phba->total_scsi_bufs--;
3827 spin_unlock(&phba->scsi_buf_list_put_lock);
3829 spin_lock(&phba->scsi_buf_list_get_lock);
3830 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3832 list_del(&sb->list);
3833 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3836 phba->total_scsi_bufs--;
3838 spin_unlock(&phba->scsi_buf_list_get_lock);
3839 spin_unlock_irq(&phba->hbalock);
3843 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3844 * @phba: pointer to lpfc hba data structure.
3846 * This routine frees all the IO buffers and IOCBs from the driver
3847 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3848 * the internal resources before the device is removed from the system.
3851 lpfc_io_free(struct lpfc_hba *phba)
3853 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3854 struct lpfc_sli4_hdw_queue *qp;
3857 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3858 qp = &phba->sli4_hba.hdwq[idx];
3859 /* Release all the lpfc_nvme_bufs maintained by this host. */
3860 spin_lock(&qp->io_buf_list_put_lock);
3861 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3862 &qp->lpfc_io_buf_list_put,
3864 list_del(&lpfc_ncmd->list);
3866 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3867 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3868 if (phba->cfg_xpsgl && !phba->nvmet_support)
3869 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3870 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3872 qp->total_io_bufs--;
3874 spin_unlock(&qp->io_buf_list_put_lock);
3876 spin_lock(&qp->io_buf_list_get_lock);
3877 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3878 &qp->lpfc_io_buf_list_get,
3880 list_del(&lpfc_ncmd->list);
3882 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3883 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3884 if (phba->cfg_xpsgl && !phba->nvmet_support)
3885 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3886 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3888 qp->total_io_bufs--;
3890 spin_unlock(&qp->io_buf_list_get_lock);
3895 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3896 * @phba: pointer to lpfc hba data structure.
3898 * This routine first calculates the sizes of the current els and allocated
3899 * scsi sgl lists, and then goes through all sgls to update the physical
3900 * XRIs assigned due to a port function reset. During port initialization, the
3901 * current els and allocated scsi sgl lists are zero.
3904 * 0 - successful (for now, it always returns 0)
3907 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3909 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3910 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3911 LIST_HEAD(els_sgl_list);
3915 * update on pci function's els xri-sgl list
3917 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3919 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3920 /* els xri-sgl expanded */
3921 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3922 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3923 "3157 ELS xri-sgl count increased from "
3924 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3926 /* allocate the additional els sgls */
3927 for (i = 0; i < xri_cnt; i++) {
3928 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3930 if (sglq_entry == NULL) {
3931 lpfc_printf_log(phba, KERN_ERR,
3933 "2562 Failure to allocate an "
3934 "ELS sgl entry:%d\n", i);
3938 sglq_entry->buff_type = GEN_BUFF_TYPE;
3939 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3941 if (sglq_entry->virt == NULL) {
3943 lpfc_printf_log(phba, KERN_ERR,
3945 "2563 Failure to allocate an "
3946 "ELS mbuf:%d\n", i);
3950 sglq_entry->sgl = sglq_entry->virt;
3951 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3952 sglq_entry->state = SGL_FREED;
3953 list_add_tail(&sglq_entry->list, &els_sgl_list);
3955 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3956 list_splice_init(&els_sgl_list,
3957 &phba->sli4_hba.lpfc_els_sgl_list);
3958 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3959 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3960 /* els xri-sgl shrunk */
3961 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3962 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3963 "3158 ELS xri-sgl count decreased from "
3964 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3966 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3967 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3969 /* release extra els sgls from list */
3970 for (i = 0; i < xri_cnt; i++) {
3971 list_remove_head(&els_sgl_list,
3972 sglq_entry, struct lpfc_sglq, list);
3974 __lpfc_mbuf_free(phba, sglq_entry->virt,
3979 list_splice_init(&els_sgl_list,
3980 &phba->sli4_hba.lpfc_els_sgl_list);
3981 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3983 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3984 "3163 ELS xri-sgl count unchanged: %d\n",
3986 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3988 /* update xris to els sgls on the list */
3990 sglq_entry_next = NULL;
3991 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3992 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3993 lxri = lpfc_sli4_next_xritag(phba);
3994 if (lxri == NO_XRI) {
3995 lpfc_printf_log(phba, KERN_ERR,
3997 "2400 Failed to allocate xri for "
4002 sglq_entry->sli4_lxritag = lxri;
4003 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4008 lpfc_free_els_sgl_list(phba);
4013 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4014 * @phba: pointer to lpfc hba data structure.
4016 * This routine first calculates the sizes of the current els and allocated
4017 * scsi sgl lists, and then goes through all sgls to update the physical
4018 * XRIs assigned due to a port function reset. During port initialization, the
4019 * current els and allocated scsi sgl lists are zero.
4022 * 0 - successful (for now, it always returns 0)
4025 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4027 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4028 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4029 uint16_t nvmet_xri_cnt;
4030 LIST_HEAD(nvmet_sgl_list);
4034 * update on pci function's nvmet xri-sgl list
4036 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4038 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4039 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4040 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4041 /* els xri-sgl expanded */
4042 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4043 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4044 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4045 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4046 /* allocate the additional nvmet sgls */
4047 for (i = 0; i < xri_cnt; i++) {
4048 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4050 if (sglq_entry == NULL) {
4051 lpfc_printf_log(phba, KERN_ERR,
4053 "6303 Failure to allocate an "
4054 "NVMET sgl entry:%d\n", i);
4058 sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							&sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
4070 sglq_entry->sgl = sglq_entry->virt;
4071 memset(sglq_entry->sgl, 0,
4072 phba->cfg_sg_dma_buf_size);
4073 sglq_entry->state = SGL_FREED;
4074 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4076 spin_lock_irq(&phba->hbalock);
4077 spin_lock(&phba->sli4_hba.sgl_list_lock);
4078 list_splice_init(&nvmet_sgl_list,
4079 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4080 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4081 spin_unlock_irq(&phba->hbalock);
4082 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4083 /* nvmet xri-sgl shrunk */
4084 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
4090 spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
4093 /* release extra nvmet sgls from list */
4094 for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			lpfc_nvmet_buf_free(phba, sglq_entry->virt,
					    sglq_entry->phys);
			kfree(sglq_entry);
		}
4103 list_splice_init(&nvmet_sgl_list,
4104 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4105 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4106 spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
4116 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4117 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4118 lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6307 Failed to allocate xri for "
					"NVMET sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_nvmet_sgl_list(phba);
	return rc;
}
int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
	LIST_HEAD(blist);
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_io_buf *iobufp, *prev_iobufp;
	int idx, cnt, xri, inserted;

	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4148 qp = &phba->sli4_hba.hdwq[idx];
4149 spin_lock_irq(&qp->io_buf_list_get_lock);
4150 spin_lock(&qp->io_buf_list_put_lock);
4152 /* Take everything off the get and put lists */
4153 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4154 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4155 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4156 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4157 cnt += qp->get_io_bufs + qp->put_io_bufs;
4158 qp->get_io_bufs = 0;
4159 qp->put_io_bufs = 0;
4160 qp->total_io_bufs = 0;
4161 spin_unlock(&qp->io_buf_list_put_lock);
4162 spin_unlock_irq(&qp->io_buf_list_get_lock);
	/*
	 * Take IO buffers off blist and put on cbuf sorted by XRI.
	 * This is because POST_SGL takes a sequential range of XRIs
	 * to post to the firmware.
	 */
	for (idx = 0; idx < cnt; idx++) {
		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
		if (!lpfc_cmd)
			return cnt;
		if (idx == 0) {
			list_add_tail(&lpfc_cmd->list, cbuf);
			continue;
		}
		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
		inserted = 0;
		prev_iobufp = NULL;
		list_for_each_entry(iobufp, cbuf, list) {
			if (xri < iobufp->cur_iocbq.sli4_xritag) {
				if (prev_iobufp)
					list_add(&lpfc_cmd->list,
						 &prev_iobufp->list);
				else
					list_add(&lpfc_cmd->list, cbuf);
				inserted = 1;
				break;
			}
			prev_iobufp = iobufp;
		}
		if (!inserted)
			list_add_tail(&lpfc_cmd->list, cbuf);
	}
	return cnt;
}
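/*
 * Illustrative sketch (not driver code): lpfc_io_buf_flush() keeps
 * @cbuf ordered by ascending XRI with a linear insertion, because SGL
 * block post wants a sequential XRI range. The same idea over a plain
 * array of tags:
 *
 *	static void insert_sorted(u16 *tags, int *n, u16 tag)
 *	{
 *		int i = *n;
 *
 *		while (i > 0 && tags[i - 1] > tag) {
 *			tags[i] = tags[i - 1];	// shift larger tags up
 *			i--;
 *		}
 *		tags[i] = tag;
 *		(*n)++;
 *	}
 */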
int
lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd;
	int idx, cnt;

	qp = phba->sli4_hba.hdwq;
	cnt = 0;
	while (!list_empty(cbuf)) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			list_remove_head(cbuf, lpfc_cmd,
					 struct lpfc_io_buf, list);
			if (!lpfc_cmd)
				return cnt;
			cnt++;
			qp = &phba->sli4_hba.hdwq[idx];
			lpfc_cmd->hdwq_no = idx;
			lpfc_cmd->hdwq = qp;
			lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
			spin_lock(&qp->io_buf_list_put_lock);
			list_add_tail(&lpfc_cmd->list,
				      &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
			qp->total_io_bufs++;
			spin_unlock(&qp->io_buf_list_put_lock);
		}
	}
	return cnt;
}
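/*
 * Note: the replenish loop above deals buffers out round-robin across
 * the hardware queues; e.g. with cfg_hdw_queue == 4 the buffers land
 * on hdwq 0, 1, 2, 3, 0, 1, ... so every put list grows evenly.
 */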
/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4232 * @phba: pointer to lpfc hba data structure.
 * This routine first calculates the sizes of the current ELS and allocated
 * IO sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current ELS and allocated IO sgl lists are 0s.
 *
 * Return codes
 *   0 - successful (for now, it always returns 0)
 **/
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t io_xri_cnt, io_xri_max;
	LIST_HEAD(io_sgl_list);
	int rc, cnt;
	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */
4255 /* maximum number of xris available for nvme buffers */
4256 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4257 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4258 phba->sli4_hba.io_xri_max = io_xri_max;
4260 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4261 "6074 Current allocated XRI sgl count:%d, "
4262 "maximum XRI count:%d\n",
4263 phba->sli4_hba.io_xri_cnt,
4264 phba->sli4_hba.io_xri_max);
4266 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4268 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4269 /* max nvme xri shrunk below the allocated nvme buffers */
4270 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4271 phba->sli4_hba.io_xri_max;
4272 /* release the extra allocated nvme buffers */
		for (i = 0; i < io_xri_cnt; i++) {
			list_remove_head(&io_sgl_list, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
	}
	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
4289 phba->sli4_hba.io_xri_cnt = cnt;
4290 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4291 &io_sgl_list, list) {
4292 lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
	return 0;

out_free_mem:
	lpfc_io_free(phba);
	return rc;
}
/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4314 * @phba: Pointer to lpfc hba data structure.
4315 * @num_to_alloc: The requested number of buffers to allocate.
 * This routine allocates nvme buffers for device with SLI-4 interface spec,
 * the nvme buffer contains all the necessary information needed to initiate
 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
 * them on a list, it posts them to the port by using an SGL block post.
 *
 * Return codes
 *   int - number of IO buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
int
lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
{
4329 struct lpfc_io_buf *lpfc_ncmd;
4330 struct lpfc_iocbq *pwqeq;
4331 uint16_t iotag, lxri = 0;
4332 int bcnt, num_posted;
4333 LIST_HEAD(prep_nblist);
4334 LIST_HEAD(post_nblist);
4335 LIST_HEAD(nvme_nblist);
4337 phba->sli4_hba.io_xri_cnt = 0;
4338 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}
		if (phba->cfg_xpsgl && !phba->nvmet_support) {
			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
		} else {
			/*
			 * 4K Page alignment is CRITICAL to BlockGuard, double
			 * check to be sure.
			 */
			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    (((unsigned long)(lpfc_ncmd->data) &
			    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"3369 Memory alignment err: "
						"addr=%lx\n",
						(unsigned long)lpfc_ncmd->data);
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
				break;
			}
		}
4378 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
4387 pwqeq = &lpfc_ncmd->cur_iocbq;
4389 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6121 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
4401 pwqeq->sli4_lxritag = lxri;
4402 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4403 pwqeq->context1 = lpfc_ncmd;
4405 /* Initialize local short-hand pointers. */
4406 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4407 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4408 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4409 spin_lock_init(&lpfc_ncmd->buf_lock);
4411 /* add the nvme buffer to a post list */
4412 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4413 phba->sli4_hba.io_xri_cnt++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocated %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);
4419 /* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_sli4_post_io_sgl_list(
				phba, &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
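/*
 * Usage sketch (hypothetical caller): allocate and post 64 IO buffers
 * and treat a short count as a partial failure:
 *
 *	int posted = lpfc_new_io_buf(phba, 64);
 *
 *	if (posted < 64)
 *		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 *				"only %d of 64 IO buffers posted\n",
 *				posted);
 */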
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;
4442 /* First get WWN of HBA instance */
4443 lpfc_read_nv(phba, mboxq);
4444 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4445 if (rc != MBX_SUCCESS) {
4446 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4447 "6019 Mailbox failed , mbxCmd x%x "
4448 "READ_NV, mbxStatus x%x\n",
4449 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4450 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4451 mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t) -1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4456 /* wwn is WWPN of HBA instance */
4457 mempool_free(mboxq, phba->mbox_mem_pool);
4458 if (phba->sli_rev == LPFC_SLI_REV4)
4459 return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}
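/*
 * Byte-order note: on SLI-4 the READ_NV portname is big-endian, so
 * be64_to_cpu() yields the WWPN directly. On SLI-3 the two 32-bit
 * words arrive swapped, which rol64(wwn, 32) undoes; e.g. a raw value
 * of 0x0000000020000000ULL (hypothetical) becomes 0x2000000000000000.
 */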
/**
 * lpfc_vmid_res_alloc - Allocates resources for VMID
4466 * @phba: pointer to lpfc hba data structure.
4467 * @vport: pointer to vport data structure
 * This routine allocates the resources needed for the VMID.
 *
 * Return codes
 *	0 on Success
 *	Non-0 on Failure
 */
static int
lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
4478 /* VMID feature is supported only on SLI4 */
4479 if (phba->sli_rev == LPFC_SLI_REV3) {
4480 phba->cfg_vmid_app_header = 0;
		phba->cfg_vmid_priority_tagging = 0;
		return 0;
	}

	if (lpfc_is_vmid_enabled(phba)) {
		vport->vmid =
		    kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
			    GFP_KERNEL);
		if (!vport->vmid)
			return -ENOMEM;

		rwlock_init(&vport->vmid_lock);
4493 /* Set the VMID parameters for the vport */
4494 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4495 vport->vmid_inactivity_timeout =
4496 phba->cfg_vmid_inactivity_timeout;
4497 vport->max_vmid = phba->cfg_max_vmid;
4498 vport->cur_vmid_cnt = 0;
4500 vport->vmid_priority_range = bitmap_zalloc
4501 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
		if (!vport->vmid_priority_range) {
			kfree(vport->vmid);
			return -ENOMEM;
		}

		hash_init(vport->hash_table);
	}
	return 0;
}
/**
 * lpfc_create_port - Create an FC port
4515 * @phba: pointer to lpfc hba data structure.
4516 * @instance: a unique integer ID to this FC port.
4517 * @dev: pointer to the device data structure.
4519 * This routine creates a FC port for the upper layer protocol. The FC port
4520 * can be created on top of either a physical port or a virtual port provided
4521 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer list.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost = NULL;
	struct scsi_host_template *template;
	int error = 0;
	int i;
	uint64_t wwn;
	bool use_no_reset_hba = false;
	int rc;
4541 if (lpfc_no_hba_reset_cnt) {
4542 if (phba->sli_rev < LPFC_SLI_REV4 &&
4543 dev == &phba->pcidev->dev) {
4544 /* Reset the port first */
4545 lpfc_sli_brdrestart(phba);
			rc = lpfc_sli_chipset_init(phba);
			if (rc)
				return NULL;
		}
		wwn = lpfc_get_wwpn(phba);
	}
	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
		if (wwn == lpfc_no_hba_reset[i]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6020 Setting use_no_reset port=%llx\n",
					wwn);
			use_no_reset_hba = true;
			break;
		}
	}
4564 /* Seed template for SCSI host registration */
4565 if (dev == &phba->pcidev->dev) {
4566 template = &phba->port_template;
4568 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4569 /* Seed physical port template */
4570 memcpy(template, &lpfc_template, sizeof(*template));
4572 if (use_no_reset_hba)
4573 /* template is for a no reset SCSI Host */
4574 template->eh_host_reset_handler = NULL;
4576 /* Template for all vports this physical port creates */
			memcpy(&phba->vport_template, &lpfc_template,
			       sizeof(*template));
			phba->vport_template.shost_attrs = lpfc_vport_attrs;
4580 phba->vport_template.eh_bus_reset_handler = NULL;
4581 phba->vport_template.eh_host_reset_handler = NULL;
4582 phba->vport_template.vendor_id = 0;
4584 /* Initialize the host templates with updated value */
4585 if (phba->sli_rev == LPFC_SLI_REV4) {
4586 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4587 phba->vport_template.sg_tablesize =
4588 phba->cfg_scsi_seg_cnt;
			} else {
				template->sg_tablesize = phba->cfg_sg_seg_cnt;
				phba->vport_template.sg_tablesize =
						phba->cfg_sg_seg_cnt;
			}
		} else {
			/* NVMET is for physical port only */
			memcpy(template, &lpfc_template_nvme,
			       sizeof(*template));
		}
	} else {
		template = &phba->vport_template;
	}
	shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
4611 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4612 vport->fc_rscn_flush = 0;
4613 lpfc_get_vport_cfgparam(vport);
4615 /* Adjust value in vport */
4616 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4618 shost->unique_id = instance;
4619 shost->max_id = LPFC_MAX_TARGET;
4620 shost->max_lun = vport->cfg_max_luns;
4621 shost->this_id = -1;
4622 shost->max_cmd_len = 16;
4624 if (phba->sli_rev == LPFC_SLI_REV4) {
4625 if (!phba->cfg_fcp_mq_threshold ||
4626 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4627 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4629 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4630 phba->cfg_fcp_mq_threshold);
4632 shost->dma_boundary =
4633 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4635 if (phba->cfg_xpsgl && !phba->nvmet_support)
4636 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
		else
			shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
	} else
		/* SLI-3 has a limited number of hardware queues (3),
		 * thus there is only one for FCP processing.
		 */
		shost->nr_hw_queues = 1;
	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
4650 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4651 if (dev != &phba->pcidev->dev) {
4652 shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9081 CreatePort TMPLATE type %x TBLsize %d "
			"SEGcnt %d/%d\n",
			vport->port_type, shost->sg_tablesize,
			phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4665 /* Allocate the resources for VMID */
	rc = lpfc_vmid_res_alloc(phba, vport);
	if (rc)
		goto out;
4671 /* Initialize all internally managed lists. */
4672 INIT_LIST_HEAD(&vport->fc_nodes);
4673 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4674 spin_lock_init(&vport->work_port_lock);
4676 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4678 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4680 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4682 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4683 lpfc_setup_bg(phba, shost);
	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;
4689 spin_lock_irq(&phba->port_list_lock);
4690 list_add_tail(&vport->listentry, &phba->port_list);
4691 spin_unlock_irq(&phba->port_list_lock);
	return vport;

out_put_shost:
	kfree(vport->vmid);
	bitmap_free(vport->vmid_priority_range);
	scsi_host_put(shost);
out:
	return NULL;
}
/**
 * destroy_port - destroy an FC port
4705 * @vport: pointer to an lpfc virtual N_Port data structure.
 * This routine destroys an FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
static void
destroy_port(struct lpfc_vport *vport)
{
4713 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4714 struct lpfc_hba *phba = vport->phba;
4716 lpfc_debugfs_terminate(vport);
4717 fc_remove_host(shost);
4718 scsi_remove_host(shost);
4720 spin_lock_irq(&phba->port_list_lock);
4721 list_del_init(&vport->listentry);
4722 spin_unlock_irq(&phba->port_list_lock);
	lpfc_cleanup(vport);
	return;
}
/**
 * lpfc_get_instance - Get a unique integer ID
4731 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4732 * uses the kernel idr facility to perform the task.
 * Return codes
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}
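/*
 * A minimal sketch of the same idr pattern (hypothetical local idr):
 *
 *	static DEFINE_IDR(example_idr);
 *
 *	int id = idr_alloc(&example_idr, NULL, 0, 0, GFP_KERNEL);
 *	// smallest free ID >= 0, or a negative errno on failure;
 *	// release it later with idr_remove(&example_idr, id)
 */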
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4749 * @shost: pointer to SCSI host data structure.
4750 * @time: elapsed time of the scan in jiffies.
4752 * This routine is called by the SCSI layer with a SCSI host to determine
4753 * whether the scan host is finished.
4755 * Note: there is no scan_start function as adapter initialization will have
4756 * asynchronously kicked off the link initialization.
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);
	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds.  Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	fc_host_supported_speeds(shost) = 0;
	/*
	 * Avoid reporting supported link speed for FCoE as it can't be
	 * controlled via FCoE.
	 */
	if (phba->hba_flag & HBA_FCOE_MODE)
		return;
4819 if (phba->lmt & LMT_256Gb)
4820 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4821 if (phba->lmt & LMT_128Gb)
4822 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4823 if (phba->lmt & LMT_64Gb)
4824 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4825 if (phba->lmt & LMT_32Gb)
4826 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4827 if (phba->lmt & LMT_16Gb)
4828 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4829 if (phba->lmt & LMT_10Gb)
4830 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4831 if (phba->lmt & LMT_8Gb)
4832 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4833 if (phba->lmt & LMT_4Gb)
4834 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4835 if (phba->lmt & LMT_2Gb)
4836 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4837 if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4843 * @shost: pointer to SCSI host data structure.
4845 * This routine initializes a given SCSI host attributes on a FC port. The
4846 * SCSI host can be either on top of a physical port or a virtual port.
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */
4856 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4857 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4858 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4860 memset(fc_host_supported_fc4s(shost), 0,
4861 sizeof(fc_host_supported_fc4s(shost)));
4862 fc_host_supported_fc4s(shost)[2] = 1;
4863 fc_host_supported_fc4s(shost)[7] = 1;
4865 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4866 sizeof fc_host_symbolic_name(shost));
4868 lpfc_host_supported_speeds_set(shost);
4870 fc_host_maxframe_size(shost) =
4871 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4872 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
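	/*
	 * Worked example: a common service parameter receive size of
	 * 2112 bytes is encoded as bbRcvSizeMsb = 0x08, bbRcvSizeLsb =
	 * 0x40: ((0x08 & 0x0F) << 8) | 0x40 = 0x840 = 2112. Only the
	 * low nibble of the MSB belongs to the 12-bit size field.
	 */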
4874 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4876 /* This value is also unchanging */
4877 memset(fc_host_active_fc4s(shost), 0,
4878 sizeof(fc_host_active_fc4s(shost)));
4879 fc_host_active_fc4s(shost)[2] = 1;
4880 fc_host_active_fc4s(shost)[7] = 1;
4882 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4883 spin_lock_irq(shost->host_lock);
4884 vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
4890 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
4899 /* Clear all interrupt enable conditions */
4900 writel(0, phba->HCregaddr);
4901 readl(phba->HCregaddr); /* flush */
4902 /* Clear all pending interrupts */
4903 writel(0xffffffff, phba->HAregaddr);
4904 readl(phba->HAregaddr); /* flush */
4906 /* Reset some HBA SLI setup states */
4907 lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
4913 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
4922 /* Reset some HBA SLI4 setup states */
4923 lpfc_stop_hba_timers(phba);
4925 phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}
/**
 * lpfc_stop_port - Wrapper function for stopping hba port
4931 * @phba: Pointer to HBA context object.
4933 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4934 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4947 * @phba: Pointer to hba for which this call is being executed.
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
4954 unsigned long fcf_redisc_wait_tmo =
4955 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4956 /* Start fcf rediscovery wait period timer */
4957 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4958 spin_lock_irq(&phba->hbalock);
4959 /* Allow action to new fcf asynchronous event */
4960 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4961 /* Mark the FCF rediscovery pending state */
4962 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
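/*
 * Note: FCF_AVAILABLE/FCF_SCAN_DONE are cleared and FCF_REDISC_PEND is
 * set under hbalock so the async FCoE event handler and the timeout
 * handler below observe a consistent fcf_flag while the wait timer is
 * armed.
 */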
/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4968 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 * This routine is invoked when the wait for FCF table rediscovery has
 * timed out. If new FCF record(s) have been discovered during the wait
 * period, a new FCF event will be added to the FCOE async event list,
 * and the worker thread will be woken up to process it from worker
 * thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4981 /* Don't send FCF rediscovery event if timer cancelled */
4982 spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
4988 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4989 /* FCF rediscovery event to worker thread */
4990 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4991 spin_unlock_irq(&phba->hbalock);
4992 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4993 "2776 FCF rediscover quiescent timer expired\n");
4994 /* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_vmid_poll - VMID timeout detection
5000 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 * This routine is invoked when there has been no I/O from a VM for the
 * specified amount of time. When this situation is detected, the VMID has
 * to be deregistered from the switch and all the local resources freed.
 * The VMID will be reassigned to the VM once the I/O begins.
 **/
static void lpfc_vmid_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
	u32 wake_up = 0;

	/* check if there is a need to issue QFPA */
	if (phba->pport->vmid_priority_tagging) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
	}
5019 /* Is the vmid inactivity timer enabled */
	if (phba->pport->vmid_inactivity_timeout ||
	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
	}

	if (wake_up)
		lpfc_worker_wake_up(phba);
5029 /* restart the timer for the next iteration */
	mod_timer(&phba->inactive_vmid_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
}
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5036 * @phba: pointer to lpfc hba data structure.
5037 * @acqe_link: pointer to the async link completion queue entry.
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0398 Unknown link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		break;
	}
}
/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5061 * @phba: pointer to lpfc hba data structure.
5062 * @acqe_link: pointer to the async link completion queue entry.
5064 * This routine is to parse the SLI4 link attention type and translate it
5065 * into the base driver's link attention type coding.
5067 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}
/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
5099 * @phba: pointer to lpfc hba data structure.
5101 * This routine is to get an SLI3 FC port's link speed in Mbps.
5103 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
			break;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
			      phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}
/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5148 * @phba: pointer to lpfc hba data structure.
5149 * @evt_code: asynchronous event code.
5150 * @speed_code: asynchronous event link speed code.
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
5155 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			port_speed = 100000;
			break;
		default:
			port_speed = 0;
			break;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		case LPFC_FC_LA_SPEED_64G:
			port_speed = 64000;
			break;
		case LPFC_FC_LA_SPEED_128G:
			port_speed = 128000;
			break;
		case LPFC_FC_LA_SPEED_256G:
			port_speed = 256000;
			break;
		default:
			port_speed = 0;
			break;
		}
		break;
	default:
		port_speed = 0;
		break;
	}
	return port_speed;
}
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5244 * @phba: pointer to lpfc hba data structure.
5245 * @acqe_link: pointer to the async link completion queue entry.
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;
5260 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}
5283 /* Cleanup any outstanding ELS commands */
5284 lpfc_els_flush_all_cmd(phba);
5286 /* Block ELS IOCBs until we have done process link event */
5287 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5289 /* Update link event statistics */
5290 phba->sli.slistat.link_event++;
5292 /* Create lpfc_handle_latt mailbox command from link ACQE */
5293 lpfc_read_topology(phba, pmb, mp);
5294 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5295 pmb->vport = phba->pport;
5297 /* Keep the link status for extra SLI4 state machine reference */
5298 phba->sli4_hba.link_state.speed =
5299 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5300 bf_get(lpfc_acqe_link_speed, acqe_link));
5301 phba->sli4_hba.link_state.duplex =
5302 bf_get(lpfc_acqe_link_duplex, acqe_link);
5303 phba->sli4_hba.link_state.status =
5304 bf_get(lpfc_acqe_link_status, acqe_link);
5305 phba->sli4_hba.link_state.type =
5306 bf_get(lpfc_acqe_link_type, acqe_link);
5307 phba->sli4_hba.link_state.number =
5308 bf_get(lpfc_acqe_link_number, acqe_link);
5309 phba->sli4_hba.link_state.fault =
5310 bf_get(lpfc_acqe_link_fault, acqe_link);
5311 phba->sli4_hba.link_state.logical_speed =
5312 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5314 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5315 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5316 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5317 "Logical speed:%dMbps Fault:%d\n",
5318 phba->sli4_hba.link_state.speed,
5319 phba->sli4_hba.link_state.topology,
5320 phba->sli4_hba.link_state.status,
5321 phba->sli4_hba.link_state.type,
5322 phba->sli4_hba.link_state.number,
5323 phba->sli4_hba.link_state.logical_speed,
5324 phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
5329 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5330 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5331 if (rc == MBX_NOT_FINISHED) {
5332 lpfc_mbuf_free(phba, mp->virt, mp->phys);
			goto out_free_dmabuf;
		}
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;
5346 /* Parse port fault information field */
5347 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5349 /* Parse and translate link attention fields */
5350 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5351 la->eventTag = acqe_link->event_tag;
5352 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5353 bf_set(lpfc_mbx_read_top_link_spd, la,
5354 (bf_get(lpfc_acqe_link_speed, acqe_link)));
	/* Fake the following irrelevant fields */
5357 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5358 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5359 bf_set(lpfc_mbx_read_top_il, la, 0);
5360 bf_set(lpfc_mbx_read_top_pb, la, 0);
5361 bf_set(lpfc_mbx_read_top_fa, la, 0);
5362 bf_set(lpfc_mbx_read_top_mm, la, 0);
5364 /* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 *	topology.
5378 * @phba: pointer to lpfc hba data structure.
5379 * @speed_code: asynchronous event link speed code.
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Read topology link speed.
5384 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}
	return port_speed;
}
void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
	struct rxtable_entry *entry;
	int cnt = 0, head, tail, last, start;
5433 head = atomic_read(&phba->rxtable_idx_head);
5434 tail = atomic_read(&phba->rxtable_idx_tail);
	if (!phba->rxtable || head == tail) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
				"4411 Rxtable is empty\n");
		return;
	}
	last = tail;
	start = head;

	/* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
	while (start != last) {
		if (start)
			start--;
		else
			start = LPFC_MAX_RXMONITOR_ENTRY - 1;
		entry = &phba->rxtable[start];
5450 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5451 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5452 "Lat %lld ASz %lld Info %02d BWUtil %d "
5454 cnt, entry->max_bytes_per_interval,
5455 entry->total_bytes, entry->rcv_bytes,
5456 entry->avg_io_latency, entry->avg_io_size,
5457 entry->cmf_info, entry->timer_utilization,
5458 entry->timer_interval, start);
		cnt++;
		if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
			break;
	}
}
/**
 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5467 * @phba: pointer to lpfc hba data structure.
5468 * @dtag: FPIN descriptor received
 * Increment the FPIN received counter/time when it happens.
 **/
void
lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
{
	struct lpfc_cgn_info *cp;
	struct tm broken;
	struct timespec64 cur_time;
	uint32_t cnt;
	uint32_t value;
	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5485 ktime_get_real_ts64(&cur_time);
5486 time64_to_tm(cur_time.tv_sec, 0, &broken);
	/* Update congestion statistics */
	switch (dtag) {
	case ELS_DTAG_LNK_INTEGRITY:
		cnt = le32_to_cpu(cp->link_integ_notification);
		cnt++;
		cp->link_integ_notification = cpu_to_le32(cnt);
5495 cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5496 cp->cgn_stat_lnk_day = broken.tm_mday;
5497 cp->cgn_stat_lnk_year = broken.tm_year - 100;
5498 cp->cgn_stat_lnk_hour = broken.tm_hour;
5499 cp->cgn_stat_lnk_min = broken.tm_min;
		cp->cgn_stat_lnk_sec = broken.tm_sec;
		break;
	case ELS_DTAG_DELIVERY:
		cnt = le32_to_cpu(cp->delivery_notification);
		cnt++;
		cp->delivery_notification = cpu_to_le32(cnt);
5507 cp->cgn_stat_del_month = broken.tm_mon + 1;
5508 cp->cgn_stat_del_day = broken.tm_mday;
5509 cp->cgn_stat_del_year = broken.tm_year - 100;
5510 cp->cgn_stat_del_hour = broken.tm_hour;
5511 cp->cgn_stat_del_min = broken.tm_min;
		cp->cgn_stat_del_sec = broken.tm_sec;
		break;
	case ELS_DTAG_PEER_CONGEST:
		cnt = le32_to_cpu(cp->cgn_peer_notification);
		cnt++;
		cp->cgn_peer_notification = cpu_to_le32(cnt);
5519 cp->cgn_stat_peer_month = broken.tm_mon + 1;
5520 cp->cgn_stat_peer_day = broken.tm_mday;
5521 cp->cgn_stat_peer_year = broken.tm_year - 100;
5522 cp->cgn_stat_peer_hour = broken.tm_hour;
5523 cp->cgn_stat_peer_min = broken.tm_min;
		cp->cgn_stat_peer_sec = broken.tm_sec;
		break;
	case ELS_DTAG_CONGESTION:
		cnt = le32_to_cpu(cp->cgn_notification);
		cnt++;
		cp->cgn_notification = cpu_to_le32(cnt);
5531 cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5532 cp->cgn_stat_cgn_day = broken.tm_mday;
5533 cp->cgn_stat_cgn_year = broken.tm_year - 100;
5534 cp->cgn_stat_cgn_hour = broken.tm_hour;
5535 cp->cgn_stat_cgn_min = broken.tm_min;
		cp->cgn_stat_cgn_sec = broken.tm_sec;
	}
	if (phba->cgn_fpin_frequency &&
5539 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5540 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}
	value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5544 LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(value);
}
/**
 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5550 * @phba: pointer to lpfc hba data structure.
 * Save the congestion event data every minute.
 * On the hour collapse all the minute data into hour data. Every day
 * collapse all the hour data into daily data. Separate driver
 * and fabric congestion event counters that will be saved out
 * to the registered congestion buffer every minute.
 **/
static void
lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
{
	struct lpfc_cgn_info *cp;
	struct tm broken;
	struct timespec64 cur_time;
	uint32_t i, index;
	uint16_t value, mvalue;
	uint64_t bps;
	uint32_t mbps;
	uint32_t dvalue, wvalue, lvalue, avalue;
	uint64_t latsum;
	uint16_t *ptr;
	uint32_t *lptr;
	uint16_t *mptr;
	/* Make sure we have a congestion info buffer */
	if (!phba->cgn_i)
		return;
	cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
	if (time_before(jiffies, phba->cgn_evt_timestamp))
		return;
	phba->cgn_evt_timestamp = jiffies +
			msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
	phba->cgn_evt_minute++;
5585 /* We should get to this point in the routine on 1 minute intervals */
5587 ktime_get_real_ts64(&cur_time);
5588 time64_to_tm(cur_time.tv_sec, 0, &broken);
5590 if (phba->cgn_fpin_frequency &&
5591 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5592 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
		cp->cgn_stat_npm = value;
	}

	/* Read and clear the latency counters for this minute */
5597 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5598 latsum = atomic64_read(&phba->cgn_latency_evt);
5599 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5600 atomic64_set(&phba->cgn_latency_evt, 0);
5602 /* We need to store MB/sec bandwidth in the congestion information.
5603 * block_cnt is count of 512 byte blocks for the entire minute,
	 * bps will get bytes per sec before finally converting to MB/sec.
	 */
5606 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5607 phba->rx_block_cnt = 0;
5608 mvalue = bps / (1024 * 1024); /* convert to MB/sec */
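	/*
	 * Worked example: 1,228,800 blocks of 512 bytes in one minute is
	 * div_u64(1228800, 60) * 512 = 20480 * 512 = 10,485,760 bytes/sec,
	 * which reports as 10485760 / (1024 * 1024) = 10 MB/sec.
	 */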
5611 /* cgn parameters */
5612 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5613 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5614 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5615 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5617 /* Fill in default LUN qdepth */
5618 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5619 cp->cgn_lunq = cpu_to_le16(value);
5621 /* Record congestion buffer info - every minute
5622 * cgn_driver_evt_cnt (Driver events)
5623 * cgn_fabric_warn_cnt (Congestion Warnings)
5624 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
	 * cgn_fabric_alarm_cnt (Congestion Alarms)
	 */
	index = ++cp->cgn_index_minute;
	if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
		cp->cgn_index_minute = 0;
		index = 0;
	}
5633 /* Get the number of driver events in this sample and reset counter */
5634 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5635 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5637 /* Get the number of warning events - FPIN and Signal for this minute */
	wvalue = 0;
	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5645 /* Get the number of alarm events - FPIN and Signal for this minute */
	avalue = 0;
	if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
	    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
		avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5652 /* Collect the driver, warning, alarm and latency counts for this
	 * minute into the driver congestion buffer.
	 */
5655 ptr = &cp->cgn_drvr_min[index];
5656 value = (uint16_t)dvalue;
5657 *ptr = cpu_to_le16(value);
5659 ptr = &cp->cgn_warn_min[index];
5660 value = (uint16_t)wvalue;
5661 *ptr = cpu_to_le16(value);
5663 ptr = &cp->cgn_alarm_min[index];
5664 value = (uint16_t)avalue;
5665 *ptr = cpu_to_le16(value);
	lptr = &cp->cgn_latency_min[index];
	if (lvalue) {
		lvalue = (uint32_t)div_u64(latsum, lvalue);
		*lptr = cpu_to_le32(lvalue);
	} else {
		*lptr = 0;
	}
	/* Collect the bandwidth value into the driver's congestion buffer. */
5676 mptr = &cp->cgn_bw_min[index];
5677 *mptr = cpu_to_le16(mvalue);
5679 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5680 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5681 index, dvalue, wvalue, *lptr, mvalue, avalue);
5684 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
		/* Record congestion buffer info - every hour
		 * Collapse all minutes into an hour
		 */
		index = ++cp->cgn_index_hour;
		if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
			cp->cgn_index_hour = 0;
			index = 0;
		}
		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		avalue = 0;
		mvalue = 0;
		mbps = 0;
		for (i = 0; i < LPFC_MIN_HOUR; i++) {
5701 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5702 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5703 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5704 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5705 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5707 if (lvalue) /* Avg of latency averages */
5708 lvalue /= LPFC_MIN_HOUR;
5709 if (mbps) /* Avg of Bandwidth averages */
5710 mvalue = mbps / LPFC_MIN_HOUR;
5712 lptr = &cp->cgn_drvr_hr[index];
5713 *lptr = cpu_to_le32(dvalue);
5714 lptr = &cp->cgn_warn_hr[index];
5715 *lptr = cpu_to_le32(wvalue);
5716 lptr = &cp->cgn_latency_hr[index];
5717 *lptr = cpu_to_le32(lvalue);
5718 mptr = &cp->cgn_bw_hr[index];
5719 *mptr = cpu_to_le16(mvalue);
5720 lptr = &cp->cgn_alarm_hr[index];
5721 *lptr = cpu_to_le32(avalue);
5723 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5724 "2419 Congestion Info - hour "
5725 "(%d): %d %d %d %d %d\n",
5726 index, dvalue, wvalue, lvalue, mvalue, avalue);
	if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
		/* Record congestion buffer info - every day
		 * Collapse all hours into a day. Rotate days
		 * after LPFC_MAX_CGN_DAYS.
		 */
		index = ++cp->cgn_index_day;
		if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
			cp->cgn_index_day = 0;
			index = 0;
		}
5741 /* Anytime we overwrite daily index 0, after we wrap,
5742 * we will be overwriting the oldest day, so we must
5743 * update the congestion data start time for that day.
5744 * That start time should have previously been saved after
		 * we wrote the last day's worth of data.
		 */
5747 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5748 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5750 cp->cgn_info_month = broken.tm_mon + 1;
5751 cp->cgn_info_day = broken.tm_mday;
5752 cp->cgn_info_year = broken.tm_year - 100;
5753 cp->cgn_info_hour = broken.tm_hour;
5754 cp->cgn_info_minute = broken.tm_min;
5755 cp->cgn_info_second = broken.tm_sec;
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5759 "2646 CGNInfo idx0 Start Time: "
5760 "%d/%d/%d %d:%d:%d\n",
5761 cp->cgn_info_day, cp->cgn_info_month,
5762 cp->cgn_info_year, cp->cgn_info_hour,
					cp->cgn_info_minute,
					cp->cgn_info_second);
		}

		dvalue = 0;
		wvalue = 0;
		lvalue = 0;
		mvalue = 0;
		avalue = 0;
		mbps = 0;
		for (i = 0; i < LPFC_HOUR_DAY; i++) {
5773 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5774 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5775 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5776 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5777 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5779 if (lvalue) /* Avg of latency averages */
5780 lvalue /= LPFC_HOUR_DAY;
5781 if (mbps) /* Avg of Bandwidth averages */
5782 mvalue = mbps / LPFC_HOUR_DAY;
5784 lptr = &cp->cgn_drvr_day[index];
5785 *lptr = cpu_to_le32(dvalue);
5786 lptr = &cp->cgn_warn_day[index];
5787 *lptr = cpu_to_le32(wvalue);
5788 lptr = &cp->cgn_latency_day[index];
5789 *lptr = cpu_to_le32(lvalue);
5790 mptr = &cp->cgn_bw_day[index];
5791 *mptr = cpu_to_le16(mvalue);
5792 lptr = &cp->cgn_alarm_day[index];
5793 *lptr = cpu_to_le32(avalue);
5795 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5796 "2420 Congestion Info - daily (%d): "
5798 index, dvalue, wvalue, lvalue, mvalue, avalue);
		/* We just wrote LPFC_MAX_CGN_DAYS of data,
		 * so we are wrapped on any data after this.
		 * Save this as the start time for the next day.
		 */
		if (index == (LPFC_MAX_CGN_DAYS - 1)) {
			phba->hba_flag |= HBA_CGN_DAY_WRAP;
			ktime_get_real_ts64(&phba->cgn_daily_ts);
		}
	}
5810 /* Use the frequency found in the last rcv'ed FPIN */
5811 value = phba->cgn_fpin_frequency;
5812 cp->cgn_warn_freq = cpu_to_le16(value);
5813 cp->cgn_alarm_freq = cpu_to_le16(value);
5815 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5816 LPFC_CGN_CRC32_SEED);
	cp->cgn_info_crc = cpu_to_le32(lvalue);
}
/**
 * lpfc_calc_cmf_latency - latency from start of rxate timer interval
5822 * @phba: The Hba for which this call is being executed.
5824 * The routine calculates the latency from the beginning of the CMF timer
5825 * interval to the current point in time. It is called from IO completion
5826 * when we exceed our Bandwidth limitation for the time interval.
 **/
static uint32_t
lpfc_calc_cmf_latency(struct lpfc_hba *phba)
{
	struct timespec64 cmpl_time;
	uint32_t msec = 0;

	ktime_get_real_ts64(&cmpl_time);
	/* This routine works on a ms granularity so sec and usec are
	 * converted accordingly.
	 */
	if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
		msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
			NSEC_PER_MSEC;
	} else {
		if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
			msec = (cmpl_time.tv_sec -
				phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
			msec += ((cmpl_time.tv_nsec -
				  phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
		} else {
			msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
				1) * MSEC_PER_SEC;
			msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
				 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
		}
	}
	return msec;
}
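/*
 * Worked example for the nanosecond-borrow branch above: a start time
 * of 5s + 900,000,000ns and a completion time of 7s + 100,000,000ns
 * gives (7 - 5 - 1) * 1000 + ((1,000,000,000 - 900,000,000) +
 * 100,000,000) / 1,000,000 = 1000 + 200 = 1200 ms, i.e. 1.2 seconds.
 */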
/**
 * lpfc_cmf_timer - This is the timer function for one congestion zone
 * @timer: Pointer to the high resolution timer that expired
 **/
5863 static enum hrtimer_restart
lpfc_cmf_timer(struct hrtimer *timer)
{
	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
					     cmf_timer);
	struct rxtable_entry *entry;
	uint32_t io_cnt;
	uint32_t head, tail;
	uint32_t busy, max_read;
	uint64_t total, rcv, lat, mbpi;
	int timer_interval = LPFC_CMF_INTERVAL;
	uint32_t ms;
	struct lpfc_cgn_stat *cgs;
	int cpu;
5878 /* Only restart the timer if congestion mgmt is on */
5879 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5880 !phba->cmf_latency.tv_sec) {
5881 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5882 "6224 CMF timer exit: %d %lld\n",
5883 phba->cmf_active_mode,
5884 (uint64_t)phba->cmf_latency.tv_sec);
5885 return HRTIMER_NORESTART;
	}

	/* If pport is not ready yet, just exit and wait for
	 * the next timer cycle to hit.
	 */
	if (!phba->pport)
		goto skip;
	/* Do not block SCSI IO while in the timer routine since
	 * total_bytes will be cleared
	 */
	atomic_set(&phba->cmf_stop_io, 1);
	/* First we need to calculate the actual ms between
	 * the last timer interrupt and this one. We ask for
	 * LPFC_CMF_INTERVAL, however the actual time may
	 * vary depending on system overhead.
	 */
	ms = lpfc_calc_cmf_latency(phba);
	/* Immediately after we calculate the time since the last
	 * timer interrupt, set the start time for the next
	 * interval.
	 */
	ktime_get_real_ts64(&phba->cmf_latency);
5913 phba->cmf_link_byte_count =
5914 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
	/* Collect all the stats from the prior timer interval */
	total = 0;
	io_cnt = 0;
	lat = 0;
	rcv = 0;
	for_each_present_cpu(cpu) {
5922 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
5923 total += atomic64_xchg(&cgs->total_bytes, 0);
5924 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
5925 lat += atomic64_xchg(&cgs->rx_latency, 0);
		rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
	}
5929 /* Before we issue another CMF_SYNC_WQE, retrieve the BW
5930 * returned from the last CMF_SYNC_WQE issued, from
5931 * cmf_last_sync_bw. This will be the target BW for
	 * this next timer interval.
	 */
5934 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
5935 phba->link_state != LPFC_LINK_DOWN &&
5936 phba->hba_flag & HBA_SETUP) {
5937 mbpi = phba->cmf_last_sync_bw;
5938 phba->cmf_last_sync_bw = 0;
		lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
	} else {
		/* For Monitor mode or link down we want mbpi
		 * to be the full link speed
		 */
		mbpi = phba->cmf_link_byte_count;
	}
	phba->cmf_timer_cnt++;
	if (io_cnt) {
		/* Update congestion info buffer latency in us */
		atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
		atomic64_add(lat, &phba->cgn_latency_evt);
	}
5953 busy = atomic_xchg(&phba->cmf_busy, 0);
5954 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
	/* Calculate MBPI for the next timer interval */
	if (mbpi) {
		if (mbpi > phba->cmf_link_byte_count ||
		    phba->cmf_active_mode == LPFC_CFG_MONITOR)
			mbpi = phba->cmf_link_byte_count;

		/* Change max_bytes_per_interval to what the prior
		 * CMF_SYNC_WQE cmpl indicated.
		 */
		if (mbpi != phba->cmf_max_bytes_per_interval)
			phba->cmf_max_bytes_per_interval = mbpi;
	}
5969 /* Save rxmonitor information for debug */
5970 if (phba->rxtable) {
5971 head = atomic_xchg(&phba->rxtable_idx_head,
5972 LPFC_RXMONITOR_TABLE_IN_USE);
5973 entry = &phba->rxtable[head];
5974 entry->total_bytes = total;
5975 entry->rcv_bytes = rcv;
5976 entry->cmf_busy = busy;
		entry->cmf_info = phba->cmf_active_info;
		if (io_cnt) {
			entry->avg_io_latency = div_u64(lat, io_cnt);
			entry->avg_io_size = div_u64(rcv, io_cnt);
		} else {
			entry->avg_io_latency = 0;
			entry->avg_io_size = 0;
		}
5985 entry->max_read_cnt = max_read;
5986 entry->io_cnt = io_cnt;
5987 entry->max_bytes_per_interval = mbpi;
5988 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
5989 entry->timer_utilization = phba->cmf_last_ts;
5991 entry->timer_utilization = ms;
5992 entry->timer_interval = ms;
5993 phba->cmf_last_ts = 0;
5995 /* Increment rxtable index */
5996 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
		tail = atomic_read(&phba->rxtable_idx_tail);
		if (head == tail) {
			tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
			atomic_set(&phba->rxtable_idx_tail, tail);
		}
		atomic_set(&phba->rxtable_idx_head, head);
	}
	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
		/* If Monitor mode, check if we are oversubscribed
		 * against the full line rate.
		 */
		if (mbpi && total > mbpi)
			atomic_inc(&phba->cgn_driver_evt_cnt);
	}
	phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
6014 /* Each minute save Fabric and Driver congestion information */
6015 lpfc_cgn_save_evt_cnt(phba);
	/* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
	 * minute, adjust our next timer interval, if needed, to ensure a
	 * 1 minute granularity when we get the next timer interrupt.
	 */
6021 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6022 phba->cgn_evt_timestamp)) {
		timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
						  jiffies);
6025 if (timer_interval <= 0)
6026 timer_interval = LPFC_CMF_INTERVAL;
		/* If we adjust timer_interval, max_bytes_per_interval
		 * needs to be adjusted as well.
		 */
		phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
						    timer_interval, 1000);
		if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
			phba->cmf_max_bytes_per_interval =
				phba->cmf_link_byte_count;
	}
	/* Since total_bytes has already been zeroed, it's okay to unblock
	 * after max_bytes_per_interval is setup.
	 */
6041 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6042 queue_work(phba->wq, &phba->unblock_request_work);
	/* SCSI IO is now unblocked */
	atomic_set(&phba->cmf_stop_io, 0);

skip:
	hrtimer_forward_now(timer,
			    ktime_set(0, timer_interval * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}
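/*
 * A minimal sketch of the hrtimer re-arm pattern used above
 * (hypothetical callback): forward past now by the chosen interval and
 * ask the core to keep the timer running:
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		hrtimer_forward_now(t, ms_to_ktime(LPFC_CMF_INTERVAL));
 *		return HRTIMER_RESTART;
 *	}
 */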
6053 #define trunk_link_status(__idx)\
6054 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6055 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6056 "Link up" : "Link down") : "NA"
/* Did port __idx report an error? */
6058 #define trunk_port_fault(__idx)\
6059 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6060 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6069 phba->sli4_hba.link_state.speed =
6070 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6071 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6073 phba->sli4_hba.link_state.logical_speed =
6074 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6075 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6076 phba->fc_linkspeed =
6077 lpfc_async_link_speed_to_read_top(
6079 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6081 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6082 phba->trunk_link.link0.state =
6083 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6084 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6085 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6087 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6088 phba->trunk_link.link1.state =
6089 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6090 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6091 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6093 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6094 phba->trunk_link.link2.state =
6095 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6096 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6097 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6099 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6100 phba->trunk_link.link3.state =
6101 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6102 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6107 "2910 Async FC Trunking Event - Speed:%d\n"
6108 "\tLogical speed:%d "
6109 "port0: %s port1: %s port2: %s port3: %s\n",
6110 phba->sli4_hba.link_state.speed,
6111 phba->sli4_hba.link_state.logical_speed,
6112 trunk_link_status(0), trunk_link_status(1),
6113 trunk_link_status(2), trunk_link_status(3));
6115 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6116 lpfc_cmf_signal_init(phba);
6119 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6120 "3202 trunk error:0x%x (%s) seen on port0:%s "
6121 /*
6122 * SLI-4: We have only 0xA error codes
6123 * defined as of now. print an appropriate
6124 * message in case driver needs to be updated.
6125 */
6126 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6127 "UNDEFINED. update driver." : trunk_errmsg[err],
6128 trunk_port_fault(0), trunk_port_fault(1),
6129 trunk_port_fault(2), trunk_port_fault(3));
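/*
 * Worked example of the fault decode above (illustrative): with
 * port_fault = 0x5 and err = 0x2, bits 0 and 2 are set, so configured
 * links 0 and 2 record fault code 0x2 while links 1 and 3 stay 0.
 */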
6134 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6135 * @phba: pointer to lpfc hba data structure.
6136 * @acqe_fc: pointer to the async fc completion queue entry.
6138 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6139 * that the event was received and then issue a read_topology mailbox command so
6140 * that the rest of the driver will treat it the same as SLI3.
6143 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6145 struct lpfc_dmabuf *mp;
6148 struct lpfc_mbx_read_top *la;
6151 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6152 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6153 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6154 "2895 Non FC link Event detected.(%d)\n",
6155 bf_get(lpfc_trailer_type, acqe_fc));
6159 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6160 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6161 lpfc_update_trunk_link_status(phba, acqe_fc);
6165 /* Keep the link status for extra SLI4 state machine reference */
6166 phba->sli4_hba.link_state.speed =
6167 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6168 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6169 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6170 phba->sli4_hba.link_state.topology =
6171 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6172 phba->sli4_hba.link_state.status =
6173 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6174 phba->sli4_hba.link_state.type =
6175 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6176 phba->sli4_hba.link_state.number =
6177 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6178 phba->sli4_hba.link_state.fault =
6179 bf_get(lpfc_acqe_link_fault, acqe_fc);
6181 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6182 LPFC_FC_LA_TYPE_LINK_DOWN)
6183 phba->sli4_hba.link_state.logical_speed = 0;
6184 else if (!phba->sli4_hba.conf_trunk)
6185 phba->sli4_hba.link_state.logical_speed =
6186 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6188 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6189 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6190 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6191 "%dMbps Fault:%d\n",
6192 phba->sli4_hba.link_state.speed,
6193 phba->sli4_hba.link_state.topology,
6194 phba->sli4_hba.link_state.status,
6195 phba->sli4_hba.link_state.type,
6196 phba->sli4_hba.link_state.number,
6197 phba->sli4_hba.link_state.logical_speed,
6198 phba->sli4_hba.link_state.fault);
6199 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6202 "2897 The mboxq allocation failed\n");
6205 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6207 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6208 "2898 The lpfc_dmabuf allocation failed\n");
6211 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
6213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6214 "2899 The mbuf allocation failed\n");
6215 goto out_free_dmabuf;
6218 /* Cleanup any outstanding ELS commands */
6219 lpfc_els_flush_all_cmd(phba);
6221 /* Block ELS IOCBs until we have done process link event */
6222 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6224 /* Update link event statistics */
6225 phba->sli.slistat.link_event++;
6227 /* Create lpfc_handle_latt mailbox command from link ACQE */
6228 lpfc_read_topology(phba, pmb, mp);
6229 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6230 pmb->vport = phba->pport;
6232 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6233 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6235 switch (phba->sli4_hba.link_state.status) {
6236 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6237 phba->link_flag |= LS_MDS_LINK_DOWN;
6239 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6240 phba->link_flag |= LS_MDS_LOOPBACK;
6246 /* Initialize completion status */
6248 mb->mbxStatus = MBX_SUCCESS;
6250 /* Parse port fault information field */
6251 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6253 /* Parse and translate link attention fields */
6254 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6255 la->eventTag = acqe_fc->event_tag;
6257 if (phba->sli4_hba.link_state.status ==
6258 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6259 bf_set(lpfc_mbx_read_top_att_type, la,
6260 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6262 bf_set(lpfc_mbx_read_top_att_type, la,
6263 LPFC_FC_LA_TYPE_LINK_DOWN);
6265 /* Invoke the mailbox command callback function */
6266 lpfc_mbx_cmpl_read_topology(phba, pmb);
6271 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6272 if (rc == MBX_NOT_FINISHED) {
6273 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6274 goto out_free_dmabuf;
6281 mempool_free(pmb, phba->mbox_mem_pool);
6285 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6286 * @phba: pointer to lpfc hba data structure.
6287 * @acqe_sli: pointer to the async SLI completion queue entry.
6289 * This routine is to handle the SLI4 asynchronous SLI events.
6292 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6298 uint8_t operational = 0;
6299 struct temp_event temp_event_data;
6300 struct lpfc_acqe_misconfigured_event *misconfigured;
6301 struct lpfc_acqe_cgn_signal *cgn_signal;
6302 struct Scsi_Host *shost;
6303 struct lpfc_vport **vports;
6306 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6308 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6309 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6310 "x%08x x%08x x%08x\n", evt_type,
6311 acqe_sli->event_data1, acqe_sli->event_data2,
6312 acqe_sli->reserved, acqe_sli->trailer);
6314 port_name = phba->Port[0];
6315 if (port_name == 0x00)
6316 port_name = '?'; /* port name is empty */
6319 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6320 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6321 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6322 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6324 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6325 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6326 acqe_sli->event_data1, port_name);
6328 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6329 shost = lpfc_shost_from_vport(phba->pport);
6330 fc_host_post_vendor_event(shost, fc_get_event_number(),
6331 sizeof(temp_event_data),
6332 (char *)&temp_event_data,
6333 SCSI_NL_VID_TYPE_PCI
6334 | PCI_VENDOR_ID_EMULEX);
6336 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6337 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6338 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6339 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6341 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6342 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6343 acqe_sli->event_data1, port_name);
6345 shost = lpfc_shost_from_vport(phba->pport);
6346 fc_host_post_vendor_event(shost, fc_get_event_number(),
6347 sizeof(temp_event_data),
6348 (char *)&temp_event_data,
6349 SCSI_NL_VID_TYPE_PCI
6350 | PCI_VENDOR_ID_EMULEX);
6352 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6353 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6354 &acqe_sli->event_data1;
6356 /* fetch the status for this port */
6357 switch (phba->sli4_hba.lnk_info.lnk_no) {
6358 case LPFC_LINK_NUMBER_0:
6359 status = bf_get(lpfc_sli_misconfigured_port0_state,
6360 &misconfigured->theEvent);
6361 operational = bf_get(lpfc_sli_misconfigured_port0_op,
6362 &misconfigured->theEvent);
6364 case LPFC_LINK_NUMBER_1:
6365 status = bf_get(lpfc_sli_misconfigured_port1_state,
6366 &misconfigured->theEvent);
6367 operational = bf_get(lpfc_sli_misconfigured_port1_op,
6368 &misconfigured->theEvent);
6370 case LPFC_LINK_NUMBER_2:
6371 status = bf_get(lpfc_sli_misconfigured_port2_state,
6372 &misconfigured->theEvent);
6373 operational = bf_get(lpfc_sli_misconfigured_port2_op,
6374 &misconfigured->theEvent);
6376 case LPFC_LINK_NUMBER_3:
6377 status = bf_get(lpfc_sli_misconfigured_port3_state,
6378 &misconfigured->theEvent);
6379 operational = bf_get(lpfc_sli_misconfigured_port3_op,
6380 &misconfigured->theEvent);
6383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6385 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6386 "event: Invalid link %d",
6387 phba->sli4_hba.lnk_info.lnk_no);
6391 /* Skip if optic state unchanged */
6392 if (phba->sli4_hba.lnk_info.optic_state == status)
6396 case LPFC_SLI_EVENT_STATUS_VALID:
6397 sprintf(message, "Physical Link is functional");
6399 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6400 sprintf(message, "Optics faulted/incorrectly "
6401 "installed/not installed - Reseat optics, "
6402 "if issue not resolved, replace.");
6404 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6406 "Optics of two types installed - Remove one "
6407 "optic or install matching pair of optics.");
6409 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6410 sprintf(message, "Incompatible optics - Replace with "
6411 "compatible optics for card to function.");
6413 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6414 sprintf(message, "Unqualified optics - Replace with "
6415 "Avago optics for Warranty and Technical "
6416 "Support - Link is%s operational",
6417 (operational) ? " not" : "");
6419 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6420 sprintf(message, "Uncertified optics - Replace with "
6421 "Avago-certified optics to enable link "
6422 "operation - Link is%s operational",
6423 (operational) ? " not" : "");
6426 /* firmware is reporting a status we don't know about */
6427 sprintf(message, "Unknown event status x%02x", status);
6431 /* Issue READ_CONFIG mbox command to refresh supported speeds */
6432 rc = lpfc_sli4_read_config(phba);
6435 lpfc_printf_log(phba, KERN_ERR,
6437 "3194 Unable to retrieve supported "
6438 "speeds, rc = 0x%x\n", rc);
6440 vports = lpfc_create_vport_work_array(phba);
6441 if (vports != NULL) {
6442 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6444 shost = lpfc_shost_from_vport(vports[i]);
6445 lpfc_host_supported_speeds_set(shost);
6448 lpfc_destroy_vport_work_array(phba, vports);
6450 phba->sli4_hba.lnk_info.optic_state = status;
6451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6452 "3176 Port Name %c %s\n", port_name, message);
6454 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6455 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6456 "3192 Remote DPort Test Initiated - "
6457 "Event Data1:x%08x Event Data2: x%08x\n",
6458 acqe_sli->event_data1, acqe_sli->event_data2);
6460 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6461 /* Call FW to obtain active parms */
6462 lpfc_sli4_cgn_parm_chg_evt(phba);
6464 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6465 /* Misconfigured WWN. Reports that the SLI Port is configured
6466 * to use FA-WWN, but the attached device doesn't support it.
6467 * No driver action is required.
6468 * Event Data1 - N.A, Event Data2 - N.A
6470 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
6471 "2699 Misconfigured FA-WWN - Attached device does "
6472 "not support FA-WWN\n");
6474 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6475 /* EEPROM failure. No driver action is required */
6476 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6477 "2518 EEPROM failure - "
6478 "Event Data1: x%08x Event Data2: x%08x\n",
6479 acqe_sli->event_data1, acqe_sli->event_data2);
6481 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6482 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6484 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6485 &acqe_sli->event_data1;
6486 phba->cgn_acqe_cnt++;
6488 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6489 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6490 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6492 /* no threshold for CMF, even 1 signal will trigger an event */
6494 /* Alarm overrides warning, so check that first */
6495 if (cgn_signal->alarm_cnt) {
6496 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6497 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6498 atomic_add(cgn_signal->alarm_cnt,
6499 &phba->cgn_sync_alarm_cnt);
6502 /* signal action needs to be taken */
6503 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6504 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6505 /* Keep track of warning cnt for CMF_SYNC_WQE */
6506 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6511 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6512 "3193 Unrecognized SLI event, type: 0x%x",
evt_type);
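/*
 * Accounting sketch for the CGN_SIGNAL case above (illustrative): an
 * ACQE carrying, say, 3 warning signals and 1 alarm bumps
 * cgn_acqe_stat.warn by 3 and cgn_acqe_stat.alarm by 1; the
 * cgn_sync_* counters that feed CMF_SYNC_WQE only advance for the
 * signal types the port registered (EDC_CG_SIG_WARN_ONLY or
 * EDC_CG_SIG_WARN_ALARM).
 */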
6519 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6520 * @vport: pointer to vport data structure.
6522 * This routine is to perform Clear Virtual Link (CVL) on a vport in
6523 * response to a CVL event.
6525 * Return the pointer to the ndlp with the vport if successful, otherwise NULL.
6528 static struct lpfc_nodelist *
6529 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6531 struct lpfc_nodelist *ndlp;
6532 struct Scsi_Host *shost;
6533 struct lpfc_hba *phba;
6540 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6542 /* Cannot find existing Fabric ndlp, so allocate a new one */
6543 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6546 /* Set the node type */
6547 ndlp->nlp_type |= NLP_FABRIC;
6548 /* Put ndlp onto node list */
6549 lpfc_enqueue_node(vport, ndlp);
6551 if ((phba->pport->port_state < LPFC_FLOGI) &&
6552 (phba->pport->port_state != LPFC_VPORT_FAILED))
6554 /* If virtual link is not yet instantiated ignore CVL */
6555 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6556 && (vport->port_state != LPFC_VPORT_FAILED))
6558 shost = lpfc_shost_from_vport(vport);
6561 lpfc_linkdown_port(vport);
6562 lpfc_cleanup_pending_mbox(vport);
6563 spin_lock_irq(shost->host_lock);
6564 vport->fc_flag |= FC_VPORT_CVL_RCVD;
6565 spin_unlock_irq(shost->host_lock);
6571 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6572 * @phba: pointer to lpfc hba data structure.
6574 * This routine is to perform Clear Virtual Link (CVL) on all vports in
6575 * response to a FCF dead event.
6578 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6580 struct lpfc_vport **vports;
6583 vports = lpfc_create_vport_work_array(phba);
6585 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6586 lpfc_sli4_perform_vport_cvl(vports[i]);
6587 lpfc_destroy_vport_work_array(phba, vports);
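/*
 * The create/destroy pairing above is the standard pattern for walking
 * vports safely, sketched generically (act_on() is hypothetical):
 *
 *   vports = lpfc_create_vport_work_array(phba);
 *   if (vports)
 *           for (i = 0; i <= phba->max_vports && vports[i]; i++)
 *                   act_on(vports[i]);
 *   lpfc_destroy_vport_work_array(phba, vports);
 *
 * the work array is intended to pin its vport entries so none can
 * vanish mid-walk.
 */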
6591 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6592 * @phba: pointer to lpfc hba data structure.
6593 * @acqe_fip: pointer to the async fcoe completion queue entry.
6595 * This routine is to handle the SLI4 asynchronous fcoe event.
6598 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6599 struct lpfc_acqe_fip *acqe_fip)
6601 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6603 struct lpfc_vport *vport;
6604 struct lpfc_nodelist *ndlp;
6605 int active_vlink_present;
6606 struct lpfc_vport **vports;
6609 phba->fc_eventTag = acqe_fip->event_tag;
6610 phba->fcoe_eventtag = acqe_fip->event_tag;
6611 switch (event_type) {
6612 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6613 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6614 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6616 "2546 New FCF event, evt_tag:x%x, "
6618 acqe_fip->event_tag,
6621 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6623 "2788 FCF param modified event, "
6624 "evt_tag:x%x, index:x%x\n",
6625 acqe_fip->event_tag,
6627 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6629 * During period of FCF discovery, read the FCF
6630 * table record indexed by the event to update
6631 * FCF roundrobin failover eligible FCF bmask.
6633 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6635 "2779 Read FCF (x%x) for updating "
6636 "roundrobin FCF failover bmask\n",
6638 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6641 /* If the FCF discovery is in progress, do nothing. */
6642 spin_lock_irq(&phba->hbalock);
6643 if (phba->hba_flag & FCF_TS_INPROG) {
6644 spin_unlock_irq(&phba->hbalock);
6647 /* If fast FCF failover rescan event is pending, do nothing */
6648 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6649 spin_unlock_irq(&phba->hbalock);
6653 /* If the FCF has been in discovered state, do nothing. */
6654 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6655 spin_unlock_irq(&phba->hbalock);
6658 spin_unlock_irq(&phba->hbalock);
6660 /* Otherwise, scan the entire FCF table and re-discover SAN */
6661 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6662 "2770 Start FCF table scan per async FCF "
6663 "event, evt_tag:x%x, index:x%x\n",
6664 acqe_fip->event_tag, acqe_fip->index);
6665 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6666 LPFC_FCOE_FCF_GET_FIRST);
6668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6669 "2547 Issue FCF scan read FCF mailbox "
6670 "command failed (x%x)\n", rc);
6673 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6674 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6675 "2548 FCF Table full count 0x%x tag 0x%x\n",
6676 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6677 acqe_fip->event_tag);
6680 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6681 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6683 "2549 FCF (x%x) disconnected from network, "
6684 "tag:x%x\n", acqe_fip->index,
6685 acqe_fip->event_tag);
6687 * If we are in the middle of FCF failover process, clear
6688 * the corresponding FCF bit in the roundrobin bitmap.
6690 spin_lock_irq(&phba->hbalock);
6691 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6692 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6693 spin_unlock_irq(&phba->hbalock);
6694 /* Update FLOGI FCF failover eligible FCF bmask */
6695 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6698 spin_unlock_irq(&phba->hbalock);
6700 /* If the event is not for currently used fcf do nothing */
6701 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6705 * Otherwise, request the port to rediscover the entire FCF
6706 * table for a fast recovery from case that the current FCF
6707 * is no longer valid as we are not in the middle of FCF
6708 * failover process already.
6710 spin_lock_irq(&phba->hbalock);
6711 /* Mark the fast failover process in progress */
6712 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6713 spin_unlock_irq(&phba->hbalock);
6715 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6716 "2771 Start FCF fast failover process due to "
6717 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6718 "\n", acqe_fip->event_tag, acqe_fip->index);
6719 rc = lpfc_sli4_redisc_fcf_table(phba);
6721 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6723 "2772 Issue FCF rediscover mailbox "
6724 "command failed, fail through to FCF "
6726 spin_lock_irq(&phba->hbalock);
6727 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6728 spin_unlock_irq(&phba->hbalock);
6730 * Last resort will fail over by treating this
6731 * as a link down to FCF registration.
6733 lpfc_sli4_fcf_dead_failthrough(phba);
6735 /* Reset FCF roundrobin bmask for new discovery */
6736 lpfc_sli4_clear_fcf_rr_bmask(phba);
6738 * Handling fast FCF failover to a DEAD FCF event is
6739 * considered equivalent to receiving CVL to all vports.
6741 lpfc_sli4_perform_all_vport_cvl(phba);
6744 case LPFC_FIP_EVENT_TYPE_CVL:
6745 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6746 lpfc_printf_log(phba, KERN_ERR,
6748 "2718 Clear Virtual Link Received for VPI 0x%x"
6749 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6751 vport = lpfc_find_vport_by_vpid(phba,
6753 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6756 active_vlink_present = 0;
6758 vports = lpfc_create_vport_work_array(phba);
6760 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6762 if ((!(vports[i]->fc_flag &
6763 FC_VPORT_CVL_RCVD)) &&
6764 (vports[i]->port_state > LPFC_FDISC)) {
6765 active_vlink_present = 1;
6769 lpfc_destroy_vport_work_array(phba, vports);
6773 * Don't re-instantiate if vport is marked for deletion.
6774 * If we are here first then vport_delete is going to wait
6775 * for discovery to complete.
6777 if (!(vport->load_flag & FC_UNLOADING) &&
6778 active_vlink_present) {
6780 * If there are other active VLinks present,
6781 * re-instantiate the Vlink using FDISC.
6783 mod_timer(&ndlp->nlp_delayfunc,
6784 jiffies + msecs_to_jiffies(1000));
6785 spin_lock_irq(&ndlp->lock);
6786 ndlp->nlp_flag |= NLP_DELAY_TMO;
6787 spin_unlock_irq(&ndlp->lock);
6788 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6789 vport->port_state = LPFC_FDISC;
6792 * Otherwise, we request port to rediscover
6793 * the entire FCF table for a fast recovery
6794 * from possible case that the current FCF
6795 * is no longer valid if we are not already
6796 * in the FCF failover process.
6798 spin_lock_irq(&phba->hbalock);
6799 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6800 spin_unlock_irq(&phba->hbalock);
6803 /* Mark the fast failover process in progress */
6804 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6805 spin_unlock_irq(&phba->hbalock);
6806 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6808 "2773 Start FCF failover per CVL, "
6809 "evt_tag:x%x\n", acqe_fip->event_tag);
6810 rc = lpfc_sli4_redisc_fcf_table(phba);
6812 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6814 "2774 Issue FCF rediscover "
6815 "mailbox command failed, fail "
6816 "through to CVL event\n");
6817 spin_lock_irq(&phba->hbalock);
6818 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6819 spin_unlock_irq(&phba->hbalock);
6821 * Last resort will be re-try on the
6822 * current registered FCF entry.
6824 lpfc_retry_pport_discovery(phba);
6827 * Reset FCF roundrobin bmask for new
6830 lpfc_sli4_clear_fcf_rr_bmask(phba);
6834 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6835 "0288 Unknown FCoE event type 0x%x event tag "
6836 "0x%x\n", event_type, acqe_fip->event_tag);
6842 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6843 * @phba: pointer to lpfc hba data structure.
6844 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6846 * This routine is to handle the SLI4 asynchronous dcbx event.
6849 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6850 struct lpfc_acqe_dcbx *acqe_dcbx)
6852 phba->fc_eventTag = acqe_dcbx->event_tag;
6853 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6854 "0290 The SLI4 DCBX asynchronous event is not "
6859 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6860 * @phba: pointer to lpfc hba data structure.
6861 * @acqe_grp5: pointer to the async grp5 completion queue entry.
6863 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6864 * is an asynchronous notification of a logical link speed change. The Port
6865 * reports the logical link speed in units of 10Mbps.
6868 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6869 struct lpfc_acqe_grp5 *acqe_grp5)
6871 uint16_t prev_ll_spd;
6873 phba->fc_eventTag = acqe_grp5->event_tag;
6874 phba->fcoe_eventtag = acqe_grp5->event_tag;
6875 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
6876 phba->sli4_hba.link_state.logical_speed =
6877 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
6878 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6879 "2789 GRP5 Async Event: Updating logical link speed "
6880 "from %dMbps to %dMbps\n", prev_ll_spd,
6881 phba->sli4_hba.link_state.logical_speed);
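/*
 * Unit-conversion example for the log above (illustrative): the ACQE
 * reports llink_spd in units of 10 Mbps, so a raw value of 1000 yields
 * link_state.logical_speed = 10000, printed as "to 10000Mbps".
 */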
6885 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
6886 * @phba: pointer to lpfc hba data structure.
6888 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
6889 * is an asynchronous notification of a request to reset CM stats.
6892 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
6896 lpfc_init_congestion_stat(phba);
6900 * lpfc_cgn_params_val - Validate FW congestion parameters.
6901 * @phba: pointer to lpfc hba data structure.
6902 * @p_cfg_param: pointer to FW provided congestion parameters.
6904 * This routine validates the congestion parameters passed
6905 * by the FW to the driver via an ACQE event.
6908 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
6910 spin_lock_irq(&phba->hbalock);
6912 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
6913 LPFC_CFG_MONITOR)) {
6914 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
6915 "6225 CMF mode param out of range: %d\n",
6916 p_cfg_param->cgn_param_mode);
6917 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
6920 spin_unlock_irq(&phba->hbalock);
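/*
 * Example of the validation above: lpfc_rangecheck() is an inclusive
 * bounds test, so any FW-supplied cgn_param_mode outside
 * [LPFC_CFG_OFF, LPFC_CFG_MONITOR] is logged and forced back to
 * LPFC_CFG_OFF instead of failing the event.
 */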
6924 * lpfc_cgn_params_parse - Process a FW cong parm change event
6925 * @phba: pointer to lpfc hba data structure.
6926 * @p_cgn_param: pointer to a data buffer with the FW cong params.
6927 * @len: the size of pdata in bytes.
6929 * This routine validates the congestion management buffer signature
6930 * from the FW, validates the contents and makes corrections for
6931 * valid, in-range values. If the signature magic is correct and
6932 * after parameter validation, the contents are copied to the driver's
6933 * @phba structure. If the magic is incorrect, an error message is logged.
6937 lpfc_cgn_params_parse(struct lpfc_hba *phba,
6938 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
6940 struct lpfc_cgn_info *cp;
6941 uint32_t crc, oldmode;
6943 /* Make sure the FW has encoded the correct magic number to
6944 * validate the congestion parameter in FW memory.
6946 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
6947 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
6948 "4668 FW cgn parm buffer data: "
6949 "magic 0x%x version %d mode %d "
6950 "level0 %d level1 %d "
6951 "level2 %d byte13 %d "
6952 "byte14 %d byte15 %d "
6953 "byte11 %d byte12 %d activeMode %d\n",
6954 p_cgn_param->cgn_param_magic,
6955 p_cgn_param->cgn_param_version,
6956 p_cgn_param->cgn_param_mode,
6957 p_cgn_param->cgn_param_level0,
6958 p_cgn_param->cgn_param_level1,
6959 p_cgn_param->cgn_param_level2,
6960 p_cgn_param->byte13,
6961 p_cgn_param->byte14,
6962 p_cgn_param->byte15,
6963 p_cgn_param->byte11,
6964 p_cgn_param->byte12,
6965 phba->cmf_active_mode);
6967 oldmode = phba->cmf_active_mode;
6969 /* Any parameters out of range are corrected to defaults
6970 * by this routine. No need to fail.
6972 lpfc_cgn_params_val(phba, p_cgn_param);
6974 /* Parameters are verified, move them into driver storage */
6975 spin_lock_irq(&phba->hbalock);
6976 memcpy(&phba->cgn_p, p_cgn_param,
6977 sizeof(struct lpfc_cgn_param));
6979 /* Update parameters in congestion info buffer now */
6981 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
6982 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
6983 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
6984 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
6985 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
6986 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
6987 LPFC_CGN_CRC32_SEED);
6988 cp->cgn_info_crc = cpu_to_le32(crc);
6990 spin_unlock_irq(&phba->hbalock);
6992 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
6996 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
6997 /* Turning CMF on */
6998 lpfc_cmf_start(phba);
7000 if (phba->link_state >= LPFC_LINK_UP) {
7001 phba->cgn_reg_fpin =
7002 phba->cgn_init_reg_fpin;
7003 phba->cgn_reg_signal =
7004 phba->cgn_init_reg_signal;
7005 lpfc_issue_els_edc(phba->pport, 0);
7009 case LPFC_CFG_MANAGED:
7010 switch (phba->cgn_p.cgn_param_mode) {
7012 /* Turning CMF off */
7013 lpfc_cmf_stop(phba);
7014 if (phba->link_state >= LPFC_LINK_UP)
7015 lpfc_issue_els_edc(phba->pport, 0);
7017 case LPFC_CFG_MONITOR:
7018 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7019 "4661 Switch from MANAGED to "
7021 phba->cmf_max_bytes_per_interval =
7022 phba->cmf_link_byte_count;
7024 /* Resume blocked IO - unblock on workqueue */
7025 queue_work(phba->wq,
7026 &phba->unblock_request_work);
7030 case LPFC_CFG_MONITOR:
7031 switch (phba->cgn_p.cgn_param_mode) {
7033 /* Turning CMF off */
7034 lpfc_cmf_stop(phba);
7035 if (phba->link_state >= LPFC_LINK_UP)
7036 lpfc_issue_els_edc(phba->pport, 0);
7038 case LPFC_CFG_MANAGED:
7039 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7040 "4662 Switch from MONITOR to "
7042 lpfc_cmf_signal_init(phba);
7048 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7049 "4669 FW cgn parm buf wrong magic 0x%x "
7050 "version %d\n", p_cgn_param->cgn_param_magic,
7051 p_cgn_param->cgn_param_version);
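/*
 * Summary of the mode transitions handled above (old -> new):
 *
 *   OFF     -> MANAGED/MONITOR : lpfc_cmf_start(); if link is up,
 *                                restore initial FPIN/signal
 *                                registrations and issue ELS EDC
 *   MANAGED -> OFF             : lpfc_cmf_stop(); EDC if link is up
 *   MANAGED -> MONITOR         : open the byte budget to the full
 *                                link byte count and unblock requests
 *   MONITOR -> OFF             : lpfc_cmf_stop(); EDC if link is up
 *   MONITOR -> MANAGED         : lpfc_cmf_signal_init()
 */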
7056 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7057 * @phba: pointer to lpfc hba data structure.
7059 * This routine issues a read_object mailbox command to
7060 * get the congestion management parameters from the FW,
7061 * parses them, and updates the driver maintained values.
7064 * 0 if the object was empty
7065 * a negative errno value (-Eval) if an error was encountered
7066 * the count of bytes read from the object
7069 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7072 struct lpfc_cgn_param *p_cgn_param = NULL;
7076 /* Find out if the FW has a new set of congestion parameters. */
7077 len = sizeof(struct lpfc_cgn_param);
7078 pdata = kzalloc(len, GFP_KERNEL);
if (!pdata)
return -ENOMEM;
7079 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7082 /* 0 means no data. A negative means error. A positive means
7083 * bytes were copied.
7086 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7087 "4670 CGN RD OBJ returns no data\n");
7089 } else if (ret < 0) {
7090 /* Some error. Just exit and return it to the caller.*/
7094 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7095 "6234 READ CGN PARAMS Successful %d\n", len);
7097 /* Parse data pointer over len and update the phba congestion
7098 * parameters with values passed back. The receive rate values
7099 * may have been altered in FW, but take no action here.
7101 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7102 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7110 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7111 * @phba: pointer to lpfc hba data structure.
7113 * The FW generated Async ACQE SLI event calls this routine when
7114 * the event type is an SLI Internal Port Event and the Event Code
7115 * indicates a change to the FW maintained congestion parameters.
7117 * This routine executes a Read_Object mailbox call to obtain the
7118 * current congestion parameters maintained in FW and corrects
7119 * the driver's active congestion parameters.
7121 * The acqe event is not passed because there is no further data required.
7124 * Returns nonzero error if event processing encountered an error.
7125 * Zero otherwise for success.
7128 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7132 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7133 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7134 "4664 Cgn Evt when E2E off. Drop event\n");
7138 /* If the event is claiming an empty object, it's ok. A write
7139 * could have cleared it. Only error is a negative return
7142 ret = lpfc_sli4_cgn_params_read(phba);
7144 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7145 "4667 Error reading Cgn Params (%d)\n",
7148 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7149 "4673 CGN Event empty object.\n");
7155 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7156 * @phba: pointer to lpfc hba data structure.
7158 * This routine is invoked by the worker thread to process all the pending
7159 * SLI4 asynchronous events.
7161 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7163 struct lpfc_cq_event *cq_event;
7164 unsigned long iflags;
7166 /* First, declare the async event has been handled */
7167 spin_lock_irqsave(&phba->hbalock, iflags);
7168 phba->hba_flag &= ~ASYNC_EVENT;
7169 spin_unlock_irqrestore(&phba->hbalock, iflags);
7171 /* Now, handle all the async events */
7172 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7173 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7174 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7175 cq_event, struct lpfc_cq_event, list);
7176 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7179 /* Process the asynchronous event */
7180 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7181 case LPFC_TRAILER_CODE_LINK:
7182 lpfc_sli4_async_link_evt(phba,
7183 &cq_event->cqe.acqe_link);
7185 case LPFC_TRAILER_CODE_FCOE:
7186 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7188 case LPFC_TRAILER_CODE_DCBX:
7189 lpfc_sli4_async_dcbx_evt(phba,
7190 &cq_event->cqe.acqe_dcbx);
7192 case LPFC_TRAILER_CODE_GRP5:
7193 lpfc_sli4_async_grp5_evt(phba,
7194 &cq_event->cqe.acqe_grp5);
7196 case LPFC_TRAILER_CODE_FC:
7197 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7199 case LPFC_TRAILER_CODE_SLI:
7200 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7202 case LPFC_TRAILER_CODE_CMSTAT:
7203 lpfc_sli4_async_cmstat_evt(phba);
7206 lpfc_printf_log(phba, KERN_ERR,
7208 "1804 Invalid asynchronous event code: "
7209 "x%x\n", bf_get(lpfc_trailer_code,
7210 &cq_event->cqe.mcqe_cmpl));
7214 /* Free the completion event processed to the free pool */
7215 lpfc_sli4_cq_event_release(phba, cq_event);
7216 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7218 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
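/*
 * Locking note for the loop above: asynce_list_lock is held only long
 * enough to detach one cq_event from sp_asynce_work_queue and is
 * dropped across the per-type handler, so handlers are free to sleep
 * or take other locks; the lock is re-taken before the list is tested
 * again.
 */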
7222 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7223 * @phba: pointer to lpfc hba data structure.
7225 * This routine is invoked by the worker thread to process FCF table
7226 * rediscovery pending completion event.
7228 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7232 spin_lock_irq(&phba->hbalock);
7233 /* Clear FCF rediscovery timeout event */
7234 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7235 /* Clear driver fast failover FCF record flag */
7236 phba->fcf.failover_rec.flag = 0;
7237 /* Set state for FCF fast failover */
7238 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7239 spin_unlock_irq(&phba->hbalock);
7241 /* Scan FCF table from the first entry to re-discover SAN */
7242 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7243 "2777 Start post-quiescent FCF table scan\n");
7244 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7246 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7247 "2747 Issue FCF scan read FCF mailbox "
7248 "command failed 0x%x\n", rc);
7252 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7253 * @phba: pointer to lpfc hba data structure.
7254 * @dev_grp: The HBA PCI-Device group number.
7256 * This routine is invoked to set up the per HBA PCI-Device group function
7257 * API jump table entries.
7259 * Return: 0 if success, otherwise -ENODEV
7262 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7266 /* Set up lpfc PCI-device group */
7267 phba->pci_dev_grp = dev_grp;
7269 /* The LPFC_PCI_DEV_OC uses SLI4 */
7270 if (dev_grp == LPFC_PCI_DEV_OC)
7271 phba->sli_rev = LPFC_SLI_REV4;
7273 /* Set up device INIT API function jump table */
7274 rc = lpfc_init_api_table_setup(phba, dev_grp);
7277 /* Set up SCSI API function jump table */
7278 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7281 /* Set up SLI API function jump table */
7282 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7285 /* Set up MBOX API function jump table */
7286 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
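/*
 * Conceptual sketch of what each *_api_table_setup() call installs
 * (illustrative, not the literal implementation): SLI-3 or SLI-4
 * routines are hung off phba based on dev_grp, e.g.
 *
 *   switch (dev_grp) {
 *   case LPFC_PCI_DEV_LP:    // SLI-3 parts
 *           phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
 *           break;
 *   case LPFC_PCI_DEV_OC:    // SLI-4 parts
 *           phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *           break;
 *   }
 *
 * so the rest of the driver calls through one jump table regardless
 * of SLI revision.
 */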
7294 * lpfc_log_intr_mode - Log the active interrupt mode
7295 * @phba: pointer to lpfc hba data structure.
7296 * @intr_mode: active interrupt mode adopted.
7298 * This routine is invoked to log the currently used active interrupt mode to the device.
7301 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7303 switch (intr_mode) {
7305 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7306 "0470 Enable INTx interrupt mode.\n");
7309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7310 "0481 Enabled MSI interrupt mode.\n");
7313 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7314 "0480 Enabled MSI-X interrupt mode.\n");
7317 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7318 "0482 Illegal interrupt mode.\n");
7325 * lpfc_enable_pci_dev - Enable a generic PCI device.
7326 * @phba: pointer to lpfc hba data structure.
7328 * This routine is invoked to enable the PCI device that is common to all PCI device types.
7333 * other values - error
7336 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7338 struct pci_dev *pdev;
7340 /* Obtain PCI device reference */
7344 pdev = phba->pcidev;
7345 /* Enable PCI device */
7346 if (pci_enable_device_mem(pdev))
7348 /* Request PCI resource for the device */
7349 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7350 goto out_disable_device;
7351 /* Set up device as PCI master and save state for EEH */
7352 pci_set_master(pdev);
7353 pci_try_set_mwi(pdev);
7354 pci_save_state(pdev);
7356 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7357 if (pci_is_pcie(pdev))
7358 pdev->needs_freset = 1;
7363 pci_disable_device(pdev);
7365 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7366 "1401 Failed to enable pci device\n");
7371 * lpfc_disable_pci_dev - Disable a generic PCI device.
7372 * @phba: pointer to lpfc hba data structure.
7374 * This routine is invoked to disable the PCI device that is common to all PCI device types.
7378 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7380 struct pci_dev *pdev;
7382 /* Obtain PCI device reference */
7386 pdev = phba->pcidev;
7387 /* Release PCI resource and disable PCI device */
7388 pci_release_mem_regions(pdev);
7389 pci_disable_device(pdev);
7395 * lpfc_reset_hba - Reset a hba
7396 * @phba: pointer to lpfc hba data structure.
7398 * This routine is invoked to reset a hba device. It brings the HBA
7399 * offline, performs a board restart, and then brings the board back
7400 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7401 * any outstanding mailbox commands.
7404 lpfc_reset_hba(struct lpfc_hba *phba)
7406 /* If resets are disabled then set error state and return. */
7407 if (!phba->cfg_enable_hba_reset) {
7408 phba->link_state = LPFC_HBA_ERROR;
7412 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7413 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7414 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7416 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7417 lpfc_sli_flush_io_rings(phba);
7420 lpfc_sli_brdrestart(phba);
7422 lpfc_unblock_mgmt_io(phba);
7426 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7427 * @phba: pointer to lpfc hba data structure.
7429 * This function reads the PCI SR-IOV extended capability of the physical
7430 * function to obtain the total number of virtual functions the device
7431 * supports. If the device has no SR-IOV capability, zero is returned
7432 * and this is not treated as an error: not all devices support
7433 * SR-IOV.
7436 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7438 struct pci_dev *pdev = phba->pcidev;
7442 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7446 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7451 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7452 * @phba: pointer to lpfc hba data structure.
7453 * @nr_vfn: number of virtual functions to be enabled.
7455 * This function enables the PCI SR-IOV virtual functions to a physical
7456 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7457 * enable the number of virtual functions to the physical function. As
7458 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7459 * API call is not considered an error condition for most devices.
7462 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7464 struct pci_dev *pdev = phba->pcidev;
7465 uint16_t max_nr_vfn;
7468 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7469 if (nr_vfn > max_nr_vfn) {
7470 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7471 "3057 Requested vfs (%d) greater than "
7472 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7476 rc = pci_enable_sriov(pdev, nr_vfn);
7478 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7479 "2806 Failed to enable sriov on this device "
7480 "with vfn number nr_vf:%d, rc:%d\n",
7483 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7484 "2807 Successful enable sriov on this device "
7485 "with vfn number nr_vf:%d\n", nr_vfn);
7490 lpfc_unblock_requests_work(struct work_struct *work)
7492 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7493 unblock_request_work);
7495 lpfc_unblock_requests(phba);
7499 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7500 * @phba: pointer to lpfc hba data structure.
7502 * This routine is invoked to set up the driver internal resources before the
7503 * device specific resource setup to support the HBA device it attached to.
7507 * other values - error
7510 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7512 struct lpfc_sli *psli = &phba->sli;
7515 * Driver resources common to all SLI revisions
7517 atomic_set(&phba->fast_event_count, 0);
7518 atomic_set(&phba->dbg_log_idx, 0);
7519 atomic_set(&phba->dbg_log_cnt, 0);
7520 atomic_set(&phba->dbg_log_dmping, 0);
7521 spin_lock_init(&phba->hbalock);
7523 /* Initialize port_list spinlock */
7524 spin_lock_init(&phba->port_list_lock);
7525 INIT_LIST_HEAD(&phba->port_list);
7527 INIT_LIST_HEAD(&phba->work_list);
7528 init_waitqueue_head(&phba->wait_4_mlo_m_q);
7530 /* Initialize the wait queue head for the kernel thread */
7531 init_waitqueue_head(&phba->work_waitq);
7533 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7534 "1403 Protocols supported %s %s %s\n",
7535 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7537 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7539 (phba->nvmet_support ? "NVMET" : " "));
7541 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7542 spin_lock_init(&phba->scsi_buf_list_get_lock);
7543 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7544 spin_lock_init(&phba->scsi_buf_list_put_lock);
7545 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7547 /* Initialize the fabric iocb list */
7548 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7550 /* Initialize list to save ELS buffers */
7551 INIT_LIST_HEAD(&phba->elsbuf);
7553 /* Initialize FCF connection rec list */
7554 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7556 /* Initialize OAS configuration list */
7557 spin_lock_init(&phba->devicelock);
7558 INIT_LIST_HEAD(&phba->luns);
7560 /* MBOX heartbeat timer */
7561 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7562 /* Fabric block timer */
7563 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7564 /* EA polling mode timer */
7565 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7566 /* Heartbeat timer */
7567 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7569 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7571 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7572 lpfc_idle_stat_delay_work);
7573 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7578 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7579 * @phba: pointer to lpfc hba data structure.
7581 * This routine is invoked to set up the driver internal resources specific to
7582 * support the SLI-3 HBA device it attached to.
7586 * other values - error
7589 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7594 * Initialize timers used by driver
7597 /* FCP polling mode timer */
7598 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7600 /* Host attention work mask setup */
7601 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7602 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7604 /* Get all the module params for configuring this host */
7605 lpfc_get_cfgparam(phba);
7606 /* Set up phase-1 common device driver resources */
7608 rc = lpfc_setup_driver_resource_phase1(phba);
7612 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
7613 phba->menlo_flag |= HBA_MENLO_SUPPORT;
7614 /* check for menlo minimum sg count */
7615 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
7616 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
7619 if (!phba->sli.sli3_ring)
7620 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7621 sizeof(struct lpfc_sli_ring),
7623 if (!phba->sli.sli3_ring)
7627 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
7628 * used to create the sg_dma_buf_pool must be dynamically calculated.
7631 if (phba->sli_rev == LPFC_SLI_REV4)
7632 entry_sz = sizeof(struct sli4_sge);
7634 entry_sz = sizeof(struct ulp_bde64);
7636 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7637 if (phba->cfg_enable_bg) {
7639 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7640 * the FCP rsp, and a BDE for each. Since we have no control
7641 * over how many protection data segments the SCSI Layer
7642 * will hand us (ie: there could be one for every block
7643 * in the IO), we just allocate enough BDEs to accommodate
7644 * our max amount and we need to limit lpfc_sg_seg_cnt to
7645 * minimize the risk of running out.
7647 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7648 sizeof(struct fcp_rsp) +
7649 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7651 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7652 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7654 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7655 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7658 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7659 * the FCP rsp, a BDE for each, and a BDE for up to
7660 * cfg_sg_seg_cnt data segments.
7662 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7663 sizeof(struct fcp_rsp) +
7664 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7666 /* Total BDEs in BPL for scsi_sg_list */
7667 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7670 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7671 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7672 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7673 phba->cfg_total_seg_cnt);
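/*
 * Sizing example for the non-BG path above (illustrative, assuming a
 * cfg_sg_seg_cnt of 64 and 12-byte ulp_bde64 entries):
 *
 *   sg_dma_buf_size = sizeof(fcp_cmnd) + sizeof(fcp_rsp) + (64 + 2) * 12
 *
 * where the "+ 2" covers the reserved FCP cmnd and rsp BDEs that are
 * also counted in cfg_total_seg_cnt.
 */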
7675 phba->max_vpi = LPFC_MAX_VPI;
7676 /* This will be set to correct value after config_port mbox */
7677 phba->max_vports = 0;
7680 * Initialize the SLI Layer to run with lpfc HBAs.
7682 lpfc_sli_setup(phba);
7683 lpfc_sli_queue_init(phba);
7685 /* Allocate device driver memory */
7686 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7689 phba->lpfc_sg_dma_buf_pool =
7690 dma_pool_create("lpfc_sg_dma_buf_pool",
7691 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7694 if (!phba->lpfc_sg_dma_buf_pool)
7697 phba->lpfc_cmd_rsp_buf_pool =
7698 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7700 sizeof(struct fcp_cmnd) +
7701 sizeof(struct fcp_rsp),
7704 if (!phba->lpfc_cmd_rsp_buf_pool)
7705 goto fail_free_dma_buf_pool;
7708 * Enable sr-iov virtual functions if supported and configured
7709 * through the module parameter.
7711 if (phba->cfg_sriov_nr_virtfn > 0) {
7712 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7713 phba->cfg_sriov_nr_virtfn);
7715 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7716 "2808 Requested number of SR-IOV "
7717 "virtual functions (%d) is not "
7719 phba->cfg_sriov_nr_virtfn);
7720 phba->cfg_sriov_nr_virtfn = 0;
7726 fail_free_dma_buf_pool:
7727 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7728 phba->lpfc_sg_dma_buf_pool = NULL;
7730 lpfc_mem_free(phba);
7735 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7736 * @phba: pointer to lpfc hba data structure.
7738 * This routine is invoked to unset the driver internal resources set up
7739 * specific for supporting the SLI-3 HBA device it attached to.
7742 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7744 /* Free device driver memory allocated */
7745 lpfc_mem_free_all(phba);
7751 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7752 * @phba: pointer to lpfc hba data structure.
7754 * This routine is invoked to set up the driver internal resources specific to
7755 * support the SLI-4 HBA device it attached to.
7759 * other values - error
7762 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7764 LPFC_MBOXQ_t *mboxq;
7766 int rc, i, max_buf_size;
7773 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7774 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7775 phba->sli4_hba.curr_disp_cpu = 0;
7777 /* Get all the module params for configuring this host */
7778 lpfc_get_cfgparam(phba);
7780 /* Set up phase-1 common device driver resources */
7781 rc = lpfc_setup_driver_resource_phase1(phba);
7785 /* Before proceed, wait for POST done and device ready */
7786 rc = lpfc_sli4_post_status_check(phba);
7790 /* Allocate all driver workqueues here */
7792 /* The lpfc_wq workqueue for deferred irq use */
7793 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7796 * Initialize timers used by driver
7799 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7801 /* FCF rediscover timer */
7802 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7804 /* CMF congestion timer */
7805 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7806 phba->cmf_timer.function = lpfc_cmf_timer;
7809 * Control structure for handling external multi-buffer mailbox
7810 * command pass-through.
7812 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7813 sizeof(struct lpfc_mbox_ext_buf_ctx));
7814 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7816 phba->max_vpi = LPFC_MAX_VPI;
7818 /* This will be set to correct value after the read_config mbox */
7819 phba->max_vports = 0;
7821 /* Program the default value of vlan_id and fc_map */
7822 phba->valid_vlan = 0;
7823 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7824 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7825 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7828 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7829 * we will associate a new ring, for each EQ/CQ/WQ tuple.
7830 * The WQ create will allocate the ring.
7833 /* Initialize buffer queue management fields */
7834 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7835 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7836 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7838 /* for VMID idle timeout if VMID is enabled */
7839 if (lpfc_is_vmid_enabled(phba))
7840 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
7843 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
7845 /* Initialize the Abort buffer list used by driver */
7846 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
7847 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
7849 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7850 /* Initialize the Abort nvme buffer list used by driver */
7851 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
7852 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7853 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
7854 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
7855 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
7858 /* This abort list used by worker thread */
7859 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
7860 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
7861 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
7862 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
7865 * Initialize driver internal slow-path work queues
7868 /* Driver internal slow-path CQ Event pool */
7869 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
7870 /* Response IOCB work queue list */
7871 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
7872 /* Asynchronous event CQ Event work queue list */
7873 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
7874 /* Slow-path XRI aborted CQ Event work queue list */
7875 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
7876 /* Receive queue CQ Event work queue list */
7877 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
7879 /* Initialize extent block lists. */
7880 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
7881 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
7882 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
7883 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
7885 /* Initialize mboxq lists. If the early init routines fail
7886 * these lists need to be correctly initialized.
7888 INIT_LIST_HEAD(&phba->sli.mboxq);
7889 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
7891 /* initialize optic_state to 0xFF */
7892 phba->sli4_hba.lnk_info.optic_state = 0xff;
7894 /* Allocate device driver memory */
7895 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
7897 goto out_destroy_workqueue;
7899 /* IF Type 2 ports get initialized now. */
7900 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
7901 LPFC_SLI_INTF_IF_TYPE_2) {
7902 rc = lpfc_pci_function_reset(phba);
7907 phba->temp_sensor_support = 1;
7910 /* Create the bootstrap mailbox command */
7911 rc = lpfc_create_bootstrap_mbox(phba);
7915 /* Set up the host's endian order with the device. */
7916 rc = lpfc_setup_endian_order(phba);
7918 goto out_free_bsmbx;
7920 /* Set up the hba's configuration parameters. */
7921 rc = lpfc_sli4_read_config(phba);
7923 goto out_free_bsmbx;
7924 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
7926 goto out_free_bsmbx;
7928 /* IF Type 0 ports get initialized now. */
7929 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7930 LPFC_SLI_INTF_IF_TYPE_0) {
7931 rc = lpfc_pci_function_reset(phba);
7933 goto out_free_bsmbx;
7936 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7940 goto out_free_bsmbx;
7943 /* Check for NVMET being configured */
7944 phba->nvmet_support = 0;
7945 if (lpfc_enable_nvmet_cnt) {
7947 /* First get WWN of HBA instance */
7948 lpfc_read_nv(phba, mboxq);
7949 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7950 if (rc != MBX_SUCCESS) {
7951 lpfc_printf_log(phba, KERN_ERR,
7953 "6016 Mailbox failed, mbxCmd x%x "
7954 "READ_NV, mbxStatus x%x\n",
7955 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7956 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
7957 mempool_free(mboxq, phba->mbox_mem_pool);
7959 goto out_free_bsmbx;
7962 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
7964 wwn = cpu_to_be64(wwn);
7965 phba->sli4_hba.wwnn.u.name = wwn;
7966 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
7968 /* wwn is WWPN of HBA instance */
7969 wwn = cpu_to_be64(wwn);
7970 phba->sli4_hba.wwpn.u.name = wwn;
7972 /* Check to see if it matches any module parameter */
7973 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
7974 if (wwn == lpfc_enable_nvmet[i]) {
7975 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
7976 if (lpfc_nvmet_mem_alloc(phba))
7979 phba->nvmet_support = 1; /* a match */
7981 lpfc_printf_log(phba, KERN_ERR,
7983 "6017 NVME Target %016llx\n",
7986 lpfc_printf_log(phba, KERN_ERR,
7988 "6021 Can't enable NVME Target."
7989 " NVME_TARGET_FC infrastructure"
7990 " is not in kernel\n");
7992 /* Not supported for NVMET */
7993 phba->cfg_xri_rebalancing = 0;
7994 if (phba->irq_chann_mode == NHT_MODE) {
7995 phba->cfg_irq_chann =
7996 phba->sli4_hba.num_present_cpu;
7997 phba->cfg_hdw_queue =
7998 phba->sli4_hba.num_present_cpu;
7999 phba->irq_chann_mode = NORMAL_MODE;
8006 lpfc_nvme_mod_param_dep(phba);
8009 * Get sli4 parameters that override parameters from Port capabilities.
8010 * If this call fails, it isn't critical unless the SLI4 parameters come back in conflict.
8013 rc = lpfc_get_sli4_parameters(phba, mboxq);
8015 if_type = bf_get(lpfc_sli_intf_if_type,
8016 &phba->sli4_hba.sli_intf);
8017 if_fam = bf_get(lpfc_sli_intf_sli_family,
8018 &phba->sli4_hba.sli_intf);
8019 if (phba->sli4_hba.extents_in_use &&
8020 phba->sli4_hba.rpi_hdrs_in_use) {
8021 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8022 "2999 Unsupported SLI4 Parameters "
8023 "Extents and RPI headers enabled.\n");
8024 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8025 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8026 mempool_free(mboxq, phba->mbox_mem_pool);
8028 goto out_free_bsmbx;
8031 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8032 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8033 mempool_free(mboxq, phba->mbox_mem_pool);
8035 goto out_free_bsmbx;
8040 * 1 for cmd, 1 for rsp, NVME adds an extra one
8041 * for boundary conditions in its max_sgl_segment template.
8044 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8048 * It doesn't matter what family our adapter is in, we are
8049 * limited to 2 Pages, 512 SGEs, for our SGL.
8050 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8052 max_buf_size = (2 * SLI4_PAGE_SIZE);
8055 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8056 * used to create the sg_dma_buf_pool must be calculated.
8058 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8059 /* Both cfg_enable_bg and cfg_external_dif code paths */
8062 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8063 * the FCP rsp, and an SGE. Since we have no control
8064 * over how many protection segments the SCSI Layer
8065 * will hand us (ie: there could be one for every block
8066 * in the I/O), just allocate enough SGEs to accommodate
8067 * our max amount and we need to limit lpfc_sg_seg_cnt
8068 * to minimize the risk of running out.
8070 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8071 sizeof(struct fcp_rsp) + max_buf_size;
8073 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8074 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8077 * If supporting DIF, reduce the seg count for scsi to
8078 * allow room for the DIF sges.
8080 if (phba->cfg_enable_bg &&
8081 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8082 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8084 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8088 * The scsi_buf for a regular I/O holds the FCP cmnd,
8089 * the FCP rsp, a SGE for each, and a SGE for up to
8090 * cfg_sg_seg_cnt data segments.
8092 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8093 sizeof(struct fcp_rsp) +
8094 ((phba->cfg_sg_seg_cnt + extra) *
8095 sizeof(struct sli4_sge));
8097 /* Total SGEs for scsi_sg_list */
8098 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8099 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
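/*
 * Worked sizing example for the non-DIF path above (illustrative only;
 * the struct sizes are assumptions, not taken from the headers): with
 * cfg_sg_seg_cnt = 64, NVME enabled (extra = 3 per the comment above)
 * and sizeof(struct sli4_sge) == 16, the data SGEs need
 * (64 + 3) * 16 = 1072 bytes on top of the FCP cmnd and rsp, and
 * SLI4_PAGE_ALIGN() below rounds the total up to a single 4KB page.
 */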
8102 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8103 * need to post 1 page for the SGL.
8107 if (phba->cfg_xpsgl && !phba->nvmet_support)
8108 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8109 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8110 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8112 phba->cfg_sg_dma_buf_size =
8113 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8115 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8116 sizeof(struct sli4_sge);
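/*
 * Example (assuming a 4KB dma buf and 16-byte SGEs): border_sge_num =
 * 4096 / 16 = 256, roughly the number of SGE slots that fit in one
 * buffer before an SGL must chain into another page.
 */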
8118 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8119 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8120 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8121 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8122 "6300 Reducing NVME sg segment "
8124 LPFC_MAX_NVME_SEG_CNT);
8125 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8127 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8130 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8131 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8132 "total:%d scsi:%d nvme:%d\n",
8133 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8134 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8135 phba->cfg_nvme_seg_cnt);
8137 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8138 i = phba->cfg_sg_dma_buf_size;
8142 phba->lpfc_sg_dma_buf_pool =
8143 dma_pool_create("lpfc_sg_dma_buf_pool",
8145 phba->cfg_sg_dma_buf_size,
8147 if (!phba->lpfc_sg_dma_buf_pool)
8148 goto out_free_bsmbx;
8150 phba->lpfc_cmd_rsp_buf_pool =
8151 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8153 sizeof(struct fcp_cmnd) +
8154 sizeof(struct fcp_rsp),
8156 if (!phba->lpfc_cmd_rsp_buf_pool)
8157 goto out_free_sg_dma_buf;
8159 mempool_free(mboxq, phba->mbox_mem_pool);
8161 /* Verify OAS is supported */
8162 lpfc_sli4_oas_verify(phba);
8164 /* Verify RAS support on adapter */
8165 lpfc_sli4_ras_init(phba);
8167 /* Verify all the SLI4 queues */
8168 rc = lpfc_sli4_queue_verify(phba);
8170 goto out_free_cmd_rsp_buf;
8172 /* Create driver internal CQE event pool */
8173 rc = lpfc_sli4_cq_event_pool_create(phba);
8175 goto out_free_cmd_rsp_buf;
8177 /* Initialize sgl lists per host */
8178 lpfc_init_sgl_list(phba);
8180 /* Allocate and initialize active sgl array */
8181 rc = lpfc_init_active_sgl_array(phba);
8183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8184 "1430 Failed to initialize sgl list.\n");
8185 goto out_destroy_cq_event_pool;
8187 rc = lpfc_sli4_init_rpi_hdrs(phba);
8189 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8190 "1432 Failed to initialize rpi headers.\n");
8191 goto out_free_active_sgl;
8194 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8195 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
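/*
 * Standard DIV_ROUND_UP idiom for sizing a bitmap. For example, if
 * LPFC_SLI4_FCF_TBL_INDX_MAX were 64 on a 64-bit kernel
 * (BITS_PER_LONG == 64), longs would be 1 and a single unsigned long
 * would track every eligible FCF index.
 */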
8196 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8198 if (!phba->fcf.fcf_rr_bmask) {
8199 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8200 "2759 Failed allocate memory for FCF round "
8201 "robin failover bmask\n");
8203 goto out_remove_rpi_hdrs;
8206 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8207 sizeof(struct lpfc_hba_eq_hdl),
8209 if (!phba->sli4_hba.hba_eq_hdl) {
8210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8211 "2572 Failed allocate memory for "
8212 "fast-path per-EQ handle array\n");
8214 goto out_free_fcf_rr_bmask;
8217 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8218 sizeof(struct lpfc_vector_map_info),
8220 if (!phba->sli4_hba.cpu_map) {
8221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8222 "3327 Failed allocate memory for msi-x "
8223 "interrupt vector mapping\n");
8225 goto out_free_hba_eq_hdl;
8228 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8229 if (!phba->sli4_hba.eq_info) {
8230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8231 "3321 Failed allocation for per_cpu stats\n");
8233 goto out_free_hba_cpu_map;
8236 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8237 sizeof(*phba->sli4_hba.idle_stat),
8239 if (!phba->sli4_hba.idle_stat) {
8240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8241 "3390 Failed allocation for idle_stat\n");
8243 goto out_free_hba_eq_info;
8246 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8247 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8248 if (!phba->sli4_hba.c_stat) {
8249 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8250 "3332 Failed allocating per cpu hdwq stats\n");
8252 goto out_free_hba_idle_stat;
8256 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8257 if (!phba->cmf_stat) {
8258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8259 "3331 Failed allocating per cpu cgn stats\n");
8261 goto out_free_hba_hdwq_info;
8265 * Enable SR-IOV virtual functions if supported and configured
8266 * through the module parameter.
8268 if (phba->cfg_sriov_nr_virtfn > 0) {
8269 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8270 phba->cfg_sriov_nr_virtfn);
8272 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8273 "3020 Requested number of SR-IOV "
8274 "virtual functions (%d) is not "
8276 phba->cfg_sriov_nr_virtfn);
8277 phba->cfg_sriov_nr_virtfn = 0;
8283 out_free_hba_hdwq_info:
8284 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8285 free_percpu(phba->sli4_hba.c_stat);
8286 out_free_hba_idle_stat:
8288 kfree(phba->sli4_hba.idle_stat);
8289 out_free_hba_eq_info:
8290 free_percpu(phba->sli4_hba.eq_info);
8291 out_free_hba_cpu_map:
8292 kfree(phba->sli4_hba.cpu_map);
8293 out_free_hba_eq_hdl:
8294 kfree(phba->sli4_hba.hba_eq_hdl);
8295 out_free_fcf_rr_bmask:
8296 kfree(phba->fcf.fcf_rr_bmask);
8297 out_remove_rpi_hdrs:
8298 lpfc_sli4_remove_rpi_hdrs(phba);
8299 out_free_active_sgl:
8300 lpfc_free_active_sgl(phba);
8301 out_destroy_cq_event_pool:
8302 lpfc_sli4_cq_event_pool_destroy(phba);
8303 out_free_cmd_rsp_buf:
8304 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8305 phba->lpfc_cmd_rsp_buf_pool = NULL;
8306 out_free_sg_dma_buf:
8307 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8308 phba->lpfc_sg_dma_buf_pool = NULL;
8310 lpfc_destroy_bootstrap_mbox(phba);
8312 lpfc_mem_free(phba);
8313 out_destroy_workqueue:
8314 destroy_workqueue(phba->wq);
8320 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8321 * @phba: pointer to lpfc hba data structure.
8323 * This routine is invoked to unset the driver internal resources set up
8324 * specifically to support the SLI-4 HBA device it is attached to.
8327 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8329 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8331 free_percpu(phba->sli4_hba.eq_info);
8332 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8333 free_percpu(phba->sli4_hba.c_stat);
8335 free_percpu(phba->cmf_stat);
8336 kfree(phba->sli4_hba.idle_stat);
8338 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8339 kfree(phba->sli4_hba.cpu_map);
8340 phba->sli4_hba.num_possible_cpu = 0;
8341 phba->sli4_hba.num_present_cpu = 0;
8342 phba->sli4_hba.curr_disp_cpu = 0;
8343 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8345 /* Free memory allocated for fast-path work queue handles */
8346 kfree(phba->sli4_hba.hba_eq_hdl);
8348 /* Free the allocated rpi headers. */
8349 lpfc_sli4_remove_rpi_hdrs(phba);
8350 lpfc_sli4_remove_rpis(phba);
8352 /* Free eligible FCF index bmask */
8353 kfree(phba->fcf.fcf_rr_bmask);
8355 /* Free the ELS sgl list */
8356 lpfc_free_active_sgl(phba);
8357 lpfc_free_els_sgl_list(phba);
8358 lpfc_free_nvmet_sgl_list(phba);
8360 /* Free the completion queue EQ event pool */
8361 lpfc_sli4_cq_event_release_all(phba);
8362 lpfc_sli4_cq_event_pool_destroy(phba);
8364 /* Release resource identifiers. */
8365 lpfc_sli4_dealloc_resource_identifiers(phba);
8367 /* Free the bsmbx region. */
8368 lpfc_destroy_bootstrap_mbox(phba);
8370 /* Free the SLI Layer memory with SLI4 HBAs */
8371 lpfc_mem_free_all(phba);
8373 /* Free the current connect table */
8374 list_for_each_entry_safe(conn_entry, next_conn_entry,
8375 &phba->fcf_conn_rec_list, list) {
8376 list_del_init(&conn_entry->list);
8384 * lpfc_init_api_table_setup - Set up init api function jump table
8385 * @phba: The hba struct for which this call is being executed.
8386 * @dev_grp: The HBA PCI-Device group number.
8388 * This routine sets up the device INIT interface API function jump table
8391 * Returns: 0 - success, -ENODEV - failure.
8394 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8396 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8397 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8398 phba->lpfc_selective_reset = lpfc_selective_reset;
8400 case LPFC_PCI_DEV_LP:
8401 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8402 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8403 phba->lpfc_stop_port = lpfc_stop_port_s3;
8405 case LPFC_PCI_DEV_OC:
8406 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8407 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8408 phba->lpfc_stop_port = lpfc_stop_port_s4;
8411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8412 "1431 Invalid HBA PCI-device group: 0x%x\n",
8420 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8421 * @phba: pointer to lpfc hba data structure.
8423 * This routine is invoked to set up the driver internal resources after the
8424 * device-specific resource setup to support the HBA device it is attached to.
8428 * other values - error
8431 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8435 /* Startup the kernel thread for this host adapter. */
8436 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8437 "lpfc_worker_%d", phba->brd_no);
8438 if (IS_ERR(phba->worker_thread)) {
8439 error = PTR_ERR(phba->worker_thread);
8447 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8448 * @phba: pointer to lpfc hba data structure.
8450 * This routine is invoked to unset the driver internal resources set up after
8451 * the device-specific resource setup for supporting the HBA device it is attached to.
8455 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8458 flush_workqueue(phba->wq);
8459 destroy_workqueue(phba->wq);
8463 /* Stop kernel worker thread */
8464 if (phba->worker_thread)
8465 kthread_stop(phba->worker_thread);
8469 * lpfc_free_iocb_list - Free iocb list.
8470 * @phba: pointer to lpfc hba data structure.
8472 * This routine is invoked to free the driver's IOCB list and memory.
8475 lpfc_free_iocb_list(struct lpfc_hba *phba)
8477 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8479 spin_lock_irq(&phba->hbalock);
8480 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8481 &phba->lpfc_iocb_list, list) {
8482 list_del(&iocbq_entry->list);
8484 phba->total_iocbq_bufs--;
8486 spin_unlock_irq(&phba->hbalock);
8492 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8493 * @phba: pointer to lpfc hba data structure.
8494 * @iocb_count: number of requested iocbs
8496 * This routine is invoked to allocate and initialize the driver's IOCB
8497 * list and set up the IOCB tag array accordingly.
8501 * other values - error
8504 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8506 struct lpfc_iocbq *iocbq_entry = NULL;
8510 /* Initialize and populate the iocb list per host. */
8511 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8512 for (i = 0; i < iocb_count; i++) {
8513 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8514 if (iocbq_entry == NULL) {
8515 printk(KERN_ERR "%s: only allocated %d iocbs of "
8516 "expected %d count. Unloading driver.\n",
8517 __func__, i, iocb_count);
8518 goto out_free_iocbq;
8521 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8524 printk(KERN_ERR "%s: failed to allocate IOTAG. "
8525 "Unloading driver.\n", __func__);
8526 goto out_free_iocbq;
8528 iocbq_entry->sli4_lxritag = NO_XRI;
8529 iocbq_entry->sli4_xritag = NO_XRI;
8531 spin_lock_irq(&phba->hbalock);
8532 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8533 phba->total_iocbq_bufs++;
8534 spin_unlock_irq(&phba->hbalock);
8540 lpfc_free_iocb_list(phba);
8546 * lpfc_free_sgl_list - Free a given sgl list.
8547 * @phba: pointer to lpfc hba data structure.
8548 * @sglq_list: pointer to the head of sgl list.
8550 * This routine is invoked to free a given sgl list and its memory.
8553 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8555 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8557 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8558 list_del(&sglq_entry->list);
8559 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8565 * lpfc_free_els_sgl_list - Free els sgl list.
8566 * @phba: pointer to lpfc hba data structure.
8568 * This routine is invoked to free the driver's els sgl list and memory.
8571 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8573 LIST_HEAD(sglq_list);
8575 /* Retrieve all els sgls from driver list */
8576 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8577 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8578 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8580 /* Now free the sgl list */
8581 lpfc_free_sgl_list(phba, &sglq_list);
8585 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8586 * @phba: pointer to lpfc hba data structure.
8588 * This routine is invoked to free the driver's nvmet sgl list and memory.
8591 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8593 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8594 LIST_HEAD(sglq_list);
8596 /* Retrieve all nvmet sgls from driver list */
8597 spin_lock_irq(&phba->hbalock);
8598 spin_lock(&phba->sli4_hba.sgl_list_lock);
8599 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8600 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8601 spin_unlock_irq(&phba->hbalock);
8603 /* Now free the sgl list */
8604 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8605 list_del(&sglq_entry->list);
8606 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8610 /* Update the nvmet_xri_cnt to reflect no current sgls.
8611 * The next initialization cycle sets the count and allocates
8612 * the sgls over again.
8614 phba->sli4_hba.nvmet_xri_cnt = 0;
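/*
 * Locking sketch for the splice above (hierarchy as used throughout
 * this file): hbalock is the outer, IRQ-disabling lock and
 * sgl_list_lock nests inside it:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	spin_lock(&phba->sli4_hba.sgl_list_lock);
 *	... splice entries onto a private list ...
 *	spin_unlock(&phba->sli4_hba.sgl_list_lock);
 *	spin_unlock_irq(&phba->hbalock);
 *
 * The entries are then freed with no locks held, keeping the critical
 * section short.
 */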
8618 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8619 * @phba: pointer to lpfc hba data structure.
8621 * This routine is invoked to allocate the driver's active sgl memory.
8622 * This array will hold the sglq_entry's for active IOs.
8625 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8628 size = sizeof(struct lpfc_sglq *);
8629 size *= phba->sli4_hba.max_cfg_param.max_xri;
8631 phba->sli4_hba.lpfc_sglq_active_list =
8632 kzalloc(size, GFP_KERNEL);
8633 if (!phba->sli4_hba.lpfc_sglq_active_list)
8639 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8640 * @phba: pointer to lpfc hba data structure.
8642 * This routine is invoked to walk through the array of active sglq entries
8643 * and free all of the resources.
8644 * This is just a placeholder for now.
8647 lpfc_free_active_sgl(struct lpfc_hba *phba)
8649 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8653 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8654 * @phba: pointer to lpfc hba data structure.
8656 * This routine is invoked to allocate and initialize the driver's sgl
8657 * list and set up the sgl xritag tag array accordingly.
8661 lpfc_init_sgl_list(struct lpfc_hba *phba)
8663 /* Initialize and populate the sglq list per host/VF. */
8664 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8665 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8666 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8667 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8669 /* els xri-sgl book keeping */
8670 phba->sli4_hba.els_xri_cnt = 0;
8672 /* nvme xri-buffer book keeping */
8673 phba->sli4_hba.io_xri_cnt = 0;
8677 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8678 * @phba: pointer to lpfc hba data structure.
8680 * This routine is invoked to post rpi header templates to the
8681 * port for those SLI4 ports that do not support extents. This routine
8682 * posts a PAGE_SIZE memory region to the port to hold up to
8683 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
8684 * and should be called only when interrupts are disabled.
8688 * -ERROR - otherwise.
8691 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8694 struct lpfc_rpi_hdr *rpi_hdr;
8696 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8697 if (!phba->sli4_hba.rpi_hdrs_in_use)
8699 if (phba->sli4_hba.extents_in_use)
8702 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8704 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8705 "0391 Error during rpi post operation\n");
8706 lpfc_sli4_remove_rpis(phba);
8714 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8715 * @phba: pointer to lpfc hba data structure.
8717 * This routine is invoked to allocate a single 4KB memory region to
8718 * support rpis and store it in the phba. This single region
8719 * provides support for up to 64 rpis. The region is used globally
8723 * A valid rpi hdr on success.
8724 * A NULL pointer on any failure.
8726 struct lpfc_rpi_hdr *
8727 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8729 uint16_t rpi_limit, curr_rpi_range;
8730 struct lpfc_dmabuf *dmabuf;
8731 struct lpfc_rpi_hdr *rpi_hdr;
8734 * If the SLI4 port supports extents, posting the rpi header isn't
8735 * required. Set the expected maximum count and let the actual value
8736 * get set when extents are fully allocated.
8738 if (!phba->sli4_hba.rpi_hdrs_in_use)
8740 if (phba->sli4_hba.extents_in_use)
8743 /* The limit on the logical index is just the max_rpi count. */
8744 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8746 spin_lock_irq(&phba->hbalock);
8748 * Establish the starting RPI in this header block. The starting
8749 * rpi is normalized to a zero base because the physical rpi is port based.
8752 curr_rpi_range = phba->sli4_hba.next_rpi;
8753 spin_unlock_irq(&phba->hbalock);
8755 /* Reached full RPI range */
8756 if (curr_rpi_range == rpi_limit)
8760 * First allocate the protocol header region for the port. The
8761 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8763 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8767 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8768 LPFC_HDR_TEMPLATE_SIZE,
8769 &dmabuf->phys, GFP_KERNEL);
8770 if (!dmabuf->virt) {
8772 goto err_free_dmabuf;
8775 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8777 goto err_free_coherent;
8780 /* Save the rpi header data for cleanup later. */
8781 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8783 goto err_free_coherent;
8785 rpi_hdr->dmabuf = dmabuf;
8786 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8787 rpi_hdr->page_count = 1;
8788 spin_lock_irq(&phba->hbalock);
8790 /* The rpi_hdr stores the logical index only. */
8791 rpi_hdr->start_rpi = curr_rpi_range;
8792 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8793 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8795 spin_unlock_irq(&phba->hbalock);
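/*
 * Coverage example (assuming LPFC_RPI_HDR_COUNT == 64): if next_rpi
 * was 0 when this header was created, the block covers logical rpis
 * 0-63 and next_rpi advances to 64 for the next header allocation.
 */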
8799 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8800 dmabuf->virt, dmabuf->phys);
8807 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8808 * @phba: pointer to lpfc hba data structure.
8810 * This routine is invoked to remove all memory resources allocated
8811 * to support rpis for SLI4 ports not supporting extents. This routine
8812 * presumes the caller has released all rpis consumed by fabric or port
8813 * logins and is prepared to have the header pages removed.
8816 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8818 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8820 if (!phba->sli4_hba.rpi_hdrs_in_use)
8823 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8824 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8825 list_del(&rpi_hdr->list);
8826 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8827 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8828 kfree(rpi_hdr->dmabuf);
8832 /* There are no rpis available to the port now. */
8833 phba->sli4_hba.next_rpi = 0;
8837 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
8838 * @pdev: pointer to pci device data structure.
8840 * This routine is invoked to allocate the driver hba data structure for an
8841 * HBA device. If the allocation is successful, the phba reference to the
8842 * PCI device data structure is set.
8845 * pointer to @phba - successful
8848 static struct lpfc_hba *
8849 lpfc_hba_alloc(struct pci_dev *pdev)
8851 struct lpfc_hba *phba;
8853 /* Allocate memory for HBA structure */
8854 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
8856 dev_err(&pdev->dev, "failed to allocate hba struct\n");
8860 /* Set reference to PCI device in HBA structure */
8861 phba->pcidev = pdev;
8863 /* Assign an unused board number */
8864 phba->brd_no = lpfc_get_instance();
8865 if (phba->brd_no < 0) {
8869 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
8871 spin_lock_init(&phba->ct_ev_lock);
8872 INIT_LIST_HEAD(&phba->ct_ev_waiters);
8878 * lpfc_hba_free - Free driver hba data structure with a device.
8879 * @phba: pointer to lpfc hba data structure.
8881 * This routine is invoked to free the driver hba data structure with an HBA device.
8885 lpfc_hba_free(struct lpfc_hba *phba)
8887 if (phba->sli_rev == LPFC_SLI_REV4)
8888 kfree(phba->sli4_hba.hdwq);
8890 /* Release the driver assigned board number */
8891 idr_remove(&lpfc_hba_index, phba->brd_no);
8893 /* Free memory allocated with sli3 rings */
8894 kfree(phba->sli.sli3_ring);
8895 phba->sli.sli3_ring = NULL;
8902 * lpfc_create_shost - Create hba physical port with associated scsi host.
8903 * @phba: pointer to lpfc hba data structure.
8905 * This routine is invoked to create an HBA physical port and associate a SCSI host with it.
8910 * other values - error
8913 lpfc_create_shost(struct lpfc_hba *phba)
8915 struct lpfc_vport *vport;
8916 struct Scsi_Host *shost;
8918 /* Initialize HBA FC structure */
8919 phba->fc_edtov = FF_DEF_EDTOV;
8920 phba->fc_ratov = FF_DEF_RATOV;
8921 phba->fc_altov = FF_DEF_ALTOV;
8922 phba->fc_arbtov = FF_DEF_ARBTOV;
8924 atomic_set(&phba->sdev_cnt, 0);
8925 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
8929 shost = lpfc_shost_from_vport(vport);
8930 phba->pport = vport;
8932 if (phba->nvmet_support) {
8933 /* Only 1 vport (pport) will support NVME target */
8934 phba->targetport = NULL;
8935 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
8936 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
8937 "6076 NVME Target Found\n");
8940 lpfc_debugfs_initialize(vport);
8941 /* Put reference to SCSI host to driver's device private data */
8942 pci_set_drvdata(phba->pcidev, shost);
8945 * At this point we are fully registered with PSA. In addition,
8946 * any initial discovery should be completed.
8948 vport->load_flag |= FC_ALLOW_FDMI;
8949 if (phba->cfg_enable_SmartSAN ||
8950 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
8952 /* Setup appropriate attribute masks */
8953 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
8954 if (phba->cfg_enable_SmartSAN)
8955 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
8957 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
8963 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
8964 * @phba: pointer to lpfc hba data structure.
8966 * This routine is invoked to destroy the HBA physical port and the associated SCSI host.
8970 lpfc_destroy_shost(struct lpfc_hba *phba)
8972 struct lpfc_vport *vport = phba->pport;
8974 /* Destroy physical port that associated with the SCSI host */
8975 destroy_port(vport);
8981 * lpfc_setup_bg - Setup Block guard structures and debug areas.
8982 * @phba: pointer to lpfc hba data structure.
8983 * @shost: the shost to be used to detect Block guard settings.
8985 * This routine sets up the local Block guard protocol settings for @shost.
8986 * This routine also allocates memory for debugging bg buffers.
8989 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
8994 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
8995 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8996 "1478 Registering BlockGuard with the "
8999 old_mask = phba->cfg_prot_mask;
9000 old_guard = phba->cfg_prot_guard;
9002 /* Only allow supported values */
9003 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9004 SHOST_DIX_TYPE0_PROTECTION |
9005 SHOST_DIX_TYPE1_PROTECTION);
9006 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9007 SHOST_DIX_GUARD_CRC);
9009 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9010 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9011 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9013 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9014 if ((old_mask != phba->cfg_prot_mask) ||
9015 (old_guard != phba->cfg_prot_guard))
9016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9017 "1475 Registering BlockGuard with the "
9018 "SCSI layer: mask %d guard %d\n",
9019 phba->cfg_prot_mask,
9020 phba->cfg_prot_guard);
9022 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9023 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9025 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9026 "1479 Not Registering BlockGuard with the SCSI "
9027 "layer, Bad protection parameters: %d %d\n",
9028 old_mask, old_guard);
9033 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9034 * @phba: pointer to lpfc hba data structure.
9036 * This routine is invoked to perform all the necessary post initialization
9037 * setup for the device.
9040 lpfc_post_init_setup(struct lpfc_hba *phba)
9042 struct Scsi_Host *shost;
9043 struct lpfc_adapter_event_header adapter_event;
9045 /* Get the default values for Model Name and Description */
9046 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9049 * hba setup may have changed the hba_queue_depth so we need to
9050 * adjust the value of can_queue.
9052 shost = pci_get_drvdata(phba->pcidev);
9053 shost->can_queue = phba->cfg_hba_queue_depth - 10;
9055 lpfc_host_attrib_init(shost);
9057 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9058 spin_lock_irq(shost->host_lock);
9059 lpfc_poll_start_timer(phba);
9060 spin_unlock_irq(shost->host_lock);
9063 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9064 "0428 Perform SCSI scan\n");
9065 /* Send board arrival event to upper layer */
9066 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9067 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9068 fc_host_post_vendor_event(shost, fc_get_event_number(),
9069 sizeof(adapter_event),
9070 (char *) &adapter_event,
9076 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9077 * @phba: pointer to lpfc hba data structure.
9079 * This routine is invoked to set up the PCI device memory space for device
9080 * with SLI-3 interface spec.
9084 * other values - error
9087 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9089 struct pci_dev *pdev = phba->pcidev;
9090 unsigned long bar0map_len, bar2map_len;
9098 /* Set the device DMA mask size */
9099 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9101 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9106 /* Get the bus address of Bar0 and Bar2 and the number of bytes
9107 * required by each mapping.
9109 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9110 bar0map_len = pci_resource_len(pdev, 0);
9112 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9113 bar2map_len = pci_resource_len(pdev, 2);
9115 /* Map HBA SLIM to a kernel virtual address. */
9116 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9117 if (!phba->slim_memmap_p) {
9118 dev_printk(KERN_ERR, &pdev->dev,
9119 "ioremap failed for SLIM memory.\n");
9123 /* Map HBA Control Registers to a kernel virtual address. */
9124 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9125 if (!phba->ctrl_regs_memmap_p) {
9126 dev_printk(KERN_ERR, &pdev->dev,
9127 "ioremap failed for HBA control registers.\n");
9128 goto out_iounmap_slim;
9131 /* Allocate memory for SLI-2 structures */
9132 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9133 &phba->slim2p.phys, GFP_KERNEL);
9134 if (!phba->slim2p.virt)
9137 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9138 phba->mbox_ext = (phba->slim2p.virt +
9139 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9140 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9141 phba->IOCBs = (phba->slim2p.virt +
9142 offsetof(struct lpfc_sli2_slim, IOCBs));
9144 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9145 lpfc_sli_hbq_size(),
9146 &phba->hbqslimp.phys,
9148 if (!phba->hbqslimp.virt)
9151 hbq_count = lpfc_sli_hbq_count();
9152 ptr = phba->hbqslimp.virt;
9153 for (i = 0; i < hbq_count; ++i) {
9154 phba->hbqs[i].hbq_virt = ptr;
9155 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9156 ptr += (lpfc_hbq_defs[i]->entry_count *
9157 sizeof(struct lpfc_hbq_entry));
9159 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9160 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9162 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9164 phba->MBslimaddr = phba->slim_memmap_p;
9165 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9166 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9167 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9168 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9173 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9174 phba->slim2p.virt, phba->slim2p.phys);
9176 iounmap(phba->ctrl_regs_memmap_p);
9178 iounmap(phba->slim_memmap_p);
9184 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9185 * @phba: pointer to lpfc hba data structure.
9187 * This routine is invoked to unset the PCI device memory space for device
9188 * with SLI-3 interface spec.
9191 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9193 struct pci_dev *pdev;
9195 /* Obtain PCI device reference */
9199 pdev = phba->pcidev;
9201 /* Free coherent DMA memory allocated */
9202 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9203 phba->hbqslimp.virt, phba->hbqslimp.phys);
9204 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9205 phba->slim2p.virt, phba->slim2p.phys);
9207 /* I/O memory unmap */
9208 iounmap(phba->ctrl_regs_memmap_p);
9209 iounmap(phba->slim_memmap_p);
9215 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9216 * @phba: pointer to lpfc hba data structure.
9218 * This routine is invoked to wait for the SLI4 device Power On Self Test (POST)
9219 * to complete and check its status.
9221 * Return 0 if successful, otherwise -ENODEV.
9224 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9226 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9227 struct lpfc_register reg_data;
9228 int i, port_error = 0;
9231 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9232 memset(&reg_data, 0, sizeof(reg_data));
9233 if (!phba->sli4_hba.PSMPHRregaddr)
9236 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9237 for (i = 0; i < 3000; i++) {
9238 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9239 &portsmphr_reg.word0) ||
9240 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9241 /* Port has a fatal POST error, break out */
9242 port_error = -ENODEV;
9245 if (LPFC_POST_STAGE_PORT_READY ==
9246 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9252 * If there was a port error during POST, then don't proceed with
9253 * other register reads as the data may not be valid. Just exit.
9256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9257 "1408 Port Failed POST - portsmphr=0x%x, "
9258 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9259 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9260 portsmphr_reg.word0,
9261 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9262 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9263 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9264 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9265 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9266 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9267 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9268 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9270 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9271 "2534 Device Info: SLIFamily=0x%x, "
9272 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9273 "SLIHint_2=0x%x, FT=0x%x\n",
9274 bf_get(lpfc_sli_intf_sli_family,
9275 &phba->sli4_hba.sli_intf),
9276 bf_get(lpfc_sli_intf_slirev,
9277 &phba->sli4_hba.sli_intf),
9278 bf_get(lpfc_sli_intf_if_type,
9279 &phba->sli4_hba.sli_intf),
9280 bf_get(lpfc_sli_intf_sli_hint1,
9281 &phba->sli4_hba.sli_intf),
9282 bf_get(lpfc_sli_intf_sli_hint2,
9283 &phba->sli4_hba.sli_intf),
9284 bf_get(lpfc_sli_intf_func_type,
9285 &phba->sli4_hba.sli_intf));
9287 * Check for other Port errors during the initialization
9288 * process. Fail the load if the port did not come up correctly.
9291 if_type = bf_get(lpfc_sli_intf_if_type,
9292 &phba->sli4_hba.sli_intf);
9294 case LPFC_SLI_INTF_IF_TYPE_0:
9295 phba->sli4_hba.ue_mask_lo =
9296 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9297 phba->sli4_hba.ue_mask_hi =
9298 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9300 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9302 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9303 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9304 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9305 lpfc_printf_log(phba, KERN_ERR,
9307 "1422 Unrecoverable Error "
9308 "Detected during POST "
9309 "uerr_lo_reg=0x%x, "
9310 "uerr_hi_reg=0x%x, "
9311 "ue_mask_lo_reg=0x%x, "
9312 "ue_mask_hi_reg=0x%x\n",
9315 phba->sli4_hba.ue_mask_lo,
9316 phba->sli4_hba.ue_mask_hi);
9317 port_error = -ENODEV;
9320 case LPFC_SLI_INTF_IF_TYPE_2:
9321 case LPFC_SLI_INTF_IF_TYPE_6:
9322 /* Final checks. The port status should be clean. */
9323 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9325 (bf_get(lpfc_sliport_status_err, &reg_data) &&
9326 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9327 phba->work_status[0] =
9328 readl(phba->sli4_hba.u.if_type2.
9330 phba->work_status[1] =
9331 readl(phba->sli4_hba.u.if_type2.
9333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9334 "2888 Unrecoverable port error "
9335 "following POST: port status reg "
9336 "0x%x, port_smphr reg 0x%x, "
9337 "error 1=0x%x, error 2=0x%x\n",
9339 portsmphr_reg.word0,
9340 phba->work_status[0],
9341 phba->work_status[1]);
9342 port_error = -ENODEV;
9345 case LPFC_SLI_INTF_IF_TYPE_1:
9354 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9355 * @phba: pointer to lpfc hba data structure.
9356 * @if_type: The SLI4 interface type getting configured.
9358 * This routine is invoked to set up the SLI4 BAR0 PCI config space register memory map.
9362 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9365 case LPFC_SLI_INTF_IF_TYPE_0:
9366 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9367 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9368 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9369 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9370 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9371 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9372 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9373 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9374 phba->sli4_hba.SLIINTFregaddr =
9375 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9377 case LPFC_SLI_INTF_IF_TYPE_2:
9378 phba->sli4_hba.u.if_type2.EQDregaddr =
9379 phba->sli4_hba.conf_regs_memmap_p +
9380 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9381 phba->sli4_hba.u.if_type2.ERR1regaddr =
9382 phba->sli4_hba.conf_regs_memmap_p +
9383 LPFC_CTL_PORT_ER1_OFFSET;
9384 phba->sli4_hba.u.if_type2.ERR2regaddr =
9385 phba->sli4_hba.conf_regs_memmap_p +
9386 LPFC_CTL_PORT_ER2_OFFSET;
9387 phba->sli4_hba.u.if_type2.CTRLregaddr =
9388 phba->sli4_hba.conf_regs_memmap_p +
9389 LPFC_CTL_PORT_CTL_OFFSET;
9390 phba->sli4_hba.u.if_type2.STATUSregaddr =
9391 phba->sli4_hba.conf_regs_memmap_p +
9392 LPFC_CTL_PORT_STA_OFFSET;
9393 phba->sli4_hba.SLIINTFregaddr =
9394 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9395 phba->sli4_hba.PSMPHRregaddr =
9396 phba->sli4_hba.conf_regs_memmap_p +
9397 LPFC_CTL_PORT_SEM_OFFSET;
9398 phba->sli4_hba.RQDBregaddr =
9399 phba->sli4_hba.conf_regs_memmap_p +
9400 LPFC_ULP0_RQ_DOORBELL;
9401 phba->sli4_hba.WQDBregaddr =
9402 phba->sli4_hba.conf_regs_memmap_p +
9403 LPFC_ULP0_WQ_DOORBELL;
9404 phba->sli4_hba.CQDBregaddr =
9405 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9406 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
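/*
 * On if_type 2 ports the EQ and CQ share one combined doorbell
 * (LPFC_EQCQ_DOORBELL), so EQDBregaddr simply aliases CQDBregaddr.
 * The if_type 6 case below maps distinct per-queue doorbells instead.
 */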
9407 phba->sli4_hba.MQDBregaddr =
9408 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9409 phba->sli4_hba.BMBXregaddr =
9410 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9412 case LPFC_SLI_INTF_IF_TYPE_6:
9413 phba->sli4_hba.u.if_type2.EQDregaddr =
9414 phba->sli4_hba.conf_regs_memmap_p +
9415 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9416 phba->sli4_hba.u.if_type2.ERR1regaddr =
9417 phba->sli4_hba.conf_regs_memmap_p +
9418 LPFC_CTL_PORT_ER1_OFFSET;
9419 phba->sli4_hba.u.if_type2.ERR2regaddr =
9420 phba->sli4_hba.conf_regs_memmap_p +
9421 LPFC_CTL_PORT_ER2_OFFSET;
9422 phba->sli4_hba.u.if_type2.CTRLregaddr =
9423 phba->sli4_hba.conf_regs_memmap_p +
9424 LPFC_CTL_PORT_CTL_OFFSET;
9425 phba->sli4_hba.u.if_type2.STATUSregaddr =
9426 phba->sli4_hba.conf_regs_memmap_p +
9427 LPFC_CTL_PORT_STA_OFFSET;
9428 phba->sli4_hba.PSMPHRregaddr =
9429 phba->sli4_hba.conf_regs_memmap_p +
9430 LPFC_CTL_PORT_SEM_OFFSET;
9431 phba->sli4_hba.BMBXregaddr =
9432 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9434 case LPFC_SLI_INTF_IF_TYPE_1:
9436 dev_printk(KERN_ERR, &phba->pcidev->dev,
9437 "FATAL - unsupported SLI4 interface type - %d\n",
9444 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9445 * @phba: pointer to lpfc hba data structure.
9446 * @if_type: sli if type to operate on.
9448 * This routine is invoked to set up SLI4 BAR1 register memory map.
9451 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9454 case LPFC_SLI_INTF_IF_TYPE_0:
9455 phba->sli4_hba.PSMPHRregaddr =
9456 phba->sli4_hba.ctrl_regs_memmap_p +
9457 LPFC_SLIPORT_IF0_SMPHR;
9458 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9460 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9462 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9465 case LPFC_SLI_INTF_IF_TYPE_6:
9466 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9467 LPFC_IF6_RQ_DOORBELL;
9468 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9469 LPFC_IF6_WQ_DOORBELL;
9470 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9471 LPFC_IF6_CQ_DOORBELL;
9472 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9473 LPFC_IF6_EQ_DOORBELL;
9474 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9475 LPFC_IF6_MQ_DOORBELL;
9477 case LPFC_SLI_INTF_IF_TYPE_2:
9478 case LPFC_SLI_INTF_IF_TYPE_1:
9480 dev_err(&phba->pcidev->dev,
9481 "FATAL - unsupported SLI4 interface type - %d\n",
9488 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9489 * @phba: pointer to lpfc hba data structure.
9490 * @vf: virtual function number
9492 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9493 * based on the given virtual function number, @vf.
9495 * Return 0 if successful, otherwise -ENODEV.
9498 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9500 if (vf > LPFC_VIR_FUNC_MAX)
9503 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9504 vf * LPFC_VFR_PAGE_SIZE +
9505 LPFC_ULP0_RQ_DOORBELL);
9506 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9507 vf * LPFC_VFR_PAGE_SIZE +
9508 LPFC_ULP0_WQ_DOORBELL);
9509 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9510 vf * LPFC_VFR_PAGE_SIZE +
9511 LPFC_EQCQ_DOORBELL);
9512 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9513 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9514 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9515 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9516 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9521 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9522 * @phba: pointer to lpfc hba data structure.
9524 * This routine is invoked to create the bootstrap mailbox
9525 * region consistent with the SLI-4 interface spec. This
9526 * routine allocates all memory necessary to communicate
9527 * mailbox commands to the port and sets up all alignment
9528 * needs. No locks are expected to be held when calling this routine.
9533 * -ENOMEM - could not allocate memory.
9536 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9539 struct lpfc_dmabuf *dmabuf;
9540 struct dma_address *dma_address;
9544 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9549 * The bootstrap mailbox region consists of 2 parts
9550 * plus an alignment restriction of 16 bytes.
9552 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9553 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9554 &dmabuf->phys, GFP_KERNEL);
9555 if (!dmabuf->virt) {
9561 * Initialize the bootstrap mailbox pointers now so that the register
9562 * operations are simple later. The mailbox dma address is required
9563 * to be 16-byte aligned. Also align the virtual memory as each
9564 * mailbox is copied into the bmbx mailbox region before issuing the
9565 * command to the port.
9567 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9568 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9570 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9571 LPFC_ALIGN_16_BYTE);
9572 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9573 LPFC_ALIGN_16_BYTE);
9576 * Set the high and low physical addresses now. The SLI4 alignment
9577 * requirement is 16 bytes and the mailbox is posted to the port
9578 * as two 30-bit addresses. The other data is a bit marking whether
9579 * the 30-bit address is the high or low address.
9580 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
9581 * cleanly on 32-bit machines.
9583 dma_address = &phba->sli4_hba.bmbx.dma_address;
9584 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9585 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9586 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9587 LPFC_BMBX_BIT1_ADDR_HI);
9589 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9590 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9591 LPFC_BMBX_BIT1_ADDR_LO);
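/*
 * Address-split sketch: with aphys 16-byte aligned, bits 3:0 are zero
 * and the remaining bits are posted as two 30-bit halves:
 *
 *	addr_lo = (((aphys >> 4)  & 0x3fffffff) << 2) | BIT1_ADDR_LO
 *	addr_hi = (((aphys >> 34) & 0x3fffffff) << 2) | BIT1_ADDR_HI
 *
 * For an illustrative aphys of 0x1234567890, the low half is
 * 0x23456789 and the high half is 0x4; the BIT1 flag in bit 0 tells
 * the port which half of the address each register write carries.
 */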
9596 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9597 * @phba: pointer to lpfc hba data structure.
9599 * This routine is invoked to tear down the bootstrap mailbox
9600 * region and release all host resources. This routine requires
9601 * the caller to ensure all mailbox commands have been recovered, that no
9602 * additional mailbox commands are sent, and that interrupts are disabled
9603 * before calling this routine.
9607 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9609 dma_free_coherent(&phba->pcidev->dev,
9610 phba->sli4_hba.bmbx.bmbx_size,
9611 phba->sli4_hba.bmbx.dmabuf->virt,
9612 phba->sli4_hba.bmbx.dmabuf->phys);
9614 kfree(phba->sli4_hba.bmbx.dmabuf);
9615 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9618 static const char * const lpfc_topo_to_str[] = {
9628 #define LINK_FLAGS_DEF 0x0
9629 #define LINK_FLAGS_P2P 0x1
9630 #define LINK_FLAGS_LOOP 0x2
9632 * lpfc_map_topology - Map the topology read from READ_CONFIG
9633 * @phba: pointer to lpfc hba data structure.
9634 * @rd_config: pointer to read config data
9636 * This routine is invoked to map the topology values as read
9637 * from the read config mailbox command. If the persistent
9638 * topology feature is supported, the firmware will provide the
9639 * saved topology information to be used in INIT_LINK.
9642 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9646 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9647 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9648 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9650 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9651 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9654 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9655 "2019 FW does not support persistent topology "
9656 "Using driver parameter defined value [%s]",
9657 lpfc_topo_to_str[phba->cfg_topology]);
9660 /* FW supports persistent topology - override module parameter value */
9661 phba->hba_flag |= HBA_PERSISTENT_TOPO;
9663 /* if (ASIC_GEN_NUM >= 0xC) */
9664 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9665 LPFC_SLI_INTF_IF_TYPE_6) ||
9666 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9667 LPFC_SLI_INTF_FAMILY_G6)) {
9669 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9670 ? FLAGS_TOPOLOGY_MODE_LOOP
9671 : FLAGS_TOPOLOGY_MODE_PT_PT);
9673 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9677 /* If topology failover set - pt is '0' or '1' */
9678 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9679 FLAGS_TOPOLOGY_MODE_LOOP_PT);
9681 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9682 ? FLAGS_TOPOLOGY_MODE_PT_PT
9683 : FLAGS_TOPOLOGY_MODE_LOOP);
9686 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9687 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9688 "2020 Using persistent topology value [%s]",
9689 lpfc_topo_to_str[phba->cfg_topology]);
9691 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9692 "2021 Invalid topology values from FW "
9693 "Using driver parameter defined value [%s]",
9694 lpfc_topo_to_str[phba->cfg_topology]);
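/*
 * Summary of the mapping above (ptv = persistent topology valid,
 * tf = topology failover, pt = port topology):
 *
 *	ptv == 0          -> keep the driver module-parameter topology
 *	ptv == 1, tf == 1 -> failover mode, PT_LOOP or LOOP_PT per pt
 *	ptv == 1, tf == 0 -> fixed mode, PT_PT or LOOP per pt
 *
 * On G6/if_type 6 ASICs topology failover is not supported, so a set
 * tf bit falls back to the non-persistent driver value instead.
 */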
9699 * lpfc_sli4_read_config - Get the config parameters.
9700 * @phba: pointer to lpfc hba data structure.
9702 * This routine is invoked to read the configuration parameters from the HBA.
9703 * The configuration parameters are used to set the base and maximum values
9704 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
9705 * allocation for the port.
9709 * -ENOMEM - No available memory
9710 * -EIO - The mailbox failed to complete successfully.
9713 lpfc_sli4_read_config(struct lpfc_hba *phba)
9716 struct lpfc_mbx_read_config *rd_config;
9717 union lpfc_sli4_cfg_shdr *shdr;
9718 uint32_t shdr_status, shdr_add_status;
9719 struct lpfc_mbx_get_func_cfg *get_func_cfg;
9720 struct lpfc_rsrc_desc_fcfcoe *desc;
9722 uint16_t forced_link_speed;
9723 uint32_t if_type, qmin;
9724 int length, i, rc = 0, rc2;
9726 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9728 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9729 "2011 Unable to allocate memory for issuing "
9730 "SLI_CONFIG_SPECIAL mailbox command\n");
9734 lpfc_read_config(phba, pmb);
9736 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9737 if (rc != MBX_SUCCESS) {
9738 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9739 "2012 Mailbox failed , mbxCmd x%x "
9740 "READ_CONFIG, mbxStatus x%x\n",
9741 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9742 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9745 rd_config = &pmb->u.mqe.un.rd_config;
9746 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9747 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9748 phba->sli4_hba.lnk_info.lnk_tp =
9749 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9750 phba->sli4_hba.lnk_info.lnk_no =
9751 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9752 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9753 "3081 lnk_type:%d, lnk_numb:%d\n",
9754 phba->sli4_hba.lnk_info.lnk_tp,
9755 phba->sli4_hba.lnk_info.lnk_no);
9757 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9758 "3082 Mailbox (x%x) returned ldv:x0\n",
9759 bf_get(lpfc_mqe_command, &pmb->u.mqe));
9760 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9761 phba->bbcredit_support = 1;
9762 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9765 phba->sli4_hba.conf_trunk =
9766 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9767 phba->sli4_hba.extents_in_use =
9768 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9769 phba->sli4_hba.max_cfg_param.max_xri =
9770 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9771 /* Reduce resource usage in kdump environment */
9772 if (is_kdump_kernel() &&
9773 phba->sli4_hba.max_cfg_param.max_xri > 512)
9774 phba->sli4_hba.max_cfg_param.max_xri = 512;
9775 phba->sli4_hba.max_cfg_param.xri_base =
9776 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9777 phba->sli4_hba.max_cfg_param.max_vpi =
9778 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9779 /* Limit the max we support */
9780 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9781 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9782 phba->sli4_hba.max_cfg_param.vpi_base =
9783 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
9784 phba->sli4_hba.max_cfg_param.max_rpi =
9785 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
9786 phba->sli4_hba.max_cfg_param.rpi_base =
9787 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
9788 phba->sli4_hba.max_cfg_param.max_vfi =
9789 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
9790 phba->sli4_hba.max_cfg_param.vfi_base =
9791 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
9792 phba->sli4_hba.max_cfg_param.max_fcfi =
9793 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
9794 phba->sli4_hba.max_cfg_param.max_eq =
9795 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
9796 phba->sli4_hba.max_cfg_param.max_rq =
9797 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
9798 phba->sli4_hba.max_cfg_param.max_wq =
9799 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
9800 phba->sli4_hba.max_cfg_param.max_cq =
9801 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
9802 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
9803 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
9804 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
9805 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
9806 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
9807 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
9808 phba->max_vports = phba->max_vpi;
9810 /* Next decide on FPIN or Signal E2E CGN support
9811 * For congestion alarms and warnings valid combination are:
9812 * 1. FPIN alarms / FPIN warnings
9813 * 2. Signal alarms / Signal warnings
9814 * 3. FPIN alarms / Signal warnings
9815 * 4. Signal alarms / FPIN warnings
9817 * Initialize the adapter frequency to 100 mSecs
9819 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
9820 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
9821 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
9823 if (lpfc_use_cgn_signal) {
9824 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
9825 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
9826 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
9828 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
9829 /* MUST support both alarm and warning
9830 * because EDC does not support alarm alone.
9832 if (phba->cgn_reg_signal !=
9833 EDC_CG_SIG_WARN_ONLY) {
9834 /* Must support both or none */
9835 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
9836 phba->cgn_reg_signal =
9837 EDC_CG_SIG_NOTSUPPORTED;
9839 phba->cgn_reg_signal =
9840 EDC_CG_SIG_WARN_ALARM;
9841 phba->cgn_reg_fpin =
9847 /* Set the congestion initial signal and fpin values. */
9848 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
9849 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
9851 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
9852 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
9853 phba->cgn_reg_signal, phba->cgn_reg_fpin);
9855 lpfc_map_topology(phba, rd_config);
9856 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9857 "2003 cfg params Extents? %d "
9862 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
9863 phba->sli4_hba.extents_in_use,
9864 phba->sli4_hba.max_cfg_param.xri_base,
9865 phba->sli4_hba.max_cfg_param.max_xri,
9866 phba->sli4_hba.max_cfg_param.vpi_base,
9867 phba->sli4_hba.max_cfg_param.max_vpi,
9868 phba->sli4_hba.max_cfg_param.vfi_base,
9869 phba->sli4_hba.max_cfg_param.max_vfi,
9870 phba->sli4_hba.max_cfg_param.rpi_base,
9871 phba->sli4_hba.max_cfg_param.max_rpi,
9872 phba->sli4_hba.max_cfg_param.max_fcfi,
9873 phba->sli4_hba.max_cfg_param.max_eq,
9874 phba->sli4_hba.max_cfg_param.max_cq,
9875 phba->sli4_hba.max_cfg_param.max_wq,
9876 phba->sli4_hba.max_cfg_param.max_rq,
9880 * Calculate queue resources based on how
9881 * many WQ/CQ/EQs are available.
9883 qmin = phba->sli4_hba.max_cfg_param.max_wq;
9884 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
9885 qmin = phba->sli4_hba.max_cfg_param.max_cq;
9886 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
9887 qmin = phba->sli4_hba.max_cfg_param.max_eq;
9889 * What's left after this can go toward NVME / FCP.
9890 * The minus 4 accounts for ELS, NVME LS, MBOX
9891 * plus one extra. When configured for
9892 * NVMET, FCP io channel WQs are not created.
9896 /* Check to see if there is enough for NVME */
9897 if ((phba->cfg_irq_chann > qmin) ||
9898 (phba->cfg_hdw_queue > qmin)) {
9899 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9900 "2005 Reducing Queues - "
9901 "FW resource limitation: "
9902 "WQ %d CQ %d EQ %d: min %d: "
9904 phba->sli4_hba.max_cfg_param.max_wq,
9905 phba->sli4_hba.max_cfg_param.max_cq,
9906 phba->sli4_hba.max_cfg_param.max_eq,
9907 qmin, phba->cfg_irq_chann,
9908 phba->cfg_hdw_queue);
9910 if (phba->cfg_irq_chann > qmin)
9911 phba->cfg_irq_chann = qmin;
9912 if (phba->cfg_hdw_queue > qmin)
9913 phba->cfg_hdw_queue = qmin;
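/*
 * Worked example (illustrative values): with max_wq = 128, max_cq = 160
 * and max_eq = 96, qmin starts at 96; per the comment above it is then
 * reduced by 4 (ELS + NVME LS + MBOX + one extra) to 92, and a
 * configured cfg_irq_chann or cfg_hdw_queue of 128 would be clamped
 * down to 92 here.
 */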
9920 /* Update link speed if forced link speed is supported */
9921 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9922 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9924 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
9925 if (forced_link_speed) {
9926 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
9928 switch (forced_link_speed) {
9930 phba->cfg_link_speed =
9931 LPFC_USER_LINK_SPEED_1G;
9934 phba->cfg_link_speed =
9935 LPFC_USER_LINK_SPEED_2G;
9938 phba->cfg_link_speed =
9939 LPFC_USER_LINK_SPEED_4G;
9942 phba->cfg_link_speed =
9943 LPFC_USER_LINK_SPEED_8G;
9945 case LINK_SPEED_10G:
9946 phba->cfg_link_speed =
9947 LPFC_USER_LINK_SPEED_10G;
9949 case LINK_SPEED_16G:
9950 phba->cfg_link_speed =
9951 LPFC_USER_LINK_SPEED_16G;
9953 case LINK_SPEED_32G:
9954 phba->cfg_link_speed =
9955 LPFC_USER_LINK_SPEED_32G;
9957 case LINK_SPEED_64G:
9958 phba->cfg_link_speed =
9959 LPFC_USER_LINK_SPEED_64G;
9962 phba->cfg_link_speed =
9963 LPFC_USER_LINK_SPEED_AUTO;
9966 lpfc_printf_log(phba, KERN_ERR,
9968 "0047 Unrecognized link "
9971 phba->cfg_link_speed =
9972 LPFC_USER_LINK_SPEED_AUTO;
9977 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
9978 length = phba->sli4_hba.max_cfg_param.max_xri -
9979 lpfc_sli4_get_els_iocb_cnt(phba);
9980 if (phba->cfg_hba_queue_depth > length) {
9981 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9982 "3361 HBA queue depth changed from %d to %d\n",
9983 phba->cfg_hba_queue_depth, length);
9984 phba->cfg_hba_queue_depth = length;
9987 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
9988 LPFC_SLI_INTF_IF_TYPE_2)
9991 /* get the pf# and vf# for SLI4 if_type 2 port */
9992 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
9993 sizeof(struct lpfc_sli4_cfg_mhdr));
9994 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
9995 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
9996 length, LPFC_SLI4_MBX_EMBED);
9998 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9999 shdr = (union lpfc_sli4_cfg_shdr *)
10000 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10001 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10002 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10003 if (rc2 || shdr_status || shdr_add_status) {
10004 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10005 "3026 Mailbox failed , mbxCmd x%x "
10006 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10007 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10008 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10012 /* search for the fc_fcoe resource descriptor */
10013 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10015 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10016 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10017 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10018 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10019 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10020 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10023 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10024 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10025 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10026 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10027 phba->sli4_hba.iov.pf_number =
10028 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10029 phba->sli4_hba.iov.vf_number =
10030 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10035 if (i < LPFC_RSRC_DESC_MAX_NUM)
10036 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10037 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10038 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10039 phba->sli4_hba.iov.vf_number);
10040 else
10041 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10042 "3028 GET_FUNCTION_CONFIG: failed to find "
10043 "Resource Descriptor:x%x\n",
10044 LPFC_RSRC_DESC_TYPE_FCFCOE);
10047 mempool_free(pmb, phba->mbox_mem_pool);
10052 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10053 * @phba: pointer to lpfc hba data structure.
10055 * This routine is invoked to set up the port-side endian order when
10056 * the port if_type is 0. This routine has no function for other
10057 * if_types.
10058 *
10059 * Return codes
10060 * 0 - successful
10061 * -ENOMEM - No available memory
10062 * -EIO - The mailbox failed to complete successfully.
10065 lpfc_setup_endian_order(struct lpfc_hba *phba)
10067 LPFC_MBOXQ_t *mboxq;
10068 uint32_t if_type, rc = 0;
10069 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10070 HOST_ENDIAN_HIGH_WORD1};
10072 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10074 case LPFC_SLI_INTF_IF_TYPE_0:
10075 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10076 GFP_KERNEL);
10077 if (!mboxq) {
10078 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10079 "0492 Unable to allocate memory for "
10080 "issuing SLI_CONFIG_SPECIAL mailbox "
10086 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10087 * two words to contain special data values and no other data.
10089 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10090 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10091 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10092 if (rc != MBX_SUCCESS) {
10093 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10094 "0493 SLI_CONFIG_SPECIAL mailbox "
10095 "failed with status x%x\n",
10099 mempool_free(mboxq, phba->mbox_mem_pool);
10101 case LPFC_SLI_INTF_IF_TYPE_6:
10102 case LPFC_SLI_INTF_IF_TYPE_2:
10103 case LPFC_SLI_INTF_IF_TYPE_1:
10111 * lpfc_sli4_queue_verify - Verify and update EQ counts
10112 * @phba: pointer to lpfc hba data structure.
10114 * This routine is invoked to check the user settable queue counts for EQs.
10115 * After this routine is called the counts will be set to valid values that
10116 * adhere to the constraints of the system's interrupt vectors and the port's
10117 * queue resources.
10118 *
10119 * Return codes
10120 * 0 - successful
10121 * -ENOMEM - No available memory
10124 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10127 * Sanity check for configured queue parameters against the run-time
10128 * device parameters
10131 if (phba->nvmet_support) {
10132 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10133 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10134 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10135 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10139 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10140 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10141 phba->cfg_nvmet_mrq);
10143 /* Get EQ depth from module parameter, fake the default for now */
10144 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10145 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10147 /* Get CQ depth from module parameter, fake the default for now */
10148 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10149 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
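/**
 * lpfc_alloc_io_wq_cq - Allocate one fast-path IO WQ/CQ pair
 * @phba: pointer to lpfc hba data structure.
 * @idx: index of the hardware queue the pair belongs to.
 *
 * Allocates the IO CQ and IO WQ for the hardware queue at @idx, sizing
 * them for expanded pages when the WQEs contain an embedded cdb.
 */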
10154 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10156 struct lpfc_queue *qdesc;
10160 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10161 /* Create Fast Path IO CQs */
10162 if (phba->enab_exp_wqcq_pages)
10163 /* Increase the CQ size when WQEs contain an embedded cdb */
10164 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10165 phba->sli4_hba.cq_esize,
10166 LPFC_CQE_EXP_COUNT, cpu);
10168 else
10169 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10170 phba->sli4_hba.cq_esize,
10171 phba->sli4_hba.cq_ecount, cpu);
10172 if (!qdesc) {
10173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10174 "0499 Failed allocate fast-path IO CQ (%d)\n",
10178 qdesc->qe_valid = 1;
10180 qdesc->chann = cpu;
10181 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10183 /* Create Fast Path IO WQs */
10184 if (phba->enab_exp_wqcq_pages) {
10185 /* Increase the WQ size when WQEs contain an embedded cdb */
10186 wqesize = (phba->fcp_embed_io) ?
10187 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10188 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10189 wqesize,
10190 LPFC_WQE_EXP_COUNT, cpu);
10191 } else
10192 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10193 phba->sli4_hba.wq_esize,
10194 phba->sli4_hba.wq_ecount, cpu);
10196 if (!qdesc) {
10197 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10198 "0503 Failed allocate fast-path IO WQ (%d)\n",
10203 qdesc->chann = cpu;
10204 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10205 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10210 * lpfc_sli4_queue_create - Create all the SLI4 queues
10211 * @phba: pointer to lpfc hba data structure.
10213 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10214 * operation. For each SLI4 queue type, the parameters such as queue entry
10215 * count (queue depth) shall be taken from the module parameter. For now,
10216 * we just use some constant number as a placeholder.
10220 * -ENOMEM - No available memory
10221 * -EIO - The mailbox failed to complete successfully.
10224 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10226 struct lpfc_queue *qdesc;
10227 int idx, cpu, eqcpu;
10228 struct lpfc_sli4_hdw_queue *qp;
10229 struct lpfc_vector_map_info *cpup;
10230 struct lpfc_vector_map_info *eqcpup;
10231 struct lpfc_eq_intr_info *eqi;
10234 * Create HBA Record arrays.
10235 * Both NVME and FCP will share the same vectors / EQs
10237 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10238 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10239 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10240 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10241 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10242 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10243 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10244 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10245 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10246 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10248 if (!phba->sli4_hba.hdwq) {
10249 phba->sli4_hba.hdwq = kcalloc(
10250 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10252 if (!phba->sli4_hba.hdwq) {
10253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10254 "6427 Failed allocate memory for "
10255 "fast-path Hardware Queue array\n");
10258 /* Prepare hardware queues to take IO buffers */
10259 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10260 qp = &phba->sli4_hba.hdwq[idx];
10261 spin_lock_init(&qp->io_buf_list_get_lock);
10262 spin_lock_init(&qp->io_buf_list_put_lock);
10263 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10264 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10265 qp->get_io_bufs = 0;
10266 qp->put_io_bufs = 0;
10267 qp->total_io_bufs = 0;
10268 spin_lock_init(&qp->abts_io_buf_list_lock);
10269 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10270 qp->abts_scsi_io_bufs = 0;
10271 qp->abts_nvme_io_bufs = 0;
10272 INIT_LIST_HEAD(&qp->sgl_list);
10273 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10274 spin_lock_init(&qp->hdwq_lock);
10278 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10279 if (phba->nvmet_support) {
10280 phba->sli4_hba.nvmet_cqset = kcalloc(
10281 phba->cfg_nvmet_mrq,
10282 sizeof(struct lpfc_queue *),
10284 if (!phba->sli4_hba.nvmet_cqset) {
10285 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10286 "3121 Fail allocate memory for "
10287 "fast-path CQ set array\n");
10290 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10291 phba->cfg_nvmet_mrq,
10292 sizeof(struct lpfc_queue *),
10294 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10296 "3122 Fail allocate memory for "
10297 "fast-path RQ set hdr array\n");
10300 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10301 phba->cfg_nvmet_mrq,
10302 sizeof(struct lpfc_queue *),
10304 if (!phba->sli4_hba.nvmet_mrq_data) {
10305 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10306 "3124 Fail allocate memory for "
10307 "fast-path RQ set data array\n");
10313 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10315 /* Create HBA Event Queues (EQs) */
10316 for_each_present_cpu(cpu) {
10317 /* We only want to create 1 EQ per vector, even though
10318 * multiple CPUs might be using that vector, so we only
10319 * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
10321 cpup = &phba->sli4_hba.cpu_map[cpu];
10322 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10325 /* Get a ptr to the Hardware Queue associated with this CPU */
10326 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10328 /* Allocate an EQ */
10329 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10330 phba->sli4_hba.eq_esize,
10331 phba->sli4_hba.eq_ecount, cpu);
10333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10334 "0497 Failed allocate EQ (%d)\n",
10338 qdesc->qe_valid = 1;
10339 qdesc->hdwq = cpup->hdwq;
10340 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10341 qdesc->last_cpu = qdesc->chann;
10343 /* Save the allocated EQ in the Hardware Queue */
10344 qp->hba_eq = qdesc;
10346 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10347 list_add(&qdesc->cpu_list, &eqi->list);
10350 /* Now we need to populate the other Hardware Queues that share
10351 * an IRQ vector with the associated EQ ptr.
10353 for_each_present_cpu(cpu) {
10354 cpup = &phba->sli4_hba.cpu_map[cpu];
10356 /* Check for EQ already allocated in previous loop */
10357 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10360 /* Check for multiple CPUs per hdwq */
10361 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10365 /* We need to share an EQ for this hdwq */
10366 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10367 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10368 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10371 /* Allocate IO Path SLI4 CQ/WQs */
10372 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10373 if (lpfc_alloc_io_wq_cq(phba, idx))
10377 if (phba->nvmet_support) {
10378 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10379 cpu = lpfc_find_cpu_handle(phba, idx,
10380 LPFC_FIND_BY_HDWQ);
10381 qdesc = lpfc_sli4_queue_alloc(phba,
10382 LPFC_DEFAULT_PAGE_SIZE,
10383 phba->sli4_hba.cq_esize,
10384 phba->sli4_hba.cq_ecount,
10387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10388 "3142 Failed allocate NVME "
10389 "CQ Set (%d)\n", idx);
10392 qdesc->qe_valid = 1;
10394 qdesc->chann = cpu;
10395 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10400 * Create Slow Path Completion Queues (CQs)
10403 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10404 /* Create slow-path Mailbox Command Complete Queue */
10405 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10406 phba->sli4_hba.cq_esize,
10407 phba->sli4_hba.cq_ecount, cpu);
10409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10410 "0500 Failed allocate slow-path mailbox CQ\n");
10413 qdesc->qe_valid = 1;
10414 phba->sli4_hba.mbx_cq = qdesc;
10416 /* Create slow-path ELS Complete Queue */
10417 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10418 phba->sli4_hba.cq_esize,
10419 phba->sli4_hba.cq_ecount, cpu);
10421 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10422 "0501 Failed allocate slow-path ELS CQ\n");
10425 qdesc->qe_valid = 1;
10426 qdesc->chann = cpu;
10427 phba->sli4_hba.els_cq = qdesc;
10431 * Create Slow Path Work Queues (WQs)
10434 /* Create Mailbox Command Queue */
10436 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10437 phba->sli4_hba.mq_esize,
10438 phba->sli4_hba.mq_ecount, cpu);
10440 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10441 "0505 Failed allocate slow-path MQ\n");
10444 qdesc->chann = cpu;
10445 phba->sli4_hba.mbx_wq = qdesc;
10448 * Create ELS Work Queues
10451 /* Create slow-path ELS Work Queue */
10452 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10453 phba->sli4_hba.wq_esize,
10454 phba->sli4_hba.wq_ecount, cpu);
10456 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10457 "0504 Failed allocate slow-path ELS WQ\n");
10460 qdesc->chann = cpu;
10461 phba->sli4_hba.els_wq = qdesc;
10462 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10464 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10465 /* Create NVME LS Complete Queue */
10466 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10467 phba->sli4_hba.cq_esize,
10468 phba->sli4_hba.cq_ecount, cpu);
10470 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10471 "6079 Failed allocate NVME LS CQ\n");
10474 qdesc->chann = cpu;
10475 qdesc->qe_valid = 1;
10476 phba->sli4_hba.nvmels_cq = qdesc;
10478 /* Create NVME LS Work Queue */
10479 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10480 phba->sli4_hba.wq_esize,
10481 phba->sli4_hba.wq_ecount, cpu);
10483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10484 "6080 Failed allocate NVME LS WQ\n");
10487 qdesc->chann = cpu;
10488 phba->sli4_hba.nvmels_wq = qdesc;
10489 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10493 * Create Receive Queue (RQ)
10496 /* Create Receive Queue for header */
10497 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10498 phba->sli4_hba.rq_esize,
10499 phba->sli4_hba.rq_ecount, cpu);
10501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10502 "0506 Failed allocate receive HRQ\n");
10505 phba->sli4_hba.hdr_rq = qdesc;
10507 /* Create Receive Queue for data */
10508 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10509 phba->sli4_hba.rq_esize,
10510 phba->sli4_hba.rq_ecount, cpu);
10512 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10513 "0507 Failed allocate receive DRQ\n");
10516 phba->sli4_hba.dat_rq = qdesc;
10518 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10519 phba->nvmet_support) {
10520 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10521 cpu = lpfc_find_cpu_handle(phba, idx,
10522 LPFC_FIND_BY_HDWQ);
10523 /* Create NVMET Receive Queue for header */
10524 qdesc = lpfc_sli4_queue_alloc(phba,
10525 LPFC_DEFAULT_PAGE_SIZE,
10526 phba->sli4_hba.rq_esize,
10527 LPFC_NVMET_RQE_DEF_COUNT,
10530 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10531 "3146 Failed allocate "
10536 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10538 /* Only needed for header of RQ pair */
10539 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10542 if (qdesc->rqbp == NULL) {
10543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10544 "6131 Failed allocate "
10549 /* Put list in known state in case driver load fails. */
10550 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10552 /* Create NVMET Receive Queue for data */
10553 qdesc = lpfc_sli4_queue_alloc(phba,
10554 LPFC_DEFAULT_PAGE_SIZE,
10555 phba->sli4_hba.rq_esize,
10556 LPFC_NVMET_RQE_DEF_COUNT,
10559 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10560 "3156 Failed allocate "
10565 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10569 /* Clear NVME stats */
10570 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10571 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10572 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10573 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10577 /* Clear SCSI stats */
10578 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10579 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10580 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10581 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10588 lpfc_sli4_queue_destroy(phba);
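/* Free a single queue and clear the caller's reference to it */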
10593 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10596 lpfc_sli4_queue_free(*qp);
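/* Free each queue in an array of @max queue pointers */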
10602 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10609 for (idx = 0; idx < max; idx++)
10610 __lpfc_sli4_release_queue(&(*qs)[idx]);
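/* Free the CQ/WQ pair of every hardware queue, then the EQ behind
 * each IRQ vector.
 */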
10617 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10619 struct lpfc_sli4_hdw_queue *hdwq;
10620 struct lpfc_queue *eq;
10623 hdwq = phba->sli4_hba.hdwq;
10625 /* Loop thru all Hardware Queues */
10626 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10627 /* Free the CQ/WQ corresponding to the Hardware Queue */
10628 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10629 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10630 hdwq[idx].hba_eq = NULL;
10631 hdwq[idx].io_cq = NULL;
10632 hdwq[idx].io_wq = NULL;
10633 if (phba->cfg_xpsgl && !phba->nvmet_support)
10634 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10635 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10637 /* Loop thru all IRQ vectors */
10638 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10639 /* Free the EQ corresponding to the IRQ vector */
10640 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10641 lpfc_sli4_queue_free(eq);
10642 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10647 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10648 * @phba: pointer to lpfc hba data structure.
10650 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
10651 * operation.
10652 *
10653 * Return codes
10654 * 0 - successful
10655 * -ENOMEM - No available memory
10656 * -EIO - The mailbox failed to complete successfully.
10659 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10662 * Set FREE_INIT before beginning to free the queues.
10663 * Wait until all users of the queues have acknowledged the
10664 * release by clearing FREE_WAIT.
10666 spin_lock_irq(&phba->hbalock);
10667 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10668 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10669 spin_unlock_irq(&phba->hbalock);
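/* Give queue users a chance to clear FREE_WAIT before rechecking */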
10671 spin_lock_irq(&phba->hbalock);
10673 spin_unlock_irq(&phba->hbalock);
10675 lpfc_sli4_cleanup_poll_list(phba);
10677 /* Release HBA eqs */
10678 if (phba->sli4_hba.hdwq)
10679 lpfc_sli4_release_hdwq(phba);
10681 if (phba->nvmet_support) {
10682 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10683 phba->cfg_nvmet_mrq);
10685 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10686 phba->cfg_nvmet_mrq);
10687 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10688 phba->cfg_nvmet_mrq);
10691 /* Release mailbox command work queue */
10692 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10694 /* Release ELS work queue */
10695 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10697 /* Release NVME LS work queue */
10698 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10700 /* Release unsolicited receive queue */
10701 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10702 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10704 /* Release ELS complete queue */
10705 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10707 /* Release NVME LS complete queue */
10708 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10710 /* Release mailbox command complete queue */
10711 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10713 /* Everything on this list has been freed */
10714 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10716 /* Done with freeing the queues */
10717 spin_lock_irq(&phba->hbalock);
10718 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10719 spin_unlock_irq(&phba->hbalock);
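/* Return every buffer still posted on @rq's buffer list to its
 * registered free routine.
 */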
10723 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10725 struct lpfc_rqb *rqbp;
10726 struct lpfc_dmabuf *h_buf;
10727 struct rqb_dmabuf *rqb_buffer;
10730 while (!list_empty(&rqbp->rqb_buffer_list)) {
10731 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10732 struct lpfc_dmabuf, list);
10734 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10735 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10736 rqbp->buffer_count--;
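/**
 * lpfc_create_wq_cq - Create a WQ/CQ pair on a parent EQ
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue.
 * @cq: completion queue to create on @eq.
 * @wq: work queue (or mailbox queue for LPFC_MBOX) to create on @cq.
 * @cq_map: if non-NULL, returns the new CQ id for fast-path lookup.
 * @qidx: queue index.
 * @qtype: queue type (LPFC_MBOX, LPFC_ELS, LPFC_NVME_LS, LPFC_IO, ...).
 */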
10742 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10743 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10744 int qidx, uint32_t qtype)
10746 struct lpfc_sli_ring *pring;
10749 if (!eq || !cq || !wq) {
10750 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10751 "6085 Fast-path %s (%d) not allocated\n",
10752 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10756 /* create the CQ first */
10757 rc = lpfc_cq_create(phba, cq, eq,
10758 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10760 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10761 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10762 qidx, (uint32_t)rc);
10766 if (qtype != LPFC_MBOX) {
10767 /* Setup cq_map for fast lookup */
10768 if (cq_map)
10769 *cq_map = cq->queue_id;
10771 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10772 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10773 qidx, cq->queue_id, qidx, eq->queue_id);
10775 /* create the wq */
10776 rc = lpfc_wq_create(phba, wq, cq, qtype);
10778 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10779 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10780 qidx, (uint32_t)rc);
10781 /* no need to tear down cq - caller will do so */
10785 /* Bind this CQ/WQ to the NVME ring */
10786 pring = wq->pring;
10787 pring->sli.sli4.wqp = (void *)wq;
10790 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10791 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
10792 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
10794 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
10796 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10797 "0539 Failed setup of slow-path MQ: "
10798 "rc = 0x%x\n", rc);
10799 /* no need to tear down cq - caller will do so */
10803 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10804 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
10805 phba->sli4_hba.mbx_wq->queue_id,
10806 phba->sli4_hba.mbx_cq->queue_id);
10813 * lpfc_setup_cq_lookup - Setup the CQ lookup table
10814 * @phba: pointer to lpfc hba data structure.
10816 * This routine will populate the cq_lookup table with all
10817 * available CQ queue_ids.
10820 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
10822 struct lpfc_queue *eq, *childq;
10825 memset(phba->sli4_hba.cq_lookup, 0,
10826 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
10827 /* Loop thru all IRQ vectors */
10828 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10829 /* Get the EQ corresponding to the IRQ vector */
10830 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10833 /* Loop through all CQs associated with that EQ */
10834 list_for_each_entry(childq, &eq->child_list, list) {
10835 if (childq->queue_id > phba->sli4_hba.cq_max)
10837 if (childq->subtype == LPFC_IO)
10838 phba->sli4_hba.cq_lookup[childq->queue_id] =
10839 childq;
10845 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
10846 * @phba: pointer to lpfc hba data structure.
10848 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
10849 * operation.
10850 *
10851 * Return codes
10852 * 0 - successful
10853 * -ENOMEM - No available memory
10854 * -EIO - The mailbox failed to complete successfully.
10857 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
10859 uint32_t shdr_status, shdr_add_status;
10860 union lpfc_sli4_cfg_shdr *shdr;
10861 struct lpfc_vector_map_info *cpup;
10862 struct lpfc_sli4_hdw_queue *qp;
10863 LPFC_MBOXQ_t *mboxq;
10865 uint32_t length, usdelay;
10868 /* Check for dual-ULP support */
10869 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10871 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10872 "3249 Unable to allocate memory for "
10873 "QUERY_FW_CFG mailbox command\n");
10876 length = (sizeof(struct lpfc_mbx_query_fw_config) -
10877 sizeof(struct lpfc_sli4_cfg_mhdr));
10878 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10879 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
10880 length, LPFC_SLI4_MBX_EMBED);
10882 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10884 shdr = (union lpfc_sli4_cfg_shdr *)
10885 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10886 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10887 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10888 if (shdr_status || shdr_add_status || rc) {
10889 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10890 "3250 QUERY_FW_CFG mailbox failed with status "
10891 "x%x add_status x%x, mbx status x%x\n",
10892 shdr_status, shdr_add_status, rc);
10893 mempool_free(mboxq, phba->mbox_mem_pool);
10898 phba->sli4_hba.fw_func_mode =
10899 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
10900 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
10901 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
10902 phba->sli4_hba.physical_port =
10903 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
10904 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10905 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
10906 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
10907 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
10909 mempool_free(mboxq, phba->mbox_mem_pool);
10912 * Set up HBA Event Queues (EQs)
10914 qp = phba->sli4_hba.hdwq;
10916 /* Set up HBA event queue */
10917 if (!qp) {
10918 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10919 "3147 Fast-path EQs not allocated\n");
10924 /* Loop thru all IRQ vectors */
10925 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10926 /* Create HBA Event Queues (EQs) in order */
10927 for_each_present_cpu(cpu) {
10928 cpup = &phba->sli4_hba.cpu_map[cpu];
10930 /* Look for the CPU that's using that vector with
10931 * LPFC_CPU_FIRST_IRQ set.
10933 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10935 if (qidx != cpup->eq)
10938 /* Create an EQ for that vector */
10939 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
10940 phba->cfg_fcp_imax);
10942 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10943 "0523 Failed setup of fast-path"
10944 " EQ (%d), rc = 0x%x\n",
10945 cpup->eq, (uint32_t)rc);
10949 /* Save the EQ for that vector in the hba_eq_hdl */
10950 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
10951 qp[cpup->hdwq].hba_eq;
10953 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10954 "2584 HBA EQ setup: queue[%d]-id=%d\n",
10956 qp[cpup->hdwq].hba_eq->queue_id);
10960 /* Loop thru all Hardware Queues */
10961 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
10962 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
10963 cpup = &phba->sli4_hba.cpu_map[cpu];
10965 /* Create the CQ/WQ corresponding to the Hardware Queue */
10966 rc = lpfc_create_wq_cq(phba,
10967 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
10968 phba->sli4_hba.hdwq[qidx].io_cq,
10969 phba->sli4_hba.hdwq[qidx].io_wq,
10970 &phba->sli4_hba.hdwq[qidx].io_cq_map,
10971 qidx,
10972 LPFC_IO);
10973 if (rc) {
10974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10975 "0535 Failed to setup fastpath "
10976 "IO WQ/CQ (%d), rc = 0x%x\n",
10977 qidx, (uint32_t)rc);
10983 * Set up Slow Path Complete Queues (CQs)
10986 /* Set up slow-path MBOX CQ/MQ */
10988 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
10989 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10990 "0528 %s not allocated\n",
10991 phba->sli4_hba.mbx_cq ?
10992 "Mailbox WQ" : "Mailbox CQ");
10997 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
10998 phba->sli4_hba.mbx_cq,
10999 phba->sli4_hba.mbx_wq,
11000 NULL, 0, LPFC_MBOX);
11002 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11003 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11007 if (phba->nvmet_support) {
11008 if (!phba->sli4_hba.nvmet_cqset) {
11009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11010 "3165 Fast-path NVME CQ Set "
11011 "array not allocated\n");
11015 if (phba->cfg_nvmet_mrq > 1) {
11016 rc = lpfc_cq_create_set(phba,
11017 phba->sli4_hba.nvmet_cqset,
11019 LPFC_WCQ, LPFC_NVMET);
11021 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11022 "3164 Failed setup of NVME CQ "
11023 "Set, rc = 0x%x\n",
11028 /* Set up NVMET Receive Complete Queue */
11029 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11031 LPFC_WCQ, LPFC_NVMET);
11033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11034 "6089 Failed setup NVMET CQ: "
11035 "rc = 0x%x\n", (uint32_t)rc);
11038 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11040 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11041 "6090 NVMET CQ setup: cq-id=%d, "
11042 "parent eq-id=%d\n",
11043 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11044 qp[0].hba_eq->queue_id);
11048 /* Set up slow-path ELS WQ/CQ */
11049 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11050 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11051 "0530 ELS %s not allocated\n",
11052 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11056 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11057 phba->sli4_hba.els_cq,
11058 phba->sli4_hba.els_wq,
11059 NULL, 0, LPFC_ELS);
11061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11062 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11066 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11067 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11068 phba->sli4_hba.els_wq->queue_id,
11069 phba->sli4_hba.els_cq->queue_id);
11071 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11072 /* Set up NVME LS Complete Queue */
11073 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11074 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11075 "6091 LS %s not allocated\n",
11076 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11080 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11081 phba->sli4_hba.nvmels_cq,
11082 phba->sli4_hba.nvmels_wq,
11083 NULL, 0, LPFC_NVME_LS);
11085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11086 "0526 Failed setup of NVVME LS WQ/CQ: "
11087 "rc = 0x%x\n", (uint32_t)rc);
11091 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11092 "6096 ELS WQ setup: wq-id=%d, "
11093 "parent cq-id=%d\n",
11094 phba->sli4_hba.nvmels_wq->queue_id,
11095 phba->sli4_hba.nvmels_cq->queue_id);
11099 * Create NVMET Receive Queue (RQ)
11101 if (phba->nvmet_support) {
11102 if ((!phba->sli4_hba.nvmet_cqset) ||
11103 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11104 (!phba->sli4_hba.nvmet_mrq_data)) {
11105 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11106 "6130 MRQ CQ Queues not "
11111 if (phba->cfg_nvmet_mrq > 1) {
11112 rc = lpfc_mrq_create(phba,
11113 phba->sli4_hba.nvmet_mrq_hdr,
11114 phba->sli4_hba.nvmet_mrq_data,
11115 phba->sli4_hba.nvmet_cqset,
11118 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11119 "6098 Failed setup of NVMET "
11120 "MRQ: rc = 0x%x\n",
11126 rc = lpfc_rq_create(phba,
11127 phba->sli4_hba.nvmet_mrq_hdr[0],
11128 phba->sli4_hba.nvmet_mrq_data[0],
11129 phba->sli4_hba.nvmet_cqset[0],
11132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11133 "6057 Failed setup of NVMET "
11134 "Receive Queue: rc = 0x%x\n",
11139 lpfc_printf_log(
11140 phba, KERN_INFO, LOG_INIT,
11141 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11142 "dat-rq-id=%d parent cq-id=%d\n",
11143 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11144 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11145 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11150 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11152 "0540 Receive Queue not allocated\n");
11157 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11158 phba->sli4_hba.els_cq, LPFC_USOL);
11160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11161 "0541 Failed setup of Receive Queue: "
11162 "rc = 0x%x\n", (uint32_t)rc);
11166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11167 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11168 "parent cq-id=%d\n",
11169 phba->sli4_hba.hdr_rq->queue_id,
11170 phba->sli4_hba.dat_rq->queue_id,
11171 phba->sli4_hba.els_cq->queue_id);
11173 if (phba->cfg_fcp_imax)
11174 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11175 else
11176 usdelay = 0;
11178 for (qidx = 0; qidx < phba->cfg_irq_chann;
11179 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11180 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11183 if (phba->sli4_hba.cq_max) {
11184 kfree(phba->sli4_hba.cq_lookup);
11185 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11186 sizeof(struct lpfc_queue *), GFP_KERNEL);
11187 if (!phba->sli4_hba.cq_lookup) {
11188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11189 "0549 Failed setup of CQ Lookup table: "
11190 "size 0x%x\n", phba->sli4_hba.cq_max);
11194 lpfc_setup_cq_lookup(phba);
11199 lpfc_sli4_queue_unset(phba);
11205 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11206 * @phba: pointer to lpfc hba data structure.
11208 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11209 * operation.
11210 *
11211 * Return codes
11212 * 0 - successful
11213 * -ENOMEM - No available memory
11214 * -EIO - The mailbox failed to complete successfully.
11217 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11219 struct lpfc_sli4_hdw_queue *qp;
11220 struct lpfc_queue *eq;
11223 /* Unset mailbox command work queue */
11224 if (phba->sli4_hba.mbx_wq)
11225 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11227 /* Unset NVME LS work queue */
11228 if (phba->sli4_hba.nvmels_wq)
11229 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11231 /* Unset ELS work queue */
11232 if (phba->sli4_hba.els_wq)
11233 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11235 /* Unset unsolicited receive queue */
11236 if (phba->sli4_hba.hdr_rq)
11237 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11238 phba->sli4_hba.dat_rq);
11240 /* Unset mailbox command complete queue */
11241 if (phba->sli4_hba.mbx_cq)
11242 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11244 /* Unset ELS complete queue */
11245 if (phba->sli4_hba.els_cq)
11246 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11248 /* Unset NVME LS complete queue */
11249 if (phba->sli4_hba.nvmels_cq)
11250 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11252 if (phba->nvmet_support) {
11253 /* Unset NVMET MRQ queue */
11254 if (phba->sli4_hba.nvmet_mrq_hdr) {
11255 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11258 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11259 phba->sli4_hba.nvmet_mrq_data[qidx]);
11262 /* Unset NVMET CQ Set complete queue */
11263 if (phba->sli4_hba.nvmet_cqset) {
11264 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11266 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11270 /* Unset fast-path SLI4 queues */
11271 if (phba->sli4_hba.hdwq) {
11272 /* Loop thru all Hardware Queues */
11273 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11274 /* Destroy the CQ/WQ corresponding to Hardware Queue */
11275 qp = &phba->sli4_hba.hdwq[qidx];
11276 lpfc_wq_destroy(phba, qp->io_wq);
11277 lpfc_cq_destroy(phba, qp->io_cq);
11279 /* Loop thru all IRQ vectors */
11280 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11281 /* Destroy the EQ corresponding to the IRQ vector */
11282 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11283 lpfc_eq_destroy(phba, eq);
11287 kfree(phba->sli4_hba.cq_lookup);
11288 phba->sli4_hba.cq_lookup = NULL;
11289 phba->sli4_hba.cq_max = 0;
11293 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11294 * @phba: pointer to lpfc hba data structure.
11296 * This routine is invoked to allocate and set up a pool of completion queue
11297 * events. The body of the completion queue event is a completion queue entry
11298 * CQE. For now, this pool is used for the interrupt service routine to queue
11299 * the following HBA completion queue events for the worker thread to process:
11300 * - Mailbox asynchronous events
11301 * - Receive queue completion unsolicited events
11302 * Later, this can be used for all the slow-path events.
11306 * -ENOMEM - No available memory
11309 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11311 struct lpfc_cq_event *cq_event;
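/* Pre-allocate four events per CQ entry so the interrupt service
 * routine can queue slow-path completions to the worker thread
 * without allocating at interrupt time.
 */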
11314 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11315 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11317 goto out_pool_create_fail;
11318 list_add_tail(&cq_event->list,
11319 &phba->sli4_hba.sp_cqe_event_pool);
11323 out_pool_create_fail:
11324 lpfc_sli4_cq_event_pool_destroy(phba);
11329 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11330 * @phba: pointer to lpfc hba data structure.
11332 * This routine is invoked to free the pool of completion queue events at
11333 * driver unload time. Note that it is the responsibility of the driver
11334 * cleanup routine to free all the outstanding completion-queue events
11335 * allocated from this pool back into the pool before invoking this routine
11336 * to destroy the pool.
11339 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11341 struct lpfc_cq_event *cq_event, *next_cq_event;
11343 list_for_each_entry_safe(cq_event, next_cq_event,
11344 &phba->sli4_hba.sp_cqe_event_pool, list) {
11345 list_del(&cq_event->list);
11346 kfree(cq_event);
11351 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11352 * @phba: pointer to lpfc hba data structure.
11354 * This routine is the lock free version of the API invoked to allocate a
11355 * completion-queue event from the free pool.
11357 * Return: Pointer to the newly allocated completion-queue event if successful
11360 struct lpfc_cq_event *
11361 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11363 struct lpfc_cq_event *cq_event = NULL;
11365 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11366 struct lpfc_cq_event, list);
11371 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11372 * @phba: pointer to lpfc hba data structure.
11374 * This routine is the lock version of the API invoked to allocate a
11375 * completion-queue event from the free pool.
11377 * Return: Pointer to the newly allocated completion-queue event if successful
11380 struct lpfc_cq_event *
11381 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11383 struct lpfc_cq_event *cq_event;
11384 unsigned long iflags;
11386 spin_lock_irqsave(&phba->hbalock, iflags);
11387 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11388 spin_unlock_irqrestore(&phba->hbalock, iflags);
11393 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11394 * @phba: pointer to lpfc hba data structure.
11395 * @cq_event: pointer to the completion queue event to be freed.
11397 * This routine is the lock free version of the API invoked to release a
11398 * completion-queue event back into the free pool.
11401 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11402 struct lpfc_cq_event *cq_event)
11404 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11408 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11409 * @phba: pointer to lpfc hba data structure.
11410 * @cq_event: pointer to the completion queue event to be freed.
11412 * This routine is the lock version of the API invoked to release a
11413 * completion-queue event back into the free pool.
11416 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11417 struct lpfc_cq_event *cq_event)
11419 unsigned long iflags;
11420 spin_lock_irqsave(&phba->hbalock, iflags);
11421 __lpfc_sli4_cq_event_release(phba, cq_event);
11422 spin_unlock_irqrestore(&phba->hbalock, iflags);
11426 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11427 * @phba: pointer to lpfc hba data structure.
11429 * This routine is invoked to free all the pending completion-queue events
11430 * back into the free pool for device reset.
11433 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11435 LIST_HEAD(cq_event_list);
11436 struct lpfc_cq_event *cq_event;
11437 unsigned long iflags;
11439 /* Retrieve all the pending WCQEs from pending WCQE lists */
11441 /* Pending ELS XRI abort events */
11442 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11443 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11445 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11447 /* Pending async events */
11448 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11449 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11451 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11453 while (!list_empty(&cq_event_list)) {
11454 list_remove_head(&cq_event_list, cq_event,
11455 struct lpfc_cq_event, list);
11456 lpfc_sli4_cq_event_release(phba, cq_event);
11461 * lpfc_pci_function_reset - Reset pci function.
11462 * @phba: pointer to lpfc hba data structure.
11464 * This routine is invoked to request a PCI function reset. It destroys
11465 * all resources assigned to the PCI function that originates this request.
11469 * -ENOMEM - No available memory
11470 * -EIO - The mailbox failed to complete successfully.
11473 lpfc_pci_function_reset(struct lpfc_hba *phba)
11475 LPFC_MBOXQ_t *mboxq;
11476 uint32_t rc = 0, if_type;
11477 uint32_t shdr_status, shdr_add_status;
11479 uint32_t port_reset = 0;
11480 union lpfc_sli4_cfg_shdr *shdr;
11481 struct lpfc_register reg_data;
11484 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11486 case LPFC_SLI_INTF_IF_TYPE_0:
11487 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11491 "0494 Unable to allocate memory for "
11492 "issuing SLI_FUNCTION_RESET mailbox "
11497 /* Setup PCI function reset mailbox-ioctl command */
11498 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11499 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11500 LPFC_SLI4_MBX_EMBED);
11501 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11502 shdr = (union lpfc_sli4_cfg_shdr *)
11503 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11504 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11505 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11507 mempool_free(mboxq, phba->mbox_mem_pool);
11508 if (shdr_status || shdr_add_status || rc) {
11509 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11510 "0495 SLI_FUNCTION_RESET mailbox "
11511 "failed with status x%x add_status x%x,"
11512 " mbx status x%x\n",
11513 shdr_status, shdr_add_status, rc);
11517 case LPFC_SLI_INTF_IF_TYPE_2:
11518 case LPFC_SLI_INTF_IF_TYPE_6:
11521 * Poll the Port Status Register and wait for RDY for
11522 * up to 30 seconds. If the port doesn't respond, treat
11523 * it as an error.
11524 */
11525 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11526 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11527 STATUSregaddr, &reg_data.word0)) {
11531 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11536 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11537 phba->work_status[0] = readl(
11538 phba->sli4_hba.u.if_type2.ERR1regaddr);
11539 phba->work_status[1] = readl(
11540 phba->sli4_hba.u.if_type2.ERR2regaddr);
11541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11542 "2890 Port not ready, port status reg "
11543 "0x%x error 1=0x%x, error 2=0x%x\n",
11545 phba->work_status[0],
11546 phba->work_status[1]);
11553 * Reset the port now
11555 reg_data.word0 = 0;
11556 bf_set(lpfc_sliport_ctrl_end, &reg_data,
11557 LPFC_SLIPORT_LITTLE_ENDIAN);
11558 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11559 LPFC_SLIPORT_INIT_PORT);
11560 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11563 pci_read_config_word(phba->pcidev,
11564 PCI_DEVICE_ID, &devid);
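/* The config space read above serves to flush the posted
 * write that initiated the port reset.
 */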
11569 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11575 case LPFC_SLI_INTF_IF_TYPE_1:
11581 /* Catch the not-ready port failure after a port reset. */
11583 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11584 "3317 HBA not functional: IP Reset Failed "
11585 "try: echo fw_reset > board_mode\n");
11593 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11594 * @phba: pointer to lpfc hba data structure.
11596 * This routine is invoked to set up the PCI device memory space for device
11597 * with SLI-4 interface spec.
11601 * other values - error
11604 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11606 struct pci_dev *pdev = phba->pcidev;
11607 unsigned long bar0map_len, bar1map_len, bar2map_len;
11614 /* Set the device DMA mask size */
11615 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11617 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11622 * The BARs and register set definitions and offset locations are
11623 * dependent on the if_type.
11625 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11626 &phba->sli4_hba.sli_intf.word0)) {
11630 /* There is no SLI3 failback for SLI4 devices. */
11631 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11632 LPFC_SLI_INTF_VALID) {
11633 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11634 "2894 SLI_INTF reg contents invalid "
11635 "sli_intf reg 0x%x\n",
11636 phba->sli4_hba.sli_intf.word0);
11640 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11642 * Get the bus address of SLI4 device Bar regions and the
11643 * number of bytes required by each mapping. The mapping of the
11644 * particular PCI BAR regions is dependent on the type of
11645 * SLI4 device.
11646 */
11647 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11648 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11649 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11652 * Map SLI4 PCI Config Space Register base to a kernel virtual
11653 * address.
11654 */
11655 phba->sli4_hba.conf_regs_memmap_p =
11656 ioremap(phba->pci_bar0_map, bar0map_len);
11657 if (!phba->sli4_hba.conf_regs_memmap_p) {
11658 dev_printk(KERN_ERR, &pdev->dev,
11659 "ioremap failed for SLI4 PCI config "
11663 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11664 /* Set up BAR0 PCI config space register memory map */
11665 lpfc_sli4_bar0_register_memmap(phba, if_type);
11667 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11668 bar0map_len = pci_resource_len(pdev, 1);
11669 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11670 dev_printk(KERN_ERR, &pdev->dev,
11671 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11674 phba->sli4_hba.conf_regs_memmap_p =
11675 ioremap(phba->pci_bar0_map, bar0map_len);
11676 if (!phba->sli4_hba.conf_regs_memmap_p) {
11677 dev_printk(KERN_ERR, &pdev->dev,
11678 "ioremap failed for SLI4 PCI config "
11682 lpfc_sli4_bar0_register_memmap(phba, if_type);
11685 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11686 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11688 * Map SLI4 if type 0 HBA Control Register base to a
11689 * kernel virtual address and setup the registers.
11691 phba->pci_bar1_map = pci_resource_start(pdev,
11693 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11694 phba->sli4_hba.ctrl_regs_memmap_p =
11695 ioremap(phba->pci_bar1_map,
11697 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11698 dev_err(&pdev->dev,
11699 "ioremap failed for SLI4 HBA "
11700 "control registers.\n");
11702 goto out_iounmap_conf;
11704 phba->pci_bar2_memmap_p =
11705 phba->sli4_hba.ctrl_regs_memmap_p;
11706 lpfc_sli4_bar1_register_memmap(phba, if_type);
11709 goto out_iounmap_conf;
11713 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11714 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11716 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11717 * virtual address and setup the registers.
11719 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11720 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11721 phba->sli4_hba.drbl_regs_memmap_p =
11722 ioremap(phba->pci_bar1_map, bar1map_len);
11723 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11724 dev_err(&pdev->dev,
11725 "ioremap failed for SLI4 HBA doorbell registers.\n");
11727 goto out_iounmap_conf;
11729 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11730 lpfc_sli4_bar1_register_memmap(phba, if_type);
11733 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11734 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11736 * Map SLI4 if type 0 HBA Doorbell Register base to
11737 * a kernel virtual address and setup the registers.
11739 phba->pci_bar2_map = pci_resource_start(pdev,
11741 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11742 phba->sli4_hba.drbl_regs_memmap_p =
11743 ioremap(phba->pci_bar2_map,
11745 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11746 dev_err(&pdev->dev,
11747 "ioremap failed for SLI4 HBA"
11748 " doorbell registers.\n");
11750 goto out_iounmap_ctrl;
11752 phba->pci_bar4_memmap_p =
11753 phba->sli4_hba.drbl_regs_memmap_p;
11754 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11756 goto out_iounmap_all;
11759 goto out_iounmap_all;
11763 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11764 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11766 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11767 * virtual address and setup the registers.
11769 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11770 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11771 phba->sli4_hba.dpp_regs_memmap_p =
11772 ioremap(phba->pci_bar2_map, bar2map_len);
11773 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11774 dev_err(&pdev->dev,
11775 "ioremap failed for SLI4 HBA dpp registers.\n");
11777 goto out_iounmap_ctrl;
11779 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11782 /* Set up the EQ/CQ register handling functions now */
11784 case LPFC_SLI_INTF_IF_TYPE_0:
11785 case LPFC_SLI_INTF_IF_TYPE_2:
11786 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11787 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11788 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
11790 case LPFC_SLI_INTF_IF_TYPE_6:
11791 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
11792 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
11793 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
11802 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11804 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11806 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11812 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
11813 * @phba: pointer to lpfc hba data structure.
11815 * This routine is invoked to unset the PCI device memory space for device
11816 * with SLI-4 interface spec.
11819 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
11822 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11825 case LPFC_SLI_INTF_IF_TYPE_0:
11826 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11827 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11828 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11830 case LPFC_SLI_INTF_IF_TYPE_2:
11831 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11833 case LPFC_SLI_INTF_IF_TYPE_6:
11834 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11835 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11836 if (phba->sli4_hba.dpp_regs_memmap_p)
11837 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
11839 case LPFC_SLI_INTF_IF_TYPE_1:
11841 dev_printk(KERN_ERR, &phba->pcidev->dev,
11842 "FATAL - unsupported SLI4 interface type - %d\n",
11849 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
11850 * @phba: pointer to lpfc hba data structure.
11852 * This routine is invoked to enable the MSI-X interrupt vectors to device
11853 * with SLI-3 interface specs.
11857 * other values - error
11860 lpfc_sli_enable_msix(struct lpfc_hba *phba)
11865 /* Set up MSI-X multi-message vectors */
11866 rc = pci_alloc_irq_vectors(phba->pcidev,
11867 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
11869 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11870 "0420 PCI enable MSI-X failed (%d)\n", rc);
11875 * Assign MSI-X vectors to interrupt handlers
11878 /* vector-0 is associated to slow-path handler */
11879 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
11880 &lpfc_sli_sp_intr_handler, 0,
11881 LPFC_SP_DRIVER_HANDLER_NAME, phba);
11883 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11884 "0421 MSI-X slow-path request_irq failed "
11889 /* vector-1 is associated to fast-path handler */
11890 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
11891 &lpfc_sli_fp_intr_handler, 0,
11892 LPFC_FP_DRIVER_HANDLER_NAME, phba);
11895 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11896 "0429 MSI-X fast-path request_irq failed "
11902 * Configure HBA MSI-X attention conditions to messages
11904 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11908 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11909 "0474 Unable to allocate memory for issuing "
11910 "MBOX_CONFIG_MSI command\n");
11913 rc = lpfc_config_msi(phba, pmb);
11916 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
11917 if (rc != MBX_SUCCESS) {
11918 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
11919 "0351 Config MSI mailbox command failed, "
11920 "mbxCmd x%x, mbxStatus x%x\n",
11921 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
11925 /* Free memory allocated for mailbox command */
11926 mempool_free(pmb, phba->mbox_mem_pool);
11930 /* Free memory allocated for mailbox command */
11931 mempool_free(pmb, phba->mbox_mem_pool);
11934 /* free the irq already requested */
11935 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
11938 /* free the irq already requested */
11939 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
11942 /* Unconfigure MSI-X capability structure */
11943 pci_free_irq_vectors(phba->pcidev);
11950 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
11951 * @phba: pointer to lpfc hba data structure.
11953 * This routine is invoked to enable the MSI interrupt mode to device with
11954 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
11955 * enable the MSI vector. The device driver is responsible for calling the
11956 * request_irq() to register the MSI vector with an interrupt handler, which
11957 * is done in this function.
11961 * other values - error
11964 lpfc_sli_enable_msi(struct lpfc_hba *phba)
11968 rc = pci_enable_msi(phba->pcidev);
11970 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11971 "0012 PCI enable MSI mode success.\n");
11973 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11974 "0471 PCI enable MSI mode failed (%d)\n", rc);
11978 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
11979 0, LPFC_DRIVER_NAME, phba);
11981 pci_disable_msi(phba->pcidev);
11982 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11983 "0478 MSI request_irq failed (%d)\n", rc);
11989 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
11990 * @phba: pointer to lpfc hba data structure.
11991 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
11993 * This routine is invoked to enable device interrupt and associate driver's
11994 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
11995 * spec. Depending on the interrupt mode configured for the driver, the driver
11996 * will try to fall back from the configured interrupt mode to an interrupt
11997 * mode which is supported by the platform, kernel, and device, in the order:
11999 * MSI-X -> MSI -> IRQ.
12003 * other values - error
12006 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12008 uint32_t intr_mode = LPFC_INTR_ERROR;
12011 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12012 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12015 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12017 if (cfg_mode == 2) {
12018 /* Now, try to enable MSI-X interrupt mode */
12019 retval = lpfc_sli_enable_msix(phba);
12021 /* Indicate initialization to MSI-X mode */
12022 phba->intr_type = MSIX;
12027 /* Fallback to MSI if MSI-X initialization failed */
12028 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12029 retval = lpfc_sli_enable_msi(phba);
12031 /* Indicate initialization to MSI mode */
12032 phba->intr_type = MSI;
12037 /* Fallback to INTx if both MSI-X/MSI initialization failed */
12038 if (phba->intr_type == NONE) {
12039 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12040 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12042 /* Indicate initialization to INTx mode */
12043 phba->intr_type = INTx;
12051 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12052 * @phba: pointer to lpfc hba data structure.
12054 * This routine is invoked to disable device interrupt and disassociate the
12055 * driver's interrupt handler(s) from interrupt vector(s) to device with
12056 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12057 * release the interrupt vector(s) for the message signaled interrupt.
12060 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12064 if (phba->intr_type == MSIX)
12065 nr_irqs = LPFC_MSIX_VECTORS;
12069 for (i = 0; i < nr_irqs; i++)
12070 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12071 pci_free_irq_vectors(phba->pcidev);
12073 /* Reset interrupt management states */
12074 phba->intr_type = NONE;
12075 phba->sli.slistat.sli_intr = 0;
12079 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12080 * @phba: pointer to lpfc hba data structure.
12081 * @id: EQ vector index or Hardware Queue index
12082 * @match: LPFC_FIND_BY_EQ = match by EQ
12083 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
12084 * Return the CPU that matches the selection criteria
12087 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12089 struct lpfc_vector_map_info *cpup;
12092 /* Loop through all CPUs */
12093 for_each_present_cpu(cpu) {
12094 cpup = &phba->sli4_hba.cpu_map[cpu];
12096 /* If we are matching by EQ, there may be multiple CPUs
12097 * using the same vector, so select the one with
12098 * LPFC_CPU_FIRST_IRQ set.
12100 if ((match == LPFC_FIND_BY_EQ) &&
12101 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12105 /* If matching by HDWQ, select the first CPU that matches */
12106 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12114 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12115 * @phba: pointer to lpfc hba data structure.
12116 * @cpu: CPU map index
12117 * @phys_id: CPU package physical id
12118 * @core_id: CPU core id
12121 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12122 uint16_t phys_id, uint16_t core_id)
12124 struct lpfc_vector_map_info *cpup;
12127 for_each_present_cpu(idx) {
12128 cpup = &phba->sli4_hba.cpu_map[idx];
12129 /* Does the cpup match the one we are looking for */
12130 if ((cpup->phys_id == phys_id) &&
12131 (cpup->core_id == core_id) &&
12140 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12141 * @phba: pointer to lpfc hba data structure.
12142 * @eqidx: index for eq and irq vector
12143 * @flag: flags to set for vector_map structure
12144 * @cpu: cpu used to index vector_map structure
12146 * The routine assigns eq info into vector_map structure
12149 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12152 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12153 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12156 cpup->flag |= flag;
12158 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12159 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12160 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12164 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12165 * @phba: pointer to lpfc hba data structure.
12167 * The routine initializes the cpu_map array structure
12170 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12172 struct lpfc_vector_map_info *cpup;
12173 struct lpfc_eq_intr_info *eqi;
12176 for_each_possible_cpu(cpu) {
12177 cpup = &phba->sli4_hba.cpu_map[cpu];
12178 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12179 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12180 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12181 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12183 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12184 INIT_LIST_HEAD(&eqi->list);
12190 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12191 * @phba: pointer to lpfc hba data structure.
12193 * The routine initializes the hba_eq_hdl array structure
12196 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12198 struct lpfc_hba_eq_hdl *eqhdl;
12201 for (i = 0; i < phba->cfg_irq_chann; i++) {
12202 eqhdl = lpfc_get_eq_hdl(i);
12203 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12204 eqhdl->phba = phba;
12209 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12210 * @phba: pointer to lpfc hba data structure.
12211 * @vectors: number of msix vectors allocated.
12213 * The routine will figure out the CPU affinity assignment for every
12214 * MSI-X vector allocated for the HBA.
12215 * In addition, the CPU to IO channel mapping will be calculated
12216 * and the phba->sli4_hba.cpu_map array will reflect this.
12219 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12221 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12222 int max_phys_id, min_phys_id;
12223 int max_core_id, min_core_id;
12224 struct lpfc_vector_map_info *cpup;
12225 struct lpfc_vector_map_info *new_cpup;
12227 struct cpuinfo_x86 *cpuinfo;
12229 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12230 struct lpfc_hdwq_stat *c_stat;
12234 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12236 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12238 /* Update CPU map with physical id and core id of each CPU */
12239 for_each_present_cpu(cpu) {
12240 cpup = &phba->sli4_hba.cpu_map[cpu];
12242 cpuinfo = &cpu_data(cpu);
12243 cpup->phys_id = cpuinfo->phys_proc_id;
12244 cpup->core_id = cpuinfo->cpu_core_id;
12245 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12246 cpup->flag |= LPFC_CPU_MAP_HYPER;
12248 /* No distinction between CPUs for other platforms */
12250 cpup->core_id = cpu;
12253 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12254 "3328 CPU %d physid %d coreid %d flag x%x\n",
12255 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12257 if (cpup->phys_id > max_phys_id)
12258 max_phys_id = cpup->phys_id;
12259 if (cpup->phys_id < min_phys_id)
12260 min_phys_id = cpup->phys_id;
12262 if (cpup->core_id > max_core_id)
12263 max_core_id = cpup->core_id;
12264 if (cpup->core_id < min_core_id)
12265 min_core_id = cpup->core_id;
12268 /* After looking at each irq vector assigned to this pcidev, it's
12269 * possible to see that not ALL CPUs have been accounted for.
12270 * Next we will set any unassigned (unaffinitized) cpu map
12271 * entries to an IRQ on the same phys_id.
12273 first_cpu = cpumask_first(cpu_present_mask);
12274 start_cpu = first_cpu;
12276 for_each_present_cpu(cpu) {
12277 cpup = &phba->sli4_hba.cpu_map[cpu];
12279 /* Is this CPU entry unassigned */
12280 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12281 /* Mark CPU as IRQ not assigned by the kernel */
12282 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12284 /* If so, find a new_cpup that's on the SAME
12285 * phys_id as cpup. start_cpu will start where we
12286 * left off so all unassigned entries don't get assigned
12287 * the IRQ of the first entry.
12289 new_cpu = start_cpu;
12290 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12291 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12292 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12293 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12294 (new_cpup->phys_id == cpup->phys_id))
12296 new_cpu = cpumask_next(
12297 new_cpu, cpu_present_mask);
12298 if (new_cpu == nr_cpumask_bits)
12299 new_cpu = first_cpu;
12301 /* At this point, we leave the CPU as unassigned */
12304 /* We found a matching phys_id, so copy the IRQ info */
12305 cpup->eq = new_cpup->eq;
12307 /* Bump start_cpu to the next slot to minimize the
12308 * chance of having multiple unassigned CPU entries
12309 * selecting the same IRQ.
12311 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12312 if (start_cpu == nr_cpumask_bits)
12313 start_cpu = first_cpu;
12315 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12316 "3337 Set Affinity: CPU %d "
12317 "eq %d from peer cpu %d same "
12319 cpu, cpup->eq, new_cpu,
12324 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12325 start_cpu = first_cpu;
12327 for_each_present_cpu(cpu) {
12328 cpup = &phba->sli4_hba.cpu_map[cpu];
12330 /* Is this entry unassigned */
12331 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12332 /* Mark it as IRQ not assigned by the kernel */
12333 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12335 /* If so, find a new_cpup that's on ANY phys_id
12336 * as the cpup. start_cpu will start where we
12337 * left off so all unassigned entries don't get
12338 * assigned the IRQ of the first entry.
12340 new_cpu = start_cpu;
12341 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12342 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12343 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12344 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12346 new_cpu = cpumask_next(
12347 new_cpu, cpu_present_mask);
12348 if (new_cpu == nr_cpumask_bits)
12349 new_cpu = first_cpu;
12351 /* We should never leave an entry unassigned */
12352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12353 "3339 Set Affinity: CPU %d "
12354 "eq %d UNASSIGNED\n",
12355 cpu, cpup->eq);
12358 /* We found an available entry, copy the IRQ info */
12359 cpup->eq = new_cpup->eq;
12361 /* Bump start_cpu to the next slot to minimize the
12362 * chance of having multiple unassigned CPU entries
12363 * selecting the same IRQ.
12365 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12366 if (start_cpu == nr_cpumask_bits)
12367 start_cpu = first_cpu;
12369 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12370 "3338 Set Affinity: CPU %d "
12371 "eq %d from peer cpu %d (%d/%d)\n",
12372 cpu, cpup->eq, new_cpu,
12373 new_cpup->phys_id, new_cpup->core_id);
12377 /* Assign hdwq indices that are unique across all cpus in the map
12378 * that are also FIRST_CPUs.
12381 for_each_present_cpu(cpu) {
12382 cpup = &phba->sli4_hba.cpu_map[cpu];
12384 /* Only FIRST IRQs get a hdwq index assignment. */
12385 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12388 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12391 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12392 "3333 Set Affinity: CPU %d (phys %d core %d): "
12393 "hdwq %d eq %d flg x%x\n",
12394 cpu, cpup->phys_id, cpup->core_id,
12395 cpup->hdwq, cpup->eq, cpup->flag);
12397 /* Associate a hdwq with each cpu_map entry
12398 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12399 * hardware queues than CPUs. For that case we will just round-robin
12400 * the available hardware queues as they get assigned to CPUs.
12401 * The next_idx is the idx from the FIRST_CPU loop above to account
12402 * for irq_chann < hdwq. The idx is used for round-robin assignments
12403 * and needs to start at 0.
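	/* Worked example (illustrative): with cfg_hdw_queue = 4, the first
	 * four FIRST_IRQ cpus take hdwq 0..3 one to one; a non-FIRST cpu
	 * with no already-assigned peer on its phys_id/core_id falls back
	 * to idx % phba->cfg_hdw_queue.
	 */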
12408 for_each_present_cpu(cpu) {
12409 cpup = &phba->sli4_hba.cpu_map[cpu];
12411 /* FIRST cpus are already mapped. */
12412 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12415 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12416 * of the unassigned cpus to the next idx so that all
12417 * hdw queues are fully utilized.
12419 if (next_idx < phba->cfg_hdw_queue) {
12420 cpup->hdwq = next_idx;
12425 /* Not a First CPU and all hdw_queues are used. Reuse a
12426 * Hardware Queue for another CPU, so be smart about it
12427 * and pick one that has its IRQ/EQ mapped to the same phys_id
12428 * (CPU package) and core_id.
12430 new_cpu = start_cpu;
12431 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12432 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12433 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12434 new_cpup->phys_id == cpup->phys_id &&
12435 new_cpup->core_id == cpup->core_id) {
12438 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12439 if (new_cpu == nr_cpumask_bits)
12440 new_cpu = first_cpu;
12443 /* If we can't match both phys_id and core_id,
12444 * settle for just a phys_id match.
12446 new_cpu = start_cpu;
12447 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12448 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12449 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12450 new_cpup->phys_id == cpup->phys_id)
12453 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12454 if (new_cpu == nr_cpumask_bits)
12455 new_cpu = first_cpu;
12458 /* Otherwise just round robin on cfg_hdw_queue */
12459 cpup->hdwq = idx % phba->cfg_hdw_queue;
12463 /* We found an available entry, copy the IRQ info */
12464 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12465 if (start_cpu == nr_cpumask_bits)
12466 start_cpu = first_cpu;
12467 cpup->hdwq = new_cpup->hdwq;
12469 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12470 "3335 Set Affinity: CPU %d (phys %d core %d): "
12471 "hdwq %d eq %d flg x%x\n",
12472 cpu, cpup->phys_id, cpup->core_id,
12473 cpup->hdwq, cpup->eq, cpup->flag);
12477 * Initialize the cpu_map slots for not-present cpus in case
12478 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12481 for_each_possible_cpu(cpu) {
12482 cpup = &phba->sli4_hba.cpu_map[cpu];
12483 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12484 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12485 c_stat->hdwq_no = cpup->hdwq;
12487 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12490 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12491 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12492 c_stat->hdwq_no = cpup->hdwq;
12494 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12495 "3340 Set Affinity: not present "
12496 "CPU %d hdwq %d\n",
12500 /* The cpu_map array will be used later during initialization
12501 * when EQ / CQ / WQs are allocated and configured.
12507 * lpfc_cpuhp_get_eq
12509 * @phba: pointer to lpfc hba data structure.
12510 * @cpu: cpu going offline
12511 * @eqlist: eq list to append to
12514 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12515 struct list_head *eqlist)
12517 const struct cpumask *maskp;
12518 struct lpfc_queue *eq;
12519 struct cpumask *tmp;
12522 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12526 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12527 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12531 * if irq is not affinitized to the cpu going
12532 * offline, then we don't need to poll the eq attached to it
12535 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12537 /* get the cpus that are online and are affinitized
12538 * to this irq vector. If the count is more than 1
12539 * then cpuhp is not going to shut down this vector.
12540 * Since this cpu has not gone offline yet, we
12541 * need >1.
12543 cpumask_and(tmp, maskp, cpu_online_mask);
12544 if (cpumask_weight(tmp) > 1)
12547 /* Now that we have an irq to shut down, get the eq
12548 * mapped to this irq. Note: multiple hdwq's in
12549 * the software can share an eq, but eventually
12550 * only one eq will be mapped to this vector
12552 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12553 list_add(&eq->_poll_list, eqlist);
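	/* Note: each eq collected on @eqlist here is handed to
	 * lpfc_sli4_start_polling() by the cpu offline callback below.
	 */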
12559 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12561 if (phba->sli_rev != LPFC_SLI_REV4)
12564 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12567 * unregistering the instance doesn't stop the polling
12568 * timer. Wait for the poll timer to retire.
12571 del_timer_sync(&phba->cpuhp_poll_timer);
12574 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12576 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12579 __lpfc_cpuhp_remove(phba);
12582 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12584 if (phba->sli_rev != LPFC_SLI_REV4)
12589 if (!list_empty(&phba->poll_list))
12590 mod_timer(&phba->cpuhp_poll_timer,
12591 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12595 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12599 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12601 if (phba->pport->load_flag & FC_UNLOADING) {
12606 if (phba->sli_rev != LPFC_SLI_REV4) {
12611 /* proceed with the hotplug */
12616 * lpfc_irq_set_aff - set IRQ affinity
12617 * @eqhdl: EQ handle
12618 * @cpu: cpu to set affinity
12622 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12624 cpumask_clear(&eqhdl->aff_mask);
12625 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12626 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12627 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
12631 * lpfc_irq_clear_aff - clear IRQ affinity
12632 * @eqhdl: EQ handle
12636 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12638 cpumask_clear(&eqhdl->aff_mask);
12639 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
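/* Usage sketch for the two helpers above: pin an EQ handle's vector to a
 * single cpu, and later return it to the irq balancer, e.g.
 *
 *	lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), cpu);
 *	...
 *	lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
 */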
12643 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12644 * @phba: pointer to HBA context object.
12645 * @cpu: cpu going offline/online
12646 * @offline: true, cpu is going offline. false, cpu is coming online.
12648 * If cpu is going offline, we'll try our best effort to find the next
12649 * online cpu on the phba's original_mask and migrate all offlining IRQ affinities.
12652 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12654 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12655 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12659 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12661 struct lpfc_vector_map_info *cpup;
12662 struct cpumask *aff_mask;
12663 unsigned int cpu_select, cpu_next, idx;
12664 const struct cpumask *orig_mask;
12666 if (phba->irq_chann_mode == NORMAL_MODE)
12669 orig_mask = &phba->sli4_hba.irq_aff_mask;
12671 if (!cpumask_test_cpu(cpu, orig_mask))
12674 cpup = &phba->sli4_hba.cpu_map[cpu];
12676 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12680 /* Find next online CPU on original mask */
12681 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12682 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12684 /* Found a valid CPU */
12685 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12686 /* Go through each eqhdl and ensure offlining
12687 * cpu aff_mask is migrated
12689 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12690 aff_mask = lpfc_get_aff_mask(idx);
12692 /* Migrate affinity */
12693 if (cpumask_test_cpu(cpu, aff_mask))
12694 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12698 /* Rely on irqbalance if no online CPUs left on NUMA */
12699 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12700 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12703 /* Migrate affinity back to this CPU */
12704 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12708 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12710 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12711 struct lpfc_queue *eq, *next;
12716 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12720 if (__lpfc_cpuhp_checks(phba, &retval))
12723 lpfc_irq_rebalance(phba, cpu, true);
12725 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12729 /* start polling on these eq's */
12730 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12731 list_del_init(&eq->_poll_list);
12732 lpfc_sli4_start_polling(eq);
12738 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12740 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12741 struct lpfc_queue *eq, *next;
12746 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12750 if (__lpfc_cpuhp_checks(phba, &retval))
12753 lpfc_irq_rebalance(phba, cpu, false);
12755 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12756 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12758 lpfc_sli4_stop_polling(eq);
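/* Note: lpfc_cpu_offline()/lpfc_cpu_online() form the cpuhp instance pair
 * registered via cpuhp_state_add_instance_nocalls(); offline migrates eq
 * servicing to timer-driven polling, online hands it back to the irq.
 */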
12765 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12766 * @phba: pointer to lpfc hba data structure.
12768 * This routine is invoked to enable the MSI-X interrupt vectors to device
12769 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
12770 * to cpus on the system.
12772 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12773 * the number of cpus on the same numa node as this adapter. The vectors are
12774 * allocated without requesting OS affinity mapping. A vector will be
12775 * allocated and assigned to each online and offline cpu. If the cpu is
12776 * online, then affinity will be set to that cpu. If the cpu is offline, then
12777 * affinity will be set to the nearest peer cpu within the numa node that is
12778 * online. If there are no online cpus within the numa node, affinity is not
12779 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12780 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is enabled.
12783 * If numa mode is not enabled and there is more than 1 vector allocated, then
12784 * the driver relies on the managed irq interface where the OS assigns
12785 * vector-to-cpu affinity. The driver will then use that affinity mapping to set up its
12786 * cpu mapping table.
12790 * other values - error
12793 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
12795 int vectors, rc, index;
12797 const struct cpumask *aff_mask = NULL;
12798 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
12799 struct lpfc_vector_map_info *cpup;
12800 struct lpfc_hba_eq_hdl *eqhdl;
12801 const struct cpumask *maskp;
12802 unsigned int flags = PCI_IRQ_MSIX;
12804 /* Set up MSI-X multi-message vectors */
12805 vectors = phba->cfg_irq_chann;
12807 if (phba->irq_chann_mode != NORMAL_MODE)
12808 aff_mask = &phba->sli4_hba.irq_aff_mask;
12811 cpu_cnt = cpumask_weight(aff_mask);
12812 vectors = min(phba->cfg_irq_chann, cpu_cnt);
12814 /* cpu: iterates over aff_mask including offline or online
12815 * cpu_select: iterates over online aff_mask to set affinity
12817 cpu = cpumask_first(aff_mask);
12818 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12820 flags |= PCI_IRQ_AFFINITY;
12823 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
12825 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12826 "0484 PCI enable MSI-X failed (%d)\n", rc);
12831 /* Assign MSI-X vectors to interrupt handlers */
12832 for (index = 0; index < vectors; index++) {
12833 eqhdl = lpfc_get_eq_hdl(index);
12834 name = eqhdl->handler_name;
12835 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
12836 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
12837 LPFC_DRIVER_HANDLER_NAME"%d", index);
12839 eqhdl->idx = index;
12840 rc = request_irq(pci_irq_vector(phba->pcidev, index),
12841 &lpfc_sli4_hba_intr_handler, 0,
12844 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12845 "0486 MSI-X fast-path (%d) "
12846 "request_irq failed (%d)\n", index, rc);
12850 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
12853 /* If found a neighboring online cpu, set affinity */
12854 if (cpu_select < nr_cpu_ids)
12855 lpfc_irq_set_aff(eqhdl, cpu_select);
12857 /* Assign EQ to cpu_map */
12858 lpfc_assign_eq_map_info(phba, index,
12859 LPFC_CPU_FIRST_IRQ,
12862 /* Iterate to next offline or online cpu in aff_mask */
12863 cpu = cpumask_next(cpu, aff_mask);
12865 /* Find next online cpu in aff_mask to set affinity */
12866 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12867 } else if (vectors == 1) {
12868 cpu = cpumask_first(cpu_present_mask);
12869 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
12872 maskp = pci_irq_get_affinity(phba->pcidev, index);
12874 /* Loop through all CPUs associated with vector index */
12875 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
12876 cpup = &phba->sli4_hba.cpu_map[cpu];
12878 /* If this is the first CPU thats assigned to
12879 * this vector, set LPFC_CPU_FIRST_IRQ.
12881 * With certain platforms it's possible that irq
12882 * vectors are affinitized to all the cpus.
12883 * This can result in each cpu_map.eq to be set
12884 * to the last vector, resulting in overwrite
12885 * of all the previous cpu_map.eq. Ensure that
12886 * each vector receives a place in cpu_map.
12887 * Later call to lpfc_cpu_affinity_check will
12888 * ensure we are nicely balanced out.
12890 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
12892 lpfc_assign_eq_map_info(phba, index,
12893 LPFC_CPU_FIRST_IRQ,
12900 if (vectors != phba->cfg_irq_chann) {
12901 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12902 "3238 Reducing IO channels to match number of "
12903 "MSI-X vectors, requested %d got %d\n",
12904 phba->cfg_irq_chann, vectors);
12905 if (phba->cfg_irq_chann > vectors)
12906 phba->cfg_irq_chann = vectors;
12912 /* free the irq already requested */
12913 for (--index; index >= 0; index--) {
12914 eqhdl = lpfc_get_eq_hdl(index);
12915 lpfc_irq_clear_aff(eqhdl);
12916 irq_set_affinity_hint(eqhdl->irq, NULL);
12917 free_irq(eqhdl->irq, eqhdl);
12920 /* Unconfigure MSI-X capability structure */
12921 pci_free_irq_vectors(phba->pcidev);
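/* Illustrative note: in NORMAL_MODE the allocation above reduces to the
 * managed-irq form, e.g.
 *
 *	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors,
 *				   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 */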
12928 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
12929 * @phba: pointer to lpfc hba data structure.
12931 * This routine is invoked to enable the MSI interrupt mode to device with
12932 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
12933 * called to enable the MSI vector. The device driver is responsible for
12934 * calling request_irq() to register the MSI vector with an interrupt
12935 * handler, which is done in this function.
12939 * other values - error
12942 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
12946 struct lpfc_hba_eq_hdl *eqhdl;
12948 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
12949 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
12951 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12952 "0487 PCI enable MSI mode success.\n");
12954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12955 "0488 PCI enable MSI mode failed (%d)\n", rc);
12956 return rc ? rc : -1;
12959 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
12960 0, LPFC_DRIVER_NAME, phba);
12962 pci_free_irq_vectors(phba->pcidev);
12963 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12964 "0490 MSI request_irq failed (%d)\n", rc);
12968 eqhdl = lpfc_get_eq_hdl(0);
12969 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
12971 cpu = cpumask_first(cpu_present_mask);
12972 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
12974 for (index = 0; index < phba->cfg_irq_chann; index++) {
12975 eqhdl = lpfc_get_eq_hdl(index);
12976 eqhdl->idx = index;
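	/* Note: MSI allocates a single vector, so only EQ handle 0 records
	 * the irq above; every handle still receives its idx so later queue
	 * setup can index the handles uniformly.
	 */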
12983 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
12984 * @phba: pointer to lpfc hba data structure.
12985 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12987 * This routine is invoked to enable device interrupt and associate driver's
12988 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
12989 * interface spec. Depending on the interrupt mode configured for the driver,
12990 * the driver will try to fall back from the configured interrupt mode to an
12991 * interrupt mode which is supported by the platform, kernel, and device in
12993 * the order: MSI-X -> MSI -> IRQ.
12997 * other values - error
13000 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13002 uint32_t intr_mode = LPFC_INTR_ERROR;
13005 if (cfg_mode == 2) {
13006 /* Preparation before conf_msi mbox cmd */
13009 /* Now, try to enable MSI-X interrupt mode */
13010 retval = lpfc_sli4_enable_msix(phba);
13012 /* Indicate initialization to MSI-X mode */
13013 phba->intr_type = MSIX;
13019 /* Fallback to MSI if MSI-X initialization failed */
13020 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13021 retval = lpfc_sli4_enable_msi(phba);
13023 /* Indicate initialization to MSI mode */
13024 phba->intr_type = MSI;
13029 /* Fallback to INTx if both MSI-X/MSI initialization failed */
13030 if (phba->intr_type == NONE) {
13031 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13032 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13034 struct lpfc_hba_eq_hdl *eqhdl;
13037 /* Indicate initialization to INTx mode */
13038 phba->intr_type = INTx;
13041 eqhdl = lpfc_get_eq_hdl(0);
13042 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13044 cpu = cpumask_first(cpu_present_mask);
13045 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13047 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13048 eqhdl = lpfc_get_eq_hdl(idx);
13057 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13058 * @phba: pointer to lpfc hba data structure.
13060 * This routine is invoked to disable device interrupt and disassociate
13061 * the driver's interrupt handler(s) from interrupt vector(s) to device
13062 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13063 * will release the interrupt vector(s) for the message signaled interrupt.
13066 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13068 /* Disable the currently initialized interrupt mode */
13069 if (phba->intr_type == MSIX) {
13071 struct lpfc_hba_eq_hdl *eqhdl;
13073 /* Free up MSI-X multi-message vectors */
13074 for (index = 0; index < phba->cfg_irq_chann; index++) {
13075 eqhdl = lpfc_get_eq_hdl(index);
13076 lpfc_irq_clear_aff(eqhdl);
13077 irq_set_affinity_hint(eqhdl->irq, NULL);
13078 free_irq(eqhdl->irq, eqhdl);
13081 free_irq(phba->pcidev->irq, phba);
13084 pci_free_irq_vectors(phba->pcidev);
13086 /* Reset interrupt management states */
13087 phba->intr_type = NONE;
13088 phba->sli.slistat.sli_intr = 0;
13092 * lpfc_unset_hba - Unset SLI3 hba device initialization
13093 * @phba: pointer to lpfc hba data structure.
13095 * This routine is invoked to unset the HBA device initialization steps to
13096 * a device with SLI-3 interface spec.
13099 lpfc_unset_hba(struct lpfc_hba *phba)
13101 struct lpfc_vport *vport = phba->pport;
13102 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
13104 spin_lock_irq(shost->host_lock);
13105 vport->load_flag |= FC_UNLOADING;
13106 spin_unlock_irq(shost->host_lock);
13108 kfree(phba->vpi_bmask);
13109 kfree(phba->vpi_ids);
13111 lpfc_stop_hba_timers(phba);
13113 phba->pport->work_port_events = 0;
13115 lpfc_sli_hba_down(phba);
13117 lpfc_sli_brdrestart(phba);
13119 lpfc_sli_disable_intr(phba);
13125 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13126 * @phba: Pointer to HBA context object.
13128 * This function is called in the SLI4 code path to wait for completion
13129 * of the device's XRI exchange busy. It will check the XRI exchange busy
13130 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13131 * that, it will check the XRI exchange busy on outstanding FCP and ELS
13132 * I/Os every 30 seconds, log an error message, and wait forever. Only when
13133 * all XRI exchange busy complete, the driver unload shall proceed with
13134 * invoking the function reset ioctl mailbox command to the CNA and the
13135 * rest of the driver unload resource release.
13138 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13140 struct lpfc_sli4_hdw_queue *qp;
13143 int io_xri_cmpl = 1;
13144 int nvmet_xri_cmpl = 1;
13145 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13147 /* Driver just aborted IOs during the hba_unset process. Pause
13148 * here to give the HBA time to complete the IO and get entries
13149 * into the abts lists.
13151 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13153 /* Wait for NVME pending IO to flush back to transport. */
13154 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13155 lpfc_nvme_wait_for_io_drain(phba);
13158 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13159 qp = &phba->sli4_hba.hdwq[idx];
13160 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13161 if (!io_xri_cmpl) /* if list is NOT empty */
13167 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13169 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13172 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13173 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13174 if (!nvmet_xri_cmpl)
13175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13176 "6424 NVMET XRI exchange busy "
13177 "wait time: %d seconds.\n",
13180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13181 "6100 IO XRI exchange busy "
13182 "wait time: %d seconds.\n",
13185 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13186 "2878 ELS XRI exchange busy "
13187 "wait time: %d seconds.\n",
13189 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13190 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13192 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13193 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13197 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13198 qp = &phba->sli4_hba.hdwq[idx];
13199 io_xri_cmpl = list_empty(
13200 &qp->lpfc_abts_io_buf_list);
13201 if (!io_xri_cmpl) /* if list is NOT empty */
13207 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13208 nvmet_xri_cmpl = list_empty(
13209 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13212 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
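	/* Timing note: LPFC_XRI_EXCH_BUSY_WAIT_T1/T2 provide the fast and
	 * slow poll periods described in the function header; wait_time
	 * accumulates in the same units to trigger the slow-path logging.
	 */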
13218 * lpfc_sli4_hba_unset - Unset the fcoe hba
13219 * @phba: Pointer to HBA context object.
13221 * This function is called in the SLI4 code path to reset the HBA's FCoE
13222 * function. The caller is not required to hold any lock. This routine
13223 * issues PCI function reset mailbox command to reset the FCoE function.
13224 * At the end of the function, it calls lpfc_hba_down_post function to
13225 * free any pending commands.
13228 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13231 LPFC_MBOXQ_t *mboxq;
13232 struct pci_dev *pdev = phba->pcidev;
13234 lpfc_stop_hba_timers(phba);
13235 hrtimer_cancel(&phba->cmf_timer);
13238 phba->sli4_hba.intr_enable = 0;
13241 * Gracefully wait out the potential current outstanding asynchronous mailbox command.
13245 /* First, block any pending async mailbox command from posted */
13246 spin_lock_irq(&phba->hbalock);
13247 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13248 spin_unlock_irq(&phba->hbalock);
13249 /* Now, trying to wait it out if we can */
13250 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13252 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13255 /* Forcefully release the outstanding mailbox command if timed out */
13256 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13257 spin_lock_irq(&phba->hbalock);
13258 mboxq = phba->sli.mbox_active;
13259 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13260 __lpfc_mbox_cmpl_put(phba, mboxq);
13261 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13262 phba->sli.mbox_active = NULL;
13263 spin_unlock_irq(&phba->hbalock);
13266 /* Abort all iocbs associated with the hba */
13267 lpfc_sli_hba_iocb_abort(phba);
13269 /* Wait for completion of device XRI exchange busy */
13270 lpfc_sli4_xri_exchange_busy_wait(phba);
13272 /* per-phba callback de-registration for hotplug event */
13274 lpfc_cpuhp_remove(phba);
13276 /* Disable PCI subsystem interrupt */
13277 lpfc_sli4_disable_intr(phba);
13279 /* Disable SR-IOV if enabled */
13280 if (phba->cfg_sriov_nr_virtfn)
13281 pci_disable_sriov(pdev);
13283 /* Stop kthread signal shall trigger work_done one more time */
13284 kthread_stop(phba->worker_thread);
13286 /* Disable FW logging to host memory */
13287 lpfc_ras_stop_fwlog(phba);
13289 /* Unset the queues shared with the hardware then release all
13290 * allocated resources.
13292 lpfc_sli4_queue_unset(phba);
13293 lpfc_sli4_queue_destroy(phba);
13295 /* Reset SLI4 HBA FCoE function */
13296 lpfc_pci_function_reset(phba);
13298 /* Free RAS DMA memory */
13299 if (phba->ras_fwlog.ras_enabled)
13300 lpfc_sli4_ras_dma_free(phba);
13302 /* Stop the SLI4 device port */
13304 phba->pport->work_port_events = 0;
13308 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13313 for (bit = 0; bit < 8; bit++) {
13314 msb = (crc >> 31) & 1;
13317 if (msb ^ (byte & 1)) {
13318 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13327 lpfc_cgn_reverse_bits(uint32_t wd)
13329 uint32_t result = 0;
13332 for (i = 0; i < 32; i++) {
13334 result |= (1 & (wd >> i));
13340 * The routine corresponds to the algorithm the HBA firmware
13341 * uses to validate the data integrity.
13344 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13348 uint8_t *data = (uint8_t *)ptr;
13350 for (i = 0; i < byteLen; ++i)
13351 crc = lpfc_cgn_crc32(crc, data[i]);
13353 result = ~lpfc_cgn_reverse_bits(crc);
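/* Usage sketch: the congestion buffer CRC is computed over the whole info
 * structure with a fixed seed and stored little-endian, e.g.
 *
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *	cp->cgn_info_crc = cpu_to_le32(crc);
 */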
13358 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13360 struct lpfc_cgn_info *cp;
13361 struct timespec64 cmpl_time;
13366 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13367 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13371 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13373 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13374 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13375 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13376 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13378 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
13379 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
13380 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13381 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13382 atomic64_set(&phba->cgn_latency_evt, 0);
13383 phba->cgn_evt_minute = 0;
13384 phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13386 memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
13387 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13388 cp->cgn_info_version = LPFC_CGN_INFO_V3;
13390 /* cgn parameters */
13391 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13392 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13393 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13394 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13396 ktime_get_real_ts64(&cmpl_time);
13397 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13399 cp->cgn_info_month = broken.tm_mon + 1;
13400 cp->cgn_info_day = broken.tm_mday;
13401 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
13402 cp->cgn_info_hour = broken.tm_hour;
13403 cp->cgn_info_minute = broken.tm_min;
13404 cp->cgn_info_second = broken.tm_sec;
13406 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13407 "2643 CGNInfo Init: Start Time "
13408 "%d/%d/%d %d:%d:%d\n",
13409 cp->cgn_info_day, cp->cgn_info_month,
13410 cp->cgn_info_year, cp->cgn_info_hour,
13411 cp->cgn_info_minute, cp->cgn_info_second);
13413 /* Fill in default LUN qdepth */
13415 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13416 cp->cgn_lunq = cpu_to_le16(size);
13419 /* last used Index initialized to 0xff already */
13421 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13422 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13423 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13424 cp->cgn_info_crc = cpu_to_le32(crc);
13426 phba->cgn_evt_timestamp = jiffies +
13427 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13431 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13433 struct lpfc_cgn_info *cp;
13434 struct timespec64 cmpl_time;
13438 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13439 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13444 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13445 memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
13447 ktime_get_real_ts64(&cmpl_time);
13448 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13450 cp->cgn_stat_month = broken.tm_mon + 1;
13451 cp->cgn_stat_day = broken.tm_mday;
13452 cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
13453 cp->cgn_stat_hour = broken.tm_hour;
13454 cp->cgn_stat_minute = broken.tm_min;
13456 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13457 "2647 CGNstat Init: Start Time "
13458 "%d/%d/%d %d:%d\n",
13459 cp->cgn_stat_day, cp->cgn_stat_month,
13460 cp->cgn_stat_year, cp->cgn_stat_hour,
13461 cp->cgn_stat_minute);
13463 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13464 cp->cgn_info_crc = cpu_to_le32(crc);
13468 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13469 * @phba: Pointer to hba context object.
13470 * @reg: flag to determine register or unregister.
13473 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13475 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13476 union lpfc_sli4_cfg_shdr *shdr;
13477 uint32_t shdr_status, shdr_add_status;
13478 LPFC_MBOXQ_t *mboxq;
13484 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13486 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13487 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13488 "HBA state x%x reg %d\n",
13489 phba->pport->port_state, reg);
13493 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13494 sizeof(struct lpfc_sli4_cfg_mhdr));
13495 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13496 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13497 LPFC_SLI4_MBX_EMBED);
13498 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13499 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13501 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13503 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13504 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13505 reg_congestion_buf->addr_lo =
13506 putPaddrLow(phba->cgn_i->phys);
13507 reg_congestion_buf->addr_hi =
13508 putPaddrHigh(phba->cgn_i->phys);
13510 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13511 shdr = (union lpfc_sli4_cfg_shdr *)
13512 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13513 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13514 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13516 mempool_free(mboxq, phba->mbox_mem_pool);
13517 if (shdr_status || shdr_add_status || rc) {
13518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13519 "2642 REG_CONGESTION_BUF mailbox "
13520 "failed with status x%x add_status x%x,"
13521 " mbx status x%x reg %d\n",
13522 shdr_status, shdr_add_status, rc, reg);
13529 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13531 lpfc_cmf_stop(phba);
13532 return __lpfc_reg_congestion_buf(phba, 0);
13536 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13538 return __lpfc_reg_congestion_buf(phba, 1);
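/* Note: lpfc_reg_congestion_buf()/lpfc_unreg_congestion_buf() are thin
 * wrappers over the same mailbox helper; @reg selects whether the buffer
 * count is set to 1 (register) or 0 (unregister).
 */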
13542 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13543 * @phba: Pointer to HBA context object.
13544 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13546 * This function is called in the SLI4 code path to read the port's
13547 * sli4 capabilities.
13549 * This function may be called from any context that can block-wait
13550 * for the completion. The expectation is that this routine is called
13551 * typically from probe_one or from the online routine.
13554 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13557 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13558 struct lpfc_pc_sli4_params *sli4_params;
13561 bool exp_wqcq_pages = true;
13562 struct lpfc_sli4_parameters *mbx_sli4_parameters;
13565 * By default, the driver assumes the SLI4 port requires RPI
13566 * header postings. The SLI4_PARAM response will correct this assumption.
13569 phba->sli4_hba.rpi_hdrs_in_use = 1;
13571 /* Read the port's SLI4 Config Parameters */
13572 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13573 sizeof(struct lpfc_sli4_cfg_mhdr));
13574 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13575 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13576 length, LPFC_SLI4_MBX_EMBED);
13577 if (!phba->sli4_hba.intr_enable)
13578 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13580 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13581 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13585 sli4_params = &phba->sli4_hba.pc_sli4_params;
13586 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13587 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13588 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13589 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13590 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13591 mbx_sli4_parameters);
13592 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13593 mbx_sli4_parameters);
13594 if (bf_get(cfg_phwq, mbx_sli4_parameters))
13595 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13597 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13598 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13599 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13600 mbx_sli4_parameters);
13601 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13602 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13603 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13604 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13605 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13606 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13607 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13608 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13609 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13610 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13611 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13612 mbx_sli4_parameters);
13613 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13614 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13615 mbx_sli4_parameters);
13616 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13617 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13619 /* Check for Extended Pre-Registered SGL support */
13620 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13622 /* Check for firmware nvme support */
13623 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13624 bf_get(cfg_xib, mbx_sli4_parameters));
13627 /* Save this to indicate the Firmware supports NVME */
13628 sli4_params->nvme = 1;
13630 /* Firmware NVME support, check driver FC4 NVME support */
13631 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13632 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13633 "6133 Disabling NVME support: "
13634 "FC4 type not supported: x%x\n",
13635 phba->cfg_enable_fc4_type);
13639 /* No firmware NVME support, check driver FC4 NVME support */
13640 sli4_params->nvme = 0;
13641 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13642 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13643 "6101 Disabling NVME support: Not "
13644 "supported by firmware (%d %d) x%x\n",
13645 bf_get(cfg_nvme, mbx_sli4_parameters),
13646 bf_get(cfg_xib, mbx_sli4_parameters),
13647 phba->cfg_enable_fc4_type);
13649 phba->nvmet_support = 0;
13650 phba->cfg_nvmet_mrq = 0;
13651 phba->cfg_nvme_seg_cnt = 0;
13653 /* If no FC4 type support, move to just SCSI support */
13654 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13656 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13660 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13661 * accommodate 512K and 1M IOs in a single nvme buf.
13663 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13664 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13666 /* Enable embedded Payload BDE if support is indicated */
13667 if (bf_get(cfg_pbde, mbx_sli4_parameters))
13668 phba->cfg_enable_pbde = 1;
13670 phba->cfg_enable_pbde = 0;
13673 * To support Suppress Response feature we must satisfy 3 conditions.
13674 * lpfc_suppress_rsp module parameter must be set (default).
13675 * In SLI4-Parameters Descriptor:
13676 * Extended Inline Buffers (XIB) must be supported.
13677 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13678 * (double negative).
13680 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13681 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13682 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13684 phba->cfg_suppress_rsp = 0;
13686 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13687 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13689 /* Make sure that sge_supp_len can be handled by the driver */
13690 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13691 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13694 * Check whether the adapter supports an embedded copy of the
13695 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13696 * to use this option, 128-byte WQEs must be used.
13698 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13699 phba->fcp_embed_io = 1;
13701 phba->fcp_embed_io = 0;
13703 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13704 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13705 bf_get(cfg_xib, mbx_sli4_parameters),
13706 phba->cfg_enable_pbde,
13707 phba->fcp_embed_io, sli4_params->nvme,
13708 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13710 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13711 LPFC_SLI_INTF_IF_TYPE_2) &&
13712 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13713 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13714 exp_wqcq_pages = false;
13716 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13717 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13719 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13720 phba->enab_exp_wqcq_pages = 1;
13722 phba->enab_exp_wqcq_pages = 0;
13724 * Check if the SLI port supports MDS Diagnostics
13726 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13727 phba->mds_diags_support = 1;
13729 phba->mds_diags_support = 0;
13732 * Check if the SLI port supports NSLER
13734 if (bf_get(cfg_nsler, mbx_sli4_parameters))
13743 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13744 * @pdev: pointer to PCI device
13745 * @pid: pointer to PCI device identifier
13747 * This routine is to be called to attach a device with SLI-3 interface spec
13748 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13749 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
13750 * information of the device and driver to see if the driver states that it can
13751 * support this kind of device. If the match is successful, the driver core
13752 * invokes this routine. If this routine determines it can claim the HBA, it
13753 * does all the initialization that it needs to do to handle the HBA properly.
13756 * 0 - driver can claim the device
13757 * negative value - driver can not claim the device
13760 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13762 struct lpfc_hba *phba;
13763 struct lpfc_vport *vport = NULL;
13764 struct Scsi_Host *shost = NULL;
13766 uint32_t cfg_mode, intr_mode;
13768 /* Allocate memory for HBA structure */
13769 phba = lpfc_hba_alloc(pdev);
13773 /* Perform generic PCI device enabling operation */
13774 error = lpfc_enable_pci_dev(phba);
13776 goto out_free_phba;
13778 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13779 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13781 goto out_disable_pci_dev;
13783 /* Set up SLI-3 specific device PCI memory space */
13784 error = lpfc_sli_pci_mem_setup(phba);
13786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13787 "1402 Failed to set up pci memory space.\n");
13788 goto out_disable_pci_dev;
13791 /* Set up SLI-3 specific device driver resources */
13792 error = lpfc_sli_driver_resource_setup(phba);
13794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13795 "1404 Failed to set up driver resource.\n");
13796 goto out_unset_pci_mem_s3;
13799 /* Initialize and populate the iocb list per host */
13801 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13803 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13804 "1405 Failed to initialize iocb list.\n");
13805 goto out_unset_driver_resource_s3;
13808 /* Set up common device driver resources */
13809 error = lpfc_setup_driver_resource_phase2(phba);
13811 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13812 "1406 Failed to set up driver resource.\n");
13813 goto out_free_iocb_list;
13816 /* Get the default values for Model Name and Description */
13817 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13819 /* Create SCSI host to the physical port */
13820 error = lpfc_create_shost(phba);
13822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13823 "1407 Failed to create scsi host.\n");
13824 goto out_unset_driver_resource;
13827 /* Configure sysfs attributes */
13828 vport = phba->pport;
13829 error = lpfc_alloc_sysfs_attr(vport);
13831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13832 "1476 Failed to allocate sysfs attr\n");
13833 goto out_destroy_shost;
13836 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13837 /* Now, trying to enable interrupt and bring up the device */
13838 cfg_mode = phba->cfg_use_msi;
13840 /* Put device to a known state before enabling interrupt */
13841 lpfc_stop_port(phba);
13842 /* Configure and enable interrupt */
13843 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
13844 if (intr_mode == LPFC_INTR_ERROR) {
13845 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13846 "0431 Failed to enable interrupt.\n");
13848 goto out_free_sysfs_attr;
13850 /* SLI-3 HBA setup */
13851 if (lpfc_sli_hba_setup(phba)) {
13852 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13853 "1477 Failed to set up hba\n");
13855 goto out_remove_device;
13858 /* Wait 50ms for the interrupts of previous mailbox commands */
13860 /* Check active interrupts on message signaled interrupts */
13861 if (intr_mode == 0 ||
13862 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
13863 /* Log the current active interrupt mode */
13864 phba->intr_mode = intr_mode;
13865 lpfc_log_intr_mode(phba, intr_mode);
13868 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13869 "0447 Configure interrupt mode (%d) "
13870 "failed active interrupt test.\n",
13872 /* Disable the current interrupt mode */
13873 lpfc_sli_disable_intr(phba);
13874 /* Try next level of interrupt mode */
13875 cfg_mode = --intr_mode;
13879 /* Perform post initialization setup */
13880 lpfc_post_init_setup(phba);
13882 /* Check if there are static vports to be created. */
13883 lpfc_create_static_vport(phba);
13888 lpfc_unset_hba(phba);
13889 out_free_sysfs_attr:
13890 lpfc_free_sysfs_attr(vport);
13892 lpfc_destroy_shost(phba);
13893 out_unset_driver_resource:
13894 lpfc_unset_driver_resource_phase2(phba);
13895 out_free_iocb_list:
13896 lpfc_free_iocb_list(phba);
13897 out_unset_driver_resource_s3:
13898 lpfc_sli_driver_resource_unset(phba);
13899 out_unset_pci_mem_s3:
13900 lpfc_sli_pci_mem_unset(phba);
13901 out_disable_pci_dev:
13902 lpfc_disable_pci_dev(phba);
13904 scsi_host_put(shost);
13906 lpfc_hba_free(phba);
13911 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
13912 * @pdev: pointer to PCI device
13914 * This routine is to be called to detach a device with SLI-3 interface
13915 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13916 * removed from PCI bus, it performs all the necessary cleanup for the HBA
13917 * device to be removed from the PCI subsystem properly.
13920 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
13922 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13923 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13924 struct lpfc_vport **vports;
13925 struct lpfc_hba *phba = vport->phba;
13928 spin_lock_irq(&phba->hbalock);
13929 vport->load_flag |= FC_UNLOADING;
13930 spin_unlock_irq(&phba->hbalock);
13932 lpfc_free_sysfs_attr(vport);
13934 /* Release all the vports against this physical port */
13935 vports = lpfc_create_vport_work_array(phba);
13936 if (vports != NULL)
13937 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13938 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13940 fc_vport_terminate(vports[i]->fc_vport);
13942 lpfc_destroy_vport_work_array(phba, vports);
13944 /* Remove FC host with the physical port */
13945 fc_remove_host(shost);
13946 scsi_remove_host(shost);
13948 /* Clean up all nodes, mailboxes and IOs. */
13949 lpfc_cleanup(vport);
13952 * Bring down the SLI Layer. This step disables all interrupts,
13953 * clears the rings, discards all mailbox commands, and resets
13957 /* HBA interrupt will be disabled after this call */
13958 lpfc_sli_hba_down(phba);
13959 /* Stop kthread signal shall trigger work_done one more time */
13960 kthread_stop(phba->worker_thread);
13961 /* Final cleanup of txcmplq and reset the HBA */
13962 lpfc_sli_brdrestart(phba);
13964 kfree(phba->vpi_bmask);
13965 kfree(phba->vpi_ids);
13967 lpfc_stop_hba_timers(phba);
13968 spin_lock_irq(&phba->port_list_lock);
13969 list_del_init(&vport->listentry);
13970 spin_unlock_irq(&phba->port_list_lock);
13972 lpfc_debugfs_terminate(vport);
13974 /* Disable SR-IOV if enabled */
13975 if (phba->cfg_sriov_nr_virtfn)
13976 pci_disable_sriov(pdev);
13978 /* Disable interrupt */
13979 lpfc_sli_disable_intr(phba);
13981 scsi_host_put(shost);
13984 * Call scsi_free before mem_free since scsi bufs are released to their
13985 * corresponding pools here.
13987 lpfc_scsi_free(phba);
13988 lpfc_free_iocb_list(phba);
13990 lpfc_mem_free_all(phba);
13992 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
13993 phba->hbqslimp.virt, phba->hbqslimp.phys);
13995 /* Free resources associated with SLI2 interface */
13996 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
13997 phba->slim2p.virt, phba->slim2p.phys);
13999 /* unmap adapter SLIM and Control Registers */
14000 iounmap(phba->ctrl_regs_memmap_p);
14001 iounmap(phba->slim_memmap_p);
14003 lpfc_hba_free(phba);
14005 pci_release_mem_regions(pdev);
14006 pci_disable_device(pdev);
14010 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14011 * @dev_d: pointer to device
14013 * This routine is to be called from the kernel's PCI subsystem to support
14014 * system Power Management (PM) to device with SLI-3 interface spec. When
14015 * PM invokes this method, it quiesces the device by stopping the driver's
14016 * worker thread for the device, turning off device's interrupt and DMA,
14017 * and bringing the device offline. Note that as the driver implements the
14018 * minimum PM requirements to a power-aware driver's PM support for the
14019 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14020 * to the suspend() method call will be treated as SUSPEND and the driver will
14021 * fully reinitialize its device during resume() method call, the driver will
14022 * set device to PCI_D3hot state in PCI config space instead of setting it
14023 * according to the @msg provided by the PM.
14026 * 0 - driver suspended the device
14029 static int __maybe_unused
14030 lpfc_pci_suspend_one_s3(struct device *dev_d)
14032 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14033 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14035 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14036 "0473 PCI device Power Management suspend.\n");
14038 /* Bring down the device */
14039 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14040 lpfc_offline(phba);
14041 kthread_stop(phba->worker_thread);
14043 /* Disable interrupt from device */
14044 lpfc_sli_disable_intr(phba);
14050 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14051 * @dev_d: pointer to device
14053 * This routine is to be called from the kernel's PCI subsystem to support
14054 * system Power Management (PM) to device with SLI-3 interface spec. When PM
14055 * invokes this method, it restores the device's PCI config space state and
14056 * fully reinitializes the device and brings it online. Note that as the
14057 * driver implements the minimum PM requirements to a power-aware driver's
14058 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
14059 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
14060 * driver will fully reinitialize its device during resume() method call,
14061 * the device will be set to PCI_D0 directly in PCI config space before
14062 * restoring the state.
14065 * 0 - driver resumed the device
14068 static int __maybe_unused
14069 lpfc_pci_resume_one_s3(struct device *dev_d)
14071 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14072 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14073 uint32_t intr_mode;
14076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14077 "0452 PCI device Power Management resume.\n");
14079 /* Startup the kernel thread for this host adapter. */
14080 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14081 "lpfc_worker_%d", phba->brd_no);
14082 if (IS_ERR(phba->worker_thread)) {
14083 error = PTR_ERR(phba->worker_thread);
14084 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14085 "0434 PM resume failed to start worker "
14086 "thread: error=x%x.\n", error);
14090 /* Init cpu_map array */
14091 lpfc_cpu_map_array_init(phba);
14092 /* Init hba_eq_hdl array */
14093 lpfc_hba_eq_hdl_array_init(phba);
14094 /* Configure and enable interrupt */
14095 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14096 if (intr_mode == LPFC_INTR_ERROR) {
14097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14098 "0430 PM resume Failed to enable interrupt\n");
14101 phba->intr_mode = intr_mode;
14103 /* Restart HBA and bring it online */
14104 lpfc_sli_brdrestart(phba);
14107 /* Log the current active interrupt mode */
14108 lpfc_log_intr_mode(phba, phba->intr_mode);
14114 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14115 * @phba: pointer to lpfc hba data structure.
14117 * This routine is called to prepare the SLI3 device for PCI slot recover. It
14118 * aborts all the outstanding SCSI I/Os to the pci device.
14121 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14124 "2723 PCI channel I/O abort preparing for recovery\n");
14126 /*
14127 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14128 * and let the SCSI mid-layer retry them to recover.
14129 */
14130 lpfc_sli_abort_fcp_rings(phba);
14134 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14135 * @phba: pointer to lpfc hba data structure.
14137 * This routine is called to prepare the SLI3 device for PCI slot reset. It
14138 * disables the device interrupt and pci device, and aborts the internal FCP
14139 * pending I/Os.
14142 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14144 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14145 "2710 PCI channel disable preparing for reset\n");
14147 /* Block any management I/Os to the device */
14148 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14150 /* Block all SCSI devices' I/Os on the host */
14151 lpfc_scsi_dev_block(phba);
14153 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
14154 lpfc_sli_flush_io_rings(phba);
14156 /* stop all timers */
14157 lpfc_stop_hba_timers(phba);
14159 /* Disable interrupt and pci device */
14160 lpfc_sli_disable_intr(phba);
14161 pci_disable_device(phba->pcidev);
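/*
 * The ordering above matters: management and SCSI I/O are blocked first
 * so no new requests race with the flush, outstanding I/Os are then
 * flushed back to the upper layers, and only after the timers are
 * stopped are the interrupt and the PCI device torn down.
 */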
14165 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14166 * @phba: pointer to lpfc hba data structure.
14168 * This routine is called to prepare the SLI3 device for PCI slot permanently
14169 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14170 * pending I/Os.
14173 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14176 "2711 PCI channel permanent disable for failure\n");
14177 /* Block all SCSI devices' I/Os on the host */
14178 lpfc_scsi_dev_block(phba);
14180 /* stop all timers */
14181 lpfc_stop_hba_timers(phba);
14183 /* Clean up all driver's outstanding SCSI I/Os */
14184 lpfc_sli_flush_io_rings(phba);
14188 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14189 * @pdev: pointer to PCI device.
14190 * @state: the current PCI connection state.
14192 * This routine is called from the PCI subsystem for I/O error handling to
14193 * device with SLI-3 interface spec. This function is called by the PCI
14194 * subsystem after a PCI bus error affecting this device has been detected.
14195 * When this function is invoked, it will need to stop all the I/Os and
14196 * interrupt(s) to the device. Once that is done, it will return
14197 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14201 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14202 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14203 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14205 static pci_ers_result_t
14206 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14208 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14209 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14212 case pci_channel_io_normal:
14213 /* Non-fatal error, prepare for recovery */
14214 lpfc_sli_prep_dev_for_recover(phba);
14215 return PCI_ERS_RESULT_CAN_RECOVER;
14216 case pci_channel_io_frozen:
14217 /* Fatal error, prepare for slot reset */
14218 lpfc_sli_prep_dev_for_reset(phba);
14219 return PCI_ERS_RESULT_NEED_RESET;
14220 case pci_channel_io_perm_failure:
14221 /* Permanent failure, prepare for device down */
14222 lpfc_sli_prep_dev_for_perm_failure(phba);
14223 return PCI_ERS_RESULT_DISCONNECT;
14225 /* Unknown state, prepare and request slot reset */
14226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14227 "0472 Unknown PCI error state: x%x\n", state);
14228 lpfc_sli_prep_dev_for_reset(phba);
14229 return PCI_ERS_RESULT_NEED_RESET;
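/*
 * Summary of the pci_channel_state_t to pci_ers_result_t mapping above:
 *   pci_channel_io_normal       -> CAN_RECOVER  (abort I/O, no reset)
 *   pci_channel_io_frozen       -> NEED_RESET   (quiesce, await slot reset)
 *   pci_channel_io_perm_failure -> DISCONNECT   (device is going away)
 *   anything else               -> NEED_RESET   (treat unknown as fatal)
 */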
14234 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14235 * @pdev: pointer to PCI device.
14237 * This routine is called from the PCI subsystem for error handling to
14238 * device with SLI-3 interface spec. This is called after PCI bus has been
14239 * reset to restart the PCI card from scratch, as if from a cold-boot.
14240 * During the PCI subsystem error recovery, after driver returns
14241 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14242 * recovery and then call this routine before calling the .resume method
14243 * to recover the device. This function will initialize the HBA device,
14244 * enable the interrupt, but it will just put the HBA to offline state
14245 * without passing any I/O traffic.
14248 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
14249 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14251 static pci_ers_result_t
14252 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14254 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14255 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14256 struct lpfc_sli *psli = &phba->sli;
14257 uint32_t intr_mode;
14259 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14260 if (pci_enable_device_mem(pdev)) {
14261 printk(KERN_ERR "lpfc: Cannot re-enable "
14262 "PCI device after reset.\n");
14263 return PCI_ERS_RESULT_DISCONNECT;
14266 pci_restore_state(pdev);
14268 /*
14269 * As the new kernel behavior of the pci_restore_state() API call clears
14270 * the device's saved_state flag, the restored state needs to be saved again.
14271 */
14272 pci_save_state(pdev);
14274 if (pdev->is_busmaster)
14275 pci_set_master(pdev);
14277 spin_lock_irq(&phba->hbalock);
14278 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14279 spin_unlock_irq(&phba->hbalock);
14281 /* Configure and enable interrupt */
14282 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14283 if (intr_mode == LPFC_INTR_ERROR) {
14284 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14285 "0427 Cannot re-enable interrupt after "
14287 return PCI_ERS_RESULT_DISCONNECT;
14289 phba->intr_mode = intr_mode;
14291 /* Take device offline, it will perform cleanup */
14292 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14293 lpfc_offline(phba);
14294 lpfc_sli_brdrestart(phba);
14296 /* Log the current active interrupt mode */
14297 lpfc_log_intr_mode(phba, phba->intr_mode);
14299 return PCI_ERS_RESULT_RECOVERED;
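/*
 * At this point the HBA has been reset and its interrupt re-enabled, but
 * the port is deliberately left offline; I/O traffic does not restart
 * until the PCI core invokes the .resume handler, lpfc_io_resume_s3().
 */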
14303 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14304 * @pdev: pointer to PCI device
14306 * This routine is called from the PCI subsystem for error handling to device
14307 * with SLI-3 interface spec. It is called when kernel error recovery tells
14308 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14309 * error recovery. After this call, traffic can start to flow from this
14310 * device again.
14313 lpfc_io_resume_s3(struct pci_dev *pdev)
14315 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14316 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14318 /* Bring device online, it will be no-op for non-fatal error resume */
14323 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14324 * @phba: pointer to lpfc hba data structure.
14326 * returns the number of ELS/CT IOCBs to reserve
14329 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14331 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14333 if (phba->sli_rev == LPFC_SLI_REV4) {
14334 if (max_xri <= 100)
14335 return 10;
14336 else if (max_xri <= 256)
14337 return 25;
14338 else if (max_xri <= 512)
14339 return 50;
14340 else if (max_xri <= 1024)
14341 return 100;
14342 else if (max_xri <= 1536)
14343 return 150;
14344 else if (max_xri <= 2048)
14345 return 200;
14346 else
14347 return 250;
14348 } else
14349 return 0;
14350 }
14353 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14354 * @phba: pointer to lpfc hba data structure.
14356 * returns the number of ELS/CT + NVMET IOCBs to reserve
14359 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14361 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14363 if (phba->nvmet_support)
14364 max_xri += LPFC_NVMET_BUF_POST;
14365 return max_xri;
14366 }
14370 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14371 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14372 const struct firmware *fw)
14377 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14378 /* Three cases: (1) FW was not supported on the detected adapter.
14379 * (2) FW update has been locked out administratively.
14380 * (3) Some other error during FW update.
14381 * In each case, an unmaskable message is written to the console
14382 * for admin diagnosis.
14384 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14385 (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14386 magic_number != MAGIC_NUMBER_G6) ||
14387 (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14388 magic_number != MAGIC_NUMBER_G7) ||
14389 (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14390 magic_number != MAGIC_NUMBER_G7P)) {
14391 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14392 "3030 This firmware version is not supported on"
14393 " this HBA model. Device:%x Magic:%x Type:%x "
14394 "ID:%x Size %d %zd\n",
14395 phba->pcidev->device, magic_number, ftype, fid,
14398 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14400 "3021 Firmware downloads have been prohibited "
14401 "by a system configuration setting on "
14402 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14404 phba->pcidev->device, magic_number, ftype, fid,
14408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14409 "3022 FW Download failed. Add Status x%x "
14410 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14412 offset, phba->pcidev->device, magic_number,
14413 ftype, fid, fsize, fw->size);
14420 * lpfc_write_firmware - attempt to write a firmware image to the port
14421 * @fw: pointer to firmware image returned from request_firmware.
14422 * @context: opaque context pointer; points to the lpfc hba data structure.
14426 lpfc_write_firmware(const struct firmware *fw, void *context)
14428 struct lpfc_hba *phba = (struct lpfc_hba *)context;
14429 char fwrev[FW_REV_STR_SIZE];
14430 struct lpfc_grp_hdr *image;
14431 struct list_head dma_buffer_list;
14433 struct lpfc_dmabuf *dmabuf, *next;
14434 uint32_t offset = 0, temp_offset = 0;
14435 uint32_t magic_number, ftype, fid, fsize;
14437 /* It can be null in no-wait mode, sanity check */
14442 image = (struct lpfc_grp_hdr *)fw->data;
14444 magic_number = be32_to_cpu(image->magic_number);
14445 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14446 fid = bf_get_be32(lpfc_grp_hdr_id, image);
14447 fsize = be32_to_cpu(image->size);
14449 INIT_LIST_HEAD(&dma_buffer_list);
14450 lpfc_decode_firmware_rev(phba, fwrev, 1);
14451 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14452 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14453 "3023 Updating Firmware, Current Version:%s "
14454 "New Version:%s\n",
14455 fwrev, image->revision);
14456 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14457 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14463 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14467 if (!dmabuf->virt) {
14472 list_add_tail(&dmabuf->list, &dma_buffer_list);
14474 while (offset < fw->size) {
14475 temp_offset = offset;
14476 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14477 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14478 memcpy(dmabuf->virt,
14479 fw->data + temp_offset,
14480 fw->size - temp_offset);
14481 temp_offset = fw->size;
14484 memcpy(dmabuf->virt, fw->data + temp_offset,
14486 temp_offset += SLI4_PAGE_SIZE;
14488 rc = lpfc_wr_object(phba, &dma_buffer_list,
14489 (fw->size - offset), &offset);
14491 rc = lpfc_log_write_firmware_error(phba, offset,
14502 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14503 "3029 Skipped Firmware update, Current "
14504 "Version:%s New Version:%s\n",
14505 fwrev, image->revision);
14508 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14509 list_del(&dmabuf->list);
14510 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14511 dmabuf->virt, dmabuf->phys);
14514 release_firmware(fw);
14517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14518 "3062 Firmware update error, status %d.\n", rc);
14520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14521 "3024 Firmware update success: size %d.\n", rc);
14525 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14526 * @phba: pointer to lpfc hba data structure.
14527 * @fw_upgrade: which firmware to update.
14529 * This routine is called to perform a Linux generic firmware upgrade on a
14530 * device that supports such a feature.
14533 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14535 uint8_t file_name[ELX_MODEL_NAME_SIZE];
14537 const struct firmware *fw;
14539 /* Only supported on SLI4 interface type 2 for now */
14540 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14541 LPFC_SLI_INTF_IF_TYPE_2)
14544 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
14546 if (fw_upgrade == INT_FW_UPGRADE) {
14547 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14548 file_name, &phba->pcidev->dev,
14549 GFP_KERNEL, (void *)phba,
14550 lpfc_write_firmware);
14551 } else if (fw_upgrade == RUN_FW_UPGRADE) {
14552 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14554 lpfc_write_firmware(fw, (void *)phba);
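/*
 * Firmware images are requested by model name with a ".grp" suffix; for
 * a hypothetical HBA whose ModelName is "LPe32000", the firmware loader
 * would be asked for "LPe32000.grp". INT_FW_UPGRADE uses the asynchronous
 * uevent-driven request, while RUN_FW_UPGRADE blocks in
 * request_firmware() and writes the image synchronously.
 */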
14563 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14564 * @pdev: pointer to PCI device
14565 * @pid: pointer to PCI device identifier
14567 * This routine is called from the kernel's PCI subsystem to probe a device
14568 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec
14569 * is presented on the PCI bus, the kernel PCI subsystem looks at the PCI
14570 * device-specific information of the device and the driver to see whether
14571 * the driver can support this kind of device. If the match is successful, the driver
14572 * core invokes this routine. If this routine determines it can claim the HBA,
14573 * it does all the initialization that it needs to do to handle the HBA
14574 * device properly.
14577 * 0 - driver can claim the device
14578 * negative value - driver can not claim the device
14581 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14583 struct lpfc_hba *phba;
14584 struct lpfc_vport *vport = NULL;
14585 struct Scsi_Host *shost = NULL;
14587 uint32_t cfg_mode, intr_mode;
14589 /* Allocate memory for HBA structure */
14590 phba = lpfc_hba_alloc(pdev);
14594 INIT_LIST_HEAD(&phba->poll_list);
14596 /* Perform generic PCI device enabling operation */
14597 error = lpfc_enable_pci_dev(phba);
14599 goto out_free_phba;
14601 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14602 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14604 goto out_disable_pci_dev;
14606 /* Set up SLI-4 specific device PCI memory space */
14607 error = lpfc_sli4_pci_mem_setup(phba);
14609 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14610 "1410 Failed to set up pci memory space.\n");
14611 goto out_disable_pci_dev;
14614 /* Set up SLI-4 Specific device driver resources */
14615 error = lpfc_sli4_driver_resource_setup(phba);
14617 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14618 "1412 Failed to set up driver resource.\n");
14619 goto out_unset_pci_mem_s4;
14622 INIT_LIST_HEAD(&phba->active_rrq_list);
14623 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14625 /* Set up common device driver resources */
14626 error = lpfc_setup_driver_resource_phase2(phba);
14628 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14629 "1414 Failed to set up driver resource.\n");
14630 goto out_unset_driver_resource_s4;
14633 /* Get the default values for Model Name and Description */
14634 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14636 /* Now, trying to enable interrupt and bring up the device */
14637 cfg_mode = phba->cfg_use_msi;
14639 /* Put device to a known state before enabling interrupt */
14640 phba->pport = NULL;
14641 lpfc_stop_port(phba);
14643 /* Init cpu_map array */
14644 lpfc_cpu_map_array_init(phba);
14646 /* Init hba_eq_hdl array */
14647 lpfc_hba_eq_hdl_array_init(phba);
14649 /* Configure and enable interrupt */
14650 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14651 if (intr_mode == LPFC_INTR_ERROR) {
14652 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14653 "0426 Failed to enable interrupt.\n");
14655 goto out_unset_driver_resource;
14657 /* Default to single EQ for non-MSI-X */
14658 if (phba->intr_type != MSIX) {
14659 phba->cfg_irq_chann = 1;
14660 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14661 if (phba->nvmet_support)
14662 phba->cfg_nvmet_mrq = 1;
14665 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14667 /* Create SCSI host to the physical port */
14668 error = lpfc_create_shost(phba);
14670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14671 "1415 Failed to create scsi host.\n");
14672 goto out_disable_intr;
14674 vport = phba->pport;
14675 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14677 /* Configure sysfs attributes */
14678 error = lpfc_alloc_sysfs_attr(vport);
14680 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14681 "1416 Failed to allocate sysfs attr\n");
14682 goto out_destroy_shost;
14685 /* Set up SLI-4 HBA */
14686 if (lpfc_sli4_hba_setup(phba)) {
14687 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14688 "1421 Failed to set up hba\n");
14690 goto out_free_sysfs_attr;
14693 /* Log the current active interrupt mode */
14694 phba->intr_mode = intr_mode;
14695 lpfc_log_intr_mode(phba, intr_mode);
14697 /* Perform post initialization setup */
14698 lpfc_post_init_setup(phba);
14700 /* NVME support in FW earlier in the driver load corrects the
14701 * FC4 type making a check for nvme_support unnecessary.
14703 if (phba->nvmet_support == 0) {
14704 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14705 /* Create NVME binding with nvme_fc_transport. This
14706 * ensures the vport is initialized. If the localport
14707 * create fails, it should not unload the driver to
14708 * support field issues.
14710 error = lpfc_nvme_create_localport(vport);
14712 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14713 "6004 NVME registration "
14714 "failed, error x%x\n",
14720 /* check for firmware upgrade or downgrade */
14721 if (phba->cfg_request_firmware_upgrade)
14722 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14724 /* Check if there are static vports to be created. */
14725 lpfc_create_static_vport(phba);
14727 /* Enable RAS FW log support */
14728 lpfc_sli4_ras_setup(phba);
14730 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14731 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
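/*
 * Adding a cpuhp instance here ties the SLI4 poll timer to CPU hotplug
 * events, so EQ polling can be torn down and re-established as CPUs go
 * offline and come back online.
 */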
14735 out_free_sysfs_attr:
14736 lpfc_free_sysfs_attr(vport);
14738 lpfc_destroy_shost(phba);
14740 lpfc_sli4_disable_intr(phba);
14741 out_unset_driver_resource:
14742 lpfc_unset_driver_resource_phase2(phba);
14743 out_unset_driver_resource_s4:
14744 lpfc_sli4_driver_resource_unset(phba);
14745 out_unset_pci_mem_s4:
14746 lpfc_sli4_pci_mem_unset(phba);
14747 out_disable_pci_dev:
14748 lpfc_disable_pci_dev(phba);
14750 scsi_host_put(shost);
14752 lpfc_hba_free(phba);
14757 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14758 * @pdev: pointer to PCI device
14760 * This routine is called from the kernel's PCI subsystem to remove a device
14761 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14762 * removed from PCI bus, it performs all the necessary cleanup for the HBA
14763 * device to be removed from the PCI subsystem properly.
14766 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14768 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14769 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14770 struct lpfc_vport **vports;
14771 struct lpfc_hba *phba = vport->phba;
14774 /* Mark the device unloading flag */
14775 spin_lock_irq(&phba->hbalock);
14776 vport->load_flag |= FC_UNLOADING;
14777 spin_unlock_irq(&phba->hbalock);
14779 lpfc_unreg_congestion_buf(phba);
14781 lpfc_free_sysfs_attr(vport);
14783 /* Release all the vports against this physical port */
14784 vports = lpfc_create_vport_work_array(phba);
14785 if (vports != NULL)
14786 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14787 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14789 fc_vport_terminate(vports[i]->fc_vport);
14791 lpfc_destroy_vport_work_array(phba, vports);
14793 /* Remove FC host with the physical port */
14794 fc_remove_host(shost);
14795 scsi_remove_host(shost);
14797 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
14798 * localports are destroyed afterwards to clean up all transport memory.
14800 lpfc_cleanup(vport);
14801 lpfc_nvmet_destroy_targetport(phba);
14802 lpfc_nvme_destroy_localport(vport);
14804 /* De-allocate multi-XRI pools */
14805 if (phba->cfg_xri_rebalancing)
14806 lpfc_destroy_multixri_pools(phba);
14809 * Bring down the SLI Layer. This step disables all interrupts,
14810 * clears the rings, discards all mailbox commands, and resets
14811 * the HBA FCoE function.
14813 lpfc_debugfs_terminate(vport);
14815 lpfc_stop_hba_timers(phba);
14816 spin_lock_irq(&phba->port_list_lock);
14817 list_del_init(&vport->listentry);
14818 spin_unlock_irq(&phba->port_list_lock);
14820 /* Perform scsi free before driver resource_unset since scsi
14821 * buffers are released to their corresponding pools here.
14823 lpfc_io_free(phba);
14824 lpfc_free_iocb_list(phba);
14825 lpfc_sli4_hba_unset(phba);
14827 lpfc_unset_driver_resource_phase2(phba);
14828 lpfc_sli4_driver_resource_unset(phba);
14830 /* Unmap adapter Control and Doorbell registers */
14831 lpfc_sli4_pci_mem_unset(phba);
14833 /* Release PCI resources and disable device's PCI function */
14834 scsi_host_put(shost);
14835 lpfc_disable_pci_dev(phba);
14837 /* Finally, free the driver's device data structure */
14838 lpfc_hba_free(phba);
14844 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
14845 * @dev_d: pointer to device
14847 * This routine is called from the kernel's PCI subsystem to support system
14848 * Power Management (PM) for a device with SLI-4 interface spec. When PM
14849 * invokes this method, it quiesces the device by stopping the driver's
14850 * worker thread for the device, turning off the device's interrupt and DMA,
14851 * and bringing the device offline. Note that the driver implements only the
14852 * minimum PM requirements of a power-aware driver for suspend/resume: all
14853 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
14854 * method call are treated as SUSPEND, and the driver fully reinitializes
14855 * its device during the resume() method call. Accordingly, the driver sets
14856 * the device to PCI_D3hot state in PCI config space instead of setting it
14857 * according to the @msg provided by the PM.
14860 * 0 - driver suspended the device
14863 static int __maybe_unused
14864 lpfc_pci_suspend_one_s4(struct device *dev_d)
14866 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14867 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14869 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14870 "2843 PCI device Power Management suspend.\n");
14872 /* Bring down the device */
14873 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14874 lpfc_offline(phba);
14875 kthread_stop(phba->worker_thread);
14877 /* Disable interrupt from device */
14878 lpfc_sli4_disable_intr(phba);
14879 lpfc_sli4_queue_destroy(phba);
14885 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
14886 * @dev_d: pointer to device
14888 * This routine is called from the kernel's PCI subsystem to support system
14889 * Power Management (PM) for a device with SLI-4 interface spec. When PM
14890 * invokes this method, it restores the device's PCI config space state and
14891 * fully reinitializes the device and brings it online. Note that the driver
14892 * implements only the minimum PM requirements of a power-aware driver for
14893 * suspend/resume: all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14894 * to the suspend() method call are treated as SUSPEND, and the driver
14895 * fully reinitializes its device during the resume() method call. The device
14896 * is set to PCI_D0 directly in PCI config space before restoring the state.
14900 * 0 - driver resumed the device
14903 static int __maybe_unused
14904 lpfc_pci_resume_one_s4(struct device *dev_d)
14906 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14907 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14908 uint32_t intr_mode;
14911 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14912 "0292 PCI device Power Management resume.\n");
14914 /* Startup the kernel thread for this host adapter. */
14915 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14916 "lpfc_worker_%d", phba->brd_no);
14917 if (IS_ERR(phba->worker_thread)) {
14918 error = PTR_ERR(phba->worker_thread);
14919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14920 "0293 PM resume failed to start worker "
14921 "thread: error=x%x.\n", error);
14925 /* Configure and enable interrupt */
14926 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
14927 if (intr_mode == LPFC_INTR_ERROR) {
14928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14929 "0294 PM resume Failed to enable interrupt\n");
14932 phba->intr_mode = intr_mode;
14934 /* Restart HBA and bring it online */
14935 lpfc_sli_brdrestart(phba);
14938 /* Log the current active interrupt mode */
14939 lpfc_log_intr_mode(phba, phba->intr_mode);
14945 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
14946 * @phba: pointer to lpfc hba data structure.
14948 * This routine is called to prepare the SLI4 device for PCI slot recover. It
14949 * aborts all the outstanding SCSI I/Os to the pci device.
14952 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
14954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14955 "2828 PCI channel I/O abort preparing for recovery\n");
14956 /*
14957 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14958 * and let the SCSI mid-layer retry them to recover.
14959 */
14960 lpfc_sli_abort_fcp_rings(phba);
14964 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
14965 * @phba: pointer to lpfc hba data structure.
14967 * This routine is called to prepare the SLI4 device for PCI slot reset. It
14968 * disables the device interrupt and pci device, and aborts the internal FCP
14969 * pending I/Os.
14972 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
14974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14975 "2826 PCI channel disable preparing for reset\n");
14977 /* Block any management I/Os to the device */
14978 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
14980 /* Block all SCSI devices' I/Os on the host */
14981 lpfc_scsi_dev_block(phba);
14983 /* Flush all driver's outstanding I/Os as we are to reset */
14984 lpfc_sli_flush_io_rings(phba);
14986 /* stop all timers */
14987 lpfc_stop_hba_timers(phba);
14989 /* Disable interrupt and pci device */
14990 lpfc_sli4_disable_intr(phba);
14991 lpfc_sli4_queue_destroy(phba);
14992 pci_disable_device(phba->pcidev);
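/*
 * Unlike the SLI-3 reset preparation, the SLI-4 path also destroys the
 * queue set (lpfc_sli4_queue_destroy) before disabling the PCI device,
 * so the queues can be recreated from scratch once the slot is reset.
 */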
14996 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
14997 * @phba: pointer to lpfc hba data structure.
14999 * This routine is called to prepare the SLI4 device for PCI slot permanently
15000 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
15001 * pending I/Os.
15004 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15006 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15007 "2827 PCI channel permanent disable for failure\n");
15009 /* Block all SCSI devices' I/Os on the host */
15010 lpfc_scsi_dev_block(phba);
15012 /* stop all timers */
15013 lpfc_stop_hba_timers(phba);
15015 /* Clean up all driver's outstanding I/Os */
15016 lpfc_sli_flush_io_rings(phba);
15020 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15021 * @pdev: pointer to PCI device.
15022 * @state: the current PCI connection state.
15024 * This routine is called from the PCI subsystem for error handling to device
15025 * with SLI-4 interface spec. This function is called by the PCI subsystem
15026 * after a PCI bus error affecting this device has been detected. When this
15027 * function is invoked, it will need to stop all the I/Os and interrupt(s)
15028 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
15029 * for the PCI subsystem to perform proper recovery as desired.
15032 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15033 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15035 static pci_ers_result_t
15036 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15038 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15039 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15042 case pci_channel_io_normal:
15043 /* Non-fatal error, prepare for recovery */
15044 lpfc_sli4_prep_dev_for_recover(phba);
15045 return PCI_ERS_RESULT_CAN_RECOVER;
15046 case pci_channel_io_frozen:
15047 phba->hba_flag |= HBA_PCI_ERR;
15048 /* Fatal error, prepare for slot reset */
15049 lpfc_sli4_prep_dev_for_reset(phba);
15050 return PCI_ERS_RESULT_NEED_RESET;
15051 case pci_channel_io_perm_failure:
15052 phba->hba_flag |= HBA_PCI_ERR;
15053 /* Permanent failure, prepare for device down */
15054 lpfc_sli4_prep_dev_for_perm_failure(phba);
15055 return PCI_ERS_RESULT_DISCONNECT;
15057 phba->hba_flag |= HBA_PCI_ERR;
15058 /* Unknown state, prepare and request slot reset */
15059 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15060 "2825 Unknown PCI error state: x%x\n", state);
15061 lpfc_sli4_prep_dev_for_reset(phba);
15062 return PCI_ERS_RESULT_NEED_RESET;
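/*
 * HBA_PCI_ERR is set for every fatal or unknown channel state so other
 * driver paths can tell the PCI channel is unusable; the flag is cleared
 * again in lpfc_io_slot_reset_s4() once the device is re-enabled.
 */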
15067 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
15068 * @pdev: pointer to PCI device.
15070 * This routine is called from the PCI subsystem for error handling to device
15071 * with SLI-4 interface spec. It is called after PCI bus has been reset to
15072 * restart the PCI card from scratch, as if from a cold-boot. During the
15073 * PCI subsystem error recovery, after the driver returns
15074 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15075 * recovery and then call this routine before calling the .resume method to
15076 * recover the device. This function will initialize the HBA device, enable
15077 * the interrupt, but it will just put the HBA to offline state without
15078 * passing any I/O traffic.
15081 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15082 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15084 static pci_ers_result_t
15085 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15087 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15088 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15089 struct lpfc_sli *psli = &phba->sli;
15090 uint32_t intr_mode;
15092 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15093 if (pci_enable_device_mem(pdev)) {
15094 printk(KERN_ERR "lpfc: Cannot re-enable "
15095 "PCI device after reset.\n");
15096 return PCI_ERS_RESULT_DISCONNECT;
15099 pci_restore_state(pdev);
15101 phba->hba_flag &= ~HBA_PCI_ERR;
15102 /*
15103 * As the new kernel behavior of the pci_restore_state() API call clears
15104 * the device's saved_state flag, the restored state needs to be saved again.
15105 */
15106 pci_save_state(pdev);
15108 if (pdev->is_busmaster)
15109 pci_set_master(pdev);
15111 spin_lock_irq(&phba->hbalock);
15112 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15113 spin_unlock_irq(&phba->hbalock);
15115 /* Init cpu_map array */
15116 lpfc_cpu_map_array_init(phba);
15117 /* Configure and enable interrupt */
15118 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15119 if (intr_mode == LPFC_INTR_ERROR) {
15120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15121 "2824 Cannot re-enable interrupt after "
15123 return PCI_ERS_RESULT_DISCONNECT;
15125 phba->intr_mode = intr_mode;
15126 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15128 /* Log the current active interrupt mode */
15129 lpfc_log_intr_mode(phba, phba->intr_mode);
15131 return PCI_ERS_RESULT_RECOVERED;
15135 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15136 * @pdev: pointer to PCI device
15138 * This routine is called from the PCI subsystem for error handling to device
15139 * with SLI-4 interface spec. It is called when kernel error recovery tells
15140 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
15141 * error recovery. After this call, traffic can start to flow from this
15142 * device again.
15145 lpfc_io_resume_s4(struct pci_dev *pdev)
15147 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15148 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15151 * In case of slot reset, as the function reset is performed through a
15152 * mailbox command which needs DMA to be enabled, this operation
15153 * has to be moved to the io resume phase. Taking device offline
15154 * will perform the necessary cleanup.
15156 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15157 /* Perform device reset */
15158 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15159 lpfc_offline(phba);
15160 lpfc_sli_brdrestart(phba);
15161 /* Bring the device back online */
15167 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15168 * @pdev: pointer to PCI device
15169 * @pid: pointer to PCI device identifier
15171 * This routine is to be registered to the kernel's PCI subsystem. When an
15172 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
15173 * at the PCI device-specific information of the device and the driver to
15174 * see whether the driver can support this kind of device. If the match is
15175 * successful, the driver core invokes this routine. This routine dispatches
15176 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15177 * do all the initialization that it needs to do to handle the HBA device
15178 * properly.
15181 * 0 - driver can claim the device
15182 * negative value - driver can not claim the device
15185 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15188 struct lpfc_sli_intf intf;
15190 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15193 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15194 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15195 rc = lpfc_pci_probe_one_s4(pdev, pid);
15197 rc = lpfc_pci_probe_one_s3(pdev, pid);
15203 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15204 * @pdev: pointer to PCI device
15206 * This routine is to be registered to the kernel's PCI subsystem. When an
15207 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15208 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15209 * remove routine, which will perform all the necessary cleanup for the
15210 * device to be removed from the PCI subsystem properly.
15213 lpfc_pci_remove_one(struct pci_dev *pdev)
15215 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15216 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15218 switch (phba->pci_dev_grp) {
15219 case LPFC_PCI_DEV_LP:
15220 lpfc_pci_remove_one_s3(pdev);
15222 case LPFC_PCI_DEV_OC:
15223 lpfc_pci_remove_one_s4(pdev);
15226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15227 "1424 Invalid PCI device group: 0x%x\n",
15228 phba->pci_dev_grp);
15235 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15236 * @dev: pointer to device
15238 * This routine is to be registered to the kernel's PCI subsystem to support
15239 * system Power Management (PM). When PM invokes this method, it dispatches
15240 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15241 * suspend the device.
15244 * 0 - driver suspended the device
15247 static int __maybe_unused
15248 lpfc_pci_suspend_one(struct device *dev)
15250 struct Scsi_Host *shost = dev_get_drvdata(dev);
15251 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15254 switch (phba->pci_dev_grp) {
15255 case LPFC_PCI_DEV_LP:
15256 rc = lpfc_pci_suspend_one_s3(dev);
15258 case LPFC_PCI_DEV_OC:
15259 rc = lpfc_pci_suspend_one_s4(dev);
15262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15263 "1425 Invalid PCI device group: 0x%x\n",
15264 phba->pci_dev_grp);
15271 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15272 * @dev: pointer to device
15274 * This routine is to be registered to the kernel's PCI subsystem to support
15275 * system Power Management (PM). When PM invokes this method, it dispatches
15276 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15277 * resume the device.
15280 * 0 - driver resumed the device
15283 static int __maybe_unused
15284 lpfc_pci_resume_one(struct device *dev)
15286 struct Scsi_Host *shost = dev_get_drvdata(dev);
15287 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15290 switch (phba->pci_dev_grp) {
15291 case LPFC_PCI_DEV_LP:
15292 rc = lpfc_pci_resume_one_s3(dev);
15294 case LPFC_PCI_DEV_OC:
15295 rc = lpfc_pci_resume_one_s4(dev);
15298 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15299 "1426 Invalid PCI device group: 0x%x\n",
15300 phba->pci_dev_grp);
15307 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15308 * @pdev: pointer to PCI device.
15309 * @state: the current PCI connection state.
15311 * This routine is registered to the PCI subsystem for error handling. This
15312 * function is called by the PCI subsystem after a PCI bus error affecting
15313 * this device has been detected. When this routine is invoked, it dispatches
15314 * the action to the proper SLI-3 or SLI-4 device error detected handling
15315 * routine, which will perform the proper error detected operation.
15318 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15319 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15321 static pci_ers_result_t
15322 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15324 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15325 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15326 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15328 if (phba->link_state == LPFC_HBA_ERROR &&
15329 phba->hba_flag & HBA_IOQ_FLUSH)
15330 return PCI_ERS_RESULT_NEED_RESET;
15332 switch (phba->pci_dev_grp) {
15333 case LPFC_PCI_DEV_LP:
15334 rc = lpfc_io_error_detected_s3(pdev, state);
15336 case LPFC_PCI_DEV_OC:
15337 rc = lpfc_io_error_detected_s4(pdev, state);
15340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15341 "1427 Invalid PCI device group: 0x%x\n",
15342 phba->pci_dev_grp);
15349 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
15350 * @pdev: pointer to PCI device.
15352 * This routine is registered to the PCI subsystem for error handling. This
15353 * function is called after PCI bus has been reset to restart the PCI card
15354 * from scratch, as if from a cold-boot. When this routine is invoked, it
15355 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15356 * routine, which will perform the proper device reset.
15359 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15360 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15362 static pci_ers_result_t
15363 lpfc_io_slot_reset(struct pci_dev *pdev)
15365 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15366 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15367 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15369 switch (phba->pci_dev_grp) {
15370 case LPFC_PCI_DEV_LP:
15371 rc = lpfc_io_slot_reset_s3(pdev);
15373 case LPFC_PCI_DEV_OC:
15374 rc = lpfc_io_slot_reset_s4(pdev);
15377 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15378 "1428 Invalid PCI device group: 0x%x\n",
15379 phba->pci_dev_grp);
15386 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15387 * @pdev: pointer to PCI device
15389 * This routine is registered to the PCI subsystem for error handling. It
15390 * is called when kernel error recovery tells the lpfc driver that it is
15391 * OK to resume normal PCI operation after PCI bus error recovery. When
15392 * this routine is invoked, it dispatches the action to the proper SLI-3
15393 * or SLI-4 device io_resume routine, which will resume the device operation.
15396 lpfc_io_resume(struct pci_dev *pdev)
15398 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15399 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15401 switch (phba->pci_dev_grp) {
15402 case LPFC_PCI_DEV_LP:
15403 lpfc_io_resume_s3(pdev);
15405 case LPFC_PCI_DEV_OC:
15406 lpfc_io_resume_s4(pdev);
15409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15410 "1429 Invalid PCI device group: 0x%x\n",
15411 phba->pci_dev_grp);
15418 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15419 * @phba: pointer to lpfc hba data structure.
15421 * This routine checks to see if OAS is supported for this adapter. If
15422 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
15423 * the enable oas flag is cleared and the pool created for OAS device data
15424 * is destroyed.
15428 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15431 if (!phba->cfg_EnableXLane)
15434 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15438 mempool_destroy(phba->device_data_mem_pool);
15439 phba->device_data_mem_pool = NULL;
15446 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15447 * @phba: pointer to lpfc hba data structure.
15449 * This routine checks to see if RAS is supported by the adapter and, if so,
15450 * checks the PCI function through which RAS support enablement is to be done.
15453 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15455 /* if (ASIC_GEN_NUM >= 0xC) */
15456 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15457 LPFC_SLI_INTF_IF_TYPE_6) ||
15458 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15459 LPFC_SLI_INTF_FAMILY_G6)) {
15460 phba->ras_fwlog.ras_hwsupport = true;
15461 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15462 phba->cfg_ras_fwlog_buffsize)
15463 phba->ras_fwlog.ras_enabled = true;
15465 phba->ras_fwlog.ras_enabled = false;
15467 phba->ras_fwlog.ras_hwsupport = false;
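/*
 * In short: RAS firmware logging is hardware-supported on if_type 6 or
 * SLI family G6 parts, and is actually enabled only when this PCI
 * function matches cfg_ras_fwlog_func and a log buffer size has been
 * configured.
 */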
15472 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15474 static const struct pci_error_handlers lpfc_err_handler = {
15475 .error_detected = lpfc_io_error_detected,
15476 .slot_reset = lpfc_io_slot_reset,
15477 .resume = lpfc_io_resume,
15480 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15481 lpfc_pci_suspend_one,
15482 lpfc_pci_resume_one);
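/*
 * SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops that routes the
 * system suspend/resume (and hibernate freeze/thaw/poweroff/restore)
 * callbacks to the two routines above, matching the driver's policy of
 * treating every PM message as a plain SUSPEND.
 */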
15484 static struct pci_driver lpfc_driver = {
15485 .name = LPFC_DRIVER_NAME,
15486 .id_table = lpfc_id_table,
15487 .probe = lpfc_pci_probe_one,
15488 .remove = lpfc_pci_remove_one,
15489 .shutdown = lpfc_pci_remove_one,
15490 .driver.pm = &lpfc_pci_pm_ops_one,
15491 .err_handler = &lpfc_err_handler,
15494 static const struct file_operations lpfc_mgmt_fop = {
15495 .owner = THIS_MODULE,
15498 static struct miscdevice lpfc_mgmt_dev = {
15499 .minor = MISC_DYNAMIC_MINOR,
15500 .name = "lpfcmgmt",
15501 .fops = &lpfc_mgmt_fop,
15505 * lpfc_init - lpfc module initialization routine
15507 * This routine is to be invoked when the lpfc module is loaded into the
15508 * kernel. The special kernel macro module_init() is used to indicate the
15509 * role of this routine to the kernel as lpfc module entry point.
15513 * -ENOMEM - FC attach transport failed
15514 * all others - failed
15521 pr_info(LPFC_MODULE_DESC "\n");
15522 pr_info(LPFC_COPYRIGHT "\n");
15524 error = misc_register(&lpfc_mgmt_dev);
15526 printk(KERN_ERR "Could not register lpfcmgmt device, "
15527 "misc_register returned with status %d", error);
15530 lpfc_transport_functions.vport_create = lpfc_vport_create;
15531 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15532 lpfc_transport_template =
15533 fc_attach_transport(&lpfc_transport_functions);
15534 if (lpfc_transport_template == NULL)
15536 lpfc_vport_transport_template =
15537 fc_attach_transport(&lpfc_vport_transport_functions);
15538 if (lpfc_vport_transport_template == NULL) {
15539 fc_release_transport(lpfc_transport_template);
15542 lpfc_wqe_cmd_template();
15543 lpfc_nvmet_cmd_template();
15545 /* Initialize in case vector mapping is needed */
15546 lpfc_present_cpu = num_present_cpus();
15548 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15549 "lpfc/sli4:online",
15550 lpfc_cpu_online, lpfc_cpu_offline);
15552 goto cpuhp_failure;
15553 lpfc_cpuhp_state = error;
15555 error = pci_register_driver(&lpfc_driver);
15562 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15564 fc_release_transport(lpfc_transport_template);
15565 fc_release_transport(lpfc_vport_transport_template);
15567 misc_deregister(&lpfc_mgmt_dev);
15572 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15574 unsigned int start_idx;
15575 unsigned int dbg_cnt;
15576 unsigned int temp_idx;
15579 unsigned long rem_nsec;
15581 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15584 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15585 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15588 temp_idx = start_idx;
15589 if (dbg_cnt >= DBG_LOG_SZ) {
15590 dbg_cnt = DBG_LOG_SZ;
15593 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15594 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15595 } else {
15596 if (start_idx < dbg_cnt)
15597 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15598 else
15599 start_idx -= dbg_cnt;
15602 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15603 start_idx, temp_idx, dbg_cnt);
15605 for (i = 0; i < dbg_cnt; i++) {
15606 if ((start_idx + i) < DBG_LOG_SZ)
15607 temp_idx = (start_idx + i) % DBG_LOG_SZ;
15610 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15611 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15613 (unsigned long)phba->dbg_log[temp_idx].t_ns,
15615 phba->dbg_log[temp_idx].log);
15618 atomic_set(&phba->dbg_log_cnt, 0);
15619 atomic_set(&phba->dbg_log_dmping, 0);
15623 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15627 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15628 struct va_format vaf;
15631 va_start(args, fmt);
15632 if (unlikely(dbg_dmping)) {
15635 dev_info(&phba->pcidev->dev, "%pV", &vaf);
15639 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15640 DBG_LOG_SZ;
15642 atomic_inc(&phba->dbg_log_cnt);
15644 vscnprintf(phba->dbg_log[idx].log,
15645 sizeof(phba->dbg_log[idx].log), fmt, args);
15648 phba->dbg_log[idx].t_ns = local_clock();
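/*
 * The debug log is a lockless ring buffer: writers claim a slot with an
 * atomic fetch-and-add on dbg_log_idx, and lpfc_dmp_dbg() serializes
 * dumping with an atomic compare-and-exchange on dbg_log_dmping, while
 * concurrent writers fall back to printing directly via dev_info().
 */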
15652 * lpfc_exit - lpfc module removal routine
15654 * This routine is invoked when the lpfc module is removed from the kernel.
15655 * The special kernel macro module_exit() is used to indicate the role of
15656 * this routine to the kernel as lpfc module exit point.
15661 misc_deregister(&lpfc_mgmt_dev);
15662 pci_unregister_driver(&lpfc_driver);
15663 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15664 fc_release_transport(lpfc_transport_template);
15665 fc_release_transport(lpfc_vport_transport_template);
15666 idr_destroy(&lpfc_hba_index);
15669 module_init(lpfc_init);
15670 module_exit(lpfc_exit);
15671 MODULE_LICENSE("GPL");
15672 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15673 MODULE_AUTHOR("Broadcom");
15674 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);