1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2010 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
31 #include <linux/aer.h>
32 #include <linux/slab.h>
34 #include <scsi/scsi.h>
35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_transport_fc.h>
42 #include "lpfc_sli4.h"
44 #include "lpfc_disc.h"
45 #include "lpfc_scsi.h"
47 #include "lpfc_logmsg.h"
48 #include "lpfc_crtn.h"
49 #include "lpfc_vport.h"
50 #include "lpfc_version.h"
53 unsigned long _dump_buf_data_order;
55 unsigned long _dump_buf_dif_order;
56 spinlock_t _dump_buf_lock;
58 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
59 static int lpfc_post_rcv_buf(struct lpfc_hba *);
60 static int lpfc_sli4_queue_create(struct lpfc_hba *);
61 static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
62 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
63 static int lpfc_setup_endian_order(struct lpfc_hba *);
64 static int lpfc_sli4_read_config(struct lpfc_hba *);
65 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
66 static void lpfc_free_sgl_list(struct lpfc_hba *);
67 static int lpfc_init_sgl_list(struct lpfc_hba *);
68 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
69 static void lpfc_free_active_sgl(struct lpfc_hba *);
70 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
71 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
72 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
73 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
74 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
76 static struct scsi_transport_template *lpfc_transport_template = NULL;
77 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
78 static DEFINE_IDR(lpfc_hba_index);
81 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
82 * @phba: pointer to lpfc hba data structure.
84 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
85 * mailbox command. It retrieves the revision information from the HBA and
86 * collects the Vital Product Data (VPD) about the HBA for preparing the
87 * configuration of the HBA.
91 * -ERESTART - requests the SLI layer to reset the HBA and try again.
92 * Any other value - indicates an error.
95 lpfc_config_port_prep(struct lpfc_hba *phba)
97 lpfc_vpd_t *vp = &phba->vpd;
101 char *lpfc_vpd_data = NULL;
103 static char licensed[56] =
104 "key unlock for use with gnu public licensed code only\0";
105 static int init_key = 1;
107 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
109 phba->link_state = LPFC_HBA_ERROR;
114 phba->link_state = LPFC_INIT_MBX_CMDS;
116 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
118 uint32_t *ptext = (uint32_t *) licensed;
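/* Byte-swap each word of the license key to big-endian before it is copied into the READ_NVPARM mailbox below. */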
120 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
121 *ptext = cpu_to_be32(*ptext);
125 lpfc_read_nv(phba, pmb);
126 memset((char*)mb->un.varRDnvp.rsvd3, 0,
127 sizeof (mb->un.varRDnvp.rsvd3));
128 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
131 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
133 if (rc != MBX_SUCCESS) {
134 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
135 "0324 Config Port initialization "
136 "error, mbxCmd x%x READ_NVPARM, "
138 mb->mbxCommand, mb->mbxStatus);
139 mempool_free(pmb, phba->mbox_mem_pool);
142 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
144 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
148 phba->sli3_options = 0x0;
150 /* Setup and issue mailbox READ REV command */
151 lpfc_read_rev(phba, pmb);
152 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
153 if (rc != MBX_SUCCESS) {
154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
155 "0439 Adapter failed to init, mbxCmd x%x "
156 "READ_REV, mbxStatus x%x\n",
157 mb->mbxCommand, mb->mbxStatus);
158 mempool_free( pmb, phba->mbox_mem_pool);
164 * The value of rr must be 1 since the driver set the cv field to 1.
165 * This setting requires the FW to set all revision fields.
167 if (mb->un.varRdRev.rr == 0) {
169 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
170 "0440 Adapter failed to init, READ_REV has "
171 "missing revision information.\n");
172 mempool_free(pmb, phba->mbox_mem_pool);
176 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
177 mempool_free(pmb, phba->mbox_mem_pool);
181 /* Save information as VPD data */
183 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
184 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
185 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
186 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
187 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
188 vp->rev.biuRev = mb->un.varRdRev.biuRev;
189 vp->rev.smRev = mb->un.varRdRev.smRev;
190 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
191 vp->rev.endecRev = mb->un.varRdRev.endecRev;
192 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
193 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
194 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
195 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
196 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
197 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
199 /* If the sli feature level is less than 9, we must
200 * tear down all RPIs and VPIs on link down if NPIV
203 if (vp->rev.feaLevelHigh < 9)
204 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
206 if (lpfc_is_LC_HBA(phba->pcidev->device))
207 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
208 sizeof (phba->RandomData));
210 /* Get adapter VPD information */
211 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
219 if (rc != MBX_SUCCESS) {
220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
221 "0441 VPD not present on adapter, "
222 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
223 mb->mbxCommand, mb->mbxStatus);
224 mb->un.varDmp.word_cnt = 0;
226 /* dump mem may return a zero when finished or we got a
227 * mailbox error, either way we are done.
229 if (mb->un.varDmp.word_cnt == 0)
231 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
232 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
233 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
234 lpfc_vpd_data + offset,
235 mb->un.varDmp.word_cnt);
236 offset += mb->un.varDmp.word_cnt;
237 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
238 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
240 kfree(lpfc_vpd_data);
242 mempool_free(pmb, phba->mbox_mem_pool);
247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
248 * @phba: pointer to lpfc hba data structure.
249 * @pmboxq: pointer to the driver internal queue element for mailbox command.
251 * This is the completion handler for the driver's mailbox command that
252 * configures asynchronous event support on the device. If the mailbox command
253 * returns successfully, it sets the internal async event support flag to 1;
254 * otherwise, it sets the flag to 0.
257 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
259 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
260 phba->temp_sensor_support = 1;
262 phba->temp_sensor_support = 0;
263 mempool_free(pmboxq, phba->mbox_mem_pool);
268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
269 * @phba: pointer to lpfc hba data structure.
270 * @pmboxq: pointer to the driver internal queue element for mailbox command.
272 * This is the completion handler for the dump mailbox command used to get
273 * the wake up parameters. When this command completes, the response contains
274 * the option ROM version of the HBA. This function translates the version
275 * number into a human readable string and stores it in OptionROMVersion.
278 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
281 uint32_t prog_id_word;
283 /* character array used for decoding dist type. */
284 char dist_char[] = "nabx";
286 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
287 mempool_free(pmboxq, phba->mbox_mem_pool);
291 prg = (struct prog_id *) &prog_id_word;
293 /* word 7 contain option rom version */
294 prog_id_word = pmboxq->u.mb.un.varWords[7];
296 /* Decode the Option rom version word to a readable string */
298 dist = dist_char[prg->dist];
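/* Option ROM version string is "<ver>.<rev><lev>", with the distribution character and number appended unless dist is 3 and num is 0. */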
300 if ((prg->dist == 3) && (prg->num == 0))
301 sprintf(phba->OptionROMVersion, "%d.%d%d",
302 prg->ver, prg->rev, prg->lev);
304 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
305 prg->ver, prg->rev, prg->lev,
307 mempool_free(pmboxq, phba->mbox_mem_pool);
312 * lpfc_config_port_post - Perform lpfc initialization after config port
313 * @phba: pointer to lpfc hba data structure.
315 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
316 * command call. It performs all internal resource and state setups on the
317 * port: posting IOCB buffers, enabling appropriate host interrupt attentions,
318 * setting up ELS ring timers, etc.
322 * Any other value - error.
325 lpfc_config_port_post(struct lpfc_hba *phba)
327 struct lpfc_vport *vport = phba->pport;
328 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
331 struct lpfc_dmabuf *mp;
332 struct lpfc_sli *psli = &phba->sli;
333 uint32_t status, timeout;
337 spin_lock_irq(&phba->hbalock);
339 * If the Config port completed correctly, the HBA is not
340 * overheated any more.
342 if (phba->over_temp_state == HBA_OVER_TEMP)
343 phba->over_temp_state = HBA_NORMAL_TEMP;
344 spin_unlock_irq(&phba->hbalock);
346 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
348 phba->link_state = LPFC_HBA_ERROR;
353 /* Get login parameters for NID. */
354 rc = lpfc_read_sparam(phba, pmb, 0);
356 mempool_free(pmb, phba->mbox_mem_pool);
361 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
363 "0448 Adapter failed init, mbxCmd x%x "
364 "READ_SPARM mbxStatus x%x\n",
365 mb->mbxCommand, mb->mbxStatus);
366 phba->link_state = LPFC_HBA_ERROR;
367 mp = (struct lpfc_dmabuf *) pmb->context1;
368 mempool_free(pmb, phba->mbox_mem_pool);
369 lpfc_mbuf_free(phba, mp->virt, mp->phys);
374 mp = (struct lpfc_dmabuf *) pmb->context1;
376 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
377 lpfc_mbuf_free(phba, mp->virt, mp->phys);
379 pmb->context1 = NULL;
381 if (phba->cfg_soft_wwnn)
382 u64_to_wwn(phba->cfg_soft_wwnn,
383 vport->fc_sparam.nodeName.u.wwn);
384 if (phba->cfg_soft_wwpn)
385 u64_to_wwn(phba->cfg_soft_wwpn,
386 vport->fc_sparam.portName.u.wwn);
387 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
388 sizeof (struct lpfc_name));
389 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390 sizeof (struct lpfc_name));
392 /* Update the fc_host data structures with new wwn. */
393 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
394 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
395 fc_host_max_npiv_vports(shost) = phba->max_vpi;
397 /* If no serial number in VPD data, use low 6 bytes of WWNN */
398 /* This should be consolidated into parse_vpd ? - mr */
399 if (phba->SerialNumber[0] == 0) {
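/* Render each nibble of those WWNN bytes as an ASCII character: '0'-'9' for 0-9, 'a'-'f' for 10-15. */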
402 outptr = &vport->fc_nodename.u.s.IEEE[0];
403 for (i = 0; i < 12; i++) {
405 j = ((status & 0xf0) >> 4);
407 phba->SerialNumber[i] =
408 (char)((uint8_t) 0x30 + (uint8_t) j);
410 phba->SerialNumber[i] =
411 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
415 phba->SerialNumber[i] =
416 (char)((uint8_t) 0x30 + (uint8_t) j);
418 phba->SerialNumber[i] =
419 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
423 lpfc_read_config(phba, pmb);
425 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
427 "0453 Adapter failed to init, mbxCmd x%x "
428 "READ_CONFIG, mbxStatus x%x\n",
429 mb->mbxCommand, mb->mbxStatus);
430 phba->link_state = LPFC_HBA_ERROR;
431 mempool_free( pmb, phba->mbox_mem_pool);
435 /* Check if the port is disabled */
436 lpfc_sli_read_link_ste(phba);
438 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
439 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
440 phba->cfg_hba_queue_depth =
441 (mb->un.varRdConfig.max_xri + 1) -
442 lpfc_sli4_get_els_iocb_cnt(phba);
444 phba->lmt = mb->un.varRdConfig.lmt;
446 /* Get the default values for Model Name and Description */
447 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
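/* Validate the configured link speed against the speeds the adapter reports as supported (phba->lmt); otherwise reset it to auto below. */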
449 if ((phba->cfg_link_speed > LINK_SPEED_10G)
450 || ((phba->cfg_link_speed == LINK_SPEED_1G)
451 && !(phba->lmt & LMT_1Gb))
452 || ((phba->cfg_link_speed == LINK_SPEED_2G)
453 && !(phba->lmt & LMT_2Gb))
454 || ((phba->cfg_link_speed == LINK_SPEED_4G)
455 && !(phba->lmt & LMT_4Gb))
456 || ((phba->cfg_link_speed == LINK_SPEED_8G)
457 && !(phba->lmt & LMT_8Gb))
458 || ((phba->cfg_link_speed == LINK_SPEED_10G)
459 && !(phba->lmt & LMT_10Gb))) {
460 /* Reset link speed to auto */
461 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
462 "1302 Invalid speed for this board: "
463 "Reset link speed to auto: x%x\n",
464 phba->cfg_link_speed);
465 phba->cfg_link_speed = LINK_SPEED_AUTO;
468 phba->link_state = LPFC_LINK_DOWN;
470 /* Only process IOCBs on ELS ring till hba_state is READY */
471 if (psli->ring[psli->extra_ring].cmdringaddr)
472 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
473 if (psli->ring[psli->fcp_ring].cmdringaddr)
474 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
475 if (psli->ring[psli->next_ring].cmdringaddr)
476 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
478 /* Post receive buffers for desired rings */
479 if (phba->sli_rev != 3)
480 lpfc_post_rcv_buf(phba);
483 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
485 if (phba->intr_type == MSIX) {
486 rc = lpfc_config_msi(phba, pmb);
488 mempool_free(pmb, phba->mbox_mem_pool);
491 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
492 if (rc != MBX_SUCCESS) {
493 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
494 "0352 Config MSI mailbox command "
495 "failed, mbxCmd x%x, mbxStatus x%x\n",
496 pmb->u.mb.mbxCommand,
497 pmb->u.mb.mbxStatus);
498 mempool_free(pmb, phba->mbox_mem_pool);
503 spin_lock_irq(&phba->hbalock);
504 /* Initialize ERATT handling flag */
505 phba->hba_flag &= ~HBA_ERATT_HANDLED;
507 /* Enable appropriate host interrupts */
508 status = readl(phba->HCregaddr);
509 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
510 if (psli->num_rings > 0)
511 status |= HC_R0INT_ENA;
512 if (psli->num_rings > 1)
513 status |= HC_R1INT_ENA;
514 if (psli->num_rings > 2)
515 status |= HC_R2INT_ENA;
516 if (psli->num_rings > 3)
517 status |= HC_R3INT_ENA;
519 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
520 (phba->cfg_poll & DISABLE_FCP_RING_INT))
521 status &= ~(HC_R0INT_ENA);
523 writel(status, phba->HCregaddr);
524 readl(phba->HCregaddr); /* flush */
525 spin_unlock_irq(&phba->hbalock);
527 /* Set up ring-0 (ELS) timer */
528 timeout = phba->fc_ratov * 2;
529 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
530 /* Set up heart beat (HB) timer */
531 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
532 phba->hb_outstanding = 0;
533 phba->last_completion_time = jiffies;
534 /* Set up error attention (ERATT) polling timer */
535 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
537 if (phba->hba_flag & LINK_DISABLED) {
538 lpfc_printf_log(phba,
540 "2598 Adapter Link is disabled.\n");
541 lpfc_down_link(phba, pmb);
542 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
543 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
544 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
545 lpfc_printf_log(phba,
547 "2599 Adapter failed to issue DOWN_LINK"
548 " mbox command rc 0x%x\n", rc);
550 mempool_free(pmb, phba->mbox_mem_pool);
553 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
554 lpfc_init_link(phba, pmb, phba->cfg_topology,
555 phba->cfg_link_speed);
556 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
557 lpfc_set_loopback_flag(phba);
558 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
559 if (rc != MBX_SUCCESS) {
560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
561 "0454 Adapter failed to init, mbxCmd x%x "
562 "INIT_LINK, mbxStatus x%x\n",
563 mb->mbxCommand, mb->mbxStatus);
565 /* Clear all interrupt enable conditions */
566 writel(0, phba->HCregaddr);
567 readl(phba->HCregaddr); /* flush */
568 /* Clear all pending interrupts */
569 writel(0xffffffff, phba->HAregaddr);
570 readl(phba->HAregaddr); /* flush */
572 phba->link_state = LPFC_HBA_ERROR;
574 mempool_free(pmb, phba->mbox_mem_pool);
578 /* MBOX buffer will be freed in mbox compl */
579 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
581 phba->link_state = LPFC_HBA_ERROR;
585 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
586 pmb->mbox_cmpl = lpfc_config_async_cmpl;
587 pmb->vport = phba->pport;
588 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
590 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
591 lpfc_printf_log(phba,
594 "0456 Adapter failed to issue "
595 "ASYNCEVT_ENABLE mbox status x%x\n",
597 mempool_free(pmb, phba->mbox_mem_pool);
600 /* Get Option rom version */
601 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
603 phba->link_state = LPFC_HBA_ERROR;
607 lpfc_dump_wakeup_param(phba, pmb);
608 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
609 pmb->vport = phba->pport;
610 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
612 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
614 "to get Option ROM version status x%x\n", rc);
615 mempool_free(pmb, phba->mbox_mem_pool);
622 * lpfc_hba_init_link - Initialize the FC link
623 * @phba: pointer to lpfc hba data structure.
625 * This routine will issue the INIT_LINK mailbox command call.
626 * It is available to other drivers through the lpfc_hba data
627 * structure for use as a delayed link up mechanism with the
628 * module parameter lpfc_suppress_link_up.
632 * Any other value - error
635 lpfc_hba_init_link(struct lpfc_hba *phba)
637 struct lpfc_vport *vport = phba->pport;
642 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
644 phba->link_state = LPFC_HBA_ERROR;
650 lpfc_init_link(phba, pmb, phba->cfg_topology,
651 phba->cfg_link_speed);
652 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
653 lpfc_set_loopback_flag(phba);
654 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
655 if (rc != MBX_SUCCESS) {
656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
657 "0498 Adapter failed to init, mbxCmd x%x "
658 "INIT_LINK, mbxStatus x%x\n",
659 mb->mbxCommand, mb->mbxStatus);
660 /* Clear all interrupt enable conditions */
661 writel(0, phba->HCregaddr);
662 readl(phba->HCregaddr); /* flush */
663 /* Clear all pending interrupts */
664 writel(0xffffffff, phba->HAregaddr);
665 readl(phba->HAregaddr); /* flush */
666 phba->link_state = LPFC_HBA_ERROR;
668 mempool_free(pmb, phba->mbox_mem_pool);
671 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
677 * lpfc_hba_down_link - this routine downs the FC link
679 * This routine will issue the DOWN_LINK mailbox command call.
680 * It is available to other drivers through the lpfc_hba data
681 * structure for use to stop the link.
685 * Any other value - error
688 lpfc_hba_down_link(struct lpfc_hba *phba)
693 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
695 phba->link_state = LPFC_HBA_ERROR;
699 lpfc_printf_log(phba,
701 "0491 Adapter Link is disabled.\n");
702 lpfc_down_link(phba, pmb);
703 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
704 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
705 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
706 lpfc_printf_log(phba,
708 "2522 Adapter failed to issue DOWN_LINK"
709 " mbox command rc 0x%x\n", rc);
711 mempool_free(pmb, phba->mbox_mem_pool);
718 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
719 * @phba: pointer to lpfc HBA data structure.
721 * This routine will do LPFC uninitialization before the HBA is reset when
722 * bringing down the SLI Layer.
726 * Any other value - error.
729 lpfc_hba_down_prep(struct lpfc_hba *phba)
731 struct lpfc_vport **vports;
734 if (phba->sli_rev <= LPFC_SLI_REV3) {
735 /* Disable interrupts */
736 writel(0, phba->HCregaddr);
737 readl(phba->HCregaddr); /* flush */
740 if (phba->pport->load_flag & FC_UNLOADING)
741 lpfc_cleanup_discovery_resources(phba->pport);
743 vports = lpfc_create_vport_work_array(phba);
745 for (i = 0; i <= phba->max_vports &&
746 vports[i] != NULL; i++)
747 lpfc_cleanup_discovery_resources(vports[i]);
748 lpfc_destroy_vport_work_array(phba, vports);
754 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
755 * @phba: pointer to lpfc HBA data structure.
757 * This routine will do uninitialization after the HBA is reset when bringing
758 * down the SLI Layer.
762 * Any other value - error.
765 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
767 struct lpfc_sli *psli = &phba->sli;
768 struct lpfc_sli_ring *pring;
769 struct lpfc_dmabuf *mp, *next_mp;
770 LIST_HEAD(completions);
773 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
774 lpfc_sli_hbqbuf_free_all(phba);
776 /* Cleanup preposted buffers on the ELS ring */
777 pring = &psli->ring[LPFC_ELS_RING];
778 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
780 pring->postbufq_cnt--;
781 lpfc_mbuf_free(phba, mp->virt, mp->phys);
786 spin_lock_irq(&phba->hbalock);
787 for (i = 0; i < psli->num_rings; i++) {
788 pring = &psli->ring[i];
790 /* At this point in time the HBA is either reset or DOA. Either
791 * way, nothing should be on txcmplq as it will NEVER complete.
793 list_splice_init(&pring->txcmplq, &completions);
794 pring->txcmplq_cnt = 0;
795 spin_unlock_irq(&phba->hbalock);
797 /* Cancel all the IOCBs from the completions list */
798 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
801 lpfc_sli_abort_iocb_ring(phba, pring);
802 spin_lock_irq(&phba->hbalock);
804 spin_unlock_irq(&phba->hbalock);
809 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
810 * @phba: pointer to lpfc HBA data structure.
812 * This routine will do uninitialization after the HBA is reset when bringing
813 * down the SLI Layer.
817 * Any other value - error.
820 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
822 struct lpfc_scsi_buf *psb, *psb_next;
825 unsigned long iflag = 0;
826 struct lpfc_sglq *sglq_entry = NULL;
828 ret = lpfc_hba_down_post_s3(phba);
831 /* At this point in time the HBA is either reset or DOA. Either
832 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
833 * on the lpfc_sgl_list so that it can either be freed if the
834 * driver is unloading or reposted if the driver is restarting
837 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
839 /* abts_sgl_list_lock required because worker thread uses this
842 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
843 list_for_each_entry(sglq_entry,
844 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
845 sglq_entry->state = SGL_FREED;
847 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
848 &phba->sli4_hba.lpfc_sgl_list);
849 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
850 /* abts_scsi_buf_list_lock required because worker thread uses this
853 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
854 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
856 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
857 spin_unlock_irq(&phba->hbalock);
859 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
861 psb->status = IOSTAT_SUCCESS;
863 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
864 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
865 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
870 * lpfc_hba_down_post - Wrapper func for hba down post routine
871 * @phba: pointer to lpfc HBA data structure.
873 * This routine wraps the actual SLI3 or SLI4 routine for performing
874 * uninitialization after the HBA is reset when bringing down the SLI Layer.
878 * Any other value - error.
881 lpfc_hba_down_post(struct lpfc_hba *phba)
883 return (*phba->lpfc_hba_down_post)(phba);
887 * lpfc_hb_timeout - The HBA-timer timeout handler
888 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
890 * This is the HBA-timer timeout handler registered to the lpfc driver. When
891 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
892 * work-port-events bitmap and the worker thread is notified. This timeout
893 * event will be used by the worker thread to invoke the actual timeout
895 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
895 * be performed in the timeout handler and the HBA timeout event bit shall
896 * be cleared by the worker thread after it has taken the event bitmap out.
899 lpfc_hb_timeout(unsigned long ptr)
901 struct lpfc_hba *phba;
905 phba = (struct lpfc_hba *)ptr;
907 /* Check for heart beat timeout conditions */
908 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
909 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
911 phba->pport->work_port_events |= WORKER_HB_TMO;
912 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
914 /* Tell the worker thread there is work to do */
916 lpfc_worker_wake_up(phba);
921 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
922 * @phba: pointer to lpfc hba data structure.
923 * @pmboxq: pointer to the driver internal queue element for mailbox command.
925 * This is the callback function to the lpfc heart-beat mailbox command.
926 * If configured, the lpfc driver issues the heart-beat mailbox command to
927 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
928 * heart-beat mailbox command is issued, the driver sets up the heart-beat
929 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks the
930 * heart-beat outstanding state. Once the mailbox command comes back and
931 * no error conditions detected, the heart-beat mailbox command timer is
932 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
933 * state is cleared for the next heart-beat. If the timer expires with the
934 * heart-beat outstanding state set, the driver will put the HBA offline.
937 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
939 unsigned long drvr_flag;
941 spin_lock_irqsave(&phba->hbalock, drvr_flag);
942 phba->hb_outstanding = 0;
943 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
945 /* Check and reset the heart-beat timer if necessary */
946 mempool_free(pmboxq, phba->mbox_mem_pool);
947 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
948 !(phba->link_state == LPFC_HBA_ERROR) &&
949 !(phba->pport->load_flag & FC_UNLOADING))
950 mod_timer(&phba->hb_tmofunc,
951 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
956 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
957 * @phba: pointer to lpfc hba data structure.
959 * This is the actual HBA-timer timeout handler to be invoked by the worker
960 * thread whenever the HBA timer fired and HBA-timeout event posted. This
961 * handler performs any periodic operations needed for the device. If such
962 * a periodic event has already been attended to, either in the interrupt handler
963 * or by processing slow-ring or fast-ring events within the HBA-timer
964 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
965 * the timer for the next timeout period. If the lpfc heart-beat mailbox command
966 * is configured and there is no heart-beat mailbox command outstanding, a
967 * heart-beat mailbox is issued and the timer is set properly. Otherwise, if there
968 * has been a heart-beat mailbox command outstanding, the HBA shall be put
972 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
974 struct lpfc_vport **vports;
975 LPFC_MBOXQ_t *pmboxq;
976 struct lpfc_dmabuf *buf_ptr;
978 struct lpfc_sli *psli = &phba->sli;
979 LIST_HEAD(completions);
981 vports = lpfc_create_vport_work_array(phba);
983 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
984 lpfc_rcv_seq_check_edtov(vports[i]);
985 lpfc_destroy_vport_work_array(phba, vports);
987 if ((phba->link_state == LPFC_HBA_ERROR) ||
988 (phba->pport->load_flag & FC_UNLOADING) ||
989 (phba->pport->fc_flag & FC_OFFLINE_MODE))
992 spin_lock_irq(&phba->pport->work_port_lock);
994 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
996 spin_unlock_irq(&phba->pport->work_port_lock);
997 if (!phba->hb_outstanding)
998 mod_timer(&phba->hb_tmofunc,
999 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1001 mod_timer(&phba->hb_tmofunc,
1002 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1005 spin_unlock_irq(&phba->pport->work_port_lock);
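/* If the count of buffers on the elsbuf list has not changed since the last heartbeat pass, free them. */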
1007 if (phba->elsbuf_cnt &&
1008 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1009 spin_lock_irq(&phba->hbalock);
1010 list_splice_init(&phba->elsbuf, &completions);
1011 phba->elsbuf_cnt = 0;
1012 phba->elsbuf_prev_cnt = 0;
1013 spin_unlock_irq(&phba->hbalock);
1015 while (!list_empty(&completions)) {
1016 list_remove_head(&completions, buf_ptr,
1017 struct lpfc_dmabuf, list);
1018 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1022 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1024 /* If there is no heart beat outstanding, issue a heartbeat command */
1025 if (phba->cfg_enable_hba_heartbeat) {
1026 if (!phba->hb_outstanding) {
1027 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
1029 mod_timer(&phba->hb_tmofunc,
1030 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1034 lpfc_heart_beat(phba, pmboxq);
1035 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1036 pmboxq->vport = phba->pport;
1037 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1039 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1040 mempool_free(pmboxq, phba->mbox_mem_pool);
1041 mod_timer(&phba->hb_tmofunc,
1042 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1045 mod_timer(&phba->hb_tmofunc,
1046 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1047 phba->hb_outstanding = 1;
1051 * If the heart beat timeout was called with hb_outstanding set,
1052 * we need to take the HBA offline.
1054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1055 "0459 Adapter heartbeat failure, "
1056 "taking this port offline.\n");
1058 spin_lock_irq(&phba->hbalock);
1059 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1060 spin_unlock_irq(&phba->hbalock);
1062 lpfc_offline_prep(phba);
1064 lpfc_unblock_mgmt_io(phba);
1065 phba->link_state = LPFC_HBA_ERROR;
1066 lpfc_hba_down_post(phba);
1072 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1073 * @phba: pointer to lpfc hba data structure.
1075 * This routine is called to bring the HBA offline when HBA hardware error
1076 * other than Port Error 6 has been detected.
1079 lpfc_offline_eratt(struct lpfc_hba *phba)
1081 struct lpfc_sli *psli = &phba->sli;
1083 spin_lock_irq(&phba->hbalock);
1084 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1085 spin_unlock_irq(&phba->hbalock);
1086 lpfc_offline_prep(phba);
1089 lpfc_reset_barrier(phba);
1090 spin_lock_irq(&phba->hbalock);
1091 lpfc_sli_brdreset(phba);
1092 spin_unlock_irq(&phba->hbalock);
1093 lpfc_hba_down_post(phba);
1094 lpfc_sli_brdready(phba, HS_MBRDY);
1095 lpfc_unblock_mgmt_io(phba);
1096 phba->link_state = LPFC_HBA_ERROR;
1101 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1102 * @phba: pointer to lpfc hba data structure.
1104 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1105 * other than Port Error 6 has been detected.
1108 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1110 lpfc_offline_prep(phba);
1112 lpfc_sli4_brdreset(phba);
1113 lpfc_hba_down_post(phba);
1114 lpfc_sli4_post_status_check(phba);
1115 lpfc_unblock_mgmt_io(phba);
1116 phba->link_state = LPFC_HBA_ERROR;
1120 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1121 * @phba: pointer to lpfc hba data structure.
1123 * This routine is invoked to handle the deferred HBA hardware error
1124 * conditions. This type of error is indicated by the HBA by setting ER1
1125 * and another ER bit in the host status register. The driver will
1126 * wait until the ER1 bit clears before handling the error condition.
1129 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1131 uint32_t old_host_status = phba->work_hs;
1132 struct lpfc_sli_ring *pring;
1133 struct lpfc_sli *psli = &phba->sli;
1135 /* If the pci channel is offline, ignore possible errors,
1136 * since we cannot communicate with the pci card anyway.
1138 if (pci_channel_offline(phba->pcidev)) {
1139 spin_lock_irq(&phba->hbalock);
1140 phba->hba_flag &= ~DEFER_ERATT;
1141 spin_unlock_irq(&phba->hbalock);
1145 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1146 "0479 Deferred Adapter Hardware Error "
1147 "Data: x%x x%x x%x\n",
1149 phba->work_status[0], phba->work_status[1]);
1151 spin_lock_irq(&phba->hbalock);
1152 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1153 spin_unlock_irq(&phba->hbalock);
1157 * The firmware stops when it triggers an error attention. That could cause
1158 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os) on the
1159 * txcmplq and let the SCSI layer retry them after the link is re-established.
1161 pring = &psli->ring[psli->fcp_ring];
1162 lpfc_sli_abort_iocb_ring(phba, pring);
1165 * There was a firmware error. Take the hba offline and then
1166 * attempt to restart it.
1168 lpfc_offline_prep(phba);
1171 /* Wait for the ER1 bit to clear.*/
1172 while (phba->work_hs & HS_FFER1) {
1174 phba->work_hs = readl(phba->HSregaddr);
1175 /* If driver is unloading let the worker thread continue */
1176 if (phba->pport->load_flag & FC_UNLOADING) {
1183 * This is to protect against a race condition in which
1184 * the first write to the host attention register clears the
1185 * host status register.
1187 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1188 phba->work_hs = old_host_status & ~HS_FFER1;
1190 spin_lock_irq(&phba->hbalock);
1191 phba->hba_flag &= ~DEFER_ERATT;
1192 spin_unlock_irq(&phba->hbalock);
1193 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1194 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1198 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1200 struct lpfc_board_event_header board_event;
1201 struct Scsi_Host *shost;
1203 board_event.event_type = FC_REG_BOARD_EVENT;
1204 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1205 shost = lpfc_shost_from_vport(phba->pport);
1206 fc_host_post_vendor_event(shost, fc_get_event_number(),
1207 sizeof(board_event),
1208 (char *) &board_event,
1213 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1214 * @phba: pointer to lpfc hba data structure.
1216 * This routine is invoked to handle the following HBA hardware error
1218 * 1 - HBA error attention interrupt
1219 * 2 - DMA ring index out of range
1220 * 3 - Mailbox command came back as unknown
1223 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1225 struct lpfc_vport *vport = phba->pport;
1226 struct lpfc_sli *psli = &phba->sli;
1227 struct lpfc_sli_ring *pring;
1228 uint32_t event_data;
1229 unsigned long temperature;
1230 struct temp_event temp_event_data;
1231 struct Scsi_Host *shost;
1233 /* If the pci channel is offline, ignore possible errors,
1234 * since we cannot communicate with the pci card anyway.
1236 if (pci_channel_offline(phba->pcidev)) {
1237 spin_lock_irq(&phba->hbalock);
1238 phba->hba_flag &= ~DEFER_ERATT;
1239 spin_unlock_irq(&phba->hbalock);
1243 /* If resets are disabled then leave the HBA alone and return */
1244 if (!phba->cfg_enable_hba_reset)
1247 /* Send an internal error event to mgmt application */
1248 lpfc_board_errevt_to_mgmt(phba);
1250 if (phba->hba_flag & DEFER_ERATT)
1251 lpfc_handle_deferred_eratt(phba);
1253 if (phba->work_hs & HS_FFER6) {
1254 /* Re-establishing Link */
1255 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1256 "1301 Re-establishing Link "
1257 "Data: x%x x%x x%x\n",
1259 phba->work_status[0], phba->work_status[1]);
1261 spin_lock_irq(&phba->hbalock);
1262 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1263 spin_unlock_irq(&phba->hbalock);
1266 * The firmware stops when it triggers an error attention with HS_FFER6.
1267 * That could cause I/Os to be dropped by the firmware.
1268 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
1269 * retry them after the link is re-established.
1271 pring = &psli->ring[psli->fcp_ring];
1272 lpfc_sli_abort_iocb_ring(phba, pring);
1275 * There was a firmware error. Take the hba offline and then
1276 * attempt to restart it.
1278 lpfc_offline_prep(phba);
1280 lpfc_sli_brdrestart(phba);
1281 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1282 lpfc_unblock_mgmt_io(phba);
1285 lpfc_unblock_mgmt_io(phba);
1286 } else if (phba->work_hs & HS_CRIT_TEMP) {
1287 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1288 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1289 temp_event_data.event_code = LPFC_CRIT_TEMP;
1290 temp_event_data.data = (uint32_t)temperature;
1292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1293 "0406 Adapter maximum temperature exceeded "
1294 "(%ld), taking this port offline "
1295 "Data: x%x x%x x%x\n",
1296 temperature, phba->work_hs,
1297 phba->work_status[0], phba->work_status[1]);
1299 shost = lpfc_shost_from_vport(phba->pport);
1300 fc_host_post_vendor_event(shost, fc_get_event_number(),
1301 sizeof(temp_event_data),
1302 (char *) &temp_event_data,
1303 SCSI_NL_VID_TYPE_PCI
1304 | PCI_VENDOR_ID_EMULEX);
1306 spin_lock_irq(&phba->hbalock);
1307 phba->over_temp_state = HBA_OVER_TEMP;
1308 spin_unlock_irq(&phba->hbalock);
1309 lpfc_offline_eratt(phba);
1312 /* The if clause above forces this code path when the status
1313 * failure is a value other than FFER6. Do not call the offline
1314 * twice. This is the adapter hardware error path.
1316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1317 "0457 Adapter Hardware Error "
1318 "Data: x%x x%x x%x\n",
1320 phba->work_status[0], phba->work_status[1]);
1322 event_data = FC_REG_DUMP_EVENT;
1323 shost = lpfc_shost_from_vport(vport);
1324 fc_host_post_vendor_event(shost, fc_get_event_number(),
1325 sizeof(event_data), (char *) &event_data,
1326 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1328 lpfc_offline_eratt(phba);
1334 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1335 * @phba: pointer to lpfc hba data structure.
1337 * This routine is invoked to handle the SLI4 HBA hardware error attention
1341 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1343 struct lpfc_vport *vport = phba->pport;
1344 uint32_t event_data;
1345 struct Scsi_Host *shost;
1347 /* If the pci channel is offline, ignore possible errors, since
1348 * we cannot communicate with the pci card anyway.
1350 if (pci_channel_offline(phba->pcidev))
1352 /* If resets are disabled then leave the HBA alone and return */
1353 if (!phba->cfg_enable_hba_reset)
1356 /* Send an internal error event to mgmt application */
1357 lpfc_board_errevt_to_mgmt(phba);
1359 /* For now, the actual action for SLI4 device handling is not
1360 * specified yet; just treat it as an adapter hardware failure
1362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1363 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1364 phba->work_status[0], phba->work_status[1]);
1366 event_data = FC_REG_DUMP_EVENT;
1367 shost = lpfc_shost_from_vport(vport);
1368 fc_host_post_vendor_event(shost, fc_get_event_number(),
1369 sizeof(event_data), (char *) &event_data,
1370 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1372 lpfc_sli4_offline_eratt(phba);
1376 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1377 * @phba: pointer to lpfc HBA data structure.
1379 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1380 * routine from the API jump table function pointer from the lpfc_hba struct.
1384 * Any other value - error.
1387 lpfc_handle_eratt(struct lpfc_hba *phba)
1389 (*phba->lpfc_handle_eratt)(phba);
1393 * lpfc_handle_latt - The HBA link event handler
1394 * @phba: pointer to lpfc hba data structure.
1396 * This routine is invoked from the worker thread to handle a HBA host
1397 * attention link event.
1400 lpfc_handle_latt(struct lpfc_hba *phba)
1402 struct lpfc_vport *vport = phba->pport;
1403 struct lpfc_sli *psli = &phba->sli;
1405 volatile uint32_t control;
1406 struct lpfc_dmabuf *mp;
1409 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1412 goto lpfc_handle_latt_err_exit;
1415 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1418 goto lpfc_handle_latt_free_pmb;
1421 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1424 goto lpfc_handle_latt_free_mp;
1427 /* Cleanup any outstanding ELS commands */
1428 lpfc_els_flush_all_cmd(phba);
1430 psli->slistat.link_event++;
1431 lpfc_read_la(phba, pmb, mp);
1432 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1434 /* Block ELS IOCBs until we have processed this mbox command */
1435 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1436 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1437 if (rc == MBX_NOT_FINISHED) {
1439 goto lpfc_handle_latt_free_mbuf;
1442 /* Clear Link Attention in HA REG */
1443 spin_lock_irq(&phba->hbalock);
1444 writel(HA_LATT, phba->HAregaddr);
1445 readl(phba->HAregaddr); /* flush */
1446 spin_unlock_irq(&phba->hbalock);
1450 lpfc_handle_latt_free_mbuf:
1451 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1452 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1453 lpfc_handle_latt_free_mp:
1455 lpfc_handle_latt_free_pmb:
1456 mempool_free(pmb, phba->mbox_mem_pool);
1457 lpfc_handle_latt_err_exit:
1458 /* Enable Link attention interrupts */
1459 spin_lock_irq(&phba->hbalock);
1460 psli->sli_flag |= LPFC_PROCESS_LA;
1461 control = readl(phba->HCregaddr);
1462 control |= HC_LAINT_ENA;
1463 writel(control, phba->HCregaddr);
1464 readl(phba->HCregaddr); /* flush */
1466 /* Clear Link Attention in HA REG */
1467 writel(HA_LATT, phba->HAregaddr);
1468 readl(phba->HAregaddr); /* flush */
1469 spin_unlock_irq(&phba->hbalock);
1470 lpfc_linkdown(phba);
1471 phba->link_state = LPFC_HBA_ERROR;
1473 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1474 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1480 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1481 * @phba: pointer to lpfc hba data structure.
1482 * @vpd: pointer to the vital product data.
1483 * @len: length of the vital product data in bytes.
1485 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1486 * an array of characters. In this routine, the ModelName, ProgramType, and
1487 * ModelDesc, etc. fields of the phba data structure will be populated.
1490 * 0 - pointer to the VPD passed in is NULL
1494 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1496 uint8_t lenlo, lenhi;
1506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1507 "0455 Vital Product Data: x%x x%x x%x x%x\n",
1508 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1510 while (!finished && (index < (len - 4))) {
1511 switch (vpd[index]) {
1519 i = ((((unsigned short)lenhi) << 8) + lenlo);
1528 Length = ((((unsigned short)lenhi) << 8) + lenlo);
1529 if (Length > len - index)
1530 Length = len - index;
1531 while (Length > 0) {
1532 /* Look for Serial Number */
1533 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1540 phba->SerialNumber[j++] = vpd[index++];
1544 phba->SerialNumber[j] = 0;
1547 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1548 phba->vpd_flag |= VPD_MODEL_DESC;
1555 phba->ModelDesc[j++] = vpd[index++];
1559 phba->ModelDesc[j] = 0;
1562 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1563 phba->vpd_flag |= VPD_MODEL_NAME;
1570 phba->ModelName[j++] = vpd[index++];
1574 phba->ModelName[j] = 0;
1577 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1578 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1585 phba->ProgramType[j++] = vpd[index++];
1589 phba->ProgramType[j] = 0;
1592 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1593 phba->vpd_flag |= VPD_PORT;
1600 phba->Port[j++] = vpd[index++];
1630 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1631 * @phba: pointer to lpfc hba data structure.
1632 * @mdp: pointer to the data structure to hold the derived model name.
1633 * @descp: pointer to the data structure to hold the derived description.
1635 * This routine retrieves the HBA's description based on its registered PCI device
1636 * ID. The @descp passed into this function points to an array of 256 chars. It
1637 * shall be returned with the model name, maximum speed, and the host bus type.
1638 * The @mdp passed into this function points to an array of 80 chars. When the
1639 * function returns, the @mdp will be filled with the model name.
1642 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1645 uint16_t dev_id = phba->pcidev->device;
1648 int oneConnect = 0; /* default is not a oneConnect */
1653 } m = {"<Unknown>", "", ""};
1655 if (mdp && mdp[0] != '\0'
1656 && descp && descp[0] != '\0')
1659 if (phba->lmt & LMT_10Gb)
1661 else if (phba->lmt & LMT_8Gb)
1663 else if (phba->lmt & LMT_4Gb)
1665 else if (phba->lmt & LMT_2Gb)
1673 case PCI_DEVICE_ID_FIREFLY:
1674 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1676 case PCI_DEVICE_ID_SUPERFLY:
1677 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1678 m = (typeof(m)){"LP7000", "PCI",
1679 "Fibre Channel Adapter"};
1681 m = (typeof(m)){"LP7000E", "PCI",
1682 "Fibre Channel Adapter"};
1684 case PCI_DEVICE_ID_DRAGONFLY:
1685 m = (typeof(m)){"LP8000", "PCI",
1686 "Fibre Channel Adapter"};
1688 case PCI_DEVICE_ID_CENTAUR:
1689 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1690 m = (typeof(m)){"LP9002", "PCI",
1691 "Fibre Channel Adapter"};
1693 m = (typeof(m)){"LP9000", "PCI",
1694 "Fibre Channel Adapter"};
1696 case PCI_DEVICE_ID_RFLY:
1697 m = (typeof(m)){"LP952", "PCI",
1698 "Fibre Channel Adapter"};
1700 case PCI_DEVICE_ID_PEGASUS:
1701 m = (typeof(m)){"LP9802", "PCI-X",
1702 "Fibre Channel Adapter"};
1704 case PCI_DEVICE_ID_THOR:
1705 m = (typeof(m)){"LP10000", "PCI-X",
1706 "Fibre Channel Adapter"};
1708 case PCI_DEVICE_ID_VIPER:
1709 m = (typeof(m)){"LPX1000", "PCI-X",
1710 "Fibre Channel Adapter"};
1712 case PCI_DEVICE_ID_PFLY:
1713 m = (typeof(m)){"LP982", "PCI-X",
1714 "Fibre Channel Adapter"};
1716 case PCI_DEVICE_ID_TFLY:
1717 m = (typeof(m)){"LP1050", "PCI-X",
1718 "Fibre Channel Adapter"};
1720 case PCI_DEVICE_ID_HELIOS:
1721 m = (typeof(m)){"LP11000", "PCI-X2",
1722 "Fibre Channel Adapter"};
1724 case PCI_DEVICE_ID_HELIOS_SCSP:
1725 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1726 "Fibre Channel Adapter"};
1728 case PCI_DEVICE_ID_HELIOS_DCSP:
1729 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1730 "Fibre Channel Adapter"};
1732 case PCI_DEVICE_ID_NEPTUNE:
1733 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1735 case PCI_DEVICE_ID_NEPTUNE_SCSP:
1736 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1738 case PCI_DEVICE_ID_NEPTUNE_DCSP:
1739 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1741 case PCI_DEVICE_ID_BMID:
1742 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1744 case PCI_DEVICE_ID_BSMB:
1745 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1747 case PCI_DEVICE_ID_ZEPHYR:
1748 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1750 case PCI_DEVICE_ID_ZEPHYR_SCSP:
1751 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1753 case PCI_DEVICE_ID_ZEPHYR_DCSP:
1754 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1757 case PCI_DEVICE_ID_ZMID:
1758 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1760 case PCI_DEVICE_ID_ZSMB:
1761 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1763 case PCI_DEVICE_ID_LP101:
1764 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1766 case PCI_DEVICE_ID_LP10000S:
1767 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1769 case PCI_DEVICE_ID_LP11000S:
1770 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1772 case PCI_DEVICE_ID_LPE11000S:
1773 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1775 case PCI_DEVICE_ID_SAT:
1776 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1778 case PCI_DEVICE_ID_SAT_MID:
1779 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1781 case PCI_DEVICE_ID_SAT_SMB:
1782 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1784 case PCI_DEVICE_ID_SAT_DCSP:
1785 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1787 case PCI_DEVICE_ID_SAT_SCSP:
1788 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1790 case PCI_DEVICE_ID_SAT_S:
1791 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1793 case PCI_DEVICE_ID_HORNET:
1794 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1797 case PCI_DEVICE_ID_PROTEUS_VF:
1798 m = (typeof(m)){"LPev12000", "PCIe IOV",
1799 "Fibre Channel Adapter"};
1801 case PCI_DEVICE_ID_PROTEUS_PF:
1802 m = (typeof(m)){"LPev12000", "PCIe IOV",
1803 "Fibre Channel Adapter"};
1805 case PCI_DEVICE_ID_PROTEUS_S:
1806 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1807 "Fibre Channel Adapter"};
1809 case PCI_DEVICE_ID_TIGERSHARK:
1811 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1813 case PCI_DEVICE_ID_TOMCAT:
1815 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1817 case PCI_DEVICE_ID_FALCON:
1818 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1819 "EmulexSecure Fibre"};
1822 m = (typeof(m)){"Unknown", "", ""};
1826 if (mdp && mdp[0] == '\0')
1827 snprintf(mdp, 79,"%s", m.name);
1828 /* oneConnect HBAs require special processing; they are all initiators
1829 * and we put the port number on the end
1831 if (descp && descp[0] == '\0') {
1833 snprintf(descp, 255,
1834 "Emulex OneConnect %s, %s Initiator, Port %s",
1838 snprintf(descp, 255,
1839 "Emulex %s %d%s %s %s",
1840 m.name, max_speed, (GE) ? "GE" : "Gb",
1846 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
1847 * @phba: pointer to lpfc hba data structure.
1848 * @pring: pointer to a IOCB ring.
1849 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1851 * This routine posts a given number of IOCBs with the associated DMA buffer
1852 * descriptors specified by the cnt argument to the given IOCB ring.
1855 * The number of IOCBs NOT able to be posted to the IOCB ring.
1858 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1861 struct lpfc_iocbq *iocb;
1862 struct lpfc_dmabuf *mp1, *mp2;
1864 cnt += pring->missbufcnt;
1866 /* While there are buffers to post */
1868 /* Allocate buffer for command iocb */
1869 iocb = lpfc_sli_get_iocbq(phba);
1871 pring->missbufcnt = cnt;
1876 /* 2 buffers can be posted per command */
1877 /* Allocate buffer to post */
1878 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1880 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1881 if (!mp1 || !mp1->virt) {
1883 lpfc_sli_release_iocbq(phba, iocb);
1884 pring->missbufcnt = cnt;
1888 INIT_LIST_HEAD(&mp1->list);
1889 /* Allocate buffer to post */
1891 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1893 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1895 if (!mp2 || !mp2->virt) {
1897 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1899 lpfc_sli_release_iocbq(phba, iocb);
1900 pring->missbufcnt = cnt;
1904 INIT_LIST_HEAD(&mp2->list);
1909 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1910 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1911 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1912 icmd->ulpBdeCount = 1;
1915 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1916 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1917 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1919 icmd->ulpBdeCount = 2;
1922 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1925 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1927 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1931 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1935 lpfc_sli_release_iocbq(phba, iocb);
1936 pring->missbufcnt = cnt;
1939 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1941 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1943 pring->missbufcnt = 0;
1948 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
1949 * @phba: pointer to lpfc hba data structure.
1951 * This routine posts initial receive IOCB buffers to the ELS ring. The
1952 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1956 * 0 - success (currently always success)
1959 lpfc_post_rcv_buf(struct lpfc_hba *phba)
1961 struct lpfc_sli *psli = &phba->sli;
1963 /* Ring 0, ELS / CT buffers */
1964 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1965 /* Ring 2 - FCP no buffers needed */
1970 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
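/* The S() macro above rotates a 32-bit value V left by N bits; it is used by the SHA-1 style hashing routines below. */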
1973 * lpfc_sha_init - Set up initial array of hash table entries
1974 * @HashResultPointer: pointer to an array as hash table.
1976 * This routine sets up the initial values in the array of hash table entries
1980 lpfc_sha_init(uint32_t * HashResultPointer)
1982 HashResultPointer[0] = 0x67452301;
1983 HashResultPointer[1] = 0xEFCDAB89;
1984 HashResultPointer[2] = 0x98BADCFE;
1985 HashResultPointer[3] = 0x10325476;
1986 HashResultPointer[4] = 0xC3D2E1F0;
1990 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1991 * @HashResultPointer: pointer to an initial/result hash table.
1992 * @HashWorkingPointer: pointer to a working hash table.
1994 * This routine iterates an initial hash table pointed to by @HashResultPointer
1995 * with the values from the working hash table pointed to by @HashWorkingPointer.
1996 * The results are put back into the initial hash table, returned through
1997 * the @HashResultPointer as the result hash table.
2000 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2004 uint32_t A, B, C, D, E;
2007 HashWorkingPointer[t] =
2009 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2011 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2012 } while (++t <= 79);
2014 A = HashResultPointer[0];
2015 B = HashResultPointer[1];
2016 C = HashResultPointer[2];
2017 D = HashResultPointer[3];
2018 E = HashResultPointer[4];
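/* 80 SHA-1 style rounds: the mixing function and additive constant change every 20 rounds. */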
2022 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2023 } else if (t < 40) {
2024 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2025 } else if (t < 60) {
2026 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2028 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2030 TEMP += S(5, A) + E + HashWorkingPointer[t];
2036 } while (++t <= 79);
2038 HashResultPointer[0] += A;
2039 HashResultPointer[1] += B;
2040 HashResultPointer[2] += C;
2041 HashResultPointer[3] += D;
2042 HashResultPointer[4] += E;
2047 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2048 * @RandomChallenge: pointer to the entry of host challenge random number array.
2049 * @HashWorking: pointer to the entry of the working hash array.
2051 * This routine calculates the working hash array referred to by @HashWorking
2052 * from the challenge random numbers associated with the host, referred to by
2053 * @RandomChallenge. The result is put into the entry of the working hash
2054 * array and returned by reference through @HashWorking.
2057 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2059 *HashWorking = (*RandomChallenge ^ *HashWorking);
2063 * lpfc_hba_init - Perform special handling for LC HBA initialization
2064 * @phba: pointer to lpfc hba data structure.
2065 * @hbainit: pointer to an array of unsigned 32-bit integers.
2067 * This routine performs the special handling for LC HBA initialization.
2070 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2073 uint32_t *HashWorking;
2074 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2076 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2080 HashWorking[0] = HashWorking[78] = *pwwnn++;
2081 HashWorking[1] = HashWorking[79] = *pwwnn;
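/* Mix the adapter's random challenge data into the first words of the working array by XOR before hashing. */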
2083 for (t = 0; t < 7; t++)
2084 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2086 lpfc_sha_init(hbainit);
2087 lpfc_sha_iterate(hbainit, HashWorking);
2092 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2093 * @vport: pointer to a virtual N_Port data structure.
2095 * This routine performs the necessary cleanups before deleting the @vport.
2096 * It invokes the discovery state machine to perform necessary state
2097 * transitions and to release the ndlps associated with the @vport. Note,
2098 * the physical port is treated as @vport 0.
2101 lpfc_cleanup(struct lpfc_vport *vport)
2103 struct lpfc_hba *phba = vport->phba;
2104 struct lpfc_nodelist *ndlp, *next_ndlp;
2107 if (phba->link_state > LPFC_LINK_DOWN)
2108 lpfc_port_link_failure(vport);
2110 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2111 if (!NLP_CHK_NODE_ACT(ndlp)) {
2112 ndlp = lpfc_enable_node(vport, ndlp,
2113 NLP_STE_UNUSED_NODE);
2116 spin_lock_irq(&phba->ndlp_lock);
2117 NLP_SET_FREE_REQ(ndlp);
2118 spin_unlock_irq(&phba->ndlp_lock);
2119 /* Trigger the release of the ndlp memory */
2123 spin_lock_irq(&phba->ndlp_lock);
2124 if (NLP_CHK_FREE_REQ(ndlp)) {
2125 /* The ndlp should not be in memory free mode already */
2126 spin_unlock_irq(&phba->ndlp_lock);
2129 /* Indicate request for freeing ndlp memory */
2130 NLP_SET_FREE_REQ(ndlp);
2131 spin_unlock_irq(&phba->ndlp_lock);
2133 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2134 ndlp->nlp_DID == Fabric_DID) {
2135 /* Just free up ndlp with Fabric_DID for vports */
2140 if (ndlp->nlp_type & NLP_FABRIC)
2141 lpfc_disc_state_machine(vport, ndlp, NULL,
2142 NLP_EVT_DEVICE_RECOVERY);
2144 lpfc_disc_state_machine(vport, ndlp, NULL,
2149 /* At this point, ALL ndlp's should be gone
2150 * because of the previous NLP_EVT_DEVICE_RM.
2151 * Let's wait for this to happen, if needed.
2153 while (!list_empty(&vport->fc_nodes)) {
2155 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2156 "0233 Nodelist not empty\n");
2157 list_for_each_entry_safe(ndlp, next_ndlp,
2158 &vport->fc_nodes, nlp_listp) {
2159 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2161 "0282 did:x%x ndlp:x%p "
2162 "usgmap:x%x refcnt:%d\n",
2163 ndlp->nlp_DID, (void *)ndlp,
2166 &ndlp->kref.refcount));
2171 /* Wait for any activity on ndlps to settle */
2177 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2178 * @vport: pointer to a virtual N_Port data structure.
2180 * This routine stops all the timers associated with a @vport. This function
2181 * is invoked before disabling or deleting a @vport. Note that the physical
2182 * port is treated as @vport 0.
2185 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2187 del_timer_sync(&vport->els_tmofunc);
2188 del_timer_sync(&vport->fc_fdmitmo);
2189 lpfc_can_disctmo(vport);
2194 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2195 * @phba: pointer to lpfc hba data structure.
2197 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2198 * caller of this routine should already hold the hbalock.
2201 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2203 /* Clear pending FCF rediscovery wait and failover in progress flags */
2204 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2207 /* Now, try to stop the timer */
2208 del_timer(&phba->fcf.redisc_wait);
2212 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2213 * @phba: pointer to lpfc hba data structure.
2215 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2216 * checks whether the FCF rediscovery wait timer is pending with the host
2217 * lock held before proceeding with disabling the timer and clearing the
2218 * wait timer pending flag.
2221 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2223 spin_lock_irq(&phba->hbalock);
2224 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2225 /* FCF rediscovery timer already fired or stopped */
2226 spin_unlock_irq(&phba->hbalock);
2229 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2230 spin_unlock_irq(&phba->hbalock);
2234 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2235 * @phba: pointer to lpfc hba data structure.
2237 * This routine stops all the timers associated with a HBA. This function is
2238 * invoked before either putting a HBA offline or unloading the driver.
2241 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2243 lpfc_stop_vport_timers(phba->pport);
2244 del_timer_sync(&phba->sli.mbox_tmo);
2245 del_timer_sync(&phba->fabric_block_timer);
2246 del_timer_sync(&phba->eratt_poll);
2247 del_timer_sync(&phba->hb_tmofunc);
2248 phba->hb_outstanding = 0;
2250 switch (phba->pci_dev_grp) {
2251 case LPFC_PCI_DEV_LP:
2252 /* Stop any LightPulse device specific driver timers */
2253 del_timer_sync(&phba->fcp_poll_timer);
2255 case LPFC_PCI_DEV_OC:
2256 /* Stop any OneConnect device specific driver timers */
2257 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2260 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2261 "0297 Invalid device group (x%x)\n",
2269 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2270 * @phba: pointer to lpfc hba data structure.
2272 * This routine marks a HBA's management interface as blocked. Once the HBA's
2273 * management interface is marked as blocked, all the user space access to
2274 * the HBA, whether they are from sysfs interface or libdfc interface will
2275 * all be blocked. The HBA is set to block the management interface when the
2276 * driver prepares the HBA interface for online or offline.
2279 lpfc_block_mgmt_io(struct lpfc_hba * phba)
2281 unsigned long iflag;
2283 spin_lock_irqsave(&phba->hbalock, iflag);
2284 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2285 spin_unlock_irqrestore(&phba->hbalock, iflag);
2289 * lpfc_online - Initialize and bring a HBA online
2290 * @phba: pointer to lpfc hba data structure.
2292 * This routine initializes the HBA and brings a HBA online. During this
2293 * process, the management interface is blocked to prevent user space access
2294 * to the HBA interfering with the driver initialization.
2301 lpfc_online(struct lpfc_hba *phba)
2303 struct lpfc_vport *vport;
2304 struct lpfc_vport **vports;
2309 vport = phba->pport;
2311 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2314 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2315 "0458 Bring Adapter online\n");
2317 lpfc_block_mgmt_io(phba);
2319 if (!lpfc_sli_queue_setup(phba)) {
2320 lpfc_unblock_mgmt_io(phba);
2324 if (phba->sli_rev == LPFC_SLI_REV4) {
2325 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2326 lpfc_unblock_mgmt_io(phba);
2330 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2331 lpfc_unblock_mgmt_io(phba);
2336 vports = lpfc_create_vport_work_array(phba);
2338 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2339 struct Scsi_Host *shost;
2340 shost = lpfc_shost_from_vport(vports[i]);
2341 spin_lock_irq(shost->host_lock);
2342 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2343 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2344 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2345 if (phba->sli_rev == LPFC_SLI_REV4)
2346 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2347 spin_unlock_irq(shost->host_lock);
2349 lpfc_destroy_vport_work_array(phba, vports);
2351 lpfc_unblock_mgmt_io(phba);
2356 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2357 * @phba: pointer to lpfc hba data structure.
2359 * This routine marks a HBA's management interface as not blocked. Once the
2360 * HBA's management interface is marked as not blocked, all the user space
2361 * access to the HBA, whether they are from sysfs interface or libdfc
2362 * interface will be allowed. The HBA is set to block the management interface
2363 * when the driver prepares the HBA interface for online or offline and then
2364 * set to unblock the management interface afterwards.
2367 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2369 unsigned long iflag;
2371 spin_lock_irqsave(&phba->hbalock, iflag);
2372 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2373 spin_unlock_irqrestore(&phba->hbalock, iflag);
2377 * lpfc_offline_prep - Prepare a HBA to be brought offline
2378 * @phba: pointer to lpfc hba data structure.
2380 * This routine is invoked to prepare a HBA to be brought offline. It performs
2381 * unregistration login to all the nodes on all vports and flushes the mailbox
2382 * queue to make it ready to be brought offline.
2385 lpfc_offline_prep(struct lpfc_hba * phba)
2387 struct lpfc_vport *vport = phba->pport;
2388 struct lpfc_nodelist *ndlp, *next_ndlp;
2389 struct lpfc_vport **vports;
2390 struct Scsi_Host *shost;
2393 if (vport->fc_flag & FC_OFFLINE_MODE)
2396 lpfc_block_mgmt_io(phba);
2398 lpfc_linkdown(phba);
2400 /* Issue an unreg_login to all nodes on all vports */
2401 vports = lpfc_create_vport_work_array(phba);
2402 if (vports != NULL) {
2403 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2404 if (vports[i]->load_flag & FC_UNLOADING)
2406 shost = lpfc_shost_from_vport(vports[i]);
2407 spin_lock_irq(shost->host_lock);
2408 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2409 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2410 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2411 spin_unlock_irq(shost->host_lock);
2413 shost = lpfc_shost_from_vport(vports[i]);
2414 list_for_each_entry_safe(ndlp, next_ndlp,
2415 &vports[i]->fc_nodes,
2417 if (!NLP_CHK_NODE_ACT(ndlp))
2419 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2421 if (ndlp->nlp_type & NLP_FABRIC) {
2422 lpfc_disc_state_machine(vports[i], ndlp,
2423 NULL, NLP_EVT_DEVICE_RECOVERY);
2424 lpfc_disc_state_machine(vports[i], ndlp,
2425 NULL, NLP_EVT_DEVICE_RM);
2427 spin_lock_irq(shost->host_lock);
2428 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2429 spin_unlock_irq(shost->host_lock);
2430 lpfc_unreg_rpi(vports[i], ndlp);
2434 lpfc_destroy_vport_work_array(phba, vports);
2436 lpfc_sli_mbox_sys_shutdown(phba);
2440 * lpfc_offline - Bring a HBA offline
2441 * @phba: pointer to lpfc hba data structure.
2443 * This routine actually brings a HBA offline. It stops all the timers
2444 * associated with the HBA, brings down the SLI layer, and eventually
2445 * marks the HBA as in offline state for the upper layer protocol.
2448 lpfc_offline(struct lpfc_hba *phba)
2450 struct Scsi_Host *shost;
2451 struct lpfc_vport **vports;
2454 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2457 /* stop port and all timers associated with this hba */
2458 lpfc_stop_port(phba);
2459 vports = lpfc_create_vport_work_array(phba);
2461 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2462 lpfc_stop_vport_timers(vports[i]);
2463 lpfc_destroy_vport_work_array(phba, vports);
2464 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2465 "0460 Bring Adapter offline\n");
2466 /* Bring down the SLI Layer and cleanup. The HBA is offline
2468 lpfc_sli_hba_down(phba);
2469 spin_lock_irq(&phba->hbalock);
2471 spin_unlock_irq(&phba->hbalock);
2472 vports = lpfc_create_vport_work_array(phba);
2474 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2475 shost = lpfc_shost_from_vport(vports[i]);
2476 spin_lock_irq(shost->host_lock);
2477 vports[i]->work_port_events = 0;
2478 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2479 spin_unlock_irq(shost->host_lock);
2481 lpfc_destroy_vport_work_array(phba, vports);
2485 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2486 * @phba: pointer to lpfc hba data structure.
2488 * This routine is to free all the SCSI buffers and IOCBs from the driver
2489 * list back to kernel. It is called from lpfc_pci_remove_one to free
2490 * the internal resources before the device is removed from the system.
2493 * 0 - successful (for now, it always returns 0)
2496 lpfc_scsi_free(struct lpfc_hba *phba)
2498 struct lpfc_scsi_buf *sb, *sb_next;
2499 struct lpfc_iocbq *io, *io_next;
2501 spin_lock_irq(&phba->hbalock);
2502 /* Release all the lpfc_scsi_bufs maintained by this host. */
2503 spin_lock(&phba->scsi_buf_list_lock);
2504 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2505 list_del(&sb->list);
2506 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2509 phba->total_scsi_bufs--;
2511 spin_unlock(&phba->scsi_buf_list_lock);
2513 /* Release all the lpfc_iocbq entries maintained by this host. */
2514 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2515 list_del(&io->list);
2517 phba->total_iocbq_bufs--;
2519 spin_unlock_irq(&phba->hbalock);
2524 * lpfc_create_port - Create an FC port
2525 * @phba: pointer to lpfc hba data structure.
2526 * @instance: a unique integer ID to this FC port.
2527 * @dev: pointer to the device data structure.
2529 * This routine creates a FC port for the upper layer protocol. The FC port
2530 * can be created on top of either a physical port or a virtual port provided
2531 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2532 * and associates the FC port created before adding the shost into the SCSI
2536 * @vport - pointer to the virtual N_Port data structure.
2537 * NULL - port create failed.
2540 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2542 struct lpfc_vport *vport;
2543 struct Scsi_Host *shost;
2546 if (dev != &phba->pcidev->dev)
2547 shost = scsi_host_alloc(&lpfc_vport_template,
2548 sizeof(struct lpfc_vport));
2550 shost = scsi_host_alloc(&lpfc_template,
2551 sizeof(struct lpfc_vport));
2555 vport = (struct lpfc_vport *) shost->hostdata;
2557 vport->load_flag |= FC_LOADING;
2558 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2559 vport->fc_rscn_flush = 0;
2561 lpfc_get_vport_cfgparam(vport);
2562 shost->unique_id = instance;
2563 shost->max_id = LPFC_MAX_TARGET;
2564 shost->max_lun = vport->cfg_max_luns;
2565 shost->this_id = -1;
2566 shost->max_cmd_len = 16;
2567 if (phba->sli_rev == LPFC_SLI_REV4) {
2568 shost->dma_boundary =
2569 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2570 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2574 * Set initial can_queue value since 0 is no longer supported and
2575 * scsi_add_host will fail. This will be adjusted later based on the
2576 * max xri value determined in hba setup.
2578 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2579 if (dev != &phba->pcidev->dev) {
2580 shost->transportt = lpfc_vport_transport_template;
2581 vport->port_type = LPFC_NPIV_PORT;
2583 shost->transportt = lpfc_transport_template;
2584 vport->port_type = LPFC_PHYSICAL_PORT;
2587 /* Initialize all internally managed lists. */
2588 INIT_LIST_HEAD(&vport->fc_nodes);
2589 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2590 spin_lock_init(&vport->work_port_lock);
2592 init_timer(&vport->fc_disctmo);
2593 vport->fc_disctmo.function = lpfc_disc_timeout;
2594 vport->fc_disctmo.data = (unsigned long)vport;
2596 init_timer(&vport->fc_fdmitmo);
2597 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2598 vport->fc_fdmitmo.data = (unsigned long)vport;
2600 init_timer(&vport->els_tmofunc);
2601 vport->els_tmofunc.function = lpfc_els_timeout;
2602 vport->els_tmofunc.data = (unsigned long)vport;
2603 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2604 phba->menlo_flag |= HBA_MENLO_SUPPORT;
2605 /* check for menlo minimum sg count */
2606 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2607 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2608 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2612 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2616 spin_lock_irq(&phba->hbalock);
2617 list_add_tail(&vport->listentry, &phba->port_list);
2618 spin_unlock_irq(&phba->hbalock);
2622 scsi_host_put(shost);
2628 * destroy_port - destroy an FC port
2629 * @vport: pointer to an lpfc virtual N_Port data structure.
2631 * This routine destroys a FC port from the upper layer protocol. All the
2632 * resources associated with the port are released.
2635 destroy_port(struct lpfc_vport *vport)
2637 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2638 struct lpfc_hba *phba = vport->phba;
2640 lpfc_debugfs_terminate(vport);
2641 fc_remove_host(shost);
2642 scsi_remove_host(shost);
2644 spin_lock_irq(&phba->hbalock);
2645 list_del_init(&vport->listentry);
2646 spin_unlock_irq(&phba->hbalock);
2648 lpfc_cleanup(vport);
2653 * lpfc_get_instance - Get a unique integer ID
2655 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2656 * uses the kernel idr facility to perform the task.
2659 * instance - a unique integer ID allocated as the new instance.
2660 * -1 - lpfc get instance failed.
2663 lpfc_get_instance(void)
2667 /* Assign an unused number */
2668 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2670 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2676 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2677 * @shost: pointer to SCSI host data structure.
2678 * @time: elapsed time of the scan in jiffies.
2680 * This routine is called by the SCSI layer with a SCSI host to determine
2681 * whether the scan host is finished.
2683 * Note: there is no scan_start function as adapter initialization will have
2684 * asynchronously kicked off the link initialization.
2687 * 0 - SCSI host scan is not over yet.
2688 * 1 - SCSI host scan is over.
2690 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2692 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2693 struct lpfc_hba *phba = vport->phba;
2696 spin_lock_irq(shost->host_lock);
2698 if (vport->load_flag & FC_UNLOADING) {
2702 if (time >= 30 * HZ) {
2703 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2704 "0461 Scanning longer than 30 "
2705 "seconds. Continuing initialization\n");
2709 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2710 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2711 "0465 Link down longer than 15 "
2712 "seconds. Continuing initialization\n");
2717 if (vport->port_state != LPFC_VPORT_READY)
2719 if (vport->num_disc_nodes || vport->fc_prli_sent)
2721 if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2723 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2729 spin_unlock_irq(shost->host_lock);
2734 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2735 * @shost: pointer to SCSI host data structure.
2737 * This routine initializes a given SCSI host attributes on a FC port. The
2738 * SCSI host can be either on top of a physical port or a virtual port.
2740 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2742 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2743 struct lpfc_hba *phba = vport->phba;
2745 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2748 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2749 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2750 fc_host_supported_classes(shost) = FC_COS_CLASS3;
2752 memset(fc_host_supported_fc4s(shost), 0,
2753 sizeof(fc_host_supported_fc4s(shost)));
2754 fc_host_supported_fc4s(shost)[2] = 1;
2755 fc_host_supported_fc4s(shost)[7] = 1;
2757 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2758 sizeof fc_host_symbolic_name(shost));
2760 fc_host_supported_speeds(shost) = 0;
2761 if (phba->lmt & LMT_10Gb)
2762 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2763 if (phba->lmt & LMT_8Gb)
2764 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2765 if (phba->lmt & LMT_4Gb)
2766 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2767 if (phba->lmt & LMT_2Gb)
2768 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2769 if (phba->lmt & LMT_1Gb)
2770 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2772 fc_host_maxframe_size(shost) =
2773 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2774 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
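	/*
	 * The maximum frame size is rebuilt from the buffer-to-buffer receive
	 * size in the service parameters: the low nibble of bbRcvSizeMsb forms
	 * bits 8-11 and bbRcvSizeLsb forms bits 0-7.
	 */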
2776 /* This value is also unchanging */
2777 memset(fc_host_active_fc4s(shost), 0,
2778 sizeof(fc_host_active_fc4s(shost)));
2779 fc_host_active_fc4s(shost)[2] = 1;
2780 fc_host_active_fc4s(shost)[7] = 1;
2782 fc_host_max_npiv_vports(shost) = phba->max_vpi;
2783 spin_lock_irq(shost->host_lock);
2784 vport->load_flag &= ~FC_LOADING;
2785 spin_unlock_irq(shost->host_lock);
2789 * lpfc_stop_port_s3 - Stop SLI3 device port
2790 * @phba: pointer to lpfc hba data structure.
2792 * This routine is invoked to stop an SLI3 device port. It stops the device
2793 * from generating interrupts and stops the device driver's timers for the
2797 lpfc_stop_port_s3(struct lpfc_hba *phba)
2799 /* Clear all interrupt enable conditions */
2800 writel(0, phba->HCregaddr);
2801 readl(phba->HCregaddr); /* flush */
2802 /* Clear all pending interrupts */
2803 writel(0xffffffff, phba->HAregaddr);
2804 readl(phba->HAregaddr); /* flush */
2806 /* Reset some HBA SLI setup states */
2807 lpfc_stop_hba_timers(phba);
2808 phba->pport->work_port_events = 0;
2812 * lpfc_stop_port_s4 - Stop SLI4 device port
2813 * @phba: pointer to lpfc hba data structure.
2815 * This routine is invoked to stop an SLI4 device port. It stops the device
2816 * from generating interrupts and stops the device driver's timers for the
2820 lpfc_stop_port_s4(struct lpfc_hba *phba)
2822 /* Reset some HBA SLI4 setup states */
2823 lpfc_stop_hba_timers(phba);
2824 phba->pport->work_port_events = 0;
2825 phba->sli4_hba.intr_enable = 0;
2829 * lpfc_stop_port - Wrapper function for stopping hba port
2830 * @phba: Pointer to HBA context object.
2832 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
2833 * the API jump table function pointer in the lpfc_hba struct.
2836 lpfc_stop_port(struct lpfc_hba *phba)
2838 phba->lpfc_stop_port(phba);
2842 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2843 * @phba: pointer to lpfc hba data structure.
2845 * This routine is invoked to remove the driver default fcf record from
2846 * the port. This routine currently acts on FCF Index 0.
2850 lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2853 LPFC_MBOXQ_t *mboxq;
2854 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2855 uint32_t mbox_tmo, req_len;
2856 uint32_t shdr_status, shdr_add_status;
2858 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2860 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2861 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
2865 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2866 sizeof(struct lpfc_sli4_cfg_mhdr);
2867 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2868 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2869 req_len, LPFC_SLI4_MBX_EMBED);
3871 * In phase 1, there is a single FCF index, 0. In phase 2, the driver
2872 * supports multiple FCF indices.
2874 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2875 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2876 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2877 phba->fcf.current_rec.fcf_indx);
2879 if (!phba->sli4_hba.intr_enable)
2880 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2882 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2883 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2885 /* The IOCTL status is embedded in the mailbox subheader. */
2886 shdr_status = bf_get(lpfc_mbox_hdr_status,
2887 &del_fcf_record->header.cfg_shdr.response);
2888 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2889 &del_fcf_record->header.cfg_shdr.response);
2890 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2892 "2516 DEL FCF of default FCF Index failed "
2893 "mbx status x%x, status x%x add_status x%x\n",
2894 rc, shdr_status, shdr_add_status);
2896 if (rc != MBX_TIMEOUT)
2897 mempool_free(mboxq, phba->mbox_mem_pool);
2901 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2902 * @phba: Pointer to hba for which this call is being executed.
2904 * This routine starts the timer waiting for the FCF rediscovery to complete.
2907 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2909 unsigned long fcf_redisc_wait_tmo =
2910 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2911 /* Start fcf rediscovery wait period timer */
2912 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2913 spin_lock_irq(&phba->hbalock);
2914 /* Allow action to new fcf asynchronous event */
2915 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2916 /* Mark the FCF rediscovery pending state */
2917 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2918 spin_unlock_irq(&phba->hbalock);
2922 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
2923 * @ptr: maps to the lpfc hba data structure pointer.
2925 * This routine is invoked when the wait for FCF table rediscovery has
2926 * timed out. If new FCF record(s) have been discovered during the
2927 * wait period, a new FCF event is added to the FCoE async event
2928 * list and the worker thread is woken up to process it from the
2929 * worker thread context.
2932 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2934 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2936 /* Don't send FCF rediscovery event if timer cancelled */
2937 spin_lock_irq(&phba->hbalock);
2938 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2939 spin_unlock_irq(&phba->hbalock);
2942 /* Clear FCF rediscovery timer pending flag */
2943 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2944 /* FCF rediscovery event to worker thread */
2945 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2946 spin_unlock_irq(&phba->hbalock);
2947 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2948 "2776 FCF rediscover wait timer expired, post "
2949 "a worker thread event for FCF table scan\n");
2950 /* wake up worker thread */
2951 lpfc_worker_wake_up(phba);
2955 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2956 * @phba: pointer to lpfc hba data structure.
2958 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2959 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2960 * was successful and the firmware supports FCoE. Any other return indicates
2961 * an error. It is assumed that this function will be called before interrupts
2965 lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2968 LPFC_MBOXQ_t *mboxq;
2969 struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2971 uint32_t shdr_status, shdr_add_status;
2973 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2976 "2621 Failed to allocate mbox for "
2977 "query firmware config cmd\n");
2980 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2981 length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2982 sizeof(struct lpfc_sli4_cfg_mhdr));
2983 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2984 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2985 length, LPFC_SLI4_MBX_EMBED);
2986 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2987 /* The IOCTL status is embedded in the mailbox subheader. */
2988 shdr_status = bf_get(lpfc_mbox_hdr_status,
2989 &query_fw_cfg->header.cfg_shdr.response);
2990 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2991 &query_fw_cfg->header.cfg_shdr.response);
2992 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2994 "2622 Query Firmware Config failed "
2995 "mbx status x%x, status x%x add_status x%x\n",
2996 rc, shdr_status, shdr_add_status);
2999 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
3000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3001 "2623 FCoE Function not supported by firmware. "
3002 "Function mode = %08x\n",
3003 query_fw_cfg->function_mode);
3006 if (rc != MBX_TIMEOUT)
3007 mempool_free(mboxq, phba->mbox_mem_pool);
3012 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3013 * @phba: pointer to lpfc hba data structure.
3014 * @acqe_link: pointer to the async link completion queue entry.
3016 * This routine is to parse the SLI4 link-attention link fault code and
3017 * translate it into the base driver's read link attention mailbox command
3020 * Return: Link-attention status in terms of base driver's coding.
3023 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3024 struct lpfc_acqe_link *acqe_link)
3026 uint16_t latt_fault;
3028 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3029 case LPFC_ASYNC_LINK_FAULT_NONE:
3030 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3031 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3036 "0398 Invalid link fault code: x%x\n",
3037 bf_get(lpfc_acqe_link_fault, acqe_link));
3038 latt_fault = MBXERR_ERROR;
3045 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3046 * @phba: pointer to lpfc hba data structure.
3047 * @acqe_link: pointer to the async link completion queue entry.
3049 * This routine is to parse the SLI4 link attention type and translate it
3050 * into the base driver's link attention type coding.
3052 * Return: Link attention type in terms of base driver's coding.
3055 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3056 struct lpfc_acqe_link *acqe_link)
3060 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3061 case LPFC_ASYNC_LINK_STATUS_DOWN:
3062 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3063 att_type = AT_LINK_DOWN;
3065 case LPFC_ASYNC_LINK_STATUS_UP:
3066 /* Ignore physical link up events - wait for logical link up */
3067 att_type = AT_RESERVED;
3069 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3070 att_type = AT_LINK_UP;
3073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3074 "0399 Invalid link attention type: x%x\n",
3075 bf_get(lpfc_acqe_link_status, acqe_link));
3076 att_type = AT_RESERVED;
3083 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3084 * @phba: pointer to lpfc hba data structure.
3085 * @acqe_link: pointer to the async link completion queue entry.
3087 * This routine is to parse the SLI4 link-attention link speed and translate
3088 * it into the base driver's link-attention link speed coding.
3090 * Return: Link-attention link speed in terms of base driver's coding.
3093 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3094 struct lpfc_acqe_link *acqe_link)
3098 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3099 case LPFC_ASYNC_LINK_SPEED_ZERO:
3100 link_speed = LA_UNKNW_LINK;
3102 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3103 link_speed = LA_UNKNW_LINK;
3105 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3106 link_speed = LA_UNKNW_LINK;
3108 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3109 link_speed = LA_1GHZ_LINK;
3111 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3112 link_speed = LA_10GHZ_LINK;
3115 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3116 "0483 Invalid link-attention link speed: x%x\n",
3117 bf_get(lpfc_acqe_link_speed, acqe_link));
3118 link_speed = LA_UNKNW_LINK;
3125 * lpfc_sli4_async_link_evt - Process the asynchronous link event
3126 * @phba: pointer to lpfc hba data structure.
3127 * @acqe_link: pointer to the async link completion queue entry.
3129 * This routine is to handle the SLI4 asynchronous link event.
3132 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3133 struct lpfc_acqe_link *acqe_link)
3135 struct lpfc_dmabuf *mp;
3141 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3142 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
3144 phba->fcoe_eventtag = acqe_link->event_tag;
3145 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3147 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3148 "0395 The mboxq allocation failed\n");
3151 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3153 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3154 "0396 The lpfc_dmabuf allocation failed\n");
3157 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3159 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3160 "0397 The mbuf allocation failed\n");
3161 goto out_free_dmabuf;
3164 /* Cleanup any outstanding ELS commands */
3165 lpfc_els_flush_all_cmd(phba);
3167 /* Block ELS IOCBs until we have done process link event */
3168 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3170 /* Update link event statistics */
3171 phba->sli.slistat.link_event++;
3173 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
3174 lpfc_read_la(phba, pmb, mp);
3175 pmb->vport = phba->pport;
3177 /* Parse and translate status field */
3179 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3181 /* Parse and translate link attention fields */
3182 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
3183 la->eventTag = acqe_link->event_tag;
3184 la->attType = att_type;
3185 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
3187 /* Fake the following irrelevant fields */
3188 la->topology = TOPOLOGY_PT_PT;
3189 la->granted_AL_PA = 0;
3195 /* Keep the link status for extra SLI4 state machine reference */
3196 phba->sli4_hba.link_state.speed =
3197 bf_get(lpfc_acqe_link_speed, acqe_link);
3198 phba->sli4_hba.link_state.duplex =
3199 bf_get(lpfc_acqe_link_duplex, acqe_link);
3200 phba->sli4_hba.link_state.status =
3201 bf_get(lpfc_acqe_link_status, acqe_link);
3202 phba->sli4_hba.link_state.physical =
3203 bf_get(lpfc_acqe_link_physical, acqe_link);
3204 phba->sli4_hba.link_state.fault =
3205 bf_get(lpfc_acqe_link_fault, acqe_link);
3206 phba->sli4_hba.link_state.logical_speed =
3207 bf_get(lpfc_acqe_qos_link_speed, acqe_link);
3209 /* Invoke the lpfc_handle_latt mailbox command callback function */
3210 lpfc_mbx_cmpl_read_la(phba, pmb);
3217 mempool_free(pmb, phba->mbox_mem_pool);
3221 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3222 * @vport: pointer to vport data structure.
3224 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3225 * response to a CVL event.
3227 * Return the pointer to the ndlp with the vport if successful, otherwise
3230 static struct lpfc_nodelist *
3231 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3233 struct lpfc_nodelist *ndlp;
3234 struct Scsi_Host *shost;
3235 struct lpfc_hba *phba;
3239 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3245 if (phba->pport->port_state <= LPFC_FLOGI)
3247 /* If virtual link is not yet instantiated ignore CVL */
3248 if (vport->port_state <= LPFC_FDISC)
3250 shost = lpfc_shost_from_vport(vport);
3253 lpfc_linkdown_port(vport);
3254 lpfc_cleanup_pending_mbox(vport);
3255 spin_lock_irq(shost->host_lock);
3256 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3257 spin_unlock_irq(shost->host_lock);
3263 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3264 * @vport: pointer to lpfc hba data structure.
3266 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3267 * response to a FCF dead event.
3270 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3272 struct lpfc_vport **vports;
3275 vports = lpfc_create_vport_work_array(phba);
3277 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3278 lpfc_sli4_perform_vport_cvl(vports[i]);
3279 lpfc_destroy_vport_work_array(phba, vports);
3283 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3284 * @phba: pointer to lpfc hba data structure.
3285 * @acqe_link: pointer to the async fcoe completion queue entry.
3287 * This routine is to handle the SLI4 asynchronous fcoe event.
3290 lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3291 struct lpfc_acqe_fcoe *acqe_fcoe)
3293 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
3295 struct lpfc_vport *vport;
3296 struct lpfc_nodelist *ndlp;
3297 struct Scsi_Host *shost;
3298 int active_vlink_present;
3299 struct lpfc_vport **vports;
3302 phba->fc_eventTag = acqe_fcoe->event_tag;
3303 phba->fcoe_eventtag = acqe_fcoe->event_tag;
3304 switch (event_type) {
3305 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3306 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3307 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3308 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3310 "2546 New FCF found event: "
3311 "evt_tag:x%x, fcf_index:x%x\n",
3312 acqe_fcoe->event_tag,
3315 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3317 "2788 FCF parameter modified event: "
3318 "evt_tag:x%x, fcf_index:x%x\n",
3319 acqe_fcoe->event_tag,
3321 spin_lock_irq(&phba->hbalock);
3322 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3323 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
3325 * If the current FCF is in discovered state or
3326 * FCF discovery is in progress, do nothing.
3328 spin_unlock_irq(&phba->hbalock);
3332 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3334 * If fast FCF failover rescan event is pending,
3337 spin_unlock_irq(&phba->hbalock);
3340 spin_unlock_irq(&phba->hbalock);
3342 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3343 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3345 * During period of FCF discovery, read the FCF
3346 * table record indexed by the event to update
3347 * FCF round robin failover eligible FCF bmask.
3349 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3351 "2779 Read new FCF record with "
3352 "fcf_index:x%x for updating FCF "
3353 "round robin failover bmask\n",
3355 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3358 /* Otherwise, scan the entire FCF table and re-discover SAN */
3359 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3360 "2770 Start FCF table scan due to new FCF "
3361 "event: evt_tag:x%x, fcf_index:x%x\n",
3362 acqe_fcoe->event_tag, acqe_fcoe->index);
3363 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3364 LPFC_FCOE_FCF_GET_FIRST);
3366 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3367 "2547 Issue FCF scan read FCF mailbox "
3368 "command failed 0x%x\n", rc);
3371 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
3372 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3373 "2548 FCF Table full count 0x%x tag 0x%x\n",
3374 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
3375 acqe_fcoe->event_tag);
3378 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3379 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3380 "2549 FCF disconnected from network index 0x%x"
3381 " tag 0x%x\n", acqe_fcoe->index,
3382 acqe_fcoe->event_tag);
3383 /* If the event is not for currently used fcf do nothing */
3384 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3386 /* Request the port to rediscover the entire FCF table for
3387 * a fast recovery in case the current FCF record is no
3388 * longer valid, provided we are not already in the middle
3389 * of the FCF failover process.
3391 spin_lock_irq(&phba->hbalock);
3392 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3393 spin_unlock_irq(&phba->hbalock);
3394 /* Update FLOGI FCF failover eligible FCF bmask */
3395 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3398 /* Mark the fast failover process in progress */
3399 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3400 spin_unlock_irq(&phba->hbalock);
3401 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3402 "2771 Start FCF fast failover process due to "
3403 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3404 "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3405 rc = lpfc_sli4_redisc_fcf_table(phba);
3407 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3409 "2772 Issue FCF rediscover mailbox "
3410 "command failed, fail through to FCF "
3412 spin_lock_irq(&phba->hbalock);
3413 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3414 spin_unlock_irq(&phba->hbalock);
3416 * Last resort will fail over by treating this
3417 * as a link down to FCF registration.
3419 lpfc_sli4_fcf_dead_failthrough(phba);
3421 /* Handling fast FCF failover to a DEAD FCF event
3422 * is considered equivalent to receiving CVL to all
3425 lpfc_sli4_perform_all_vport_cvl(phba);
3427 case LPFC_FCOE_EVENT_TYPE_CVL:
3428 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3429 "2718 Clear Virtual Link Received for VPI 0x%x"
3430 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3431 vport = lpfc_find_vport_by_vpid(phba,
3432 acqe_fcoe->index - phba->vpi_base);
3433 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3436 active_vlink_present = 0;
3438 vports = lpfc_create_vport_work_array(phba);
3440 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3442 if ((!(vports[i]->fc_flag &
3443 FC_VPORT_CVL_RCVD)) &&
3444 (vports[i]->port_state > LPFC_FDISC)) {
3445 active_vlink_present = 1;
3449 lpfc_destroy_vport_work_array(phba, vports);
3452 if (active_vlink_present) {
3454 * If there are other active VLinks present,
3455 * re-instantiate the Vlink using FDISC.
3457 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3458 shost = lpfc_shost_from_vport(vport);
3459 spin_lock_irq(shost->host_lock);
3460 ndlp->nlp_flag |= NLP_DELAY_TMO;
3461 spin_unlock_irq(shost->host_lock);
3462 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3463 vport->port_state = LPFC_FDISC;
3466 * Otherwise, request the port to rediscover
3467 * the entire FCF table for a fast recovery
3468 * in case the current FCF is no longer
3469 * valid, provided we are not already
3470 * in the FCF failover process.
3472 spin_lock_irq(&phba->hbalock);
3473 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3474 spin_unlock_irq(&phba->hbalock);
3477 /* Mark the fast failover process in progress */
3478 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3479 spin_unlock_irq(&phba->hbalock);
3480 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3482 "2773 Start FCF fast failover due "
3483 "to CVL event: evt_tag:x%x\n",
3484 acqe_fcoe->event_tag);
3485 rc = lpfc_sli4_redisc_fcf_table(phba);
3487 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3489 "2774 Issue FCF rediscover "
3490 "mailbox command failed, "
3491 "through to CVL event\n");
3492 spin_lock_irq(&phba->hbalock);
3493 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3494 spin_unlock_irq(&phba->hbalock);
3496 * Last resort will be to retry on the
3497 * currently registered FCF entry.
3499 lpfc_retry_pport_discovery(phba);
3504 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3505 "0288 Unknown FCoE event type 0x%x event tag "
3506 "0x%x\n", event_type, acqe_fcoe->event_tag);
3512 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3513 * @phba: pointer to lpfc hba data structure.
3514 * @acqe_link: pointer to the async dcbx completion queue entry.
3516 * This routine is to handle the SLI4 asynchronous dcbx event.
3519 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3520 struct lpfc_acqe_dcbx *acqe_dcbx)
3522 phba->fc_eventTag = acqe_dcbx->event_tag;
3523 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3524 "0290 The SLI4 DCBX asynchronous event is not "
3529 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3530 * @phba: pointer to lpfc hba data structure.
3532 * This routine is invoked by the worker thread to process all the pending
3533 * SLI4 asynchronous events.
3535 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3537 struct lpfc_cq_event *cq_event;
3539 /* First, declare the async event has been handled */
3540 spin_lock_irq(&phba->hbalock);
3541 phba->hba_flag &= ~ASYNC_EVENT;
3542 spin_unlock_irq(&phba->hbalock);
3543 /* Now, handle all the async events */
3544 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3545 /* Get the first event from the head of the event queue */
3546 spin_lock_irq(&phba->hbalock);
3547 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3548 cq_event, struct lpfc_cq_event, list);
3549 spin_unlock_irq(&phba->hbalock);
3550 /* Process the asynchronous event */
3551 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3552 case LPFC_TRAILER_CODE_LINK:
3553 lpfc_sli4_async_link_evt(phba,
3554 &cq_event->cqe.acqe_link);
3556 case LPFC_TRAILER_CODE_FCOE:
3557 lpfc_sli4_async_fcoe_evt(phba,
3558 &cq_event->cqe.acqe_fcoe);
3560 case LPFC_TRAILER_CODE_DCBX:
3561 lpfc_sli4_async_dcbx_evt(phba,
3562 &cq_event->cqe.acqe_dcbx);
3565 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3566 "1804 Invalid asynchronous event code: "
3567 "x%x\n", bf_get(lpfc_trailer_code,
3568 &cq_event->cqe.mcqe_cmpl));
3571 /* Free the completion event processed to the free pool */
3572 lpfc_sli4_cq_event_release(phba, cq_event);
3577 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3578 * @phba: pointer to lpfc hba data structure.
3580 * This routine is invoked by the worker thread to process FCF table
3581 * rediscovery pending completion event.
3583 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3587 spin_lock_irq(&phba->hbalock);
3588 /* Clear FCF rediscovery timeout event */
3589 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3590 /* Clear driver fast failover FCF record flag */
3591 phba->fcf.failover_rec.flag = 0;
3592 /* Set state for FCF fast failover */
3593 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3594 spin_unlock_irq(&phba->hbalock);
3596 /* Scan FCF table from the first entry to re-discover SAN */
3597 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3598 "2777 Start FCF table scan after FCF "
3599 "rediscovery quiescent period over\n");
3600 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3602 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3603 "2747 Issue FCF scan read FCF mailbox "
3604 "command failed 0x%x\n", rc);
3608 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3609 * @phba: pointer to lpfc hba data structure.
3610 * @dev_grp: The HBA PCI-Device group number.
3612 * This routine is invoked to set up the per HBA PCI-Device group function
3613 * API jump table entries.
3615 * Return: 0 if success, otherwise -ENODEV
3618 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3622 /* Set up lpfc PCI-device group */
3623 phba->pci_dev_grp = dev_grp;
3625 /* The LPFC_PCI_DEV_OC uses SLI4 */
3626 if (dev_grp == LPFC_PCI_DEV_OC)
3627 phba->sli_rev = LPFC_SLI_REV4;
3629 /* Set up device INIT API function jump table */
3630 rc = lpfc_init_api_table_setup(phba, dev_grp);
3633 /* Set up SCSI API function jump table */
3634 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3637 /* Set up SLI API function jump table */
3638 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3641 /* Set up MBOX API function jump table */
3642 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3650 * lpfc_log_intr_mode - Log the active interrupt mode
3651 * @phba: pointer to lpfc hba data structure.
3652 * @intr_mode: active interrupt mode adopted.
3654 * This routine is invoked to log the currently used active interrupt mode
3657 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3659 switch (intr_mode) {
3661 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3662 "0470 Enable INTx interrupt mode.\n");
3665 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3666 "0481 Enabled MSI interrupt mode.\n");
3669 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3670 "0480 Enabled MSI-X interrupt mode.\n");
3673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3674 "0482 Illegal interrupt mode.\n");
3681 * lpfc_enable_pci_dev - Enable a generic PCI device.
3682 * @phba: pointer to lpfc hba data structure.
3684 * This routine is invoked to enable the PCI device that is common to all
3689 * other values - error
3692 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3694 struct pci_dev *pdev;
3697 /* Obtain PCI device reference */
3701 pdev = phba->pcidev;
3702 /* Select PCI BARs */
3703 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3704 /* Enable PCI device */
3705 if (pci_enable_device_mem(pdev))
3707 /* Request PCI resource for the device */
3708 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3709 goto out_disable_device;
3710 /* Set up device as PCI master and save state for EEH */
3711 pci_set_master(pdev);
3712 pci_try_set_mwi(pdev);
3713 pci_save_state(pdev);
3718 pci_disable_device(pdev);
3724 * lpfc_disable_pci_dev - Disable a generic PCI device.
3725 * @phba: pointer to lpfc hba data structure.
3727 * This routine is invoked to disable the PCI device that is common to all
3731 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3733 struct pci_dev *pdev;
3736 /* Obtain PCI device reference */
3740 pdev = phba->pcidev;
3741 /* Select PCI BARs */
3742 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3743 /* Release PCI resource and disable PCI device */
3744 pci_release_selected_regions(pdev, bars);
3745 pci_disable_device(pdev);
3746 /* Null out PCI private reference to driver */
3747 pci_set_drvdata(pdev, NULL);
3753 * lpfc_reset_hba - Reset a hba
3754 * @phba: pointer to lpfc hba data structure.
3756 * This routine is invoked to reset a hba device. It brings the HBA
3757 * offline, performs a board restart, and then brings the board back
3758 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3759 * outstanding mailbox commands.
3762 lpfc_reset_hba(struct lpfc_hba *phba)
3764 /* If resets are disabled then set error state and return. */
3765 if (!phba->cfg_enable_hba_reset) {
3766 phba->link_state = LPFC_HBA_ERROR;
3769 lpfc_offline_prep(phba);
3771 lpfc_sli_brdrestart(phba);
3773 lpfc_unblock_mgmt_io(phba);
3777 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3778 * @phba: pointer to lpfc hba data structure.
3780 * This routine is invoked to set up the driver internal resources specific to
3781 * support the SLI-3 HBA device it attached to.
3785 * other values - error
3788 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3790 struct lpfc_sli *psli;
3793 * Initialize timers used by driver
3796 /* Heartbeat timer */
3797 init_timer(&phba->hb_tmofunc);
3798 phba->hb_tmofunc.function = lpfc_hb_timeout;
3799 phba->hb_tmofunc.data = (unsigned long)phba;
3802 /* MBOX heartbeat timer */
3803 init_timer(&psli->mbox_tmo);
3804 psli->mbox_tmo.function = lpfc_mbox_timeout;
3805 psli->mbox_tmo.data = (unsigned long) phba;
3806 /* FCP polling mode timer */
3807 init_timer(&phba->fcp_poll_timer);
3808 phba->fcp_poll_timer.function = lpfc_poll_timeout;
3809 phba->fcp_poll_timer.data = (unsigned long) phba;
3810 /* Fabric block timer */
3811 init_timer(&phba->fabric_block_timer);
3812 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3813 phba->fabric_block_timer.data = (unsigned long) phba;
3814 /* EA polling mode timer */
3815 init_timer(&phba->eratt_poll);
3816 phba->eratt_poll.function = lpfc_poll_eratt;
3817 phba->eratt_poll.data = (unsigned long) phba;
3819 /* Host attention work mask setup */
3820 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3821 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3823 /* Get all the module params for configuring this host */
3824 lpfc_get_cfgparam(phba);
3826 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3827 * used to create the sg_dma_buf_pool must be dynamically calculated.
3828 * 2 segments are added since the IOCB needs a command and response bde.
3830 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3831 sizeof(struct fcp_rsp) +
3832 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
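	/*
	 * For example, if cfg_sg_seg_cnt were 64 (an illustrative value, not
	 * necessarily the configured default), each pool buffer would carry
	 * the FCP command, the FCP response and 66 BDEs: one per data segment
	 * plus the 2 extra for the command and response.
	 */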
3834 if (phba->cfg_enable_bg) {
3835 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3836 phba->cfg_sg_dma_buf_size +=
3837 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3840 /* Also reinitialize the host templates with new values. */
3841 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3842 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3844 phba->max_vpi = LPFC_MAX_VPI;
3845 /* This will be set to correct value after config_port mbox */
3846 phba->max_vports = 0;
3849 * Initialize the SLI Layer to run with lpfc HBAs.
3851 lpfc_sli_setup(phba);
3852 lpfc_sli_queue_setup(phba);
3854 /* Allocate device driver memory */
3855 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3862 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3863 * @phba: pointer to lpfc hba data structure.
3865 * This routine is invoked to unset the driver internal resources set up
3866 * specific for supporting the SLI-3 HBA device it attached to.
3869 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3871 /* Free device driver memory allocated */
3872 lpfc_mem_free_all(phba);
3878 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3879 * @phba: pointer to lpfc hba data structure.
3881 * This routine is invoked to set up the driver internal resources specific to
3882 * support the SLI-4 HBA device it attached to.
3886 * other values - error
3889 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3891 struct lpfc_sli *psli;
3892 LPFC_MBOXQ_t *mboxq;
3893 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3894 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3895 struct lpfc_mqe *mqe;
3898 /* Before proceeding, wait for POST done and device ready */
3899 rc = lpfc_sli4_post_status_check(phba);
3904 * Initialize timers used by driver
3907 /* Heartbeat timer */
3908 init_timer(&phba->hb_tmofunc);
3909 phba->hb_tmofunc.function = lpfc_hb_timeout;
3910 phba->hb_tmofunc.data = (unsigned long)phba;
3913 /* MBOX heartbeat timer */
3914 init_timer(&psli->mbox_tmo);
3915 psli->mbox_tmo.function = lpfc_mbox_timeout;
3916 psli->mbox_tmo.data = (unsigned long) phba;
3917 /* Fabric block timer */
3918 init_timer(&phba->fabric_block_timer);
3919 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3920 phba->fabric_block_timer.data = (unsigned long) phba;
3921 /* EA polling mode timer */
3922 init_timer(&phba->eratt_poll);
3923 phba->eratt_poll.function = lpfc_poll_eratt;
3924 phba->eratt_poll.data = (unsigned long) phba;
3925 /* FCF rediscover timer */
3926 init_timer(&phba->fcf.redisc_wait);
3927 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
3928 phba->fcf.redisc_wait.data = (unsigned long)phba;
3931 * We need to do a READ_CONFIG mailbox command here before
3932 * calling lpfc_get_cfgparam. For VFs this will report the
3933 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3934 * All of the resources allocated
3935 * for this Port are tied to these values.
3937 /* Get all the module params for configuring this host */
3938 lpfc_get_cfgparam(phba);
3939 phba->max_vpi = LPFC_MAX_VPI;
3940 /* This will be set to correct value after the read_config mbox */
3941 phba->max_vports = 0;
3943 /* Program the default value of vlan_id and fc_map */
3944 phba->valid_vlan = 0;
3945 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3946 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3947 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3950 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
3951 * used to create the sg_dma_buf_pool must be dynamically calculated.
3952 * 2 segments are added since the IOCB needs a command and response bde.
3953 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3954 * sgl sizes that are a power of 2 are used.
3956 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
3957 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
3958 /* Feature Level 1 hardware is limited to 2 pages */
3959 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
3960 LPFC_SLI_INTF_FEATURELEVEL1_1))
3961 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
3963 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
3964 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
3965 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
3966 dma_buf_size = dma_buf_size << 1)
3968 if (dma_buf_size == max_buf_size)
3969 phba->cfg_sg_seg_cnt = (dma_buf_size -
3970 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
3971 (2 * sizeof(struct sli4_sge))) /
3972 sizeof(struct sli4_sge);
3973 phba->cfg_sg_dma_buf_size = dma_buf_size;
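	/*
	 * The loop above doubles dma_buf_size from LPFC_SLI4_MIN_BUF_SIZE until
	 * it is a power of two large enough to hold buf_size, capped at
	 * max_buf_size; if the cap is reached, cfg_sg_seg_cnt is trimmed to the
	 * number of SGEs that still fit alongside the FCP command and response.
	 */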
3975 /* Initialize buffer queue management fields */
3976 hbq_count = lpfc_sli_hbq_count();
3977 for (i = 0; i < hbq_count; ++i)
3978 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3979 INIT_LIST_HEAD(&phba->rb_pend_list);
3980 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3981 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3984 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3986 /* Initialize the Abort scsi buffer list used by driver */
3987 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3988 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3989 /* This abort list used by worker thread */
3990 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3993 * Initialize driver internal slow-path work queues
3996 /* Driver internal slow-path CQ Event pool */
3997 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3998 /* Response IOCB work queue list */
3999 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4000 /* Asynchronous event CQ Event work queue list */
4001 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4002 /* Fast-path XRI aborted CQ Event work queue list */
4003 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4004 /* Slow-path XRI aborted CQ Event work queue list */
4005 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4006 /* Receive queue CQ Event work queue list */
4007 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4009 /* Initialize the driver internal SLI layer lists. */
4010 lpfc_sli_setup(phba);
4011 lpfc_sli_queue_setup(phba);
4013 /* Allocate device driver memory */
4014 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4018 /* Create the bootstrap mailbox command */
4019 rc = lpfc_create_bootstrap_mbox(phba);
4023 /* Set up the host's endian order with the device. */
4024 rc = lpfc_setup_endian_order(phba);
4026 goto out_free_bsmbx;
4028 rc = lpfc_sli4_fw_cfg_check(phba);
4030 goto out_free_bsmbx;
4032 /* Set up the hba's configuration parameters. */
4033 rc = lpfc_sli4_read_config(phba);
4035 goto out_free_bsmbx;
4037 /* Perform a function reset */
4038 rc = lpfc_pci_function_reset(phba);
4040 goto out_free_bsmbx;
4042 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4046 goto out_free_bsmbx;
4049 /* Get the Supported Pages. It is always available. */
4050 lpfc_supported_pages(mboxq);
4051 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4054 mempool_free(mboxq, phba->mbox_mem_pool);
4055 goto out_free_bsmbx;
4058 mqe = &mboxq->u.mqe;
4059 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4060 LPFC_MAX_SUPPORTED_PAGES);
4061 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4062 switch (pn_page[i]) {
4063 case LPFC_SLI4_PARAMETERS:
4064 phba->sli4_hba.pc_sli4_params.supported = 1;
4071 /* Read the port's SLI4 Parameters capabilities if supported. */
4072 if (phba->sli4_hba.pc_sli4_params.supported)
4073 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4074 mempool_free(mboxq, phba->mbox_mem_pool);
4077 goto out_free_bsmbx;
4079 /* Create all the SLI4 queues */
4080 rc = lpfc_sli4_queue_create(phba);
4082 goto out_free_bsmbx;
4084 /* Create driver internal CQE event pool */
4085 rc = lpfc_sli4_cq_event_pool_create(phba);
4087 goto out_destroy_queue;
4089 /* Initialize and populate the iocb list per host */
4090 rc = lpfc_init_sgl_list(phba);
4092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4093 "1400 Failed to initialize sgl list.\n");
4094 goto out_destroy_cq_event_pool;
4096 rc = lpfc_init_active_sgl_array(phba);
4098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4099 "1430 Failed to initialize sgl list.\n");
4100 goto out_free_sgl_list;
4103 rc = lpfc_sli4_init_rpi_hdrs(phba);
4105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4106 "1432 Failed to initialize rpi headers.\n");
4107 goto out_free_active_sgl;
4110 /* Allocate eligible FCF bmask memory for FCF round robin failover */
4111 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
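/*
 * The bitmap is sized in whole longs (round-up division). For
 * illustration only: a 64-entry FCF table index space on a 64-bit
 * kernel needs a single unsigned long; on a 32-bit kernel it needs two.
 */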
4112 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4114 if (!phba->fcf.fcf_rr_bmask) {
4115 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4116 "2759 Failed allocate memory for FCF round "
4117 "robin failover bmask\n");
4118 goto out_remove_rpi_hdrs;
4121 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4122 phba->cfg_fcp_eq_count), GFP_KERNEL);
4123 if (!phba->sli4_hba.fcp_eq_hdl) {
4124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4125 "2572 Failed allocate memory for fast-path "
4126 "per-EQ handle array\n");
4127 goto out_free_fcf_rr_bmask;
4130 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4131 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4132 if (!phba->sli4_hba.msix_entries) {
4133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4134 "2573 Failed allocate memory for msi-x "
4135 "interrupt vector entries\n");
4136 goto out_free_fcp_eq_hdl;
4141 out_free_fcp_eq_hdl:
4142 kfree(phba->sli4_hba.fcp_eq_hdl);
4143 out_free_fcf_rr_bmask:
4144 kfree(phba->fcf.fcf_rr_bmask);
4145 out_remove_rpi_hdrs:
4146 lpfc_sli4_remove_rpi_hdrs(phba);
4147 out_free_active_sgl:
4148 lpfc_free_active_sgl(phba);
4150 lpfc_free_sgl_list(phba);
4151 out_destroy_cq_event_pool:
4152 lpfc_sli4_cq_event_pool_destroy(phba);
4154 lpfc_sli4_queue_destroy(phba);
4156 lpfc_destroy_bootstrap_mbox(phba);
4158 lpfc_mem_free(phba);
4163 * lpfc_sli4_driver_resource_unset - Unset driver internal resources for SLI4 dev
4164 * @phba: pointer to lpfc hba data structure.
4166 * This routine is invoked to unset the driver internal resources set up
4167 * specifically for supporting the SLI-4 HBA device it is attached to.
4170 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4172 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4174 /* unregister default FCFI from the HBA */
4175 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4177 /* Free the default FCR table */
4178 lpfc_sli_remove_dflt_fcf(phba);
4180 /* Free memory allocated for msi-x interrupt vector entries */
4181 kfree(phba->sli4_hba.msix_entries);
4183 /* Free memory allocated for fast-path work queue handles */
4184 kfree(phba->sli4_hba.fcp_eq_hdl);
4186 /* Free the allocated rpi headers. */
4187 lpfc_sli4_remove_rpi_hdrs(phba);
4188 lpfc_sli4_remove_rpis(phba);
4190 /* Free eligible FCF index bmask */
4191 kfree(phba->fcf.fcf_rr_bmask);
4193 /* Free the ELS sgl list */
4194 lpfc_free_active_sgl(phba);
4195 lpfc_free_sgl_list(phba);
4197 /* Free the SCSI sgl management array */
4198 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4200 /* Free the SLI4 queues */
4201 lpfc_sli4_queue_destroy(phba);
4203 /* Free the completion queue EQ event pool */
4204 lpfc_sli4_cq_event_release_all(phba);
4205 lpfc_sli4_cq_event_pool_destroy(phba);
4207 /* Reset SLI4 HBA FCoE function */
4208 lpfc_pci_function_reset(phba);
4210 /* Free the bsmbx region. */
4211 lpfc_destroy_bootstrap_mbox(phba);
4213 /* Free the SLI Layer memory with SLI4 HBAs */
4214 lpfc_mem_free_all(phba);
4216 /* Free the current connect table */
4217 list_for_each_entry_safe(conn_entry, next_conn_entry,
4218 &phba->fcf_conn_rec_list, list) {
4219 list_del_init(&conn_entry->list);
4227 * lpfc_init_api_table_setup - Set up init api function jump table
4228 * @phba: The hba struct for which this call is being executed.
4229 * @dev_grp: The HBA PCI-Device group number.
4231 * This routine sets up the device INIT interface API function jump table
4234 * Returns: 0 - success, -ENODEV - failure.
4237 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4239 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4240 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4242 case LPFC_PCI_DEV_LP:
4243 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4244 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4245 phba->lpfc_stop_port = lpfc_stop_port_s3;
4247 case LPFC_PCI_DEV_OC:
4248 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4249 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4250 phba->lpfc_stop_port = lpfc_stop_port_s4;
4253 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4254 "1431 Invalid HBA PCI-device group: 0x%x\n",
4263 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4264 * @phba: pointer to lpfc hba data structure.
4266 * This routine is invoked to set up the driver internal resources before the
4267 * device specific resource setup to support the HBA device it is attached to.
4271 * other values - error
4274 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4277 * Driver resources common to all SLI revisions
4279 atomic_set(&phba->fast_event_count, 0);
4280 spin_lock_init(&phba->hbalock);
4282 /* Initialize ndlp management spinlock */
4283 spin_lock_init(&phba->ndlp_lock);
4285 INIT_LIST_HEAD(&phba->port_list);
4286 INIT_LIST_HEAD(&phba->work_list);
4287 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4289 /* Initialize the wait queue head for the kernel thread */
4290 init_waitqueue_head(&phba->work_waitq);
4292 /* Initialize the scsi buffer list used by driver for scsi IO */
4293 spin_lock_init(&phba->scsi_buf_list_lock);
4294 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4296 /* Initialize the fabric iocb list */
4297 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4299 /* Initialize list to save ELS buffers */
4300 INIT_LIST_HEAD(&phba->elsbuf);
4302 /* Initialize FCF connection rec list */
4303 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4309 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4310 * @phba: pointer to lpfc hba data structure.
4312 * This routine is invoked to set up the driver internal resources after the
4313 * device specific resource setup to support the HBA device it is attached to.
4317 * other values - error
4320 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4324 /* Startup the kernel thread for this host adapter. */
4325 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4326 "lpfc_worker_%d", phba->brd_no);
4327 if (IS_ERR(phba->worker_thread)) {
4328 error = PTR_ERR(phba->worker_thread);
4336 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4337 * @phba: pointer to lpfc hba data structure.
4339 * This routine is invoked to unset the driver internal resources set up after
4340 * the device specific resource setup for supporting the HBA device it
4344 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4346 /* Stop kernel worker thread */
4347 kthread_stop(phba->worker_thread);
4351 * lpfc_free_iocb_list - Free iocb list.
4352 * @phba: pointer to lpfc hba data structure.
4354 * This routine is invoked to free the driver's IOCB list and memory.
4357 lpfc_free_iocb_list(struct lpfc_hba *phba)
4359 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4361 spin_lock_irq(&phba->hbalock);
4362 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4363 &phba->lpfc_iocb_list, list) {
4364 list_del(&iocbq_entry->list);
4366 phba->total_iocbq_bufs--;
4368 spin_unlock_irq(&phba->hbalock);
4374 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4375 * @phba: pointer to lpfc hba data structure.
4377 * This routine is invoked to allocate and initialize the driver's IOCB
4378 * list and set up the IOCB tag array accordingly.
4382 * other values - error
4385 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4387 struct lpfc_iocbq *iocbq_entry = NULL;
4391 /* Initialize and populate the iocb list per host. */
4392 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4393 for (i = 0; i < iocb_count; i++) {
4394 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4395 if (iocbq_entry == NULL) {
4396 printk(KERN_ERR "%s: only allocated %d iocbs of "
4397 "expected %d count. Unloading driver.\n",
4398 __func__, i, LPFC_IOCB_LIST_CNT);
4399 goto out_free_iocbq;
4402 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4405 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4406 "Unloading driver.\n", __func__);
4407 goto out_free_iocbq;
4409 iocbq_entry->sli4_xritag = NO_XRI;
4411 spin_lock_irq(&phba->hbalock);
4412 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4413 phba->total_iocbq_bufs++;
4414 spin_unlock_irq(&phba->hbalock);
4420 lpfc_free_iocb_list(phba);
4426 * lpfc_free_sgl_list - Free sgl list.
4427 * @phba: pointer to lpfc hba data structure.
4429 * This routine is invoked to free the driver's sgl list and memory.
4432 lpfc_free_sgl_list(struct lpfc_hba *phba)
4434 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4435 LIST_HEAD(sglq_list);
4438 spin_lock_irq(&phba->hbalock);
4439 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4440 spin_unlock_irq(&phba->hbalock);
4442 list_for_each_entry_safe(sglq_entry, sglq_next,
4444 list_del(&sglq_entry->list);
4445 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4447 phba->sli4_hba.total_sglq_bufs--;
4449 rc = lpfc_sli4_remove_all_sgl_pages(phba);
4451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4452 "2005 Unable to deregister pages from HBA: %x\n", rc);
4454 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4458 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4459 * @phba: pointer to lpfc hba data structure.
4461 * This routine is invoked to allocate the driver's active sgl memory.
4462 * This array will hold the sglq_entry's for active IOs.
4465 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4468 size = sizeof(struct lpfc_sglq *);
4469 size *= phba->sli4_hba.max_cfg_param.max_xri;
4471 phba->sli4_hba.lpfc_sglq_active_list =
4472 kzalloc(size, GFP_KERNEL);
4473 if (!phba->sli4_hba.lpfc_sglq_active_list)
4479 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4480 * @phba: pointer to lpfc hba data structure.
4482 * This routine is invoked to walk through the array of active sglq entries
4483 * and free all of the resources.
4484 * This is just a place holder for now.
4487 lpfc_free_active_sgl(struct lpfc_hba *phba)
4489 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4493 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4494 * @phba: pointer to lpfc hba data structure.
4496 * This routine is invoked to allocate and initialize the driver's sgl
4497 * list and set up the sgl xritag tag array accordingly.
4501 * other values - error
4504 lpfc_init_sgl_list(struct lpfc_hba *phba)
4506 struct lpfc_sglq *sglq_entry = NULL;
4510 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4511 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4512 "2400 lpfc_init_sgl_list els %d.\n",
4514 /* Initialize and populate the sglq list per host/VF. */
4515 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4516 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4518 /* Sanity check on XRI management */
4519 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4520 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4521 "2562 No room left for SCSI XRI allocation: "
4522 "max_xri=%d, els_xri=%d\n",
4523 phba->sli4_hba.max_cfg_param.max_xri,
4528 /* Allocate memory for the ELS XRI management array */
4529 phba->sli4_hba.lpfc_els_sgl_array =
4530 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4533 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4534 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4535 "2401 Failed to allocate memory for ELS "
4536 "XRI management array of size %d.\n",
4541 /* Keep the SCSI XRI into the XRI management array */
4542 phba->sli4_hba.scsi_xri_max =
4543 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4544 phba->sli4_hba.scsi_xri_cnt = 0;
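/*
 * Illustration of the XRI split (hypothetical numbers): with
 * max_xri = 1024 and els_xri_cnt = 256, scsi_xri_max becomes 768,
 * i.e. every XRI not reserved for ELS sgls is available for SCSI buffers.
 */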
4546 phba->sli4_hba.lpfc_scsi_psb_array =
4547 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4548 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4550 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4551 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4552 "2563 Failed to allocate memory for SCSI "
4553 "XRI management array of size %d.\n",
4554 phba->sli4_hba.scsi_xri_max);
4555 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4559 for (i = 0; i < els_xri_cnt; i++) {
4560 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4561 if (sglq_entry == NULL) {
4562 printk(KERN_ERR "%s: only allocated %d sgls of "
4563 "expected %d count. Unloading driver.\n",
4564 __func__, i, els_xri_cnt);
4568 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4569 if (sglq_entry->sli4_xritag == NO_XRI) {
4571 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4572 "Unloading driver.\n", __func__);
4575 sglq_entry->buff_type = GEN_BUFF_TYPE;
4576 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4577 if (sglq_entry->virt == NULL) {
4579 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4580 "Unloading driver.\n", __func__);
4583 sglq_entry->sgl = sglq_entry->virt;
4584 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4586 /* The list order is used by later block SGL registration */
4587 spin_lock_irq(&phba->hbalock);
4588 sglq_entry->state = SGL_FREED;
4589 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4590 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4591 phba->sli4_hba.total_sglq_bufs++;
4592 spin_unlock_irq(&phba->hbalock);
4597 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4598 lpfc_free_sgl_list(phba);
4603 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4604 * @phba: pointer to lpfc hba data structure.
4606 * This routine is invoked to post rpi header templates to the
4607 * HBA consistent with the SLI-4 interface spec. This routine
4608 * posts a PAGE_SIZE memory region to the port to hold up to
4609 * PAGE_SIZE / 64 rpi context headers.
4610 * No locks are held here because this is an initialization routine
4611 * called only from probe or lpfc_online when interrupts are not
4612 * enabled and the driver is reinitializing the device.
4616 * ENOMEM - No available memory
4617 * EIO - The mailbox failed to complete successfully.
4620 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4625 struct lpfc_rpi_hdr *rpi_hdr;
4627 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4630 * Provision an rpi bitmask range for discovery. The total count
4631 * is the difference between max and base + 1.
4633 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4634 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4636 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4637 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4639 if (!phba->sli4_hba.rpi_bmask)
4642 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4644 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4645 "0391 Error during rpi post operation\n");
4646 lpfc_sli4_remove_rpis(phba);
4654 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4655 * @phba: pointer to lpfc hba data structure.
4657 * This routine is invoked to allocate a single 4KB memory region to
4658 * support rpis and stores them in the phba. This single region
4659 * provides support for up to 64 rpis. The region is used globally
4663 * A valid rpi hdr on success.
4664 * A NULL pointer on any failure.
4666 struct lpfc_rpi_hdr *
4667 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4669 uint16_t rpi_limit, curr_rpi_range;
4670 struct lpfc_dmabuf *dmabuf;
4671 struct lpfc_rpi_hdr *rpi_hdr;
4673 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4674 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4676 spin_lock_irq(&phba->hbalock);
4677 curr_rpi_range = phba->sli4_hba.next_rpi;
4678 spin_unlock_irq(&phba->hbalock);
4681 * The port has a limited number of rpis. The increment here
4682 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4683 * and to allow the full max_rpi range per port.
4685 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4689 * First allocate the protocol header region for the port. The
4690 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4692 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4696 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4697 LPFC_HDR_TEMPLATE_SIZE,
4700 if (!dmabuf->virt) {
4702 goto err_free_dmabuf;
4705 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4706 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4708 goto err_free_coherent;
4711 /* Save the rpi header data for cleanup later. */
4712 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4714 goto err_free_coherent;
4716 rpi_hdr->dmabuf = dmabuf;
4717 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4718 rpi_hdr->page_count = 1;
4719 spin_lock_irq(&phba->hbalock);
4720 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4721 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4724 * The next_rpi stores the next modulo-64 rpi value to post
4725 * in any subsequent rpi memory region postings.
4727 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4728 spin_unlock_irq(&phba->hbalock);
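/*
 * Example: on the first call next_rpi equals rpi_base, so this header
 * region covers rpis [rpi_base, rpi_base + LPFC_RPI_HDR_COUNT - 1] and
 * the next posting starts LPFC_RPI_HDR_COUNT rpis later.
 */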
4732 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4733 dmabuf->virt, dmabuf->phys);
4740 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4741 * @phba: pointer to lpfc hba data structure.
4743 * This routine is invoked to remove all memory resources allocated
4744 * to support rpis. This routine presumes the caller has released all
4745 * rpis consumed by fabric or port logins and is prepared to have
4746 * the header pages removed.
4749 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4751 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4753 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4754 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4755 list_del(&rpi_hdr->list);
4756 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4757 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4758 kfree(rpi_hdr->dmabuf);
4762 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4763 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4767 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4768 * @pdev: pointer to pci device data structure.
4770 * This routine is invoked to allocate the driver hba data structure for an
4771 * HBA device. If the allocation is successful, the phba reference to the
4772 * PCI device data structure is set.
4775 * pointer to @phba - successful
4778 static struct lpfc_hba *
4779 lpfc_hba_alloc(struct pci_dev *pdev)
4781 struct lpfc_hba *phba;
4783 /* Allocate memory for HBA structure */
4784 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4786 dev_err(&pdev->dev, "failed to allocate hba struct\n");
4790 /* Set reference to PCI device in HBA structure */
4791 phba->pcidev = pdev;
4793 /* Assign an unused board number */
4794 phba->brd_no = lpfc_get_instance();
4795 if (phba->brd_no < 0) {
4800 spin_lock_init(&phba->ct_ev_lock);
4801 INIT_LIST_HEAD(&phba->ct_ev_waiters);
4807 * lpfc_hba_free - Free driver hba data structure with a device.
4808 * @phba: pointer to lpfc hba data structure.
4810 * This routine is invoked to free the driver hba data structure with an
4814 lpfc_hba_free(struct lpfc_hba *phba)
4816 /* Release the driver assigned board number */
4817 idr_remove(&lpfc_hba_index, phba->brd_no);
4824 * lpfc_create_shost - Create hba physical port with associated scsi host.
4825 * @phba: pointer to lpfc hba data structure.
4827 * This routine is invoked to create HBA physical port and associate a SCSI
4832 * other values - error
4835 lpfc_create_shost(struct lpfc_hba *phba)
4837 struct lpfc_vport *vport;
4838 struct Scsi_Host *shost;
4840 /* Initialize HBA FC structure */
4841 phba->fc_edtov = FF_DEF_EDTOV;
4842 phba->fc_ratov = FF_DEF_RATOV;
4843 phba->fc_altov = FF_DEF_ALTOV;
4844 phba->fc_arbtov = FF_DEF_ARBTOV;
4846 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4850 shost = lpfc_shost_from_vport(vport);
4851 phba->pport = vport;
4852 lpfc_debugfs_initialize(vport);
4853 /* Put reference to SCSI host to driver's device private data */
4854 pci_set_drvdata(phba->pcidev, shost);
4860 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4861 * @phba: pointer to lpfc hba data structure.
4863 * This routine is invoked to destroy HBA physical port and the associated
4867 lpfc_destroy_shost(struct lpfc_hba *phba)
4869 struct lpfc_vport *vport = phba->pport;
4871 /* Destroy physical port that associated with the SCSI host */
4872 destroy_port(vport);
4878 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4879 * @phba: pointer to lpfc hba data structure.
4880 * @shost: the shost to be used to detect Block guard settings.
4882 * This routine sets up the local Block guard protocol settings for @shost.
4883 * This routine also allocates memory for debugging bg buffers.
4886 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4889 if (lpfc_prot_mask && lpfc_prot_guard) {
4890 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4891 "1478 Registering BlockGuard with the "
4893 scsi_host_set_prot(shost, lpfc_prot_mask);
4894 scsi_host_set_guard(shost, lpfc_prot_guard);
4896 if (!_dump_buf_data) {
4898 spin_lock_init(&_dump_buf_lock);
4900 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4901 if (_dump_buf_data) {
4902 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4903 "9043 BLKGRD: allocated %d pages for "
4904 "_dump_buf_data at 0x%p\n",
4905 (1 << pagecnt), _dump_buf_data);
4906 _dump_buf_data_order = pagecnt;
4907 memset(_dump_buf_data, 0,
4908 ((1 << PAGE_SHIFT) << pagecnt));
4913 if (!_dump_buf_data_order)
4914 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4915 "9044 BLKGRD: ERROR unable to allocate "
4916 "memory for hexdump\n");
4918 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4919 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4920 "\n", _dump_buf_data);
4921 if (!_dump_buf_dif) {
4924 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4925 if (_dump_buf_dif) {
4926 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4927 "9046 BLKGRD: allocated %d pages for "
4928 "_dump_buf_dif at 0x%p\n",
4929 (1 << pagecnt), _dump_buf_dif);
4930 _dump_buf_dif_order = pagecnt;
4931 memset(_dump_buf_dif, 0,
4932 ((1 << PAGE_SHIFT) << pagecnt));
4937 if (!_dump_buf_dif_order)
4938 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4939 "9047 BLKGRD: ERROR unable to allocate "
4940 "memory for hexdump\n");
4942 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4943 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4948 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4949 * @phba: pointer to lpfc hba data structure.
4951 * This routine is invoked to perform all the necessary post initialization
4952 * setup for the device.
4955 lpfc_post_init_setup(struct lpfc_hba *phba)
4957 struct Scsi_Host *shost;
4958 struct lpfc_adapter_event_header adapter_event;
4960 /* Get the default values for Model Name and Description */
4961 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4964 * hba setup may have changed the hba_queue_depth so we need to
4965 * adjust the value of can_queue.
4967 shost = pci_get_drvdata(phba->pcidev);
4968 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4969 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4970 lpfc_setup_bg(phba, shost);
4972 lpfc_host_attrib_init(shost);
4974 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4975 spin_lock_irq(shost->host_lock);
4976 lpfc_poll_start_timer(phba);
4977 spin_unlock_irq(shost->host_lock);
4980 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4981 "0428 Perform SCSI scan\n");
4982 /* Send board arrival event to upper layer */
4983 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4984 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4985 fc_host_post_vendor_event(shost, fc_get_event_number(),
4986 sizeof(adapter_event),
4987 (char *) &adapter_event,
4993 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4994 * @phba: pointer to lpfc hba data structure.
4996 * This routine is invoked to set up the PCI device memory space for device
4997 * with SLI-3 interface spec.
5001 * other values - error
5004 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5006 struct pci_dev *pdev;
5007 unsigned long bar0map_len, bar2map_len;
5010 int error = -ENODEV;
5012 /* Obtain PCI device reference */
5016 pdev = phba->pcidev;
5018 /* Set the device DMA mask size */
5019 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5020 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
5021 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5022 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
5027 /* Get the bus address of Bar0 and Bar2 and the number of bytes
5028 * required by each mapping.
5030 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5031 bar0map_len = pci_resource_len(pdev, 0);
5033 phba->pci_bar2_map = pci_resource_start(pdev, 2);
5034 bar2map_len = pci_resource_len(pdev, 2);
5036 /* Map HBA SLIM to a kernel virtual address. */
5037 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5038 if (!phba->slim_memmap_p) {
5039 dev_printk(KERN_ERR, &pdev->dev,
5040 "ioremap failed for SLIM memory.\n");
5044 /* Map HBA Control Registers to a kernel virtual address. */
5045 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5046 if (!phba->ctrl_regs_memmap_p) {
5047 dev_printk(KERN_ERR, &pdev->dev,
5048 "ioremap failed for HBA control registers.\n");
5049 goto out_iounmap_slim;
5052 /* Allocate memory for SLI-2 structures */
5053 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5057 if (!phba->slim2p.virt)
5060 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5061 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5062 phba->mbox_ext = (phba->slim2p.virt +
5063 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5064 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5065 phba->IOCBs = (phba->slim2p.virt +
5066 offsetof(struct lpfc_sli2_slim, IOCBs));
5068 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5069 lpfc_sli_hbq_size(),
5070 &phba->hbqslimp.phys,
5072 if (!phba->hbqslimp.virt)
5075 hbq_count = lpfc_sli_hbq_count();
5076 ptr = phba->hbqslimp.virt;
5077 for (i = 0; i < hbq_count; ++i) {
5078 phba->hbqs[i].hbq_virt = ptr;
5079 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5080 ptr += (lpfc_hbq_defs[i]->entry_count *
5081 sizeof(struct lpfc_hbq_entry));
5083 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5084 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5086 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5088 INIT_LIST_HEAD(&phba->rb_pend_list);
5090 phba->MBslimaddr = phba->slim_memmap_p;
5091 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5092 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5093 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5094 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5099 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5100 phba->slim2p.virt, phba->slim2p.phys);
5102 iounmap(phba->ctrl_regs_memmap_p);
5104 iounmap(phba->slim_memmap_p);
5110 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5111 * @phba: pointer to lpfc hba data structure.
5113 * This routine is invoked to unset the PCI device memory space for device
5114 * with SLI-3 interface spec.
5117 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5119 struct pci_dev *pdev;
5121 /* Obtain PCI device reference */
5125 pdev = phba->pcidev;
5127 /* Free coherent DMA memory allocated */
5128 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5129 phba->hbqslimp.virt, phba->hbqslimp.phys);
5130 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5131 phba->slim2p.virt, phba->slim2p.phys);
5133 /* I/O memory unmap */
5134 iounmap(phba->ctrl_regs_memmap_p);
5135 iounmap(phba->slim_memmap_p);
5141 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5142 * @phba: pointer to lpfc hba data structure.
5144 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5145 * done and check status.
5147 * Return 0 if successful, otherwise -ENODEV.
5150 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5152 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5153 int i, port_error = -ENODEV;
5155 if (!phba->sli4_hba.STAregaddr)
5158 /* Wait up to 30 seconds for the SLI Port POST done and ready */
5159 for (i = 0; i < 3000; i++) {
5160 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5161 /* Encounter fatal POST error, break out */
5162 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5163 port_error = -ENODEV;
5166 if (LPFC_POST_STAGE_ARMFW_READY ==
5167 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5176 "1408 Failure HBA POST Status: sta_reg=0x%x, "
5177 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5178 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
5179 bf_get(lpfc_hst_state_perr, &sta_reg),
5180 bf_get(lpfc_hst_state_sfi, &sta_reg),
5181 bf_get(lpfc_hst_state_nip, &sta_reg),
5182 bf_get(lpfc_hst_state_ipc, &sta_reg),
5183 bf_get(lpfc_hst_state_xrom, &sta_reg),
5184 bf_get(lpfc_hst_state_dl, &sta_reg),
5185 bf_get(lpfc_hst_state_port_status, &sta_reg));
5187 /* Log device information */
5188 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5189 if (bf_get(lpfc_sli_intf_valid,
5190 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5191 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5192 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5193 "FeatureL1=0x%x, FeatureL2=0x%x\n",
5194 bf_get(lpfc_sli_intf_sli_family,
5195 &phba->sli4_hba.sli_intf),
5196 bf_get(lpfc_sli_intf_slirev,
5197 &phba->sli4_hba.sli_intf),
5198 bf_get(lpfc_sli_intf_featurelevel1,
5199 &phba->sli4_hba.sli_intf),
5200 bf_get(lpfc_sli_intf_featurelevel2,
5201 &phba->sli4_hba.sli_intf));
5203 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5204 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5205 /* With unrecoverable error, log the error message and return error */
5206 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5207 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5208 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5209 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5211 "1422 HBA Unrecoverable error: "
5212 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5213 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5214 uerrlo_reg.word0, uerrhi_reg.word0,
5215 phba->sli4_hba.ue_mask_lo,
5216 phba->sli4_hba.ue_mask_hi);
5224 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5225 * @phba: pointer to lpfc hba data structure.
5227 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5231 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5233 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5234 LPFC_UERR_STATUS_LO;
5235 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5236 LPFC_UERR_STATUS_HI;
5237 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5239 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5241 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5246 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5247 * @phba: pointer to lpfc hba data structure.
5249 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5253 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5256 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5258 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5260 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5262 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5268 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5269 * @phba: pointer to lpfc hba data structure.
5270 * @vf: virtual function number
5272 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5273 * based on the given virtual function number, @vf.
5275 * Return 0 if successful, otherwise -ENODEV.
5278 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5280 if (vf > LPFC_VIR_FUNC_MAX)
5283 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5284 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5285 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5286 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5287 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5288 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5289 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5290 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5291 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5292 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
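/*
 * Each virtual function owns one LPFC_VFR_PAGE_SIZE page of doorbells;
 * e.g. the RQ doorbell of VF 2 sits at
 * drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL.
 */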
5297 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5298 * @phba: pointer to lpfc hba data structure.
5300 * This routine is invoked to create the bootstrap mailbox
5301 * region consistent with the SLI-4 interface spec. This
5302 * routine allocates all memory necessary to communicate
5303 * mailbox commands to the port and sets up all alignment
5304 * needs. No locks are expected to be held when calling
5309 * ENOMEM - could not allocate memory.
5312 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5315 struct lpfc_dmabuf *dmabuf;
5316 struct dma_address *dma_address;
5320 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5325 * The bootstrap mailbox region is comprised of 2 parts
5326 * plus an alignment restriction of 16 bytes.
5328 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5329 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5333 if (!dmabuf->virt) {
5337 memset(dmabuf->virt, 0, bmbx_size);
5340 * Initialize the bootstrap mailbox pointers now so that the register
5341 * operations are simple later. The mailbox dma address is required
5342 * to be 16-byte aligned. Also align the virtual memory as each
5343 * mailbox is copied into the bmbx mailbox region before issuing the
5344 * command to the port.
5346 phba->sli4_hba.bmbx.dmabuf = dmabuf;
5347 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5349 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5350 LPFC_ALIGN_16_BYTE);
5351 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5352 LPFC_ALIGN_16_BYTE);
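/*
 * Because bmbx_size includes (LPFC_ALIGN_16_BYTE - 1) slack bytes,
 * rounding dmabuf->virt/phys up to the next 16-byte boundary here can
 * never run past the end of the allocation.
 */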
5355 * Set the high and low physical addresses now. The SLI4 alignment
5356 * requirement is 16 bytes and the mailbox is posted to the port
5357 * as two 30-bit addresses. The other data is a bit marking whether
5358 * the 30-bit address is the high or low address.
5359 * Upcast bmbx aphys to 64bits so shift instruction compiles
5360 * clean on 32 bit machines.
5362 dma_address = &phba->sli4_hba.bmbx.dma_address;
5363 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5364 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5365 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5366 LPFC_BMBX_BIT1_ADDR_HI);
5368 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5369 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5370 LPFC_BMBX_BIT1_ADDR_LO);
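/*
 * Net effect: addr_lo carries physical address bits 33:4 and addr_hi
 * carries bits 63:34, each shifted left two places and OR'd with a
 * marker bit that tells the port whether it holds the high or low half.
 */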
5375 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5376 * @phba: pointer to lpfc hba data structure.
5378 * This routine is invoked to teardown the bootstrap mailbox
5379 * region and release all host resources. This routine requires
5380 * the caller to ensure all mailbox commands have been recovered, that no
5381 * additional mailbox commands are sent, and that interrupts are disabled
5382 * before calling this routine.
5386 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5388 dma_free_coherent(&phba->pcidev->dev,
5389 phba->sli4_hba.bmbx.bmbx_size,
5390 phba->sli4_hba.bmbx.dmabuf->virt,
5391 phba->sli4_hba.bmbx.dmabuf->phys);
5393 kfree(phba->sli4_hba.bmbx.dmabuf);
5394 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5398 * lpfc_sli4_read_config - Get the config parameters.
5399 * @phba: pointer to lpfc hba data structure.
5401 * This routine is invoked to read the configuration parameters from the HBA.
5402 * The configuration parameters are used to set the base and maximum values
5403 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
5404 * allocation for the port.
5408 * ENOMEM - No available memory
5409 * EIO - The mailbox failed to complete successfully.
5412 lpfc_sli4_read_config(struct lpfc_hba *phba)
5415 struct lpfc_mbx_read_config *rd_config;
5418 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5420 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5421 "2011 Unable to allocate memory for issuing "
5422 "SLI_CONFIG_SPECIAL mailbox command\n");
5426 lpfc_read_config(phba, pmb);
5428 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5429 if (rc != MBX_SUCCESS) {
5430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5431 "2012 Mailbox failed , mbxCmd x%x "
5432 "READ_CONFIG, mbxStatus x%x\n",
5433 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5434 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5437 rd_config = &pmb->u.mqe.un.rd_config;
5438 phba->sli4_hba.max_cfg_param.max_xri =
5439 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5440 phba->sli4_hba.max_cfg_param.xri_base =
5441 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5442 phba->sli4_hba.max_cfg_param.max_vpi =
5443 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5444 phba->sli4_hba.max_cfg_param.vpi_base =
5445 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5446 phba->sli4_hba.max_cfg_param.max_rpi =
5447 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5448 phba->sli4_hba.max_cfg_param.rpi_base =
5449 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5450 phba->sli4_hba.max_cfg_param.max_vfi =
5451 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5452 phba->sli4_hba.max_cfg_param.vfi_base =
5453 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5454 phba->sli4_hba.max_cfg_param.max_fcfi =
5455 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5456 phba->sli4_hba.max_cfg_param.fcfi_base =
5457 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5458 phba->sli4_hba.max_cfg_param.max_eq =
5459 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5460 phba->sli4_hba.max_cfg_param.max_rq =
5461 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5462 phba->sli4_hba.max_cfg_param.max_wq =
5463 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5464 phba->sli4_hba.max_cfg_param.max_cq =
5465 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5466 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5467 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5468 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5469 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5470 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5471 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5472 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5473 phba->max_vports = phba->max_vpi;
5474 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5475 "2003 cfg params XRI(B:%d M:%d), "
5479 "FCFI(B:%d M:%d)\n",
5480 phba->sli4_hba.max_cfg_param.xri_base,
5481 phba->sli4_hba.max_cfg_param.max_xri,
5482 phba->sli4_hba.max_cfg_param.vpi_base,
5483 phba->sli4_hba.max_cfg_param.max_vpi,
5484 phba->sli4_hba.max_cfg_param.vfi_base,
5485 phba->sli4_hba.max_cfg_param.max_vfi,
5486 phba->sli4_hba.max_cfg_param.rpi_base,
5487 phba->sli4_hba.max_cfg_param.max_rpi,
5488 phba->sli4_hba.max_cfg_param.fcfi_base,
5489 phba->sli4_hba.max_cfg_param.max_fcfi);
5491 mempool_free(pmb, phba->mbox_mem_pool);
5493 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5494 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
5495 phba->cfg_hba_queue_depth =
5496 phba->sli4_hba.max_cfg_param.max_xri;
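/*
 * Example (hypothetical values): if READ_CONFIG reports max_xri = 1024
 * but cfg_hba_queue_depth was set to 8192, the depth is clamped to
 * 1024 so the driver never queues more I/Os than it has XRIs.
 */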
5501 * lpfc_setup_endian_order - Notify the port of the host's endian order.
5502 * @phba: pointer to lpfc hba data structure.
5504 * This routine is invoked to set up the host-side endian order to the
5505 * HBA consistent with the SLI-4 interface spec.
5509 * ENOMEM - No available memory
5510 * EIO - The mailbox failed to complete successfully.
5513 lpfc_setup_endian_order(struct lpfc_hba *phba)
5515 LPFC_MBOXQ_t *mboxq;
5517 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5518 HOST_ENDIAN_HIGH_WORD1};
5520 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5523 "0492 Unable to allocate memory for issuing "
5524 "SLI_CONFIG_SPECIAL mailbox command\n");
5529 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5530 * words to contain special data values and no other data.
5532 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5533 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5534 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5535 if (rc != MBX_SUCCESS) {
5536 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5537 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
5543 mempool_free(mboxq, phba->mbox_mem_pool);
5548 * lpfc_sli4_queue_create - Create all the SLI4 queues
5549 * @phba: pointer to lpfc hba data structure.
5551 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5552 * operation. For each SLI4 queue type, the parameters such as queue entry
5553 * count (queue depth) shall be taken from the module parameter. For now,
5554 * we just use some constant number as place holder.
5558 * ENOMEM - No available memory
5559 * EIO - The mailbox failed to complete successfully.
5562 lpfc_sli4_queue_create(struct lpfc_hba *phba)
5564 struct lpfc_queue *qdesc;
5565 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5566 int cfg_fcp_wq_count;
5567 int cfg_fcp_eq_count;
5570 * Sanity check for configured queue parameters against the run-time
5574 /* Sanity check on FCP fast-path WQ parameters */
5575 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5576 if (cfg_fcp_wq_count >
5577 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5578 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5580 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5581 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5582 "2581 Not enough WQs (%d) from "
5583 "the pci function for supporting "
5585 phba->sli4_hba.max_cfg_param.max_wq,
5586 phba->cfg_fcp_wq_count);
5589 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5590 "2582 Not enough WQs (%d) from the pci "
5591 "function for supporting the requested "
5592 "FCP WQs (%d), the actual FCP WQs can "
5593 "be supported: %d\n",
5594 phba->sli4_hba.max_cfg_param.max_wq,
5595 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5597 /* The actual number of FCP work queues adopted */
5598 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5600 /* Sanity check on FCP fast-path EQ parameters */
5601 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5602 if (cfg_fcp_eq_count >
5603 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5604 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5606 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5607 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5608 "2574 Not enough EQs (%d) from the "
5609 "pci function for supporting FCP "
5611 phba->sli4_hba.max_cfg_param.max_eq,
5612 phba->cfg_fcp_eq_count);
5615 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5616 "2575 Not enough EQs (%d) from the pci "
5617 "function for supporting the requested "
5618 "FCP EQs (%d), the actual FCP EQs can "
5619 "be supported: %d\n",
5620 phba->sli4_hba.max_cfg_param.max_eq,
5621 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5623 /* It does not make sense to have more EQs than WQs */
5624 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5625 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5626 "2593 The FCP EQ count(%d) cannot be greater "
5627 "than the FCP WQ count(%d), limiting the "
5628 "FCP EQ count to %d\n", cfg_fcp_eq_count,
5629 phba->cfg_fcp_wq_count,
5630 phba->cfg_fcp_wq_count);
5631 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5633 /* The actual number of FCP event queues adopted */
5634 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5635 /* The overall number of event queues used */
5636 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
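/*
 * Example (hypothetical counts): with cfg_fcp_eq_count adjusted to 4,
 * cfg_eqn becomes 4 + LPFC_SP_EQN_DEF, i.e. the slow-path EQ(s) plus
 * one EQ per fast-path FCP EQ.
 */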
5639 * Create Event Queues (EQs)
5642 /* Get EQ depth from module parameter, fake the default for now */
5643 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5644 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5646 /* Create slow path event queue */
5647 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5648 phba->sli4_hba.eq_ecount);
5650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5651 "0496 Failed allocate slow-path EQ\n");
5654 phba->sli4_hba.sp_eq = qdesc;
5656 /* Create fast-path FCP Event Queue(s) */
5657 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5658 phba->cfg_fcp_eq_count), GFP_KERNEL);
5659 if (!phba->sli4_hba.fp_eq) {
5660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5661 "2576 Failed allocate memory for fast-path "
5662 "EQ record array\n");
5663 goto out_free_sp_eq;
5665 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5666 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5667 phba->sli4_hba.eq_ecount);
5669 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5670 "0497 Failed allocate fast-path EQ\n");
5671 goto out_free_fp_eq;
5673 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5677 * Create Complete Queues (CQs)
5680 /* Get CQ depth from module parameter, fake the default for now */
5681 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5682 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5684 /* Create slow-path Mailbox Command Complete Queue */
5685 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5686 phba->sli4_hba.cq_ecount);
5688 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5689 "0500 Failed allocate slow-path mailbox CQ\n");
5690 goto out_free_fp_eq;
5692 phba->sli4_hba.mbx_cq = qdesc;
5694 /* Create slow-path ELS Complete Queue */
5695 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5696 phba->sli4_hba.cq_ecount);
5698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5699 "0501 Failed allocate slow-path ELS CQ\n");
5700 goto out_free_mbx_cq;
5702 phba->sli4_hba.els_cq = qdesc;
5705 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5706 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5707 phba->cfg_fcp_eq_count), GFP_KERNEL);
5708 if (!phba->sli4_hba.fcp_cq) {
5709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5710 "2577 Failed allocate memory for fast-path "
5711 "CQ record array\n");
5712 goto out_free_els_cq;
5714 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5715 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5716 phba->sli4_hba.cq_ecount);
5718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5719 "0499 Failed allocate fast-path FCP "
5720 "CQ (%d)\n", fcp_cqidx);
5721 goto out_free_fcp_cq;
5723 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5726 /* Create Mailbox Command Queue */
5727 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5728 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5730 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5731 phba->sli4_hba.mq_ecount);
5733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5734 "0505 Failed allocate slow-path MQ\n");
5735 goto out_free_fcp_cq;
5737 phba->sli4_hba.mbx_wq = qdesc;
5740 * Create all the Work Queues (WQs)
5742 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5743 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5745 /* Create slow-path ELS Work Queue */
5746 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5747 phba->sli4_hba.wq_ecount);
5749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5750 "0504 Failed allocate slow-path ELS WQ\n");
5751 goto out_free_mbx_wq;
5753 phba->sli4_hba.els_wq = qdesc;
5755 /* Create fast-path FCP Work Queue(s) */
5756 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5757 phba->cfg_fcp_wq_count), GFP_KERNEL);
5758 if (!phba->sli4_hba.fcp_wq) {
5759 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5760 "2578 Failed allocate memory for fast-path "
5761 "WQ record array\n");
5762 goto out_free_els_wq;
5764 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5765 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5766 phba->sli4_hba.wq_ecount);
5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769 "0503 Failed allocate fast-path FCP "
5770 "WQ (%d)\n", fcp_wqidx);
5771 goto out_free_fcp_wq;
5773 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5777 * Create Receive Queue (RQ)
5779 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5780 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5782 /* Create Receive Queue for header */
5783 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5784 phba->sli4_hba.rq_ecount);
5786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5787 "0506 Failed allocate receive HRQ\n");
5788 goto out_free_fcp_wq;
5790 phba->sli4_hba.hdr_rq = qdesc;
5792 /* Create Receive Queue for data */
5793 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5794 phba->sli4_hba.rq_ecount);
5796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5797 "0507 Failed allocate receive DRQ\n");
5798 goto out_free_hdr_rq;
5800 phba->sli4_hba.dat_rq = qdesc;
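/*
 * The header RQ and data RQ form a pair for unsolicited receive: frame
 * headers land in hdr_rq and payloads in dat_rq. Both are bound to the
 * ELS CQ later in lpfc_sli4_queue_setup().
 */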
5805 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5806 phba->sli4_hba.hdr_rq = NULL;
5808 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5809 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5810 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5812 kfree(phba->sli4_hba.fcp_wq);
5814 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5815 phba->sli4_hba.els_wq = NULL;
5817 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5818 phba->sli4_hba.mbx_wq = NULL;
5820 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5821 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5822 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5824 kfree(phba->sli4_hba.fcp_cq);
5826 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5827 phba->sli4_hba.els_cq = NULL;
5829 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5830 phba->sli4_hba.mbx_cq = NULL;
5832 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5833 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5834 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5836 kfree(phba->sli4_hba.fp_eq);
5838 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5839 phba->sli4_hba.sp_eq = NULL;
5845 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5846 * @phba: pointer to lpfc hba data structure.
5848 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5853 * ENOMEM - No available memory
5854 * EIO - The mailbox failed to complete successfully.
5857 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5861 /* Release mailbox command work queue */
5862 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5863 phba->sli4_hba.mbx_wq = NULL;
5865 /* Release ELS work queue */
5866 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5867 phba->sli4_hba.els_wq = NULL;
5869 /* Release FCP work queue */
5870 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5871 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5872 kfree(phba->sli4_hba.fcp_wq);
5873 phba->sli4_hba.fcp_wq = NULL;
5875 /* Release unsolicited receive queue */
5876 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5877 phba->sli4_hba.hdr_rq = NULL;
5878 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5879 phba->sli4_hba.dat_rq = NULL;
5881 /* Release ELS complete queue */
5882 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5883 phba->sli4_hba.els_cq = NULL;
5885 /* Release mailbox command complete queue */
5886 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5887 phba->sli4_hba.mbx_cq = NULL;
5889 /* Release FCP response complete queue */
5890 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5891 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5892 kfree(phba->sli4_hba.fcp_cq);
5893 phba->sli4_hba.fcp_cq = NULL;
5895 /* Release fast-path event queue */
5896 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5897 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5898 kfree(phba->sli4_hba.fp_eq);
5899 phba->sli4_hba.fp_eq = NULL;
5901 /* Release slow-path event queue */
5902 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5903 phba->sli4_hba.sp_eq = NULL;
5909 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5910 * @phba: pointer to lpfc hba data structure.
5912 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5917 * ENOMEM - No available memory
5918 * EIO - The mailbox failed to complete successfully.
5921 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5924 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5925 int fcp_cq_index = 0;
5928 * Set up Event Queues (EQs)
5931 /* Set up slow-path event queue */
5932 if (!phba->sli4_hba.sp_eq) {
5933 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5934 "0520 Slow-path EQ not allocated\n");
5937 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5941 "0521 Failed setup of slow-path EQ: "
5945 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5946 "2583 Slow-path EQ setup: queue-id=%d\n",
5947 phba->sli4_hba.sp_eq->queue_id);
5949 /* Set up fast-path event queue */
5950 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5951 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5953 "0522 Fast-path EQ (%d) not "
5954 "allocated\n", fcp_eqidx);
5955 goto out_destroy_fp_eq;
5957 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5958 phba->cfg_fcp_imax);
5960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5961 "0523 Failed setup of fast-path EQ "
5962 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5963 goto out_destroy_fp_eq;
5965 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5966 "2584 Fast-path EQ setup: "
5967 "queue[%d]-id=%d\n", fcp_eqidx,
5968 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5972 * Set up Complete Queues (CQs)
5975 /* Set up slow-path MBOX Complete Queue as the first CQ */
5976 if (!phba->sli4_hba.mbx_cq) {
5977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5978 "0528 Mailbox CQ not allocated\n");
5979 goto out_destroy_fp_eq;
5981 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5982 LPFC_MCQ, LPFC_MBOX);
5984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5985 "0529 Failed setup of slow-path mailbox CQ: "
5987 goto out_destroy_fp_eq;
5989 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5990 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5991 phba->sli4_hba.mbx_cq->queue_id,
5992 phba->sli4_hba.sp_eq->queue_id);
5994 /* Set up slow-path ELS Complete Queue */
5995 if (!phba->sli4_hba.els_cq) {
5996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5997 "0530 ELS CQ not allocated\n");
5998 goto out_destroy_mbx_cq;
6000 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6001 LPFC_WCQ, LPFC_ELS);
6003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6004 "0531 Failed setup of slow-path ELS CQ: "
6006 goto out_destroy_mbx_cq;
6008 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6009 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6010 phba->sli4_hba.els_cq->queue_id,
6011 phba->sli4_hba.sp_eq->queue_id);
6013 /* Set up fast-path FCP Response Complete Queue */
6014 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6015 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6017 "0526 Fast-path FCP CQ (%d) not "
6018 "allocated\n", fcp_cqidx);
6019 goto out_destroy_fcp_cq;
6021 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6022 phba->sli4_hba.fp_eq[fcp_cqidx],
6023 LPFC_WCQ, LPFC_FCP);
6025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6026 "0527 Failed setup of fast-path FCP "
6027 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6028 goto out_destroy_fcp_cq;
6030 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6031 "2588 FCP CQ setup: cq[%d]-id=%d, "
6032 "parent eq[%d]-id=%d\n",
6034 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6036 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6040 * Set up all the Work Queues (WQs)
6043 /* Set up Mailbox Command Queue */
6044 if (!phba->sli4_hba.mbx_wq) {
6045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6046 "0538 Slow-path MQ not allocated\n");
6047 goto out_destroy_fcp_cq;
6049 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6050 phba->sli4_hba.mbx_cq, LPFC_MBOX);
6052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6053 "0539 Failed setup of slow-path MQ: "
6055 goto out_destroy_fcp_cq;
6057 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6058 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6059 phba->sli4_hba.mbx_wq->queue_id,
6060 phba->sli4_hba.mbx_cq->queue_id);
6062 /* Set up slow-path ELS Work Queue */
6063 if (!phba->sli4_hba.els_wq) {
6064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6065 "0536 Slow-path ELS WQ not allocated\n");
6066 goto out_destroy_mbx_wq;
6068 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6069 phba->sli4_hba.els_cq, LPFC_ELS);
6071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6072 "0537 Failed setup of slow-path ELS WQ: "
6074 goto out_destroy_mbx_wq;
6076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6077 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6078 phba->sli4_hba.els_wq->queue_id,
6079 phba->sli4_hba.els_cq->queue_id);
6081 /* Set up fast-path FCP Work Queue */
6082 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6083 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6084 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6085 "0534 Fast-path FCP WQ (%d) not "
6086 "allocated\n", fcp_wqidx);
6087 goto out_destroy_fcp_wq;
6089 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6090 phba->sli4_hba.fcp_cq[fcp_cq_index],
6093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6094 "0535 Failed setup of fast-path FCP "
6095 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6096 goto out_destroy_fcp_wq;
6098 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6099 "2591 FCP WQ setup: wq[%d]-id=%d, "
6100 "parent cq[%d]-id=%d\n",
6102 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6104 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6105 /* Round robin FCP Work Queue's Completion Queue assignment (see the sketch after this routine) */
6106 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6110 * Create Receive Queue (RQ)
6112 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6114 "0540 Receive Queue not allocated\n");
6115 goto out_destroy_fcp_wq;
6117 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6118 phba->sli4_hba.els_cq, LPFC_USOL);
6120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6121 "0541 Failed setup of Receive Queue: "
6123 goto out_destroy_fcp_wq;
6125 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6126 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6127 "parent cq-id=%d\n",
6128 phba->sli4_hba.hdr_rq->queue_id,
6129 phba->sli4_hba.dat_rq->queue_id,
6130 phba->sli4_hba.els_cq->queue_id);
6134 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6135 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6136 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6138 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6140 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6141 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6142 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6144 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6146 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6147 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6148 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
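/*
 * Editor's note: illustrative sketch only, not part of the driver. It shows
 * how the round-robin assignment in the FCP WQ loop above maps work queues
 * onto the available FCP CQs when cfg_fcp_wq_count exceeds cfg_fcp_eq_count.
 * The counts below (4 WQs, 2 EQs/CQs) are hypothetical example values.
 */
#if 0
	{
		int wq, cq = 0;
		int wq_count = 4, eq_count = 2;	/* assumed example values */

		for (wq = 0; wq < wq_count; wq++) {
			/* wq 0 -> cq 0, wq 1 -> cq 1, wq 2 -> cq 0, wq 3 -> cq 1 */
			printk(KERN_INFO "FCP WQ[%d] bound to FCP CQ[%d]\n",
			       wq, cq);
			cq = (cq + 1) % eq_count;
		}
	}
#endif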
6154 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6155 * @phba: pointer to lpfc hba data structure.
6157 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6162 * ENOMEM - No available memory
6163 * EIO - The mailbox failed to complete successfully.
6166 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6170 /* Unset mailbox command work queue */
6171 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6172 /* Unset ELS work queue */
6173 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6174 /* Unset unsolicited receive queue */
6175 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6176 /* Unset FCP work queue */
6177 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6178 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6179 /* Unset mailbox command complete queue */
6180 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6181 /* Unset ELS complete queue */
6182 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6183 /* Unset FCP response complete queue */
6184 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6185 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6186 /* Unset fast-path event queue */
6187 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6188 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6189 /* Unset slow-path event queue */
6190 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
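/*
 * Editor's note: illustrative sketch only, not part of the driver. It shows
 * the expected pairing of the two routines above: lpfc_sli4_queue_setup()
 * during bring-up and lpfc_sli4_queue_unset() during teardown, which destroys
 * the work/receive queues before their parent CQs and the CQs before their
 * parent EQs. The error handling is schematic.
 */
#if 0
	int rc;

	rc = lpfc_sli4_queue_setup(phba);
	if (rc) {
		/* queue bring-up failed; abort HBA setup (schematic) */
	}

	/* later, when tearing the SLI4 HBA down */
	lpfc_sli4_queue_unset(phba);
#endif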
6194 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6195 * @phba: pointer to lpfc hba data structure.
6197 * This routine is invoked to allocate and set up a pool of completion queue
6198 * events. The body of the completion queue event is a completion queue entry
6199 * (CQE). For now, this pool is used for the interrupt service routine to queue
6200 * the following HBA completion queue events for the worker thread to process:
6201 * - Mailbox asynchronous events
6202 * - Receive queue completion unsolicited events
6203 * Later, this can be used for all the slow-path events.
6207 * -ENOMEM - No available memory
6210 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6212 struct lpfc_cq_event *cq_event;
6215 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6216 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6218 goto out_pool_create_fail;
6219 list_add_tail(&cq_event->list,
6220 &phba->sli4_hba.sp_cqe_event_pool);
6224 out_pool_create_fail:
6225 lpfc_sli4_cq_event_pool_destroy(phba);
6230 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6231 * @phba: pointer to lpfc hba data structure.
6233 * This routine is invoked to free the pool of completion queue events at
6234 * driver unload time. Note that it is the responsibility of the driver
6235 * cleanup routine to free all the outstanding completion-queue events
6236 * allocated from this pool back into the pool before invoking this routine
6237 * to destroy the pool.
6240 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6242 struct lpfc_cq_event *cq_event, *next_cq_event;
6244 list_for_each_entry_safe(cq_event, next_cq_event,
6245 &phba->sli4_hba.sp_cqe_event_pool, list) {
6246 list_del(&cq_event->list);
6252 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6253 * @phba: pointer to lpfc hba data structure.
6255 * This routine is the lock free version of the API invoked to allocate a
6256 * completion-queue event from the free pool.
6258 * Return: Pointer to the newly allocated completion-queue event if successful
6261 struct lpfc_cq_event *
6262 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6264 struct lpfc_cq_event *cq_event = NULL;
6266 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6267 struct lpfc_cq_event, list);
6272 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6273 * @phba: pointer to lpfc hba data structure.
6275 * This routine is the lock version of the API invoked to allocate a
6276 * completion-queue event from the free pool.
6278 * Return: Pointer to the newly allocated completion-queue event if successful
6281 struct lpfc_cq_event *
6282 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6284 struct lpfc_cq_event *cq_event;
6285 unsigned long iflags;
6287 spin_lock_irqsave(&phba->hbalock, iflags);
6288 cq_event = __lpfc_sli4_cq_event_alloc(phba);
6289 spin_unlock_irqrestore(&phba->hbalock, iflags);
6294 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6295 * @phba: pointer to lpfc hba data structure.
6296 * @cq_event: pointer to the completion queue event to be freed.
6298 * This routine is the lock free version of the API invoked to release a
6299 * completion-queue event back into the free pool.
6302 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6303 struct lpfc_cq_event *cq_event)
6305 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6309 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6310 * @phba: pointer to lpfc hba data structure.
6311 * @cq_event: pointer to the completion queue event to be freed.
6313 * This routine is the lock version of the API invoked to release a
6314 * completion-queue event back into the free pool.
6317 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6318 struct lpfc_cq_event *cq_event)
6320 unsigned long iflags;
6321 spin_lock_irqsave(&phba->hbalock, iflags);
6322 __lpfc_sli4_cq_event_release(phba, cq_event);
6323 spin_unlock_irqrestore(&phba->hbalock, iflags);
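/*
 * Editor's note: illustrative sketch only, not part of the driver. Per the
 * kernel-doc above, interrupt context allocates a CQ event from this pool,
 * queues it on one of the slow-path pending lists (see
 * lpfc_sli4_cq_event_release_all() below), and the worker thread returns the
 * event to the pool once it has processed it. Locking and the CQE copy are
 * omitted here and left schematic.
 */
#if 0
	struct lpfc_cq_event *cq_event;

	/* interrupt context: capture an event for deferred processing */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (cq_event)
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_asynce_work_queue);

	/* worker thread context: done with the event, return it to the pool */
	lpfc_sli4_cq_event_release(phba, cq_event);
#endif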
6327 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6328 * @phba: pointer to lpfc hba data structure.
6330 * This routine is invoked to free all the pending completion-queue events
6331 * back into the free pool for device reset.
6334 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6337 struct lpfc_cq_event *cqe;
6338 unsigned long iflags;
6340 /* Retrieve all the pending WCQEs from pending WCQE lists */
6341 spin_lock_irqsave(&phba->hbalock, iflags);
6342 /* Pending FCP XRI abort events */
6343 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6345 /* Pending ELS XRI abort events */
6346 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6348 /* Pending async events */
6349 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6351 spin_unlock_irqrestore(&phba->hbalock, iflags);
6353 while (!list_empty(&cqelist)) {
6354 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6355 lpfc_sli4_cq_event_release(phba, cqe);
6360 * lpfc_pci_function_reset - Reset pci function.
6361 * @phba: pointer to lpfc hba data structure.
6363 * This routine is invoked to request a PCI function reset. It destroys
6364 * all resources assigned to the PCI function that originates this request.
6368 * ENOMEM - No available memory
6369 * EIO - The mailbox failed to complete successfully.
6372 lpfc_pci_function_reset(struct lpfc_hba *phba)
6374 LPFC_MBOXQ_t *mboxq;
6376 uint32_t shdr_status, shdr_add_status;
6377 union lpfc_sli4_cfg_shdr *shdr;
6379 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6382 "0494 Unable to allocate memory for issuing "
6383 "SLI_FUNCTION_RESET mailbox command\n");
6387 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6388 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6389 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6390 LPFC_SLI4_MBX_EMBED);
6391 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6392 shdr = (union lpfc_sli4_cfg_shdr *)
6393 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6394 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6395 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6396 if (rc != MBX_TIMEOUT)
6397 mempool_free(mboxq, phba->mbox_mem_pool);
6398 if (shdr_status || shdr_add_status || rc) {
6399 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6400 "0495 SLI_FUNCTION_RESET mailbox failed with "
6401 "status x%x add_status x%x, mbx status x%x\n",
6402 shdr_status, shdr_add_status, rc);
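/*
 * Editor's note: illustrative sketch only, not part of the driver. A caller
 * treats any non-zero return from lpfc_pci_function_reset() (mailbox failure
 * or a non-zero status in the SLI4_CONFIG shared header) as a failed function
 * reset; the handling below is schematic.
 */
#if 0
	if (lpfc_pci_function_reset(phba)) {
		/* function reset failed; abort SLI4 HBA bring-up (schematic) */
	}
#endif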
6409 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6410 * @phba: pointer to lpfc hba data structure.
6411 * @cnt: number of nop mailbox commands to send.
6413 * This routine is invoked to send a number (@cnt) of NOP mailbox commands and
6414 * wait for each command to complete.
6416 * Return: the number of NOP mailbox commands completed.
6419 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6421 LPFC_MBOXQ_t *mboxq;
6422 int length, cmdsent;
6425 uint32_t shdr_status, shdr_add_status;
6426 union lpfc_sli4_cfg_shdr *shdr;
6429 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6430 "2518 Requested to send 0 NOP mailbox cmd\n");
6434 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6437 "2519 Unable to allocate memory for issuing "
6438 "NOP mailbox command\n");
6442 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6443 length = (sizeof(struct lpfc_mbx_nop) -
6444 sizeof(struct lpfc_sli4_cfg_mhdr));
6445 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6446 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6448 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6449 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6450 if (!phba->sli4_hba.intr_enable)
6451 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6453 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6454 if (rc == MBX_TIMEOUT)
6456 /* Check return status */
6457 shdr = (union lpfc_sli4_cfg_shdr *)
6458 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6459 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6460 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6462 if (shdr_status || shdr_add_status || rc) {
6463 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6464 "2520 NOP mailbox command failed "
6465 "status x%x add_status x%x mbx "
6466 "status x%x\n", shdr_status,
6467 shdr_add_status, rc);
6472 if (rc != MBX_TIMEOUT)
6473 mempool_free(mboxq, phba->mbox_mem_pool);
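/*
 * Editor's note: illustrative sketch only, not part of the driver. The
 * routine above returns how many of the requested NOP mailbox commands
 * completed, so a caller compares the return value with the count it asked
 * for. The count of 10 is a hypothetical example value.
 */
#if 0
	/* send 10 NOPs (example count) and check how many completed */
	if (lpfc_sli4_send_nop_mbox_cmds(phba, 10) != 10) {
		/* some NOP commands did not complete (schematic handling) */
	}
#endif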
6479 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device
6480 * @phba: pointer to lpfc hba data structure.
6483 * This routine is invoked to unregister an FCFI from the device.
6486 lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
6491 unsigned long flags;
6493 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6498 lpfc_unreg_fcfi(mbox, fcfi);
6500 if (!phba->sli4_hba.intr_enable)
6501 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6503 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6504 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6506 if (rc != MBX_TIMEOUT)
6507 mempool_free(mbox, phba->mbox_mem_pool);
6508 if (rc != MBX_SUCCESS)
6509 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6510 "2517 Unregister FCFI command failed "
6511 "status %d, mbxStatus x%x\n", rc,
6512 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6514 spin_lock_irqsave(&phba->hbalock, flags);
6515 /* Mark the FCFI as no longer registered */
6516 phba->fcf.fcf_flag &=
6517 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
6518 spin_unlock_irqrestore(&phba->hbalock, flags);
6523 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6524 * @phba: pointer to lpfc hba data structure.
6526 * This routine is invoked to set up the PCI device memory space for device
6527 * with SLI-4 interface spec.
6531 * other values - error
6534 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6536 struct pci_dev *pdev;
6537 unsigned long bar0map_len, bar1map_len, bar2map_len;
6538 int error = -ENODEV;
6540 /* Obtain PCI device reference */
6544 pdev = phba->pcidev;
6546 /* Set the device DMA mask size */
6547 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6548 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6549 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6550 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
6555 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6556 * number of bytes required by each mapping. They actually map to
6557 * PCI BAR regions 0 (or 1), 2, and 4 of the SLI4 device.
6559 if (pci_resource_start(pdev, 0)) {
6560 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6561 bar0map_len = pci_resource_len(pdev, 0);
6563 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6564 bar0map_len = pci_resource_len(pdev, 1);
6566 phba->pci_bar1_map = pci_resource_start(pdev, 2);
6567 bar1map_len = pci_resource_len(pdev, 2);
6569 phba->pci_bar2_map = pci_resource_start(pdev, 4);
6570 bar2map_len = pci_resource_len(pdev, 4);
6572 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6573 phba->sli4_hba.conf_regs_memmap_p =
6574 ioremap(phba->pci_bar0_map, bar0map_len);
6575 if (!phba->sli4_hba.conf_regs_memmap_p) {
6576 dev_printk(KERN_ERR, &pdev->dev,
6577 "ioremap failed for SLI4 PCI config registers.\n");
6581 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
6582 phba->sli4_hba.ctrl_regs_memmap_p =
6583 ioremap(phba->pci_bar1_map, bar1map_len);
6584 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6585 dev_printk(KERN_ERR, &pdev->dev,
6586 "ioremap failed for SLI4 HBA control registers.\n");
6587 goto out_iounmap_conf;
6590 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6591 phba->sli4_hba.drbl_regs_memmap_p =
6592 ioremap(phba->pci_bar2_map, bar2map_len);
6593 if (!phba->sli4_hba.drbl_regs_memmap_p) {
6594 dev_printk(KERN_ERR, &pdev->dev,
6595 "ioremap failed for SLI4 HBA doorbell registers.\n");
6596 goto out_iounmap_ctrl;
6599 /* Set up BAR0 PCI config space register memory map */
6600 lpfc_sli4_bar0_register_memmap(phba);
6602 /* Set up BAR1 register memory map */
6603 lpfc_sli4_bar1_register_memmap(phba);
6605 /* Set up BAR2 register memory map */
6606 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6608 goto out_iounmap_all;
6613 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6615 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6617 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6623 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6624 * @phba: pointer to lpfc hba data structure.
6626 * This routine is invoked to unset the PCI device memory space for device
6627 * with SLI-4 interface spec.
6630 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6632 struct pci_dev *pdev;
6634 /* Obtain PCI device reference */
6638 pdev = phba->pcidev;
6640 /* Free coherent DMA memory allocated */
6642 /* Unmap I/O memory space */
6643 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6644 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6645 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6651 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6652 * @phba: pointer to lpfc hba data structure.
6654 * This routine is invoked to enable the MSI-X interrupt vectors to device
6655 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6656 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6657 * invoked, enables either all or nothing, depending on the current
6658 * availability of PCI vector resources. The device driver is responsible
6659 * for calling the individual request_irq() to register each MSI-X vector
6660 * with an interrupt handler, which is done in this function. Note that
6661 * later, when the device is unloading, the driver should always call free_irq()
6662 * on all MSI-X vectors it has done request_irq() on before calling
6663 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
6664 * will be left with MSI-X enabled and leak its vectors.
6668 * other values - error
6671 lpfc_sli_enable_msix(struct lpfc_hba *phba)
6676 /* Set up MSI-X multi-message vectors */
6677 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6678 phba->msix_entries[i].entry = i;
6680 /* Configure MSI-X capability structure */
6681 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6682 ARRAY_SIZE(phba->msix_entries));
6684 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6685 "0420 PCI enable MSI-X failed (%d)\n", rc);
6688 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6690 "0477 MSI-X entry[%d]: vector=x%x "
6692 phba->msix_entries[i].vector,
6693 phba->msix_entries[i].entry);
6695 * Assign MSI-X vectors to interrupt handlers
6698 /* vector-0 is associated with the slow-path handler */
6699 rc = request_irq(phba->msix_entries[0].vector,
6700 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6701 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6703 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6704 "0421 MSI-X slow-path request_irq failed "
6709 /* vector-1 is associated with the fast-path handler */
6710 rc = request_irq(phba->msix_entries[1].vector,
6711 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6712 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6715 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6716 "0429 MSI-X fast-path request_irq failed "
6722 * Configure HBA MSI-X attention conditions to messages
6724 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6729 "0474 Unable to allocate memory for issuing "
6730 "MBOX_CONFIG_MSI command\n");
6733 rc = lpfc_config_msi(phba, pmb);
6736 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6737 if (rc != MBX_SUCCESS) {
6738 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6739 "0351 Config MSI mailbox command failed, "
6740 "mbxCmd x%x, mbxStatus x%x\n",
6741 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6745 /* Free memory allocated for mailbox command */
6746 mempool_free(pmb, phba->mbox_mem_pool);
6750 /* Free memory allocated for mailbox command */
6751 mempool_free(pmb, phba->mbox_mem_pool);
6754 /* free the irq already requested */
6755 free_irq(phba->msix_entries[1].vector, phba);
6758 /* free the irq already requested */
6759 free_irq(phba->msix_entries[0].vector, phba);
6762 /* Unconfigure MSI-X capability structure */
6763 pci_disable_msix(phba->pcidev);
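/*
 * Editor's note: illustrative sketch only, not part of the driver. With this
 * kernel-era API, pci_enable_msix() (used above) enables either all requested
 * vectors or none; a positive return value reports how many vectors could
 * have been allocated, so a caller may retry with that smaller count. This
 * routine instead logs the failure and lets lpfc_sli_enable_intr() fall back
 * to MSI or INTx. The vector count below is the same constant used above.
 */
#if 0
	int rc, nvec = LPFC_MSIX_VECTORS;

	rc = pci_enable_msix(phba->pcidev, phba->msix_entries, nvec);
	if (rc > 0) {
		/* only rc vectors available; a retry with rc could succeed */
	}
	if (rc) {
		/* treat as failure and fall back to MSI/INTx (schematic) */
	}
#endif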
6768 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6769 * @phba: pointer to lpfc hba data structure.
6771 * This routine is invoked to release the MSI-X vectors and then disable the
6772 * MSI-X interrupt mode to device with SLI-3 interface spec.
6775 lpfc_sli_disable_msix(struct lpfc_hba *phba)
6779 /* Free up MSI-X multi-message vectors */
6780 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6781 free_irq(phba->msix_entries[i].vector, phba);
6783 pci_disable_msix(phba->pcidev);
6789 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6790 * @phba: pointer to lpfc hba data structure.
6792 * This routine is invoked to enable the MSI interrupt mode to device with
6793 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6794 * enable the MSI vector. The device driver is responsible for calling
6795 * request_irq() to register the MSI vector with an interrupt handler, which
6796 * is done in this function.
6800 * other values - error
6803 lpfc_sli_enable_msi(struct lpfc_hba *phba)
6807 rc = pci_enable_msi(phba->pcidev);
6809 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6810 "0462 PCI enable MSI mode success.\n");
6812 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6813 "0471 PCI enable MSI mode failed (%d)\n", rc);
6817 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6818 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6820 pci_disable_msi(phba->pcidev);
6821 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6822 "0478 MSI request_irq failed (%d)\n", rc);
6828 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
6829 * @phba: pointer to lpfc hba data structure.
6831 * This routine is invoked to disable the MSI interrupt mode to device with
6832 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
6833 * done request_irq() on before calling pci_disable_msi(). Failure to do so
6834 * results in a BUG_ON() and the device will be left with MSI enabled, leaking its vector.
6838 lpfc_sli_disable_msi(struct lpfc_hba *phba)
6840 free_irq(phba->pcidev->irq, phba);
6841 pci_disable_msi(phba->pcidev);
6846 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
6847 * @phba: pointer to lpfc hba data structure.
6849 * This routine is invoked to enable device interrupt and associate driver's
6850 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
6851 * spec. Depending on the interrupt mode configured in the driver, the driver
6852 * will try to fall back from the configured interrupt mode to an interrupt
6853 * mode which is supported by the platform, kernel, and device, in the order:
6855 * MSI-X -> MSI -> IRQ.
6859 * other values - error
6862 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
6864 uint32_t intr_mode = LPFC_INTR_ERROR;
6867 if (cfg_mode == 2) {
6868 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
6869 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
6871 /* Now, try to enable MSI-X interrupt mode */
6872 retval = lpfc_sli_enable_msix(phba);
6874 /* Indicate initialization to MSI-X mode */
6875 phba->intr_type = MSIX;
6881 /* Fallback to MSI if MSI-X initialization failed */
6882 if (cfg_mode >= 1 && phba->intr_type == NONE) {
6883 retval = lpfc_sli_enable_msi(phba);
6885 /* Indicate initialization to MSI mode */
6886 phba->intr_type = MSI;
6891 /* Fall back to INTx if both MSI-X/MSI initialization failed */
6892 if (phba->intr_type == NONE) {
6893 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
6894 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
6896 /* Indicate initialization to INTx mode */
6897 phba->intr_type = INTx;
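/*
 * Editor's note: illustrative sketch only, not part of the driver. A caller
 * passes its configured mode (phba->cfg_use_msi: 2 = MSI-X, 1 = MSI,
 * 0 = INTx) and receives the mode actually enabled, or LPFC_INTR_ERROR if
 * every mode failed. The failure handling below is schematic.
 */
#if 0
	uint32_t intr_mode;

	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
	if (intr_mode == LPFC_INTR_ERROR) {
		/* no usable interrupt mode; fail the HBA bring-up */
	}
	phba->intr_mode = intr_mode;
#endif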
6905 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
6906 * @phba: pointer to lpfc hba data structure.
6908 * This routine is invoked to disable device interrupt and disassociate the
6909 * driver's interrupt handler(s) from interrupt vector(s) to device with
6910 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
6911 * release the interrupt vector(s) for the message signaled interrupt.
6914 lpfc_sli_disable_intr(struct lpfc_hba *phba)
6916 /* Disable the currently initialized interrupt mode */
6917 if (phba->intr_type == MSIX)
6918 lpfc_sli_disable_msix(phba);
6919 else if (phba->intr_type == MSI)
6920 lpfc_sli_disable_msi(phba);
6921 else if (phba->intr_type == INTx)
6922 free_irq(phba->pcidev->irq, phba);
6924 /* Reset interrupt management states */
6925 phba->intr_type = NONE;
6926 phba->sli.slistat.sli_intr = 0;
6932 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
6933 * @phba: pointer to lpfc hba data structure.
6935 * This routine is invoked to enable the MSI-X interrupt vectors to device
6936 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
6937 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
6938 * enables either all or nothing, depending on the current availability of
6939 * PCI vector resources. The device driver is responsible for calling the
6940 * individual request_irq() to register each MSI-X vector with a interrupt
6941 * handler, which is done in this function. Note that later when device is
6942 * unloading, the driver should always call free_irq() on all MSI-X vectors
6943 * it has done request_irq() on before calling pci_disable_msix(). Failure
6944 * to do so results in a BUG_ON() and a device will be left with MSI-X
6945 * enabled and leaks its vectors.
6949 * other values - error
6952 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
6956 /* Set up MSI-X multi-message vectors */
6957 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6958 phba->sli4_hba.msix_entries[index].entry = index;
6960 /* Configure MSI-X capability structure */
6961 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
6962 phba->sli4_hba.cfg_eqn);
6964 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6965 "0484 PCI enable MSI-X failed (%d)\n", rc);
6968 /* Log MSI-X vector assignment */
6969 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
6970 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6971 "0489 MSI-X entry[%d]: vector=x%x "
6972 "message=%d\n", index,
6973 phba->sli4_hba.msix_entries[index].vector,
6974 phba->sli4_hba.msix_entries[index].entry);
6976 * Assign MSI-X vectors to interrupt handlers
6979 /* The first vector must be associated with the slow-path handler for MQ */
6980 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
6981 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
6982 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6984 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6985 "0485 MSI-X slow-path request_irq failed "
6990 /* The rest of the vector(s) are associated with fast-path handler(s) */
6991 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
6992 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
6993 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
6994 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
6995 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
6996 LPFC_FP_DRIVER_HANDLER_NAME,
6997 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
6999 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7000 "0486 MSI-X fast-path (%d) "
7001 "request_irq failed (%d)\n", index, rc);
7009 /* free the irq already requested */
7010 for (--index; index >= 1; index--)
7011 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7012 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7014 /* free the irq already requested */
7015 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7018 /* Unconfigure MSI-X capability structure */
7019 pci_disable_msix(phba->pcidev);
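/*
 * Editor's note: illustrative sketch only, not part of the driver. Each
 * fast-path vector above is registered with a per-EQ
 * &phba->sli4_hba.fcp_eq_hdl[] cookie, so a handler can recover which event
 * queue fired from its dev_id argument. The handler name below is
 * hypothetical, its body is schematic, and the fcp_eq_hdl structure type name
 * is assumed from the idx/phba fields assigned just above.
 */
#if 0
static irqreturn_t
example_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl = dev_id;
	struct lpfc_hba *phba = fcp_eq_hdl->phba;
	uint32_t fcp_eqidx = fcp_eq_hdl->idx;

	/* service phba->sli4_hba.fp_eq[fcp_eqidx] for this phba (schematic) */
	return IRQ_HANDLED;
}
#endif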
7024 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7025 * @phba: pointer to lpfc hba data structure.
7027 * This routine is invoked to release the MSI-X vectors and then disable the
7028 * MSI-X interrupt mode to device with SLI-4 interface spec.
7031 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7035 /* Free up MSI-X multi-message vectors */
7036 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7038 for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
7039 free_irq(phba->sli4_hba.msix_entries[index].vector,
7040 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7042 pci_disable_msix(phba->pcidev);
7048 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7049 * @phba: pointer to lpfc hba data structure.
7051 * This routine is invoked to enable the MSI interrupt mode to device with
7052 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7053 * to enable the MSI vector. The device driver is responsible for calling
7054 * request_irq() to register the MSI vector with an interrupt handler,
7055 * which is done in this function.
7059 * other values - error
7062 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7066 rc = pci_enable_msi(phba->pcidev);
7068 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7069 "0487 PCI enable MSI mode success.\n");
7071 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7072 "0488 PCI enable MSI mode failed (%d)\n", rc);
7076 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7077 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7079 pci_disable_msi(phba->pcidev);
7080 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7081 "0490 MSI request_irq failed (%d)\n", rc);
7084 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7085 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7086 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7093 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7094 * @phba: pointer to lpfc hba data structure.
7096 * This routine is invoked to disable the MSI interrupt mode to device with
7097 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
7098 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7099 * results in a BUG_ON() and the device will be left with MSI enabled, leaking its vector.
7103 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7105 free_irq(phba->pcidev->irq, phba);
7106 pci_disable_msi(phba->pcidev);
7111 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7112 * @phba: pointer to lpfc hba data structure.
7114 * This routine is invoked to enable device interrupt and associate driver's
7115 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7116 * interface spec. Depending on the interrupt mode configured in the driver,
7117 * the driver will try to fall back from the configured interrupt mode to an
7118 * interrupt mode which is supported by the platform, kernel, and device, in
7120 * the order: MSI-X -> MSI -> IRQ.
7124 * other values - error
7127 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7129 uint32_t intr_mode = LPFC_INTR_ERROR;
7132 if (cfg_mode == 2) {
7133 /* Preparation before conf_msi mbox cmd */
7136 /* Now, try to enable MSI-X interrupt mode */
7137 retval = lpfc_sli4_enable_msix(phba);
7139 /* Indicate initialization to MSI-X mode */
7140 phba->intr_type = MSIX;
7146 /* Fallback to MSI if MSI-X initialization failed */
7147 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7148 retval = lpfc_sli4_enable_msi(phba);
7150 /* Indicate initialization to MSI mode */
7151 phba->intr_type = MSI;
7156 /* Fall back to INTx if both MSI-X/MSI initialization failed */
7157 if (phba->intr_type == NONE) {
7158 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7159 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7161 /* Indicate initialization to INTx mode */
7162 phba->intr_type = INTx;
7164 for (index = 0; index < phba->cfg_fcp_eq_count;
7166 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7167 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7175 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7176 * @phba: pointer to lpfc hba data structure.
7178 * This routine is invoked to disable device interrupt and disassociate
7179 * the driver's interrupt handler(s) from interrupt vector(s) to device
7180 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7181 * will release the interrupt vector(s) for the message signaled interrupt.
7184 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7186 /* Disable the currently initialized interrupt mode */
7187 if (phba->intr_type == MSIX)
7188 lpfc_sli4_disable_msix(phba);
7189 else if (phba->intr_type == MSI)
7190 lpfc_sli4_disable_msi(phba);
7191 else if (phba->intr_type == INTx)
7192 free_irq(phba->pcidev->irq, phba);
7194 /* Reset interrupt management states */
7195 phba->intr_type = NONE;
7196 phba->sli.slistat.sli_intr = 0;
7202 * lpfc_unset_hba - Unset SLI3 hba device initialization
7203 * @phba: pointer to lpfc hba data structure.
7205 * This routine is invoked to unset the HBA device initialization steps to
7206 * a device with SLI-3 interface spec.
7209 lpfc_unset_hba(struct lpfc_hba *phba)
7211 struct lpfc_vport *vport = phba->pport;
7212 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7214 spin_lock_irq(shost->host_lock);
7215 vport->load_flag |= FC_UNLOADING;
7216 spin_unlock_irq(shost->host_lock);
7218 lpfc_stop_hba_timers(phba);
7220 phba->pport->work_port_events = 0;
7222 lpfc_sli_hba_down(phba);
7224 lpfc_sli_brdrestart(phba);
7226 lpfc_sli_disable_intr(phba);
7232 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7233 * @phba: pointer to lpfc hba data structure.
7235 * This routine is invoked to unset the HBA device initialization steps to
7236 * a device with SLI-4 interface spec.
7239 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7241 struct lpfc_vport *vport = phba->pport;
7242 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7244 spin_lock_irq(shost->host_lock);
7245 vport->load_flag |= FC_UNLOADING;
7246 spin_unlock_irq(shost->host_lock);
7248 phba->pport->work_port_events = 0;
7250 lpfc_sli4_hba_down(phba);
7252 lpfc_sli4_disable_intr(phba);
7258 * lpfc_sli4_hba_unset - Unset the fcoe hba
7259 * @phba: Pointer to HBA context object.
7261 * This function is called in the SLI4 code path to reset the HBA's FCoE
7262 * function. The caller is not required to hold any lock. This routine
7263 * issues PCI function reset mailbox command to reset the FCoE function.
7264 * At the end of the function, it calls lpfc_hba_down_post function to
7265 * free any pending commands.
7268 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7271 LPFC_MBOXQ_t *mboxq;
7273 lpfc_stop_hba_timers(phba);
7274 phba->sli4_hba.intr_enable = 0;
7277 * Gracefully wait out the potential current outstanding asynchronous
7281 /* First, block any pending async mailbox command from posted */
7282 spin_lock_irq(&phba->hbalock);
7283 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7284 spin_unlock_irq(&phba->hbalock);
7285 /* Now, trying to wait it out if we can */
7286 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7288 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7291 /* Forcefully release the outstanding mailbox command if timed out */
7292 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7293 spin_lock_irq(&phba->hbalock);
7294 mboxq = phba->sli.mbox_active;
7295 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7296 __lpfc_mbox_cmpl_put(phba, mboxq);
7297 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7298 phba->sli.mbox_active = NULL;
7299 spin_unlock_irq(&phba->hbalock);
7302 /* Tear down the queues in the HBA */
7303 lpfc_sli4_queue_unset(phba);
7305 /* Disable PCI subsystem interrupt */
7306 lpfc_sli4_disable_intr(phba);
7308 /* Stop kthread signal shall trigger work_done one more time */
7309 kthread_stop(phba->worker_thread);
7311 /* Stop the SLI4 device port */
7312 phba->pport->work_port_events = 0;
7316 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7317 * @phba: Pointer to HBA context object.
7318 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7320 * This function is called in the SLI4 code path to read the port's
7321 * sli4 capabilities.
7323 * This function may be called from any context that can block-wait
7324 * for the completion. The expectation is that this routine is called
7325 * typically from probe_one or from the online routine.
7328 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7331 struct lpfc_mqe *mqe;
7332 struct lpfc_pc_sli4_params *sli4_params;
7336 mqe = &mboxq->u.mqe;
7338 /* Read the port's SLI4 Parameters port capabilities */
7339 lpfc_sli4_params(mboxq);
7340 if (!phba->sli4_hba.intr_enable)
7341 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7343 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7344 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7350 sli4_params = &phba->sli4_hba.pc_sli4_params;
7351 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7352 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7353 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7354 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7355 &mqe->un.sli4_params);
7356 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7357 &mqe->un.sli4_params);
7358 sli4_params->proto_types = mqe->un.sli4_params.word3;
7359 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7360 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7361 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7362 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7363 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7364 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7365 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7366 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7367 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7368 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7369 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7370 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7371 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7372 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7373 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7374 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7375 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7376 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7377 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7378 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
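/*
 * Editor's note: illustrative sketch of the mailbox-issue pattern used above
 * and in several other SLI-4 routines in this file: poll for completion while
 * device interrupts are not yet enabled, otherwise block-wait with a
 * command-specific timeout. mboxq here stands for an already prepared mailbox
 * command; MBX_SLI4_CONFIG is an example timeout selector.
 */
#if 0
	int rc;
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
#endif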
7383 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7384 * @pdev: pointer to PCI device
7385 * @pid: pointer to PCI device identifier
7387 * This routine is to be called to attach a device with SLI-3 interface spec
7388 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7389 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7390 * information of the device and driver to see if the driver can
7391 * support this kind of device. If the match is successful, the driver core
7392 * invokes this routine. If this routine determines it can claim the HBA, it
7393 * does all the initialization that it needs to do to handle the HBA properly.
7396 * 0 - driver can claim the device
7397 * negative value - driver can not claim the device
7399 static int __devinit
7400 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7402 struct lpfc_hba *phba;
7403 struct lpfc_vport *vport = NULL;
7404 struct Scsi_Host *shost = NULL;
7406 uint32_t cfg_mode, intr_mode;
7408 /* Allocate memory for HBA structure */
7409 phba = lpfc_hba_alloc(pdev);
7413 /* Perform generic PCI device enabling operation */
7414 error = lpfc_enable_pci_dev(phba);
7416 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7417 "1401 Failed to enable pci device.\n");
7421 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
7422 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7424 goto out_disable_pci_dev;
7426 /* Set up SLI-3 specific device PCI memory space */
7427 error = lpfc_sli_pci_mem_setup(phba);
7429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7430 "1402 Failed to set up pci memory space.\n");
7431 goto out_disable_pci_dev;
7434 /* Set up phase-1 common device driver resources */
7435 error = lpfc_setup_driver_resource_phase1(phba);
7437 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7438 "1403 Failed to set up driver resource.\n");
7439 goto out_unset_pci_mem_s3;
7442 /* Set up SLI-3 specific device driver resources */
7443 error = lpfc_sli_driver_resource_setup(phba);
7445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7446 "1404 Failed to set up driver resource.\n");
7447 goto out_unset_pci_mem_s3;
7450 /* Initialize and populate the iocb list per host */
7451 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7453 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7454 "1405 Failed to initialize iocb list.\n");
7455 goto out_unset_driver_resource_s3;
7458 /* Set up common device driver resources */
7459 error = lpfc_setup_driver_resource_phase2(phba);
7461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7462 "1406 Failed to set up driver resource.\n");
7463 goto out_free_iocb_list;
7466 /* Create SCSI host to the physical port */
7467 error = lpfc_create_shost(phba);
7469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7470 "1407 Failed to create scsi host.\n");
7471 goto out_unset_driver_resource;
7474 /* Configure sysfs attributes */
7475 vport = phba->pport;
7476 error = lpfc_alloc_sysfs_attr(vport);
7478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7479 "1476 Failed to allocate sysfs attr\n");
7480 goto out_destroy_shost;
7483 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7484 /* Now, trying to enable interrupt and bring up the device */
7485 cfg_mode = phba->cfg_use_msi;
7487 /* Put device to a known state before enabling interrupt */
7488 lpfc_stop_port(phba);
7489 /* Configure and enable interrupt */
7490 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7491 if (intr_mode == LPFC_INTR_ERROR) {
7492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7493 "0431 Failed to enable interrupt.\n");
7495 goto out_free_sysfs_attr;
7497 /* SLI-3 HBA setup */
7498 if (lpfc_sli_hba_setup(phba)) {
7499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7500 "1477 Failed to set up hba\n");
7502 goto out_remove_device;
7505 /* Wait 50ms for the interrupts of previous mailbox commands */
7507 /* Check active interrupts on message signaled interrupts */
7508 if (intr_mode == 0 ||
7509 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7510 /* Log the current active interrupt mode */
7511 phba->intr_mode = intr_mode;
7512 lpfc_log_intr_mode(phba, intr_mode);
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7516 "0447 Configure interrupt mode (%d) "
7517 "failed active interrupt test.\n",
7519 /* Disable the current interrupt mode */
7520 lpfc_sli_disable_intr(phba);
7521 /* Try next level of interrupt mode */
7522 cfg_mode = --intr_mode;
7526 /* Perform post initialization setup */
7527 lpfc_post_init_setup(phba);
7529 /* Check if there are static vports to be created. */
7530 lpfc_create_static_vport(phba);
7535 lpfc_unset_hba(phba);
7536 out_free_sysfs_attr:
7537 lpfc_free_sysfs_attr(vport);
7539 lpfc_destroy_shost(phba);
7540 out_unset_driver_resource:
7541 lpfc_unset_driver_resource_phase2(phba);
7543 lpfc_free_iocb_list(phba);
7544 out_unset_driver_resource_s3:
7545 lpfc_sli_driver_resource_unset(phba);
7546 out_unset_pci_mem_s3:
7547 lpfc_sli_pci_mem_unset(phba);
7548 out_disable_pci_dev:
7549 lpfc_disable_pci_dev(phba);
7551 scsi_host_put(shost);
7553 lpfc_hba_free(phba);
7558 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7559 * @pdev: pointer to PCI device
7561 * This routine is to be called to detach a device with SLI-3 interface
7562 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7563 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7564 * device to be removed from the PCI subsystem properly.
7566 static void __devexit
7567 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7569 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7570 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7571 struct lpfc_vport **vports;
7572 struct lpfc_hba *phba = vport->phba;
7574 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7576 spin_lock_irq(&phba->hbalock);
7577 vport->load_flag |= FC_UNLOADING;
7578 spin_unlock_irq(&phba->hbalock);
7580 lpfc_free_sysfs_attr(vport);
7582 /* Release all the vports against this physical port */
7583 vports = lpfc_create_vport_work_array(phba);
7585 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7586 fc_vport_terminate(vports[i]->fc_vport);
7587 lpfc_destroy_vport_work_array(phba, vports);
7589 /* Remove FC host and then SCSI host with the physical port */
7590 fc_remove_host(shost);
7591 scsi_remove_host(shost);
7592 lpfc_cleanup(vport);
7595 * Bring down the SLI Layer. This step disables all interrupts,
7596 * clears the rings, discards all mailbox commands, and resets the HBA.
7600 /* HBA interrupt will be disabled after this call */
7601 lpfc_sli_hba_down(phba);
7602 /* Stop kthread signal shall trigger work_done one more time */
7603 kthread_stop(phba->worker_thread);
7604 /* Final cleanup of txcmplq and reset the HBA */
7605 lpfc_sli_brdrestart(phba);
7607 lpfc_stop_hba_timers(phba);
7608 spin_lock_irq(&phba->hbalock);
7609 list_del_init(&vport->listentry);
7610 spin_unlock_irq(&phba->hbalock);
7612 lpfc_debugfs_terminate(vport);
7614 /* Disable interrupt */
7615 lpfc_sli_disable_intr(phba);
7617 pci_set_drvdata(pdev, NULL);
7618 scsi_host_put(shost);
7621 * Call scsi_free before mem_free since scsi bufs are released to their
7622 * corresponding pools here.
7624 lpfc_scsi_free(phba);
7625 lpfc_mem_free_all(phba);
7627 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7628 phba->hbqslimp.virt, phba->hbqslimp.phys);
7630 /* Free resources associated with SLI2 interface */
7631 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7632 phba->slim2p.virt, phba->slim2p.phys);
7634 /* unmap adapter SLIM and Control Registers */
7635 iounmap(phba->ctrl_regs_memmap_p);
7636 iounmap(phba->slim_memmap_p);
7638 lpfc_hba_free(phba);
7640 pci_release_selected_regions(pdev, bars);
7641 pci_disable_device(pdev);
7645 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
7646 * @pdev: pointer to PCI device
7647 * @msg: power management message
7649 * This routine is to be called from the kernel's PCI subsystem to support
7650 * system Power Management (PM) to device with SLI-3 interface spec. When
7651 * PM invokes this method, it quiesces the device by stopping the driver's
7652 * worker thread for the device, turning off device's interrupt and DMA,
7653 * and bring the device offline. Note that as the driver implements the
7654 * minimum PM requirements to a power-aware driver's PM support for the
7655 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7656 * to the suspend() method call will be treated as SUSPEND and the driver will
7657 * fully reinitialize its device during resume() method call, the driver will
7658 * set device to PCI_D3hot state in PCI config space instead of setting it
7659 * according to the @msg provided by the PM.
7662 * 0 - driver suspended the device
7666 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7671 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7672 "0473 PCI device Power Management suspend.\n");
7674 /* Bring down the device */
7675 lpfc_offline_prep(phba);
7677 kthread_stop(phba->worker_thread);
7679 /* Disable interrupt from device */
7680 lpfc_sli_disable_intr(phba);
7682 /* Save device state to PCI config space */
7683 pci_save_state(pdev);
7684 pci_set_power_state(pdev, PCI_D3hot);
7690 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
7691 * @pdev: pointer to PCI device
7693 * This routine is to be called from the kernel's PCI subsystem to support
7694 * system Power Management (PM) to device with SLI-3 interface spec. When PM
7695 * invokes this method, it restores the device's PCI config space state and
7696 * fully reinitializes the device and brings it online. Note that, as the
7697 * driver implements only the minimum PM requirements for a power-aware driver's
7698 * PM support for suspend/resume -- all possible PM messages (SUSPEND, HIBERNATE,
7699 * FREEZE) to the suspend() method call are treated as SUSPEND and the
7700 * driver fully reinitializes its device during the resume() method call --
7701 * the device will be set to PCI_D0 directly in PCI config space before
7702 * restoring the state.
7705 * 0 - driver resumed the device
7709 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7711 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7712 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7716 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7717 "0452 PCI device Power Management resume.\n");
7719 /* Restore device state from PCI config space */
7720 pci_set_power_state(pdev, PCI_D0);
7721 pci_restore_state(pdev);
7724 * As the new kernel behavior of pci_restore_state() API call clears
7725 * device saved_state flag, need to save the restored state again.
7727 pci_save_state(pdev);
7729 if (pdev->is_busmaster)
7730 pci_set_master(pdev);
7732 /* Startup the kernel thread for this host adapter. */
7733 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7734 "lpfc_worker_%d", phba->brd_no);
7735 if (IS_ERR(phba->worker_thread)) {
7736 error = PTR_ERR(phba->worker_thread);
7737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7738 "0434 PM resume failed to start worker "
7739 "thread: error=x%x.\n", error);
7743 /* Configure and enable interrupt */
7744 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7745 if (intr_mode == LPFC_INTR_ERROR) {
7746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7747 "0430 PM resume Failed to enable interrupt\n");
7750 phba->intr_mode = intr_mode;
7752 /* Restart HBA and bring it online */
7753 lpfc_sli_brdrestart(phba);
7756 /* Log the current active interrupt mode */
7757 lpfc_log_intr_mode(phba, phba->intr_mode);
7763 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7764 * @phba: pointer to lpfc hba data structure.
7766 * This routine is called to prepare the SLI3 device for PCI slot recovery. It
7767 * aborts and stops all the on-going I/Os on the pci device.
7770 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7772 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7773 "2723 PCI channel I/O abort preparing for recovery\n");
7774 /* Prepare for bringing HBA offline */
7775 lpfc_offline_prep(phba);
7776 /* Clear sli active flag to prevent sysfs access to HBA */
7777 spin_lock_irq(&phba->hbalock);
7778 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
7779 spin_unlock_irq(&phba->hbalock);
7780 /* Stop and flush all I/Os and bring HBA offline */
7785 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7786 * @phba: pointer to lpfc hba data structure.
7788 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7789 * disables the device interrupt and pci device, and aborts the internal FCP
7793 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7795 struct lpfc_sli *psli = &phba->sli;
7796 struct lpfc_sli_ring *pring;
7798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7799 "2710 PCI channel disable preparing for reset\n");
7800 /* Disable interrupt and pci device */
7801 lpfc_sli_disable_intr(phba);
7802 pci_disable_device(phba->pcidev);
7804 * There may be I/Os dropped by the firmware.
7805 * Error the iocbs (I/Os) on the txcmplq and let the SCSI layer
7806 * retry them after re-establishing the link.
7808 pring = &psli->ring[psli->fcp_ring];
7809 lpfc_sli_abort_iocb_ring(phba, pring);
7813 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7814 * @phba: pointer to lpfc hba data structure.
7816 * This routine is called to prepare the SLI3 device for permanent PCI slot
7817 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7821 lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7823 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7824 "2711 PCI channel permanent disable for failure\n");
7825 /* Clean up all driver's outstanding SCSI I/Os */
7826 lpfc_sli_flush_fcp_rings(phba);
7830 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7831 * @pdev: pointer to PCI device.
7832 * @state: the current PCI connection state.
7834 * This routine is called from the PCI subsystem for I/O error handling to
7835 * device with SLI-3 interface spec. This function is called by the PCI
7836 * subsystem after a PCI bus error affecting this device has been detected.
7837 * When this function is invoked, it will need to stop all the I/Os and
7838 * interrupt(s) to the device. Once that is done, it will return
7839 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7843 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7844 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7845 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7847 static pci_ers_result_t
7848 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7850 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7851 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7853 /* Block all SCSI devices' I/Os on the host */
7854 lpfc_scsi_dev_block(phba);
7857 case pci_channel_io_normal:
7858 /* Non-fatal error, prepare for recovery */
7859 lpfc_sli_prep_dev_for_recover(phba);
7860 return PCI_ERS_RESULT_CAN_RECOVER;
7861 case pci_channel_io_frozen:
7862 /* Fatal error, prepare for slot reset */
7863 lpfc_sli_prep_dev_for_reset(phba);
7864 return PCI_ERS_RESULT_NEED_RESET;
7865 case pci_channel_io_perm_failure:
7866 /* Permanent failure, prepare for device down */
7867 lpfc_prep_dev_for_perm_failure(phba);
7868 return PCI_ERS_RESULT_DISCONNECT;
7870 /* Unknown state, prepare and request slot reset */
7871 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7872 "0472 Unknown PCI error state: x%x\n", state);
7873 lpfc_sli_prep_dev_for_reset(phba);
7874 return PCI_ERS_RESULT_NEED_RESET;
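/*
 * Editor's note: illustrative sketch only, not part of the driver source
 * shown here. Error-handling callbacks such as the routine above are normally
 * published to the PCI core through a struct pci_error_handlers hung off the
 * driver's struct pci_driver. The structure name below is hypothetical; the
 * three callbacks are the SLI-3 methods documented in this file.
 */
#if 0
static struct pci_error_handlers example_err_handler = {
	.error_detected	= lpfc_io_error_detected_s3,
	.slot_reset	= lpfc_io_slot_reset_s3,
	.resume		= lpfc_io_resume_s3,
};
#endif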
7879 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
7880 * @pdev: pointer to PCI device.
7882 * This routine is called from the PCI subsystem for error handling to
7883 * device with SLI-3 interface spec. This is called after PCI bus has been
7884 * reset to restart the PCI card from scratch, as if from a cold-boot.
7885 * During the PCI subsystem error recovery, after driver returns
7886 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7887 * recovery and then call this routine before calling the .resume method
7888 * to recover the device. This function will initialize the HBA device,
7889 * enable the interrupt, but it will just put the HBA to offline state
7890 * without passing any I/O traffic.
7893 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7894 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7896 static pci_ers_result_t
7897 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
7899 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7900 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7901 struct lpfc_sli *psli = &phba->sli;
7904 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
7905 if (pci_enable_device_mem(pdev)) {
7906 printk(KERN_ERR "lpfc: Cannot re-enable "
7907 "PCI device after reset.\n");
7908 return PCI_ERS_RESULT_DISCONNECT;
7911 pci_restore_state(pdev);
7914 * As the new kernel behavior of pci_restore_state() API call clears
7915 * device saved_state flag, need to save the restored state again.
7917 pci_save_state(pdev);
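/* Re-enable bus mastering only if it was enabled before the slot reset */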
7919 if (pdev->is_busmaster)
7920 pci_set_master(pdev);
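/* Mark the SLI layer inactive so the board restart below reinitializes it */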
7922 spin_lock_irq(&phba->hbalock);
7923 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7924 spin_unlock_irq(&phba->hbalock);
7926 /* Configure and enable interrupt */
7927 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
7928 if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0427 Cannot re-enable interrupt after "
"slot reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
} else
phba->intr_mode = intr_mode;
7936 /* Take device offline; this will perform cleanup */
7938 lpfc_sli_brdrestart(phba);
7940 /* Log the current active interrupt mode */
lpfc_log_intr_mode(phba, phba->intr_mode);

return PCI_ERS_RESULT_RECOVERED;
}
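/*
 * Note: the port is left offline here; once the PCI core decides recovery
 * succeeded it invokes the .resume handler (lpfc_io_resume_s3), which brings
 * the port back online.
 */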
7947 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
7948 * @pdev: pointer to PCI device
 * This routine is called from the PCI subsystem for error handling on a device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is OK to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
7959 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7960 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
/* Bring the device online */
lpfc_online(phba);

/* Clean up Advanced Error Reporting (AER) if needed */
7966 if (phba->hba_flag & HBA_AER_ENABLED)
pci_cleanup_aer_uncorrect_error_status(pdev);
}
7971 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7972 * @phba: pointer to lpfc hba data structure.
7974 * returns the number of ELS/CT IOCBs to reserve
7977 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7979 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7981 if (phba->sli_rev == LPFC_SLI_REV4) {
7984 else if (max_xri <= 256)
7986 else if (max_xri <= 512)
7988 else if (max_xri <= 1024)
7997 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7998 * @pdev: pointer to PCI device
7999 * @pid: pointer to PCI device identifier
 * This routine is called from the kernel's PCI subsystem for devices with the
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver can support this
 * kind of device. If the match is successful, the driver core invokes this
 * routine. If this routine determines it can claim the HBA, it does all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return codes
8011 * 0 - driver can claim the device
8012 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
8017 struct lpfc_hba *phba;
8018 struct lpfc_vport *vport = NULL;
struct Scsi_Host *shost = NULL;
int error;
uint32_t cfg_mode, intr_mode;
int mcnt;
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
if (!phba)
return -ENOMEM;
8029 /* Perform generic PCI device enabling operation */
8030 error = lpfc_enable_pci_dev(phba);
8032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8033 "1409 Failed to enable pci device.\n");
8037 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
8038 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8040 goto out_disable_pci_dev;
8042 /* Set up SLI-4 specific device PCI memory space */
8043 error = lpfc_sli4_pci_mem_setup(phba);
8045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8046 "1410 Failed to set up pci memory space.\n");
8047 goto out_disable_pci_dev;
8050 /* Set up phase-1 common device driver resources */
8051 error = lpfc_setup_driver_resource_phase1(phba);
8053 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8054 "1411 Failed to set up driver resource.\n");
8055 goto out_unset_pci_mem_s4;
8058 /* Set up SLI-4 Specific device driver resources */
8059 error = lpfc_sli4_driver_resource_setup(phba);
8061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8062 "1412 Failed to set up driver resource.\n");
8063 goto out_unset_pci_mem_s4;
8066 /* Initialize and populate the iocb list per host */
8067 error = lpfc_init_iocb_list(phba,
8068 phba->sli4_hba.max_cfg_param.max_xri);
8070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8071 "1413 Failed to initialize iocb list.\n");
8072 goto out_unset_driver_resource_s4;
8075 /* Set up common device driver resources */
8076 error = lpfc_setup_driver_resource_phase2(phba);
8078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8079 "1414 Failed to set up driver resource.\n");
8080 goto out_free_iocb_list;
8083 /* Create SCSI host to the physical port */
8084 error = lpfc_create_shost(phba);
8086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8087 "1415 Failed to create scsi host.\n");
8088 goto out_unset_driver_resource;
8091 /* Configure sysfs attributes */
8092 vport = phba->pport;
8093 error = lpfc_alloc_sysfs_attr(vport);
8095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8096 "1416 Failed to allocate sysfs attr\n");
8097 goto out_destroy_shost;
8100 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8101 /* Now, trying to enable interrupt and bring up the device */
8102 cfg_mode = phba->cfg_use_msi;
while (true) {
/* Put device to a known state before enabling interrupt */
lpfc_stop_port(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0426 Failed to enable interrupt.\n");
error = -ENODEV;
goto out_free_sysfs_attr;
}
/* Default to single FCP EQ for non-MSI-X */
if (phba->intr_type != MSIX)
phba->cfg_fcp_eq_count = 1;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1421 Failed to set up hba\n");
error = -ENODEV;
goto out_disable_intr;
}

/* Send NOP mbx cmds for non-INTx mode active interrupt test */
if (intr_mode != 0)
mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
LPFC_ACT_INTR_CNT);

/* Check active interrupts received only for MSI/MSI-X */
if (intr_mode == 0 ||
phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
/* Log the current active interrupt mode */
phba->intr_mode = intr_mode;
lpfc_log_intr_mode(phba, intr_mode);
break;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0451 Configure interrupt mode (%d) "
"failed active interrupt test.\n",
intr_mode);
/* Unset the previous SLI-4 HBA setup */
lpfc_sli4_unset_hba(phba);
/* Try next level of interrupt mode */
cfg_mode = --intr_mode;
}
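/*
 * Note: intr_mode is 2 (MSI-X), 1 (MSI) or 0 (INTx), so decrementing it on a
 * failed active-interrupt test retries the setup with the next, less capable
 * interrupt mode until INTx is reached.
 */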
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);

return 0;

out_disable_intr:
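/* Error unwind: each label below releases what the corresponding earlier probe step set up, in reverse order */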
8157 lpfc_sli4_disable_intr(phba);
8158 out_free_sysfs_attr:
8159 lpfc_free_sysfs_attr(vport);
out_destroy_shost:
lpfc_destroy_shost(phba);
8162 out_unset_driver_resource:
8163 lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
lpfc_free_iocb_list(phba);
8166 out_unset_driver_resource_s4:
8167 lpfc_sli4_driver_resource_unset(phba);
8168 out_unset_pci_mem_s4:
8169 lpfc_sli4_pci_mem_unset(phba);
8170 out_disable_pci_dev:
8171 lpfc_disable_pci_dev(phba);
scsi_host_put(shost);
lpfc_hba_free(phba);

return error;
}
8180 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8181 * @pdev: pointer to PCI device
 * This routine is called from the kernel's PCI subsystem for devices with the
8184 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8185 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8186 * device to be removed from the PCI subsystem properly.
8188 static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
8191 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8192 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8193 struct lpfc_vport **vports;
struct lpfc_hba *phba = vport->phba;
int i;
8197 /* Mark the device unloading flag */
8198 spin_lock_irq(&phba->hbalock);
8199 vport->load_flag |= FC_UNLOADING;
8200 spin_unlock_irq(&phba->hbalock);
8202 /* Free the HBA sysfs attributes */
8203 lpfc_free_sysfs_attr(vport);
8205 /* Release all the vports against this physical port */
8206 vports = lpfc_create_vport_work_array(phba);
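/* vports[0] is the physical port; only true vports (index 1 and up) are terminated here */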
if (vports != NULL)
for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8209 fc_vport_terminate(vports[i]->fc_vport);
8210 lpfc_destroy_vport_work_array(phba, vports);
8212 /* Remove FC host and then SCSI host with the physical port */
8213 fc_remove_host(shost);
8214 scsi_remove_host(shost);
8216 /* Perform cleanup on the physical port */
8217 lpfc_cleanup(vport);
8220 * Bring down the SLI Layer. This step disables all interrupts,
8221 * clears the rings, discards all mailbox commands, and resets
8222 * the HBA FCoE function.
8224 lpfc_debugfs_terminate(vport);
8225 lpfc_sli4_hba_unset(phba);
8227 spin_lock_irq(&phba->hbalock);
8228 list_del_init(&vport->listentry);
8229 spin_unlock_irq(&phba->hbalock);
8231 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
8232 * buffers are released to their corresponding pools here.
8234 lpfc_scsi_free(phba);
8235 lpfc_sli4_driver_resource_unset(phba);
8237 /* Unmap adapter Control and Doorbell registers */
8238 lpfc_sli4_pci_mem_unset(phba);
8240 /* Release PCI resources and disable device's PCI function */
8241 scsi_host_put(shost);
8242 lpfc_disable_pci_dev(phba);
/* Finally, free the driver's device data structure */
lpfc_hba_free(phba);

return;
}
8251 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
8252 * @pdev: pointer to PCI device
8253 * @msg: power management message
8255 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for devices with the SLI-4 interface spec. When PM invokes
8257 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off the device's interrupt and DMA, and bringing
8259 * the device offline. Note that as the driver implements the minimum PM
8260 * requirements to a power-aware driver's PM support for suspend/resume -- all
8261 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
8262 * method call will be treated as SUSPEND and the driver will fully
8263 * reinitialize its device during resume() method call, the driver will set
8264 * device to PCI_D3hot state in PCI config space instead of setting it
8265 * according to the @msg provided by the PM.
8268 * 0 - driver suspended the device
8272 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8274 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8275 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8277 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8278 "0298 PCI device Power Management suspend.\n");
8280 /* Bring down the device */
8281 lpfc_offline_prep(phba);
8283 kthread_stop(phba->worker_thread);
8285 /* Disable interrupt from device */
8286 lpfc_sli4_disable_intr(phba);
8288 /* Save device state to PCI config space */
pci_save_state(pdev);
pci_set_power_state(pdev, PCI_D3hot);

return 0;
}
8296 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
8297 * @pdev: pointer to PCI device
8299 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for devices with the SLI-4 interface spec. When PM invokes
8301 * this method, it restores the device's PCI config space state and fully
8302 * reinitializes the device and brings it online. Note that as the driver
8303 * implements the minimum PM requirements to a power-aware driver's PM for
8304 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8305 * to the suspend() method call will be treated as SUSPEND and the driver
8306 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the state.
 *
 * Return codes
 * 	0 - driver resumed the device
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
uint32_t intr_mode;
int error;
8322 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8323 "0292 PCI device Power Management resume.\n");
8325 /* Restore device state from PCI config space */
8326 pci_set_power_state(pdev, PCI_D0);
8327 pci_restore_state(pdev);
8330 * As the new kernel behavior of pci_restore_state() API call clears
8331 * device saved_state flag, need to save the restored state again.
8333 pci_save_state(pdev);
8335 if (pdev->is_busmaster)
8336 pci_set_master(pdev);
8338 /* Startup the kernel thread for this host adapter. */
8339 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8340 "lpfc_worker_%d", phba->brd_no);
8341 if (IS_ERR(phba->worker_thread)) {
8342 error = PTR_ERR(phba->worker_thread);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0293 PM resume failed to start worker "
"thread: error=x%x.\n", error);
return error;
}
8349 /* Configure and enable interrupt */
8350 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8351 if (intr_mode == LPFC_INTR_ERROR) {
8352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8353 "0294 PM resume Failed to enable interrupt\n");
8356 phba->intr_mode = intr_mode;
8358 /* Restart HBA and bring it online */
lpfc_sli_brdrestart(phba);
lpfc_online(phba);
/* Log the current active interrupt mode */
lpfc_log_intr_mode(phba, phba->intr_mode);
return 0;
}
8369 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
8370 * @pdev: pointer to PCI device.
8371 * @state: the current PCI connection state.
8373 * This routine is called from the PCI subsystem for error handling to device
8374 * with SLI-4 interface spec. This function is called by the PCI subsystem
8375 * after a PCI bus error affecting this device has been detected. When this
8376 * function is invoked, it will need to stop all the I/Os and interrupt(s)
8377 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
8378 * for the PCI subsystem to perform proper recovery as desired.
8381 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8382 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8384 static pci_ers_result_t
8385 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8387 return PCI_ERS_RESULT_NEED_RESET;
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
8392 * @pdev: pointer to PCI device.
8394 * This routine is called from the PCI subsystem for error handling to device
8395 * with SLI-4 interface spec. It is called after PCI bus has been reset to
8396 * restart the PCI card from scratch, as if from a cold-boot. During the
8397 * PCI subsystem error recovery, after the driver returns
8398 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8399 * recovery and then call this routine before calling the .resume method to
8400 * recover the device. This function will initialize the HBA device, enable
8401 * the interrupt, but it will just put the HBA to offline state without
8402 * passing any I/O traffic.
8405 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
8406 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8408 static pci_ers_result_t
8409 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8411 return PCI_ERS_RESULT_RECOVERED;
8415 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
8416 * @pdev: pointer to PCI device
8418 * This routine is called from the PCI subsystem for error handling to device
8419 * with SLI-4 interface spec. It is called when kernel error recovery tells
8420 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8421 * error recovery. After this call, traffic can start to flow from this device
8425 lpfc_io_resume_s4(struct pci_dev *pdev)
8431 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
8432 * @pdev: pointer to PCI device
8433 * @pid: pointer to PCI device identifier
8435 * This routine is to be registered to the kernel's PCI subsystem. When an
8436 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
8437 * at PCI device-specific information of the device and driver to see if the
 * driver can support this kind of device. If the match is
8439 * successful, the driver core invokes this routine. This routine dispatches
8440 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
8441 * do all the initialization that it needs to do to handle the HBA device
8445 * 0 - driver can claim the device
8446 * negative value - driver can not claim the device
8448 static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
int rc;
struct lpfc_sli_intf intf;
if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
return -ENODEV;
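/*
 * The SLI_INTF register is only implemented by SLI-4 (PCI device group
 * LPFC_PCI_DEV_OC) adapters; a valid readback with an SLI-4 revision selects
 * the SLI-4 probe path, anything else is driven through the SLI-3 path.
 */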
8457 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
8458 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
rc = lpfc_pci_probe_one_s4(pdev, pid);
else
rc = lpfc_pci_probe_one_s3(pdev, pid);

return rc;
}
8467 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
8468 * @pdev: pointer to PCI device
8470 * This routine is to be registered to the kernel's PCI subsystem. When an
8471 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
8472 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
8473 * remove routine, which will perform all the necessary cleanup for the
8474 * device to be removed from the PCI subsystem properly.
8476 static void __devexit
8477 lpfc_pci_remove_one(struct pci_dev *pdev)
8479 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8480 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8482 switch (phba->pci_dev_grp) {
8483 case LPFC_PCI_DEV_LP:
8484 lpfc_pci_remove_one_s3(pdev);
8486 case LPFC_PCI_DEV_OC:
8487 lpfc_pci_remove_one_s4(pdev);
8490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8491 "1424 Invalid PCI device group: 0x%x\n",
8499 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
8500 * @pdev: pointer to PCI device
8501 * @msg: power management message
8503 * This routine is to be registered to the kernel's PCI subsystem to support
8504 * system Power Management (PM). When PM invokes this method, it dispatches
8505 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
8506 * suspend the device.
8509 * 0 - driver suspended the device
8513 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
8515 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8516 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8519 switch (phba->pci_dev_grp) {
8520 case LPFC_PCI_DEV_LP:
8521 rc = lpfc_pci_suspend_one_s3(pdev, msg);
8523 case LPFC_PCI_DEV_OC:
8524 rc = lpfc_pci_suspend_one_s4(pdev, msg);
8527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8528 "1425 Invalid PCI device group: 0x%x\n",
8536 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
8537 * @pdev: pointer to PCI device
8539 * This routine is to be registered to the kernel's PCI subsystem to support
8540 * system Power Management (PM). When PM invokes this method, it dispatches
8541 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
8542 * resume the device.
 * 0 - driver resumed the device
8549 lpfc_pci_resume_one(struct pci_dev *pdev)
8551 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8552 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8555 switch (phba->pci_dev_grp) {
8556 case LPFC_PCI_DEV_LP:
8557 rc = lpfc_pci_resume_one_s3(pdev);
8559 case LPFC_PCI_DEV_OC:
8560 rc = lpfc_pci_resume_one_s4(pdev);
8563 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8564 "1426 Invalid PCI device group: 0x%x\n",
8572 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
8573 * @pdev: pointer to PCI device.
8574 * @state: the current PCI connection state.
8576 * This routine is registered to the PCI subsystem for error handling. This
8577 * function is called by the PCI subsystem after a PCI bus error affecting
8578 * this device has been detected. When this routine is invoked, it dispatches
8579 * the action to the proper SLI-3 or SLI-4 device error detected handling
8580 * routine, which will perform the proper error detected operation.
8583 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8584 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8586 static pci_ers_result_t
8587 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8589 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8590 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8591 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8593 switch (phba->pci_dev_grp) {
8594 case LPFC_PCI_DEV_LP:
8595 rc = lpfc_io_error_detected_s3(pdev, state);
8597 case LPFC_PCI_DEV_OC:
8598 rc = lpfc_io_error_detected_s4(pdev, state);
8601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8602 "1427 Invalid PCI device group: 0x%x\n",
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
8611 * @pdev: pointer to PCI device.
8613 * This routine is registered to the PCI subsystem for error handling. This
8614 * function is called after PCI bus has been reset to restart the PCI card
8615 * from scratch, as if from a cold-boot. When this routine is invoked, it
8616 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
8617 * routine, which will perform the proper device reset.
8620 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
8621 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8623 static pci_ers_result_t
8624 lpfc_io_slot_reset(struct pci_dev *pdev)
8626 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8627 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8628 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
8630 switch (phba->pci_dev_grp) {
8631 case LPFC_PCI_DEV_LP:
8632 rc = lpfc_io_slot_reset_s3(pdev);
8634 case LPFC_PCI_DEV_OC:
8635 rc = lpfc_io_slot_reset_s4(pdev);
8638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8639 "1428 Invalid PCI device group: 0x%x\n",
8647 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
8648 * @pdev: pointer to PCI device
8650 * This routine is registered to the PCI subsystem for error handling. It
8651 * is called when kernel error recovery tells the lpfc driver that it is
8652 * OK to resume normal PCI operation after PCI bus error recovery. When
8653 * this routine is invoked, it dispatches the action to the proper SLI-3
8654 * or SLI-4 device io_resume routine, which will resume the device operation.
8657 lpfc_io_resume(struct pci_dev *pdev)
8659 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8660 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8662 switch (phba->pci_dev_grp) {
8663 case LPFC_PCI_DEV_LP:
8664 lpfc_io_resume_s3(pdev);
8666 case LPFC_PCI_DEV_OC:
8667 lpfc_io_resume_s4(pdev);
8670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8671 "1429 Invalid PCI device group: 0x%x\n",
8678 static struct pci_device_id lpfc_id_table[] = {
8679 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
8680 PCI_ANY_ID, PCI_ANY_ID, },
8681 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
8682 PCI_ANY_ID, PCI_ANY_ID, },
8683 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
8684 PCI_ANY_ID, PCI_ANY_ID, },
8685 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
8686 PCI_ANY_ID, PCI_ANY_ID, },
8687 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
8688 PCI_ANY_ID, PCI_ANY_ID, },
8689 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
8690 PCI_ANY_ID, PCI_ANY_ID, },
8691 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
8692 PCI_ANY_ID, PCI_ANY_ID, },
8693 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
8694 PCI_ANY_ID, PCI_ANY_ID, },
8695 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
8696 PCI_ANY_ID, PCI_ANY_ID, },
8697 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
8698 PCI_ANY_ID, PCI_ANY_ID, },
8699 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
8700 PCI_ANY_ID, PCI_ANY_ID, },
8701 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
8702 PCI_ANY_ID, PCI_ANY_ID, },
8703 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
8704 PCI_ANY_ID, PCI_ANY_ID, },
8705 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
8706 PCI_ANY_ID, PCI_ANY_ID, },
8707 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
8708 PCI_ANY_ID, PCI_ANY_ID, },
8709 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
8710 PCI_ANY_ID, PCI_ANY_ID, },
8711 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
8712 PCI_ANY_ID, PCI_ANY_ID, },
8713 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
8714 PCI_ANY_ID, PCI_ANY_ID, },
8715 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
8716 PCI_ANY_ID, PCI_ANY_ID, },
8717 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
8718 PCI_ANY_ID, PCI_ANY_ID, },
8719 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
8720 PCI_ANY_ID, PCI_ANY_ID, },
8721 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
8722 PCI_ANY_ID, PCI_ANY_ID, },
8723 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
8724 PCI_ANY_ID, PCI_ANY_ID, },
8725 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
8726 PCI_ANY_ID, PCI_ANY_ID, },
8727 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
8728 PCI_ANY_ID, PCI_ANY_ID, },
8729 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
8730 PCI_ANY_ID, PCI_ANY_ID, },
8731 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
8732 PCI_ANY_ID, PCI_ANY_ID, },
8733 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
8734 PCI_ANY_ID, PCI_ANY_ID, },
8735 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
8736 PCI_ANY_ID, PCI_ANY_ID, },
8737 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
8738 PCI_ANY_ID, PCI_ANY_ID, },
8739 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
8740 PCI_ANY_ID, PCI_ANY_ID, },
8741 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
8742 PCI_ANY_ID, PCI_ANY_ID, },
8743 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
8744 PCI_ANY_ID, PCI_ANY_ID, },
8745 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
8746 PCI_ANY_ID, PCI_ANY_ID, },
8747 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
8748 PCI_ANY_ID, PCI_ANY_ID, },
8749 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
8750 PCI_ANY_ID, PCI_ANY_ID, },
8751 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
8752 PCI_ANY_ID, PCI_ANY_ID, },
8753 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
8754 PCI_ANY_ID, PCI_ANY_ID, },
8755 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
8756 PCI_ANY_ID, PCI_ANY_ID, },
8757 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
8762 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
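/*
 * Exporting the PCI ID table above lets udev/modprobe autoload the lpfc
 * module when a matching adapter is discovered on the PCI bus.
 */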
8764 static struct pci_error_handlers lpfc_err_handler = {
8765 .error_detected = lpfc_io_error_detected,
8766 .slot_reset = lpfc_io_slot_reset,
.resume = lpfc_io_resume,
};
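/*
 * Rough recovery flow (for reference): on an uncorrectable PCI error the PCI
 * core first calls .error_detected; if a reset was requested it calls
 * .slot_reset after the link/slot has been reset, and finally .resume once
 * normal I/O may restart.
 */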
8770 static struct pci_driver lpfc_driver = {
8771 .name = LPFC_DRIVER_NAME,
8772 .id_table = lpfc_id_table,
8773 .probe = lpfc_pci_probe_one,
8774 .remove = __devexit_p(lpfc_pci_remove_one),
8775 .suspend = lpfc_pci_suspend_one,
8776 .resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
};
8781 * lpfc_init - lpfc module initialization routine
8783 * This routine is to be invoked when the lpfc module is loaded into the
8784 * kernel. The special kernel macro module_init() is used to indicate the
8785 * role of this routine to the kernel as lpfc module entry point.
 * -ENOMEM - FC attach transport failed
 * all others - failed
 **/
static int __init
lpfc_init(void)
{
int error = 0;

printk(LPFC_MODULE_DESC "\n");
8798 printk(LPFC_COPYRIGHT "\n");
8800 if (lpfc_enable_npiv) {
8801 lpfc_transport_functions.vport_create = lpfc_vport_create;
8802 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
8804 lpfc_transport_template =
8805 fc_attach_transport(&lpfc_transport_functions);
if (lpfc_transport_template == NULL)
return -ENOMEM;
8808 if (lpfc_enable_npiv) {
lpfc_vport_transport_template =
fc_attach_transport(&lpfc_vport_transport_functions);
if (lpfc_vport_transport_template == NULL) {
fc_release_transport(lpfc_transport_template);
return -ENOMEM;
}
}
error = pci_register_driver(&lpfc_driver);
if (error) {
fc_release_transport(lpfc_transport_template);
if (lpfc_enable_npiv)
fc_release_transport(lpfc_vport_transport_template);
}

return error;
}
8827 * lpfc_exit - lpfc module removal routine
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
pci_unregister_driver(&lpfc_driver);
8837 fc_release_transport(lpfc_transport_template);
8838 if (lpfc_enable_npiv)
8839 fc_release_transport(lpfc_vport_transport_template);
8840 if (_dump_buf_data) {
8841 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
8842 "_dump_buf_data at 0x%p\n",
8843 (1L << _dump_buf_data_order), _dump_buf_data);
free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
}
8847 if (_dump_buf_dif) {
8848 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
8849 "_dump_buf_dif at 0x%p\n",
8850 (1L << _dump_buf_dif_order), _dump_buf_dif);
8851 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8855 module_init(lpfc_init);
8856 module_exit(lpfc_exit);
8857 MODULE_LICENSE("GPL");
8858 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
8859 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
8860 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);