#include <linux/reboot.h> /* notifier code */
#include <linux/sched.h>
#include <linux/workqueue.h>
+#include <linux/sort.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
#include "mptbase.h"
#include "mptscsih.h"
module_param(mpt_pq_filter, int, 0);
MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
+#define MPTFC_DEV_LOSS_TMO (60)
+static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */
+module_param(mptfc_dev_loss_tmo, int, 0);
+MODULE_PARM_DESC(mptfc_dev_loss_tmo, " Initial time the driver programs the "
+				"transport to wait for an rport to "
+				"return following a device loss event. "
+				"Default=60.");
+
static int mptfcDoneCtx = -1;
static int mptfcTaskCtx = -1;
static int mptfcInternalCtx = -1; /* Used only for internal commands */
+int mptfc_slave_alloc(struct scsi_device *device);
+static int mptfc_qcmd(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *));
+
+static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
+static void __devexit mptfc_remove(struct pci_dev *pdev);
+
static struct scsi_host_template mptfc_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptfc",
.proc_info = mptscsih_proc_info,
.name = "MPT FC Host",
.info = mptscsih_info,
- .queuecommand = mptscsih_qcmd,
+ .queuecommand = mptfc_qcmd,
.target_alloc = mptscsih_target_alloc,
- .slave_alloc = mptscsih_slave_alloc,
+ .slave_alloc = mptfc_slave_alloc,
.slave_configure = mptscsih_slave_configure,
.target_destroy = mptscsih_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
};
MODULE_DEVICE_TABLE(pci, mptfc_pci_table);
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static struct scsi_transport_template *mptfc_transport_template = NULL;
+
+struct fc_function_template mptfc_transport_functions = {
+	.dd_fcrport_size = 8,	/* holds a struct mptfc_rport_info pointer */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_port_id = 1,
+ .show_rport_supported_classes = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+};
+
+/* FIXME! The values controlling the firmware RESCAN event
+ * need to be set low for dev_loss_tmo to work as expected.
+ * Currently the firmware doesn't notify the driver of a
+ * RESCAN event until some number of seconds have elapsed.
+ * That value can be set via lsiutil.
+ */
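+/*
+ * fc transport callback: set the rport's dev_loss_tmo, falling back to
+ * the module-wide default if a zero timeout is requested.
+ */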
+static void
+mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ if (timeout > 0)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = mptfc_dev_loss_tmo;
+}
+
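+/*
+ * sort() comparator for FC Device Page 0 pointers: order by bus, then
+ * by target ID.
+ */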
+static int
+mptfc_FcDevPage0_cmp_func(const void *a, const void *b)
+{
+ FCDevicePage0_t **aa = (FCDevicePage0_t **)a;
+ FCDevicePage0_t **bb = (FCDevicePage0_t **)b;
+
+ if ((*aa)->CurrentBus == (*bb)->CurrentBus) {
+ if ((*aa)->CurrentTargetID == (*bb)->CurrentTargetID)
+ return 0;
+ if ((*aa)->CurrentTargetID < (*bb)->CurrentTargetID)
+ return -1;
+ return 1;
+ }
+ if ((*aa)->CurrentBus < (*bb)->CurrentBus)
+ return -1;
+ return 1;
+}
+
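+/*
+ * Gather FC Device Page 0 data for every device the IOC reports, sort
+ * it by bus/target ID, then call @func(ioc, ioc_port, page0) once per
+ * device.
+ */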
+static int
+mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
+ void(*func)(MPT_ADAPTER *ioc,int channel, FCDevicePage0_t *arg))
+{
+ ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ FCDevicePage0_t *ppage0_alloc, *fc;
+ dma_addr_t page0_dma;
+ int data_sz;
+ int ii;
+
+ FCDevicePage0_t *p0_array=NULL, *p_p0;
+ FCDevicePage0_t **pp0_array=NULL, **p_pp0;
+
+ int rc = -ENOMEM;
+ U32 port_id = 0xffffff;
+ int num_targ = 0;
+ int max_bus = ioc->facts.MaxBuses;
+ int max_targ = ioc->facts.MaxDevices;
+
+ if (max_bus == 0 || max_targ == 0)
+ goto out;
+
+ data_sz = sizeof(FCDevicePage0_t) * max_bus * max_targ;
+ p_p0 = p0_array = kzalloc(data_sz, GFP_KERNEL);
+ if (!p0_array)
+ goto out;
+
+ data_sz = sizeof(FCDevicePage0_t *) * max_bus * max_targ;
+ p_pp0 = pp0_array = kzalloc(data_sz, GFP_KERNEL);
+ if (!pp0_array)
+ goto out;
+
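+	/*
+	 * Walk the IOC's device list: start with page address 0xffffff
+	 * and feed each returned PortIdentifier back in as the next page
+	 * address, until a config request fails or the identifier
+	 * exceeds 0xff0000.
+	 */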
+ do {
+ /* Get FC Device Page 0 header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_FC_DEVICE;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = port_id;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ break;
+
+ if (hdr.PageLength <= 0)
+ break;
+
+ data_sz = hdr.PageLength * 4;
+ ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz,
+ &page0_dma);
+ rc = -ENOMEM;
+ if (!ppage0_alloc)
+ break;
+
+ cfg.physAddr = page0_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ ppage0_alloc->PortIdentifier =
+ le32_to_cpu(ppage0_alloc->PortIdentifier);
+
+ ppage0_alloc->WWNN.Low =
+ le32_to_cpu(ppage0_alloc->WWNN.Low);
+
+ ppage0_alloc->WWNN.High =
+ le32_to_cpu(ppage0_alloc->WWNN.High);
+
+ ppage0_alloc->WWPN.Low =
+ le32_to_cpu(ppage0_alloc->WWPN.Low);
+
+ ppage0_alloc->WWPN.High =
+ le32_to_cpu(ppage0_alloc->WWPN.High);
+
+ ppage0_alloc->BBCredit =
+ le16_to_cpu(ppage0_alloc->BBCredit);
+
+ ppage0_alloc->MaxRxFrameSize =
+ le16_to_cpu(ppage0_alloc->MaxRxFrameSize);
+
+ port_id = ppage0_alloc->PortIdentifier;
+ num_targ++;
+ *p_p0 = *ppage0_alloc; /* save data */
+ *p_pp0++ = p_p0++; /* save addr */
+ }
+ pci_free_consistent(ioc->pcidev, data_sz,
+ (u8 *) ppage0_alloc, page0_dma);
+ if (rc != 0)
+ break;
+
+ } while (port_id <= 0xff0000);
+
+ if (num_targ) {
+ /* sort array */
+ if (num_targ > 1)
+ sort (pp0_array, num_targ, sizeof(FCDevicePage0_t *),
+ mptfc_FcDevPage0_cmp_func, NULL);
+ /* call caller's func for each targ */
+ for (ii = 0; ii < num_targ; ii++) {
+ fc = *(pp0_array+ii);
+ func(ioc, ioc_port, fc);
+ }
+ }
+
+ out:
+	kfree(pp0_array);
+	kfree(p0_array);
+ return rc;
+}
+
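+/*
+ * Build fc_rport_identifiers from FC Device Page 0 data.  Returns -1
+ * if the device is not a usable FCP target, 0 otherwise.
+ */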
+static int
+mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
+{
+ /* not currently usable */
+ if (pg0->Flags & (MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID |
+ MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID))
+ return -1;
+
+ if (!(pg0->Flags & MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID))
+ return -1;
+
+ if (!(pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET))
+ return -1;
+
+	/*
+	 * The page 0 data is already normalized to platform endianness;
+	 * compose the 64-bit names from their 32-bit halves to avoid
+	 * unaligned accesses on 64-bit architectures.
+	 */
+ rid->node_name = ((u64)pg0->WWNN.High) << 32 | (u64)pg0->WWNN.Low;
+ rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low;
+ rid->port_id = pg0->PortIdentifier;
+ rid->roles = FC_RPORT_ROLE_UNKNOWN;
+ rid->roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
+ rid->roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+
+ return 0;
+}
+
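+/*
+ * Match the device against the adapter's rport list by WWPN, allocating
+ * a new entry if necessary, refresh its page 0 data and register it
+ * with the fc transport if it isn't registered already.
+ */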
+static void
+mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
+{
+ struct fc_rport_identifiers rport_ids;
+ struct fc_rport *rport;
+ struct mptfc_rport_info *ri;
+ int match = 0;
+ u64 port_name;
+ unsigned long flags;
+
+ if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
+ return;
+
+ /* scan list looking for a match */
+ spin_lock_irqsave(&ioc->fc_rport_lock, flags);
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ port_name = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
+ if (port_name == rport_ids.port_name) { /* match */
+ list_move_tail(&ri->list, &ioc->fc_rports);
+ match = 1;
+ break;
+ }
+ }
+ if (!match) { /* allocate one */
+ spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
+ ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
+ if (!ri)
+ return;
+ spin_lock_irqsave(&ioc->fc_rport_lock, flags);
+ list_add_tail(&ri->list, &ioc->fc_rports);
+ }
+
+ ri->pg0 = *pg0; /* add/update pg0 data */
+ ri->flags &= ~MPT_RPORT_INFO_FLAGS_MISSING;
+
+ if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
+ ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
+ spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
+ rport = fc_remote_port_add(ioc->sh,channel, &rport_ids);
+ spin_lock_irqsave(&ioc->fc_rport_lock, flags);
+ if (rport) {
+ if (*((struct mptfc_rport_info **)rport->dd_data) != ri) {
+ ri->flags &= ~MPT_RPORT_INFO_FLAGS_MAPPED_VDEV;
+ ri->vdev = NULL;
+ ri->rport = rport;
+ *((struct mptfc_rport_info **)rport->dd_data) = ri;
+ }
+ rport->dev_loss_tmo = mptfc_dev_loss_tmo;
+ /*
+ * if already mapped, remap here. If not mapped,
+ * slave_alloc will allocate vdev and map
+ */
+ if (ri->flags & MPT_RPORT_INFO_FLAGS_MAPPED_VDEV) {
+ ri->vdev->target_id = ri->pg0.CurrentTargetID;
+ ri->vdev->bus_id = ri->pg0.CurrentBus;
+ ri->vdev->vtarget->target_id = ri->vdev->target_id;
+ ri->vdev->vtarget->bus_id = ri->vdev->bus_id;
+ }
+ #ifdef MPT_DEBUG
+ printk ("mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
+ "rport tid %d, tmo %d\n",
+ ioc->sh->host_no,
+ pg0->PortIdentifier,
+				(unsigned long long)rport_ids.node_name,
+				(unsigned long long)rport_ids.port_name,
+ pg0->CurrentTargetID,
+ ri->rport->scsi_target_id,
+ ri->rport->dev_loss_tmo);
+ #endif
+ } else {
+ list_del(&ri->list);
+ kfree(ri);
+ ri = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
+
+}
+
/*
- * mptfc_probe - Installs scsi devices per bus.
- * @pdev: Pointer to pci_dev structure
- *
- * Returns 0 for success, non-zero for failure.
- *
+ * OS entry point to allow the host driver to allocate memory
+ * for each scsi device. Called once per device during the bus scan.
+ * Return non-zero if allocation fails.
+ * Init memory once per LUN.
*/
+int
+mptfc_slave_alloc(struct scsi_device *sdev)
+{
+ MPT_SCSI_HOST *hd;
+ VirtTarget *vtarget;
+ VirtDevice *vdev;
+ struct scsi_target *starget;
+ struct fc_rport *rport;
+ struct mptfc_rport_info *ri;
+ unsigned long flags;
+
+ rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ hd = (MPT_SCSI_HOST *)sdev->host->hostdata;
+
+	vdev = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
+	if (!vdev) {
+		printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zu) FAILED!\n",
+			hd->ioc->name, sizeof(VirtDevice));
+		return -ENOMEM;
+	}
+
+ spin_lock_irqsave(&hd->ioc->fc_rport_lock,flags);
+
+ if (!(ri = *((struct mptfc_rport_info **)rport->dd_data))) {
+ spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags);
+ kfree(vdev);
+ return -ENODEV;
+ }
+
+ sdev->hostdata = vdev;
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+ if (vtarget->num_luns == 0) {
+ vtarget->tflags = MPT_TARGET_FLAGS_Q_YES |
+ MPT_TARGET_FLAGS_VALID_INQUIRY;
+ hd->Targets[sdev->id] = vtarget;
+ }
+
+ vtarget->target_id = vdev->target_id;
+ vtarget->bus_id = vdev->bus_id;
+
+ vdev->vtarget = vtarget;
+ vdev->ioc_id = hd->ioc->id;
+ vdev->lun = sdev->lun;
+ vdev->target_id = ri->pg0.CurrentTargetID;
+ vdev->bus_id = ri->pg0.CurrentBus;
+
+ ri->flags |= MPT_RPORT_INFO_FLAGS_MAPPED_VDEV;
+ ri->vdev = vdev;
+
+ spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags);
+
+ vtarget->num_luns++;
+
+#ifdef MPT_DEBUG
+ printk ("mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
+ "CurrentTargetID %d, %x %llx %llx\n",
+ sdev->host->host_no,
+ vtarget->num_luns,
+ sdev->id, ri->pg0.CurrentTargetID,
+		ri->pg0.PortIdentifier,
+		(unsigned long long)ri->pg0.WWPN.High << 32 | ri->pg0.WWPN.Low,
+		(unsigned long long)ri->pg0.WWNN.High << 32 | ri->pg0.WWNN.Low);
+#endif
+
+ return 0;
+}
+
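+/*
+ * queuecommand wrapper: complete the command immediately with the
+ * result supplied by fc_remote_port_chkready() if the rport isn't
+ * ready, otherwise hand it to the generic mptscsih path.
+ */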
+static int
+mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
+ int err;
+
+ err = fc_remote_port_chkready(rport);
+ if (unlikely(err)) {
+ SCpnt->result = err;
+ done(SCpnt);
+ return 0;
+ }
+ return mptscsih_qcmd(SCpnt,done);
+}
+
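+/*
+ * Copy WWNN/WWPN, port id and supported classes of service from FC
+ * Port Page 0 into the fc transport host attributes, and select
+ * WWPN-based target id binding.
+ */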
+static void
+mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
+{
+ unsigned class = 0, cos = 0;
+
+ /* don't know what to do as only one scsi (fc) host was allocated */
+ if (portnum != 0)
+ return;
+
+ class = ioc->fc_port_page0[portnum].SupportedServiceClass;
+ if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1)
+ cos |= FC_COS_CLASS1;
+ if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2)
+ cos |= FC_COS_CLASS2;
+ if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3)
+ cos |= FC_COS_CLASS3;
+
+ fc_host_node_name(ioc->sh) =
+ (u64)ioc->fc_port_page0[portnum].WWNN.High << 32
+ | (u64)ioc->fc_port_page0[portnum].WWNN.Low;
+
+ fc_host_port_name(ioc->sh) =
+ (u64)ioc->fc_port_page0[portnum].WWPN.High << 32
+ | (u64)ioc->fc_port_page0[portnum].WWPN.Low;
+
+ fc_host_port_id(ioc->sh) = ioc->fc_port_page0[portnum].PortIdentifier;
+
+ fc_host_supported_classes(ioc->sh) = cos;
+
+ fc_host_tgtid_bind_type(ioc->sh) = FC_TGTID_BIND_BY_WWPN;
+}
+
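+/*
+ * Work queue handler: mark every registered rport missing, re-read the
+ * IOC's device list (re-registering rports still present), then delete
+ * any rport that stayed missing.  Additional passes are made while
+ * rescan requests keep arriving.
+ */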
+static void
+mptfc_rescan_devices(void *arg)
+{
+ MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+ int ii;
+ int work_to_do;
+ unsigned long flags;
+ struct mptfc_rport_info *ri;
+
+ do {
+		/* start by tagging all registered rports as missing */
+ spin_lock_irqsave(&ioc->fc_rport_lock,flags);
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
+ ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
+
+		/*
+		 * now rescan the devices known to the adapter; this will
+		 * re-register any rports that are still present
+		 */
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ (void) mptbase_GetFcPortPage0(ioc, ii);
+ mptfc_init_host_attr(ioc,ii); /* refresh */
+ mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev);
+ }
+
+ /* delete devices still missing */
+ spin_lock_irqsave(&ioc->fc_rport_lock, flags);
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ /* if newly missing, delete it */
+ if ((ri->flags & (MPT_RPORT_INFO_FLAGS_REGISTERED |
+ MPT_RPORT_INFO_FLAGS_MISSING))
+ == (MPT_RPORT_INFO_FLAGS_REGISTERED |
+ MPT_RPORT_INFO_FLAGS_MISSING)) {
+
+ ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
+ MPT_RPORT_INFO_FLAGS_MISSING);
+ fc_remote_port_delete(ri->rport);
+			/*
+			 * The remote port isn't really deleted because
+			 * binding is by WWPN and the driver only
+			 * registers FCP_TARGETs.
+			 */
+ #ifdef MPT_DEBUG
+ printk ("mptfc_rescan.%d: %llx deleted\n",
+					ioc->sh->host_no,
+					(unsigned long long)ri->pg0.WWPN.High << 32 |
+						ri->pg0.WWPN.Low);
+ #endif
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
+
+ /*
+ * allow multiple passes as target state
+ * might have changed during scan
+ */
+ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
+ if (ioc->fc_rescan_work_count > 2) /* only need one more */
+ ioc->fc_rescan_work_count = 2;
+ work_to_do = --ioc->fc_rescan_work_count;
+ spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
+ } while (work_to_do);
+}
+
static int
mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ioc_cap;
int error=0;
int r;
-
+
if ((r = mpt_attach(pdev,id)) != 0)
return r;
-
+
ioc = pci_get_drvdata(pdev);
ioc->DoneCtx = mptfcDoneCtx;
ioc->TaskCtx = mptfcTaskCtx;
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
ioc->name, ioc);
- return 0;
+ return -ENODEV;
}
sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
goto out_mptfc_probe;
}
+ INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc);
+
spin_lock_irqsave(&ioc->FreeQlock, flags);
/* Attach the SCSI Host to the IOC structure
hd->scandv_wait_done = 0;
hd->last_queue_full = 0;
+ sh->transportt = mptfc_transport_template;
error = scsi_add_host (sh, &ioc->pcidev->dev);
if(error) {
dprintk((KERN_ERR MYNAM
goto out_mptfc_probe;
}
- scsi_scan_host(sh);
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ mptfc_init_host_attr(ioc,ii);
+ mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev);
+ }
+
return 0;
out_mptfc_probe:
.name = "mptfc",
.id_table = mptfc_pci_table,
.probe = mptfc_probe,
- .remove = __devexit_p(mptscsih_remove),
+ .remove = __devexit_p(mptfc_remove),
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
static int __init
mptfc_init(void)
{
+ int error;
show_mptmod_ver(my_NAME, my_VERSION);
+ /* sanity check module parameter */
+ if (mptfc_dev_loss_tmo == 0)
+ mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO;
+
+ mptfc_transport_template =
+ fc_attach_transport(&mptfc_transport_functions);
+
+ if (!mptfc_transport_template)
+ return -ENODEV;
+
mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER);
mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER);
mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER);
": Registered for IOC reset notifications\n"));
}
- return pci_register_driver(&mptfc_driver);
+ error = pci_register_driver(&mptfc_driver);
+ if (error) {
+ fc_release_transport(mptfc_transport_template);
+ }
+
+ return error;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptfc_remove - Remove fc infrastructure for devices
+ * @pdev: Pointer to pci_dev structure
+ *
+ */
+static void __devexit mptfc_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct mptfc_rport_info *p, *n;
+
+ fc_remove_host(ioc->sh);
+
+ list_for_each_entry_safe(p, n, &ioc->fc_rports, list) {
+ list_del(&p->list);
+ kfree(p);
+ }
+
+ mptscsih_remove(pdev);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
mptfc_exit(void)
{
pci_unregister_driver(&mptfc_driver);
-
+ fc_release_transport(mptfc_transport_template);
+
mpt_reset_deregister(mptfcDoneCtx);
dprintk((KERN_INFO MYNAM
": Deregistered for IOC reset notifications\n"));