[SCSI] lpfc 8.3.45: Incorporated support of a low-latency io path
author		James Smart <james.smart@emulex.com>
		Thu, 20 Feb 2014 14:56:45 +0000 (09:56 -0500)
committer	James Bottomley <JBottomley@Parallels.com>
		Sat, 15 Mar 2014 17:18:56 +0000 (10:18 -0700)
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
13 files changed:
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_scsi.h
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4de9555..bbc5be6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -722,6 +722,20 @@ struct lpfc_hba {
        uint32_t cfg_hba_queue_depth;
        uint32_t cfg_enable_hba_reset;
        uint32_t cfg_enable_hba_heartbeat;
+       uint32_t cfg_fof;
+       uint32_t cfg_EnableXLane;
+       uint8_t cfg_oas_tgt_wwpn[8];
+       uint8_t cfg_oas_vpt_wwpn[8];
+       uint32_t cfg_oas_lun_state;
+#define OAS_LUN_ENABLE 1
+#define OAS_LUN_DISABLE        0
+       uint32_t cfg_oas_lun_status;
+#define OAS_LUN_STATUS_EXISTS  0x01
+       uint32_t cfg_oas_flags;
+#define OAS_FIND_ANY_VPORT     0x01
+#define OAS_FIND_ANY_TARGET    0x02
+#define OAS_LUN_VALID  0x04
+       uint32_t cfg_XLanePriority;
        uint32_t cfg_enable_bg;
        uint32_t cfg_hostmem_hgp;
        uint32_t cfg_log_verbose;
@@ -973,6 +987,9 @@ struct lpfc_hba {
        atomic_t sdev_cnt;
        uint8_t fips_spec_rev;
        uint8_t fips_level;
+       spinlock_t devicelock;  /* lock for luns list */
+       mempool_t *device_data_mem_pool;
+       struct list_head luns;
 };
 
 static inline struct Scsi_Host *
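
The fields added above carry the per-HBA OAS bookkeeping: the vport and target
wwpns written through sysfs, the requested lun state, and a small flag word.
As a standalone illustration only (plain C reusing the same #define values;
nothing here is driver API), the sketch below mimics the flag handling that
lpfc_oas_tgt_store() performs later in this patch:

#include <stdint.h>
#include <stdio.h>

#define OAS_FIND_ANY_VPORT	0x01
#define OAS_FIND_ANY_TARGET	0x02
#define OAS_LUN_VALID		0x04

int main(void)
{
	uint32_t cfg_oas_flags = OAS_LUN_VALID;	/* pretend a lun was already looked up */
	uint64_t tgt_wwpn = 0;			/* all-zero wwpn means "match any target" */

	if (tgt_wwpn == 0)
		cfg_oas_flags |= OAS_FIND_ANY_TARGET;
	else
		cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
	cfg_oas_flags &= ~OAS_LUN_VALID;	/* a new target invalidates any cached lun */

	printf("cfg_oas_flags = 0x%x\n", cfg_oas_flags);	/* prints 0x2 */
	return 0;
}
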
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 00656fc..ba8b77a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
 }
 
 /**
+ * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
+ *                         (OAS) is supported.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains whether OAS is supported (1) or not (0).
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+       struct lpfc_hba *phba = vport->phba;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       phba->sli4_hba.pc_sli4_params.oas_supported);
+}
+
+/**
  * lpfc_link_state_store - Transition the link_state on an HBA port
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -2041,9 +2062,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
 static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
                   lpfc_sriov_hw_max_virtfn_show, NULL);
 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
+                  NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+#define WWN_SZ 8
+/**
+ * lpfc_wwn_set - Convert string to the 8 byte WWN value.
+ * @buf: WWN string.
+ * @cnt: Length of string.
+ * @wwn: Array to receive converted wwn value.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 on success
+ **/
+static size_t
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+       unsigned int i, j;
+
+       /* Count may include a LF at end of string */
+       if (buf[cnt-1] == '\n')
+               cnt--;
 
+       if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+           ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+               return -EINVAL;
+
+       memset(wwn, 0, WWN_SZ);
+
+       /* Validate and store the new name */
+       for (i = 0, j = 0; i < 16; i++) {
+               if ((*buf >= 'a') && (*buf <= 'f'))
+                       j = ((j << 4) | ((*buf++ - 'a') + 10));
+               else if ((*buf >= 'A') && (*buf <= 'F'))
+                       j = ((j << 4) | ((*buf++ - 'A') + 10));
+               else if ((*buf >= '0') && (*buf <= '9'))
+                       j = ((j << 4) | (*buf++ - '0'));
+               else
+                       return -EINVAL;
+               if (i % 2) {
+                       wwn[i/2] = j & 0xff;
+                       j = 0;
+               }
+       }
+       return 0;
+}
 /**
  * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
  * @dev: class device that is converted into a Scsi_host.
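
For reference, the parsing rules that lpfc_wwn_set() implements above (16 hex
digits, optionally prefixed with "x" or "0x", optionally ending in a newline)
can be exercised in isolation.  The following is a userspace re-implementation
for illustration only, not the driver function itself:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define WWN_SZ 8

static int wwn_parse(const char *buf, size_t cnt, unsigned char wwn[WWN_SZ])
{
	unsigned int i, j;

	if (cnt && buf[cnt - 1] == '\n')	/* count may include a trailing LF */
		cnt--;
	if (cnt < 16 || cnt > 18 || (cnt == 17 && *buf++ != 'x') ||
	    (cnt == 18 && (*buf++ != '0' || *buf++ != 'x')))
		return -EINVAL;

	memset(wwn, 0, WWN_SZ);
	for (i = 0, j = 0; i < 16; i++) {
		int c = *buf++;

		if (c >= 'a' && c <= 'f')
			j = (j << 4) | (c - 'a' + 10);
		else if (c >= 'A' && c <= 'F')
			j = (j << 4) | (c - 'A' + 10);
		else if (c >= '0' && c <= '9')
			j = (j << 4) | (c - '0');
		else
			return -EINVAL;
		if (i % 2) {			/* two hex digits form one byte */
			wwn[i / 2] = j & 0xff;
			j = 0;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char wwn[WWN_SZ];
	int i;

	if (!wwn_parse("0x10000090fa0d1234\n", 19, wwn))
		for (i = 0; i < WWN_SZ; i++)
			printf("%02x", wwn[i]);
	printf("\n");				/* prints 10000090fa0d1234 */
	return 0;
}
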
@@ -2132,9 +2197,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct completion online_compl;
-       int stat1=0, stat2=0;
-       unsigned int i, j, cnt=count;
-       u8 wwpn[8];
+       int stat1 = 0, stat2 = 0;
+       unsigned int cnt = count;
+       u8 wwpn[WWN_SZ];
        int rc;
 
        if (!phba->cfg_enable_hba_reset)
@@ -2149,29 +2214,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
        if (buf[cnt-1] == '\n')
                cnt--;
 
-       if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
-           ((cnt == 17) && (*buf++ != 'x')) ||
-           ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+       if (!phba->soft_wwn_enable)
                return -EINVAL;
 
+       /* lock setting wwpn, wwnn down */
        phba->soft_wwn_enable = 0;
 
-       memset(wwpn, 0, sizeof(wwpn));
-
-       /* Validate and store the new name */
-       for (i=0, j=0; i < 16; i++) {
-               int value;
-
-               value = hex_to_bin(*buf++);
-               if (value >= 0)
-                       j = (j << 4) | value;
-               else
-                       return -EINVAL;
-               if (i % 2) {
-                       wwpn[i/2] = j & 0xff;
-                       j = 0;
-               }
+       rc = lpfc_wwn_set(buf, cnt, wwpn);
+       if (rc) {
+               /* not able to set wwpn, unlock it */
+               phba->soft_wwn_enable = 1;
+               return rc;
        }
+
        phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
        fc_host_port_name(shost) = phba->cfg_soft_wwpn;
        if (phba->cfg_soft_wwnn)
@@ -2198,7 +2253,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
                                "reinit adapter - %d\n", stat2);
        return (stat1 || stat2) ? -EIO : count;
 }
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
                   lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
 
 /**
@@ -2235,39 +2290,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
 {
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-       unsigned int i, j, cnt=count;
-       u8 wwnn[8];
+       unsigned int cnt = count;
+       u8 wwnn[WWN_SZ];
+       int rc;
 
        /* count may include a LF at end of string */
        if (buf[cnt-1] == '\n')
                cnt--;
 
-       if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
-           ((cnt == 17) && (*buf++ != 'x')) ||
-           ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+       if (!phba->soft_wwn_enable)
                return -EINVAL;
 
-       /*
-        * Allow wwnn to be set many times, as long as the enable is set.
-        * However, once the wwpn is set, everything locks.
-        */
-
-       memset(wwnn, 0, sizeof(wwnn));
-
-       /* Validate and store the new name */
-       for (i=0, j=0; i < 16; i++) {
-               int value;
-
-               value = hex_to_bin(*buf++);
-               if (value >= 0)
-                       j = (j << 4) | value;
-               else
-                       return -EINVAL;
-               if (i % 2) {
-                       wwnn[i/2] = j & 0xff;
-                       j = 0;
-               }
+       rc = lpfc_wwn_set(buf, cnt, wwnn);
+       if (rc) {
+               /* Allow wwnn to be set many times, as long as the enable
+                * is set. However, once the wwpn is set, everything locks.
+                */
+               return rc;
        }
+
        phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
 
        dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2276,9 +2317,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
 
        return count;
 }
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
                   lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
 
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
+ *                   Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+                 char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+       return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+                       wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
+ *                   Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the wwpn is not a valid wwpn
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+                  const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       unsigned int cnt = count;
+       uint8_t wwpn[WWN_SZ];
+       int rc;
+
+       if (!phba->cfg_EnableXLane)
+               return -EPERM;
+
+       /* count may include a LF at end of string */
+       if (buf[cnt-1] == '\n')
+               cnt--;
+
+       rc = lpfc_wwn_set(buf, cnt, wwpn);
+       if (rc)
+               return rc;
+
+       memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+       memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+       if (wwn_to_u64(wwpn) == 0)
+               phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+       else
+               phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+       phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+       phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+       return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+                  lpfc_oas_tgt_show, lpfc_oas_tgt_store);
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
+ *                   for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
+                 char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+       return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+                       wwn_to_u64(phba->cfg_oas_vpt_wwpn));
+}
+
+/**
+ * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
+ *                   for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the wwpn is not a valid wwpn
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
+                  const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       unsigned int cnt = count;
+       uint8_t wwpn[WWN_SZ];
+       int rc;
+
+       if (!phba->cfg_EnableXLane)
+               return -EPERM;
+
+       /* count may include a LF at end of string */
+       if (buf[cnt-1] == '\n')
+               cnt--;
+
+       rc = lpfc_wwn_set(buf, cnt, wwpn);
+       if (rc)
+               return rc;
+
+       memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+       memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+       if (wwn_to_u64(wwpn) == 0)
+               phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
+       else
+               phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
+       phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+       phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+       return count;
+}
+static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
+                  lpfc_oas_vpt_show, lpfc_oas_vpt_store);
+
+/**
+ * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
+ *                         that will be applied to luns for Optimized Access
+ *                         Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+}
+
+/**
+ * lpfc_oas_lun_state_store - Store the state (enabled or disabled) that
+ *                         will be applied to luns for Optimized Access
+ *                         Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the value written is not 0 or 1
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       int val = 0;
+
+       if (!phba->cfg_EnableXLane)
+               return -EPERM;
+
+       if (!isdigit(buf[0]))
+               return -EINVAL;
+
+       if (sscanf(buf, "%i", &val) != 1)
+               return -EINVAL;
+
+       if ((val != 0) && (val != 1))
+               return -EINVAL;
+
+       phba->cfg_oas_lun_state = val;
+
+       return strlen(buf);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
+                  lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
+
+/**
+ * lpfc_oas_lun_status_show - Return the status of the Optimized Access
+ *                          Storage (OAS) lun returned by the
+ *                          lpfc_oas_lun_show function.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+       if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
+               return -EFAULT;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
+                  lpfc_oas_lun_status_show, NULL);
+
+
+/**
+ * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
+ *                        (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the lun.
+ *
+ * Returns:
+ * SUCCESS : 0
+ * -EPERM OAS is not enabled or not supported by this port.
+ *
+ */
+static size_t
+lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+                      uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
+{
+
+       int rc = 0;
+
+       if (!phba->cfg_EnableXLane)
+               return -EPERM;
+
+       if (oas_state) {
+               if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+                                        (struct lpfc_name *)tgt_wwpn, lun))
+                       rc = -ENOMEM;
+       } else {
+               lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+                                    (struct lpfc_name *)tgt_wwpn, lun);
+       }
+       return rc;
+
+}
+
+/**
+ * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
+ *                       Access Storage (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
+ * @lun_status: status of the returned lun
+ *
+ * Returns the first or next lun enabled for OAS operations for the vport/target
+ * specified.  If a lun is found, its vport wwpn, target wwpn and status are
+ * returned.  If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
+ *
+ * Return:
+ * lun that is OAS enabled for the vport/target
+ * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
+ */
+static uint64_t
+lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+                     uint8_t tgt_wwpn[], uint32_t *lun_status)
+{
+       uint64_t found_lun;
+
+       if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
+               return NOT_OAS_ENABLED_LUN;
+       if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
+                                  phba->sli4_hba.oas_next_vpt_wwpn,
+                                  (struct lpfc_name *)
+                                  phba->sli4_hba.oas_next_tgt_wwpn,
+                                  &phba->sli4_hba.oas_next_lun,
+                                  (struct lpfc_name *)vpt_wwpn,
+                                  (struct lpfc_name *)tgt_wwpn,
+                                  &found_lun, lun_status))
+               return found_lun;
+       else
+               return NOT_OAS_ENABLED_LUN;
+}
+
+/**
+ * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the oas_lun.
+ *
+ * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
+ * a lun for OAS operations.
+ *
+ * Return:
+ * SUCCESS: 0
+ * -ENOMEM: failed to enable a lun for OAS operations
+ * -EPERM: OAS is not enabled
+ */
+static ssize_t
+lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+                         uint8_t tgt_wwpn[], uint64_t lun,
+                         uint32_t oas_state)
+{
+
+       int rc;
+
+       rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
+                                       oas_state);
+       return rc;
+}
+
+/**
+ * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This routine returns a lun enabled for OAS each time the function
+ * is called.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
+                 char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+       uint64_t oas_lun;
+       int len = 0;
+
+       if (!phba->cfg_EnableXLane)
+               return -EPERM;
+
+       if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+               if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
+                       return -EFAULT;
+
+       if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+               if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
+                       return -EFAULT;
+
+       oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
+                                       phba->cfg_oas_tgt_wwpn,
+                                       &phba->cfg_oas_lun_status);
+       if (oas_lun != NOT_OAS_ENABLED_LUN)
+               phba->cfg_oas_flags |= OAS_LUN_VALID;
+
+       len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+
+       return len;
+}
+
+/**
+ * lpfc_oas_lun_store - Sets the OAS state for lun
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
+ *
+ * This function sets the OAS state for the lun.  Before this function is
+ * called, the vport wwpn, target wwpn, and oas state need to be set.
+ *
+ * Returns:
+ * SUCCESS: value of count.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
+                  const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       uint64_t scsi_lun;
+       ssize_t rc;
+
+       if (!phba->cfg_EnableXLane)
+               return -EPERM;
+
+       if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+               return -EFAULT;
+
+       if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+               return -EFAULT;
+
+       if (!isdigit(buf[0]))
+               return -EINVAL;
+
+       if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
+               return -EINVAL;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
+                       "with oas set to %d\n",
+                       wwn_to_u64(phba->cfg_oas_vpt_wwpn),
+                       wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
+                       phba->cfg_oas_lun_state);
+
+       rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
+                                          phba->cfg_oas_tgt_wwpn, scsi_lun,
+                                          phba->cfg_oas_lun_state);
+
+       if (rc)
+               return rc;
+
+       return count;
+}
+static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
+                  lpfc_oas_lun_show, lpfc_oas_lun_store);
 
 static int lpfc_poll = 0;
 module_param(lpfc_poll, int, S_IRUGO);
@@ -4157,6 +4627,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
 
 /*
+# lpfc_EnableXLane: Enable Express Lane Feature
+#      0x0   Express Lane Feature disabled
+#      0x1   Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority:  Define CS_CTL priority for Express Lane Feature
+#       0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+
+/*
 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
 #       0  = BlockGuard disabled (default)
 #       1  = BlockGuard enabled
@@ -4317,6 +4802,13 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_soft_wwn_enable,
        &dev_attr_lpfc_enable_hba_reset,
        &dev_attr_lpfc_enable_hba_heartbeat,
+       &dev_attr_lpfc_EnableXLane,
+       &dev_attr_lpfc_XLanePriority,
+       &dev_attr_lpfc_xlane_lun,
+       &dev_attr_lpfc_xlane_tgt,
+       &dev_attr_lpfc_xlane_vpt,
+       &dev_attr_lpfc_xlane_lun_state,
+       &dev_attr_lpfc_xlane_lun_status,
        &dev_attr_lpfc_sg_seg_cnt,
        &dev_attr_lpfc_max_scsicmpl_time,
        &dev_attr_lpfc_stat_data_ctrl,
@@ -4335,6 +4827,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_dss,
        &dev_attr_lpfc_sriov_hw_max_virtfn,
        &dev_attr_protocol,
+       &dev_attr_lpfc_xlane_supported,
        NULL,
 };
 
@@ -5296,11 +5789,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
        lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
        lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+       lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+       if (phba->sli_rev != LPFC_SLI_REV4)
+               phba->cfg_EnableXLane = 0;
+       lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+       memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
+       memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
+       phba->cfg_oas_lun_state = 0;
+       phba->cfg_oas_lun_status = 0;
+       phba->cfg_oas_flags = 0;
        lpfc_enable_bg_init(phba, lpfc_enable_bg);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->cfg_poll = 0;
        else
-       phba->cfg_poll = lpfc_poll;
+               phba->cfg_poll = lpfc_poll;
        phba->cfg_soft_wwnn = 0L;
        phba->cfg_soft_wwpn = 0L;
        lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
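
Taken together, the attributes registered above form the user interface for
the Express Lane/OAS feature.  A minimal userspace sketch of driving them is
shown below; the scsi_host number and the wwpn values are assumptions for
illustration, and error handling is reduced to a single return check:

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	const char *base = "/sys/class/scsi_host/host0";	/* assumed host number */
	char path[256];

	snprintf(path, sizeof(path), "%s/lpfc_xlane_vpt", base);
	write_attr(path, "0x10000090fa0d1234");	/* example vport wwpn */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_tgt", base);
	write_attr(path, "0x5005076801234567");	/* example target wwpn */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_lun_state", base);
	write_attr(path, "1");			/* 1 = enable OAS for the lun */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_lun", base);
	write_attr(path, "0x0");		/* lun 0, in hex as the store expects */
	return 0;
}
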
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 0909451..4d5ee77 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -187,6 +187,11 @@ void lpfc_offline_prep(struct lpfc_hba *, int);
 void lpfc_offline(struct lpfc_hba *);
 void lpfc_reset_hba(struct lpfc_hba *);
 
+int lpfc_fof_queue_create(struct lpfc_hba *);
+int lpfc_fof_queue_setup(struct lpfc_hba *);
+int lpfc_fof_queue_destroy(struct lpfc_hba *);
+irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
+
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);
 
@@ -472,3 +477,20 @@ void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
 void lpfc_sli4_offline_eratt(struct lpfc_hba *);
+
+struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
+                                               struct lpfc_name *,
+                                               struct lpfc_name *,
+                                               uint64_t, bool);
+void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
+struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
+                                       struct list_head *list,
+                                       struct lpfc_name *,
+                                       struct lpfc_name *, uint64_t);
+bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+                        struct lpfc_name *, uint64_t);
+bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+                         struct lpfc_name *, uint64_t);
+bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+                           struct lpfc_name *, uint64_t *, struct lpfc_name *,
+                           struct lpfc_name *, uint64_t *, uint32_t *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index c3c3fbe..828c08e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2280,6 +2280,104 @@ proc_cq:
                }
        }
 
+       if (phba->cfg_fof) {
+               /* FOF EQ */
+               qp = phba->sli4_hba.fof_eq;
+               if (!qp)
+                       goto out;
+
+               len += snprintf(pbuffer+len,
+                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                       "\nFOF EQ info: "
+                       "EQ-STAT[max:x%x noE:x%x "
+                       "bs:x%x proc:x%llx]\n",
+                       qp->q_cnt_1, qp->q_cnt_2,
+                       qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+               len += snprintf(pbuffer+len,
+                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                       "EQID[%02d], "
+                       "QE-CNT[%04d], QE-SIZE[%04d], "
+                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                       qp->queue_id,
+                       qp->entry_count,
+                       qp->entry_size,
+                       qp->host_index,
+                       qp->hba_index);
+
+               /* Reset max counter */
+               qp->EQ_max_eqe = 0;
+
+               len +=  snprintf(pbuffer+len,
+                       LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+               if (len >= max_cnt)
+                       goto too_big;
+       }
+
+       if (phba->cfg_EnableXLane) {
+
+               /* OAS CQ */
+               qp = phba->sli4_hba.oas_cq;
+               if (qp) {
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\tOAS CQ info: ");
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "AssocEQID[%02d]: "
+                               "CQ STAT[max:x%x relw:x%x "
+                               "xabt:x%x wq:x%llx]\n",
+                               qp->assoc_qid,
+                               qp->q_cnt_1, qp->q_cnt_2,
+                               qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\tCQID[%02d], "
+                               "QE-CNT[%04d], QE-SIZE[%04d], "
+                               "HOST-IDX[%04d], PORT-IDX[%04d]",
+                               qp->queue_id, qp->entry_count,
+                               qp->entry_size, qp->host_index,
+                               qp->hba_index);
+
+                       /* Reset max counter */
+                       qp->CQ_max_cqe = 0;
+
+                       len +=  snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+               }
+
+               /* OAS WQ */
+               qp = phba->sli4_hba.oas_wq;
+               if (qp) {
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\t\tOAS WQ info: ");
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "AssocCQID[%02d]: "
+                               "WQ-STAT[oflow:x%x posted:x%llx]\n",
+                               qp->assoc_qid,
+                               qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\t\tWQID[%02d], "
+                               "QE-CNT[%04d], QE-SIZE[%04d], "
+                               "HOST-IDX[%04d], PORT-IDX[%04d]",
+                               qp->queue_id,
+                               qp->entry_count,
+                               qp->entry_size,
+                               qp->host_index,
+                               qp->hba_index);
+
+                       len +=  snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+               }
+       }
+out:
        spin_unlock_irq(&phba->hbalock);
        return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 
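
The debugfs additions above follow the file's existing pattern: text is
accumulated with snprintf() into a fixed buffer and the routine bails out via
the too_big label once len reaches the limit.  A self-contained sketch of that
pattern, with a deliberately tiny buffer standing in for
LPFC_QUE_INFO_GET_BUF_SIZE:

#include <stdio.h>

#define BUF_SIZE 64

int main(void)
{
	char pbuffer[BUF_SIZE];
	int len = 0, max_cnt = BUF_SIZE - 1, i;

	for (i = 0; i < 16; i++) {
		len += snprintf(pbuffer + len, BUF_SIZE - len, "QID[%02d] ", i);
		if (len >= max_cnt) {
			/* mirrors the "goto too_big" bail-out above */
			fprintf(stderr, "output truncated at entry %d\n", i);
			break;
		}
	}
	printf("%s\n", pbuffer);
	return 0;
}
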
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 6f927d3..3d9438c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -45,6 +45,7 @@
 #define LPFC_EXTRA_RING          1     /* ring 1 for other protocols */
 #define LPFC_ELS_RING            2     /* ring 2 for ELS commands */
 #define LPFC_FCP_NEXT_RING       3
+#define LPFC_FCP_OAS_RING        3
 
 #define SLI2_IOCB_CMD_R0_ENTRIES    172        /* SLI-2 FCP command ring entries */
 #define SLI2_IOCB_RSP_R0_ENTRIES    134        /* SLI-2 FCP response ring entries */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5464b11..fd79f7d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2616,6 +2616,9 @@ struct lpfc_sli4_parameters {
 #define cfg_phwq_SHIFT                         15
 #define cfg_phwq_MASK                          0x00000001
 #define cfg_phwq_WORD                          word12
+#define cfg_oas_SHIFT                          25
+#define cfg_oas_MASK                           0x00000001
+#define cfg_oas_WORD                           word12
 #define cfg_loopbk_scope_SHIFT                 28
 #define cfg_loopbk_scope_MASK                  0x0000000f
 #define cfg_loopbk_scope_WORD                  word12
@@ -3322,6 +3325,9 @@ struct wqe_common {
 #define wqe_ebde_cnt_SHIFT    0
 #define wqe_ebde_cnt_MASK     0x0000000f
 #define wqe_ebde_cnt_WORD     word10
+#define wqe_oas_SHIFT         6
+#define wqe_oas_MASK          0x00000001
+#define wqe_oas_WORD          word10
 #define wqe_lenloc_SHIFT      7
 #define wqe_lenloc_MASK       0x00000003
 #define wqe_lenloc_WORD       word10
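
The new cfg_oas and wqe_oas definitions follow the SHIFT/MASK/WORD convention
used throughout lpfc_hw4.h together with the bf_get()/bf_set() accessors.  The
snippet below is a standalone sketch of that convention; the macro bodies and
the one-word structure are simplified stand-ins, not the driver's own
definitions:

#include <stdint.h>
#include <stdio.h>

#define wqe_oas_SHIFT	6
#define wqe_oas_MASK	0x00000001

struct wqe_word10 { uint32_t word10; };
#define wqe_oas_WORD	word10

#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
			       ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

int main(void)
{
	struct wqe_word10 w = { .word10 = 0 };

	bf_set(wqe_oas, &w, 1);		/* mark the WQE as an OAS (low-latency) I/O */
	printf("word10 = 0x%08x, oas = %u\n", w.word10, bf_get(wqe_oas, &w));
	return 0;
}
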
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index aa29ea0..157ad1c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -80,6 +80,7 @@ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
+static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -4856,6 +4857,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
        int longs;
+       int fof_vectors = 0;
 
        /* Get all the module params for configuring this host */
        lpfc_get_cfgparam(phba);
@@ -5121,6 +5123,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                }
        }
        mempool_free(mboxq, phba->mbox_mem_pool);
+
+       /* Verify OAS is supported */
+       lpfc_sli4_oas_verify(phba);
+       if (phba->cfg_fof)
+               fof_vectors = 1;
+
        /* Verify all the SLI4 queues */
        rc = lpfc_sli4_queue_verify(phba);
        if (rc)
@@ -5162,7 +5170,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        phba->sli4_hba.fcp_eq_hdl =
                        kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
-                           phba->cfg_fcp_io_channel), GFP_KERNEL);
+                           (fof_vectors + phba->cfg_fcp_io_channel)),
+                           GFP_KERNEL);
        if (!phba->sli4_hba.fcp_eq_hdl) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2572 Failed allocate memory for "
@@ -5172,7 +5181,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        }
 
        phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
-                                     phba->cfg_fcp_io_channel), GFP_KERNEL);
+                                 (fof_vectors +
+                                  phba->cfg_fcp_io_channel)), GFP_KERNEL);
        if (!phba->sli4_hba.msix_entries) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2573 Failed allocate memory for msi-x "
@@ -5393,6 +5403,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
        /* Initialize FCF connection rec list */
        INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
 
+       /* Initialize OAS configuration list */
+       spin_lock_init(&phba->devicelock);
+       INIT_LIST_HEAD(&phba->luns);
+
        return 0;
 }
 
@@ -6819,6 +6833,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        int cfg_fcp_io_channel;
        uint32_t cpu;
        uint32_t i = 0;
+       int fof_vectors = phba->cfg_fof ? 1 : 0;
 
        /*
         * Sanity check for configured queue parameters against the run-time
@@ -6845,7 +6860,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
                cfg_fcp_io_channel = i;
        }
 
-       if (cfg_fcp_io_channel >
+       if (cfg_fcp_io_channel + fof_vectors >
            phba->sli4_hba.max_cfg_param.max_eq) {
                if (phba->sli4_hba.max_cfg_param.max_eq <
                    LPFC_FCP_IO_CHAN_MIN) {
@@ -6862,7 +6877,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
                                "available EQs: from %d to %d\n",
                                cfg_fcp_io_channel,
                                phba->sli4_hba.max_cfg_param.max_eq);
-               cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+               cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
+                       fof_vectors;
        }
 
        /* The actual number of FCP event queues adopted */
@@ -7073,6 +7089,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        }
        phba->sli4_hba.dat_rq = qdesc;
 
+       /* Create the Queues needed for Flash Optimized Fabric operations */
+       if (phba->cfg_fof)
+               lpfc_fof_queue_create(phba);
        return 0;
 
 out_error:
@@ -7097,6 +7116,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
        int idx;
 
+       if (phba->cfg_fof)
+               lpfc_fof_queue_destroy(phba);
+
        if (phba->sli4_hba.hba_eq != NULL) {
                /* Release HBA event queue */
                for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
@@ -7481,8 +7503,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.hdr_rq->queue_id,
                        phba->sli4_hba.dat_rq->queue_id,
                        phba->sli4_hba.els_cq->queue_id);
+
+       if (phba->cfg_fof) {
+               rc = lpfc_fof_queue_setup(phba);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0549 Failed setup of FOF Queues: "
+                                       "rc = 0x%x\n", rc);
+                       goto out_destroy_els_rq;
+               }
+       }
        return 0;
 
+out_destroy_els_rq:
+       lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
 out_destroy_els_wq:
        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
@@ -7521,6 +7555,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 {
        int fcp_qidx;
 
+       /* Unset the queues created for Flash Optimized Fabric operations */
+       if (phba->cfg_fof)
+               lpfc_fof_queue_destroy(phba);
        /* Unset mailbox command work queue */
        lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
        /* Unset ELS work queue */
@@ -8638,6 +8675,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 
        /* Configure MSI-X capability structure */
        vectors = phba->cfg_fcp_io_channel;
+       if (phba->cfg_fof) {
+               phba->sli4_hba.msix_entries[index].entry = index;
+               vectors++;
+       }
 enable_msix_vectors:
        rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
                             vectors);
@@ -8667,7 +8708,15 @@ enable_msix_vectors:
                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
                atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
-               rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+               if (phba->cfg_fof && (index == (vectors - 1)))
+                       rc = request_irq(
+                               phba->sli4_hba.msix_entries[index].vector,
+                                &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
+                                (char *)&phba->sli4_hba.handler_name[index],
+                                &phba->sli4_hba.fcp_eq_hdl[index]);
+               else
+                       rc = request_irq(
+                               phba->sli4_hba.msix_entries[index].vector,
                                 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
                                 (char *)&phba->sli4_hba.handler_name[index],
                                 &phba->sli4_hba.fcp_eq_hdl[index]);
@@ -8679,6 +8728,9 @@ enable_msix_vectors:
                }
        }
 
+       if (phba->cfg_fof)
+               vectors--;
+
        if (vectors != phba->cfg_fcp_io_channel) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3238 Reducing IO channels to match number of "
@@ -8724,7 +8776,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
                free_irq(phba->sli4_hba.msix_entries[index].vector,
                         &phba->sli4_hba.fcp_eq_hdl[index]);
        }
-
+       if (phba->cfg_fof) {
+               free_irq(phba->sli4_hba.msix_entries[index].vector,
+                        &phba->sli4_hba.fcp_eq_hdl[index]);
+       }
        /* Disable MSI-X */
        pci_disable_msix(phba->pcidev);
 
@@ -8774,6 +8829,10 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
        }
 
+       if (phba->cfg_fof) {
+               phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+               phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+       }
        return 0;
 }
 
@@ -8856,6 +8915,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
                                atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
                                        fcp_eq_in_use, 1);
                        }
+                       if (phba->cfg_fof) {
+                               phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+                               phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+                               atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+                                       fcp_eq_in_use, 1);
+                       }
                }
        }
        return intr_mode;
@@ -9166,6 +9231,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
        sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
        sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+       sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
        sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
        sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
        sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
@@ -10799,6 +10865,169 @@ lpfc_io_resume(struct pci_dev *pdev)
        return;
 }
 
+/**
+ * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if OAS is supported for this adapter. If
+ * supported, the configure Flash Optimized Fabric flag is set.  Otherwise,
+ * the enable oas flag is cleared and the pool created for OAS device data
+ * is destroyed.
+ *
+ **/
+void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+       if (!phba->cfg_EnableXLane)
+               return;
+
+       if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+               phba->cfg_fof = 1;
+       } else {
+               phba->cfg_EnableXLane = 0;
+               if (phba->device_data_mem_pool)
+                       mempool_destroy(phba->device_data_mem_pool);
+               phba->device_data_mem_pool = NULL;
+       }
+
+       return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       int rc;
+
+       rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+       if (rc)
+               return -ENOMEM;
+
+       if (phba->cfg_EnableXLane) {
+
+               rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+                                   phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+               if (rc)
+                       goto out_oas_cq;
+
+               rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+                                   phba->sli4_hba.oas_cq, LPFC_FCP);
+               if (rc)
+                       goto out_oas_wq;
+
+               phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
+               phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+       }
+
+       return 0;
+
+out_oas_wq:
+       if (phba->cfg_EnableXLane)
+               lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+       lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+       return rc;
+
+}
+
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as place holder.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+       struct lpfc_queue *qdesc;
+
+       /* Create FOF EQ */
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+                                     phba->sli4_hba.eq_ecount);
+       if (!qdesc)
+               goto out_error;
+
+       phba->sli4_hba.fof_eq = qdesc;
+
+       if (phba->cfg_EnableXLane) {
+
+               /* Create OAS CQ */
+               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+                                                     phba->sli4_hba.cq_ecount);
+               if (!qdesc)
+                       goto out_error;
+
+               phba->sli4_hba.oas_cq = qdesc;
+
+               /* Create OAS WQ */
+               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+                                             phba->sli4_hba.wq_ecount);
+               if (!qdesc)
+                       goto out_error;
+
+               phba->sli4_hba.oas_wq = qdesc;
+
+       }
+       return 0;
+
+out_error:
+       lpfc_fof_queue_destroy(phba);
+       return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues with the FC HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ **/
+int
+lpfc_fof_queue_destroy(struct lpfc_hba *phba)
+{
+       /* Release FOF Event queue */
+       if (phba->sli4_hba.fof_eq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
+               phba->sli4_hba.fof_eq = NULL;
+       }
+
+       /* Release OAS Completion queue */
+       if (phba->sli4_hba.oas_cq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
+               phba->sli4_hba.oas_cq = NULL;
+       }
+
+       /* Release OAS Work queue */
+       if (phba->sli4_hba.oas_wq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
+               phba->sli4_hba.oas_wq = NULL;
+       }
+       return 0;
+}
+
 static struct pci_device_id lpfc_id_table[] = {
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
                PCI_ANY_ID, PCI_ANY_ID, },
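
A recurring detail in the lpfc_init.c changes above is that cfg_fof adds
exactly one extra MSI-X vector, appended after the FCP IO channels and wired
to lpfc_sli4_fof_intr_handler() rather than the regular FCP handler.  A
standalone sketch of that accounting (the channel count is made up for
illustration):

#include <stdio.h>

int main(void)
{
	int cfg_fcp_io_channel = 4;	/* example value */
	int cfg_fof = 1;
	int vectors = cfg_fcp_io_channel + (cfg_fof ? 1 : 0);
	int index;

	for (index = 0; index < vectors; index++) {
		if (cfg_fof && index == vectors - 1)
			printf("vector %d -> lpfc_sli4_fof_intr_handler\n", index);
		else
			printf("vector %d -> lpfc_sli4_hba_intr_handler\n", index);
	}
	return 0;
}
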
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index b1db23c..ed419aa 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -42,6 +42,7 @@
 
 #define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */
 #define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
+#define LPFC_DEVICE_DATA_POOL_SIZE 64   /* max elements in device data pool */
 
 int
 lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
@@ -164,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
                phba->lpfc_drb_pool = NULL;
        }
 
+       if (phba->cfg_EnableXLane) {
+               phba->device_data_mem_pool = mempool_create_kmalloc_pool(
+                                       LPFC_DEVICE_DATA_POOL_SIZE,
+                                       sizeof(struct lpfc_device_data));
+               if (!phba->device_data_mem_pool)
+                       goto fail_free_hrb_pool;
+       } else {
+               phba->device_data_mem_pool = NULL;
+       }
+
        return 0;
  fail_free_hrb_pool:
        pci_pool_destroy(phba->lpfc_hrb_pool);
@@ -206,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba)
 {
        int i;
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+       struct lpfc_device_data *device_data;
 
        /* Free HBQ pools */
        lpfc_sli_hbqbuf_free_all(phba);
@@ -249,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba)
        pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
        phba->lpfc_scsi_dma_buf_pool = NULL;
 
+       /* Free Device Data memory pool */
+       if (phba->device_data_mem_pool) {
+               /* Ensure all objects have been returned to the pool */
+               while (!list_empty(&phba->luns)) {
+                       device_data = list_first_entry(&phba->luns,
+                                                      struct lpfc_device_data,
+                                                      listentry);
+                       list_del(&device_data->listentry);
+                       mempool_free(device_data, phba->device_data_mem_pool);
+               }
+               mempool_destroy(phba->device_data_mem_pool);
+       }
+       phba->device_data_mem_pool = NULL;
        return;
 }
 
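
The teardown added to lpfc_mem_free() above unlinks every lpfc_device_data
still on phba->luns and returns it to the pool before the pool itself is
destroyed.  A userspace analogue of that drain loop, with a hand-rolled
singly linked list and free() standing in for mempool_free():

#include <stdio.h>
#include <stdlib.h>

struct device_data {
	unsigned long long lun;
	struct device_data *next;	/* stand-in for struct list_head */
};

int main(void)
{
	struct device_data *luns = NULL, *d;
	unsigned long long i;

	for (i = 0; i < 3; i++) {	/* populate the list */
		d = malloc(sizeof(*d));
		d->lun = i;
		d->next = luns;
		luns = d;
	}
	while (luns) {			/* drain before destroying the pool */
		d = luns;		/* list_first_entry() */
		luns = d->next;		/* list_del() */
		printf("returning lun %llu\n", d->lun);
		free(d);		/* mempool_free() */
	}
	/* only now would mempool_destroy() be safe to call */
	return 0;
}
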
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0b08188..4015fcc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -68,6 +68,17 @@ struct scsi_dif_tuple {
        __be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
+static struct lpfc_rport_data *
+lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
+{
+       struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
+
+       if (vport->phba->cfg_EnableXLane)
+               return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
+       else
+               return (struct lpfc_rport_data *)sdev->hostdata;
+}
+
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
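
With the helper above, every consumer of sdev->hostdata goes through a single
accessor: when cfg_EnableXLane is set, hostdata points at the per-lun
lpfc_device_data and the rport data sits one level deeper; otherwise it is
the rport data itself.  A small standalone model of that indirection (the
structure names here are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct rport_data { int pnode; };
struct device_data { struct rport_data *rport_data; bool oas_enabled; };

static struct rport_data *rport_from_hostdata(void *hostdata, bool xlane)
{
	if (xlane)
		return ((struct device_data *)hostdata)->rport_data;
	return (struct rport_data *)hostdata;
}

int main(void)
{
	struct rport_data rd = { .pnode = 42 };
	struct device_data dd = { .rport_data = &rd, .oas_enabled = true };

	printf("%d\n", rport_from_hostdata(&dd, true)->pnode);		/* 42 */
	printf("%d\n", rport_from_hostdata(&rd, false)->pnode);	/* 42 */
	return 0;
}
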
@@ -306,7 +317,7 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
        old_queue_depth = sdev->queue_depth;
        scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
        new_queue_depth = sdev->queue_depth;
-       rdata = sdev->hostdata;
+       rdata = lpfc_rport_data_from_scsi_device(sdev);
        if (rdata)
                lpfc_send_sdev_queuedepth_change_event(phba, vport,
                                                       rdata->pnode, sdev->lun,
@@ -1502,7 +1513,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        }
 
        /* Next check if we need to match the remote NPortID or WWPN */
-       rdata = sc->device->hostdata;
+       rdata = lpfc_rport_data_from_scsi_device(sc->device);
        if (rdata && rdata->pnode) {
                ndlp = rdata->pnode;
 
@@ -3507,6 +3518,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
         * we need to set word 4 of IOCB here
         */
        iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+
+       /*
+        * If the OAS driver feature is enabled and the lun is enabled for
+        * OAS, set the oas iocb related flags.
+        */
+       if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
+               scsi_cmnd->device->hostdata)->oas_enabled)
+               lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
        return 0;
 }
 
@@ -4691,12 +4710,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
-       struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+       struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *ndlp;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
        int err;
 
+       rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
        err = fc_remote_port_chkready(rport);
        if (err) {
                cmnd->result = err;
@@ -5179,10 +5199,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
 static int
 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
 {
-       struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+       struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *pnode;
        unsigned long later;
 
+       rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
        if (!rdata) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                        "0797 Tgt Map rport failure: rdata x%p\n", rdata);
@@ -5200,7 +5221,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
                if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
                        return SUCCESS;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-               rdata = cmnd->device->hostdata;
+               rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
                if (!rdata)
                        return FAILED;
                pnode = rdata->pnode;
@@ -5272,13 +5293,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 {
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-       struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+       struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *pnode;
        unsigned tgt_id = cmnd->device->id;
        unsigned int lun_id = cmnd->device->lun;
        struct lpfc_scsi_event_header scsi_event;
        int status;
 
+       rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
        if (!rdata) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                        "0798 Device Reset rport failure: rdata x%p\n", rdata);
@@ -5341,13 +5363,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 {
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-       struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+       struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *pnode;
        unsigned tgt_id = cmnd->device->id;
        unsigned int lun_id = cmnd->device->lun;
        struct lpfc_scsi_event_header scsi_event;
        int status;
 
+       rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
        if (!rdata) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                        "0799 Target Reset rport failure: rdata x%p\n", rdata);
@@ -5547,11 +5570,45 @@ lpfc_slave_alloc(struct scsi_device *sdev)
        uint32_t num_to_alloc = 0;
        int num_allocated = 0;
        uint32_t sdev_cnt;
+       struct lpfc_device_data *device_data;
+       unsigned long flags;
+       struct lpfc_name target_wwpn;
 
        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;
 
-       sdev->hostdata = rport->dd_data;
+       if (phba->cfg_EnableXLane) {
+
+               /*
+                * Check to see if the device data structure for the lun
+                * exists.  If not, create one.
+                */
+
+               u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
+               spin_lock_irqsave(&phba->devicelock, flags);
+               device_data = __lpfc_get_device_data(phba,
+                                                    &phba->luns,
+                                                    &vport->fc_portname,
+                                                    &target_wwpn,
+                                                    sdev->lun);
+               if (!device_data) {
+                       spin_unlock_irqrestore(&phba->devicelock, flags);
+                       device_data = lpfc_create_device_data(phba,
+                                                       &vport->fc_portname,
+                                                       &target_wwpn,
+                                                       sdev->lun, true);
+                       if (!device_data)
+                               return -ENOMEM;
+                       spin_lock_irqsave(&phba->devicelock, flags);
+                       list_add_tail(&device_data->listentry, &phba->luns);
+               }
+               device_data->rport_data = rport->dd_data;
+               device_data->available = true;
+               spin_unlock_irqrestore(&phba->devicelock, flags);
+               sdev->hostdata = device_data;
+       } else {
+               sdev->hostdata = rport->dd_data;
+       }
        sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
 
        /*
@@ -5641,11 +5698,344 @@ lpfc_slave_destroy(struct scsi_device *sdev)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
+       unsigned long flags;
+       struct lpfc_device_data *device_data = sdev->hostdata;
+
        atomic_dec(&phba->sdev_cnt);
+       if ((phba->cfg_EnableXLane) && (device_data)) {
+               spin_lock_irqsave(&phba->devicelock, flags);
+               device_data->available = false;
+               if (!device_data->oas_enabled)
+                       lpfc_delete_device_data(phba, device_data);
+               spin_unlock_irqrestore(&phba->devicelock, flags);
+       }
        sdev->hostdata = NULL;
        return;
 }
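A minimal, self-contained sketch of the lookup-or-create pattern used by
lpfc_slave_alloc() above: search under the lock, drop the lock only for the
allocation that may sleep, then retake it to insert.  The lun_entry type, the
list, and the pthread locking are illustrative stand-ins, not the driver's own
structures.

#include <pthread.h>
#include <stdlib.h>

struct lun_entry {
        struct lun_entry *next;
        unsigned long long lun;
};

static pthread_mutex_t lun_lock = PTHREAD_MUTEX_INITIALIZER;
static struct lun_entry *lun_list;

static struct lun_entry *lookup_or_create(unsigned long long lun)
{
        struct lun_entry *e;

        /* Search under the lock first. */
        pthread_mutex_lock(&lun_lock);
        for (e = lun_list; e; e = e->next) {
                if (e->lun == lun) {
                        pthread_mutex_unlock(&lun_lock);
                        return e;
                }
        }
        pthread_mutex_unlock(&lun_lock);        /* drop the lock to allocate */

        e = calloc(1, sizeof(*e));              /* may block, like GFP_KERNEL */
        if (!e)
                return NULL;
        e->lun = lun;

        pthread_mutex_lock(&lun_lock);          /* retake the lock to insert */
        e->next = lun_list;
        lun_list = e;
        pthread_mutex_unlock(&lun_lock);
        return e;
}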
 
+/**
+ * lpfc_create_device_data - creates and initializes device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ * @atomic_create: Flag to indicate if memory should be allocated using the
+ *               GFP_ATOMIC flag or not.
+ *
+ * This routine creates a device data structure which will contain identifying
+ * information for the device (host wwpn, target wwpn, lun), the state of OAS,
+ * whether or not the corresponding lun is available to the system,
+ * and a pointer to the rport data.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+                       struct lpfc_name *target_wwpn, uint64_t lun,
+                       bool atomic_create)
+{
+
+       struct lpfc_device_data *lun_info;
+       int memory_flags;
+
+       if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
+           !(phba->cfg_EnableXLane))
+               return NULL;
+
+       /* Attempt to create the device data to contain lun info */
+
+       if (atomic_create)
+               memory_flags = GFP_ATOMIC;
+       else
+               memory_flags = GFP_KERNEL;
+       lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
+       if (!lun_info)
+               return NULL;
+       INIT_LIST_HEAD(&lun_info->listentry);
+       lun_info->rport_data  = NULL;
+       memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
+              sizeof(struct lpfc_name));
+       memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
+              sizeof(struct lpfc_name));
+       lun_info->device_id.lun = lun;
+       lun_info->oas_enabled = false;
+       lun_info->available = false;
+       return lun_info;
+}
+
+/**
+ * lpfc_delete_device_data - frees a device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @lun_info: Pointer to device data structure to free.
+ *
+ * This routine frees the device data structure that was previously allocated.
+ *
+ **/
+void
+lpfc_delete_device_data(struct lpfc_hba *phba,
+                       struct lpfc_device_data *lun_info)
+{
+
+       if (unlikely(!phba) || !lun_info  ||
+           !(phba->cfg_EnableXLane))
+               return;
+
+       if (!list_empty(&lun_info->listentry))
+               list_del(&lun_info->listentry);
+       mempool_free(lun_info, phba->device_data_mem_pool);
+       return;
+}
+
+/**
+ * __lpfc_get_device_data - returns the device data for the specified lun
+ * @phba: Pointer to host bus adapter structure.
+ * @list: Pointer to the list to search.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ *
+ * This routine searches the list passed for the specified lun's device data.
+ * This function does not take any locks; it is the responsibility of the
+ * caller to ensure the proper lock is held before calling the function.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
+                      struct lpfc_name *vport_wwpn,
+                      struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+       struct lpfc_device_data *lun_info;
+
+       if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
+           !phba->cfg_EnableXLane)
+               return NULL;
+
+       /* Search the list for an entry matching the vport/target wwpns and lun. */
+
+       list_for_each_entry(lun_info, list, listentry) {
+               if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+                           sizeof(struct lpfc_name)) == 0) &&
+                   (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+                           sizeof(struct lpfc_name)) == 0) &&
+                   (lun_info->device_id.lun == lun))
+                       return lun_info;
+       }
+
+       return NULL;
+}
+
+/**
+ * lpfc_find_next_oas_lun - searches for the next oas lun
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @starting_lun: Pointer to the lun to start searching for
+ * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
+ * @found_target_wwpn: Pointer to the found lun's target wwpn information
+ * @found_lun: Pointer to the found lun.
+ * @found_lun_status: Pointer to status of the found lun.
+ *
+ * This routine searches the luns list for the specified lun
+ * or the first lun for the vport/target.  If the vport wwpn contains
+ * a zero value then a specific vport is not specified. In this case
+ * any vport which contains the lun will be considered a match.  If the
+ * target wwpn contains a zero value then a specific target is not specified.
+ * In this case any target which contains the lun will be considered a
+ * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
+ * are returned.  The function will also return the next lun if available.
+ * If no next lun is found, the starting_lun parameter will be set to
+ * NO_MORE_OAS_LUN.
+ *
+ * Return codes:
+ *   true - lun found and its information returned
+ *   false - lun not found (or invalid arguments)
+ **/
+bool
+lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+                      struct lpfc_name *target_wwpn, uint64_t *starting_lun,
+                      struct lpfc_name *found_vport_wwpn,
+                      struct lpfc_name *found_target_wwpn,
+                      uint64_t *found_lun,
+                      uint32_t *found_lun_status)
+{
+
+       unsigned long flags;
+       struct lpfc_device_data *lun_info;
+       struct lpfc_device_id *device_id;
+       uint64_t lun;
+       bool found = false;
+
+       if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+           !starting_lun || !found_vport_wwpn ||
+           !found_target_wwpn || !found_lun || !found_lun_status ||
+           (*starting_lun == NO_MORE_OAS_LUN) ||
+           !phba->cfg_EnableXLane)
+               return false;
+
+       lun = *starting_lun;
+       *found_lun = NO_MORE_OAS_LUN;
+       *starting_lun = NO_MORE_OAS_LUN;
+
+       /* Search for the requested lun, or for the first OAS-enabled lun */
+
+       spin_lock_irqsave(&phba->devicelock, flags);
+       list_for_each_entry(lun_info, &phba->luns, listentry) {
+               if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
+                    (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+                           sizeof(struct lpfc_name)) == 0)) &&
+                   ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
+                    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+                           sizeof(struct lpfc_name)) == 0)) &&
+                   (lun_info->oas_enabled)) {
+                       device_id = &lun_info->device_id;
+                       if ((!found) &&
+                           ((lun == FIND_FIRST_OAS_LUN) ||
+                            (device_id->lun == lun))) {
+                               *found_lun = device_id->lun;
+                               memcpy(found_vport_wwpn,
+                                      &device_id->vport_wwpn,
+                                      sizeof(struct lpfc_name));
+                               memcpy(found_target_wwpn,
+                                      &device_id->target_wwpn,
+                                      sizeof(struct lpfc_name));
+                               if (lun_info->available)
+                                       *found_lun_status =
+                                               OAS_LUN_STATUS_EXISTS;
+                               else
+                                       *found_lun_status = 0;
+                               if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
+                                       memset(vport_wwpn, 0x0,
+                                              sizeof(struct lpfc_name));
+                               if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
+                                       memset(target_wwpn, 0x0,
+                                              sizeof(struct lpfc_name));
+                               found = true;
+                       } else if (found) {
+                               *starting_lun = device_id->lun;
+                               memcpy(vport_wwpn, &device_id->vport_wwpn,
+                                      sizeof(struct lpfc_name));
+                               memcpy(target_wwpn, &device_id->target_wwpn,
+                                      sizeof(struct lpfc_name));
+                               break;
+                       }
+               }
+       }
+       spin_unlock_irqrestore(&phba->devicelock, flags);
+       return found;
+}
+
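The kernel-doc above describes a cursor-style contract.  A hedged sketch of a
caller that walks every OAS-enabled lun follows; walk_oas_luns() is
hypothetical and not part of this patch, and the zeroed wwpns request the
any-vport/any-target matching documented above.

static void walk_oas_luns(struct lpfc_hba *phba)
{
        struct lpfc_name vport_wwpn, target_wwpn;
        struct lpfc_name found_vport_wwpn, found_target_wwpn;
        uint64_t starting_lun = FIND_FIRST_OAS_LUN;
        uint64_t found_lun;
        uint32_t found_lun_status;

        /* Zeroed wwpns mean "match any vport/target", per the kernel-doc. */
        memset(&vport_wwpn, 0, sizeof(vport_wwpn));
        memset(&target_wwpn, 0, sizeof(target_wwpn));

        /* Each call returns one OAS lun and advances starting_lun to the
         * next one, or to NO_MORE_OAS_LUN when the list is exhausted.
         */
        while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn,
                                      &starting_lun, &found_vport_wwpn,
                                      &found_target_wwpn, &found_lun,
                                      &found_lun_status)) {
                /* Consume found_lun / found_lun_status here. */
                if (starting_lun == NO_MORE_OAS_LUN)
                        break;
        }
}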
+/**
+ * lpfc_enable_oas_lun - enables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine enables a lun for OAS operations.  The routine does so by
+ * doing the following:
+ *
+ *   1) Checks to see if the device data for the lun has been created.
+ *   2) If found, sets the OAS enabled flag if not already set and returns.
+ *   3) Otherwise, creates a device data structure.
+ *   4) If successfully created, marks the device data as belonging to an OAS
+ *   lun, marks the lun as not available, and adds it to the list of luns.
+ *
+ * Return codes:
+ *   false - Error
+ *   true - Success
+ **/
+bool
+lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+                   struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+       struct lpfc_device_data *lun_info;
+       unsigned long flags;
+
+       if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+           !phba->cfg_EnableXLane)
+               return false;
+
+       spin_lock_irqsave(&phba->devicelock, flags);
+
+       /* Check to see if the device data for the lun has been created */
+       lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
+                                         target_wwpn, lun);
+       if (lun_info) {
+               if (!lun_info->oas_enabled)
+                       lun_info->oas_enabled = true;
+               spin_unlock_irqrestore(&phba->devicelock, flags);
+               return true;
+       }
+
+       /* Create a lun info structure and add it to the list of luns */
+       lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
+                                          false);
+       if (lun_info) {
+               lun_info->oas_enabled = true;
+               lun_info->available = false;
+               list_add_tail(&lun_info->listentry, &phba->luns);
+               spin_unlock_irqrestore(&phba->devicelock, flags);
+               return true;
+       }
+       spin_unlock_irqrestore(&phba->devicelock, flags);
+       return false;
+}
+
+/**
+ * lpfc_disable_oas_lun - disables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine disables a lun for OAS operations.  The routine does so by
+ * doing the following:
+ *
+ *   1) Checks to see if the device data for the lun has been created.
+ *   2) If present, clears the flag indicating this lun is for OAS.
+ *   3) If the lun is not available to the system, the device data is
+ *   freed.
+ *
+ * Return codes:
+ *   false - Error
+ *   true - Success
+ **/
+bool
+lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+                    struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+       struct lpfc_device_data *lun_info;
+       unsigned long flags;
+
+       if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+           !phba->cfg_EnableXLane)
+               return false;
+
+       spin_lock_irqsave(&phba->devicelock, flags);
+
+       /* Check to see if the device data for the lun has been created. */
+       lun_info = __lpfc_get_device_data(phba,
+                                         &phba->luns, vport_wwpn,
+                                         target_wwpn, lun);
+       if (lun_info) {
+               lun_info->oas_enabled = false;
+               if (!lun_info->available)
+                       lpfc_delete_device_data(phba, lun_info);
+               spin_unlock_irqrestore(&phba->devicelock, flags);
+               return true;
+       }
+
+       spin_unlock_irqrestore(&phba->devicelock, flags);
+       return false;
+}
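For illustration, a hypothetical management-side wrapper over the two routines
above; the patch's real callers are the OAS sysfs handlers in lpfc_attr.c,
which are not part of this hunk, and the helper name and error code below are
assumptions.

static int lpfc_oas_set_lun_state(struct lpfc_hba *phba,
                                  struct lpfc_name *vport_wwpn,
                                  struct lpfc_name *target_wwpn,
                                  uint64_t lun, uint32_t lun_state)
{
        bool ok;

        /* lun_state is expected to be OAS_LUN_ENABLE or OAS_LUN_DISABLE. */
        if (lun_state == OAS_LUN_ENABLE)
                ok = lpfc_enable_oas_lun(phba, vport_wwpn, target_wwpn, lun);
        else
                ok = lpfc_disable_oas_lun(phba, vport_wwpn, target_wwpn, lun);

        return ok ? 0 : -EPERM;
}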
 
 struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
index 852ff7d..0120bfc 100644 (file)
@@ -41,6 +41,20 @@ struct lpfc_rport_data {
        struct lpfc_nodelist *pnode;    /* Pointer to the node structure. */
 };
 
+struct lpfc_device_id {
+       struct lpfc_name vport_wwpn;
+       struct lpfc_name target_wwpn;
+       uint64_t lun;
+};
+
+struct lpfc_device_data {
+       struct list_head listentry;
+       struct lpfc_rport_data *rport_data;
+       struct lpfc_device_id device_id;
+       bool oas_enabled;
+       bool available;
+};
+
 struct fcp_rsp {
        uint32_t rspRsvd1;      /* FC Word 0, byte 0:3 */
        uint32_t rspRsvd2;      /* FC Word 1, byte 0:3 */
@@ -166,3 +180,7 @@ struct lpfc_scsi_buf {
 #define LPFC_SCSI_DMA_EXT_SIZE 264
 #define LPFC_BPL_SIZE          1024
 #define MDAC_DIRECT_CMD                  0x22
+
+#define FIND_FIRST_OAS_LUN              0
+#define NO_MORE_OAS_LUN                        -1
+#define NOT_OAS_ENABLED_LUN            NO_MORE_OAS_LUN
index c7181d8..38e56d9 100644 (file)
@@ -4976,12 +4976,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
                                             LPFC_QUEUE_REARM);
                } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
        }
+
+       if (phba->cfg_EnableXLane)
+               lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
        if (phba->sli4_hba.hba_eq) {
                for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
                     fcp_eqidx++)
                        lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
                                             LPFC_QUEUE_REARM);
        }
+
+       if (phba->cfg_fof)
+               lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
 }
 
 /**
@@ -8256,6 +8263,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
                bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
                bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+               if (iocbq->iocb_flag & LPFC_IO_OAS) {
+                       bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+                       if (phba->cfg_XLanePriority) {
+                               bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+                               bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+                                      (phba->cfg_XLanePriority << 1));
+                       }
+               }
                break;
        case CMD_FCP_IREAD64_CR:
                /* word3 iocb=iotag wqe=payload_offset_len */
@@ -8277,6 +8292,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
                bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
                bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+               if (iocbq->iocb_flag & LPFC_IO_OAS) {
+                       bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+                       if (phba->cfg_XLanePriority) {
+                               bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+                               bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+                                      (phba->cfg_XLanePriority << 1));
+                       }
+               }
                break;
        case CMD_FCP_ICMND64_CR:
                /* word3 iocb=iotag wqe=payload_offset_len */
@@ -8297,6 +8320,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
                bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
                       iocbq->iocb.ulpFCP2Rcvy);
+               if (iocbq->iocb_flag & LPFC_IO_OAS) {
+                       bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+                       if (phba->cfg_XLanePriority) {
+                               bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+                               bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+                                      (phba->cfg_XLanePriority << 1));
+                       }
+               }
                break;
        case CMD_GEN_REQUEST64_CR:
                /* For this command calculate the xmit length of the
@@ -8529,6 +8560,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 {
        struct lpfc_sglq *sglq;
        union lpfc_wqe wqe;
+       struct lpfc_queue *wq;
        struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
 
        if (piocb->sli4_xritag == NO_XRI) {
@@ -8581,11 +8613,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
                return IOCB_ERROR;
 
        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
-               (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-               if (unlikely(!phba->sli4_hba.fcp_wq))
-                       return IOCB_ERROR;
-               if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
-                                    &wqe))
+           (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+               if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+                       LPFC_IO_OAS))) {
+                       wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+               } else {
+                       wq = phba->sli4_hba.oas_wq;
+               }
+               if (lpfc_sli4_wq_put(wq, &wqe))
                        return IOCB_ERROR;
        } else {
                if (unlikely(!phba->sli4_hba.els_wq))
@@ -8675,12 +8710,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 
        if (phba->sli_rev == LPFC_SLI_REV4) {
                if (piocb->iocb_flag &  LPFC_IO_FCP) {
-                       if (unlikely(!phba->sli4_hba.fcp_wq))
-                               return IOCB_ERROR;
-                       idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-                       piocb->fcp_wqidx = idx;
-                       ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-
+                       if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+                               LPFC_IO_OAS))) {
+                               if (unlikely(!phba->sli4_hba.fcp_wq))
+                                       return IOCB_ERROR;
+                               idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+                               piocb->fcp_wqidx = idx;
+                               ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+                       } else {
+                               if (unlikely(!phba->sli4_hba.oas_wq))
+                                       return IOCB_ERROR;
+                               idx = 0;
+                               piocb->fcp_wqidx = 0;
+                               ring_number =  LPFC_FCP_OAS_RING;
+                       }
                        pring = &phba->sli.ring[ring_number];
                        spin_lock_irqsave(&pring->ring_lock, iflags);
                        rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -12138,6 +12181,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
        lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 }
 
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ *                          entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized Fabric
+ * event queue.  It checks the MajorCode and MinorCode to determine whether
+ * the entry is for a completion event on a completion queue; if not, an error
+ * is logged and the routine returns. Otherwise, it locates the corresponding
+ * completion queue, processes all the entries on that completion queue,
+ * rearms the completion queue, and then returns.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+       struct lpfc_queue *cq;
+       struct lpfc_cqe *cqe;
+       bool workposted = false;
+       uint16_t cqid;
+       int ecount = 0;
+
+       if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "9147 Not a valid completion "
+                               "event: majorcode=x%x, minorcode=x%x\n",
+                               bf_get_le32(lpfc_eqe_major_code, eqe),
+                               bf_get_le32(lpfc_eqe_minor_code, eqe));
+               return;
+       }
+
+       /* Get the reference to the corresponding CQ */
+       cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+       /* Next check for OAS */
+       cq = phba->sli4_hba.oas_cq;
+       if (unlikely(!cq)) {
+               if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "9148 OAS completion queue "
+                                       "does not exist\n");
+               return;
+       }
+
+       if (unlikely(cqid != cq->queue_id)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "9149 Miss-matched fast-path compl "
+                               "queue id: eqcqid=%d, fcpcqid=%d\n",
+                               cqid, cq->queue_id);
+               return;
+       }
+
+       /* Process all the entries to the OAS CQ */
+       while ((cqe = lpfc_sli4_cq_get(cq))) {
+               workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+               if (!(++ecount % cq->entry_repost))
+                       lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+       }
+
+       /* Track the max number of CQEs processed in 1 EQ */
+       if (ecount > cq->CQ_max_cqe)
+               cq->CQ_max_cqe = ecount;
+
+       /* Catch the no cq entry condition */
+       if (unlikely(ecount == 0))
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "9153 No entry from fast-path completion "
+                               "queue fcpcqid=%d\n", cq->queue_id);
+
+       /* In any case, flush and re-arm the CQ */
+       lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+       /* wake up worker thread if there are works to be done */
+       if (workposted)
+               lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. Flash Optimized Fabric ring events are handled in
+ * interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the EQ-to-CQ mapping is one-to-one, so the EQ index is equal to the
+ * CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+       struct lpfc_hba *phba;
+       struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+       struct lpfc_queue *eq;
+       struct lpfc_eqe *eqe;
+       unsigned long iflag;
+       int ecount = 0;
+       uint32_t eqidx;
+
+       /* Get the driver's phba structure from the dev_id */
+       fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+       phba = fcp_eq_hdl->phba;
+       eqidx = fcp_eq_hdl->idx;
+
+       if (unlikely(!phba))
+               return IRQ_NONE;
+
+       /* Get to the EQ struct associated with this vector */
+       eq = phba->sli4_hba.fof_eq;
+       if (unlikely(!eq))
+               return IRQ_NONE;
+
+       /* Check device state for handling interrupt */
+       if (unlikely(lpfc_intr_state_check(phba))) {
+               eq->EQ_badstate++;
+               /* Check again for link_state with lock held */
+               spin_lock_irqsave(&phba->hbalock, iflag);
+               if (phba->link_state < LPFC_LINK_DOWN)
+                       /* Flush, clear interrupt, and rearm the EQ */
+                       lpfc_sli4_eq_flush(phba, eq);
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
+               return IRQ_NONE;
+       }
+
+       /*
+        * Process all the event on FCP fast-path EQ
+        */
+       while ((eqe = lpfc_sli4_eq_get(eq))) {
+               lpfc_sli4_fof_handle_eqe(phba, eqe);
+               if (!(++ecount % eq->entry_repost))
+                       lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+               eq->EQ_processed++;
+       }
+
+       /* Track the max number of EQEs processed in 1 intr */
+       if (ecount > eq->EQ_max_eqe)
+               eq->EQ_max_eqe = ecount;
+
+
+       if (unlikely(ecount == 0)) {
+               eq->EQ_no_entry++;
+
+               if (phba->intr_type == MSIX)
+                       /* MSI-X vectors are not shared; an empty EQ is unexpected */
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                       "9145 MSI-X interrupt with no EQE\n");
+               else {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "9146 ISR interrupt with no EQE\n");
+                       /* Non MSI-X interrupts may be shared; no EQE means it was not ours */
+                       return IRQ_NONE;
+               }
+       }
+       /* Always clear and re-arm the fast-path EQ */
+       lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+       return IRQ_HANDLED;
+}
+
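This handler is expected to be hooked to its own MSI-X vector by the setup
code in lpfc_init.c, which is not shown in these hunks.  A hedged sketch of
such a registration follows; the index variable, the flags, and the error
handling are assumptions, not the patch's code.

        /* Hypothetical: request the extra MSI-X vector for the FOF EQ. */
        rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
                         &lpfc_sli4_fof_intr_handler, 0,
                         (char *)&phba->sli4_hba.handler_name[index],
                         &phba->sli4_hba.fcp_eq_hdl[0]);
        if (rc)
                return rc;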
 /**
  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
  * @irq: Interrupt number.
@@ -12293,6 +12505,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
                        hba_handled |= true;
        }
 
+       if (phba->cfg_fof) {
+               hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+                                       &phba->sli4_hba.fcp_eq_hdl[0]);
+               if (hba_irq_rc == IRQ_HANDLED)
+                       hba_handled |= true;
+       }
+
        return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
 } /* lpfc_sli4_intr_handler */
 
index 6b0f247..6f04080 100644 (file)
@@ -78,6 +78,8 @@ struct lpfc_iocbq {
 #define LPFC_FIP_ELS_ID_MASK   0xc000  /* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT  14
 
+#define LPFC_IO_OAS            0x10000 /* OAS FCP IO */
+
        uint32_t drvrTimeout;   /* driver timeout in seconds */
        uint32_t fcp_wqidx;     /* index to FCP work queue */
        struct lpfc_vport *vport;/* virtual port pointer */
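The new flag ties the pieces together: an FCP command bound for an OAS lun is
expected to carry LPFC_IO_OAS before issue, so that the SLI4 issue path above
selects the dedicated OAS WQ/ring and lpfc_sli4_iocb2wqe() sets the OAS and
CCP bits in the WQE.  A hypothetical issue-side fragment follows; the real
flag-setting site is in the FCP I/O path and is not part of these hunks.

        /* Illustrative only: tag the command as OAS, then issue as usual;
         * for SLI4 the ring/WQ selection is recomputed inside the SLI layer.
         */
        lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
        err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
                                  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);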
index 298c8cd..e432590 100644 (file)
 #define LPFC_FCP_IO_CHAN_MIN       1
 #define LPFC_FCP_IO_CHAN_MAX       16
 
+/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+
+#define LPFC_FOF_IO_CHAN_NUM       1
+
 /*
  * Provide the default FCF Record attributes used by the driver
  * when nonFIP mode is configured and there is no other default
@@ -399,6 +403,7 @@ struct lpfc_pc_sli4_params {
        uint32_t if_page_sz;
        uint32_t rq_db_window;
        uint32_t loopbk_scope;
+       uint32_t oas_supported;
        uint32_t eq_pages_max;
        uint32_t eqe_size;
        uint32_t cq_pages_max;
@@ -439,6 +444,8 @@ struct lpfc_sli4_lnk_info {
        uint8_t lnk_no;
 };
 
+#define LPFC_SLI4_HANDLER_CNT          (LPFC_FCP_IO_CHAN_MAX+ \
+                                        LPFC_FOF_IO_CHAN_NUM)
 #define LPFC_SLI4_HANDLER_NAME_SZ      16
 
 /* Used for IRQ vector to CPU mapping */
@@ -507,7 +514,7 @@ struct lpfc_sli4_hba {
        struct lpfc_register sli_intf;
        struct lpfc_pc_sli4_params pc_sli4_params;
        struct msix_entry *msix_entries;
-       uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
+       uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
        struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
 
        /* Pointers to the constructed SLI4 queues */
@@ -527,6 +534,17 @@ struct lpfc_sli4_hba {
        uint32_t ulp0_mode;     /* ULP0 protocol mode */
        uint32_t ulp1_mode;     /* ULP1 protocol mode */
 
+       struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
+
+       /* Optimized Access Storage specific queues/structures */
+
+       struct lpfc_queue *oas_cq; /* OAS completion queue */
+       struct lpfc_queue *oas_wq; /* OAS Work queue */
+       struct lpfc_sli_ring *oas_ring;
+       uint64_t oas_next_lun;
+       uint8_t oas_next_tgt_wwpn[8];
+       uint8_t oas_next_vpt_wwpn[8];
+
        /* Setup information for various queue parameters */
        int eq_esize;
        int eq_ecount;