Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 26 May 2022 02:09:48 +0000 (19:09 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 26 May 2022 02:09:48 +0000 (19:09 -0700)
Pull SCSI updates from James Bottomley:
 "This consists of a small set of driver updates (lpfc, ufs, mpt3sas
  mpi3mr, iscsi target). Apart from that this is mostly small fixes with
  very few core changes (the biggest one being VPD caching)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (177 commits)
  scsi: target: tcmu: Avoid holding XArray lock when calling lock_page
  scsi: elx: efct: Remove NULL check after calling container_of()
  scsi: dpt_i2o: Drop redundant spinlock initialization
  scsi: qedf: Remove redundant variable op
  scsi: hisi_sas: Fix memory ordering in hisi_sas_task_deliver()
  scsi: fnic: Replace DMA mask of 64 bits with 47 bits
  scsi: mpi3mr: Add target device related sysfs attributes
  scsi: mpi3mr: Add shost related sysfs attributes
  scsi: elx: efct: Remove redundant memset() statement
  scsi: megaraid_sas: Remove redundant memset() statement
  scsi: mpi3mr: Return error if dma_alloc_coherent() fails
  scsi: hisi_sas: Fix rescan after deleting a disk
  scsi: hisi_sas: Use sas_ata_wait_after_reset() in IT nexus reset
  scsi: libsas: Refactor sas_ata_hard_reset()
  scsi: mpt3sas: Update driver version to 42.100.00.00
  scsi: mpt3sas: Fix junk chars displayed while printing ChipName
  scsi: ipr: Use kobj_to_dev()
  scsi: mpi3mr: Fix a NULL vs IS_ERR() bug in mpi3mr_bsg_init()
  scsi: bnx2fc: Avoid using get_cpu() in bnx2fc_cmd_alloc()
  scsi: libfc: Remove get_cpu() semantics in fc_exch_em_alloc()
  ...

169 files changed:
Documentation/ABI/testing/sysfs-driver-ufs
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/message/fusion/mptctl.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aha1542.c
drivers/scsi/bfa/bfad_debugfs.c
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/dc395x.c
drivers/scsi/dpt_i2o.c
drivers/scsi/elx/efct/efct_hw.c
drivers/scsi/elx/efct/efct_io.c
drivers/scsi/elx/efct/efct_lio.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_debugfs.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/hosts.c
drivers/scsi/ipr.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_logmsg.h
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/mac53c94.c
drivers/scsi/megaraid.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpi3mr/Kconfig
drivers/scsi/mpi3mr/Makefile
drivers/scsi/mpi3mr/mpi/mpi30_init.h
drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
drivers/scsi/mpi3mr/mpi/mpi30_pci.h
drivers/scsi/mpi3mr/mpi3mr.h
drivers/scsi/mpi3mr/mpi3mr_app.c [new file with mode: 0644]
drivers/scsi/mpi3mr/mpi3mr_debug.h
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/pmcraid.c
drivers/scsi/qedf/qedf_attr.c
drivers/scsi/qedf/qedf_io.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_edif.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sd_dif.c
drivers/scsi/sd_zbc.c
drivers/scsi/sr.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/cdns-pltfrm.c
drivers/scsi/ufs/tc-dwc-g210-pci.c
drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
drivers/scsi/ufs/tc-dwc-g210.c
drivers/scsi/ufs/tc-dwc-g210.h
drivers/scsi/ufs/ti-j721e-ufs.c
drivers/scsi/ufs/ufs-debugfs.c
drivers/scsi/ufs/ufs-exynos.c
drivers/scsi/ufs/ufs-exynos.h
drivers/scsi/ufs/ufs-hisi.c
drivers/scsi/ufs/ufs-hwmon.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufs-qcom-ice.c
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs-qcom.h
drivers/scsi/ufs/ufs-sysfs.c
drivers/scsi/ufs/ufs-sysfs.h
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufs_bsg.c
drivers/scsi/ufs/ufs_bsg.h
drivers/scsi/ufs/ufs_quirks.h
drivers/scsi/ufs/ufshcd-crypto.h
drivers/scsi/ufs/ufshcd-dwc.c
drivers/scsi/ufs/ufshcd-dwc.h
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd-pltfrm.c
drivers/scsi/ufs/ufshcd-priv.h [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/scsi/ufs/ufshpb.c
drivers/scsi/ufs/ufshpb.h
drivers/scsi/ufs/unipro.h
drivers/scsi/vmw_pvscsi.c
drivers/target/iscsi/cxgbit/cxgbit.h
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
drivers/target/iscsi/cxgbit/cxgbit_main.c
drivers/target/iscsi/cxgbit/cxgbit_target.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target.h
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_auth.h
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_datain_values.c
drivers/target/iscsi/iscsi_target_datain_values.h
drivers/target/iscsi/iscsi_target_device.c
drivers/target/iscsi/iscsi_target_device.h
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl0.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl1.h
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_erl2.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_nego.h
drivers/target/iscsi/iscsi_target_nodeattrib.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_parameters.h
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
drivers/target/iscsi/iscsi_target_seq_pdu_list.h
drivers/target/iscsi/iscsi_target_stat.c
drivers/target/iscsi/iscsi_target_tmr.c
drivers/target/iscsi/iscsi_target_tmr.h
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_tpg.h
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_configfs.c
drivers/target/target_core_spc.c
drivers/target/target_core_user.c
include/scsi/libfcoe.h
include/scsi/libiscsi.h
include/scsi/sas_ata.h
include/scsi/scsi_device.h
include/scsi/scsi_proto.h
include/target/iscsi/iscsi_target_core.h
include/target/iscsi/iscsi_transport.h
include/uapi/scsi/scsi_bsg_mpi3mr.h [new file with mode: 0644]

index a44ef8b..6b248ab 100644 (file)
@@ -1518,7 +1518,7 @@ Description:      This entry shows the number of reads that cannot be changed to
 
                The file is read only.
 
-What:          /sys/class/scsi_device/*/device/hpb_stats/rb_noti_cnt
+What:          /sys/class/scsi_device/*/device/hpb_stats/rcmd_noti_cnt
 Date:          June 2021
 Contact:       Daejun Park <daejun7.park@samsung.com>
 Description:   This entry shows the number of response UPIUs that has
@@ -1526,19 +1526,23 @@ Description:    This entry shows the number of response UPIUs that has
 
                The file is read only.
 
-What:          /sys/class/scsi_device/*/device/hpb_stats/rb_active_cnt
+What:          /sys/class/scsi_device/*/device/hpb_stats/rcmd_active_cnt
 Date:          June 2021
 Contact:       Daejun Park <daejun7.park@samsung.com>
-Description:   This entry shows the number of active sub-regions recommended by
-               response UPIUs.
+Description:   For the HPB device control mode, this entry shows the number of
+        active sub-regions recommended by response UPIUs. For the HPB host control
+        mode, this entry shows the number of active sub-regions recommended by the
+        HPB host control mode heuristic algorithm.
 
                The file is read only.
 
-What:          /sys/class/scsi_device/*/device/hpb_stats/rb_inactive_cnt
+What:          /sys/class/scsi_device/*/device/hpb_stats/rcmd_inactive_cnt
 Date:          June 2021
 Contact:       Daejun Park <daejun7.park@samsung.com>
-Description:   This entry shows the number of inactive regions recommended by
-               response UPIUs.
+Description:   For the HPB device control mode, this entry shows the number of
+        inactive regions recommended by response UPIUs. For the HPB host control
+        mode, this entry shows the number of inactive regions recommended by the
+        HPB host control mode heuristic algorithm.
 
                The file is read only.
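
As a side note on the ABI hunk above: each of these hpb_stats attributes exposes a single decimal counter, so userspace can consume it with a plain formatted read. A minimal sketch in C follows, assuming an HPB-enabled UFS logical unit at SCSI address 0:0:0:0; the device address and the choice of rcmd_active_cnt are illustrative only and not part of the patch.

/*
 * Minimal sketch (illustration only): read one HPB counter exposed by
 * the ABI documented above. The 0:0:0:0 device address is an assumption;
 * substitute the real H:C:T:L of an HPB-enabled UFS logical unit.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/scsi_device/0:0:0:0/device/hpb_stats/rcmd_active_cnt";
	unsigned long long cnt;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &cnt) == 1)
		printf("rcmd_active_cnt: %llu\n", cnt);
	fclose(f);
	return 0;
}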
 
index 636d590..48064bd 100644 (file)
@@ -46,7 +46,7 @@ static struct workqueue_struct *isert_comp_wq;
 static struct workqueue_struct *isert_release_wq;
 
 static int
-isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd);
 static int
 isert_login_post_recv(struct isert_conn *isert_conn);
 static int
@@ -909,7 +909,7 @@ isert_login_post_recv(struct isert_conn *isert_conn)
 }
 
 static int
-isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+isert_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
                   u32 length)
 {
        struct isert_conn *isert_conn = conn->context;
@@ -976,7 +976,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
 {
        struct iser_rx_desc *rx_desc = isert_conn->login_desc;
        int rx_buflen = isert_conn->login_req_len;
-       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsit_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;
 
@@ -1020,21 +1020,21 @@ isert_rx_login_req(struct isert_conn *isert_conn)
        schedule_delayed_work(&conn->login_work, 0);
 }
 
-static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
+static struct iscsit_cmd
+*isert_allocate_cmd(struct iscsit_conn *conn, struct iser_rx_desc *rx_desc)
 {
        struct isert_conn *isert_conn = conn->context;
        struct isert_cmd *isert_cmd;
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
 
        cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd) {
-               isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
+               isert_err("Unable to allocate iscsit_cmd + isert_cmd\n");
                return NULL;
        }
        isert_cmd = iscsit_priv_cmd(cmd);
        isert_cmd->conn = isert_conn;
-       isert_cmd->iscsi_cmd = cmd;
+       isert_cmd->iscsit_cmd = cmd;
        isert_cmd->rx_desc = rx_desc;
 
        return cmd;
@@ -1042,10 +1042,10 @@ static struct iscsi_cmd
 
 static int
 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
-                     struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
+                     struct isert_cmd *isert_cmd, struct iscsit_cmd *cmd,
                      struct iser_rx_desc *rx_desc, unsigned char *buf)
 {
-       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsit_conn *conn = isert_conn->conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
        int imm_data, imm_data_len, unsol_data, sg_nents, rc;
        bool dump_payload = false;
@@ -1114,8 +1114,8 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
 {
        struct scatterlist *sg_start;
-       struct iscsi_conn *conn = isert_conn->conn;
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_conn *conn = isert_conn->conn;
+       struct iscsit_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        u32 unsol_data_len = ntoh24(hdr->dlength);
        int rc, sg_nents, sg_off, page_off;
@@ -1171,10 +1171,10 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
 
 static int
 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-                    struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+                    struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
                     unsigned char *buf)
 {
-       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsit_conn *conn = isert_conn->conn;
        struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
        int rc;
 
@@ -1190,10 +1190,10 @@ isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 
 static int
 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-                     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
+                     struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
                      struct iscsi_text *hdr)
 {
-       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsit_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
        unsigned char *text_in = NULL;
@@ -1220,8 +1220,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t write_stag, uint64_t write_va)
 {
        struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
-       struct iscsi_conn *conn = isert_conn->conn;
-       struct iscsi_cmd *cmd;
+       struct iscsit_conn *conn = isert_conn->conn;
+       struct iscsit_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
@@ -1404,7 +1404,7 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 static void
 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
 {
-       struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+       struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
        enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
 
        if (!cmd->rw.nr_ops)
@@ -1426,9 +1426,9 @@ isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
 static void
 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 {
-       struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+       struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
        struct isert_conn *isert_conn = isert_cmd->conn;
-       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsit_conn *conn = isert_conn->conn;
        struct iscsi_text_rsp *hdr;
 
        isert_dbg("Cmd %p\n", isert_cmd);
@@ -1575,7 +1575,7 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
        struct isert_device *device = isert_conn->device;
        struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
        struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
-       struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
+       struct se_cmd *cmd = &isert_cmd->iscsit_cmd->se_cmd;
        int ret = 0;
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
@@ -1604,7 +1604,7 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
                /*
                 * XXX: isert_put_response() failure is not retried.
                 */
-               ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+               ret = isert_put_response(isert_conn->conn, isert_cmd->iscsit_cmd);
                if (ret)
                        pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
        }
@@ -1617,7 +1617,7 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
        struct isert_device *device = isert_conn->device;
        struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
        struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
-       struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+       struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        int ret = 0;
 
@@ -1662,7 +1662,7 @@ isert_do_control_comp(struct work_struct *work)
                        struct isert_cmd, comp_work);
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct ib_device *ib_dev = isert_conn->cm_id->device;
-       struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+       struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
 
        isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
 
@@ -1720,7 +1720,7 @@ isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
 
        isert_dbg("Cmd %p\n", isert_cmd);
 
-       switch (isert_cmd->iscsi_cmd->i_state) {
+       switch (isert_cmd->iscsit_cmd->i_state) {
        case ISTATE_SEND_TASKMGTRSP:
        case ISTATE_SEND_LOGOUTRSP:
        case ISTATE_SEND_REJECT:
@@ -1731,7 +1731,7 @@ isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
                queue_work(isert_comp_wq, &isert_cmd->comp_work);
                return;
        default:
-               isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
+               isert_cmd->iscsit_cmd->i_state = ISTATE_SENT_STATUS;
                isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
                break;
        }
@@ -1755,7 +1755,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
 }
 
 static int
-isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
@@ -1806,7 +1806,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 }
 
 static void
-isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
@@ -1822,7 +1822,7 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 }
 
 static enum target_prot_op
-isert_get_sup_prot_ops(struct iscsi_conn *conn)
+isert_get_sup_prot_ops(struct iscsit_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
        struct isert_device *device = isert_conn->device;
@@ -1842,7 +1842,7 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
 }
 
 static int
-isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+isert_put_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
                bool nopout_response)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -1862,7 +1862,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 }
 
 static int
-isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
@@ -1880,7 +1880,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 static int
-isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_tm_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
@@ -1898,7 +1898,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 static int
-isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
@@ -1933,7 +1933,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 static int
-isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+isert_put_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
@@ -2035,7 +2035,7 @@ static int
 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
                struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
 {
-       struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+       struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
        enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
        u8 port_num = conn->cm_id->port_num;
        u64 addr;
@@ -2048,7 +2048,7 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
        if (dir == DMA_FROM_DEVICE) {
                addr = cmd->write_va;
                rkey = cmd->write_stag;
-               offset = cmd->iscsi_cmd->write_data_done;
+               offset = cmd->iscsit_cmd->write_data_done;
        } else {
                addr = cmd->read_va;
                rkey = cmd->read_stag;
@@ -2088,7 +2088,7 @@ rdma_ctx_post:
 }
 
 static int
-isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+isert_put_datain(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2129,7 +2129,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 }
 
 static int
-isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+isert_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, bool recovery)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        int ret;
@@ -2147,7 +2147,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 }
 
 static int
-isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+isert_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        int ret = 0;
@@ -2172,7 +2172,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 }
 
 static int
-isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+isert_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
 {
        struct isert_conn *isert_conn = conn->context;
        int ret;
@@ -2332,7 +2332,7 @@ isert_rdma_accept(struct isert_conn *isert_conn)
 }
 
 static int
-isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+isert_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
 {
        struct isert_conn *isert_conn = conn->context;
        int ret;
@@ -2368,7 +2368,7 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
 }
 
 static void
-isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+isert_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
                    struct isert_conn *isert_conn)
 {
        struct rdma_cm_id *cm_id = isert_conn->cm_id;
@@ -2381,7 +2381,7 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
 }
 
 static int
-isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+isert_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
 {
        struct isert_np *isert_np = np->np_context;
        struct isert_conn *isert_conn;
@@ -2489,7 +2489,7 @@ static void isert_release_work(struct work_struct *work)
 static void
 isert_wait4logout(struct isert_conn *isert_conn)
 {
-       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsit_conn *conn = isert_conn->conn;
 
        isert_info("conn %p\n", isert_conn);
 
@@ -2501,9 +2501,9 @@ isert_wait4logout(struct isert_conn *isert_conn)
 }
 
 static void
-isert_wait4cmds(struct iscsi_conn *conn)
+isert_wait4cmds(struct iscsit_conn *conn)
 {
-       isert_info("iscsi_conn %p\n", conn);
+       isert_info("iscsit_conn %p\n", conn);
 
        if (conn->sess) {
                target_stop_session(conn->sess->se_sess);
@@ -2521,9 +2521,9 @@ isert_wait4cmds(struct iscsi_conn *conn)
  * before blocking on the target_wait_for_session_cmds
  */
 static void
-isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
+isert_put_unsol_pending_cmds(struct iscsit_conn *conn)
 {
-       struct iscsi_cmd *cmd, *tmp;
+       struct iscsit_cmd *cmd, *tmp;
        static LIST_HEAD(drop_cmd_list);
 
        spin_lock_bh(&conn->cmd_lock);
@@ -2546,7 +2546,7 @@ isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
        }
 }
 
-static void isert_wait_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsit_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
 
@@ -2564,7 +2564,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_free_conn(struct iscsit_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
 
@@ -2572,7 +2572,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
        isert_put_conn(isert_conn);
 }
 
-static void isert_get_rx_pdu(struct iscsi_conn *conn)
+static void isert_get_rx_pdu(struct iscsit_conn *conn)
 {
        struct completion comp;
 
index ca8cfeb..0b2dfd6 100644 (file)
@@ -146,7 +146,7 @@ struct isert_cmd {
        u64                     pdu_buf_dma;
        u32                     pdu_buf_len;
        struct isert_conn       *conn;
-       struct iscsi_cmd        *iscsi_cmd;
+       struct iscsit_cmd       *iscsit_cmd;
        struct iser_tx_desc     tx_desc;
        struct iser_rx_desc     *rx_desc;
        struct rdma_rw_ctx      rw;
@@ -173,7 +173,7 @@ struct isert_conn {
        u64                     login_rsp_dma;
        struct iser_rx_desc     *rx_descs;
        struct ib_recv_wr       rx_wr[ISERT_QP_MAX_RECV_DTOS];
-       struct iscsi_conn       *conn;
+       struct iscsit_conn      *conn;
        struct list_head        node;
        struct completion       login_comp;
        struct completion       login_req_comp;
index 03c8fb1..f9ee957 100644 (file)
@@ -2334,7 +2334,6 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
        ToolboxIstwiReadWriteRequest_t  *IstwiRWRequest;
        MPT_FRAME_HDR           *mf = NULL;
        unsigned long           timeleft;
-       int                     retval;
        u32                     msgcontext;
 
        /* Reset long to int. Should affect IA64 and SPARC only
@@ -2488,7 +2487,6 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
        ioc->add_sge((char *)&IstwiRWRequest->SGL,
            (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
 
-       retval = 0;
        SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
                                IstwiRWRequest->MsgContext);
        INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
@@ -2498,7 +2496,6 @@ retry_wait:
        timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
                        HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
        if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
-               retval = -ETIME;
                printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
                if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
                        mpt_free_msg_frame(ioc, mf);
index f849e7c..5e115e8 100644 (file)
@@ -121,7 +121,7 @@ enum {
 #define SA_AIF_PDEV_CHANGE             (1<<4)
 #define SA_AIF_LDEV_CHANGE             (1<<5)
 #define SA_AIF_BPSTAT_CHANGE           (1<<30)
-#define SA_AIF_BPCFG_CHANGE            (1<<31)
+#define SA_AIF_BPCFG_CHANGE            (1U<<31)
 
 #define HBA_MAX_SG_EMBEDDED            28
 #define HBA_MAX_SG_SEPARATE            90
index cf7bba2..552ca95 100644 (file)
@@ -302,7 +302,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
                if (flag & SCRD)
                        printk("SCRD ");
                printk("status %02x\n", inb(STATUS(sh->io_port)));
-       };
+       }
 #endif
        number_serviced = 0;
 
@@ -344,7 +344,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
                        if (!number_serviced)
                                shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n");
                        return IRQ_HANDLED;
-               };
+               }
 
                mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb);
                mbistatus = mb[mbi].status;
@@ -408,7 +408,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
                                                 */
                scsi_done(tmp_cmd);
                number_serviced++;
-       };
+       }
 }
 
 static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
@@ -534,7 +534,7 @@ static void setup_mailboxes(struct Scsi_Host *sh)
                any2scsi(aha1542->mb[i].ccbptr,
                         aha1542->ccb_handle + i * sizeof(struct ccb));
                aha1542->mb[AHA1542_MAILBOXES + i].status = 0;
-       };
+       }
        aha1542_intr_reset(sh->io_port);        /* reset interrupts, so they don't block */
        any2scsi(mb_cmd + 2, aha1542->mb_handle);
        if (aha1542_out(sh->io_port, mb_cmd, 5))
@@ -549,7 +549,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
        i = inb(STATUS(sh->io_port));
        if (i & DF) {
                i = inb(DATA(sh->io_port));
-       };
+       }
        aha1542_outb(sh->io_port, CMD_RETCONF);
        aha1542_in(sh->io_port, inquiry_result, 3, 0);
        if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
@@ -578,7 +578,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
        default:
                shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n");
                return -1;
-       };
+       }
        switch (inquiry_result[1]) {
        case 0x40:
                sh->irq = 15;
@@ -601,7 +601,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
        default:
                shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n");
                return -1;
-       };
+       }
        sh->this_id = inquiry_result[2] & 7;
        return 0;
 }
@@ -636,7 +636,7 @@ static int aha1542_mbenable(struct Scsi_Host *sh)
 
                if (aha1542_out(sh->io_port, mbenable_cmd, 3))
                        goto fail;
-       };
+       }
        while (0) {
 fail:
                shost_printk(KERN_ERR, sh, "Mailbox init failed\n");
@@ -654,7 +654,7 @@ static int aha1542_query(struct Scsi_Host *sh)
        i = inb(STATUS(sh->io_port));
        if (i & DF) {
                i = inb(DATA(sh->io_port));
-       };
+       }
        aha1542_outb(sh->io_port, CMD_INQUIRY);
        aha1542_in(sh->io_port, inquiry_result, 4, 0);
        if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
@@ -673,7 +673,7 @@ static int aha1542_query(struct Scsi_Host *sh)
        if (inquiry_result[0] == 0x43) {
                shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n");
                return 1;
-       };
+       }
 
        /*
         * Always call this - boards that do not support extended bios translation
index fd1b378..52db147 100644 (file)
@@ -371,8 +371,7 @@ bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
        if (!fw_debug)
                return 0;
 
-       if (fw_debug->debug_buffer)
-               vfree(fw_debug->debug_buffer);
+       vfree(fw_debug->debug_buffer);
 
        file->private_data = NULL;
        kfree(fw_debug);
index 8419a1a..c335f7a 100644 (file)
@@ -755,7 +755,6 @@ void
 bfad_destroy_workq(struct bfad_im_s *im)
 {
        if (im && im->drv_workq) {
-               flush_workqueue(im->drv_workq);
                destroy_workqueue(im->drv_workq);
                im->drv_workq = NULL;
        }
index d295867..05ddbb9 100644 (file)
@@ -273,7 +273,6 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        struct fcoe_port        *port;
        struct fcoe_hdr         *hp;
        struct bnx2fc_rport     *tgt;
-       struct fc_stats         *stats;
        u8                      sof, eof;
        u32                     crc;
        unsigned int            hlen, tlen, elen;
@@ -399,10 +398,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
        }
 
        /*update tx stats */
-       stats = per_cpu_ptr(lport->stats, get_cpu());
-       stats->TxFrames++;
-       stats->TxWords += wlen;
-       put_cpu();
+       this_cpu_inc(lport->stats->TxFrames);
+       this_cpu_add(lport->stats->TxWords, wlen);
 
        /* send down to lld */
        fr_dev(fp) = lport;
@@ -512,7 +509,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
        u32 fr_len, fr_crc;
        struct fc_lport *lport;
        struct fcoe_rcv_info *fr;
-       struct fc_stats *stats;
        struct fc_frame_header *fh;
        struct fcoe_crc_eof crc_eof;
        struct fc_frame *fp;
@@ -543,10 +539,8 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
        skb_pull(skb, sizeof(struct fcoe_hdr));
        fr_len = skb->len - sizeof(struct fcoe_crc_eof);
 
-       stats = per_cpu_ptr(lport->stats, get_cpu());
-       stats->RxFrames++;
-       stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
-       put_cpu();
+       this_cpu_inc(lport->stats->RxFrames);
+       this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
 
        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
@@ -633,9 +627,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
        fr_crc = le32_to_cpu(fr_crc(fp));
 
        if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
-               stats = per_cpu_ptr(lport->stats, get_cpu());
-               crc_err = (stats->InvalidCRCCount++);
-               put_cpu();
+               crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount);
                if (crc_err < 5)
                        printk(KERN_WARNING PFX "dropping frame with "
                               "CRC error\n");
@@ -964,9 +956,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                                mutex_unlock(&lport->lp_mutex);
                                fc_host_port_type(lport->host) =
                                        FC_PORTTYPE_UNKNOWN;
-                               per_cpu_ptr(lport->stats,
-                                           get_cpu())->LinkFailureCount++;
-                               put_cpu();
+                               this_cpu_inc(lport->stats->LinkFailureCount);
                                fcoe_clean_pending_queue(lport);
                                wait_for_upload = 1;
                        }
index 962454f..b42a9ac 100644 (file)
@@ -472,7 +472,7 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
        u32 free_sqes;
        u32 max_sqes;
        u16 xid;
-       int index = get_cpu();
+       int index = raw_smp_processor_id();
 
        max_sqes = BNX2FC_SCSI_MAX_SQES;
        /*
@@ -485,7 +485,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
            (tgt->num_active_ios.counter  >= max_sqes) ||
            (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
                spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
-               put_cpu();
                return NULL;
        }
 
@@ -498,7 +497,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
        atomic_inc(&tgt->num_active_ios);
        atomic_dec(&tgt->free_sqes);
        spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
-       put_cpu();
 
        INIT_LIST_HEAD(&io_req->link);
 
@@ -2032,7 +2030,6 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
        struct bnx2fc_interface *interface = port->priv;
        struct bnx2fc_hba *hba = interface->hba;
        struct fc_lport *lport = port->lport;
-       struct fc_stats *stats;
        int task_idx, index;
        u16 xid;
 
@@ -2045,20 +2042,18 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
        io_req->data_xfer_len = scsi_bufflen(sc_cmd);
        bnx2fc_priv(sc_cmd)->io_req = io_req;
 
-       stats = per_cpu_ptr(lport->stats, get_cpu());
        if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
                io_req->io_req_flags = BNX2FC_READ;
-               stats->InputRequests++;
-               stats->InputBytes += io_req->data_xfer_len;
+               this_cpu_inc(lport->stats->InputRequests);
+               this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len);
        } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                io_req->io_req_flags = BNX2FC_WRITE;
-               stats->OutputRequests++;
-               stats->OutputBytes += io_req->data_xfer_len;
+               this_cpu_inc(lport->stats->OutputRequests);
+               this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len);
        } else {
                io_req->io_req_flags = 0;
-               stats->ControlRequests++;
+               this_cpu_inc(lport->stats->ControlRequests);
        }
-       put_cpu();
 
        xid = io_req->xid;
 
index 67a8971..670a836 100644 (file)
@@ -3585,10 +3585,19 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
 #endif
        if (dcb->target_lun != 0) {
                /* Copy settings */
-               struct DeviceCtlBlk *p;
-               list_for_each_entry(p, &acb->dcb_list, list)
-                       if (p->target_id == dcb->target_id)
+               struct DeviceCtlBlk *p = NULL, *iter;
+
+               list_for_each_entry(iter, &acb->dcb_list, list)
+                       if (iter->target_id == dcb->target_id) {
+                               p = iter;
                                break;
+                       }
+
+               if (!p) {
+                       kfree(dcb);
+                       return NULL;
+               }
+
                dprintkdbg(DBG_1, 
                       "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
                       dcb->target_id, dcb->target_lun,
index 93227c0..2e9155b 100644 (file)
@@ -1000,7 +1000,6 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 
        // Initializing the spinlocks
        spin_lock_init(&pHba->state_lock);
-       spin_lock_init(&adpt_post_wait_lock);
 
        if(raptorFlag == 0){
                printk(KERN_INFO "Adaptec I2O RAID controller"
index d4bb379..5a55250 100644 (file)
@@ -1402,7 +1402,6 @@ efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
                mutex_lock(&hw->bmbx_lock);
                bmbx = hw->sli.bmbx.virt;
 
-               memset(bmbx, 0, SLI4_BMBX_SIZE);
                memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
 
                if (sli_bmbx_command(&hw->sli) == 0) {
index c3247b9..c612f0a 100644 (file)
@@ -62,7 +62,6 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
                        return NULL;
                }
 
-               memset(io->sgl, 0, sizeof(*io->sgl) * num_sgl);
                io->sgl_allocated = num_sgl;
                io->sgl_count = 0;
 
index 8b004a5..be4b5c1 100644 (file)
@@ -370,9 +370,6 @@ static int efct_lio_get_cmd_state(struct se_cmd *cmd)
                container_of(cmd, struct efct_scsi_tgt_io, cmd);
        struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
 
-       if (!io)
-               return 0;
-
        return io->tgt_io.state;
 }
 
index 79b2827..c2a5910 100644 (file)
@@ -1434,8 +1434,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
        return NET_RX_SUCCESS;
 err:
-       per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
-       put_cpu();
+       this_cpu_inc(lport->stats->ErrorFrames);
 err2:
        kfree_skb(skb);
        return NET_RX_DROP;
@@ -1453,9 +1452,10 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
        struct fcoe_percpu_s *fps;
        int rc;
 
-       fps = &get_cpu_var(fcoe_percpu);
+       local_lock(&fcoe_percpu.lock);
+       fps = this_cpu_ptr(&fcoe_percpu);
        rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
-       put_cpu_var(fcoe_percpu);
+       local_unlock(&fcoe_percpu.lock);
 
        return rc;
 }
@@ -1474,7 +1474,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        struct ethhdr *eh;
        struct fcoe_crc_eof *cp;
        struct sk_buff *skb;
-       struct fc_stats *stats;
        struct fc_frame_header *fh;
        unsigned int hlen;              /* header length implies the version */
        unsigned int tlen;              /* trailer length */
@@ -1585,10 +1584,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
                skb_shinfo(skb)->gso_size = 0;
        }
        /* update tx stats: regardless if LLD fails */
-       stats = per_cpu_ptr(lport->stats, get_cpu());
-       stats->TxFrames++;
-       stats->TxWords += wlen;
-       put_cpu();
+       this_cpu_inc(lport->stats->TxFrames);
+       this_cpu_add(lport->stats->TxWords, wlen);
 
        /* send down to lld */
        fr_dev(fp) = lport;
@@ -1610,7 +1607,6 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
        struct fcoe_interface *fcoe;
        struct fc_frame_header *fh;
        struct sk_buff *skb = (struct sk_buff *)fp;
-       struct fc_stats *stats;
 
        /*
         * We only check CRC if no offload is available and if it is
@@ -1640,11 +1636,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
                return 0;
        }
 
-       stats = per_cpu_ptr(lport->stats, get_cpu());
-       stats->InvalidCRCCount++;
-       if (stats->InvalidCRCCount < 5)
+       if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < 5)
                printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
-       put_cpu();
        return -EINVAL;
 }
 
@@ -1657,7 +1650,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
        u32 fr_len;
        struct fc_lport *lport;
        struct fcoe_rcv_info *fr;
-       struct fc_stats *stats;
        struct fcoe_crc_eof crc_eof;
        struct fc_frame *fp;
        struct fcoe_hdr *hp;
@@ -1685,9 +1677,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
         */
        hp = (struct fcoe_hdr *) skb_network_header(skb);
 
-       stats = per_cpu_ptr(lport->stats, get_cpu());
        if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
-               if (stats->ErrorFrames < 5)
+               struct fc_stats *stats;
+
+               stats = per_cpu_ptr(lport->stats, raw_smp_processor_id());
+               if (READ_ONCE(stats->ErrorFrames) < 5)
                        printk(KERN_WARNING "fcoe: FCoE version "
                               "mismatch: The frame has "
                               "version %x, but the "
@@ -1700,8 +1694,8 @@ static void fcoe_recv_frame(struct sk_buff *skb)
        skb_pull(skb, sizeof(struct fcoe_hdr));
        fr_len = skb->len - sizeof(struct fcoe_crc_eof);
 
-       stats->RxFrames++;
-       stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+       this_cpu_inc(lport->stats->RxFrames);
+       this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
 
        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
@@ -1717,13 +1711,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
                goto drop;
 
        if (!fcoe_filter_frames(lport, fp)) {
-               put_cpu();
                fc_exch_recv(lport, fp);
                return;
        }
 drop:
-       stats->ErrorFrames++;
-       put_cpu();
+       this_cpu_inc(lport->stats->ErrorFrames);
        kfree_skb(skb);
 }
 
@@ -1847,7 +1839,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
        struct fcoe_ctlr *ctlr;
        struct fcoe_interface *fcoe;
-       struct fc_stats *stats;
        u32 link_possible = 1;
        u32 mfs;
        int rc = NOTIFY_OK;
@@ -1921,9 +1912,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
                        break;
                case FCOE_CTLR_ENABLED:
                case FCOE_CTLR_UNUSED:
-                       stats = per_cpu_ptr(lport->stats, get_cpu());
-                       stats->LinkFailureCount++;
-                       put_cpu();
+                       this_cpu_inc(lport->stats->LinkFailureCount);
                        fcoe_clean_pending_queue(lport);
                }
        }
@@ -2488,6 +2477,7 @@ static int __init fcoe_init(void)
                p = per_cpu_ptr(&fcoe_percpu, cpu);
                INIT_WORK(&p->work, fcoe_receive_work);
                skb_queue_head_init(&p->fcoe_rx_list);
+               local_lock_init(&p->lock);
        }
 
        /* Setup link change notification */
@@ -2580,7 +2570,7 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
        /* pre-FIP */
        if (is_zero_ether_addr(mac))
                fcoe_ctlr_recv_flogi(fip, lport, fp);
-       if (!is_zero_ether_addr(mac))
+       else
                fcoe_update_src_mac(lport, mac);
 done:
        fc_lport_flogi_resp(seq, fp, lport);
index 558f3f4..39e16ea 100644 (file)
@@ -824,22 +824,21 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
        unsigned long deadline;
        unsigned long sel_time = 0;
        struct list_head del_list;
-       struct fc_stats *stats;
 
        INIT_LIST_HEAD(&del_list);
 
-       stats = per_cpu_ptr(fip->lp->stats, get_cpu());
-
        list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
                deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
                if (fip->sel_fcf == fcf) {
                        if (time_after(jiffies, deadline)) {
-                               stats->MissDiscAdvCount++;
+                               u64 miss_cnt;
+
+                               miss_cnt = this_cpu_inc_return(fip->lp->stats->MissDiscAdvCount);
                                printk(KERN_INFO "libfcoe: host%d: "
                                       "Missing Discovery Advertisement "
                                       "for fab %16.16llx count %lld\n",
                                       fip->lp->host->host_no, fcf->fabric_name,
-                                      stats->MissDiscAdvCount);
+                                      miss_cnt);
                        } else if (time_after(next_timer, deadline))
                                next_timer = deadline;
                }
@@ -855,7 +854,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
                         */
                        list_del(&fcf->list);
                        list_add(&fcf->list, &del_list);
-                       stats->VLinkFailureCount++;
+                       this_cpu_inc(fip->lp->stats->VLinkFailureCount);
                } else {
                        if (time_after(next_timer, deadline))
                                next_timer = deadline;
@@ -864,7 +863,6 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
                                sel_time = fcf->time;
                }
        }
-       put_cpu();
 
        list_for_each_entry_safe(fcf, next, &del_list, list) {
                /* Removes fcf from current list */
@@ -1142,7 +1140,6 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
        struct fip_desc *desc;
        struct fip_encaps *els;
        struct fcoe_fcf *sel;
-       struct fc_stats *stats;
        enum fip_desc_type els_dtype = 0;
        u8 els_op;
        u8 sub;
@@ -1286,10 +1283,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
        fr_dev(fp) = lport;
        fr_encaps(fp) = els_dtype;
 
-       stats = per_cpu_ptr(lport->stats, get_cpu());
-       stats->RxFrames++;
-       stats->RxWords += skb->len / FIP_BPW;
-       put_cpu();
+       this_cpu_inc(lport->stats->RxFrames);
+       this_cpu_add(lport->stats->RxWords, skb->len / FIP_BPW);
 
        fc_exch_recv(lport, fp);
        return;
@@ -1427,9 +1422,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
                                                      ntoh24(vp->fd_fc_id));
                        if (vn_port && (vn_port == lport)) {
                                mutex_lock(&fip->ctlr_mutex);
-                               per_cpu_ptr(lport->stats,
-                                           get_cpu())->VLinkFailureCount++;
-                               put_cpu();
+                               this_cpu_inc(lport->stats->VLinkFailureCount);
                                fcoe_ctlr_reset(fip);
                                mutex_unlock(&fip->ctlr_mutex);
                        }
@@ -1457,8 +1450,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
                 * followed by physical port
                 */
                mutex_lock(&fip->ctlr_mutex);
-               per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
-               put_cpu();
+               this_cpu_inc(lport->stats->VLinkFailureCount);
                fcoe_ctlr_reset(fip);
                mutex_unlock(&fip->ctlr_mutex);
 
index 4d0e19e..62341c6 100644 (file)
@@ -183,9 +183,9 @@ void __fcoe_get_lesb(struct fc_lport *lport,
        memset(lesb, 0, sizeof(*lesb));
        for_each_possible_cpu(cpu) {
                stats = per_cpu_ptr(lport->stats, cpu);
-               lfc += stats->LinkFailureCount;
-               vlfc += stats->VLinkFailureCount;
-               mdac += stats->MissDiscAdvCount;
+               lfc += READ_ONCE(stats->LinkFailureCount);
+               vlfc += READ_ONCE(stats->VLinkFailureCount);
+               mdac += READ_ONCE(stats->MissDiscAdvCount);
        }
        lesb->lesb_link_fail = htonl(lfc);
        lesb->lesb_vlink_fail = htonl(vlfc);
index aa07189..85ec616 100644 (file)
@@ -39,7 +39,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.6.0.53"
+#define DRV_VERSION            "1.6.0.54"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
index e732650..866b4c9 100644 (file)
@@ -86,8 +86,7 @@ void fnic_debugfs_terminate(void)
        debugfs_remove(fnic_trace_debugfs_root);
        fnic_trace_debugfs_root = NULL;
 
-       if (fc_trc_flag)
-               vfree(fc_trc_flag);
+       vfree(fc_trc_flag);
 }
 
 /*
index 9161bd2..51e7c34 100644 (file)
@@ -612,10 +612,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_master(pdev);
 
        /* Query PCI controller on system for DMA addressing
-        * limitation for the device.  Try 64-bit first, and
-        * fail to 32-bit.
+        * limitation for the device.  Try 47-bit first, and
+        * fail to 32-bit. Cisco VIC supports 47 bits only.
         */
-       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
        if (err) {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
@@ -1146,10 +1146,8 @@ static void __exit fnic_cleanup_module(void)
 {
        pci_unregister_driver(&fnic_driver);
        destroy_workqueue(fnic_event_queue);
-       if (fnic_fip_queue) {
-               flush_workqueue(fnic_fip_queue);
+       if (fnic_fip_queue)
                destroy_workqueue(fnic_fip_queue);
-       }
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
        kmem_cache_destroy(fnic_io_req_cache);
index 4bda2f6..764e859 100644 (file)
@@ -446,6 +446,8 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
                return;
        }
 
+       /* Make slot memories observable before marking as ready */
+       smp_wmb();
        WRITE_ONCE(slot->ready, 1);
 
        spin_lock(&dq->lock);
@@ -709,8 +711,6 @@ static int hisi_sas_init_device(struct domain_device *device)
        struct scsi_lun lun;
        int retry = HISI_SAS_DISK_RECOVER_CNT;
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
-       struct device *dev = hisi_hba->dev;
-       struct sas_phy *local_phy;
 
        switch (device->dev_type) {
        case SAS_END_DEVICE:
@@ -729,30 +729,18 @@ static int hisi_sas_init_device(struct domain_device *device)
        case SAS_SATA_PM_PORT:
        case SAS_SATA_PENDING:
                /*
-                * send HARD RESET to clear previous affiliation of
-                * STP target port
+                * If an expander is swapped when a SATA disk is attached then
+                * we should issue a hard reset to clear previous affiliation
+                * of STP target port, see SPL (chapter 6.19.4).
+                *
+                * However we don't need to issue a hard reset here for these
+                * reasons:
+                * a. When probing the device, libsas/libata already issues a
+                * hard reset in sas_probe_sata() -> ata_sas_async_probe().
+                * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
+                * to issue a hard reset by checking the dev status (== INIT).
+                * b. When resetting the controller, this is simply unnecessary.
                 */
-               local_phy = sas_get_local_phy(device);
-               if (!scsi_is_sas_phy_local(local_phy) &&
-                   !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
-                       unsigned long deadline = ata_deadline(jiffies, 20000);
-                       struct sata_device *sata_dev = &device->sata_dev;
-                       struct ata_host *ata_host = sata_dev->ata_host;
-                       struct ata_port_operations *ops = ata_host->ops;
-                       struct ata_port *ap = sata_dev->ap;
-                       struct ata_link *link;
-                       unsigned int classes;
-
-                       ata_for_each_link(link, ap, EDGE)
-                               rc = ops->hardreset(link, &classes,
-                                                   deadline);
-               }
-               sas_put_local_phy(local_phy);
-               if (rc) {
-                       dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
-                       return rc;
-               }
-
                while (retry-- > 0) {
                        rc = hisi_sas_softreset_ata_disk(device);
                        if (!rc)
@@ -768,15 +756,19 @@ static int hisi_sas_init_device(struct domain_device *device)
 
 int hisi_sas_slave_alloc(struct scsi_device *sdev)
 {
-       struct domain_device *ddev;
+       struct domain_device *ddev = sdev_to_domain_dev(sdev);
+       struct hisi_sas_device *sas_dev = ddev->lldd_dev;
        int rc;
 
        rc = sas_slave_alloc(sdev);
        if (rc)
                return rc;
-       ddev = sdev_to_domain_dev(sdev);
 
-       return hisi_sas_init_device(ddev);
+       rc = hisi_sas_init_device(ddev);
+       if (rc)
+               return rc;
+       sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
 
@@ -826,7 +818,6 @@ static int hisi_sas_dev_found(struct domain_device *device)
        dev_info(dev, "dev[%d:%x] found\n",
                sas_dev->device_id, sas_dev->dev_type);
 
-       sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
        return 0;
 
 err_out:
@@ -1710,13 +1701,18 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
                /* report PHY down if timed out */
                if (rc == -ETIMEDOUT)
                        hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
-       } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
-               /*
-                * If in init state, we rely on caller to wait for link to be
-                * ready; otherwise, except phy reset is fail, delay.
-                */
-               if (!rc)
-                       msleep(2000);
+               return rc;
+       }
+
+       if (rc)
+               return rc;
+
+       /* Remote phy */
+       if (dev_is_sata(device)) {
+               rc = sas_ata_wait_after_reset(device,
+                                       HISI_SAS_WAIT_PHYUP_TIMEOUT);
+       } else {
+               msleep(2000);
        }
 
        return rc;
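
The hunk above also changes how the driver waits after a remote-phy reset: SATA devices now poll for link readiness through the new libsas helper, while other devices keep the fixed delay. A condensed, illustrative-only sketch of that decision (reusing the driver's identifiers, not code from this series):

    static int example_wait_after_remote_reset(struct domain_device *device)
    {
            /* SATA: poll the link via sas_ata_wait_after_reset() */
            if (dev_is_sata(device))
                    return sas_ata_wait_after_reset(device,
                                                    HISI_SAS_WAIT_PHYUP_TIMEOUT);

            /* SAS/expander-attached devices: give the phy time to recover */
            msleep(2000);
            return 0;
    }
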
index 79f87d7..7d819fc 100644

@@ -1563,9 +1563,15 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 
        phy->port_id = port_id;
 
-       /* Call pm_runtime_put_sync() with pairs in hisi_sas_phyup_pm_work() */
+       /*
+        * Call pm_runtime_get_noresume() which pairs with
+        * hisi_sas_phyup_pm_work() -> pm_runtime_put_sync().
+        * For failure call pm_runtime_put() as we are in a hardirq context.
+        */
        pm_runtime_get_noresume(dev);
-       hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+       res = hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+       if (!res)
+               pm_runtime_put(dev);
 
        res = IRQ_HANDLED;
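
The comment added above describes a runtime-PM hand-off between hard-IRQ and process context. A minimal sketch of that pairing, with hypothetical helper names (only the pm_runtime_*() and queue_work() calls are the real API):

    /* Hypothetical helper running in hard-IRQ context. */
    static void example_phy_up_in_hardirq(struct device *dev,
                                          struct work_struct *work)
    {
            pm_runtime_get_noresume(dev);   /* pin the device without resuming it */
            if (!queue_work(system_wq, work))
                    pm_runtime_put(dev);    /* async put is safe in hardirq */
    }

    /* The queued work item owns the reference and may sleep. */
    static void example_phy_up_work(struct device *dev)
    {
            /* ... bring the phy up ... */
            pm_runtime_put_sync(dev);       /* pairs with get_noresume() above */
    }
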
 
index f69b77c..8352f90 100644
@@ -229,10 +229,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
        if (error)
                goto fail;
 
-       error = scsi_mq_setup_tags(shost);
-       if (error)
-               goto fail;
-
        if (!shost->shost_gendev.parent)
                shost->shost_gendev.parent = dev ? dev : &platform_bus;
        if (!dma_dev)
@@ -240,6 +236,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
        shost->dma_dev = dma_dev;
 
+       error = scsi_mq_setup_tags(shost);
+       if (error)
+               goto fail;
+
        /*
         * Increase usage count temporarily here so that calling
         * scsi_autopm_put_host() will trigger runtime idle if there is
index 104bee9..256ec6d 100644
@@ -3456,7 +3456,7 @@ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
                              struct bin_attribute *bin_attr,
                              char *buf, loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
        unsigned long lock_flags = 0;
@@ -4182,7 +4182,7 @@ static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
                                struct bin_attribute *bin_attr, char *buf,
                                loff_t off, size_t count)
 {
-       struct device *cdev = container_of(kobj, struct device, kobj);
+       struct device *cdev = kobj_to_dev(kobj);
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
        struct ipr_hostrcb *hostrcb;
@@ -4206,7 +4206,7 @@ static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
                                struct bin_attribute *bin_attr, char *buf,
                                loff_t off, size_t count)
 {
-       struct device *cdev = container_of(kobj, struct device, kobj);
+       struct device *cdev = kobj_to_dev(kobj);
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
        struct ipr_hostrcb *hostrcb;
@@ -4267,7 +4267,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
                             struct bin_attribute *bin_attr,
                             char *buf, loff_t off, size_t count)
 {
-       struct device *cdev = container_of(kobj, struct device, kobj);
+       struct device *cdev = kobj_to_dev(kobj);
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
        struct ipr_dump *dump;
@@ -4456,7 +4456,7 @@ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
                              struct bin_attribute *bin_attr,
                              char *buf, loff_t off, size_t count)
 {
-       struct device *cdev = container_of(kobj, struct device, kobj);
+       struct device *cdev = kobj_to_dev(kobj);
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
        int rc;
@@ -10092,7 +10092,6 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
 {
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
        unsigned long lock_flags = 0;
-       irqreturn_t rc = IRQ_HANDLED;
 
        dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -10101,7 +10100,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
        wake_up(&ioa_cfg->msi_wait_q);
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-       return rc;
+       return IRQ_HANDLED;
 }
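
kobj_to_dev() is simply a typed container_of() helper from <linux/device.h>, so the conversions above do not change behaviour. A minimal sysfs bin_attribute reader using it might look like this (hypothetical attribute, offset handling elided):

    static ssize_t example_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *bin_attr,
                                char *buf, loff_t off, size_t count)
    {
            struct device *dev = kobj_to_dev(kobj);
            struct Scsi_Host *shost = class_to_shost(dev);

            return scnprintf(buf, count, "host%u\n", shost->host_no);
    }
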
 
 /**
index aa223db..1d91c45 100644
@@ -825,10 +825,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
        }
        memset(ep, 0, sizeof(*ep));
 
-       cpu = get_cpu();
+       cpu = raw_smp_processor_id();
        pool = per_cpu_ptr(mp->pool, cpu);
        spin_lock_bh(&pool->lock);
-       put_cpu();
 
        /* peek cache of free slot */
        if (pool->left != FC_XID_UNKNOWN) {
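
The CPU number here is only a hint for picking a per-CPU exchange pool; correctness comes from pool->lock, so there is no need to disable preemption around the lookup. A small sketch of the pattern with a hypothetical pool type:

    struct ex_pool {
            spinlock_t lock;
            unsigned int left;
    };

    static void example_use_pool(struct ex_pool __percpu *pools)
    {
            /* Migration after this read is harmless: the lock protects the pool. */
            struct ex_pool *pool = per_cpu_ptr(pools, raw_smp_processor_id());

            spin_lock_bh(&pool->lock);
            /* ... take a slot from the pool ... */
            spin_unlock_bh(&pool->lock);
    }
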
index bce90eb..945adca 100644
@@ -143,8 +143,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
                INIT_LIST_HEAD(&fsp->list);
                spin_lock_init(&fsp->scsi_pkt_lock);
        } else {
-               per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
-               put_cpu();
+               this_cpu_inc(lport->stats->FcpPktAllocFails);
        }
        return fsp;
 }
@@ -266,8 +265,7 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
        if (!fsp->seq_ptr)
                return -EINVAL;
 
-       per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
-       put_cpu();
+       this_cpu_inc(fsp->lp->stats->FcpPktAborts);
 
        fsp->state |= FC_SRB_ABORT_PENDING;
        rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
@@ -436,8 +434,7 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
        if (likely(fp))
                return fp;
 
-       per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
-       put_cpu();
+       this_cpu_inc(lport->stats->FcpFrameAllocFails);
        /* error case */
        fc_fcp_can_queue_ramp_down(lport);
        shost_printk(KERN_ERR, lport->host,
@@ -471,7 +468,6 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 {
        struct scsi_cmnd *sc = fsp->cmd;
        struct fc_lport *lport = fsp->lp;
-       struct fc_stats *stats;
        struct fc_frame_header *fh;
        size_t start_offset;
        size_t offset;
@@ -533,14 +529,12 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 
                if (~crc != le32_to_cpu(fr_crc(fp))) {
 crc_err:
-                       stats = per_cpu_ptr(lport->stats, get_cpu());
-                       stats->ErrorFrames++;
+                       this_cpu_inc(lport->stats->ErrorFrames);
                        /* per cpu count, not total count, but OK for limit */
-                       if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
+                       if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT)
                                printk(KERN_WARNING "libfc: CRC error on data "
                                       "frame for port (%6.6x)\n",
                                       lport->port_id);
-                       put_cpu();
                        /*
                         * Assume the frame is total garbage.
                         * We may have copied it over the good part
@@ -1861,7 +1855,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
        struct fc_fcp_pkt *fsp;
        int rval;
        int rc = 0;
-       struct fc_stats *stats;
 
        rval = fc_remote_port_chkready(rport);
        if (rval) {
@@ -1913,20 +1906,18 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
        /*
         * setup the data direction
         */
-       stats = per_cpu_ptr(lport->stats, get_cpu());
        if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
                fsp->req_flags = FC_SRB_READ;
-               stats->InputRequests++;
-               stats->InputBytes += fsp->data_len;
+               this_cpu_inc(lport->stats->InputRequests);
+               this_cpu_add(lport->stats->InputBytes, fsp->data_len);
        } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                fsp->req_flags = FC_SRB_WRITE;
-               stats->OutputRequests++;
-               stats->OutputBytes += fsp->data_len;
+               this_cpu_inc(lport->stats->OutputRequests);
+               this_cpu_add(lport->stats->OutputBytes, fsp->data_len);
        } else {
                fsp->req_flags = 0;
-               stats->ControlRequests++;
+               this_cpu_inc(lport->stats->ControlRequests);
        }
-       put_cpu();
 
        /*
         * send it to the lower layer
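
The this_cpu_*() helpers used above fold the get_cpu()/per_cpu_ptr()/put_cpu() sequence into one preemption-safe operation. A short sketch with hypothetical counters, just to show the API shape:

    struct ex_stats {
            u64 InputRequests;
            u64 InputBytes;
    };

    static void example_account_read(struct ex_stats __percpu *stats, size_t len)
    {
            this_cpu_inc(stats->InputRequests);
            this_cpu_add(stats->InputBytes, len);
    }
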
index 19cd4a9..9c02c95 100644
@@ -308,21 +308,21 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
 
                stats = per_cpu_ptr(lport->stats, cpu);
 
-               fc_stats->tx_frames += stats->TxFrames;
-               fc_stats->tx_words += stats->TxWords;
-               fc_stats->rx_frames += stats->RxFrames;
-               fc_stats->rx_words += stats->RxWords;
-               fc_stats->error_frames += stats->ErrorFrames;
-               fc_stats->invalid_crc_count += stats->InvalidCRCCount;
-               fc_stats->fcp_input_requests += stats->InputRequests;
-               fc_stats->fcp_output_requests += stats->OutputRequests;
-               fc_stats->fcp_control_requests += stats->ControlRequests;
-               fcp_in_bytes += stats->InputBytes;
-               fcp_out_bytes += stats->OutputBytes;
-               fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
-               fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
-               fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
-               fc_stats->link_failure_count += stats->LinkFailureCount;
+               fc_stats->tx_frames += READ_ONCE(stats->TxFrames);
+               fc_stats->tx_words += READ_ONCE(stats->TxWords);
+               fc_stats->rx_frames += READ_ONCE(stats->RxFrames);
+               fc_stats->rx_words += READ_ONCE(stats->RxWords);
+               fc_stats->error_frames += READ_ONCE(stats->ErrorFrames);
+               fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount);
+               fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests);
+               fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests);
+               fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests);
+               fcp_in_bytes += READ_ONCE(stats->InputBytes);
+               fcp_out_bytes += READ_ONCE(stats->OutputBytes);
+               fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails);
+               fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts);
+               fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails);
+               fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount);
        }
        fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
        fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
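
Because the writers now update these counters locklessly with this_cpu_*(), the reader annotates every load with READ_ONCE() so each per-CPU value is fetched exactly once and cannot be torn or re-read by the compiler. An illustrative summation loop over the hypothetical counters from the previous sketch:

    static u64 example_sum_input_bytes(struct ex_stats __percpu *stats)
    {
            u64 total = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    total += READ_ONCE(per_cpu_ptr(stats, cpu)->InputBytes);

            return total;
    }
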
index d34c82e..d35c929 100644
@@ -358,22 +358,14 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev,
        return r;
 }
 
-static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
-                             unsigned long deadline)
+int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline)
 {
-       int ret = 0, res;
-       struct sas_phy *phy;
-       struct ata_port *ap = link->ap;
+       struct sata_device *sata_dev = &dev->sata_dev;
        int (*check_ready)(struct ata_link *link);
-       struct domain_device *dev = ap->private_data;
-       struct sas_internal *i = dev_to_sas_internal(dev);
-
-       res = i->dft->lldd_I_T_nexus_reset(dev);
-       if (res == -ENODEV)
-               return res;
-
-       if (res != TMF_RESP_FUNC_COMPLETE)
-               sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+       struct ata_port *ap = sata_dev->ap;
+       struct ata_link *link = &ap->link;
+       struct sas_phy *phy;
+       int ret;
 
        phy = sas_get_local_phy(dev);
        if (scsi_is_sas_phy_local(phy))
@@ -386,6 +378,27 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
        if (ret && ret != -EAGAIN)
                sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
 
+       return ret;
+}
+EXPORT_SYMBOL_GPL(sas_ata_wait_after_reset);
+
+static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
+                             unsigned long deadline)
+{
+       struct ata_port *ap = link->ap;
+       struct domain_device *dev = ap->private_data;
+       struct sas_internal *i = dev_to_sas_internal(dev);
+       int ret;
+
+       ret = i->dft->lldd_I_T_nexus_reset(dev);
+       if (ret == -ENODEV)
+               return ret;
+
+       if (ret != TMF_RESP_FUNC_COMPLETE)
+               sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+
+       ret = sas_ata_wait_after_reset(dev, deadline);
+
        *class = dev->sata_dev.class;
 
        ap->cbl = ATA_CBL_SATA;
index 0025760..da9070c 100644
@@ -604,6 +604,7 @@ struct lpfc_vport {
 #define FC_VFI_REGISTERED      0x800000 /* VFI is registered */
 #define FC_FDISC_COMPLETED     0x1000000/* FDISC completed */
 #define FC_DISC_DELAYED                0x2000000/* Delay NPort discovery */
+#define FC_RSCN_MEMENTO                0x4000000/* RSCN cmd processed */
 
        uint32_t ct_flags;
 #define FC_CT_RFF_ID           0x1      /* RFF_ID accepted by switch */
@@ -611,6 +612,7 @@ struct lpfc_vport {
 #define FC_CT_RSNN_NN          0x4      /* RSNN_NN accepted by switch */
 #define FC_CT_RSPN_ID          0x8      /* RSPN_ID accepted by switch */
 #define FC_CT_RFT_ID           0x10     /* RFT_ID accepted by switch */
+#define FC_CT_RPRT_DEFER       0x20     /* Defer issuing FDMI RPRT */
 
        struct list_head fc_nodes;
 
@@ -713,6 +715,7 @@ struct lpfc_vport {
 #define LPFC_VMID_QFPA_CMPL            0x4
 #define LPFC_VMID_QOS_ENABLED          0x8
 #define LPFC_VMID_TIMER_ENBLD          0x10
+#define LPFC_VMID_TYPE_PRIO            0x20
        struct fc_qfpa_res *qfpa_res;
 
        struct fc_vport *fc_vport;
@@ -738,9 +741,8 @@ struct lpfc_vport {
        struct list_head rcv_buffer_list;
        unsigned long rcv_buffer_time_stamp;
        uint32_t vport_flag;
-#define STATIC_VPORT   1
-#define FAWWPN_SET     2
-#define FAWWPN_PARAM_CHG       4
+#define STATIC_VPORT           0x1
+#define FAWWPN_PARAM_CHG       0x2
 
        uint16_t fdmi_num_disc;
        uint32_t fdmi_hba_mask;
@@ -1025,6 +1027,7 @@ struct lpfc_hba {
 #define LS_MDS_LINK_DOWN      0x8      /* MDS Diagnostics Link Down */
 #define LS_MDS_LOOPBACK       0x10     /* MDS Diagnostics Link Up (Loopback) */
 #define LS_CT_VEN_RPA         0x20     /* Vendor RPA sent to switch */
+#define LS_EXTERNAL_LOOPBACK  0x40     /* External loopback plug inserted */
 
        uint32_t hba_flag;      /* hba generic flags */
 #define HBA_ERATT_HANDLED      0x1 /* This flag is set when eratt handled */
@@ -1057,6 +1060,7 @@ struct lpfc_hba {
 #define HBA_HBEAT_INP          0x4000000 /* mbox HBEAT is in progress */
 #define HBA_HBEAT_TMO          0x8000000 /* HBEAT initiated after timeout */
 #define HBA_FLOGI_OUTSTANDING  0x10000000 /* FLOGI is outstanding */
+#define HBA_RHBA_CMPL          0x20000000 /* RHBA FDMI command is successful */
 
        struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
        uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
index 9b982cc..3caaa7c 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -1120,12 +1120,22 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
                                len += scnprintf(buf + len, PAGE_SIZE-len,
                                                "   Private Loop\n");
                } else {
-                       if (vport->fc_flag & FC_FABRIC)
-                               len += scnprintf(buf + len, PAGE_SIZE-len,
-                                               "   Fabric\n");
-                       else
+                       if (vport->fc_flag & FC_FABRIC) {
+                               if (phba->sli_rev == LPFC_SLI_REV4 &&
+                                   vport->port_type == LPFC_PHYSICAL_PORT &&
+                                   phba->sli4_hba.fawwpn_flag &
+                                       LPFC_FAWWPN_FABRIC)
+                                       len += scnprintf(buf + len,
+                                                        PAGE_SIZE - len,
+                                                        "   Fabric FA-PWWN\n");
+                               else
+                                       len += scnprintf(buf + len,
+                                                        PAGE_SIZE - len,
+                                                        "   Fabric\n");
+                       } else {
                                len += scnprintf(buf + len, PAGE_SIZE-len,
                                                "   Point-2-Point\n");
+                       }
                }
        }
 
@@ -6878,17 +6888,34 @@ lpfc_get_stats(struct Scsi_Host *shost)
        memset(hs, 0, sizeof (struct fc_host_statistics));
 
        hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+       hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+
        /*
-        * The MBX_READ_STATUS returns tx_k_bytes which has to
-        * converted to words
+        * The MBX_READ_STATUS returns tx_k_bytes which has to be
+        * converted to words.
+        *
+        * Check if extended byte flag is set, to know when to collect upper
+        * bits of 64 bit wide statistics counter.
         */
-       hs->tx_words = (uint64_t)
-                       ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
-                       * (uint64_t)256);
-       hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
-       hs->rx_words = (uint64_t)
-                       ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
-                        * (uint64_t)256);
+       if (pmb->un.varRdStatus.xkb & RD_ST_XKB) {
+               hs->tx_words = (u64)
+                              ((((u64)(pmb->un.varRdStatus.xmit_xkb &
+                                       RD_ST_XMIT_XKB_MASK) << 32) |
+                               (u64)pmb->un.varRdStatus.xmitByteCnt) *
+                               (u64)256);
+               hs->rx_words = (u64)
+                              ((((u64)(pmb->un.varRdStatus.rcv_xkb &
+                                       RD_ST_RCV_XKB_MASK) << 32) |
+                               (u64)pmb->un.varRdStatus.rcvByteCnt) *
+                               (u64)256);
+       } else {
+               hs->tx_words = (uint64_t)
+                               ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
+                               * (uint64_t)256);
+               hs->rx_words = (uint64_t)
+                               ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
+                                * (uint64_t)256);
+       }
 
        memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
        pmb->mbxCommand = MBX_READ_LNK_STAT;
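
When the extended-byte (xkb) flag is set, the mailbox reports each byte counter as a split 64-bit value: the low 32 bits plus a masked upper word. The counter is in kilobytes, and 1 KiB equals 256 four-byte FC words, which is where the factor of 256 comes from. A minimal sketch of the reconstruction with placeholder field names (the real fields are the varRdStatus members in the hunk above):

    static u64 example_kbytes_to_words(u32 lo_kb, u32 hi_kb, u32 hi_mask)
    {
            u64 kb = ((u64)(hi_kb & hi_mask) << 32) | lo_kb;

            return kb * 256;        /* 1 KiB == 256 words of 4 bytes */
    }
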
index 8b586fa..676e7d5 100644
@@ -310,7 +310,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
        int rc = 0;
        u32 ulp_status, ulp_word4, total_data_placed;
 
-       dd_data = cmdiocbq->context1;
+       dd_data = cmdiocbq->context_un.dd_data;
 
        /* Determine if job has been aborted */
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -328,10 +328,10 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
        spin_unlock_irqrestore(&phba->hbalock, flags);
 
        iocb = &dd_data->context_un.iocb;
-       ndlp = iocb->cmdiocbq->context_un.ndlp;
+       ndlp = iocb->cmdiocbq->ndlp;
        rmp = iocb->rmp;
-       cmp = cmdiocbq->context2;
-       bmp = cmdiocbq->context3;
+       cmp = cmdiocbq->cmd_dmabuf;
+       bmp = cmdiocbq->bpl_dmabuf;
        ulp_status = get_job_ulpstatus(phba, rspiocbq);
        ulp_word4 = get_job_word4(phba, rspiocbq);
        total_data_placed = get_job_data_placed(phba, rspiocbq);
@@ -470,14 +470,12 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
 
        cmdiocbq->num_bdes = num_entry;
        cmdiocbq->vport = phba->pport;
-       cmdiocbq->context2 = cmp;
-       cmdiocbq->context3 = bmp;
+       cmdiocbq->cmd_dmabuf = cmp;
+       cmdiocbq->bpl_dmabuf = bmp;
        cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
 
        cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
-       cmdiocbq->context1 = dd_data;
-       cmdiocbq->context2 = cmp;
-       cmdiocbq->context3 = bmp;
+       cmdiocbq->context_un.dd_data = dd_data;
 
        dd_data->type = TYPE_IOCB;
        dd_data->set_job = job;
@@ -495,8 +493,8 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
                readl(phba->HCregaddr); /* flush */
        }
 
-       cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
-       if (!cmdiocbq->context_un.ndlp) {
+       cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+       if (!cmdiocbq->ndlp) {
                rc = -ENODEV;
                goto free_rmp;
        }
@@ -573,9 +571,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
        int rc = 0;
        u32 ulp_status, ulp_word4, total_data_placed;
 
-       dd_data = cmdiocbq->context1;
+       dd_data = cmdiocbq->context_un.dd_data;
        ndlp = dd_data->context_un.iocb.ndlp;
-       cmdiocbq->context1 = ndlp;
+       cmdiocbq->ndlp = ndlp;
 
        /* Determine if job has been aborted */
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -595,7 +593,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
        ulp_status = get_job_ulpstatus(phba, rspiocbq);
        ulp_word4 = get_job_word4(phba, rspiocbq);
        total_data_placed = get_job_data_placed(phba, rspiocbq);
-       pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+       pcmd = cmdiocbq->cmd_dmabuf;
        prsp = (struct lpfc_dmabuf *)pcmd->list.next;
 
        /* Copy the completed job data or determine the job status if job is
@@ -711,8 +709,8 @@ lpfc_bsg_rport_els(struct bsg_job *job)
        /* Transfer the request payload to allocated command dma buffer */
        sg_copy_to_buffer(job->request_payload.sg_list,
                          job->request_payload.sg_cnt,
-                         ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
-                         job->request_payload.payload_len);
+                         cmdiocbq->cmd_dmabuf->virt,
+                         cmdsize);
 
        rpi = ndlp->nlp_rpi;
 
@@ -722,8 +720,8 @@ lpfc_bsg_rport_els(struct bsg_job *job)
        else
                cmdiocbq->iocb.ulpContext = rpi;
        cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
-       cmdiocbq->context1 = dd_data;
-       cmdiocbq->context_un.ndlp = ndlp;
+       cmdiocbq->context_un.dd_data = dd_data;
+       cmdiocbq->ndlp = ndlp;
        cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
        dd_data->type = TYPE_IOCB;
        dd_data->set_job = job;
@@ -742,12 +740,6 @@ lpfc_bsg_rport_els(struct bsg_job *job)
                readl(phba->HCregaddr); /* flush */
        }
 
-       cmdiocbq->context1 = lpfc_nlp_get(ndlp);
-       if (!cmdiocbq->context1) {
-               rc = -EIO;
-               goto linkdown_err;
-       }
-
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
        if (rc == IOCB_SUCCESS) {
                spin_lock_irqsave(&phba->hbalock, flags);
@@ -917,8 +909,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        struct ulp_bde64 *bde;
        dma_addr_t dma_addr;
        int i;
-       struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
-       struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
+       struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
+       struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
        struct lpfc_sli_ct_request *ct_req;
        struct bsg_job *job = NULL;
        struct fc_bsg_reply *bsg_reply;
@@ -985,9 +977,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                list_for_each_entry(iocbq, &head, list) {
                        size = 0;
                        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
-                               bdeBuf1 = iocbq->context2;
-                               bdeBuf2 = iocbq->context3;
-
+                               bdeBuf1 = iocbq->cmd_dmabuf;
+                               bdeBuf2 = iocbq->bpl_dmabuf;
                        }
                        if (phba->sli_rev == LPFC_SLI_REV4)
                                bde_count = iocbq->wcqe_cmpl.word3;
@@ -1384,7 +1375,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
        int rc = 0;
        u32 ulp_status, ulp_word4;
 
-       dd_data = cmdiocbq->context1;
+       dd_data = cmdiocbq->context_un.dd_data;
 
        /* Determine if job has been aborted */
        spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1401,8 +1392,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
        spin_unlock_irqrestore(&phba->hbalock, flags);
 
        ndlp = dd_data->context_un.iocb.ndlp;
-       cmp = cmdiocbq->context2;
-       bmp = cmdiocbq->context3;
+       cmp = cmdiocbq->cmd_dmabuf;
+       bmp = cmdiocbq->bpl_dmabuf;
 
        ulp_status = get_job_ulpstatus(phba, rspiocbq);
        ulp_word4 = get_job_word4(phba, rspiocbq);
@@ -1529,10 +1520,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
 
        ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
        ctiocb->vport = phba->pport;
-       ctiocb->context1 = dd_data;
-       ctiocb->context2 = cmp;
-       ctiocb->context3 = bmp;
-       ctiocb->context_un.ndlp = ndlp;
+       ctiocb->context_un.dd_data = dd_data;
+       ctiocb->cmd_dmabuf = cmp;
+       ctiocb->bpl_dmabuf = bmp;
+       ctiocb->ndlp = ndlp;
        ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
 
        dd_data->type = TYPE_IOCB;
@@ -2671,7 +2662,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
        ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
        ctreq->CommandResponse.bits.Size = 0;
 
-       cmdiocbq->context3 = dmabuf;
+       cmdiocbq->bpl_dmabuf = dmabuf;
        cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
        cmdiocbq->vport = phba->pport;
        cmdiocbq->cmd_cmpl = NULL;
@@ -3231,7 +3222,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
        cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
        cmdiocbq->vport = phba->pport;
        cmdiocbq->cmd_cmpl = NULL;
-       cmdiocbq->context3 = txbmp;
+       cmdiocbq->bpl_dmabuf = txbmp;
 
        if (phba->sli_rev < LPFC_SLI_REV4) {
                lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
@@ -3384,7 +3375,7 @@ job_error:
  * This is completion handler function for mailbox commands issued from
  * lpfc_bsg_issue_mbox function. This function is called by the
  * mailbox event handler function with no lock held. This function
- * will wake up thread waiting on the wait queue pointed by context1
+ * will wake up the thread waiting on the wait queue pointed to by dd_data
  * of the mailbox.
  **/
 static void
@@ -5034,9 +5025,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
        unsigned int rsp_size;
        int rc = 0;
 
-       dd_data = cmdiocbq->context1;
-       cmp = cmdiocbq->context2;
-       bmp = cmdiocbq->context3;
+       dd_data = cmdiocbq->context_un.dd_data;
+       cmp = cmdiocbq->cmd_dmabuf;
+       bmp = cmdiocbq->bpl_dmabuf;
        menlo = &dd_data->context_un.menlo;
        rmp = menlo->rmp;
        rsp = &rspiocbq->iocb;
@@ -5233,9 +5224,9 @@ lpfc_menlo_cmd(struct bsg_job *job)
        /* We want the firmware to timeout before we do */
        cmd->ulpTimeout = MENLO_TIMEOUT - 5;
        cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
-       cmdiocbq->context1 = dd_data;
-       cmdiocbq->context2 = cmp;
-       cmdiocbq->context3 = bmp;
+       cmdiocbq->context_un.dd_data = dd_data;
+       cmdiocbq->cmd_dmabuf = cmp;
+       cmdiocbq->bpl_dmabuf = bmp;
        if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
                cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
                cmd->ulpPU = MENLO_PU; /* 3 */
index 9897a1a..b0775be 100644
@@ -32,7 +32,9 @@ int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
-
+int lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox);
+void lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+                           enum lpfc_mbox_ctx locked);
 void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
 void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -432,6 +434,7 @@ void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
 
 void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
 void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+void lpfc_setup_fdmi_mask(struct lpfc_vport *vport);
 int lpfc_link_reset(struct lpfc_vport *vport);
 
 /* Function prototypes. */
index 4b024aa..9d36b20 100644
@@ -118,22 +118,22 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        struct lpfc_nodelist *ndlp;
        struct lpfc_dmabuf *mp, *bmp;
 
-       ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+       ndlp = cmdiocb->ndlp;
        if (ndlp)
                lpfc_nlp_put(ndlp);
 
-       mp = cmdiocb->context2;
-       bmp = cmdiocb->context3;
+       mp = cmdiocb->rsp_dmabuf;
+       bmp = cmdiocb->bpl_dmabuf;
        if (mp) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
-               cmdiocb->context2 = NULL;
+               cmdiocb->rsp_dmabuf = NULL;
        }
 
        if (bmp) {
                lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
                kfree(bmp);
-               cmdiocb->context3 = NULL;
+               cmdiocb->bpl_dmabuf = NULL;
        }
 
        lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -232,18 +232,17 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
        }
 
        /* Save for completion so we can release these resources */
-       cmdiocbq->context2 = (uint8_t *)mp;
-       cmdiocbq->context3 = (uint8_t *)bmp;
+       cmdiocbq->rsp_dmabuf = mp;
+       cmdiocbq->bpl_dmabuf = bmp;
        cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
        tmo = (3 * phba->fc_ratov);
 
        cmdiocbq->retry = 0;
        cmdiocbq->vport = vport;
-       cmdiocbq->context_un.ndlp = NULL;
        cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
 
-       cmdiocbq->context1 = lpfc_nlp_get(ndlp);
-       if (!cmdiocbq->context1)
+       cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+       if (!cmdiocbq->ndlp)
                goto ct_no_ndlp;
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
@@ -310,8 +309,8 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
                return;
        }
 
-       ct_req = ((struct lpfc_sli_ct_request *)
-                (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+       ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
+
        mi_cmd = ct_req->CommandResponse.bits.CmdRsp;
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
@@ -347,14 +346,14 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        uint32_t size;
        struct list_head head;
        struct lpfc_sli_ct_request *ct_req;
-       struct lpfc_dmabuf *bdeBuf1 = ctiocbq->context2;
-       struct lpfc_dmabuf *bdeBuf2 = ctiocbq->context3;
+       struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
+       struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
        u32 status, parameter, bde_count = 0;
        struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
 
-       ctiocbq->context1 = NULL;
-       ctiocbq->context2 = NULL;
-       ctiocbq->context3 = NULL;
+       ctiocbq->cmd_dmabuf = NULL;
+       ctiocbq->rsp_dmabuf = NULL;
+       ctiocbq->bpl_dmabuf = NULL;
 
        wcqe_cmpl = &ctiocbq->wcqe_cmpl;
        status = get_job_ulpstatus(phba, ctiocbq);
@@ -382,12 +381,11 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        if (bde_count == 0)
                return;
 
-       ctiocbq->context2 = bdeBuf1;
+       ctiocbq->cmd_dmabuf = bdeBuf1;
        if (bde_count == 2)
-               ctiocbq->context3 = bdeBuf2;
+               ctiocbq->bpl_dmabuf = bdeBuf2;
 
-       ct_req = ((struct lpfc_sli_ct_request *)
-                (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+       ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
 
        if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
            ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
@@ -408,8 +406,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
                        if (!bde_count)
                                continue;
-                       bdeBuf1 = iocb->context2;
-                       iocb->context2 = NULL;
+                       bdeBuf1 = iocb->cmd_dmabuf;
+                       iocb->cmd_dmabuf = NULL;
                        if (phba->sli_rev == LPFC_SLI_REV4)
                                size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
                        else
@@ -417,8 +415,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
                        lpfc_in_buf_free(phba, bdeBuf1);
                        if (bde_count == 2) {
-                               bdeBuf2 = iocb->context3;
-                               iocb->context3 = NULL;
+                               bdeBuf2 = iocb->bpl_dmabuf;
+                               iocb->bpl_dmabuf = NULL;
                                if (phba->sli_rev == LPFC_SLI_REV4)
                                        size = iocb->unsol_rcv_len;
                                else
@@ -549,24 +547,25 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
 {
        struct lpfc_dmabuf *buf_ptr;
 
-       /* I/O job is complete so context is now invalid*/
-       ctiocb->context_un.ndlp = NULL;
-       if (ctiocb->context1) {
-               buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+       /* IOCBQ job structure gets cleaned during release.  Just release
+        * the dma buffers here.
+        */
+       if (ctiocb->cmd_dmabuf) {
+               buf_ptr = ctiocb->cmd_dmabuf;
                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                kfree(buf_ptr);
-               ctiocb->context1 = NULL;
+               ctiocb->cmd_dmabuf = NULL;
        }
-       if (ctiocb->context2) {
-               lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
-               ctiocb->context2 = NULL;
+       if (ctiocb->rsp_dmabuf) {
+               lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
+               ctiocb->rsp_dmabuf = NULL;
        }
 
-       if (ctiocb->context3) {
-               buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+       if (ctiocb->bpl_dmabuf) {
+               buf_ptr = ctiocb->bpl_dmabuf;
                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                kfree(buf_ptr);
-               ctiocb->context3 = NULL;
+               ctiocb->bpl_dmabuf = NULL;
        }
        lpfc_sli_release_iocbq(phba, ctiocb);
        return 0;
@@ -605,11 +604,11 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
        /* Update the num_entry bde count */
        geniocb->num_bdes = num_entry;
 
-       geniocb->context3 = (uint8_t *) bmp;
+       geniocb->bpl_dmabuf = bmp;
 
        /* Save for completion so we can release these resources */
-       geniocb->context1 = (uint8_t *) inp;
-       geniocb->context2 = (uint8_t *) outp;
+       geniocb->cmd_dmabuf = inp;
+       geniocb->rsp_dmabuf = outp;
 
        geniocb->event_tag = event_tag;
 
@@ -635,8 +634,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
        geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
        geniocb->vport = vport;
        geniocb->retry = retry;
-       geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
-       if (!geniocb->context_un.ndlp)
+       geniocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!geniocb->ndlp)
                goto out;
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
@@ -926,13 +925,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        int rc, type;
 
        /* First save ndlp, before we overwrite it */
-       ndlp = cmdiocb->context_un.ndlp;
+       ndlp = cmdiocb->ndlp;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
-       cmdiocb->context_un.rsp_iocb = rspiocb;
-
-       inp = (struct lpfc_dmabuf *) cmdiocb->context1;
-       outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+       cmdiocb->rsp_iocb = rspiocb;
+       inp = cmdiocb->cmd_dmabuf;
+       outp = cmdiocb->rsp_dmabuf;
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
                 "GID_FT cmpl:     status:x%x/x%x rtry:%d",
@@ -962,9 +960,15 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        }
        if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-                                "0226 NS query failed due to link event\n");
+                                "0226 NS query failed due to link event: "
+                                "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+                                "port_state x%x gidft_inp x%x\n",
+                                ulp_status, ulp_word4, vport->fc_flag,
+                                vport->port_state, vport->gidft_inp);
                if (vport->fc_flag & FC_RSCN_MODE)
                        lpfc_els_flush_rscn(vport);
+               if (vport->gidft_inp)
+                       vport->gidft_inp--;
                goto out;
        }
 
@@ -1143,12 +1147,12 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        int rc;
 
        /* First save ndlp, before we overwrite it */
-       ndlp = cmdiocb->context_un.ndlp;
+       ndlp = cmdiocb->ndlp;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
-       cmdiocb->context_un.rsp_iocb = rspiocb;
-       inp = (struct lpfc_dmabuf *)cmdiocb->context1;
-       outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+       cmdiocb->rsp_iocb = rspiocb;
+       inp = cmdiocb->cmd_dmabuf;
+       outp = cmdiocb->rsp_dmabuf;
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
                              "GID_PT cmpl:     status:x%x/x%x rtry:%d",
@@ -1179,9 +1183,15 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        }
        if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-                                "4166 NS query failed due to link event\n");
+                                "4166 NS query failed due to link event: "
+                                "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+                                "port_state x%x gidft_inp x%x\n",
+                                ulp_status, ulp_word4, vport->fc_flag,
+                                vport->port_state, vport->gidft_inp);
                if (vport->fc_flag & FC_RSCN_MODE)
                        lpfc_els_flush_rscn(vport);
+               if (vport->gidft_inp)
+                       vport->gidft_inp--;
                goto out;
        }
 
@@ -1346,8 +1356,8 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 {
        struct lpfc_vport *vport = cmdiocb->vport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-       struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
-       struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+       struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+       struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
        struct lpfc_sli_ct_request *CTrsp;
        int did, rc, retry;
        uint8_t fbits;
@@ -1426,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                         cmdiocb->retry, did);
                                if (rc == 0) {
                                        /* success */
-                                       free_ndlp = cmdiocb->context_un.ndlp;
+                                       free_ndlp = cmdiocb->ndlp;
                                        lpfc_ct_free_iocb(phba, cmdiocb);
                                        lpfc_nlp_put(free_ndlp);
                                        return;
@@ -1483,7 +1493,7 @@ out:
        }
 
 iocb_free:
-       free_ndlp = cmdiocb->context_un.ndlp;
+       free_ndlp = cmdiocb->ndlp;
        lpfc_ct_free_iocb(phba, cmdiocb);
        lpfc_nlp_put(free_ndlp);
        return;
@@ -1494,8 +1504,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        struct lpfc_iocbq *rspiocb)
 {
        struct lpfc_vport *vport = cmdiocb->vport;
-       struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
-       struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+       struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+       struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
        struct lpfc_sli_ct_request *CTrsp;
        int did;
        struct lpfc_nodelist *ndlp = NULL;
@@ -1519,7 +1529,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        }
 
        /* Preserve the nameserver node to release the reference. */
-       ns_ndlp = cmdiocb->context_un.ndlp;
+       ns_ndlp = cmdiocb->ndlp;
 
        if (ulp_status == IOSTAT_SUCCESS) {
                /* Good status, continue checking */
@@ -1605,13 +1615,13 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        u32 ulp_word4 = get_job_word4(phba, rspiocb);
 
        /* First save ndlp, before we overwrite it */
-       ndlp = cmdiocb->context_un.ndlp;
+       ndlp = cmdiocb->ndlp;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
-       cmdiocb->context_un.rsp_iocb = rspiocb;
+       cmdiocb->rsp_iocb = rspiocb;
 
-       inp = (struct lpfc_dmabuf *) cmdiocb->context1;
-       outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+       inp = cmdiocb->cmd_dmabuf;
+       outp = cmdiocb->rsp_dmabuf;
 
        cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
                                        CommandResponse.bits.CmdRsp);
@@ -1672,8 +1682,8 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                struct lpfc_dmabuf *outp;
                struct lpfc_sli_ct_request *CTrsp;
 
-               outp = (struct lpfc_dmabuf *) cmdiocb->context2;
-               CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+               outp = cmdiocb->rsp_dmabuf;
+               CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
                if (CTrsp->CommandResponse.bits.CmdRsp ==
                    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
                        vport->ct_flags |= FC_CT_RFT_ID;
@@ -1693,7 +1703,7 @@ lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                struct lpfc_dmabuf *outp;
                struct lpfc_sli_ct_request *CTrsp;
 
-               outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+               outp = cmdiocb->rsp_dmabuf;
                CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
                if (CTrsp->CommandResponse.bits.CmdRsp ==
                    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1714,8 +1724,8 @@ lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                struct lpfc_dmabuf *outp;
                struct lpfc_sli_ct_request *CTrsp;
 
-               outp = (struct lpfc_dmabuf *) cmdiocb->context2;
-               CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+               outp = cmdiocb->rsp_dmabuf;
+               CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
                if (CTrsp->CommandResponse.bits.CmdRsp ==
                    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
                        vport->ct_flags |= FC_CT_RSPN_ID;
@@ -1735,7 +1745,7 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                struct lpfc_dmabuf *outp;
                struct lpfc_sli_ct_request *CTrsp;
 
-               outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+               outp = cmdiocb->rsp_dmabuf;
                CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
                if (CTrsp->CommandResponse.bits.CmdRsp ==
                    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1768,8 +1778,8 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                struct lpfc_dmabuf *outp;
                struct lpfc_sli_ct_request *CTrsp;
 
-               outp = (struct lpfc_dmabuf *) cmdiocb->context2;
-               CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+               outp = cmdiocb->rsp_dmabuf;
+               CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
                if (CTrsp->CommandResponse.bits.CmdRsp ==
                    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
                        vport->ct_flags |= FC_CT_RFF_ID;
@@ -1865,7 +1875,7 @@ lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
        struct lpfc_dmabuf *mp;
        uint32_t type;
 
-       mp = cmdiocb->context1;
+       mp = cmdiocb->cmd_dmabuf;
        if (mp == NULL)
                return 0;
        CtReq = (struct lpfc_sli_ct_request *)mp->virt;
@@ -2018,28 +2028,30 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
                vport->ct_flags &= ~FC_CT_RFT_ID;
                CtReq->CommandResponse.bits.CmdRsp =
                    cpu_to_be16(SLI_CTNS_RFT_ID);
-               CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
+               CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);
+
+               /* Register Application Services type if vmid enabled. */
+               if (phba->cfg_vmid_app_header)
+                       CtReq->un.rft.app_serv_reg =
+                               cpu_to_be32(RFT_APP_SERV_REG);
 
                /* Register FC4 FCP type if enabled.  */
                if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
                    vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
-                       CtReq->un.rft.fcpReg = 1;
+                       CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
 
-               /* Register NVME type if enabled.  Defined LE and swapped.
-                * rsvd[0] is used as word1 because of the hard-coded
-                * word0 usage in the ct_request data structure.
-                */
+               /* Register NVME type if enabled. */
                if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
                    vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
-                       CtReq->un.rft.rsvd[0] =
-                               cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
+                       CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
 
                ptr = (uint32_t *)CtReq;
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-                                "6433 Issue RFT (%s %s): %08x %08x %08x %08x "
-                                "%08x %08x %08x %08x\n",
-                                CtReq->un.rft.fcpReg ? "FCP" : " ",
-                                CtReq->un.rft.rsvd[0] ? "NVME" : " ",
+                                "6433 Issue RFT (%s %s %s): %08x %08x %08x "
+                                "%08x %08x %08x %08x %08x\n",
+                                CtReq->un.rft.fcp_reg ? "FCP" : " ",
+                                CtReq->un.rft.nvme_reg ? "NVME" : " ",
+                                CtReq->un.rft.app_serv_reg ? "APPS" : " ",
                                 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
                                 *(ptr + 4), *(ptr + 5),
                                 *(ptr + 6), *(ptr + 7));
@@ -2156,6 +2168,41 @@ ns_cmd_exit:
 }
 
 /**
+ * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
+ * @phba: Pointer to HBA context object.
+ * @mask: Initial port attributes mask
+ *
+ * This function checks to see if any vports have deferred their FDMI RPRT.
+ * A vport's RPRT may be deferred if it is issued before the primary port's
+ * RHBA completes.
+ */
+static void
+lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
+{
+       struct lpfc_vport **vports;
+       struct lpfc_vport *vport;
+       struct lpfc_nodelist *ndlp;
+       int i;
+
+       phba->hba_flag |= HBA_RHBA_CMPL;
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+                       vport = vports[i];
+                       ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+                       if (!ndlp)
+                               continue;
+                       if (vport->ct_flags & FC_CT_RPRT_DEFER) {
+                               vport->ct_flags &= ~FC_CT_RPRT_DEFER;
+                               vport->fdmi_port_mask = mask;
+                               lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+                       }
+               }
+       }
+       lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
  * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
  * @phba: Pointer to HBA context object.
  * @cmdiocb: Pointer to the command IOCBQ.
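
lpfc_fdmi_rprt_defer() above is the flush side of a defer/replay pattern: a vport that must not send RPRT yet records the intent in ct_flags, and the physical port's RHBA completion walks the vports and issues the deferred commands (see also the DPRT handling further down). A minimal sketch of the defer side, using the flags introduced in this series but a hypothetical helper name:

    static void example_request_rprt(struct lpfc_vport *vport,
                                     struct lpfc_nodelist *ndlp)
    {
            if (vport->phba->hba_flag & HBA_RHBA_CMPL)
                    lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
            else
                    vport->ct_flags |= FC_CT_RPRT_DEFER;    /* replayed after RHBA */
    }
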
@@ -2169,8 +2216,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                       struct lpfc_iocbq *rspiocb)
 {
        struct lpfc_vport *vport = cmdiocb->vport;
-       struct lpfc_dmabuf *inp = cmdiocb->context1;
-       struct lpfc_dmabuf *outp = cmdiocb->context2;
+       struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+       struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
        struct lpfc_sli_ct_request *CTcmd = inp->virt;
        struct lpfc_sli_ct_request *CTrsp = outp->virt;
        uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
@@ -2224,7 +2271,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                 ulp_word4);
        }
 
-       free_ndlp = cmdiocb->context_un.ndlp;
+       free_ndlp = cmdiocb->ndlp;
        lpfc_ct_free_iocb(phba, cmdiocb);
        lpfc_nlp_put(free_ndlp);
 
@@ -2236,15 +2283,19 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        cmd =  be16_to_cpu(fdmi_cmd);
        if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
                /* FDMI rsp failed */
-               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
                                 "0220 FDMI cmd failed FS_RJT Data: x%x", cmd);
 
                /* Should we fallback to FDMI-2 / FDMI-1 ? */
                switch (cmd) {
                case SLI_MGMT_RHBA:
                        if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
-                               /* Fallback to FDMI-1 */
+                               /* Fallback to FDMI-1 for HBA attributes */
                                vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
+
+                               /* If HBA attributes are FDMI1, so should
+                                * port attributes be for consistency.
+                                */
                                vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
                                /* Start over */
                                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
@@ -2252,6 +2303,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        return;
 
                case SLI_MGMT_RPRT:
+                       if (vport->port_type != LPFC_PHYSICAL_PORT) {
+                               ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+                               if (!ndlp)
+                                       return;
+                       }
                        if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
                                /* Fallback to FDMI-1 */
                                vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
@@ -2272,9 +2328,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                phba->link_flag &= ~LS_CT_VEN_RPA;
                                if (phba->cmf_active_mode == LPFC_CFG_OFF)
                                        return;
-                               lpfc_printf_log(phba, KERN_ERR,
+                               lpfc_printf_log(phba, KERN_WARNING,
                                                LOG_DISCOVERY | LOG_ELS,
-                                               "6460 VEN FDMI RPA failure\n");
+                                               "6460 VEN FDMI RPA RJT\n");
                                return;
                        }
                        if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
@@ -2301,6 +2357,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
         */
        switch (cmd) {
        case SLI_MGMT_RHBA:
+               /* Check for any RPRTs deferred till after RHBA completes */
+               lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);
+
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
                break;
 
@@ -2309,10 +2368,26 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                break;
 
        case SLI_MGMT_DPRT:
-               if (vport->port_type == LPFC_PHYSICAL_PORT)
+               if (vport->port_type == LPFC_PHYSICAL_PORT) {
                        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
-               else
-                       lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+               } else {
+                       ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+                       if (!ndlp)
+                               return;
+
+                       /* Only issue a RPRT for the vport if the RHBA
+                        * for the physical port completes successfully.
+                        * We may have to defer the RPRT accordingly.
+                        */
+                       if (phba->hba_flag & HBA_RHBA_CMPL) {
+                               lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+                       } else {
+                               lpfc_printf_vlog(vport, KERN_INFO,
+                                                LOG_DISCOVERY,
+                                                "6078 RPRT deferred\n");
+                               vport->ct_flags |= FC_CT_RPRT_DEFER;
+                       }
+               }
                break;
        case SLI_MGMT_RPA:
                if (vport->port_type == LPFC_PHYSICAL_PORT &&
@@ -2327,7 +2402,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                break;
                        }
 
-                       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       lpfc_printf_log(phba, KERN_INFO,
+                                       LOG_DISCOVERY | LOG_CGN_MGMT,
                                        "6210 Issue Vendor MI FDMI %x\n",
                                        phba->sli4_hba.pc_sli4_params.mi_ver);
 
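Editorial note on the RPRT deferral added above: a vport may only register its port attributes (SLI_MGMT_RPRT) after the physical port's RHBA has completed, so the DPRT completion either issues the RPRT immediately (HBA_RHBA_CMPL already set) or parks it behind FC_CT_RPRT_DEFER for lpfc_fdmi_rprt_defer() to replay. The flag names come from these hunks; the values, types and helpers in the sketch below are stand-ins, not the driver's definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in flag values and types; only the flag names are from the diff. */
    #define HBA_RHBA_CMPL    0x1
    #define FC_CT_RPRT_DEFER 0x2

    struct hba_state   { unsigned int hba_flag; };
    struct vport_state { unsigned int ct_flags; bool is_physical; };

    /* Sketch of the SLI_MGMT_DPRT completion decision for one port. */
    static void dprt_done(struct hba_state *hba, struct vport_state *vp)
    {
            if (vp->is_physical) {
                    puts("issue RHBA for the physical port");
            } else if (hba->hba_flag & HBA_RHBA_CMPL) {
                    puts("RHBA already complete: issue RPRT now");
            } else {
                    /* Replayed by lpfc_fdmi_rprt_defer() once RHBA completes. */
                    vp->ct_flags |= FC_CT_RPRT_DEFER;
                    puts("6078 RPRT deferred");
            }
    }

    int main(void)
    {
            struct hba_state hba = { 0 };
            struct vport_state vp = { 0, false };

            dprt_done(&hba, &vp);   /* vport before RHBA completion: deferred */
            return 0;
    }

The matching lookups of the FDMI_DID node on the physical port, added to both lpfc_cmpl_ct_disc_fdmi() and lpfc_fdmi_cmd(), keep the vport cases from dereferencing a node they never had.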
@@ -2396,6 +2472,9 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
                        phba->link_flag &= ~LS_CT_VEN_RPA;
                        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
                } else {
+                       ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+                       if (!ndlp)
+                               return;
                        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
                }
 
@@ -2417,6 +2496,9 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
                              LPFC_FDMI_PORT_ATTR_num_disc);
        } else {
+               ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+               if (!ndlp)
+                       return;
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
                              LPFC_FDMI_PORT_ATTR_num_disc);
        }
@@ -2830,31 +2912,59 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_fdmi_attr_entry *ae;
        uint32_t size;
+       u32 tcfg;
+       u8 i, cnt;
 
        ae = &ad->AttrValue;
 
        ae->un.AttrInt = 0;
        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
-               if (phba->lmt & LMT_256Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
-               if (phba->lmt & LMT_128Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
-               if (phba->lmt & LMT_64Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
-               if (phba->lmt & LMT_32Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
-               if (phba->lmt & LMT_16Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_16GFC;
-               if (phba->lmt & LMT_10Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_10GFC;
-               if (phba->lmt & LMT_8Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_8GFC;
-               if (phba->lmt & LMT_4Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_4GFC;
-               if (phba->lmt & LMT_2Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
-               if (phba->lmt & LMT_1Gb)
-                       ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
+               cnt = 0;
+               if (phba->sli_rev == LPFC_SLI_REV4) {
+                       tcfg = phba->sli4_hba.conf_trunk;
+                       for (i = 0; i < 4; i++, tcfg >>= 1)
+                               if (tcfg & 1)
+                                       cnt++;
+               }
+
+               if (cnt > 2) { /* 4 lane trunk group */
+                       if (phba->lmt & LMT_64Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+                       if (phba->lmt & LMT_32Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+                       if (phba->lmt & LMT_16Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+               } else if (cnt) { /* 2 lane trunk group */
+                       if (phba->lmt & LMT_128Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+                       if (phba->lmt & LMT_64Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+                       if (phba->lmt & LMT_32Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+                       if (phba->lmt & LMT_16Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
+               } else {
+                       if (phba->lmt & LMT_256Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+                       if (phba->lmt & LMT_128Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+                       if (phba->lmt & LMT_64Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+                       if (phba->lmt & LMT_32Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
+                       if (phba->lmt & LMT_16Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_16GFC;
+                       if (phba->lmt & LMT_10Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_10GFC;
+                       if (phba->lmt & LMT_8Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_8GFC;
+                       if (phba->lmt & LMT_4Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_4GFC;
+                       if (phba->lmt & LMT_2Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
+                       if (phba->lmt & LMT_1Gb)
+                               ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
+               }
        } else {
                /* FCoE links support only one speed */
                switch (phba->fc_linkspeed) {
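The reworked supported-speed attribute folds link trunking into the FDMI report: it counts the lanes configured in the low four bits of conf_trunk and, for a 4-lane (cnt > 2) or 2-lane group, advertises each per-lane LMT capability at four or two times its base rate, so LMT_64Gb becomes 256GFC on a 4-lane trunk and so on. A minimal stand-alone sketch of that arithmetic, with hypothetical inputs rather than the driver's speed tables:

    #include <stdio.h>

    /* Count configured trunk lanes from the low 4 bits of conf_trunk,
     * mirroring the loop in lpfc_fdmi_port_attr_support_speed(). */
    static unsigned int trunk_lanes(unsigned int conf_trunk)
    {
            unsigned int i, cnt = 0;

            for (i = 0; i < 4; i++, conf_trunk >>= 1)
                    if (conf_trunk & 1)
                            cnt++;
            return cnt;
    }

    int main(void)
    {
            /* Hypothetical per-lane capability of 64Gb with a 4-lane trunk:
             * the FDMI attribute advertises the aggregate 256GFC rate. */
            unsigned int lane_gb = 64, lanes = trunk_lanes(0xF);

            printf("advertise %uGFC\n", lane_gb * (lanes > 2 ? 4 : lanes ? 2 : 1));
            return 0;
    }

The final else branch, taken when no trunk lanes are configured, keeps the one-to-one LMT-to-speed mapping that was there before.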
@@ -3125,6 +3235,7 @@ static int
 lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
                                   struct lpfc_fdmi_attr_def *ad)
 {
+       struct lpfc_hba *phba = vport->phba;
        struct lpfc_fdmi_attr_entry *ae;
        uint32_t size;
 
@@ -3135,7 +3246,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
        ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
 
        /* Check to see if NVME is configured or not */
-       if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+       if (vport == phba->pport &&
+           phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
 
        size = FOURBYTES + 32;
@@ -3459,8 +3571,10 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        /* FDMI request */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-                        "0218 FDMI Request Data: x%x x%x x%x\n",
-                        vport->fc_flag, vport->port_state, cmdcode);
+                        "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n",
+                        cmdcode, new_mask, vport->fdmi_port_mask,
+                        vport->fc_flag, vport->port_state);
+
        CtReq = (struct lpfc_sli_ct_request *)mp->virt;
 
        /* First populate the CT_IU preamble */
@@ -3529,6 +3643,12 @@ hba_out:
                break;
 
        case SLI_MGMT_RPRT:
+               if (vport->port_type != LPFC_PHYSICAL_PORT) {
+                       ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+                       if (!ndlp)
+                               return 0;
+               }
+               fallthrough;
        case SLI_MGMT_RPA:
                pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID;
                if (cmdcode == SLI_MGMT_RPRT) {
@@ -3593,6 +3713,12 @@ port_out:
                rsp_size = FC_MAX_NS_RSP;
                fallthrough;
        case SLI_MGMT_DPRT:
+               if (vport->port_type != LPFC_PHYSICAL_PORT) {
+                       ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+                       if (!ndlp)
+                               return 0;
+               }
+               fallthrough;
        case SLI_MGMT_DPA:
                pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
                memcpy((uint8_t *)&pe->PortName,
@@ -3780,8 +3906,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                      struct lpfc_iocbq *rspiocb)
 {
        struct lpfc_vport *vport = cmdiocb->vport;
-       struct lpfc_dmabuf *inp = cmdiocb->context1;
-       struct lpfc_dmabuf *outp = cmdiocb->context2;
+       struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+       struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
        struct lpfc_sli_ct_request *ctcmd = inp->virt;
        struct lpfc_sli_ct_request *ctrsp = outp->virt;
        u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
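A theme running through the CT and ELS hunks in this diff is the retirement of the numbered context1/context2/context3 pointers on the iocb in favour of purpose-named members (ndlp, cmd_dmabuf, rsp_dmabuf, bpl_dmabuf, rsp_iocb), so completion handlers stop casting anonymous pointers whose meaning differed between the CT and ELS paths. Roughly, and only as an illustration of the renaming rather than the real struct lpfc_iocbq layout:

    /* Illustrative only -- not the driver's real struct lpfc_iocbq.  The old
     * numbered pointers meant different things on different paths (the CT
     * completions kept cmd/rsp DMA buffers in context1/context2, the ELS
     * paths kept the ndlp and cmd buffer there), which named members make
     * explicit. */
    struct lpfc_dmabuf;
    struct lpfc_nodelist;

    struct lpfc_iocbq_sketch {
            struct lpfc_nodelist *ndlp;        /* node reference for the completion */
            struct lpfc_dmabuf   *cmd_dmabuf;  /* command payload */
            struct lpfc_dmabuf   *rsp_dmabuf;  /* response payload, where used */
            struct lpfc_dmabuf   *bpl_dmabuf;  /* buffer pointer list */
            void                 *rsp_iocb;    /* response iocb for state machine calls */
    };

The vmid completion above is the CT-side example: inp and outp now come from cmd_dmabuf and rsp_dmabuf instead of context1 and context2.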
index 872a263..07f9a6e 100644 (file)
@@ -152,7 +152,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
  * Buffer Descriptor Entries (BDEs), allocates buffers for both command
  * payload and response payload (if expected). The reference count on the
  * ndlp is incremented by 1 and the reference to the ndlp is put into
- * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * the ndlp field of the IOCB data structure for this IOCB to hold the ndlp
  * reference for the command's callback function to access later.
  *
  * Return code
@@ -279,8 +279,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
                bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
        }
 
-       elsiocb->context2 = pcmd;
-       elsiocb->context3 = pbuflist;
+       elsiocb->cmd_dmabuf = pcmd;
+       elsiocb->bpl_dmabuf = pbuflist;
        elsiocb->retry = retry;
        elsiocb->vport = vport;
        elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
@@ -345,7 +345,6 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 {
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *mbox;
-       struct lpfc_dmabuf *mp;
        struct lpfc_nodelist *ndlp;
        struct serv_parm *sp;
        int rc;
@@ -395,7 +394,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
        mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
        if (!mbox->ctx_ndlp) {
                err = 6;
-               goto fail_no_ndlp;
+               goto fail_free_mbox;
        }
 
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -411,13 +410,8 @@ fail_issue_reg_login:
         * for the failed mbox command.
         */
        lpfc_nlp_put(ndlp);
-fail_no_ndlp:
-       mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
 fail_free_mbox:
-       mempool_free(mbox, phba->mbox_mem_pool);
-
+       lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
 fail:
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -465,45 +459,37 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
 
        /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
        if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
-               dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-               if (!dmabuf) {
-                       rc = -ENOMEM;
-                       goto fail;
-               }
-               dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
-               if (!dmabuf->virt) {
+               rc = lpfc_mbox_rsrc_prep(phba, mboxq);
+               if (rc) {
                        rc = -ENOMEM;
-                       goto fail;
+                       goto fail_mbox;
                }
+               dmabuf = mboxq->ctx_buf;
                memcpy(dmabuf->virt, &phba->fc_fabparam,
                       sizeof(struct serv_parm));
        }
 
        vport->port_state = LPFC_FABRIC_CFG_LINK;
-       if (dmabuf)
+       if (dmabuf) {
                lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
-       else
+               /* lpfc_reg_vfi memsets the mailbox.  Restore the ctx_buf. */
+               mboxq->ctx_buf = dmabuf;
+       } else {
                lpfc_reg_vfi(mboxq, vport, 0);
+       }
 
        mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
        mboxq->vport = vport;
-       mboxq->ctx_buf = dmabuf;
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                rc = -ENXIO;
-               goto fail;
+               goto fail_mbox;
        }
        return 0;
 
+fail_mbox:
+       lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
 fail:
-       if (mboxq)
-               mempool_free(mboxq, phba->mbox_mem_pool);
-       if (dmabuf) {
-               if (dmabuf->virt)
-                       lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
-               kfree(dmabuf);
-       }
-
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                         "0289 Issue Register VFI failed: Err %d\n", rc);
@@ -959,9 +945,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 {
        struct lpfc_vport *vport = cmdiocb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-       struct lpfc_nodelist *ndlp = cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
        IOCB_t *irsp;
-       struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+       struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
        struct serv_parm *sp;
        uint16_t fcf_index;
        int rc;
@@ -1119,7 +1105,8 @@ stop_rr_fcf_flogi:
                         sp->cmn.priority_tagging, kref_read(&ndlp->kref));
 
        if (sp->cmn.priority_tagging)
-               vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;
+               vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
+                                                 LPFC_VMID_TYPE_PRIO);
 
        if (vport->port_state == LPFC_FLOGI) {
                /*
@@ -1232,7 +1219,7 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        uint32_t cmd;
        u32 ulp_status, ulp_word4;
 
-       pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+       pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
        cmd = *pcmd;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
@@ -1265,7 +1252,7 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  * out FLOGI ELS command with one outstanding fabric IOCB at a time.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the FLOGI ELS command.
  *
  * Return code
@@ -1295,7 +1282,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                return 1;
 
        wqe = &elsiocb->wqe;
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        icmd = &elsiocb->iocb;
 
        /* For FLOGI request, remainder of payload is service parameters */
@@ -1372,8 +1359,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                "Issue FLOGI:     opt:x%x",
                phba->sli3_options, 0, 0);
 
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -1387,6 +1374,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
 
+       /* Clear external loopback plug detected flag */
+       phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
        /* Check for a deferred FLOGI ACC condition */
        if (phba->defer_flogi_acc_flag) {
                /* lookup ndlp for received FLOGI */
@@ -1474,7 +1464,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
                ulp_command = get_job_cmnd(phba, iocb);
                if (ulp_command == CMD_ELS_REQUEST64_CR) {
-                       ndlp = (struct lpfc_nodelist *)(iocb->context1);
+                       ndlp = iocb->ndlp;
                        if (ndlp && ndlp->nlp_DID == Fabric_DID) {
                                if ((phba->pport->fc_flag & FC_PT2PT) &&
                                    !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
@@ -1531,11 +1521,16 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
                lpfc_enqueue_node(vport, ndlp);
        }
 
+       /* Reset the Fabric flag, topology change may have happened */
+       vport->fc_flag &= ~FC_FABRIC;
        if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
-               /* This decrement of reference count to node shall kick off
-                * the release of the node.
+               /* A node reference should be retained while the node is
+                * registered with a transport or dev-loss-evt work is pending.
+                * Otherwise, decrement the node reference to trigger release.
                 */
-               lpfc_nlp_put(ndlp);
+               if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+                   !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+                       lpfc_nlp_put(ndlp);
                return 0;
        }
        return 1;
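lpfc_initial_flogi(), and lpfc_initial_fdisc() in the following hunk, now drop the node reference on a failed issue only when the node is neither registered with the SCSI or NVMe transport nor waiting on dev-loss-evt work, so an ndlp still in use is not released early. The guard reduces to a small predicate; the flag values below are stand-ins for the real lpfc definitions:

    #include <stdbool.h>

    /* Stand-in flag bits; the real values live in the lpfc headers. */
    #define SCSI_XPT_REGD    0x1
    #define NVME_XPT_REGD    0x2
    #define NLP_IN_DEV_LOSS  0x4

    /* True when the failed-FLOGI/FDISC path may drop its node reference. */
    static bool may_put_node_ref(unsigned int fc4_xpt_flags, unsigned int nlp_flag)
    {
            return !(fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
                   !(nlp_flag & NLP_IN_DEV_LOSS);
    }

    int main(void)
    {
            /* No transport registration, no dev-loss pending: may put the ref. */
            return may_put_node_ref(0, 0) ? 0 : 1;
    }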
@@ -1578,10 +1573,13 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
        }
 
        if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
-               /* decrement node reference count to trigger the release of
-                * the node.
+               /* A node reference should be retained while the node is
+                * registered with a transport or dev-loss-evt work is pending.
+                * Otherwise, decrement the node reference to trigger release.
                 */
-               lpfc_nlp_put(ndlp);
+               if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+                   !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+                       lpfc_nlp_put(ndlp);
                return 0;
        }
        return 1;
@@ -1888,6 +1886,7 @@ lpfc_end_rscn(struct lpfc_vport *vport)
                else {
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag &= ~FC_RSCN_MODE;
+                       vport->fc_flag |= FC_RSCN_MEMENTO;
                        spin_unlock_irq(shost->host_lock);
                }
        }
@@ -1910,14 +1909,14 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                  struct lpfc_iocbq *rspiocb)
 {
        struct lpfc_vport *vport = cmdiocb->vport;
-       struct lpfc_nodelist *ndlp = cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
        struct lpfc_node_rrq *rrq;
        u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
        u32 ulp_word4 = get_job_word4(phba, rspiocb);
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
        rrq = cmdiocb->context_un.rrq;
-       cmdiocb->context_un.rsp_iocb = rspiocb;
+       cmdiocb->rsp_iocb = rspiocb;
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "RRQ cmpl:      status:x%x/x%x did:x%x",
@@ -1983,9 +1982,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        int disc;
        struct serv_parm *sp = NULL;
        u32 ulp_status, ulp_word4, did, iotag;
+       bool release_node = false;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
-       cmdiocb->context_un.rsp_iocb = rspiocb;
+       cmdiocb->rsp_iocb = rspiocb;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
        ulp_word4 = get_job_word4(phba, rspiocb);
@@ -2071,23 +2071,24 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        spin_unlock_irq(&ndlp->lock);
                        goto out;
                }
-               spin_unlock_irq(&ndlp->lock);
 
                /* No PLOGI collision and the node is not registered with the
                 * scsi or nvme transport. It is no longer an active node. Just
                 * start the device remove process.
                 */
                if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
-                       spin_lock_irq(&ndlp->lock);
                        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-                       spin_unlock_irq(&ndlp->lock);
+                       if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+                               release_node = true;
+               }
+               spin_unlock_irq(&ndlp->lock);
+
+               if (release_node)
                        lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                                NLP_EVT_DEVICE_RM);
-               }
        } else {
                /* Good status, call state machine */
-               prsp = list_entry(((struct lpfc_dmabuf *)
-                                  cmdiocb->context2)->list.next,
+               prsp = list_entry(cmdiocb->cmd_dmabuf->list.next,
                                  struct lpfc_dmabuf, list);
                ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 
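The PLOGI completion rework above, repeated for PRLI and ADISC further down, keeps the whole flag inspection under one ndlp->lock hold: clear NLP_NPR_2B_DISC, record the decision in a release_node bool, unlock, and only then call lpfc_disc_state_machine() with NLP_EVT_DEVICE_RM. A rough outline of that shape, with empty stubs in place of the spinlock calls and stand-in flag values:

    #include <stdbool.h>
    #include <stdio.h>

    #define XPT_REGD        0x1
    #define NLP_IN_DEV_LOSS 0x2
    #define NLP_NPR_2B_DISC 0x4

    static void lock(void)   { }   /* stands in for spin_lock_irq(&ndlp->lock)   */
    static void unlock(void) { }   /* stands in for spin_unlock_irq(&ndlp->lock) */

    /* Decide on node removal entirely under one lock hold, act after unlock. */
    static void plogi_failed(unsigned int xpt_flags, unsigned int *nlp_flag)
    {
            bool release_node = false;

            lock();
            if (!(xpt_flags & XPT_REGD)) {
                    *nlp_flag &= ~NLP_NPR_2B_DISC;
                    if (!(*nlp_flag & NLP_IN_DEV_LOSS))
                            release_node = true;
            }
            unlock();

            if (release_node)
                    puts("lpfc_disc_state_machine(..., NLP_EVT_DEVICE_RM)");
    }

    int main(void)
    {
            unsigned int nlp_flag = NLP_NPR_2B_DISC;

            plogi_failed(0, &nlp_flag);   /* not transport-registered: node released */
            return 0;
    }

Because the lock is taken before the transport-registration test, the NLP_IN_DEV_LOSS check and the removal decision are made under the same lock hold.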
@@ -2132,7 +2133,7 @@ out:
 
 out_freeiocb:
        /* Release the reference on the original I/O request. */
-       free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+       free_ndlp = cmdiocb->ndlp;
 
        lpfc_els_free_iocb(phba, cmdiocb);
        lpfc_nlp_put(free_ndlp);
@@ -2152,7 +2153,7 @@ out_freeiocb:
  * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding
- * the ndlp and the reference to ndlp will be stored into the context1 field
+ * the ndlp and the reference to ndlp will be stored into the ndlp field
  * of the IOCB for the completion callback function to the PLOGI ELS command.
  *
  * Return code
@@ -2203,7 +2204,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
        ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
        spin_unlock_irq(&ndlp->lock);
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        /* For PLOGI request, remainder of payload is service parameters */
        *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
@@ -2255,8 +2256,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Issue PLOGI:     did:x%x refcnt %d",
                              did, kref_read(&ndlp->kref), 0);
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -2294,11 +2295,12 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        u32 loglevel;
        u32 ulp_status;
        u32 ulp_word4;
+       bool release_node = false;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
-       cmdiocb->context_un.rsp_iocb = rspiocb;
+       cmdiocb->rsp_iocb = rspiocb;
 
-       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       ndlp = cmdiocb->ndlp;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
        ulp_word4 = get_job_word4(phba, rspiocb);
@@ -2370,14 +2372,18 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 * it is no longer an active node.  Otherwise devloss
                 * handles the final cleanup.
                 */
+               spin_lock_irq(&ndlp->lock);
                if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
                    !ndlp->fc4_prli_sent) {
-                       spin_lock_irq(&ndlp->lock);
                        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-                       spin_unlock_irq(&ndlp->lock);
+                       if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+                               release_node = true;
+               }
+               spin_unlock_irq(&ndlp->lock);
+
+               if (release_node)
                        lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                                NLP_EVT_DEVICE_RM);
-               }
        } else {
                /* Good status, call state machine.  However, if another
                 * PRLI is outstanding, don't call the state machine
@@ -2407,7 +2413,7 @@ out:
  * routine lpfc_sli_issue_iocb() to send out PRLI command.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the PRLI ELS command.
  *
  * Return code
@@ -2428,13 +2434,14 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        u32 local_nlp_type, elscmd;
 
        /*
-        * If we are in RSCN mode, the FC4 types supported from a
+        * If discovery was kicked off from RSCN mode,
+        * the FC4 types supported from a
         * previous GFT_ID command may not be accurate. So, if we
         * are a NVME Initiator, always look for the possibility of
         * the remote NPort being an NVME Target.
         */
        if (phba->sli_rev == LPFC_SLI_REV4 &&
-           vport->fc_flag & FC_RSCN_MODE &&
+           vport->fc_flag & (FC_RSCN_MODE | FC_RSCN_MEMENTO) &&
            vport->nvmei_support)
                ndlp->nlp_fc4_type |= NLP_FC4_NVME;
        local_nlp_type = ndlp->nlp_fc4_type;
@@ -2481,7 +2488,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (!elsiocb)
                return 1;
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        /* For PRLI request, remainder of payload is service parameters */
        memset(pcmd, 0, cmdsize);
@@ -2555,33 +2562,32 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        phba->fc_stat.elsXmitPRLI++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
-       spin_lock_irq(&ndlp->lock);
-       ndlp->nlp_flag |= NLP_PRLI_SND;
-
-       /* The vport counters are used for lpfc_scan_finished, but
-        * the ndlp is used to track outstanding PRLIs for different
-        * FC4 types.
-        */
-       vport->fc_prli_sent++;
-       ndlp->fc4_prli_sent++;
-       spin_unlock_irq(&ndlp->lock);
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Issue PRLI:  did:x%x refcnt %d",
                              ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
-               goto err;
+               return 1;
        }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
                lpfc_nlp_put(ndlp);
-               goto err;
+               return 1;
        }
 
+       /* The vport counters are used for lpfc_scan_finished, but
+        * the ndlp is used to track outstanding PRLIs for different
+        * FC4 types.
+        */
+       spin_lock_irq(&ndlp->lock);
+       ndlp->nlp_flag |= NLP_PRLI_SND;
+       vport->fc_prli_sent++;
+       ndlp->fc4_prli_sent++;
+       spin_unlock_irq(&ndlp->lock);
 
        /* The driver supports 2 FC4 types.  Make sure
         * a PRLI is issued for all types before exiting.
@@ -2591,12 +2597,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                goto send_next_prli;
        else
                return 0;
-
-err:
-       spin_lock_irq(&ndlp->lock);
-       ndlp->nlp_flag &= ~NLP_PRLI_SND;
-       spin_unlock_irq(&ndlp->lock);
-       return 1;
 }
 
 /**
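lpfc_issue_els_prli() now takes its ndlp reference and submits the WQE first, and bumps NLP_PRLI_SND, vport->fc_prli_sent and ndlp->fc4_prli_sent only after lpfc_sli_issue_iocb() accepts the request, which is what lets the old err: unwind label go away. In general terms, account for the work only once the submit step can no longer fail; a trivial stand-alone sketch, not the driver code:

    #include <stdio.h>

    static int submit(void) { return 0; }   /* stand-in for lpfc_sli_issue_iocb() */

    static int issue_prli_sketch(unsigned int *prli_sent)
    {
            if (submit() != 0)
                    return 1;        /* nothing to roll back: counter untouched */

            (*prli_sent)++;          /* account only after the submit succeeded */
            return 0;
    }

    int main(void)
    {
            unsigned int sent = 0;

            issue_prli_sketch(&sent);
            printf("outstanding PRLIs: %u\n", sent);
            return 0;
    }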
@@ -2749,11 +2749,12 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        struct lpfc_nodelist *ndlp;
        int  disc;
        u32 ulp_status, ulp_word4, tmo;
+       bool release_node = false;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
-       cmdiocb->context_un.rsp_iocb = rspiocb;
+       cmdiocb->rsp_iocb = rspiocb;
 
-       ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       ndlp = cmdiocb->ndlp;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
        ulp_word4 = get_job_word4(phba, rspiocb);
@@ -2815,13 +2816,17 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 * transport, it is no longer an active node. Otherwise
                 * devloss handles the final cleanup.
                 */
+               spin_lock_irq(&ndlp->lock);
                if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
-                       spin_lock_irq(&ndlp->lock);
                        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-                       spin_unlock_irq(&ndlp->lock);
+                       if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+                               release_node = true;
+               }
+               spin_unlock_irq(&ndlp->lock);
+
+               if (release_node)
                        lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                                NLP_EVT_DEVICE_RM);
-               }
        } else
                /* Good status, call state machine */
                lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2848,7 +2853,7 @@ out:
  * to issue the ADISC ELS command.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the ADISC ELS command.
  *
  * Return code
@@ -2872,7 +2877,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (!elsiocb)
                return 1;
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        /* For ADISC request, remainder of payload is service parameters */
        *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
@@ -2890,8 +2895,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag |= NLP_ADISC_SND;
        spin_unlock_irq(&ndlp->lock);
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                goto err;
        }
@@ -2931,7 +2936,7 @@ static void
 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                   struct lpfc_iocbq *rspiocb)
 {
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
        struct lpfc_vport *vport = ndlp->vport;
        IOCB_t *irsp;
        unsigned long flags;
@@ -2942,7 +2947,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        u32 tmo;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
-       cmdiocb->context_un.rsp_iocb = rspiocb;
+       cmdiocb->rsp_iocb = rspiocb;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
        ulp_word4 = get_job_word4(phba, rspiocb);
@@ -2993,7 +2998,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                 ndlp->nlp_DID, ulp_status,
                                 ulp_word4);
 
+               /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
                if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
+                       lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+                                               NLP_EVT_DEVICE_RM);
                        skip_recovery = 1;
                        goto out;
                }
@@ -3081,7 +3089,7 @@ out:
  * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the LOGO ELS command.
  *
  * Callers of this routine are expected to unregister the RPI first
@@ -3113,7 +3121,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (!elsiocb)
                return 1;
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
        pcmd += sizeof(uint32_t);
 
@@ -3128,8 +3136,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        ndlp->nlp_flag |= NLP_LOGO_SND;
        ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
        spin_unlock_irq(&ndlp->lock);
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                goto err;
        }
@@ -3207,7 +3215,7 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        /* Check to see if link went down during discovery */
        lpfc_els_chk_latt(vport);
 
-       free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+       free_ndlp = cmdiocb->ndlp;
 
        lpfc_els_free_iocb(phba, cmdiocb);
        lpfc_nlp_put(free_ndlp);
@@ -3234,7 +3242,6 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ns_ndlp;
        LPFC_MBOXQ_t *mbox;
-       struct lpfc_dmabuf *mp;
 
        if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
                return rc;
@@ -3271,7 +3278,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
        mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
        if (!mbox->ctx_ndlp) {
                rc = -ENOMEM;
-               goto out_mem;
+               goto out;
        }
 
        mbox->vport = vport;
@@ -3279,21 +3286,15 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
        if (rc == MBX_NOT_FINISHED) {
                rc = -ENODEV;
                lpfc_nlp_put(fc_ndlp);
-               goto out_mem;
+               goto out;
        }
        /* Success path. Exit. */
        lpfc_nlp_set_state(vport, fc_ndlp,
                           NLP_STE_REG_LOGIN_ISSUE);
        return 0;
 
- out_mem:
-       fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
-       mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-
  out:
-       mempool_free(mbox, phba->mbox_mem_pool);
+       lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                         "0938 %s: failed to format reg_login "
                         "Data: x%x x%x x%x x%x\n", __func__,
@@ -3323,7 +3324,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        struct lpfc_dmabuf *pcmd, *prsp;
        u32 *pdata;
        u32 cmd;
-       struct lpfc_nodelist *ndlp = cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
        u32 ulp_status, ulp_word4, tmo, did, iotag;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
@@ -3348,7 +3349,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                         "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
                         iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
 
-       pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        if (!pcmd)
                goto out;
 
@@ -3371,7 +3372,6 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                lpfc_issue_els_edc(vport, cmdiocb->retry);
                                break;
                        case ELS_CMD_RDF:
-                               cmdiocb->context1 = NULL; /* save ndlp refcnt */
                                lpfc_issue_els_rdf(vport, cmdiocb->retry);
                                break;
                        }
@@ -3439,7 +3439,7 @@ out:
  * routine is invoked to send the SCR IOCB.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the SCR ELS command.
  *
  * Return code
@@ -3481,7 +3481,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
                        return 1;
                }
        }
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
        pcmd += sizeof(uint32_t);
@@ -3496,8 +3496,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
 
        phba->fc_stat.elsXmitSCR++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -3528,7 +3528,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
  *  replay the RSCN to registered recipients.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the RSCN ELS command.
  *
  * Return code
@@ -3578,7 +3578,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
        if (!elsiocb)
                return 1;
 
-       event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+       event = elsiocb->cmd_dmabuf->virt;
 
        event->rscn.rscn_cmd = ELS_RSCN;
        event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
@@ -3593,8 +3593,8 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
 
        phba->fc_stat.elsXmitRSCN++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -3627,7 +3627,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
  * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the FARPR ELS command.
  *
  * Return code
@@ -3662,7 +3662,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
        if (!elsiocb)
                return 1;
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
        pcmd += sizeof(uint32_t);
@@ -3692,8 +3692,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
        phba->fc_stat.elsXmitFARPR++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -3724,7 +3724,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
  * for diagnostic functions.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the RDF ELS command.
  *
  * Return code
@@ -3761,8 +3761,7 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
                return -ENOMEM;
 
        /* Configure the payload for the supported FPIN events. */
-       prdf = (struct lpfc_els_rdf_req *)
-               (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
        memset(prdf, 0, cmdsize);
        prdf->rdf.fpin_cmd = ELS_RDF;
        prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
@@ -3783,8 +3782,8 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
 
        phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return -EIO;
        }
@@ -3855,9 +3854,6 @@ lpfc_least_capable_settings(struct lpfc_hba *phba,
 {
        u32 rsp_sig_cap = 0, drv_sig_cap = 0;
        u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
-       struct lpfc_cgn_info *cp;
-       u32 crc;
-       u16 sig_freq;
 
        /* Get rsp signal and frequency capabilities.  */
        rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
@@ -3913,25 +3909,7 @@ lpfc_least_capable_settings(struct lpfc_hba *phba,
                }
        }
 
-       if (!phba->cgn_i)
-               return;
-
-       /* Update signal frequency in congestion info buffer */
-       cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
-
-       /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
-        * are received by the HBA
-        */
-       sig_freq = phba->cgn_sig_freq;
-
-       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
-               cp->cgn_warn_freq = cpu_to_le16(sig_freq);
-       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
-               cp->cgn_alarm_freq = cpu_to_le16(sig_freq);
-               cp->cgn_warn_freq = cpu_to_le16(sig_freq);
-       }
-       crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
-       cp->cgn_info_crc = cpu_to_le32(crc);
+       /* We are NOT recording signal frequency in congestion info buffer */
        return;
 
 out_no_support:
@@ -3973,7 +3951,7 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        struct lpfc_nodelist *ndlp;
        u32 ulp_status, ulp_word4, tmo, did, iotag;
 
-       ndlp = cmdiocb->context1;
+       ndlp = cmdiocb->ndlp;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
        ulp_word4 = get_job_word4(phba, rspiocb);
@@ -3997,7 +3975,7 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
                        iotag, ulp_status, ulp_word4, tmo);
 
-       pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        if (!pcmd)
                goto out;
 
@@ -4246,7 +4224,7 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
                goto try_rdf;
 
        /* Configure the payload for the supported Diagnostics capabilities. */
-       pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
        memset(pcmd, 0, cmdsize);
        edc_req = (struct lpfc_els_edc_req *)pcmd;
        edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
@@ -4258,15 +4236,15 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
 
        phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
 
-       lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT,
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
                         "4623 Xmit EDC to remote "
                         "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
                         ndlp->nlp_DID, phba->cgn_reg_signal,
                         phba->cgn_reg_fpin);
 
        elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return -EIO;
        }
@@ -4544,8 +4522,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 {
        struct lpfc_vport *vport = cmdiocb->vport;
        union lpfc_wqe128 *irsp = &rspiocb->wqe;
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-       struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+       struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
        uint32_t *elscmd;
        struct ls_rjt stat;
        int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
@@ -4557,7 +4535,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        u32 ulp_word4 = get_job_word4(phba, rspiocb);
 
 
-       /* Note: context2 may be 0 for internal driver abort
+       /* Note: cmd_dmabuf may be 0 for internal driver abort
         * of a delayed ELS command.
         */
 
@@ -5068,10 +5046,10 @@ lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
  * command IOCB data structure contains the reference to various associated
  * resources, these fields must be set to NULL if the associated reference
  * not present:
- *   context1 - reference to ndlp
- *   context2 - reference to cmd
- *   context2->next - reference to rsp
- *   context3 - reference to bpl
+ *   cmd_dmabuf - reference to cmd
+ *   cmd_dmabuf->next - reference to rsp
+ *   rsp_dmabuf - unused
+ *   bpl_dmabuf - reference to bpl
  *
  * It first properly decrements the reference count held on ndlp for the
  * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
@@ -5091,19 +5069,19 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
 {
        struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
 
-       /* The I/O iocb is complete.  Clear the context1 data. */
-       elsiocb->context1 = NULL;
+       /* The I/O iocb is complete.  Clear the node and first dmabuf */
+       elsiocb->ndlp = NULL;
 
-       /* context2  = cmd,  context2->next = rsp, context3 = bpl */
-       if (elsiocb->context2) {
+       /* cmd_dmabuf = cmd,  cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
+       if (elsiocb->cmd_dmabuf) {
                if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
                        /* Firmware could still be in progress of DMAing
                         * payload, so don't free data buffer till after
                         * a hbeat.
                         */
                        elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
-                       buf_ptr = elsiocb->context2;
-                       elsiocb->context2 = NULL;
+                       buf_ptr = elsiocb->cmd_dmabuf;
+                       elsiocb->cmd_dmabuf = NULL;
                        if (buf_ptr) {
                                buf_ptr1 = NULL;
                                spin_lock_irq(&phba->hbalock);
@@ -5122,16 +5100,16 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
                                spin_unlock_irq(&phba->hbalock);
                        }
                } else {
-                       buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+                       buf_ptr1 = elsiocb->cmd_dmabuf;
                        lpfc_els_free_data(phba, buf_ptr1);
-                       elsiocb->context2 = NULL;
+                       elsiocb->cmd_dmabuf = NULL;
                }
        }
 
-       if (elsiocb->context3) {
-               buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+       if (elsiocb->bpl_dmabuf) {
+               buf_ptr = elsiocb->bpl_dmabuf;
                lpfc_els_free_bpl(phba, buf_ptr);
-               elsiocb->context3 = NULL;
+               elsiocb->bpl_dmabuf = NULL;
        }
        lpfc_sli_release_iocbq(phba, elsiocb);
        return 0;
@@ -5147,7 +5125,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
  * Accept (ACC) Response ELS command. This routine is invoked to indicate
  * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
  * release the ndlp if it has the last reference remaining (reference count
- * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
+ * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp
  * field to NULL to inform the following lpfc_els_free_iocb() routine no
  * ndlp reference count needs to be decremented. Otherwise, the ndlp
  * reference use-count shall be decremented by the lpfc_els_free_iocb()
@@ -5158,7 +5136,7 @@ static void
 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                       struct lpfc_iocbq *rspiocb)
 {
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
        struct lpfc_vport *vport = cmdiocb->vport;
        u32 ulp_status, ulp_word4;
 
@@ -5204,7 +5182,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        /* Indicate the node has already released, should
                         * not reference to it from within lpfc_els_free_iocb.
                         */
-                       cmdiocb->context1 = NULL;
+                       cmdiocb->ndlp = NULL;
                }
        }
  out:
@@ -5232,14 +5210,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 void
 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+       struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
        u32 mbx_flag = pmb->mbox_flag;
        u32 mbx_cmd = pmb->u.mb.mbxCommand;
 
-       pmb->ctx_buf = NULL;
-       pmb->ctx_ndlp = NULL;
-
        if (ndlp) {
                lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                                 "0006 rpi x%x DID:%x flg:%x %d x%px "
@@ -5262,10 +5236,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                lpfc_drop_node(ndlp->vport, ndlp);
        }
 
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
-       return;
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 }
 
 /**
@@ -5285,12 +5256,11 @@ static void
 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                  struct lpfc_iocbq *rspiocb)
 {
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
        struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
        struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
        IOCB_t  *irsp;
        LPFC_MBOXQ_t *mbox = NULL;
-       struct lpfc_dmabuf *mp = NULL;
        u32 ulp_status, ulp_word4, tmo, did, iotag;
 
        if (!vport) {
@@ -5316,14 +5286,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
        /* Check to see if link went down during discovery */
        if (!ndlp || lpfc_els_chk_latt(vport)) {
-               if (mbox) {
-                       mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
-                       if (mp) {
-                               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                               kfree(mp);
-                       }
-                       mempool_free(mbox, phba->mbox_mem_pool);
-               }
+               if (mbox)
+                       lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
                goto out;
        }
 
@@ -5354,14 +5318,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                                         ndlp->nlp_state,
                                                         ndlp->nlp_rpi,
                                                         ndlp->nlp_flag);
-                                       mp = mbox->ctx_buf;
-                                       if (mp) {
-                                               lpfc_mbuf_free(phba, mp->virt,
-                                                              mp->phys);
-                                               kfree(mp);
-                                       }
-                                       mempool_free(mbox, phba->mbox_mem_pool);
-                                       goto out;
+                                       goto out_free_mbox;
                                }
                        }
 
@@ -5370,7 +5327,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                         */
                        mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
                        if (!mbox->ctx_ndlp)
-                               goto out;
+                               goto out_free_mbox;
 
                        mbox->vport = vport;
                        if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
@@ -5402,12 +5359,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
                                ndlp->nlp_rpi);
                }
-               mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
-               if (mp) {
-                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                       kfree(mp);
-               }
-               mempool_free(mbox, phba->mbox_mem_pool);
+out_free_mbox:
+               lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
        }
 out:
        if (ndlp && shost) {
@@ -5459,7 +5412,7 @@ out:
  * mailbox command to the HBA later when callback is invoked.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the corresponding
  * response ELS IOCB command.
  *
@@ -5516,7 +5469,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
                                oldcmd->unsli3.rcvsli3.ox_id;
                }
 
-               pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+               pcmd = elsiocb->cmd_dmabuf->virt;
                *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
                pcmd += sizeof(uint32_t);
 
@@ -5551,7 +5504,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
                                oldcmd->unsli3.rcvsli3.ox_id;
                }
 
-               pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+               pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
 
                if (mbox)
                        elsiocb->context_un.mbox = mbox;
@@ -5629,9 +5582,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
                                oldcmd->unsli3.rcvsli3.ox_id;
                }
 
-               pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+               pcmd = (u8 *) elsiocb->cmd_dmabuf->virt;
 
-               memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+               memcpy(pcmd, oldiocb->cmd_dmabuf->virt,
                       sizeof(uint32_t) + sizeof(PRLO));
                *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
                els_pkt_ptr = (ELS_PKT *) pcmd;
@@ -5667,7 +5620,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
                                oldcmd->unsli3.rcvsli3.ox_id;
                }
 
-               pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+               pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
                rdf_resp = (struct fc_els_rdf_resp *)pcmd;
                memset(rdf_resp, 0, sizeof(*rdf_resp));
                rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
@@ -5695,8 +5648,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
        }
 
        phba->fc_stat.elsXmitACC++;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -5733,7 +5686,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
  * to issue to the HBA later.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the reject response
  * ELS IOCB command.
  *
@@ -5774,7 +5727,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
                icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
        }
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
        pcmd += sizeof(uint32_t);
@@ -5797,8 +5750,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 
        phba->fc_stat.elsXmitLSRJT++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -5870,8 +5823,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
        }
 
-       pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
-
+       pcmd = elsiocb->cmd_dmabuf->virt;
        memset(pcmd, 0, cmdsize);
 
        edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
@@ -5891,8 +5843,8 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
 
        phba->fc_stat.elsXmitACC++;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -5927,7 +5879,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the ADISC Accept response
  * ELS IOCB command.
  *
@@ -5980,7 +5932,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
                         elsiocb->iotag, ulp_context,
                         ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
                         ndlp->nlp_rpi);
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
        pcmd += sizeof(uint32_t);
@@ -5997,8 +5949,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 
        phba->fc_stat.elsXmitACC++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -6024,7 +5976,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the PRLI Accept response
  * ELS IOCB command.
  *
@@ -6054,7 +6006,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
        /* Need the incoming PRLI payload to determine if the ACC is for an
         * FC4 or NVME PRLI type.  The PRLI type is at word 1.
         */
-       req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
+       req_buf = oldiocb->cmd_dmabuf;
        req_payload = (((uint32_t *)req_buf->virt) + 1);
 
        /* PRLI type payload is at byte 3 for FCP or NVME. */
@@ -6102,7 +6054,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
                         elsiocb->iotag, ulp_context,
                         ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
                         ndlp->nlp_rpi);
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        memset(pcmd, 0, cmdsize);
 
        *((uint32_t *)(pcmd)) = elsrspcmd;
@@ -6175,8 +6127,8 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 
        phba->fc_stat.elsXmitACC++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
-       elsiocb->context1 =  lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp =  lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -6204,7 +6156,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
  * issue the response.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function.
  *
  * Return code
@@ -6255,7 +6207,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0132 Xmit RNID ACC response tag x%x xri x%x\n",
                         elsiocb->iotag, ulp_context);
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
        pcmd += sizeof(uint32_t);
 
@@ -6289,8 +6241,8 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 
        phba->fc_stat.elsXmitACC++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -6325,7 +6277,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
        struct lpfc_node_rrq *prrq;
 
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+       pcmd = (uint8_t *)iocb->cmd_dmabuf->virt;
        pcmd += sizeof(uint32_t);
        rrq = (struct RRQ *)pcmd;
        rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
@@ -6412,7 +6364,7 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
                         elsiocb->iotag, ulp_context);
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
        pcmd += sizeof(uint32_t);
        memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
@@ -6423,8 +6375,8 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
 
        phba->fc_stat.elsXmitACC++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
-       elsiocb->context1 =  lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp =  lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -7048,9 +7000,8 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
                        elsiocb->iotag, ulp_context,
                        ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
                        ndlp->nlp_rpi);
-       rdp_res = (struct fc_rdp_res_frame *)
-               (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt;
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
        *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
 
@@ -7101,15 +7052,14 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
 
        /* Now that we know the true size of the payload, update the BPL */
-       bpl = (struct ulp_bde64 *)
-               (((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
+       bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
        bpl->tus.f.bdeSize = len;
        bpl->tus.f.bdeFlags = 0;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
 
        phba->fc_stat.elsXmitACC++;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                goto free_rdp_context;
        }
@@ -7143,7 +7093,7 @@ error:
                icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
        }
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
        stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
@@ -7151,8 +7101,8 @@ error:
 
        phba->fc_stat.elsXmitLSRJT++;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                goto free_rdp_context;
        }
@@ -7175,7 +7125,6 @@ static int
 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
 {
        LPFC_MBOXQ_t *mbox = NULL;
-       struct lpfc_dmabuf *mp;
        int rc;
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7186,21 +7135,19 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
        }
 
        if (lpfc_sli4_dump_page_a0(phba, mbox))
-               goto prep_mbox_fail;
+               goto rdp_fail;
        mbox->vport = rdp_context->ndlp->vport;
        mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
        mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
-               mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               goto issue_mbox_fail;
+               lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+               return 1;
        }
 
        return 0;
 
-prep_mbox_fail:
-issue_mbox_fail:
+rdp_fail:
        mempool_free(mbox, phba->mbox_mem_pool);
        return 1;
 }
@@ -7248,7 +7195,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                goto error;
        }
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -7360,8 +7307,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (!elsiocb)
                goto free_lcb_context;
 
-       lcb_res = (struct fc_lcb_res_frame *)
-               (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt;
 
        memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
 
@@ -7376,7 +7322,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
        }
 
-       pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
        lcb_res->lcb_sub_command = lcb_context->sub_command;
        lcb_res->lcb_type = lcb_context->type;
@@ -7386,8 +7332,8 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitACC++;
 
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                goto out;
        }
@@ -7421,7 +7367,7 @@ error:
                icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
        }
 
-       pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
        stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
@@ -7432,8 +7378,8 @@ error:
 
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitLSRJT++;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                goto free_lcb_context;
        }
@@ -7545,7 +7491,7 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        u8 state, rjt_err = 0;
        struct ls_rjt stat;
 
-       pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        lp = (uint8_t *)pcmd->virt;
        beacon = (struct fc_lcb_request_frame *)pcmd->virt;
 
@@ -7742,10 +7688,10 @@ return_did_out:
 static int
 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
 {
-       struct lpfc_nodelist *ndlp = NULL;
+       struct lpfc_nodelist *ndlp = NULL, *n;
 
        /* Move all affected nodes by pending RSCNs to NPR state. */
-       list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+       list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
                if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
                    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
                        continue;
@@ -7791,7 +7737,7 @@ lpfc_send_rscn_event(struct lpfc_vport *vport,
        uint32_t payload_len;
        struct lpfc_rscn_event_header *rscn_event_data;
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        payload_ptr = (uint32_t *) pcmd->virt;
        payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
 
@@ -7851,7 +7797,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        int rscn_id = 0, hba_id = 0;
        int i, tmo;
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        lp = (uint32_t *) pcmd->virt;
 
        payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
@@ -7953,7 +7899,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        /* Get the array count after successfully have the token */
        rscn_cnt = vport->fc_rscn_id_cnt;
        /* If we are already processing an RSCN, save the received
-        * RSCN payload buffer, cmdiocb->context2 to process later.
+        * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later.
         */
        if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -7972,6 +7918,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
                    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
                        vport->fc_flag |= FC_RSCN_MODE;
+                       vport->fc_flag &= ~FC_RSCN_MEMENTO;
                        spin_unlock_irq(shost->host_lock);
                        if (rscn_cnt) {
                                cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
@@ -7986,10 +7933,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                        } else {
                                vport->fc_rscn_id_list[rscn_cnt] = pcmd;
                                vport->fc_rscn_id_cnt++;
-                               /* If we zero, cmdiocb->context2, the calling
+                               /* If we zero cmdiocb->cmd_dmabuf, the calling
                                 * routine will not try to free it.
                                 */
-                               cmdiocb->context2 = NULL;
+                               cmdiocb->cmd_dmabuf = NULL;
                        }
                        /* Deferred RSCN */
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -8021,15 +7968,16 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_RSCN_MODE;
+       vport->fc_flag &= ~FC_RSCN_MEMENTO;
        spin_unlock_irq(shost->host_lock);
        vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
        /* Indicate we are done walking fc_rscn_id_list on this vport */
        vport->fc_rscn_flush = 0;
        /*
-        * If we zero, cmdiocb->context2, the calling routine will
+        * If we zero cmdiocb->cmd_dmabuf, the calling routine will
         * not try to free it.
         */
-       cmdiocb->context2 = NULL;
+       cmdiocb->cmd_dmabuf = NULL;
        lpfc_set_disctmo(vport);
        /* Send back ACC */
        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
@@ -8153,7 +8101,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 {
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
-       struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
        uint32_t *lp = (uint32_t *) pcmd->virt;
        union lpfc_wqe128 *wqe = &cmdiocb->wqe;
        struct serv_parm *sp;
@@ -8163,6 +8111,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        uint32_t fc_flag = 0;
        uint32_t port_state = 0;
 
+       /* Clear external loopback plug detected flag */
+       phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
        cmd = *lp++;
        sp = (struct serv_parm *) lp;
 
@@ -8214,6 +8165,12 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                        return 1;
                }
 
+               /* External loopback plug insertion detected */
+               phba->link_flag |= LS_EXTERNAL_LOOPBACK;
+
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC,
+                                "1119 External Loopback plug detected\n");
+
                /* abort the flogi coming back to ourselves
                 * due to external loopback on the port.
                 */
@@ -8320,7 +8277,7 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        RNID *rn;
        struct ls_rjt stat;
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        lp = (uint32_t *) pcmd->virt;
 
        lp++;
@@ -8361,7 +8318,7 @@ lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 {
        uint8_t *pcmd;
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+       pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;
 
        /* skip over first word of echo command to find echo data */
        pcmd += sizeof(uint32_t);
@@ -8437,7 +8394,7 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
  * response to the RLS.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the RLS Accept Response
  * ELS IOCB command.
  *
@@ -8460,7 +8417,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
        mb = &pmb->u.mb;
 
-       ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+       ndlp = pmb->ctx_ndlp;
        rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
        oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
        pmb->ctx_buf = NULL;
@@ -8496,7 +8453,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                icmd->unsli3.rcvsli3.ox_id = oxid;
        }
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
        pcmd += sizeof(uint32_t); /* Skip past command */
        rls_rsp = (struct RLS_RSP *)pcmd;
@@ -8517,8 +8474,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                         ndlp->nlp_rpi);
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitACC++;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return;
        }
@@ -8609,7 +8566,7 @@ reject_out:
  * Value (RTV) unsolicited IOCB event.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the RTV Accept Response
  * ELS IOCB command.
  *
@@ -8644,7 +8601,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        if (!elsiocb)
                return 1;
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
        pcmd += sizeof(uint32_t); /* Skip past command */
 
@@ -8682,8 +8639,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                        rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitACC++;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 0;
        }
@@ -8739,7 +8696,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (!elsiocb)
                return 1;
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
 
        /* For RRQ request, remainder of payload is Exchange IDs */
        *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
@@ -8759,17 +8716,19 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        elsiocb->context_un.rrq = rrq;
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
 
-       lpfc_nlp_get(ndlp);
-       elsiocb->context1 = ndlp;
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp)
+               goto io_err;
 
        ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (ret == IOCB_ERROR)
+       if (ret == IOCB_ERROR) {
+               lpfc_nlp_put(ndlp);
                goto io_err;
+       }
        return 0;
 
  io_err:
        lpfc_els_free_iocb(phba, elsiocb);
-       lpfc_nlp_put(ndlp);
        return 1;
 }
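
The reworked RRQ path above makes the reference rules explicit: the lpfc_nlp_get() reference is owned by the iocb once it is stored in the ndlp field, so a failed get only frees the iocb, while a failed issue must also drop the reference it just took. The lines below condense that pairing from the surrounding hunks (submission and completion sides shown together purely for illustration; the driver's real handlers do more bookkeeping):

        /* Submission side: the reference lives for as long as the iocb
         * holds the pointer.
         */
        elsiocb->ndlp = lpfc_nlp_get(ndlp);
        if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);   /* nothing to put */
                return 1;
        }
        if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
            IOCB_ERROR) {
                lpfc_nlp_put(ndlp);                  /* undo the get above */
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }

        /* Completion side (compare the unsolicited handler later in this
         * file): drop exactly that reference and clear the field.
         */
        lpfc_nlp_put(elsiocb->ndlp);
        elsiocb->ndlp = NULL;
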
 
@@ -8811,7 +8770,7 @@ lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
  * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the RPL Accept Response
  * ELS command.
  *
@@ -8852,7 +8811,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
                icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
        }
 
-       pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = elsiocb->cmd_dmabuf->virt;
        *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
        pcmd += sizeof(uint16_t);
        *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
@@ -8876,8 +8835,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
                         ndlp->nlp_rpi);
        elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitACC++;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -8932,7 +8891,7 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                return 0;
        }
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        lp = (uint32_t *) pcmd->virt;
        rpl = (RPL *) (lp + 1);
        maxsize = be32_to_cpu(rpl->maxsize);
@@ -8984,7 +8943,7 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        uint32_t cnt, did;
 
        did = get_job_els_rsp64_did(vport->phba, cmdiocb);
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        lp = (uint32_t *) pcmd->virt;
 
        lp++;
@@ -9054,8 +9013,8 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        uint32_t did;
 
        did = get_job_els_rsp64_did(vport->phba, cmdiocb);
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-       lp = (uint32_t *) pcmd->virt;
+       pcmd = cmdiocb->cmd_dmabuf;
+       lp = (uint32_t *)pcmd->virt;
 
        lp++;
        /* FARP-RSP received from DID <did> */
@@ -9095,7 +9054,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        FAN *fp;
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
-       lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+       lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
        fp = (FAN *) ++lp;
        /* FAN received; Fan does not have a reply sequence */
        if ((vport == phba->pport) &&
@@ -9144,7 +9103,7 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        int desc_cnt = 0, bytes_remain;
        bool rcv_cap_desc = false;
 
-       payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+       payload = cmdiocb->cmd_dmabuf->virt;
 
        edc_req = (struct fc_els_edc *)payload;
        bytes_remain = be32_to_cpu(edc_req->desc_len);
@@ -9329,7 +9288,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
                if (piocb->vport != vport)
                        continue;
 
-               pcmd = (struct lpfc_dmabuf *) piocb->context2;
+               pcmd = piocb->cmd_dmabuf;
                if (pcmd)
                        els_command = *(uint32_t *) (pcmd->virt);
 
@@ -9584,7 +9543,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
        uint32_t *pcmd;
        u32 ulp_status, ulp_word4;
 
-       ndlp = cmdiocbp->context1;
+       ndlp = cmdiocbp->ndlp;
        if (!ndlp)
                return;
 
@@ -9598,8 +9557,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
                        sizeof(struct lpfc_name));
                memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
                        sizeof(struct lpfc_name));
-               pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
-                       cmdiocbp->context2)->virt);
+               pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt;
                lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
                stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
                lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
@@ -9940,11 +9898,14 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
                        /* Take action here for an Alarm event */
                        if (phba->cmf_active_mode != LPFC_CFG_OFF) {
                                if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
-                                       /* Track of alarm cnt for cgn_info */
-                                       atomic_inc(&phba->cgn_fabric_alarm_cnt);
                                        /* Track of alarm cnt for SYNC_WQE */
                                        atomic_inc(&phba->cgn_sync_alarm_cnt);
                                }
+                               /* Track alarm cnt for cgn_info regardless
+                                * of whether CMF is configured for Signals
+                                * or FPINs.
+                                */
+                               atomic_inc(&phba->cgn_fabric_alarm_cnt);
                                goto cleanup;
                        }
                        break;
@@ -9952,11 +9913,14 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
                        /* Take action here for a Warning event */
                        if (phba->cmf_active_mode != LPFC_CFG_OFF) {
                                if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
-                                       /* Track of warning cnt for cgn_info */
-                                       atomic_inc(&phba->cgn_fabric_warn_cnt);
                                        /* Track of warning cnt for SYNC_WQE */
                                        atomic_inc(&phba->cgn_sync_warn_cnt);
                                }
+                               /* Track warning cnt and freq for cgn_info
+                                * regardless of whether CMF is configured for
+                                * Signals or FPINs.
+                                */
+                               atomic_inc(&phba->cgn_fabric_warn_cnt);
 cleanup:
                                /* Save frequency in ms */
                                phba->cgn_fpin_frequency =
@@ -9965,14 +9929,10 @@ cleanup:
                                if (phba->cgn_i) {
                                        cp = (struct lpfc_cgn_info *)
                                                phba->cgn_i->virt;
-                                       if (phba->cgn_reg_fpin &
-                                               LPFC_CGN_FPIN_ALARM)
-                                               cp->cgn_alarm_freq =
-                                                       cpu_to_le16(value);
-                                       if (phba->cgn_reg_fpin &
-                                               LPFC_CGN_FPIN_WARN)
-                                               cp->cgn_warn_freq =
-                                                       cpu_to_le16(value);
+                                       cp->cgn_alarm_freq =
+                                               cpu_to_le16(value);
+                                       cp->cgn_warn_freq =
+                                               cpu_to_le16(value);
                                        crc = lpfc_cgn_calc_crc32
                                                (cp,
                                                LPFC_CGN_INFO_SZ,
@@ -10133,12 +10093,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
        LPFC_MBOXQ_t *mbox;
 
-       if (!vport || !(elsiocb->context2))
+       if (!vport || !elsiocb->cmd_dmabuf)
                goto dropit;
 
        newnode = 0;
        wcqe_cmpl = &elsiocb->wcqe_cmpl;
-       payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+       payload = elsiocb->cmd_dmabuf->virt;
        if (phba->sli_rev == LPFC_SLI_REV4)
                payload_len = wcqe_cmpl->total_data_placed;
        else
@@ -10199,8 +10159,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        }
        spin_unlock_irq(&ndlp->lock);
 
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp)
                goto dropit;
        elsiocb->vport = vport;
 
@@ -10556,8 +10516,8 @@ lsrjt:
        }
 
        /* Release the reference on this elsiocb, not the ndlp. */
-       lpfc_nlp_put(elsiocb->context1);
-       elsiocb->context1 = NULL;
+       lpfc_nlp_put(elsiocb->ndlp);
+       elsiocb->ndlp = NULL;
 
        /* Special case.  Driver received an unsolicited command that
         * unsupportable given the driver's current state.  Reset the
@@ -10611,13 +10571,13 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        u32 ulp_command, status, parameter, bde_count = 0;
        IOCB_t *icmd;
        struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
-       struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
-       struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+       struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf;
+       struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf;
        dma_addr_t paddr;
 
-       elsiocb->context1 = NULL;
-       elsiocb->context2 = NULL;
-       elsiocb->context3 = NULL;
+       elsiocb->cmd_dmabuf = NULL;
+       elsiocb->rsp_dmabuf = NULL;
+       elsiocb->bpl_dmabuf = NULL;
 
        wcqe_cmpl = &elsiocb->wcqe_cmpl;
        ulp_command = get_job_cmnd(phba, elsiocb);
@@ -10661,38 +10621,39 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
        /* Account for SLI2 or SLI3 and later unsolicited buffering */
        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
-               elsiocb->context2 = bdeBuf1;
+               elsiocb->cmd_dmabuf = bdeBuf1;
                if (bde_count == 2)
-                       elsiocb->context3 = bdeBuf2;
+                       elsiocb->bpl_dmabuf = bdeBuf2;
        } else {
                icmd = &elsiocb->iocb;
                paddr = getPaddr(icmd->un.cont64[0].addrHigh,
                                 icmd->un.cont64[0].addrLow);
-               elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
-                                                            paddr);
+               elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
+                                                              paddr);
                if (bde_count == 2) {
                        paddr = getPaddr(icmd->un.cont64[1].addrHigh,
                                         icmd->un.cont64[1].addrLow);
-                       elsiocb->context3 = lpfc_sli_ringpostbuf_get(phba,
-                                                                      pring,
-                                                                      paddr);
+                       elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
+                                                                       pring,
+                                                                       paddr);
                }
        }
 
        lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
        /*
         * The different unsolicited event handlers would tell us
-        * if they are done with "mp" by setting context2 to NULL.
+        * if they are done with "mp" by setting cmd_dmabuf to NULL.
         */
-       if (elsiocb->context2) {
-               lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
-               elsiocb->context2 = NULL;
+       if (elsiocb->cmd_dmabuf) {
+               lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
+               elsiocb->cmd_dmabuf = NULL;
        }
 
-       if (elsiocb->context3) {
-               lpfc_in_buf_free(phba, elsiocb->context3);
-               elsiocb->context3 = NULL;
+       if (elsiocb->bpl_dmabuf) {
+               lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
+               elsiocb->bpl_dmabuf = NULL;
        }
+
 }
 
 static void
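
The two preceding hunks encode the buffer ownership rule for unsolicited ELS events: the receive path hangs the posted buffers off cmd_dmabuf/bpl_dmabuf, a handler that wants to keep the payload (the RSCN path above, which parks it on vport->fc_rscn_id_list) takes ownership by zeroing cmd_dmabuf, and whatever is still attached afterwards is returned to the driver. A condensed restatement of that flow:

        elsiocb->cmd_dmabuf = bdeBuf1;               /* received payload    */
        if (bde_count == 2)
                elsiocb->bpl_dmabuf = bdeBuf2;       /* optional second BDE */

        lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);

        /* Handlers signal "I kept this buffer" by clearing the field;
         * anything still attached is freed here.
         */
        if (elsiocb->cmd_dmabuf) {
                lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
                elsiocb->cmd_dmabuf = NULL;
        }
        if (elsiocb->bpl_dmabuf) {
                lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
                elsiocb->bpl_dmabuf = NULL;
        }
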
@@ -10803,7 +10764,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+       struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
        MAILBOX_t *mb = &pmb->u.mb;
        int rc;
 
@@ -11068,11 +11029,11 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 {
        struct lpfc_vport *vport = cmdiocb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
        struct lpfc_nodelist *np;
        struct lpfc_nodelist *next_np;
        struct lpfc_iocbq *piocb;
-       struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+       struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
        struct serv_parm *sp;
        uint8_t fabric_param_changed;
        u32 ulp_status, ulp_word4;
@@ -11210,7 +11171,7 @@ out:
  * IOCB will be sent off HBA at any given time.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the FDISC ELS command.
  *
  * Return code
@@ -11255,7 +11216,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                icmd->ulpCt_l = 0;
        }
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
        pcmd += sizeof(uint32_t); /* CSP Word 1 */
        memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
@@ -11287,8 +11248,8 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                "Issue FDISC:     did:x%x",
                did, 0, 0);
 
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp)
                goto err_out;
 
        rc = lpfc_issue_fabric_iocb(phba, elsiocb);
@@ -11332,7 +11293,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        u32 ulp_status, ulp_word4, did, tmo;
 
-       ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+       ndlp = cmdiocb->ndlp;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
        ulp_word4 = get_job_word4(phba, rspiocb);
@@ -11390,7 +11351,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  * This routine issues a LOGO ELS command to an @ndlp off a @vport.
  *
  * Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
  * the IOCB for the completion callback function to the LOGO ELS command.
  *
  * Return codes
@@ -11412,7 +11373,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        if (!elsiocb)
                return 1;
 
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
        pcmd += sizeof(uint32_t);
 
@@ -11429,8 +11390,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag |= NLP_LOGO_SND;
        spin_unlock_irq(&ndlp->lock);
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(phba, elsiocb);
                goto err;
        }
@@ -11989,12 +11950,12 @@ lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        struct lpfc_dmabuf *prsp = NULL;
        struct lpfc_vmid_priority_range *vmid_range = NULL;
        u32 *data;
-       struct lpfc_dmabuf *dmabuf = cmdiocb->context2;
+       struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
        u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
        u32 ulp_word4 = get_job_word4(phba, rspiocb);
        u8 *pcmd, max_desc;
        u32 len, i;
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
 
        prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
        if (!prsp)
@@ -12090,15 +12051,15 @@ int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
        if (!elsiocb)
                return -ENOMEM;
 
-       pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
 
        *((u32 *)(pcmd)) = ELS_CMD_QFPA;
        pcmd += 4;
 
        elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
 
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(vport->phba, elsiocb);
                return -ENXIO;
        }
@@ -12145,7 +12106,7 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
        vmid_context->nlp = ndlp;
        vmid_context->instantiated = instantiated;
        elsiocb->vmid_tag.vmid_context = vmid_context;
-       pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
 
        if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
                memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
@@ -12180,8 +12141,8 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
 
        elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
 
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1) {
+       elsiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!elsiocb->ndlp) {
                lpfc_els_free_iocb(vport->phba, elsiocb);
                goto out;
        }
@@ -12207,12 +12168,12 @@ lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
        struct lpfc_dmabuf *prsp = NULL;
        struct lpfc_vmid_context *vmid_context =
            icmdiocb->vmid_tag.vmid_context;
-       struct lpfc_nodelist *ndlp = icmdiocb->context1;
+       struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
        u8 *pcmd;
        u32 *data;
        u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
        u32 ulp_word4 = get_job_word4(phba, rspiocb);
-       struct lpfc_dmabuf *dmabuf = icmdiocb->context2;
+       struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
        struct lpfc_vmid *vmid;
 
        vmid = vmid_context->vmp;
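
Most of the churn in this file is mechanical: the opaque context1/context2/context3 pointers on the iocb become the typed ndlp, cmd_dmabuf and bpl_dmabuf members, which removes a double cast at every payload access. Before/after, condensed from the hunks above:

        /* Before: casts at every use of the opaque context fields. */
        ndlp = (struct lpfc_nodelist *)elsiocb->context1;
        pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
        bpl  = (struct ulp_bde64 *)
                (((struct lpfc_dmabuf *)elsiocb->context3)->virt);

        /* After: typed members; only the payload byte pointer still casts. */
        ndlp = elsiocb->ndlp;
        pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
        bpl  = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
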
index 2b877df..fb36f26 100644
@@ -1183,6 +1183,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
 void
 lpfc_linkdown_port(struct lpfc_vport *vport)
 {
+       struct lpfc_hba *phba = vport->phba;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 
        if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
@@ -1200,6 +1201,13 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
        vport->fc_flag &= ~FC_DISC_DELAYED;
        spin_unlock_irq(shost->host_lock);
        del_timer_sync(&vport->delayed_disc_tmo);
+
+       if (phba->sli_rev == LPFC_SLI_REV4 &&
+           vport->port_type == LPFC_PHYSICAL_PORT &&
+           phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+               /* Assume success on link up */
+               phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+       }
 }
 
 int
@@ -1221,6 +1229,9 @@ lpfc_linkdown(struct lpfc_hba *phba)
 
        phba->defer_flogi_acc_flag = false;
 
+       /* Clear external loopback plug detected flag */
+       phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
        spin_unlock_irq(&phba->hbalock);
@@ -1343,10 +1354,12 @@ lpfc_linkup_port(struct lpfc_vport *vport)
 
        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
-                           FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+                           FC_RSCN_MEMENTO | FC_RSCN_MODE |
+                           FC_NLP_MORE | FC_RSCN_DISCOVERY);
        vport->fc_flag |= FC_NDISC_ACTIVE;
        vport->fc_ns_retry = 0;
        spin_unlock_irq(shost->host_lock);
+       lpfc_setup_fdmi_mask(vport);
 
        lpfc_linkup_cleanup_nodes(vport);
 }
@@ -1378,8 +1391,8 @@ lpfc_linkup(struct lpfc_hba *phba)
        phba->pport->rcv_flogi_cnt = 0;
        spin_unlock_irq(shost->host_lock);
 
-       /* reinitialize initial FLOGI flag */
-       phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
+       /* reinitialize initial HBA flag */
+       phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
        phba->defer_flogi_acc_flag = false;
 
        return 0;
@@ -1458,7 +1471,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport *vport = pmb->vport;
        LPFC_MBOXQ_t *sparam_mb;
-       struct lpfc_dmabuf *sparam_mp;
        u16 status = pmb->u.mb.mbxStatus;
        int rc;
 
@@ -1507,13 +1519,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                        sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
                        rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
                        if (rc == MBX_NOT_FINISHED) {
-                               sparam_mp = (struct lpfc_dmabuf *)
-                                               sparam_mb->ctx_buf;
-                               lpfc_mbuf_free(phba, sparam_mp->virt,
-                                              sparam_mp->phys);
-                               kfree(sparam_mp);
-                               sparam_mb->ctx_buf = NULL;
-                               mempool_free(sparam_mb, phba->mbox_mem_pool);
+                               lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
+                                                      MBOX_THD_UNLOCKED);
                                goto sparam_out;
                        }
 
@@ -3312,7 +3319,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
 void
 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
-       struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
        struct lpfc_vport *vport = mboxq->vport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
@@ -3393,12 +3399,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        }
 
 out_free_mem:
-       mempool_free(mboxq, phba->mbox_mem_pool);
-       if (dmabuf) {
-               lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
-               kfree(dmabuf);
-       }
-       return;
+       lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
 }
 
 static void
@@ -3443,9 +3444,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
        }
 
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 
        /* Check if sending the FLOGI is being deferred to after we get
         * up to date CSPs from MBX_READ_SPARAM.
@@ -3457,12 +3456,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        return;
 
 out:
-       pmb->ctx_buf = NULL;
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
        lpfc_issue_clear_la(phba, vport);
-       mempool_free(pmb, phba->mbox_mem_pool);
-       return;
 }
 
 static void
@@ -3472,7 +3467,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
        struct Scsi_Host *shost;
        int i;
-       struct lpfc_dmabuf *mp;
        int rc;
        struct fcf_record *fcf_record;
        uint32_t fc_flags = 0;
@@ -3600,10 +3594,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
        sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
        rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
-               mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
-               mempool_free(sparam_mbox, phba->mbox_mem_pool);
+               lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
                goto out;
        }
 
@@ -3879,10 +3870,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        }
 
 lpfc_mbx_cmpl_read_topology_free_mbuf:
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
-       return;
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 }
 
 /*
@@ -3895,9 +3883,13 @@ void
 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport  *vport = pmb->vport;
-       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 
+       /* The driver calls the state machine with the pmb pointer
+        * but wants to make sure a stale ctx_buf isn't acted on.
+        * The ctx_buf is restored later and cleaned up.
+        */
        pmb->ctx_buf = NULL;
        pmb->ctx_ndlp = NULL;
 
@@ -3934,10 +3926,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
        /* Call state machine */
        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+       pmb->ctx_buf = mp;
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
        /* decrement the node reference count held for this callback
         * function.
         */
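
The REG_LOGIN completion above shows the one place where the new cleanup helper cannot simply be dropped in: the DMA buffer is detached before the discovery state machine runs, so the state machine never acts on a stale ctx_buf, then reattached so a single lpfc_mbox_rsrc_cleanup() call can free everything. Condensed:

        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        pmb->ctx_buf = NULL;            /* hide it from the state machine */

        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);

        pmb->ctx_buf = mp;              /* restore for common cleanup */
        lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
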
@@ -4104,11 +4095,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 
        vport_buff = (uint8_t *) vport_info;
        do {
-               /* free dma buffer from previous round */
+               /* Each loop iteration frees the dma buffer left over from
+                * the previous pass because the mbox is reused and the
+                * dump routine is a single-use construct.
+                */
                if (pmb->ctx_buf) {
                        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
+                       pmb->ctx_buf = NULL;
                }
                if (lpfc_dump_static_vport(phba, pmb, offset))
                        goto out;
@@ -4193,16 +4188,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 
 out:
        kfree(vport_info);
-       if (mbx_wait_rc != MBX_TIMEOUT) {
-               if (pmb->ctx_buf) {
-                       mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
-                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                       kfree(mp);
-               }
-               mempool_free(pmb, phba->mbox_mem_pool);
-       }
-
-       return;
+       if (mbx_wait_rc != MBX_TIMEOUT)
+               lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 }
 
 /*
@@ -4216,22 +4203,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport *vport = pmb->vport;
        MAILBOX_t *mb = &pmb->u.mb;
-       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
-       struct lpfc_nodelist *ndlp;
+       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
        struct Scsi_Host *shost;
 
-       ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
        pmb->ctx_ndlp = NULL;
-       pmb->ctx_buf = NULL;
 
        if (mb->mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "0258 Register Fabric login error: 0x%x\n",
                                 mb->mbxStatus);
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
-               mempool_free(pmb, phba->mbox_mem_pool);
-
+               lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        /* FLOGI failed, use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);
@@ -4273,9 +4254,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                lpfc_do_scr_ns_plogi(phba, vport);
        }
 
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 
        /* Drop the reference count from the mbox at the end after
         * all the current reference to the ndlp have been done.
@@ -4369,12 +4348,10 @@ void
 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        MAILBOX_t *mb = &pmb->u.mb;
-       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
        struct lpfc_vport *vport = pmb->vport;
        int rc;
 
-       pmb->ctx_buf = NULL;
        pmb->ctx_ndlp = NULL;
        vport->gidft_inp = 0;
 
@@ -4388,9 +4365,7 @@ out:
                 * callback function.
                 */
                lpfc_nlp_put(ndlp);
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
-               mempool_free(pmb, phba->mbox_mem_pool);
+               lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 
                /* If the node is not registered with the scsi or nvme
                 * transport, remove the fabric node.  The failed reg_login
@@ -4479,10 +4454,7 @@ out:
         * callback function.
         */
        lpfc_nlp_put(ndlp);
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
-
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
        return;
 }
 
@@ -4496,13 +4468,9 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport *vport = pmb->vport;
        MAILBOX_t *mb = &pmb->u.mb;
-       struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
-       struct lpfc_nodelist *ndlp;
+       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 
-       ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
        pmb->ctx_ndlp = NULL;
-       pmb->ctx_buf = NULL;
-
        if (mb->mbxStatus) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "0933 %s: Register FC login error: 0x%x\n",
@@ -4526,9 +4494,7 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
  out:
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 
        /* Drop the reference count from the mbox at the end after
         * all the current reference to the ndlp have been done.
@@ -5155,7 +5121,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
        if (pring->ringno == LPFC_ELS_RING) {
                switch (ulp_command) {
                case CMD_GEN_REQUEST64_CR:
-                       if (iocb->context_un.ndlp == ndlp)
+                       if (iocb->ndlp == ndlp)
                                return 1;
                        fallthrough;
                case CMD_ELS_REQUEST64_CR:
@@ -5163,7 +5129,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
                                return 1;
                        fallthrough;
                case CMD_XMIT_ELS_RSP64_CX:
-                       if (iocb->context1 == (uint8_t *) ndlp)
+                       if (iocb->ndlp == ndlp)
                                return 1;
                }
        } else if (pring->ringno == LPFC_FCP_RING) {
@@ -5273,7 +5239,6 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (!ndlp)
                return;
        lpfc_issue_els_logo(vport, ndlp, 0);
-       mempool_free(pmb, phba->mbox_mem_pool);
 
        /* Check to see if there are any deferred events to process */
        if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
@@ -5300,6 +5265,13 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                ndlp->nlp_flag &= ~NLP_UNREG_INP;
                spin_unlock_irq(&ndlp->lock);
        }
+
+       /* The node has an outstanding reference for the unreg. Now
+        * that the LOGO action and cleanup are finished, release
+        * resources.
+        */
+       lpfc_nlp_put(ndlp);
+       mempool_free(pmb, phba->mbox_mem_pool);
 }
 
 /*
@@ -5569,7 +5541,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *mb, *nextmb;
-       struct lpfc_dmabuf *mp;
 
        /* Cleanup node for NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5607,16 +5578,11 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
                    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
-                       mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
-                       if (mp) {
-                               __lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                               kfree(mp);
-                       }
                        list_del(&mb->list);
-                       mempool_free(mb, phba->mbox_mem_pool);
-                       /* We shall not invoke the lpfc_nlp_put to decrement
-                        * the ndlp reference count as we are in the process
-                        * of lpfc_nlp_release.
+                       lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
+
+                       /* Don't invoke lpfc_nlp_put. The driver is in
+                        * lpfc_nlp_release context.
                         */
                }
        }
@@ -6098,7 +6064,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
         */
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-               if (iocb->context1 != ndlp)
+               if (iocb->ndlp != ndlp)
                        continue;
 
                ulp_command = get_job_cmnd(phba, iocb);
@@ -6112,7 +6078,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 
        /* Next check the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-               if (iocb->context1 != ndlp)
+               if (iocb->ndlp != ndlp)
                        continue;
 
                ulp_command = get_job_cmnd(phba, iocb);
@@ -6390,8 +6356,9 @@ restart_disc:
                        lpfc_printf_vlog(vport, KERN_ERR,
                                         LOG_TRACE_EVENT,
                                         "0231 RSCN timeout Data: x%x "
-                                        "x%x\n",
-                                        vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+                                        "x%x x%x x%x\n",
+                                        vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
+                                        vport->port_state, vport->gidft_inp);
 
                        /* Cleanup any outstanding ELS commands */
                        lpfc_els_flush_cmd(vport);
@@ -6461,11 +6428,9 @@ void
 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        MAILBOX_t *mb = &pmb->u.mb;
-       struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
        struct lpfc_vport    *vport = pmb->vport;
 
-       pmb->ctx_buf = NULL;
        pmb->ctx_ndlp = NULL;
 
        if (phba->sli_rev < LPFC_SLI_REV4)
@@ -6496,10 +6461,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
         * function.
         */
        lpfc_nlp_put(ndlp);
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
-
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
        return;
 }
 
index d6050f3..748c532 100644
@@ -97,6 +97,18 @@ union CtCommandResponse {
 #define FC4_FEATURE_INIT       0x2
 #define FC4_FEATURE_NVME_DISC  0x4
 
+enum rft_word0 {
+       RFT_FCP_REG     = (0x1 << 8),
+};
+
+enum rft_word1 {
+       RFT_NVME_REG    = (0x1 << 8),
+};
+
+enum rft_word3 {
+       RFT_APP_SERV_REG        = (0x1 << 0),
+};
+
 struct lpfc_sli_ct_request {
        /* Structure is in Big Endian format */
        union CtRevisionId RevisionId;
@@ -131,25 +143,13 @@ struct lpfc_sli_ct_request {
                        uint8_t Fc4Type;
                } gid_ff;
                struct rft {
-                       uint32_t PortId;        /* For RFT_ID requests */
+                       __be32 port_id; /* For RFT_ID requests */
 
-#ifdef __BIG_ENDIAN_BITFIELD
-                       uint32_t rsvd0:16;
-                       uint32_t rsvd1:7;
-                       uint32_t fcpReg:1;      /* Type 8 */
-                       uint32_t rsvd2:2;
-                       uint32_t ipReg:1;       /* Type 5 */
-                       uint32_t rsvd3:5;
-#else  /*  __LITTLE_ENDIAN_BITFIELD */
-                       uint32_t rsvd0:16;
-                       uint32_t fcpReg:1;      /* Type 8 */
-                       uint32_t rsvd1:7;
-                       uint32_t rsvd3:5;
-                       uint32_t ipReg:1;       /* Type 5 */
-                       uint32_t rsvd2:2;
-#endif
-
-                       uint32_t rsvd[7];
+                       __be32 fcp_reg; /* rsvd 31:9, fcp_reg 8, rsvd 7:0 */
+                       __be32 nvme_reg; /* rsvd 31:9, nvme_reg 8, rsvd 7:0 */
+                       __be32 word2;
+                       __be32 app_serv_reg; /* rsvd 31:1, app_serv_reg 0 */
+                       __be32 word[4];
                } rft;
                struct rnn {
                        uint32_t PortId;        /* For RNN_ID requests */
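
The rft bitfields above are replaced by plain big-endian words, so RFT_ID registration becomes endian-safe by construction: each FC-4 type is a cpu_to_be32() store of one of the rft_word* enum bits. A minimal sketch of filling the new layout follows; ctreq is a placeholder for the CT request payload prepared elsewhere, and which types actually get registered depends on the configured FC4/MI support.

	struct lpfc_sli_ct_request *ctreq;	/* assumed: points at the mapped CT payload */

	ctreq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);

	/* One word per registration bit; bit positions come from the rft_word* enums. */
	ctreq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);		/* FCP, TYPE 0x08 */
	ctreq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);		/* NVMe, TYPE 0x28 */
	ctreq->un.rft.app_serv_reg = cpu_to_be32(RFT_APP_SERV_REG);	/* app server */
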
@@ -511,8 +511,6 @@ struct class_parms {
        uint8_t word3Reserved2; /* Fc Word 3, bit  0: 7 */
 };
 
-#define FAPWWN_KEY_VENDOR      0x42524344 /*valid vendor version fawwpn key*/
-
 struct serv_parm {     /* Structure is in Big Endian format */
        struct csp cmn;
        struct lpfc_name portName;
@@ -2650,19 +2648,26 @@ typedef struct {
 } READ_SPARM_VAR;
 
 /* Structure for MB Command READ_STATUS (14) */
+enum read_status_word1 {
+       RD_ST_CC        = 0x01,
+       RD_ST_XKB       = 0x80,
+};
+
+enum read_status_word17 {
+       RD_ST_XMIT_XKB_MASK = 0x3fffff,
+};
+
+enum read_status_word18 {
+       RD_ST_RCV_XKB_MASK = 0x3fffff,
+};
 
 typedef struct {
-#ifdef __BIG_ENDIAN_BITFIELD
-       uint32_t rsvd1:31;
-       uint32_t clrCounters:1;
-       uint16_t activeXriCnt;
-       uint16_t activeRpiCnt;
-#else  /*  __LITTLE_ENDIAN_BITFIELD */
-       uint32_t clrCounters:1;
-       uint32_t rsvd1:31;
-       uint16_t activeRpiCnt;
-       uint16_t activeXriCnt;
-#endif
+       u8 clear_counters; /* rsvd 7:1, cc 0 */
+       u8 rsvd5;
+       u8 rsvd6;
+       u8 xkb; /* xkb 7, rsvd 6:0 */
+
+       u32 rsvd8;
 
        uint32_t xmitByteCnt;
        uint32_t rcvByteCnt;
@@ -2674,6 +2679,14 @@ typedef struct {
        uint32_t totalRespExchanges;
        uint32_t rcvPbsyCnt;
        uint32_t rcvFbsyCnt;
+
+       u32 drop_frame_no_rq;
+       u32 empty_rq;
+       u32 drop_frame_no_xri;
+       u32 empty_xri;
+
+       u32 xmit_xkb; /* rsvd 31:22, xmit_xkb 21:0 */
+       u32 rcv_xkb; /* rsvd 31:22, rcv_xkb 21:0 */
 } READ_STATUS_VAR;
 
 /* Structure for MB Command READ_RPI (15) */
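
The READ_STATUS rework drops the old bitfields in favour of byte-wide flags plus explicit masks for the new 22-bit KB counters. A hedged sketch of how a completion handler might consume them, assuming the READ_STATUS member of the mailbox union is varRdStatus; unit handling for the legacy byte counters is elided.

	READ_STATUS_VAR *rd = &pmb->u.mb.un.varRdStatus;
	u32 xmit_kb, rcv_kb;

	if (rd->xkb & RD_ST_XKB) {
		/* Port reports extended counters in KB (22-bit fields). */
		xmit_kb = rd->xmit_xkb & RD_ST_XMIT_XKB_MASK;
		rcv_kb = rd->rcv_xkb & RD_ST_RCV_XKB_MASK;
	} else {
		/* Fall back to the legacy byte counters (conversion elided). */
		xmit_kb = rd->xmitByteCnt;
		rcv_kb = rd->rcvByteCnt;
	}
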
index 02e230e..8511369 100644
@@ -2893,6 +2893,9 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_extnts_inuse_SHIFT    31
 #define lpfc_mbx_rd_conf_extnts_inuse_MASK     0x00000001
 #define lpfc_mbx_rd_conf_extnts_inuse_WORD     word1
+#define lpfc_mbx_rd_conf_fawwpn_SHIFT          30
+#define lpfc_mbx_rd_conf_fawwpn_MASK           0x00000001
+#define lpfc_mbx_rd_conf_fawwpn_WORD           word1
 #define lpfc_mbx_rd_conf_wcs_SHIFT             28      /* warning signaling */
 #define lpfc_mbx_rd_conf_wcs_MASK              0x00000001
 #define lpfc_mbx_rd_conf_wcs_WORD              word1
@@ -4473,12 +4476,8 @@ struct wqe_common {
 #define wqe_cmd_type_MASK     0x0000000f
 #define wqe_cmd_type_WORD     word11
 #define wqe_els_id_SHIFT      4
-#define wqe_els_id_MASK       0x00000003
+#define wqe_els_id_MASK       0x00000007
 #define wqe_els_id_WORD       word11
-#define LPFC_ELS_ID_FLOGI      3
-#define LPFC_ELS_ID_FDISC      2
-#define LPFC_ELS_ID_LOGO       1
-#define LPFC_ELS_ID_DEFAULT    0
 #define wqe_irsp_SHIFT        4
 #define wqe_irsp_MASK         0x00000001
 #define wqe_irsp_WORD         word11
@@ -4525,6 +4524,14 @@ struct lpfc_wqe_generic{
        uint32_t payload[4];
 };
 
+enum els_request64_wqe_word11 {
+       LPFC_ELS_ID_DEFAULT,
+       LPFC_ELS_ID_LOGO,
+       LPFC_ELS_ID_FDISC,
+       LPFC_ELS_ID_FLOGI,
+       LPFC_ELS_ID_PLOGI,
+};
+
 struct els_request64_wqe {
        struct ulp_bde64 bde;
        uint32_t payload_len;
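
With wqe_els_id widened from two to three bits, the command IDs move into an enum and gain an LPFC_ELS_ID_PLOGI value. The field is still programmed through the existing bf_set() accessor; a small sketch, where pwqe and elscmd are placeholders for the WQE being built and the ELS command code chosen by the caller.

	union lpfc_wqe128 *wqe = &pwqe->wqe;
	u8 els_id = LPFC_ELS_ID_DEFAULT;

	if (elscmd == ELS_CMD_FLOGI)
		els_id = LPFC_ELS_ID_FLOGI;
	else if (elscmd == ELS_CMD_FDISC)
		els_id = LPFC_ELS_ID_FDISC;
	else if (elscmd == ELS_CMD_LOGO)
		els_id = LPFC_ELS_ID_LOGO;
	else if (elscmd == ELS_CMD_PLOGI)
		els_id = LPFC_ELS_ID_PLOGI;

	/* els_id now spans word11 bits 6:4 (mask 0x7). */
	bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
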
index 461d333..2bffaa6 100644
@@ -350,8 +350,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 void
 lpfc_update_vport_wwn(struct lpfc_vport *vport)
 {
-       uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
-       u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
+       struct lpfc_hba *phba = vport->phba;
 
        /*
         * If the name is empty or there exists a soft name
@@ -370,21 +369,32 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
         */
        if (vport->fc_portname.u.wwn[0] != 0 &&
                memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
-                       sizeof(struct lpfc_name)))
+                      sizeof(struct lpfc_name))) {
                vport->vport_flag |= FAWWPN_PARAM_CHG;
 
-       if (vport->fc_portname.u.wwn[0] == 0 ||
-           (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
-           vport->vport_flag & FAWWPN_SET) {
-               memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
-                       sizeof(struct lpfc_name));
-               vport->vport_flag &= ~FAWWPN_SET;
-               if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
-                       vport->vport_flag |= FAWWPN_SET;
+               if (phba->sli_rev == LPFC_SLI_REV4 &&
+                   vport->port_type == LPFC_PHYSICAL_PORT &&
+                   phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
+                       lpfc_printf_log(phba, KERN_INFO,
+                                       LOG_SLI | LOG_DISCOVERY | LOG_ELS,
+                                       "2701 FA-PWWN change WWPN from %llx to "
+                                       "%llx: vflag x%x fawwpn_flag x%x\n",
+                                       wwn_to_u64(vport->fc_portname.u.wwn),
+                                       wwn_to_u64
+                                          (vport->fc_sparam.portName.u.wwn),
+                                       vport->vport_flag,
+                                       phba->sli4_hba.fawwpn_flag);
+                       memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+                              sizeof(struct lpfc_name));
+               }
        }
+
+       if (vport->fc_portname.u.wwn[0] == 0)
+               memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+                      sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
-                       sizeof(struct lpfc_name));
+                      sizeof(struct lpfc_name));
 }
 
 /**
@@ -443,15 +453,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
-               mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
-               mempool_free(pmb, phba->mbox_mem_pool);
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
+               lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
                return -EIO;
        }
 
        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 
+       /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
+        * longer needed.  Prevent unintended ctx_buf access as the mbox is
+        * reused.
+        */
        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
@@ -686,8 +697,14 @@ lpfc_sli4_refresh_params(struct lpfc_hba *phba)
                return rc;
        }
        mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
-       phba->sli4_hba.pc_sli4_params.mi_ver =
+
+       /* Are we forcing MI off via module parameter? */
+       if (phba->cfg_enable_mi)
+               phba->sli4_hba.pc_sli4_params.mi_ver =
                        bf_get(cfg_mi_ver, mbx_sli4_parameters);
+       else
+               phba->sli4_hba.pc_sli4_params.mi_ver = 0;
+
        phba->sli4_hba.pc_sli4_params.cmf =
                        bf_get(cfg_cmf, mbx_sli4_parameters);
        phba->sli4_hba.pc_sli4_params.pls =
@@ -2176,7 +2193,6 @@ lpfc_handle_latt(struct lpfc_hba *phba)
        struct lpfc_sli   *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        volatile uint32_t control;
-       struct lpfc_dmabuf *mp;
        int rc = 0;
 
        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -2185,23 +2201,17 @@ lpfc_handle_latt(struct lpfc_hba *phba)
                goto lpfc_handle_latt_err_exit;
        }
 
-       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-       if (!mp) {
+       rc = lpfc_mbox_rsrc_prep(phba, pmb);
+       if (rc) {
                rc = 2;
-               goto lpfc_handle_latt_free_pmb;
-       }
-
-       mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-       if (!mp->virt) {
-               rc = 3;
-               goto lpfc_handle_latt_free_mp;
+               mempool_free(pmb, phba->mbox_mem_pool);
+               goto lpfc_handle_latt_err_exit;
        }
 
        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);
-
        psli->slistat.link_event++;
-       lpfc_read_topology(phba, pmb, mp);
+       lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
        pmb->vport = vport;
        /* Block ELS IOCBs until we have processed this mbox command */
@@ -2222,11 +2232,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 
 lpfc_handle_latt_free_mbuf:
        phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-lpfc_handle_latt_free_mp:
-       kfree(mp);
-lpfc_handle_latt_free_pmb:
-       mempool_free(pmb, phba->mbox_mem_pool);
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 lpfc_handle_latt_err_exit:
        /* Enable Link attention interrupts */
        spin_lock_irq(&phba->hbalock);
@@ -4317,9 +4323,10 @@ lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
 
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "6074 Current allocated XRI sgl count:%d, "
-                       "maximum XRI count:%d\n",
+                       "maximum XRI count:%d els_xri_cnt:%d\n\n",
                        phba->sli4_hba.io_xri_cnt,
-                       phba->sli4_hba.io_xri_max);
+                       phba->sli4_hba.io_xri_max,
+                       els_xri_cnt);
 
        cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
 
@@ -4458,12 +4465,11 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
                }
                pwqeq->sli4_lxritag = lxri;
                pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-               pwqeq->context1 = lpfc_ncmd;
 
                /* Initialize local short-hand pointers. */
                lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
                lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
-               lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+               lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
                spin_lock_init(&lpfc_ncmd->buf_lock);
 
                /* add the nvme buffer to a post list */
@@ -4472,7 +4478,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6114 Allocate %d out of %d requested new NVME "
-                       "buffers\n", bcnt, num_to_alloc);
+                       "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
+                       sizeof(*lpfc_ncmd));
+
 
        /* post the list of nvme buffer sgls to port if available */
        if (!list_empty(&post_nblist))
@@ -5307,7 +5315,6 @@ static void
 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
                         struct lpfc_acqe_link *acqe_link)
 {
-       struct lpfc_dmabuf *mp;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_mbx_read_top *la;
@@ -5324,18 +5331,13 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
                                "0395 The mboxq allocation failed\n");
                return;
        }
-       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-       if (!mp) {
+
+       rc = lpfc_mbox_rsrc_prep(phba, pmb);
+       if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                               "0396 The lpfc_dmabuf allocation failed\n");
+                               "0396 mailbox allocation failed\n");
                goto out_free_pmb;
        }
-       mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-       if (!mp->virt) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                               "0397 The mbuf allocation failed\n");
-               goto out_free_dmabuf;
-       }
 
        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);
@@ -5347,7 +5349,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
        phba->sli.slistat.link_event++;
 
        /* Create lpfc_handle_latt mailbox command from link ACQE */
-       lpfc_read_topology(phba, pmb, mp);
+       lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
        pmb->vport = phba->pport;
 
@@ -5385,10 +5387,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
         */
        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-               if (rc == MBX_NOT_FINISHED) {
-                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                       goto out_free_dmabuf;
-               }
+               if (rc == MBX_NOT_FINISHED)
+                       goto out_free_pmb;
                return;
        }
        /*
@@ -5423,10 +5423,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
 
        return;
 
-out_free_dmabuf:
-       kfree(mp);
 out_free_pmb:
-       mempool_free(pmb, phba->mbox_mem_pool);
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 }
 
 /**
@@ -5533,7 +5531,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
        struct tm broken;
        struct timespec64 cur_time;
        u32 cnt;
-       u16 value;
+       u32 value;
 
        /* Make sure we have a congestion info buffer */
        if (!phba->cgn_i)
@@ -5866,21 +5864,8 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
 
        /* Use the frequency found in the last rcv'ed FPIN */
        value = phba->cgn_fpin_frequency;
-       if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
-               cp->cgn_warn_freq = cpu_to_le16(value);
-       if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
-               cp->cgn_alarm_freq = cpu_to_le16(value);
-
-       /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
-        * are received by the HBA
-        */
-       value = phba->cgn_sig_freq;
-
-       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
-           phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
-               cp->cgn_warn_freq = cpu_to_le16(value);
-       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
-               cp->cgn_alarm_freq = cpu_to_le16(value);
+       cp->cgn_warn_freq = cpu_to_le16(value);
+       cp->cgn_alarm_freq = cpu_to_le16(value);
 
        lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
                                     LPFC_CGN_CRC32_SEED);
@@ -6237,7 +6222,6 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
 static void
 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
 {
-       struct lpfc_dmabuf *mp;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_mbx_read_top *la;
@@ -6297,18 +6281,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
                                "2897 The mboxq allocation failed\n");
                return;
        }
-       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-       if (!mp) {
+       rc = lpfc_mbox_rsrc_prep(phba, pmb);
+       if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                               "2898 The lpfc_dmabuf allocation failed\n");
+                               "2898 The mboxq prep failed\n");
                goto out_free_pmb;
        }
-       mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-       if (!mp->virt) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                               "2899 The mbuf allocation failed\n");
-               goto out_free_dmabuf;
-       }
 
        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);
@@ -6320,7 +6298,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
        phba->sli.slistat.link_event++;
 
        /* Create lpfc_handle_latt mailbox command from link ACQE */
-       lpfc_read_topology(phba, pmb, mp);
+       lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
        pmb->vport = phba->pport;
 
@@ -6364,16 +6342,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
        }
 
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-       if (rc == MBX_NOT_FINISHED) {
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               goto out_free_dmabuf;
-       }
+       if (rc == MBX_NOT_FINISHED)
+               goto out_free_pmb;
        return;
 
-out_free_dmabuf:
-       kfree(mp);
 out_free_pmb:
-       mempool_free(pmb, phba->mbox_mem_pool);
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 }
 
 /**
@@ -6565,12 +6539,15 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
        case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
                /* Misconfigured WWN. Reports that the SLI Port is configured
                 * to use FA-WWN, but the attached device doesn’t support it.
-                * No driver action is required.
                 * Event Data1 - N.A, Event Data2 - N.A
+                * This event only happens on the physical port.
                 */
-               lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
-                            "2699 Misconfigured FA-WWN - Attached device does "
-                            "not support FA-WWN\n");
+               lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
+                            "2699 Misconfigured FA-PWWN - Attached device "
+                            "does not support FA-PWWN\n");
+               phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
+               memset(phba->pport->fc_portname.u.wwn, 0,
+                      sizeof(struct lpfc_name));
                break;
        case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
                /* EEPROM failure. No driver action is required */
@@ -6595,9 +6572,6 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
                /* Alarm overrides warning, so check that first */
                if (cgn_signal->alarm_cnt) {
                        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
-                               /* Keep track of alarm cnt for cgn_info */
-                               atomic_add(cgn_signal->alarm_cnt,
-                                          &phba->cgn_fabric_alarm_cnt);
                                /* Keep track of alarm cnt for CMF_SYNC_WQE */
                                atomic_add(cgn_signal->alarm_cnt,
                                           &phba->cgn_sync_alarm_cnt);
@@ -6606,8 +6580,6 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
                        /* signal action needs to be taken */
                        if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
                            phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
-                               /* Keep track of warning cnt for cgn_info */
-                               atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
                                /* Keep track of warning cnt for CMF_SYNC_WQE */
                                atomic_add(cnt, &phba->cgn_sync_warn_cnt);
                        }
@@ -8027,6 +7999,18 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        rc = lpfc_sli4_read_config(phba);
        if (unlikely(rc))
                goto out_free_bsmbx;
+
+       if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+               /* Right now the link is down. If FA-PWWN is configured, the
+                * firmware will try FLOGI before the driver gets a link up.
+                * If it fails, the driver should get a MISCONFIGURED async
+                * event which will clear this flag. The driver is only
+                * notified on failure; if FLOGI succeeds, there is no
+                * notification given. Assume success.
+                */
+               phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+       }
+
        rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
        if (unlikely(rc))
                goto out_free_bsmbx;
@@ -9001,6 +8985,36 @@ lpfc_hba_free(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine sets up the initial FDMI attribute masks for
+ * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
+ * to get these attributes first before falling back; the attribute
+ * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
+ **/
+void
+lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
+{
+       struct lpfc_hba *phba = vport->phba;
+
+       vport->load_flag |= FC_ALLOW_FDMI;
+       if (phba->cfg_enable_SmartSAN ||
+           phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
+               /* Setup appropriate attribute masks */
+               vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
+               if (phba->cfg_enable_SmartSAN)
+                       vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
+               else
+                       vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+       }
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+                       "6077 Setup FDMI mask: hba x%x port x%x\n",
+                       vport->fdmi_hba_mask, vport->fdmi_port_mask);
+}
+
+/**
  * lpfc_create_shost - Create hba physical port with associated scsi host.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -9043,21 +9057,12 @@ lpfc_create_shost(struct lpfc_hba *phba)
        /* Put reference to SCSI host to driver's device private data */
        pci_set_drvdata(phba->pcidev, shost);
 
+       lpfc_setup_fdmi_mask(vport);
+
        /*
         * At this point we are fully registered with PSA. In addition,
         * any initial discovery should be completed.
         */
-       vport->load_flag |= FC_ALLOW_FDMI;
-       if (phba->cfg_enable_SmartSAN ||
-           (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
-
-               /* Setup appropriate attribute masks */
-               vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
-               if (phba->cfg_enable_SmartSAN)
-                       vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
-               else
-                       vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
-       }
        return 0;
 }
 
@@ -9830,7 +9835,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
        struct lpfc_rsrc_desc_fcfcoe *desc;
        char *pdesc_0;
        uint16_t forced_link_speed;
-       uint32_t if_type, qmin;
+       uint32_t if_type, qmin, fawwpn;
        int length, i, rc = 0, rc2;
 
        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -9872,10 +9877,23 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                        phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
                }
 
+               fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
+
+               if (fawwpn) {
+                       lpfc_printf_log(phba, KERN_INFO,
+                                       LOG_INIT | LOG_DISCOVERY,
+                                       "2702 READ_CONFIG: FA-PWWN is "
+                                       "configured on\n");
+                       phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
+               } else {
+                       phba->sli4_hba.fawwpn_flag = 0;
+               }
+
                phba->sli4_hba.conf_trunk =
                        bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
                phba->sli4_hba.extents_in_use =
                        bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
+
                phba->sli4_hba.max_cfg_param.max_xri =
                        bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
                /* Reduce resource usage in kdump environment */
@@ -14832,9 +14850,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        /* Check if there are static vports to be created. */
        lpfc_create_static_vport(phba);
 
-       /* Enable RAS FW log support */
-       lpfc_sli4_ras_setup(phba);
-
        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
 
@@ -15700,34 +15715,7 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
        unsigned int temp_idx;
        int i;
        int j = 0;
-       unsigned long rem_nsec, iflags;
-       bool log_verbose = false;
-       struct lpfc_vport *port_iterator;
-
-       /* Don't dump messages if we explicitly set log_verbose for the
-        * physical port or any vport.
-        */
-       if (phba->cfg_log_verbose)
-               return;
-
-       spin_lock_irqsave(&phba->port_list_lock, iflags);
-       list_for_each_entry(port_iterator, &phba->port_list, listentry) {
-               if (port_iterator->load_flag & FC_UNLOADING)
-                       continue;
-               if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
-                       if (port_iterator->cfg_log_verbose)
-                               log_verbose = true;
-
-                       scsi_host_put(lpfc_shost_from_vport(port_iterator));
-
-                       if (log_verbose) {
-                               spin_unlock_irqrestore(&phba->port_list_lock,
-                                                      iflags);
-                               return;
-                       }
-               }
-       }
-       spin_unlock_irqrestore(&phba->port_list_lock, iflags);
+       unsigned long rem_nsec;
 
        if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
                return;
index 7d480c7..4d455da 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -73,7 +73,7 @@ do { \
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
 do { \
        { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \
-               if ((mask) & LOG_TRACE_EVENT) \
+               if ((mask) & LOG_TRACE_EVENT && !(vport)->cfg_log_verbose) \
                        lpfc_dmp_dbg((vport)->phba); \
                dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
                           fmt, (vport)->phba->brd_no, vport->vpi, ##arg);  \
@@ -89,11 +89,11 @@ do { \
                                 (phba)->pport->cfg_log_verbose : \
                                 (phba)->cfg_log_verbose; \
        if (((mask) & log_verbose) || (level[1] <= '3')) { \
-               if ((mask) & LOG_TRACE_EVENT) \
+               if ((mask) & LOG_TRACE_EVENT && !log_verbose) \
                        lpfc_dmp_dbg(phba); \
                dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
                        fmt, phba->brd_no, ##arg); \
-       } else  if (!(phba)->cfg_log_verbose)\
+       } else if (!log_verbose)\
                lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \
        } \
 } while (0)
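
The change to both macros is the extra !log_verbose test: the saved-message ring is only dumped for LOG_TRACE_EVENT when verbose logging is off, since a verbose-enabled port already printed everything to the console. Restated as plain C, as a sketch of the policy rather than the macro itself:

	if ((mask & log_verbose) || level[1] <= '3') {
		if ((mask & LOG_TRACE_EVENT) && !log_verbose)
			lpfc_dmp_dbg(phba);	/* flush the in-memory ring */
		/* print the message to the console via dev_printk() */
	} else if (!log_verbose) {
		/* quiet path: record the message in the ring only */
	}
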
index e1404ab..9858b17 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
 #include "lpfc_compat.h"
 
 /**
+ * lpfc_mbox_rsrc_prep - Prepare a mailbox with DMA buffer memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * A mailbox command consists of the pool memory for the command, @mbox, and
+ * one or more DMA buffers for the data transfer.  This routine provides
+ * a standard framework for allocating the dma buffer and assigning it to
+ * the @mbox.  Callers should clean up the mbox with a call to
+ * lpfc_mbox_rsrc_cleanup.
+ *
+ * The lpfc_mbuf_alloc routine acquires the hbalock, so the caller is
+ * responsible for ensuring the hbalock is released.  Also note that the
+ * driver design is a single dmabuf/mbuf per mbox in the ctx_buf.
+ *
+ **/
+int
+lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+       struct lpfc_dmabuf *mp;
+
+       mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+       if (!mp)
+               return -ENOMEM;
+
+       mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+       if (!mp->virt) {
+               kfree(mp);
+               return -ENOMEM;
+       }
+
+       memset(mp->virt, 0, LPFC_BPL_SIZE);
+
+       /* Initialization only.  Driver does not use a list of dmabufs. */
+       INIT_LIST_HEAD(&mp->list);
+       mbox->ctx_buf = mp;
+       return 0;
+}
+
+/**
+ * lpfc_mbox_rsrc_cleanup - Free the mailbox DMA buffer and virtual memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ * @locked: value that indicates if the hbalock is held (1) or not (0).
+ *
+ * A mailbox command consists of the pool memory for the command, @mbox, and
+ * possibly a DMA buffer for the data transfer.  This routine provides
+ * a standard framework for releasing any dma buffers and freeing all
+ * memory resources in it as well as releasing the @mbox back to the @phba pool.
+ * Callers should use this routine for cleanup for all mailboxes prepped with
+ * lpfc_mbox_rsrc_prep.
+ *
+ **/
+void
+lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+                      enum lpfc_mbox_ctx locked)
+{
+       struct lpfc_dmabuf *mp;
+
+       mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+       mbox->ctx_buf = NULL;
+
+       /* Release the generic BPL buffer memory.  */
+       if (mp) {
+               if (locked == MBOX_THD_LOCKED)
+                       __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               else
+                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+       }
+
+       mempool_free(mbox, phba->mbox_mem_pool);
+}
+
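
Every converted call site in this series follows the same pattern around these two helpers; a hedged sketch of the intended sequence, with the mailbox command setup elided:

	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Attach the single dmabuf; it is stored in mbox->ctx_buf. */
	if (lpfc_mbox_rsrc_prep(phba, mbox)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	/* ... build the mailbox command, e.g. lpfc_read_sparam() ... */

	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		/* Not holding hbalock here, hence MBOX_THD_UNLOCKED. */
		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
		return -EIO;
	}
	/* On success the completion handler performs the cleanup call. */
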
+/**
  * lpfc_dump_static_vport - Dump HBA's static vport information.
  * @phba: pointer to lpfc hba data structure.
  * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -61,6 +135,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
 {
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
+       int rc;
 
        mb = &pmb->u.mb;
 
@@ -79,22 +154,15 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
                return 0;
        }
 
-       /* For SLI4 HBAs driver need to allocate memory */
-       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-       if (mp)
-               mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-
-       if (!mp || !mp->virt) {
-               kfree(mp);
+       rc = lpfc_mbox_rsrc_prep(phba, pmb);
+       if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-                       "2605 lpfc_dump_static_vport: memory"
-                       " allocation failed\n");
+                               "2605 %s: memory allocation failed\n",
+                               __func__);
                return 1;
        }
-       memset(mp->virt, 0, LPFC_BPL_SIZE);
-       INIT_LIST_HEAD(&mp->list);
-       /* save address for completion */
-       pmb->ctx_buf = (uint8_t *)mp;
+
+       mp = pmb->ctx_buf;
        mb->un.varWords[3] = putPaddrLow(mp->phys);
        mb->un.varWords[4] = putPaddrHigh(mp->phys);
        mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -606,26 +674,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
 {
        struct lpfc_dmabuf *mp;
        MAILBOX_t *mb;
+       int rc;
 
-       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
-       mb->mbxOwner = OWN_HOST;
-
        /* Get a buffer to hold the HBAs Service Parameters */
-
-       mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
-       if (mp)
-               mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-       if (!mp || !mp->virt) {
-               kfree(mp);
-               mb->mbxCommand = MBX_READ_SPARM64;
-               /* READ_SPARAM: no buffers */
+       rc = lpfc_mbox_rsrc_prep(phba, pmb);
+       if (rc) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
                                "0301 READ_SPARAM: no buffers\n");
-               return (1);
+               return 1;
        }
-       INIT_LIST_HEAD(&mp->list);
+
+       mp = pmb->ctx_buf;
+       mb = &pmb->u.mb;
+       mb->mbxOwner = OWN_HOST;
        mb->mbxCommand = MBX_READ_SPARM64;
        mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
        mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -633,9 +696,6 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
        if (phba->sli_rev >= LPFC_SLI_REV3)
                mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
 
-       /* save address for completion */
-       pmb->ctx_buf = mp;
-
        return (0);
 }
 
@@ -756,6 +816,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
        MAILBOX_t *mb = &pmb->u.mb;
        uint8_t *sparam;
        struct lpfc_dmabuf *mp;
+       int rc;
 
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
@@ -766,12 +827,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
                mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
        mb->un.varRegLogin.did = did;
        mb->mbxOwner = OWN_HOST;
+
        /* Get a buffer to hold NPorts Service Parameters */
-       mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
-       if (mp)
-               mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-       if (!mp || !mp->virt) {
-               kfree(mp);
+       rc = lpfc_mbox_rsrc_prep(phba, pmb);
+       if (rc) {
                mb->mbxCommand = MBX_REG_LOGIN64;
                /* REG_LOGIN: no buffers */
                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
@@ -779,15 +838,13 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
                                "rpi x%x\n", vpi, did, rpi);
                return 1;
        }
-       INIT_LIST_HEAD(&mp->list);
-       sparam = mp->virt;
 
        /* Copy param's into a new buffer */
+       mp = pmb->ctx_buf;
+       sparam = mp->virt;
        memcpy(sparam, param, sizeof (struct serv_parm));
 
-       /* save address for completion */
-       pmb->ctx_buf = (uint8_t *)mp;
-
+       /* Finish initializing the mailbox. */
        mb->mbxCommand = MBX_REG_LOGIN64;
        mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
        mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -1723,7 +1780,9 @@ lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
  * @phba: pointer to lpfc hba data structure.
  * @mbox: pointer to lpfc mbox command.
  *
- * This routine frees SLI4 specific mailbox command for sending IOCTL command.
+ * This routine cleans up and releases an SLI4 mailbox command that was
+ * configured using lpfc_sli4_config.  It accounts for the embedded and
+ * non-embedded config types.
  **/
 void
 lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
@@ -2277,33 +2336,24 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
 {
        struct lpfc_dmabuf *mp = NULL;
        MAILBOX_t *mb;
+       int rc;
 
        memset(mbox, 0, sizeof(*mbox));
        mb = &mbox->u.mb;
 
-       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-       if (mp)
-               mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-
-       if (!mp || !mp->virt) {
-               kfree(mp);
-               /* dump config region 23 failed to allocate memory */
+       rc = lpfc_mbox_rsrc_prep(phba, mbox);
+       if (rc) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-                       "2569 lpfc dump config region 23: memory"
-                       " allocation failed\n");
+                               "2569 %s: memory allocation failed\n",
+                               __func__);
                return 1;
        }
 
-       memset(mp->virt, 0, LPFC_BPL_SIZE);
-       INIT_LIST_HEAD(&mp->list);
-
-       /* save address for completion */
-       mbox->ctx_buf = (uint8_t *)mp;
-
        mb->mbxCommand = MBX_DUMP_MEMORY;
        mb->un.varDmp.type = DMP_NV_PARAMS;
        mb->un.varDmp.region_id = DMP_REGION_23;
        mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
+       mp = mbox->ctx_buf;
        mb->un.varWords[3] = putPaddrLow(mp->phys);
        mb->un.varWords[4] = putPaddrHigh(mp->phys);
        return 0;
@@ -2326,7 +2376,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        rc = SUCCESS;
 
 mbx_failed:
-       lpfc_sli4_mbox_cmd_free(phba, mboxq);
+       lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
        rdp_context->cmpl(phba, rdp_context, rc);
 }
 
@@ -2338,30 +2388,25 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
                        (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
 
        if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
-               goto error_mbuf_free;
+               goto error_mbox_free;
 
        lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
                                DMP_SFF_PAGE_A2_SIZE);
 
-       /* We don't need dma buffer for link stat. */
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-
-       memset(mbox, 0, sizeof(*mbox));
        lpfc_read_lnk_stat(phba, mbox);
        mbox->vport = rdp_context->ndlp->vport;
+
+       /* Save the dma buffer for cleanup in the final completion. */
+       mbox->ctx_buf = mp;
        mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
        mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
        if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
-               goto error_cmd_free;
+               goto error_mbox_free;
 
        return;
 
-error_mbuf_free:
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-error_cmd_free:
-       lpfc_sli4_mbox_cmd_free(phba, mbox);
+error_mbox_free:
+       lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
        rdp_context->cmpl(phba, rdp_context, FAILURE);
 }
 
@@ -2409,9 +2454,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
        return;
 
 error:
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       lpfc_sli4_mbox_cmd_free(phba, mbox);
+       lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
        rdp_context->cmpl(phba, rdp_context, FAILURE);
 }
 
@@ -2427,27 +2470,19 @@ error:
 int
 lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
 {
+       int rc;
        struct lpfc_dmabuf *mp = NULL;
 
        memset(mbox, 0, sizeof(*mbox));
 
-       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-       if (mp)
-               mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-       if (!mp || !mp->virt) {
-               kfree(mp);
+       rc = lpfc_mbox_rsrc_prep(phba, mbox);
+       if (rc) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
                        "3569 dump type 3 page 0xA0 allocation failed\n");
                return 1;
        }
 
-       memset(mp->virt, 0, LPFC_BPL_SIZE);
-       INIT_LIST_HEAD(&mp->list);
-
        bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
-       /* save address for completion */
-       mbox->ctx_buf = mp;
-
        bf_set(lpfc_mbx_memory_dump_type3_type,
                &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
        bf_set(lpfc_mbx_memory_dump_type3_link,
@@ -2456,6 +2491,8 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
                &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
        bf_set(lpfc_mbx_memory_dump_type3_length,
                &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
+
+       mp = mbox->ctx_buf;
        mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
        mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
 
index c4e1a07..639f866 100644
@@ -173,9 +173,9 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        void     *ptr = NULL;
        u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
 
-       /* For lpfc_els_abort, context2 could be zero'ed to delay
+       /* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay
         * freeing associated memory till after ABTS completes.
         */
        if (pcmd) {
@@ -327,7 +327,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
        struct lpfc_hba    *phba = vport->phba;
        struct lpfc_dmabuf *pcmd;
-       struct lpfc_dmabuf *mp;
        uint64_t nlp_portwwn = 0;
        uint32_t *lp;
        union lpfc_wqe128 *wqe;
@@ -343,7 +342,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        u32 remote_did;
 
        memset(&stat, 0, sizeof (struct ls_rjt));
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        lp = (uint32_t *) pcmd->virt;
        sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
        if (wwn_to_u64(sp->portName.u.wwn) == 0) {
@@ -514,6 +513,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        lpfc_config_link(phba, link_mbox);
                        link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        link_mbox->vport = vport;
+
+                       /* The default completion handling for CONFIG_LINK
+                        * does not require the ndlp so no reference is needed.
+                        */
                        link_mbox->ctx_ndlp = ndlp;
 
                        rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
@@ -592,12 +595,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                 * a default RPI.
                 */
                if (phba->sli_rev == LPFC_SLI_REV4) {
-                       mp = (struct lpfc_dmabuf *)login_mbox->ctx_buf;
-                       if (mp) {
-                               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                               kfree(mp);
-                       }
-                       mempool_free(login_mbox, phba->mbox_mem_pool);
+                       lpfc_mbox_rsrc_cleanup(phba, login_mbox,
+                                              MBOX_THD_UNLOCKED);
                        login_mbox = NULL;
                } else {
                        /* In order to preserve RPIs, we want to cleanup
@@ -614,9 +613,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
                stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
                rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
-                       ndlp, login_mbox);
-               if (rc)
-                       mempool_free(login_mbox, phba->mbox_mem_pool);
+                                        ndlp, login_mbox);
+               if (rc && login_mbox)
+                       lpfc_mbox_rsrc_cleanup(phba, login_mbox,
+                                              MBOX_THD_UNLOCKED);
                return 1;
        }
 
@@ -637,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         */
        login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
        login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+       if (!login_mbox->ctx_ndlp)
+               goto out;
+
        login_mbox->context3 = save_iocb; /* For PLOGI ACC */
 
        spin_lock_irq(&ndlp->lock);
@@ -645,8 +648,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        /* Start the ball rolling by issuing REG_LOGIN here */
        rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
-       if (rc == MBX_NOT_FINISHED)
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_nlp_put(ndlp);
                goto out;
+       }
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
 
        return 1;
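
The reference handling added here is the same pairing used in lpfc_release_rpi above: the node reference taken for the mailbox's ctx_ndlp belongs to the mailbox, and is only dropped by hand when the mailbox never reaches the port. A condensed sketch:

	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);	/* reference held by the mailbox */
	if (!mbox->ctx_ndlp)
		goto out;			/* node is already being released */

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_nlp_put(ndlp);		/* issue failed: drop it here */
		goto out;
	}
	/* Otherwise the mailbox completion handler drops the reference. */
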
@@ -710,7 +715,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        uint32_t *lp;
        uint32_t cmd;
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
        lp = (uint32_t *) pcmd->virt;
 
        cmd = *lp++;
@@ -918,7 +923,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
        uint32_t *payload;
        uint32_t cmd;
 
-       payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+       payload = cmdiocb->cmd_dmabuf->virt;
        cmd = *payload;
        if (vport->phba->nvmet_support) {
                /* Must be a NVME PRLI */
@@ -955,9 +960,9 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        struct fc_rport *rport = ndlp->rport;
        u32 roles;
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-       lp = (uint32_t *) pcmd->virt;
-       npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+       pcmd = cmdiocb->cmd_dmabuf;
+       lp = (uint32_t *)pcmd->virt;
+       npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t));
 
        if ((npr->prliType == PRLI_FCP_TYPE) ||
            (npr->prliType == PRLI_NVME_TYPE)) {
@@ -1103,8 +1108,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
                                 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
 
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-               if (rc == MBX_NOT_FINISHED)
+               if (rc == MBX_NOT_FINISHED) {
+                       lpfc_nlp_put(ndlp);
                        mempool_free(pmb, phba->mbox_mem_pool);
+               }
        }
 }
 
@@ -1218,7 +1225,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_iocbq *cmdiocb = arg;
-       struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
        uint32_t *lp = (uint32_t *) pcmd->virt;
        struct serv_parm *sp = (struct serv_parm *) (lp + 1);
        struct ls_rjt stat;
@@ -1328,7 +1335,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 {
        struct lpfc_hba    *phba = vport->phba;
        struct lpfc_iocbq  *cmdiocb, *rspiocb;
-       struct lpfc_dmabuf *pcmd, *prsp, *mp;
+       struct lpfc_dmabuf *pcmd, *prsp;
        uint32_t *lp;
        uint32_t vid, flag;
        struct serv_parm *sp;
@@ -1339,7 +1346,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
        u32 did;
 
        cmdiocb = (struct lpfc_iocbq *) arg;
-       rspiocb = cmdiocb->context_un.rsp_iocb;
+       rspiocb = cmdiocb->rsp_iocb;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
 
@@ -1351,7 +1358,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
        if (ulp_status)
                goto out;
 
-       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+       pcmd = cmdiocb->cmd_dmabuf;
 
        prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
        if (!prsp)
@@ -1495,11 +1502,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
                 * command
                 */
                lpfc_nlp_put(ndlp);
-               mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
-               mempool_free(mbox, phba->mbox_mem_pool);
-
+               lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "0134 PLOGI: cannot issue reg_login "
                                 "Data: x%x x%x x%x x%x\n",
@@ -1697,7 +1700,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
        u32 ulp_status;
 
        cmdiocb = (struct lpfc_iocbq *) arg;
-       rspiocb = cmdiocb->context_un.rsp_iocb;
+       rspiocb = cmdiocb->rsp_iocb;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
 
@@ -1850,7 +1853,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
        struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
        LPFC_MBOXQ_t      *mb;
        LPFC_MBOXQ_t      *nextmb;
-       struct lpfc_dmabuf *mp;
        struct lpfc_nodelist *ns_ndlp;
 
        cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1870,16 +1872,11 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
-                       mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
-                       if (mp) {
-                               __lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                               kfree(mp);
-                       }
                        ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
                        lpfc_nlp_put(ndlp);
                        list_del(&mb->list);
                        phba->sli.mboxq_cnt--;
-                       mempool_free(mb, phba->mbox_mem_pool);
+                       lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
                }
        }
        spin_unlock_irq(&phba->hbalock);
@@ -2152,7 +2149,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        u32 ulp_status;
 
        cmdiocb = (struct lpfc_iocbq *) arg;
-       rspiocb = cmdiocb->context_un.rsp_iocb;
+       rspiocb = cmdiocb->rsp_iocb;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
 
@@ -2772,7 +2769,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        u32 ulp_status;
 
        cmdiocb = (struct lpfc_iocbq *) arg;
-       rspiocb = cmdiocb->context_un.rsp_iocb;
+       rspiocb = cmdiocb->rsp_iocb;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
 
@@ -2791,7 +2788,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        u32 ulp_status;
 
        cmdiocb = (struct lpfc_iocbq *) arg;
-       rspiocb = cmdiocb->context_un.rsp_iocb;
+       rspiocb = cmdiocb->rsp_iocb;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
 
@@ -2827,7 +2824,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        u32 ulp_status;
 
        cmdiocb = (struct lpfc_iocbq *) arg;
-       rspiocb = cmdiocb->context_un.rsp_iocb;
+       rspiocb = cmdiocb->rsp_iocb;
 
        ulp_status = get_job_ulpstatus(phba, rspiocb);
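Across the lpfc_nportdisc.c hunks above, the iocbq's numbered, untyped slots (context1, context2, context_un.rsp_iocb) give way to named, typed members (ndlp, cmd_dmabuf, rsp_iocb), which removes the casts at every use site and lets the compiler check each role. A minimal stand-alone C sketch of that refactoring pattern; the structure and field names here are illustrative, not the real lpfc definitions:

#include <stdio.h>

struct dmabuf { const char *virt; };
struct node   { int did; };

/* Before: anonymous void * slots, so every consumer must cast. */
struct req_old {
        void *context1;              /* really a struct node *   */
        void *context2;              /* really a struct dmabuf * */
};

/* After: the same data, but each role has a named, typed member. */
struct req_new {
        struct node   *ndlp;         /* remote node              */
        struct dmabuf *cmd_dmabuf;   /* command payload buffer   */
};

static void complete_old(struct req_old *r)
{
        struct node   *n = (struct node *)r->context1;    /* cast needed */
        struct dmabuf *p = (struct dmabuf *)r->context2;  /* cast needed */

        printf("old: did=%d payload=%s\n", n->did, p->virt);
}

static void complete_new(struct req_new *r)
{
        /* No casts: misuse of a field is now a compile-time type error. */
        printf("new: did=%d payload=%s\n", r->ndlp->did, r->cmd_dmabuf->virt);
}

int main(void)
{
        struct dmabuf buf = { "PLOGI payload" };
        struct node   n   = { 0x010203 };
        struct req_old o  = { &n, &buf };
        struct req_new q  = { &n, &buf };

        complete_old(&o);
        complete_new(&q);
        return 0;
}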
 
index 8d26f20..5385f4d 100644
@@ -319,8 +319,10 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
        struct lpfc_nodelist *ndlp;
        uint32_t status;
 
-       pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
-       ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
+       pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
+       ndlp = cmdwqe->ndlp;
+       buf_ptr = cmdwqe->bpl_dmabuf;
+
        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -330,16 +332,16 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
                         pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                         cmdwqe->sli4_xritag, status,
                         (wcqe->parameter & 0xffff),
-                        cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+                        cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
+                        ndlp);
 
        lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
                         cmdwqe->sli4_xritag, status, wcqe->parameter);
 
-       if (cmdwqe->context3) {
-               buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
+       if (buf_ptr) {
                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                kfree(buf_ptr);
-               cmdwqe->context3 = NULL;
+               cmdwqe->bpl_dmabuf = NULL;
        }
        if (pnvme_lsreq->done)
                pnvme_lsreq->done(pnvme_lsreq, status);
@@ -351,7 +353,7 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
                                cmdwqe->sli4_xritag, status);
        if (ndlp) {
                lpfc_nlp_put(ndlp);
-               cmdwqe->context1 = NULL;
+               cmdwqe->ndlp = NULL;
        }
        lpfc_sli_release_iocbq(phba, cmdwqe);
 }
@@ -407,19 +409,19 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
        /* Initialize only 64 bytes */
        memset(wqe, 0, sizeof(union lpfc_wqe));
 
-       genwqe->context3 = (uint8_t *)bmp;
+       genwqe->bpl_dmabuf = bmp;
        genwqe->cmd_flag |= LPFC_IO_NVME_LS;
 
        /* Save for completion so we can release these resources */
-       genwqe->context1 = lpfc_nlp_get(ndlp);
-       if (!genwqe->context1) {
+       genwqe->ndlp = lpfc_nlp_get(ndlp);
+       if (!genwqe->ndlp) {
                dev_warn(&phba->pcidev->dev,
                         "Warning: Failed node ref, not sending LS_REQ\n");
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }
 
-       genwqe->context2 = (uint8_t *)pnvme_lsreq;
+       genwqe->context_un.nvme_lsreq = pnvme_lsreq;
        /* Fill in payload, bp points to frame payload */
 
        if (!tmo)
@@ -730,7 +732,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(&phba->hbalock);
        spin_lock(&pring->ring_lock);
        list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
-               if (wqe->context2 == pnvme_lsreq) {
+               if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
                        wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
                        foundit = true;
                        break;
@@ -929,8 +931,7 @@ static void
 lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                      struct lpfc_iocbq *pwqeOut)
 {
-       struct lpfc_io_buf *lpfc_ncmd =
-               (struct lpfc_io_buf *)pwqeIn->context1;
+       struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
        struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
        struct lpfc_vport *vport = pwqeIn->vport;
        struct nvmefc_fcp_req *nCmd;
@@ -1400,8 +1401,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
                                if ((nseg - 1) == i)
                                        bf_set(lpfc_sli4_sge_last, sgl, 1);
 
-                               physaddr = data_sg->dma_address;
-                               dma_len = data_sg->length;
+                               physaddr = sg_dma_address(data_sg);
+                               dma_len = sg_dma_len(data_sg);
                                sgl->addr_lo = cpu_to_le32(
                                                         putPaddrLow(physaddr));
                                sgl->addr_hi = cpu_to_le32(
@@ -2356,6 +2357,11 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
 
        spin_lock_irq(&ndlp->lock);
+
+       /* If an oldrport exists, so does the ndlp reference.  If not,
+        * a new reference is needed because either the node has never
+        * been registered or it has been unregistered and is getting deleted.
+        */
        oldrport = lpfc_ndlp_get_nrport(ndlp);
        if (oldrport) {
                prev_ndlp = oldrport->ndlp;
@@ -2466,12 +2472,12 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        if (!nrport || !remoteport)
                goto rescan_exit;
 
-       /* Only rescan if we are an NVME target in the MAPPED state */
+       /* Rescan an NVME target in MAPPED state with DISCOVERY role set */
        if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
            ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
                nvme_fc_rescan_remoteport(remoteport);
 
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                                 "6172 NVME rescanned DID x%06x "
                                 "port_state x%x\n",
                                 ndlp->nlp_DID, remoteport->port_state);
@@ -2717,7 +2723,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        struct lpfc_wcqe_complete wcqe;
        struct lpfc_wcqe_complete *wcqep = &wcqe;
 
-       lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
+       lpfc_ncmd = pwqeIn->io_buf;
        if (!lpfc_ncmd) {
                lpfc_sli_release_iocbq(phba, pwqeIn);
                return;
index 9543826..c0ee0b3 100644
@@ -295,7 +295,7 @@ void
 __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                           struct lpfc_iocbq *rspwqe)
 {
-       struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
+       struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
        struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
        struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
        uint32_t status, result;
@@ -317,9 +317,9 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
                        status, result, axchg->oxid);
 
-       lpfc_nlp_put(cmdwqe->context1);
-       cmdwqe->context2 = NULL;
-       cmdwqe->context3 = NULL;
+       lpfc_nlp_put(cmdwqe->ndlp);
+       cmdwqe->context_un.axchg = NULL;
+       cmdwqe->bpl_dmabuf = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        ls_rsp->done(ls_rsp);
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -728,7 +728,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        int id;
 #endif
 
-       ctxp = cmdwqe->context2;
+       ctxp = cmdwqe->context_un.axchg;
        ctxp->flag &= ~LPFC_NVME_IO_INP;
 
        rsp = &ctxp->hdlrctx.fcp_req;
@@ -903,7 +903,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
        /* Save numBdes for bpl2sgl */
        nvmewqeq->num_bdes = 1;
        nvmewqeq->hba_wqidx = 0;
-       nvmewqeq->context3 = &dmabuf;
+       nvmewqeq->bpl_dmabuf = &dmabuf;
        dmabuf.virt = &bpl;
        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
@@ -917,7 +917,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
         */
 
        nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
-       nvmewqeq->context2 = axchg;
+       nvmewqeq->context_un.axchg = axchg;
 
        lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
                         axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
@@ -925,7 +925,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
        rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
 
        /* clear to be sure there's no reference */
-       nvmewqeq->context3 = NULL;
+       nvmewqeq->bpl_dmabuf = NULL;
 
        if (rc == WQE_SUCCESS) {
                /*
@@ -942,7 +942,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
 
        rc = -ENXIO;
 
-       lpfc_nlp_put(nvmewqeq->context1);
+       lpfc_nlp_put(nvmewqeq->ndlp);
 
 out_free_buf:
        /* Give back resources */
@@ -1075,7 +1075,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
        }
 
        nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
-       nvmewqeq->context2 = ctxp;
+       nvmewqeq->context_un.axchg = ctxp;
        nvmewqeq->cmd_flag |=  LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;
 
@@ -1119,8 +1119,8 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                        ctxp->oxid, rc);
 
        ctxp->wqeq->hba_wqidx = 0;
-       nvmewqeq->context2 = NULL;
-       nvmewqeq->context3 = NULL;
+       nvmewqeq->context_un.axchg = NULL;
+       nvmewqeq->bpl_dmabuf = NULL;
        rc = -EBUSY;
 aerr:
        return rc;
@@ -1590,7 +1590,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
                /* Initialize WQE */
                memset(wqe, 0, sizeof(union lpfc_wqe));
 
-               ctx_buf->iocbq->context1 = NULL;
+               ctx_buf->iocbq->cmd_dmabuf = NULL;
                spin_lock(&phba->sli4_hba.sgl_list_lock);
                ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);
@@ -2025,7 +2025,7 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
                                 &wq->wqfull_list, list) {
                if (ctxp) {
                        /* Checking for a specific IO to flush */
-                       if (nvmewqeq->context2 == ctxp) {
+                       if (nvmewqeq->context_un.axchg == ctxp) {
                                list_del(&nvmewqeq->list);
                                spin_unlock_irqrestore(&pring->ring_lock,
                                                       iflags);
@@ -2071,7 +2071,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
                list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
                                 list);
                spin_unlock_irqrestore(&pring->ring_lock, iflags);
-               ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
+               ctxp = nvmewqeq->context_un.axchg;
                rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
                spin_lock_irqsave(&pring->ring_lock, iflags);
                if (rc == -EBUSY) {
@@ -2617,10 +2617,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
        ctxp->wqeq = nvmewqe;
 
        /* prevent preparing wqe with NULL ndlp reference */
-       nvmewqe->context1 = lpfc_nlp_get(ndlp);
-       if (nvmewqe->context1 == NULL)
+       nvmewqe->ndlp = lpfc_nlp_get(ndlp);
+       if (!nvmewqe->ndlp)
                goto nvme_wqe_free_wqeq_exit;
-       nvmewqe->context2 = ctxp;
+       nvmewqe->context_un.axchg = ctxp;
 
        wqe = &nvmewqe->wqe;
        memset(wqe, 0, sizeof(union lpfc_wqe));
@@ -2692,8 +2692,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
        return nvmewqe;
 
 nvme_wqe_free_wqeq_exit:
-       nvmewqe->context2 = NULL;
-       nvmewqe->context3 = NULL;
+       nvmewqe->context_un.axchg = NULL;
+       nvmewqe->ndlp = NULL;
+       nvmewqe->bpl_dmabuf = NULL;
        lpfc_sli_release_iocbq(phba, nvmewqe);
        return NULL;
 }
@@ -2995,7 +2996,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
        nvmewqe->retry = 1;
        nvmewqe->vport = phba->pport;
        nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
-       nvmewqe->context1 = ndlp;
+       nvmewqe->ndlp = ndlp;
 
        for_each_sg(rsp->sg, sgel, nsegs, i) {
                physaddr = sg_dma_address(sgel);
@@ -3053,7 +3054,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        bool released = false;
        struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
 
-       ctxp = cmdwqe->context2;
+       ctxp = cmdwqe->context_un.axchg;
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3084,8 +3085,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);
 
-       cmdwqe->context2 = NULL;
-       cmdwqe->context3 = NULL;
+       cmdwqe->rsp_dmabuf = NULL;
+       cmdwqe->bpl_dmabuf = NULL;
        /*
         * if transport has released ctx, then can reuse it. Otherwise,
         * will be recycled by transport release call.
@@ -3123,7 +3124,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        bool released = false;
        struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
 
-       ctxp = cmdwqe->context2;
+       ctxp = cmdwqe->context_un.axchg;
        result = wcqe->parameter;
 
        if (!ctxp) {
@@ -3169,8 +3170,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);
 
-       cmdwqe->context2 = NULL;
-       cmdwqe->context3 = NULL;
+       cmdwqe->rsp_dmabuf = NULL;
+       cmdwqe->bpl_dmabuf = NULL;
        /*
         * if transport has released ctx, then can reuse it. Otherwise,
         * will be recycled by transport release call.
@@ -3203,7 +3204,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        uint32_t result;
        struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
 
-       ctxp = cmdwqe->context2;
+       ctxp = cmdwqe->context_un.axchg;
        result = wcqe->parameter;
 
        if (phba->nvmet_support) {
@@ -3234,8 +3235,8 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }
 
-       cmdwqe->context2 = NULL;
-       cmdwqe->context3 = NULL;
+       cmdwqe->rsp_dmabuf = NULL;
+       cmdwqe->bpl_dmabuf = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        kfree(ctxp);
 }
@@ -3322,9 +3323,9 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
               OTHER_COMMAND);
 
        abts_wqeq->vport = phba->pport;
-       abts_wqeq->context1 = ndlp;
-       abts_wqeq->context2 = ctxp;
-       abts_wqeq->context3 = NULL;
+       abts_wqeq->ndlp = ndlp;
+       abts_wqeq->context_un.axchg = ctxp;
+       abts_wqeq->bpl_dmabuf = NULL;
        abts_wqeq->num_bdes = 0;
        /* hba_wqidx should already be setup from command we are aborting */
        abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
@@ -3477,7 +3478,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
        abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
        abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
        abts_wqeq->cmd_flag |= LPFC_IO_NVME;
-       abts_wqeq->context2 = ctxp;
+       abts_wqeq->context_un.axchg = ctxp;
        abts_wqeq->vport = phba->pport;
        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
@@ -3630,8 +3631,8 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
 out:
        if (tgtp)
                atomic_inc(&tgtp->xmt_abort_rsp_error);
-       abts_wqeq->context2 = NULL;
-       abts_wqeq->context3 = NULL;
+       abts_wqeq->rsp_dmabuf = NULL;
+       abts_wqeq->bpl_dmabuf = NULL;
        lpfc_sli_release_iocbq(phba, abts_wqeq);
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "6056 Failed to Issue ABTS. Status x%x\n", rc);
index f6b8385..3b8afa9 100644
@@ -433,7 +433,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
                iocb->ulpClass = CLASS3;
                psb->status = IOSTAT_SUCCESS;
                /* Put it back into the SCSI buffer list */
-               psb->cur_iocbq.context1  = psb;
+               psb->cur_iocbq.io_buf = psb;
                spin_lock_init(&psb->buf_lock);
                lpfc_release_scsi_buf_s3(phba, psb);
 
@@ -3835,7 +3835,7 @@ lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
                else
                        time = div_u64(time + 500, 1000); /* round it */
 
-               cgs = this_cpu_ptr(phba->cmf_stat);
+               cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
                atomic64_add(size, &cgs->rcv_bytes);
                atomic64_add(time, &cgs->rx_latency);
                atomic_inc(&cgs->rx_io_cnt);
@@ -3879,7 +3879,7 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
                        atomic_set(&phba->rx_max_read_cnt, size);
        }
 
-       cgs = this_cpu_ptr(phba->cmf_stat);
+       cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
        atomic64_add(size, &cgs->total_bytes);
        return 0;
 }
@@ -4082,8 +4082,7 @@ static void
 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                         struct lpfc_iocbq *pwqeOut)
 {
-       struct lpfc_io_buf *lpfc_cmd =
-               (struct lpfc_io_buf *)pwqeIn->context1;
+       struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
        struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
        struct lpfc_vport *vport = pwqeIn->vport;
        struct lpfc_rport_data *rdata;
@@ -4276,6 +4275,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                        break;
                }
                if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+                   lpfc_cmd->result == IOERR_LINK_DOWN ||
                    lpfc_cmd->result == IOERR_NO_RESOURCES ||
                    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
                    lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
@@ -4420,7 +4420,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        struct lpfc_iocbq *pIocbOut)
 {
        struct lpfc_io_buf *lpfc_cmd =
-               (struct lpfc_io_buf *) pIocbIn->context1;
+               (struct lpfc_io_buf *) pIocbIn->io_buf;
        struct lpfc_vport      *vport = pIocbIn->vport;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
@@ -4743,7 +4743,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
                piocbq->iocb.ulpFCP2Rcvy = 0;
 
        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
-       piocbq->context1  = lpfc_cmd;
+       piocbq->io_buf  = lpfc_cmd;
        if (!piocbq->cmd_cmpl)
                piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = tmo;
@@ -4855,8 +4855,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
        bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
 
        pwqeq->vport = vport;
-       pwqeq->vport = vport;
-       pwqeq->context1 = lpfc_cmd;
+       pwqeq->io_buf = lpfc_cmd;
        pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
        pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
 
@@ -5097,8 +5096,7 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
 {
-       struct lpfc_io_buf *lpfc_cmd =
-               (struct lpfc_io_buf *) cmdiocbq->context1;
+       struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
        if (lpfc_cmd)
                lpfc_release_scsi_buf(phba, lpfc_cmd);
        return;
@@ -5346,9 +5344,9 @@ static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
 {
        u64 *lta;
 
-       if (vport->vmid_priority_tagging)
+       if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
                tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
-       else
+       else if (vport->phba->cfg_vmid_app_header)
                tag->app_id = vmp->un.app_id;
 
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
@@ -5393,11 +5391,12 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
                               scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
 {
        struct lpfc_vmid *vmp = NULL;
-       int hash, len, rc, i;
+       int hash, len, rc = -EPERM, i;
 
        /* check if QFPA is complete */
-       if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
-             LPFC_VMID_QFPA_CMPL)) {
+       if (lpfc_vmid_is_type_priority_tag(vport) &&
+           !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
+           (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
                vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
                return -EAGAIN;
        }
@@ -5471,7 +5470,7 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
                        vport->vmid_inactivity_timeout ? 1 : 0;
 
                /* if type priority tag, get next available VMID */
-               if (lpfc_vmid_is_type_priority_tag(vport))
+               if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
                        lpfc_vmid_assign_cs_ctl(vport, vmp);
 
                /* allocate the per cpu variable for holding */
@@ -5490,9 +5489,9 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
                write_unlock(&vport->vmid_lock);
 
                /* complete transaction with switch */
-               if (lpfc_vmid_is_type_priority_tag(vport))
+               if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
                        rc = lpfc_vmid_uvem(vport, vmp, true);
-               else
+               else if (vport->phba->cfg_vmid_app_header)
                        rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
                if (!rc) {
                        write_lock(&vport->vmid_lock);
@@ -5866,25 +5865,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        if (!lpfc_cmd)
                return ret;
 
-       spin_lock_irqsave(&phba->hbalock, flags);
+       /* Guard against IO completion being called at same time */
+       spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
+
+       spin_lock(&phba->hbalock);
        /* driver queued commands are in process of being flushed */
        if (phba->hba_flag & HBA_IOQ_FLUSH) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                        "3168 SCSI Layer abort requested I/O has been "
                        "flushed by LLD.\n");
                ret = FAILED;
-               goto out_unlock;
+               goto out_unlock_hba;
        }
 
-       /* Guard against IO completion being called at same time */
-       spin_lock(&lpfc_cmd->buf_lock);
-
        if (!lpfc_cmd->pCmd) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                         "2873 SCSI Layer I/O Abort Request IO CMPL Status "
                         "x%x ID %d LUN %llu\n",
                         SUCCESS, cmnd->device->id, cmnd->device->lun);
-               goto out_unlock_buf;
+               goto out_unlock_hba;
        }
 
        iocb = &lpfc_cmd->cur_iocbq;
@@ -5892,7 +5891,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
                pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
                if (!pring_s4) {
                        ret = FAILED;
-                       goto out_unlock_buf;
+                       goto out_unlock_hba;
                }
                spin_lock(&pring_s4->ring_lock);
        }
@@ -5917,7 +5916,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
                goto out_unlock_ring;
        }
 
-       BUG_ON(iocb->context1 != lpfc_cmd);
+       WARN_ON(iocb->io_buf != lpfc_cmd);
 
        /* abort issued in recovery is still in progress */
        if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
@@ -5925,8 +5924,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
                         "3389 SCSI Layer I/O Abort Request is pending\n");
                if (phba->sli_rev == LPFC_SLI_REV4)
                        spin_unlock(&pring_s4->ring_lock);
-               spin_unlock(&lpfc_cmd->buf_lock);
-               spin_unlock_irqrestore(&phba->hbalock, flags);
+               spin_unlock(&phba->hbalock);
+               spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
                goto wait_for_cmpl;
        }
 
@@ -5947,15 +5946,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        if (ret_val != IOCB_SUCCESS) {
                /* Indicate the IO is not being aborted by the driver. */
                lpfc_cmd->waitq = NULL;
-               spin_unlock(&lpfc_cmd->buf_lock);
-               spin_unlock_irqrestore(&phba->hbalock, flags);
                ret = FAILED;
-               goto out;
+               goto out_unlock_hba;
        }
 
        /* no longer need the lock after this point */
-       spin_unlock(&lpfc_cmd->buf_lock);
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+       spin_unlock(&phba->hbalock);
+       spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
 
        if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                lpfc_sli_handle_fast_ring_event(phba,
@@ -5990,10 +5987,9 @@ wait_for_cmpl:
 out_unlock_ring:
        if (phba->sli_rev == LPFC_SLI_REV4)
                spin_unlock(&pring_s4->ring_lock);
-out_unlock_buf:
-       spin_unlock(&lpfc_cmd->buf_lock);
-out_unlock:
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+out_unlock_hba:
+       spin_unlock(&phba->hbalock);
+       spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
 out:
        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                         "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
index 6adaf79..6ed696c 100644
@@ -1254,19 +1254,19 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
 
        cmnd = get_job_cmnd(phba, piocbq);
 
-       if (piocbq->cmd_flag &  LPFC_IO_FCP) {
-               lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
+       if (piocbq->cmd_flag & LPFC_IO_FCP) {
+               lpfc_cmd = piocbq->io_buf;
                ndlp = lpfc_cmd->rdata->pnode;
        } else  if ((cmnd == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
-               ndlp = piocbq->context_un.ndlp;
+               ndlp = piocbq->ndlp;
        } else  if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
                if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
-                       ndlp = piocbq->context_un.ndlp;
+                       ndlp = piocbq->ndlp;
        } else {
-               ndlp = piocbq->context1;
+               ndlp = piocbq->ndlp;
        }
 
        spin_lock(&phba->sli4_hba.sgl_list_lock);
@@ -1373,7 +1373,7 @@ static void
 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
        struct lpfc_sglq *sglq;
-       size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+       size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;
 
@@ -1996,9 +1996,9 @@ initpath:
 
        sync_buf->vport = phba->pport;
        sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
-       sync_buf->context1 = NULL;
-       sync_buf->context2 = NULL;
-       sync_buf->context3 = NULL;
+       sync_buf->cmd_dmabuf = NULL;
+       sync_buf->rsp_dmabuf = NULL;
+       sync_buf->bpl_dmabuf = NULL;
        sync_buf->sli4_xritag = NO_XRI;
 
        sync_buf->cmd_flag |= LPFC_IO_CMF;
@@ -2848,19 +2848,11 @@ void
 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport  *vport = pmb->vport;
-       struct lpfc_dmabuf *mp;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
        uint16_t rpi, vpi;
        int rc;
 
-       mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
-
-       if (mp) {
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
-       }
-
        /*
         * If a REG_LOGIN succeeded  after node is destroyed or node
         * is in re-discovery driver need to cleanup the RPI.
@@ -2893,8 +2885,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
                lpfc_nlp_put(ndlp);
-               pmb->ctx_buf = NULL;
-               pmb->ctx_ndlp = NULL;
        }
 
        if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
@@ -2945,7 +2935,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
                lpfc_sli4_mbox_cmd_free(phba, pmb);
        else
-               mempool_free(pmb, phba->mbox_mem_pool);
+               lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
 }
  /**
  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
@@ -3197,7 +3187,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
        uint32_t oxid, sid, did, fctl, size;
        int ret = 1;
 
-       d_buf = piocb->context2;
+       d_buf = piocb->cmd_dmabuf;
 
        nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
        fc_hdr = nvmebuf->hbuf.virt;
@@ -3478,9 +3468,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                if (irsp->ulpBdeCount != 0) {
-                       saveq->context2 = lpfc_sli_get_buff(phba, pring,
+                       saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
                                                irsp->un.ulpWord[3]);
-                       if (!saveq->context2)
+                       if (!saveq->cmd_dmabuf)
                                lpfc_printf_log(phba,
                                        KERN_ERR,
                                        LOG_SLI,
@@ -3490,9 +3480,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                                        irsp->un.ulpWord[3]);
                }
                if (irsp->ulpBdeCount == 2) {
-                       saveq->context3 = lpfc_sli_get_buff(phba, pring,
+                       saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
                                                irsp->unsli3.sli3Words[7]);
-                       if (!saveq->context3)
+                       if (!saveq->bpl_dmabuf)
                                lpfc_printf_log(phba,
                                        KERN_ERR,
                                        LOG_SLI,
@@ -3504,10 +3494,10 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                list_for_each_entry(iocbq, &saveq->list, list) {
                        irsp = &iocbq->iocb;
                        if (irsp->ulpBdeCount != 0) {
-                               iocbq->context2 = lpfc_sli_get_buff(phba,
+                               iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
                                                        pring,
                                                        irsp->un.ulpWord[3]);
-                               if (!iocbq->context2)
+                               if (!iocbq->cmd_dmabuf)
                                        lpfc_printf_log(phba,
                                                KERN_ERR,
                                                LOG_SLI,
@@ -3517,10 +3507,10 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                                                irsp->un.ulpWord[3]);
                        }
                        if (irsp->ulpBdeCount == 2) {
-                               iocbq->context3 = lpfc_sli_get_buff(phba,
+                               iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
                                                pring,
                                                irsp->unsli3.sli3Words[7]);
-                               if (!iocbq->context3)
+                               if (!iocbq->bpl_dmabuf)
                                        lpfc_printf_log(phba,
                                                KERN_ERR,
                                                LOG_SLI,
@@ -3534,12 +3524,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        } else {
                paddr = getPaddr(irsp->un.cont64[0].addrHigh,
                                 irsp->un.cont64[0].addrLow);
-               saveq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+               saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
                                                             paddr);
                if (irsp->ulpBdeCount == 2) {
                        paddr = getPaddr(irsp->un.cont64[1].addrHigh,
                                         irsp->un.cont64[1].addrLow);
-                       saveq->context3 = lpfc_sli_ringpostbuf_get(phba,
+                       saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
                                                                   pring,
                                                                   paddr);
                }
@@ -3717,7 +3707,6 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                          struct lpfc_iocbq *saveq)
 {
        struct lpfc_iocbq *cmdiocbp;
-       int rc = 1;
        unsigned long iflag;
        u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
 
@@ -3857,7 +3846,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                }
        }
 
-       return rc;
+       return 1;
 }
 
 /**
@@ -5275,6 +5264,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
        phba->pport->stopped = 0;
        phba->link_state = LPFC_INIT_START;
        phba->hba_flag = 0;
+       phba->sli4_hba.fawwpn_flag = 0;
        spin_unlock_irq(&phba->hbalock);
 
        memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -5851,26 +5841,20 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
                        mboxq->mcqe.trailer);
 
        if (rc) {
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
                rc = -EIO;
                goto out_free_mboxq;
        }
        data_length = mqe->un.mb_words[5];
        if (data_length > DMP_RGN23_SIZE) {
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
                rc = -EIO;
                goto out_free_mboxq;
        }
 
        lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
        rc = 0;
 
 out_free_mboxq:
-       mempool_free(mboxq, phba->mbox_mem_pool);
+       lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
        return rc;
 }
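This hunk, like several others in the series, folds the repeated lpfc_mbuf_free()/kfree()/mempool_free() sequence into one lpfc_mbox_rsrc_cleanup() call, with the MBOX_THD_LOCKED/UNLOCKED argument recording whether the caller already holds the hba lock. A tiny stand-alone sketch of that consolidation; the helper and field names below are hypothetical, not the lpfc API:

#include <stdio.h>
#include <stdlib.h>

struct mbox {
        void *ctx_buf;   /* optional DMA-buffer analog owned by the mailbox */
};

/* One helper owns the whole teardown, so call sites can no longer leak
 * the attached buffer or free things in the wrong order. */
static void mbox_rsrc_cleanup(struct mbox *mb)
{
        if (!mb)
                return;
        free(mb->ctx_buf);   /* harmless no-op when nothing was attached */
        free(mb);
        printf("mailbox resources released\n");
}

static struct mbox *mbox_alloc(size_t payload)
{
        struct mbox *mb = calloc(1, sizeof(*mb));

        if (!mb)
                return NULL;
        if (payload) {
                mb->ctx_buf = calloc(1, payload);
                if (!mb->ctx_buf) {
                        free(mb);
                        return NULL;
                }
        }
        return mb;
}

int main(void)
{
        mbox_rsrc_cleanup(mbox_alloc(128));  /* mailbox with a payload    */
        mbox_rsrc_cleanup(mbox_alloc(0));    /* mailbox without a payload */
        return 0;
}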
 
@@ -7994,10 +7978,6 @@ lpfc_cmf_setup(struct lpfc_hba *phba)
 
        sli4_params = &phba->sli4_hba.pc_sli4_params;
 
-       /* Are we forcing MI off via module parameter? */
-       if (!phba->cfg_enable_mi)
-               sli4_params->mi_ver = 0;
-
        /* Always try to enable MI feature if we can */
        if (sli4_params->mi_ver) {
                lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
@@ -8543,8 +8523,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
        }
 
        /*
-        * This memory was allocated by the lpfc_read_sparam routine. Release
-        * it to the mbuf pool.
+        * This memory was allocated by the lpfc_read_sparam routine but is
+        * no longer needed.  It is released and ctx_buf NULLed to prevent
+        * unintended pointer access as the mbox is reused.
         */
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
@@ -8864,6 +8845,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
        }
        mempool_free(mboxq, phba->mbox_mem_pool);
 
+       /* Enable RAS FW log support */
+       lpfc_sli4_ras_setup(phba);
+
        phba->hba_flag |= HBA_SETUP;
        return rc;
 
@@ -10343,8 +10327,7 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
                           struct lpfc_iocbq *piocb, uint32_t flag)
 {
        int rc;
-       struct lpfc_io_buf *lpfc_cmd =
-               (struct lpfc_io_buf *)piocb->context1;
+       struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
 
        lpfc_prep_embed_io(phba, lpfc_cmd);
        rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
@@ -10394,11 +10377,11 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 
        /* add the VMID tags as per switch response */
        if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
-               if (phba->pport->vmid_priority_tagging) {
+               if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
                        bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
                        bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
                                        (piocb->vmid_tag.cs_ctl_vmid));
-               } else {
+               } else if (phba->cfg_vmid_app_header) {
                        bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
                        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
                        wqe->words[31] = piocb->vmid_tag.app_id;
@@ -10599,6 +10582,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
        struct lpfc_hba  *phba = vport->phba;
        union lpfc_wqe128 *wqe;
        struct ulp_bde64_le *bde;
+       u8 els_id;
 
        wqe = &cmdiocbq->wqe;
        memset(wqe, 0, sizeof(*wqe));
@@ -10611,7 +10595,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
        bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
 
        if (expect_rsp) {
-               bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_CR);
+               bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
 
                /* Transfer length */
                wqe->els_req.payload_len = cmd_size;
@@ -10619,6 +10603,30 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
 
                /* DID */
                bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
+
+               /* Word 11 - ELS_ID */
+               switch (elscmd) {
+               case ELS_CMD_PLOGI:
+                       els_id = LPFC_ELS_ID_PLOGI;
+                       break;
+               case ELS_CMD_FLOGI:
+                       els_id = LPFC_ELS_ID_FLOGI;
+                       break;
+               case ELS_CMD_LOGO:
+                       els_id = LPFC_ELS_ID_LOGO;
+                       break;
+               case ELS_CMD_FDISC:
+                       if (!vport->fc_myDID) {
+                               els_id = LPFC_ELS_ID_FDISC;
+                               break;
+                       }
+                       fallthrough;
+               default:
+                       els_id = LPFC_ELS_ID_DEFAULT;
+                       break;
+               }
+
+               bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
        } else {
                /* DID */
                bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
@@ -10627,7 +10635,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
                wqe->xmit_els_rsp.response_payload_len = cmd_size;
 
                bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
-                      CMD_XMIT_ELS_RSP64_CX);
+                      CMD_XMIT_ELS_RSP64_WQE);
        }
 
        bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
@@ -10643,7 +10651,7 @@ __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
                if (expect_rsp) {
                        bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
 
-                       /* For ELS_REQUEST64_CR, use the VPI by default */
+                       /* For ELS_REQUEST64_WQE, use the VPI by default */
                        bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
                               phba->vpi_ids[vport->vpi]);
                }
@@ -10800,24 +10808,15 @@ __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
 {
        union lpfc_wqe128 *wqe;
        struct ulp_bde64 *bpl;
-       struct ulp_bde64_le *bde;
 
        wqe = &cmdiocbq->wqe;
        memset(wqe, 0, sizeof(*wqe));
 
        /* Words 0 - 2 */
        bpl = (struct ulp_bde64 *)bmp->virt;
-       if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
-               wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
-               wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
-               wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
-       } else {
-               bde = (struct ulp_bde64_le *)&wqe->xmit_sequence.bde;
-               bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
-               bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
-               bde->type_size = cpu_to_le32(bpl->tus.f.bdeSize);
-               bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
-       }
+       wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
+       wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
+       wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
 
        /* Word 5 */
        bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
@@ -10990,7 +10989,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
                 * be setup based on what work queue we used.
                 */
                if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
-                       lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
+                       lpfc_cmd = piocb->io_buf;
                        piocb->hba_wqidx = lpfc_cmd->hdwq_no;
                }
                return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
@@ -12064,8 +12063,9 @@ void
 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                     struct lpfc_iocbq *rspiocb)
 {
-       struct lpfc_nodelist *ndlp = NULL;
+       struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
        IOCB_t *irsp;
+       LPFC_MBOXQ_t *mbox;
        u32 ulp_command, ulp_status, ulp_word4, iotag;
 
        ulp_command = get_job_cmnd(phba, cmdiocb);
@@ -12077,25 +12077,32 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        } else {
                irsp = &rspiocb->iocb;
                iotag = irsp->ulpIoTag;
+
+               /* A PLOGI_RJT for NPIV ports can get aborted.
+                * The MBX_REG_LOGIN64 mbox command is freed back to the
+                * mbox_mem_pool here.
+                */
+               if (cmdiocb->context_un.mbox) {
+                       mbox = cmdiocb->context_un.mbox;
+                       lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+                       cmdiocb->context_un.mbox = NULL;
+               }
        }
 
        /* ELS cmd tag <ulpIoTag> completes */
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "0139 Ignoring ELS cmd code x%x completion Data: "
-                       "x%x x%x x%x\n",
-                       ulp_command, ulp_status, ulp_word4, iotag);
-
+                       "x%x x%x x%x x%px\n",
+                       ulp_command, ulp_status, ulp_word4, iotag,
+                       cmdiocb->ndlp);
        /*
         * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
         * if exchange is busy.
         */
-       if (ulp_command == CMD_GEN_REQUEST64_CR) {
-               ndlp = cmdiocb->context_un.ndlp;
+       if (ulp_command == CMD_GEN_REQUEST64_CR)
                lpfc_ct_free_iocb(phba, cmdiocb);
-       } else {
-               ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       else
                lpfc_els_free_iocb(phba, cmdiocb);
-       }
 
        lpfc_nlp_put(ndlp);
 }
@@ -12176,7 +12183,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        } else {
                iotag = cmdiocb->iocb.ulpIoTag;
                if (pring->ringno == LPFC_ELS_RING) {
-                       ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
+                       ndlp = cmdiocb->ndlp;
                        ulp_context = ndlp->nlp_rpi;
                } else {
                        ulp_context = cmdiocb->iocb.ulpContext;
@@ -12185,7 +12192,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
        if (phba->link_state < LPFC_LINK_UP ||
            (phba->sli_rev == LPFC_SLI_REV4 &&
-            phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
+            phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
+           (phba->link_flag & LS_EXTERNAL_LOOPBACK))
                ia = true;
        else
                ia = false;
@@ -12634,7 +12642,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                } else {
                        iotag = iocbq->iocb.ulpIoTag;
                        if (pring->ringno == LPFC_ELS_RING) {
-                               ndlp = (struct lpfc_nodelist *)(iocbq->context1);
+                               ndlp = iocbq->ndlp;
                                ulp_context = ndlp->nlp_rpi;
                        } else {
                                ulp_context = iocbq->iocb.ulpContext;
@@ -12644,7 +12652,8 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                ndlp = lpfc_cmd->rdata->pnode;
 
                if (lpfc_is_link_up(phba) &&
-                   (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
+                   (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
+                   !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
                        ia = false;
                else
                        ia = true;
@@ -12739,8 +12748,8 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
 
        /* Copy the contents of the local rspiocb into the caller's buffer. */
        cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
-       if (cmdiocbq->context2 && rspiocbq)
-               memcpy((char *)cmdiocbq->context2 + offset,
+       if (cmdiocbq->rsp_iocb && rspiocbq)
+               memcpy((char *)cmdiocbq->rsp_iocb + offset,
                       (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
 
        /* Set the exchange busy flag for task management commands */
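The wake handler above copies only the tail of the locally completed response into the caller-supplied rsp_iocb buffer, starting at a member offset computed elsewhere in the function (not shown in this hunk), so the caller's bookkeeping fields at the front of the structure survive the copy. A self-contained illustration of that offsetof()-based partial copy; the structure here is illustrative, not lpfc_iocbq:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct iocb {
        void *list_prev;   /* caller bookkeeping: must survive the copy */
        void *list_next;   /* caller bookkeeping: must survive the copy */
        int   status;      /* completion data: wanted by the caller     */
        int   word4;       /* completion data: wanted by the caller     */
};

int main(void)
{
        struct iocb caller_rsp = { &caller_rsp, &caller_rsp, 0, 0 };
        struct iocb local_rsp  = { NULL, NULL, 3, 0x11 };

        /* Copy from 'status' to the end of the structure only, leaving the
         * caller's list pointers at the front of caller_rsp untouched. */
        size_t offset = offsetof(struct iocb, status);

        memcpy((char *)&caller_rsp + offset, (char *)&local_rsp + offset,
               sizeof(local_rsp) - offset);

        printf("links intact: %d, status=%d word4=0x%x\n",
               caller_rsp.list_prev == &caller_rsp,
               caller_rsp.status, caller_rsp.word4);
        return 0;
}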
@@ -12848,13 +12857,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
        } else
                pring = &phba->sli.sli3_ring[ring_number];
        /*
-        * If the caller has provided a response iocbq buffer, then context2
+        * If the caller has provided a response iocbq buffer, then rsp_iocb
         * is NULL or it's an error.
         */
        if (prspiocbq) {
-               if (piocb->context2)
+               if (piocb->rsp_iocb)
                        return IOCB_ERROR;
-               piocb->context2 = prspiocbq;
+               piocb->rsp_iocb = prspiocbq;
        }
 
        piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
@@ -12938,7 +12947,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
        }
 
        if (prspiocbq)
-               piocb->context2 = NULL;
+               piocb->rsp_iocb = NULL;
 
        piocb->context_un.wait_queue = NULL;
        piocb->cmd_cmpl = NULL;
@@ -15732,7 +15741,6 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
 
        mbox->vport = phba->pport;
        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-       mbox->ctx_buf = NULL;
        mbox->ctx_ndlp = NULL;
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
@@ -18107,7 +18115,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        case FC_RCTL_ELS_REP:   /* extended link services reply */
        case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
        case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
-       case FC_RCTL_BA_NOP:    /* basic link service NOP */
        case FC_RCTL_BA_ABTS:   /* basic link service abort */
        case FC_RCTL_BA_RMC:    /* remove connection */
        case FC_RCTL_BA_ACC:    /* basic accept */
@@ -18128,6 +18135,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
                fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
                fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
                return lpfc_fc_frame_check(phba, fc_hdr);
+       case FC_RCTL_BA_NOP:    /* basic link service NOP */
        default:
                goto drop;
        }
@@ -18512,11 +18520,8 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
                             struct lpfc_iocbq *cmd_iocbq,
                             struct lpfc_iocbq *rsp_iocbq)
 {
-       struct lpfc_nodelist *ndlp;
-
        if (cmd_iocbq) {
-               ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
-               lpfc_nlp_put(ndlp);
+               lpfc_nlp_put(cmd_iocbq->ndlp);
                lpfc_sli_release_iocbq(phba, cmd_iocbq);
        }
 
@@ -18600,8 +18605,8 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
        /* Extract the F_CTL field from FC_HDR */
        fctl = sli4_fctl_from_fc_hdr(fc_hdr);
 
-       ctiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!ctiocb->context1) {
+       ctiocb->ndlp = lpfc_nlp_get(ndlp);
+       if (!ctiocb->ndlp) {
                lpfc_sli_release_iocbq(phba, ctiocb);
                return;
        }
@@ -18677,13 +18682,11 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
 
-
        /* Xmit CT abts response on exchange <xid> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
                         ctiocb->abort_rctl, oxid, phba->link_state);
 
-       lpfc_sli_prep_wqe(phba, ctiocb);
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
        if (rc == IOCB_ERROR) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -18692,7 +18695,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
                                 ctiocb->abort_rctl, oxid,
                                 phba->link_state);
                lpfc_nlp_put(ndlp);
-               ctiocb->context1 = NULL;
+               ctiocb->ndlp = NULL;
                lpfc_sli_release_iocbq(phba, ctiocb);
        }
 }
@@ -18844,8 +18847,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
                tot_len = bf_get(lpfc_rcqe_length,
                                 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 
-               first_iocbq->context2 = &seq_dmabuf->dbuf;
-               first_iocbq->context3 = NULL;
+               first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
+               first_iocbq->bpl_dmabuf = NULL;
                /* Keep track of the BDE count */
                first_iocbq->wcqe_cmpl.word3 = 1;
 
@@ -18869,8 +18872,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
                        lpfc_in_buf_free(vport->phba, d_buf);
                        continue;
                }
-               if (!iocbq->context3) {
-                       iocbq->context3 = d_buf;
+               if (!iocbq->bpl_dmabuf) {
+                       iocbq->bpl_dmabuf = d_buf;
                        iocbq->wcqe_cmpl.word3++;
                        /* We need to get the size out of the right CQE */
                        hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -18896,8 +18899,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
                        hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
                        len = bf_get(lpfc_rcqe_length,
                                       &hbq_buf->cq_event.cqe.rcqe_cmpl);
-                       iocbq->context2 = d_buf;
-                       iocbq->context3 = NULL;
+                       iocbq->cmd_dmabuf = d_buf;
+                       iocbq->bpl_dmabuf = NULL;
                        iocbq->wcqe_cmpl.word3 = 1;
 
                        if (len > LPFC_DATA_BUF_SIZE)
@@ -18942,12 +18945,14 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
        if (!lpfc_complete_unsol_iocb(phba,
                                      phba->sli4_hba.els_wq->pring,
                                      iocbq, fc_hdr->fh_r_ctl,
-                                     fc_hdr->fh_type))
+                                     fc_hdr->fh_type)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2540 Ring %d handler: unexpected Rctl "
                                "x%x Type x%x received\n",
                                LPFC_ELS_RING,
                                fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+               lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
+       }
 
        /* Free iocb created in lpfc_prep_seq */
        list_for_each_entry_safe(curr_iocb, next_iocb,
@@ -18962,7 +18967,7 @@ static void
 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                            struct lpfc_iocbq *rspiocb)
 {
-       struct lpfc_dmabuf *pcmd = cmdiocb->context2;
+       struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
 
        if (pcmd && pcmd->virt)
                dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
@@ -19013,7 +19018,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
        /* copyin the payload */
        memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
 
-       iocbq->context2 = pcmd;
+       iocbq->cmd_dmabuf = pcmd;
        iocbq->vport = vport;
        iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
        iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
@@ -20332,11 +20337,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
        }
        lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
 out:
-       mempool_free(mboxq, phba->mbox_mem_pool);
-       if (mp) {
-               lpfc_mbuf_free(phba, mp->virt, mp->phys);
-               kfree(mp);
-       }
+       lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
        return data_length;
 }
 
@@ -20651,7 +20652,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 {
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mb, *nextmb;
-       struct lpfc_dmabuf *mp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_nodelist *act_mbx_ndlp = NULL;
        LIST_HEAD(mbox_cmd_list);
@@ -20677,8 +20677,12 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
                        act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
-                       /* Put reference count for delayed processing */
+
+                       /* This reference is local to this routine.  The
+                        * reference is removed at routine exit.
+                        */
                        act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
+
                        /* Unregister the RPI when mailbox complete */
                        mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
                }
@@ -20721,12 +20725,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
        while (!list_empty(&mbox_cmd_list)) {
                list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
                if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
-                       mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
-                       if (mp) {
-                               __lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                               kfree(mp);
-                       }
-                       mb->ctx_buf = NULL;
                        ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
                        mb->ctx_ndlp = NULL;
                        if (ndlp) {
@@ -20736,7 +20734,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
                                lpfc_nlp_put(ndlp);
                        }
                }
-               mempool_free(mb, phba->mbox_mem_pool);
+               lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
        }
 
        /* Release the ndlp with the cleaned-up active mailbox command */
@@ -20888,8 +20886,8 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
                 * have not been byteswapped yet so there is no
                 * need to swap them back.
                 */
-               if (pwqeq->context3)
-                       dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
+               if (pwqeq->bpl_dmabuf)
+                       dmabuf = pwqeq->bpl_dmabuf;
                else
                        return xritag;
 
@@ -21041,7 +21039,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
                wq = qp->io_wq;
                pring = wq->pring;
 
-               ctxp = pwqe->context2;
+               ctxp = pwqe->context_un.axchg;
                sglq = ctxp->ctxbuf->sglq;
                if (pwqe->sli4_xritag ==  NO_XRI) {
                        pwqe->sli4_lxritag = sglq->sli4_lxritag;
@@ -21107,7 +21105,7 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        abtswqe = &abtsiocb->wqe;
        memset(abtswqe, 0, sizeof(*abtswqe));
 
-       if (!lpfc_is_link_up(phba))
+       if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
                bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
        bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
        abtswqe->abort_cmd.rsrvd5 = 0;
@@ -21883,7 +21881,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
 
        mbox->vport = phba->pport;
        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-       mbox->ctx_buf = NULL;
        mbox->ctx_ndlp = NULL;
 
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -21920,9 +21917,12 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
        }
 
  exit:
+       /* This is an embedded SLI4 mailbox with an external buffer allocated.
+        * Free the pcmd and then clean up with the correct routine.
+        */
        lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
        kfree(pcmd);
-       mempool_free(mbox, phba->mbox_mem_pool);
+       lpfc_sli4_mbox_cmd_free(phba, mbox);
        return byte_cnt;
 }
 
@@ -22114,7 +22114,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                        return NULL;
                }
 
-               tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+               tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
                                                GFP_ATOMIC,
                                                &tmp->fcp_cmd_rsp_dma_handle);
 
@@ -22236,8 +22236,6 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
        u32 fip, abort_tag;
        struct lpfc_nodelist *ndlp = NULL;
        union lpfc_wqe128 *wqe = &job->wqe;
-       struct lpfc_dmabuf *context2;
-       u32 els_id = LPFC_ELS_ID_DEFAULT;
        u8 command_type = ELS_COMMAND_NON_FIP;
 
        fip = phba->hba_flag & HBA_FIP_SUPPORT;
@@ -22254,21 +22252,12 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
 
        switch (cmnd) {
        case CMD_ELS_REQUEST64_WQE:
-               if (job->cmd_flag & LPFC_IO_LIBDFC)
-                       ndlp = job->context_un.ndlp;
-               else
-                       ndlp = (struct lpfc_nodelist *)job->context1;
-
-               /* CCP CCPE PV PRI in word10 were set in the memcpy */
-               if (command_type == ELS_COMMAND_FIP)
-                       els_id = ((job->cmd_flag & LPFC_FIP_ELS_ID_MASK)
-                                 >> LPFC_FIP_ELS_ID_SHIFT);
+               ndlp = job->ndlp;
 
                if_type = bf_get(lpfc_sli_intf_if_type,
                                 &phba->sli4_hba.sli_intf);
                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
-                       context2 = (struct lpfc_dmabuf *)job->context2;
-                       pcmd = (u32 *)context2->virt;
+                       pcmd = (u32 *)job->cmd_dmabuf->virt;
                        if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
                                     *pcmd == ELS_CMD_SCR ||
                                     *pcmd == ELS_CMD_RDF ||
@@ -22301,7 +22290,6 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
                bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
 
-               bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
                bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
                bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
                bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
@@ -22309,7 +22297,7 @@ lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
                bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
                break;
        case CMD_XMIT_ELS_RSP64_WQE:
-               ndlp = (struct lpfc_nodelist *)job->context1;
+               ndlp = job->ndlp;
 
                /* word4 */
                wqe->xmit_els_rsp.word4 = 0;
index 663cc90..0af6860 100644 (file)
@@ -35,6 +35,12 @@ typedef enum _lpfc_ctx_cmd {
        LPFC_CTX_HOST
 } lpfc_ctx_cmd;
 
+/* Enumeration to describe the thread lock context. */
+enum lpfc_mbox_ctx {
+       MBOX_THD_UNLOCKED,
+       MBOX_THD_LOCKED
+};
+
 union lpfc_vmid_tag {
        uint32_t app_id;
        uint8_t cs_ctl_vmid;
@@ -77,11 +83,15 @@ struct lpfc_iocbq {
 
        u32 unsol_rcv_len;      /* Receive len in usol path */
 
-       uint8_t num_bdes;
-       uint8_t abort_bls;      /* ABTS by initiator or responder */
-       u8 abort_rctl;          /* ACC or RJT flag */
-       uint8_t priority;       /* OAS priority */
-       uint8_t retry;          /* retry counter for IOCB cmd - if needed */
+       /* Pack the u8's together and pad them to a multiple of 4 bytes. */
+       u8 num_bdes;    /* Number of BDEs */
+       u8 abort_bls;   /* ABTS by initiator or responder */
+       u8 abort_rctl;  /* ACC or RJT flag */
+       u8 priority;    /* OAS priority */
+       u8 retry;       /* retry counter for IOCB cmd - if needed */
+       u8 rsvd1;       /* Pad for u32 */
+       u8 rsvd2;       /* Pad for u32 */
+       u8 rsvd3;       /* Pad for u32 */
 
        u32 cmd_flag;
 #define LPFC_IO_LIBDFC         1       /* libdfc iocb */
@@ -116,18 +126,22 @@ struct lpfc_iocbq {
 
        uint32_t drvrTimeout;   /* driver timeout in seconds */
        struct lpfc_vport *vport;/* virtual port pointer */
-       void *context1;         /* caller context information */
-       void *context2;         /* caller context information */
-       void *context3;         /* caller context information */
+       struct lpfc_dmabuf *cmd_dmabuf;
+       struct lpfc_dmabuf *rsp_dmabuf;
+       struct lpfc_dmabuf *bpl_dmabuf;
        uint32_t event_tag;     /* LA Event tag */
        union {
                wait_queue_head_t    *wait_queue;
-               struct lpfc_iocbq    *rsp_iocb;
                struct lpfcMboxq     *mbox;
-               struct lpfc_nodelist *ndlp;
                struct lpfc_node_rrq *rrq;
+               struct nvmefc_ls_req *nvme_lsreq;
+               struct lpfc_async_xchg_ctx *axchg;
+               struct bsg_job_data *dd_data;
        } context_un;
 
+       struct lpfc_io_buf *io_buf;
+       struct lpfc_iocbq *rsp_iocb;
+       struct lpfc_nodelist *ndlp;
        union lpfc_vmid_tag vmid_tag;
        void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
                                struct lpfc_iocbq *rsp);
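
This struct change is what drives the earlier lpfc_sli.c hunks: the untyped context1/context2/context3 pointers become dedicated, typed members, so call sites read, for example:

    /* before: callers stored DMA buffers through void pointers */
    iocbq->context2 = d_buf;
    iocbq->context3 = NULL;

    /* after: the same information goes into typed fields */
    iocbq->cmd_dmabuf = d_buf;
    iocbq->bpl_dmabuf = NULL;

The ndlp, rsp_iocb and io_buf pointers likewise move out of the context_un union into standalone members, so they can coexist with the union members that remain.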
index e0c2569..1ddad5b 100644 (file)
@@ -981,6 +981,9 @@ struct lpfc_sli4_hba {
 #define lpfc_conf_trunk_port3_nd_MASK  0x1
        uint8_t flash_id;
        uint8_t asic_rev;
+       uint16_t fawwpn_flag;   /* FA-WWPN support state */
+#define LPFC_FAWWPN_CONFIG     0x1 /* FA-PWWN is configured */
+#define LPFC_FAWWPN_FABRIC     0x2 /* FA-PWWN success with Fabric */
 };
 
 enum lpfc_sge_type {
index a4d3259..4fab79e 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.2.0.1"
+#define LPFC_DRIVER_VERSION "14.2.0.3"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index d694d0c..e7efb02 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -135,12 +135,14 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
        }
 
        /*
-        * Grab buffer pointer and clear context1 so we can use
-        * lpfc_sli_issue_box_wait
+        * Wait for the read_sparams mailbox to complete.  Driver needs
+        * this per vport to start the FDISC.  If the mailbox fails,
+        * just cleanup and return an error unless the failure is a
+        * just clean up and return an error unless the failure is a
+        * mbox completion handler to take care of the cleanup.  This
+        * is safe as the mailbox command isn't one that triggers
+        * another mailbox.
         */
-       mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
-       pmb->ctx_buf = NULL;
-
        pmb->vport = vport;
        rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
        if (rc != MBX_SUCCESS) {
@@ -148,34 +150,29 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1830 Signal aborted mbxCmd x%x\n",
                                         mb->mbxCommand);
-                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                       kfree(mp);
                        if (rc != MBX_TIMEOUT)
-                               mempool_free(pmb, phba->mbox_mem_pool);
+                               lpfc_mbox_rsrc_cleanup(phba, pmb,
+                                                      MBOX_THD_UNLOCKED);
                        return -EINTR;
                } else {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "1818 VPort failed init, mbxCmd x%x "
                                         "READ_SPARM mbxStatus x%x, rc = x%x\n",
                                         mb->mbxCommand, mb->mbxStatus, rc);
-                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-                       kfree(mp);
                        if (rc != MBX_TIMEOUT)
-                               mempool_free(pmb, phba->mbox_mem_pool);
+                               lpfc_mbox_rsrc_cleanup(phba, pmb,
+                                                      MBOX_THD_UNLOCKED);
                        return -EIO;
                }
        }
 
+       mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof (struct lpfc_name));
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof (struct lpfc_name));
-
-       lpfc_mbuf_free(phba, mp->virt, mp->phys);
-       kfree(mp);
-       mempool_free(pmb, phba->mbox_mem_pool);
-
+       lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
        return 0;
 }
 
index 070ebe3..f75928f 100644 (file)
@@ -234,7 +234,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
                ++mac53c94_errors;
                writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
        }
-       if (cmd == 0) {
+       if (!cmd) {
                printk(KERN_DEBUG "53c94: interrupt with no command active?\n");
                return;
        }
index a5d8cee..bf491af 100644 (file)
@@ -4607,7 +4607,7 @@ static int __init megaraid_init(void)
         * major number allocation.
         */
        major = register_chrdev(0, "megadev_legacy", &megadev_fops);
-       if (!major) {
+       if (major < 0) {
                printk(KERN_WARNING
                                "megaraid: failed to register char device\n");
        }
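
The corrected check reflects how dynamic character-device registration reports errors: when register_chrdev() is called with a major of 0 it returns the allocated major number (a positive value) on success and a negative errno on failure, so testing for 0 can never catch a failure. A minimal, hypothetical module skeleton illustrating the corrected pattern (the device name and fops here are placeholders, not megaraid code):

    #include <linux/fs.h>
    #include <linux/module.h>

    static const struct file_operations example_fops = {
            .owner = THIS_MODULE,
    };

    static int example_major;

    static int __init example_init(void)
    {
            /* Major 0 requests dynamic allocation; negative means failure. */
            example_major = register_chrdev(0, "example_legacy", &example_fops);
            if (example_major < 0)
                    return example_major;
            return 0;
    }

    static void __exit example_exit(void)
    {
            unregister_chrdev(example_major, "example_legacy");
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");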
index db67936..c95360a 100644 (file)
@@ -4473,8 +4473,6 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
                return -ENOMEM;
        }
 
-       memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
-
        for (i = 0; i < max_cmd; i++) {
                instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
                                                GFP_KERNEL);
index 54fde26..5b5885d 100644 (file)
@@ -2047,8 +2047,6 @@ map_cmd_status(struct fusion_context *fusion,
 
                scmd->result = (DID_OK << 16) | ext_status;
                if (ext_status == SAM_STAT_CHECK_CONDITION) {
-                       memset(scmd->sense_buffer, 0,
-                              SCSI_SENSE_BUFFERSIZE);
                        memcpy(scmd->sense_buffer, sense,
                               SCSI_SENSE_BUFFERSIZE);
                }
index f788237..8997531 100644 (file)
@@ -3,5 +3,6 @@
 config SCSI_MPI3MR
        tristate "Broadcom MPI3 Storage Controller Device Driver"
        depends on PCI && SCSI
+       select BLK_DEV_BSGLIB
        help
        MPI3 based Storage & RAID Controllers Driver.
index 7c2063e..f5cdbe4 100644 (file)
@@ -2,3 +2,4 @@
 obj-m += mpi3mr.o
 mpi3mr-y +=  mpi3mr_os.o     \
                mpi3mr_fw.o \
+               mpi3mr_app.o \
index e2e8b22..aac11c5 100644 (file)
@@ -115,57 +115,4 @@ struct mpi3_scsi_io_reply {
 #define MPI3_SCSI_RSP_ARI0_MASK                 (0xff000000)
 #define MPI3_SCSI_RSP_ARI0_SHIFT                (24)
 #define MPI3_SCSI_TASKTAG_UNKNOWN               (0xffff)
-struct mpi3_scsi_task_mgmt_request {
-       __le16                     host_tag;
-       u8                         ioc_use_only02;
-       u8                         function;
-       __le16                     ioc_use_only04;
-       u8                         ioc_use_only06;
-       u8                         msg_flags;
-       __le16                     change_count;
-       __le16                     dev_handle;
-       __le16                     task_host_tag;
-       u8                         task_type;
-       u8                         reserved0f;
-       __le16                     task_request_queue_id;
-       __le16                     reserved12;
-       __le32                     reserved14;
-       u8                         lun[8];
-};
-
-#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU      (0x08)
-#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK               (0x01)
-#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET           (0x02)
-#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET             (0x03)
-#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET       (0x05)
-#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET           (0x06)
-#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK               (0x07)
-#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA                (0x08)
-#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET           (0x09)
-#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT        (0x0a)
-#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET          (0x0b)
-struct mpi3_scsi_task_mgmt_reply {
-       __le16                     host_tag;
-       u8                         ioc_use_only02;
-       u8                         function;
-       __le16                     ioc_use_only04;
-       u8                         ioc_use_only06;
-       u8                         msg_flags;
-       __le16                     ioc_use_only08;
-       __le16                     ioc_status;
-       __le32                     ioc_log_info;
-       __le32                     termination_count;
-       __le32                     response_data;
-       __le32                     reserved18;
-};
-
-#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE                (0x00)
-#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME              (0x02)
-#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED  (0x04)
-#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED                  (0x05)
-#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED               (0x08)
-#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN             (0x09)
-#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG          (0x0a)
-#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC           (0x80)
-#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED             (0x81)
 #endif
index 633037d..214e4c6 100644 (file)
@@ -38,16 +38,6 @@ struct mpi3_ioc_init_request {
 #define MPI3_WHOINIT_ROM_BIOS                            (0x02)
 #define MPI3_WHOINIT_HOST_DRIVER                         (0x03)
 #define MPI3_WHOINIT_MANUFACTURER                        (0x04)
-struct mpi3_driver_info_layout {
-       __le32             information_length;
-       u8                 driver_signature[12];
-       u8                 os_name[16];
-       u8                 os_version[12];
-       u8                 driver_name[20];
-       u8                 driver_version[32];
-       u8                 driver_release_date[20];
-       __le32             driver_capabilities;
-};
 
 struct mpi3_ioc_facts_request {
        __le16                 host_tag;
@@ -647,23 +637,6 @@ struct mpi3_event_data_diag_buffer_status_change {
 #define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED             (0x01)
 #define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED               (0x02)
 #define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED              (0x03)
-#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT   (0x0200)
-#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT       (0x0100)
-#define MPI3_PEL_LOCALE_FLAGS_PCIE                      (0x0080)
-#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION             (0x0040)
-#define MPI3_PEL_LOCALE_FLAGS_CONTROLER                 (0x0020)
-#define MPI3_PEL_LOCALE_FLAGS_SAS                       (0x0010)
-#define MPI3_PEL_LOCALE_FLAGS_EPACK                     (0x0008)
-#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE                 (0x0004)
-#define MPI3_PEL_LOCALE_FLAGS_PD                        (0x0002)
-#define MPI3_PEL_LOCALE_FLAGS_VD                        (0x0001)
-#define MPI3_PEL_CLASS_DEBUG                            (0x00)
-#define MPI3_PEL_CLASS_PROGRESS                         (0x01)
-#define MPI3_PEL_CLASS_INFORMATIONAL                    (0x02)
-#define MPI3_PEL_CLASS_WARNING                          (0x03)
-#define MPI3_PEL_CLASS_CRITICAL                         (0x04)
-#define MPI3_PEL_CLASS_FATAL                            (0x05)
-#define MPI3_PEL_CLASS_FAULT                            (0x06)
 #define MPI3_PEL_CLEARTYPE_CLEAR                        (0x00)
 #define MPI3_PEL_WAITTIME_INFINITE_WAIT                 (0x00)
 #define MPI3_PEL_ACTION_GET_SEQNUM                      (0x01)
index 77270f5..901dbd7 100644 (file)
@@ -5,24 +5,6 @@
  */
 #ifndef MPI30_PCI_H
 #define MPI30_PCI_H     1
-#ifndef MPI3_NVME_ENCAP_CMD_MAX
-#define MPI3_NVME_ENCAP_CMD_MAX               (1)
-#endif
-struct mpi3_nvme_encapsulated_request {
-       __le16                     host_tag;
-       u8                         ioc_use_only02;
-       u8                         function;
-       __le16                     ioc_use_only04;
-       u8                         ioc_use_only06;
-       u8                         msg_flags;
-       __le16                     change_count;
-       __le16                     dev_handle;
-       __le16                     encapsulated_command_length;
-       __le16                     flags;
-       __le32                     data_length;
-       __le32                     reserved14[3];
-       __le32                     command[MPI3_NVME_ENCAP_CMD_MAX];
-};
 
 #define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK      (0x0002)
 #define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
@@ -30,16 +12,5 @@ struct mpi3_nvme_encapsulated_request {
 #define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK                (0x0001)
 #define MPI3_NVME_FLAGS_SUBMISSIONQ_IO                  (0x0000)
 #define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN               (0x0001)
-struct mpi3_nvme_encapsulated_error_reply {
-       __le16                     host_tag;
-       u8                         ioc_use_only02;
-       u8                         function;
-       __le16                     ioc_use_only04;
-       u8                         ioc_use_only06;
-       u8                         msg_flags;
-       __le16                     ioc_use_only08;
-       __le16                     ioc_status;
-       __le32                     ioc_log_info;
-       __le32                     nvme_completion_entry[4];
-};
+
 #endif
index 96c85f7..01cd017 100644 (file)
@@ -38,6 +38,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
 
 #include "mpi/mpi30_transport.h"
 #include "mpi/mpi30_cnfg.h"
 extern spinlock_t mrioc_list_lock;
 extern struct list_head mrioc_list;
 extern int prot_mask;
+extern atomic64_t event_counter;
 
-#define MPI3MR_DRIVER_VERSION  "8.0.0.68.0"
-#define MPI3MR_DRIVER_RELDATE  "10-February-2022"
+#define MPI3MR_DRIVER_VERSION  "8.0.0.69.0"
+#define MPI3MR_DRIVER_RELDATE  "16-March-2022"
 
 #define MPI3MR_DRIVER_NAME     "mpi3mr"
 #define MPI3MR_DRIVER_LICENSE  "GPL"
@@ -89,7 +91,9 @@ extern int prot_mask;
 /* Reserved Host Tag definitions */
 #define MPI3MR_HOSTTAG_INVALID         0xFFFF
 #define MPI3MR_HOSTTAG_INITCMDS                1
-#define MPI3MR_HOSTTAG_IOCTLCMDS       2
+#define MPI3MR_HOSTTAG_BSG_CMDS                2
+#define MPI3MR_HOSTTAG_PEL_ABORT       3
+#define MPI3MR_HOSTTAG_PEL_WAIT                4
 #define MPI3MR_HOSTTAG_BLK_TMS         5
 
 #define MPI3MR_NUM_DEVRMCMD            16
@@ -120,6 +124,9 @@ extern int prot_mask;
 
 #define MPI3MR_WATCHDOG_INTERVAL               1000 /* in milli seconds */
 
+#define MPI3MR_SCMD_TIMEOUT    (60 * HZ)
+#define MPI3MR_EH_SCMD_TIMEOUT (60 * HZ)
+
 /* Internal admin command state definitions*/
 #define MPI3MR_CMD_NOTUSED     0x8000
 #define MPI3MR_CMD_COMPLETE    0x0001
@@ -148,8 +155,10 @@ extern int prot_mask;
 
 #define MPI3MR_DEFAULT_MDTS    (128 * 1024)
 #define MPI3MR_DEFAULT_PGSZEXP         (12)
+
 /* Command retry count definitions */
 #define MPI3MR_DEV_RMHS_RETRY_COUNT 3
+#define MPI3MR_PEL_RETRY_COUNT 3
 
 /* Default target device queue depth */
 #define MPI3MR_DEFAULT_SDEV_QD 32
@@ -175,6 +184,57 @@ extern int prot_mask;
 /* MSI Index from Reply Queue Index */
 #define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset)      (qidx + offset)
 
+/*
+ * Maximum data transfer size definitions for management
+ * application commands
+ */
+#define MPI3MR_MAX_APP_XFER_SIZE       (1 * 1024 * 1024)
+#define MPI3MR_MAX_APP_XFER_SEGMENTS   512
+/*
+ * 2048 sectors are for data buffers and an additional 512 sectors for
+ * other buffers
+ */
+#define MPI3MR_MAX_APP_XFER_SECTORS    (2048 + 512)
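
The three limits above are consistent with one another: 2048 data sectors of 512 bytes are exactly the 1 MiB MPI3MR_MAX_APP_XFER_SIZE, and the extra 512 sectors leave headroom for the non-data buffers of a passthrough command. A throwaway compile-time check, shown only to make the arithmetic explicit (not part of the driver):

    /* 2048 sectors * 512 bytes/sector == 1 MiB == MPI3MR_MAX_APP_XFER_SIZE */
    _Static_assert(2048 * 512 == 1 * 1024 * 1024,
                   "data-sector budget matches the 1 MiB transfer cap");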
+
+/**
+ * struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
+ * Encapsulated commands.
+ *
+ * @base_addr: Physical address
+ * @length: SGE length
+ * @rsvd: Reserved
+ * @rsvd1: Reserved
+ * @sgl_type: sgl type
+ */
+struct mpi3mr_nvme_pt_sge {
+       u64 base_addr;
+       u32 length;
+       u16 rsvd;
+       u8 rsvd1;
+       u8 sgl_type;
+};
+
+/**
+ * struct mpi3mr_buf_map - local structure to
+ * track kernel and user buffers associated with a BSG
+ * structure.
+ *
+ * @bsg_buf: BSG buffer virtual address
+ * @bsg_buf_len:  BSG buffer length
+ * @kern_buf: Kernel buffer virtual address
+ * @kern_buf_len: Kernel buffer length
+ * @kern_buf_dma: Kernel buffer DMA address
+ * @data_dir: Data direction.
+ */
+struct mpi3mr_buf_map {
+       void *bsg_buf;
+       u32 bsg_buf_len;
+       void *kern_buf;
+       u32 kern_buf_len;
+       dma_addr_t kern_buf_dma;
+       u8 data_dir;
+};
+
 /* IOC State definitions */
 enum mpi3mr_iocstate {
        MRIOC_STATE_READY = 1,
@@ -189,10 +249,10 @@ enum mpi3mr_iocstate {
 enum mpi3mr_reset_reason {
        MPI3MR_RESET_FROM_BRINGUP = 1,
        MPI3MR_RESET_FROM_FAULT_WATCH = 2,
-       MPI3MR_RESET_FROM_IOCTL = 3,
+       MPI3MR_RESET_FROM_APP = 3,
        MPI3MR_RESET_FROM_EH_HOS = 4,
        MPI3MR_RESET_FROM_TM_TIMEOUT = 5,
-       MPI3MR_RESET_FROM_IOCTL_TIMEOUT = 6,
+       MPI3MR_RESET_FROM_APP_TIMEOUT = 6,
        MPI3MR_RESET_FROM_MUR_FAILURE = 7,
        MPI3MR_RESET_FROM_CTLR_CLEANUP = 8,
        MPI3MR_RESET_FROM_CIACTIV_FAULT = 9,
@@ -543,6 +603,7 @@ struct mpi3mr_sdev_priv_data {
  * @ioc_status: IOC status from the firmware
  * @ioc_loginfo:IOC log info from the firmware
  * @is_waiting: Is the command issued in block mode
+ * @is_sense: Is Sense data present
  * @retry_count: Retry count for retriable commands
  * @host_tag: Host tag used by the command
  * @callback: Callback for non blocking commands
@@ -558,6 +619,7 @@ struct mpi3mr_drv_cmd {
        u16 ioc_status;
        u32 ioc_loginfo;
        u8 is_waiting;
+       u8 is_sense;
        u8 retry_count;
        u16 host_tag;
 
@@ -685,6 +747,7 @@ struct scmd_priv {
  * @chain_bitmap_sz: Chain buffer allocator bitmap size
  * @chain_bitmap: Chain buffer allocator bitmap
  * @chain_buf_lock: Chain buffer list lock
+ * @bsg_cmds: Command tracker for BSG command
  * @host_tm_cmds: Command tracker for task management commands
  * @dev_rmhs_cmds: Command tracker for device removal commands
  * @evtack_cmds: Command tracker for event ack commands
@@ -704,16 +767,35 @@ struct scmd_priv {
  * @reset_waitq: Controller reset  wait queue
  * @prepare_for_reset: Prepare for reset event received
  * @prepare_for_reset_timeout_counter: Prepare for reset timeout
+ * @prp_list_virt: NVMe encapsulated PRP list virtual base
+ * @prp_list_dma: NVMe encapsulated PRP list DMA
+ * @prp_sz: NVME encapsulated PRP list size
  * @diagsave_timeout: Diagnostic information save timeout
  * @logging_level: Controller debug logging level
  * @flush_io_count: I/O count to flush after reset
  * @current_event: Firmware event currently in process
  * @driver_info: Driver, Kernel, OS information to firmware
  * @change_count: Topology change count
+ * @pel_enabled: Persistent Event Log(PEL) enabled or not
+ * @pel_abort_requested: PEL abort is requested or not
+ * @pel_class: PEL Class identifier
+ * @pel_locale: PEL Locale identifier
+ * @pel_cmds: Command tracker for PEL wait command
+ * @pel_abort_cmd: Command tracker for PEL abort command
+ * @pel_newest_seqnum: Newest PEL sequence number
+ * @pel_seqnum_virt: PEL sequence number virtual address
+ * @pel_seqnum_dma: PEL sequence number DMA address
+ * @pel_seqnum_sz: PEL sequence number size
  * @op_reply_q_offset: Operational reply queue offset with MSIx
  * @default_qcount: Total Default queues
  * @active_poll_qcount: Currently active poll queue count
  * @requested_poll_qcount: User requested poll queue count
+ * @bsg_dev: BSG device structure
+ * @bsg_queue: Request queue for BSG device
+ * @stop_bsgs: Stop BSG request flag
+ * @logdata_buf: Circular buffer to store log data entries
+ * @logdata_buf_idx: Index of entry in buffer to store
+ * @logdata_entry_sz: log data entry size
  */
 struct mpi3mr_ioc {
        struct list_head list;
@@ -820,6 +902,7 @@ struct mpi3mr_ioc {
        void *chain_bitmap;
        spinlock_t chain_buf_lock;
 
+       struct mpi3mr_drv_cmd bsg_cmds;
        struct mpi3mr_drv_cmd host_tm_cmds;
        struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
        struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
@@ -842,6 +925,10 @@ struct mpi3mr_ioc {
        u8 prepare_for_reset;
        u16 prepare_for_reset_timeout_counter;
 
+       void *prp_list_virt;
+       dma_addr_t prp_list_dma;
+       u32 prp_sz;
+
        u16 diagsave_timeout;
        int logging_level;
        u16 flush_io_count;
@@ -849,11 +936,30 @@ struct mpi3mr_ioc {
        struct mpi3mr_fwevt *current_event;
        struct mpi3_driver_info_layout driver_info;
        u16 change_count;
-       u16 op_reply_q_offset;
 
+       u8 pel_enabled;
+       u8 pel_abort_requested;
+       u8 pel_class;
+       u16 pel_locale;
+       struct mpi3mr_drv_cmd pel_cmds;
+       struct mpi3mr_drv_cmd pel_abort_cmd;
+
+       u32 pel_newest_seqnum;
+       void *pel_seqnum_virt;
+       dma_addr_t pel_seqnum_dma;
+       u32 pel_seqnum_sz;
+
+       u16 op_reply_q_offset;
        u16 default_qcount;
        u16 active_poll_qcount;
        u16 requested_poll_qcount;
+
+       struct device *bsg_dev;
+       struct request_queue *bsg_queue;
+       u8 stop_bsgs;
+       u8 *logdata_buf;
+       u16 logdata_buf_idx;
+       u16 logdata_entry_sz;
 };
 
 /**
@@ -866,6 +972,7 @@ struct mpi3mr_ioc {
  * @send_ack: Event acknowledgment required or not
  * @process_evt: Bottomhalf processing required or not
  * @evt_ctx: Event context to send in Ack
+ * @event_data_size: size of the event data in bytes
  * @pending_at_sml: waiting for device add/remove API to complete
  * @discard: discard this event
  * @ref_count: kref count
@@ -879,6 +986,7 @@ struct mpi3mr_fwevt {
        bool send_ack;
        bool process_evt;
        u32 evt_ctx;
+       u16 event_data_size;
        bool pending_at_sml;
        bool discard;
        struct kref ref_count;
@@ -962,5 +1070,20 @@ void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
 int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
        struct op_reply_qinfo *op_reply_q);
 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
-
+void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc);
+void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc);
+int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+       u16 handle, uint lun, u16 htag, ulong timeout,
+       struct mpi3mr_drv_cmd *drv_cmd,
+       u8 *resp_code, struct scsi_cmnd *scmd);
+struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+       struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_drv_cmd *drv_cmd);
+int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_drv_cmd *drv_cmd);
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+       u16 event_data_size);
+extern const struct attribute_group *mpi3mr_host_groups[];
+extern const struct attribute_group *mpi3mr_dev_groups[];
 #endif /*MPI3MR_H_INCLUDED*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
new file mode 100644 (file)
index 0000000..9ab1762
--- /dev/null
@@ -0,0 +1,1864 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/bsg-lib.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+
+/**
+ * mpi3mr_bsg_pel_abort - sends PEL abort request
+ * @mrioc: Adapter instance reference
+ *
+ * This function sends a PEL abort request to the firmware through
+ * the admin request queue.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
+{
+       struct mpi3_pel_req_action_abort pel_abort_req;
+       struct mpi3_pel_reply *pel_reply;
+       int retval = 0;
+       u16 pe_log_status;
+
+       if (mrioc->reset_in_progress) {
+               dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+               return -1;
+       }
+       if (mrioc->stop_bsgs) {
+               dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+               return -1;
+       }
+
+       memset(&pel_abort_req, 0, sizeof(pel_abort_req));
+       mutex_lock(&mrioc->pel_abort_cmd.mutex);
+       if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
+               dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+               mutex_unlock(&mrioc->pel_abort_cmd.mutex);
+               return -1;
+       }
+       mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
+       mrioc->pel_abort_cmd.is_waiting = 1;
+       mrioc->pel_abort_cmd.callback = NULL;
+       pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
+       pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+       pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
+       pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+
+       mrioc->pel_abort_requested = 1;
+       init_completion(&mrioc->pel_abort_cmd.done);
+       retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
+           sizeof(pel_abort_req), 0);
+       if (retval) {
+               retval = -1;
+               dprint_bsg_err(mrioc, "%s: admin request post failed\n",
+                   __func__);
+               mrioc->pel_abort_requested = 0;
+               goto out_unlock;
+       }
+
+       wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
+           (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+       if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
+               mrioc->pel_abort_cmd.is_waiting = 0;
+               dprint_bsg_err(mrioc, "%s: command timedout\n", __func__);
+               if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
+                       mpi3mr_soft_reset_handler(mrioc,
+                           MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
+               retval = -1;
+               goto out_unlock;
+       }
+       if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+            != MPI3_IOCSTATUS_SUCCESS) {
+               dprint_bsg_err(mrioc,
+                   "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
+                   __func__, (mrioc->pel_abort_cmd.ioc_status &
+                   MPI3_IOCSTATUS_STATUS_MASK),
+                   mrioc->pel_abort_cmd.ioc_loginfo);
+               retval = -1;
+               goto out_unlock;
+       }
+       if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
+               pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
+               pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
+               if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
+                       dprint_bsg_err(mrioc,
+                           "%s: command failed, pel_status(0x%04x)\n",
+                           __func__, pe_log_status);
+                       retval = -1;
+               }
+       }
+
+out_unlock:
+       mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
+       mutex_unlock(&mrioc->pel_abort_cmd.mutex);
+       return retval;
+}
+/**
+ * mpi3mr_bsg_verify_adapter - verify adapter number is valid
+ * @ioc_number: Adapter number
+ *
+ * This function returns the adapter instance pointer for the given
+ * adapter number. If the adapter number does not match any adapter
+ * in the driver's list, NULL is returned.
+ *
+ * Return: adapter instance reference
+ */
+static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
+{
+       struct mpi3mr_ioc *mrioc = NULL;
+
+       spin_lock(&mrioc_list_lock);
+       list_for_each_entry(mrioc, &mrioc_list, list) {
+               if (mrioc->id == ioc_number) {
+                       spin_unlock(&mrioc_list_lock);
+                       return mrioc;
+               }
+       }
+       spin_unlock(&mrioc_list_lock);
+       return NULL;
+}
+
+/**
+ * mpi3mr_enable_logdata - Handler for log data enable
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function enables log data caching in the driver if not
+ * already enabled and return the maximum number of log data
+ * entries that can be cached in the driver.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
+       struct bsg_job *job)
+{
+       struct mpi3mr_logdata_enable logdata_enable;
+
+       if (!mrioc->logdata_buf) {
+               mrioc->logdata_entry_sz =
+                   (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
+                   + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
+               mrioc->logdata_buf_idx = 0;
+               mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
+                   mrioc->logdata_entry_sz, GFP_KERNEL);
+
+               if (!mrioc->logdata_buf)
+                       return -ENOMEM;
+       }
+
+       memset(&logdata_enable, 0, sizeof(logdata_enable));
+       logdata_enable.max_entries =
+           MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
+       if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
+               sg_copy_from_buffer(job->request_payload.sg_list,
+                                   job->request_payload.sg_cnt,
+                                   &logdata_enable, sizeof(logdata_enable));
+               return 0;
+       }
+
+       return -EINVAL;
+}
+/**
+ * mpi3mr_get_logdata - Handler for get log data
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ * This function copies the log data entries to the user buffer
+ * when log caching is enabled in the driver.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
+       struct bsg_job *job)
+{
+       u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;
+
+       if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
+               return -EINVAL;
+
+       num_entries = job->request_payload.payload_len / entry_sz;
+       if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
+               num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
+       sz = num_entries * entry_sz;
+
+       if (job->request_payload.payload_len >= sz) {
+               sg_copy_from_buffer(job->request_payload.sg_list,
+                                   job->request_payload.sg_cnt,
+                                   mrioc->logdata_buf, sz);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_pel_enable - Handler for the PEL enable command
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function is the handler for the PEL enable request from
+ * the application. It validates the application-given class and
+ * locale and, if required, aborts the existing PEL wait request
+ * and/or issues a new PEL wait request to the firmware.
+ *
+ * Return: 0 on success and proper error codes on failure.
+ */
+static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
+                                 struct bsg_job *job)
+{
+       long rval = -EINVAL;
+       struct mpi3mr_bsg_out_pel_enable pel_enable;
+       u8 issue_pel_wait;
+       u8 tmp_class;
+       u16 tmp_locale;
+
+       if (job->request_payload.payload_len != sizeof(pel_enable)) {
+               dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+                   __func__);
+               return rval;
+       }
+
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt,
+                         &pel_enable, sizeof(pel_enable));
+
+       if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
+               dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
+                       __func__, pel_enable.pel_class);
+               rval = 0;
+               goto out;
+       }
+       if (!mrioc->pel_enabled)
+               issue_pel_wait = 1;
+       else {
+               if ((mrioc->pel_class <= pel_enable.pel_class) &&
+                   !((mrioc->pel_locale & pel_enable.pel_locale) ^
+                     pel_enable.pel_locale)) {
+                       issue_pel_wait = 0;
+                       rval = 0;
+               } else {
+                       pel_enable.pel_locale |= mrioc->pel_locale;
+
+                       if (mrioc->pel_class < pel_enable.pel_class)
+                               pel_enable.pel_class = mrioc->pel_class;
+
+                       rval = mpi3mr_bsg_pel_abort(mrioc);
+                       if (rval) {
+                               dprint_bsg_err(mrioc,
+                                   "%s: pel_abort failed, status(%ld)\n",
+                                   __func__, rval);
+                               goto out;
+                       }
+                       issue_pel_wait = 1;
+               }
+       }
+       if (issue_pel_wait) {
+               tmp_class = mrioc->pel_class;
+               tmp_locale = mrioc->pel_locale;
+               mrioc->pel_class = pel_enable.pel_class;
+               mrioc->pel_locale = pel_enable.pel_locale;
+               mrioc->pel_enabled = 1;
+               rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
+               if (rval) {
+                       mrioc->pel_class = tmp_class;
+                       mrioc->pel_locale = tmp_locale;
+                       mrioc->pel_enabled = 0;
+                       dprint_bsg_err(mrioc,
+                           "%s: pel get sequence number failed, status(%ld)\n",
+                           __func__, rval);
+               }
+       }
+
+out:
+       return rval;
+}
+/**
+ * mpi3mr_get_all_tgt_info - Get all target information
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the driver-managed target devices' device
+ * handle, persistent ID, bus ID and target ID to the user
+ * provided buffer for the specific controller. This function
+ * also provides the number of devices managed by the driver for
+ * the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+       struct bsg_job *job)
+{
+       long rval = -EINVAL;
+       u16 num_devices = 0, i = 0, size;
+       unsigned long flags;
+       struct mpi3mr_tgt_dev *tgtdev;
+       struct mpi3mr_device_map_info *devmap_info = NULL;
+       struct mpi3mr_all_tgt_info *alltgt_info = NULL;
+       uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;
+
+       if (job->request_payload.payload_len < sizeof(u32)) {
+               dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+                   __func__);
+               return rval;
+       }
+
+       spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+       list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+               num_devices++;
+       spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+       if ((job->request_payload.payload_len == sizeof(u32)) ||
+               list_empty(&mrioc->tgtdev_list)) {
+               sg_copy_from_buffer(job->request_payload.sg_list,
+                                   job->request_payload.sg_cnt,
+                                   &num_devices, sizeof(num_devices));
+               return 0;
+       }
+
+       kern_entrylen = (num_devices - 1) * sizeof(*devmap_info);
+       size = sizeof(*alltgt_info) + kern_entrylen;
+       alltgt_info = kzalloc(size, GFP_KERNEL);
+       if (!alltgt_info)
+               return -ENOMEM;
+
+       devmap_info = alltgt_info->dmi;
+       memset((u8 *)devmap_info, 0xFF, (kern_entrylen + sizeof(*devmap_info)));
+       spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+       list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+               if (i < num_devices) {
+                       devmap_info[i].handle = tgtdev->dev_handle;
+                       devmap_info[i].perst_id = tgtdev->perst_id;
+                       if (tgtdev->host_exposed && tgtdev->starget) {
+                               devmap_info[i].target_id = tgtdev->starget->id;
+                               devmap_info[i].bus_id =
+                                   tgtdev->starget->channel;
+                       }
+                       i++;
+               }
+       }
+       num_devices = i;
+       spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+       memcpy(&alltgt_info->num_devices, &num_devices, sizeof(num_devices));
+
+       usr_entrylen = (job->request_payload.payload_len - sizeof(u32)) / sizeof(*devmap_info);
+       usr_entrylen *= sizeof(*devmap_info);
+       min_entrylen = min(usr_entrylen, kern_entrylen);
+       if (min_entrylen && (!memcpy(&alltgt_info->dmi, devmap_info, min_entrylen))) {
+               dprint_bsg_err(mrioc, "%s:%d: device map info copy failed\n",
+                   __func__, __LINE__);
+               rval = -EFAULT;
+               goto out;
+       }
+
+       sg_copy_from_buffer(job->request_payload.sg_list,
+                           job->request_payload.sg_cnt,
+                           alltgt_info, job->request_payload.payload_len);
+       rval = 0;
+out:
+       kfree(alltgt_info);
+       return rval;
+}
+/**
+ * mpi3mr_get_change_count - Get topology change count
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the topology change count provided by the
+ * driver in events and cached in the driver to the user
+ * provided buffer for the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
+       struct bsg_job *job)
+{
+       struct mpi3mr_change_count chgcnt;
+
+       memset(&chgcnt, 0, sizeof(chgcnt));
+       chgcnt.change_count = mrioc->change_count;
+       if (job->request_payload.payload_len >= sizeof(chgcnt)) {
+               sg_copy_from_buffer(job->request_payload.sg_list,
+                                   job->request_payload.sg_cnt,
+                                   &chgcnt, sizeof(chgcnt));
+               return 0;
+       }
+       return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_adp_reset - Issue controller reset
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function identifies the user-provided reset type, issues
+ * the appropriate reset to the controller, waits for it to complete,
+ * reinitializes the controller and then returns
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
+       struct bsg_job *job)
+{
+       long rval = -EINVAL;
+       u8 save_snapdump;
+       struct mpi3mr_bsg_adp_reset adpreset;
+
+       if (job->request_payload.payload_len !=
+                       sizeof(adpreset)) {
+               dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+                   __func__);
+               goto out;
+       }
+
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt,
+                         &adpreset, sizeof(adpreset));
+
+       switch (adpreset.reset_type) {
+       case MPI3MR_BSG_ADPRESET_SOFT:
+               save_snapdump = 0;
+               break;
+       case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
+               save_snapdump = 1;
+               break;
+       default:
+               dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
+                   __func__, adpreset.reset_type);
+               goto out;
+       }
+
+       rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
+           save_snapdump);
+
+       if (rval)
+               dprint_bsg_err(mrioc,
+                   "%s: reset handler returned error(%ld) for reset type %d\n",
+                   __func__, rval, adpreset.reset_type);
+out:
+       return rval;
+}
+
+/**
+ * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function provides adapter information for the given
+ * controller
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
+       struct bsg_job *job)
+{
+       enum mpi3mr_iocstate ioc_state;
+       struct mpi3mr_bsg_in_adpinfo adpinfo;
+
+       memset(&adpinfo, 0, sizeof(adpinfo));
+       adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
+       adpinfo.pci_dev_id = mrioc->pdev->device;
+       adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
+       adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
+       adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
+       adpinfo.pci_bus = mrioc->pdev->bus->number;
+       adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
+       adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
+       adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
+       adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;
+
+       ioc_state = mpi3mr_get_iocstate(mrioc);
+       if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+               adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+       else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+               adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+       else if (ioc_state == MRIOC_STATE_FAULT)
+               adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+       else
+               adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+       memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
+           sizeof(adpinfo.driver_info));
+
+       if (job->request_payload.payload_len >= sizeof(adpinfo)) {
+               sg_copy_from_buffer(job->request_payload.sg_list,
+                                   job->request_payload.sg_cnt,
+                                   &adpinfo, sizeof(adpinfo));
+               return 0;
+       }
+       return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_process_drv_cmds - Driver Command handler
+ * @job: BSG job reference
+ *
+ * This function is the top level handler for driver commands.
+ * It does basic validation of the buffer, identifies the
+ * opcode and switches to the correct sub-handler.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
+{
+       long rval = -EINVAL;
+       struct mpi3mr_ioc *mrioc = NULL;
+       struct mpi3mr_bsg_packet *bsg_req = NULL;
+       struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;
+
+       bsg_req = job->request;
+       drvrcmd = &bsg_req->cmd.drvrcmd;
+
+       mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
+       if (!mrioc)
+               return -ENODEV;
+
+       if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
+               rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
+               return rval;
+       }
+
+       if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
+               return -ERESTARTSYS;
+
+       switch (drvrcmd->opcode) {
+       case MPI3MR_DRVBSG_OPCODE_ADPRESET:
+               rval = mpi3mr_bsg_adp_reset(mrioc, job);
+               break;
+       case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
+               rval = mpi3mr_get_all_tgt_info(mrioc, job);
+               break;
+       case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
+               rval = mpi3mr_get_change_count(mrioc, job);
+               break;
+       case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
+               rval = mpi3mr_enable_logdata(mrioc, job);
+               break;
+       case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
+               rval = mpi3mr_get_logdata(mrioc, job);
+               break;
+       case MPI3MR_DRVBSG_OPCODE_PELENABLE:
+               rval = mpi3mr_bsg_pel_enable(mrioc, job);
+               break;
+       case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
+       default:
+               pr_err("%s: unsupported driver command opcode %d\n",
+                   MPI3MR_DRIVER_NAME, drvrcmd->opcode);
+               break;
+       }
+       mutex_unlock(&mrioc->bsg_cmds.mutex);
+       return rval;
+}
+
+/**
+ * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
+ * @mpi_req: MPI request
+ * @sgl_offset: offset to start sgl in the MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ * @is_rmc: Does the buffer list has management command buffer
+ * @is_rmr: Does the buffer list has management response buffer
+ * @num_datasges: Number of data buffers in the list
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given MPI request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
+       struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc,
+       u8 is_rmr, u8 num_datasges)
+{
+       u8 *sgl = (mpi_req + sgl_offset), count = 0;
+       struct mpi3_mgmt_passthrough_request *rmgmt_req =
+           (struct mpi3_mgmt_passthrough_request *)mpi_req;
+       struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+       u8 sgl_flags, sgl_flags_last;
+
+       sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+               MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER;
+       sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST;
+
+       if (is_rmc) {
+               mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
+                   sgl_flags_last, drv_buf_iter->kern_buf_len,
+                   drv_buf_iter->kern_buf_dma);
+               sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len;
+               drv_buf_iter++;
+               count++;
+               if (is_rmr) {
+                       mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
+                           sgl_flags_last, drv_buf_iter->kern_buf_len,
+                           drv_buf_iter->kern_buf_dma);
+                       drv_buf_iter++;
+                       count++;
+               } else
+                       mpi3mr_build_zero_len_sge(
+                           &rmgmt_req->response_sgl);
+       }
+       if (!num_datasges) {
+               mpi3mr_build_zero_len_sge(sgl);
+               return;
+       }
+       for (; count < bufcnt; count++, drv_buf_iter++) {
+               if (drv_buf_iter->data_dir == DMA_NONE)
+                       continue;
+               if (num_datasges == 1 || !is_rmc)
+                       mpi3mr_add_sg_single(sgl, sgl_flags_last,
+                           drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
+               else
+                       mpi3mr_add_sg_single(sgl, sgl_flags,
+                           drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
+               sgl += sizeof(struct mpi3_sge_common);
+               num_datasges--;
+       }
+}
+
+/**
+ * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ *
+ * This function returns the type of the data format specified
+ * in the user-provided NVMe command of the NVMe encapsulated request.
+ *
+ * Return: Data format of the NVMe command (PRP/SGL etc)
+ */
+static unsigned int mpi3mr_get_nvme_data_fmt(
+       struct mpi3_nvme_encapsulated_request *nvme_encap_request)
+{
+       u8 format = 0;
+
+       format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
+       return format;
+
+}
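
The extraction above pulls out bits 15:14 of NVMe command dword 0, the field that says whether the command describes its data with PRPs or SGLs; the passthrough path (not shown in this hunk) presumably uses the result to choose between the PRP and SGL builders that follow. A worked example with an assumed dword value, purely illustrative:

    u32 cdw0 = 0x00004000;                     /* hypothetical application-supplied CDW0 */
    unsigned int fmt = (cdw0 & 0xc000) >> 14;  /* == 1: SGL-based; 0 would mean PRPs */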
+
+/**
+ * mpi3mr_build_nvme_sgl - SGL constructor for NVME
+ *                                encapsulated request
+ * @mrioc: Adapter instance reference
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given NVMe encapsulated request.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
+       struct mpi3_nvme_encapsulated_request *nvme_encap_request,
+       struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
+{
+       struct mpi3mr_nvme_pt_sge *nvme_sgl;
+       u64 sgl_ptr;
+       u8 count;
+       size_t length = 0;
+       struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+       u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
+                           mrioc->facts.sge_mod_shift) << 32);
+       u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
+                         mrioc->facts.sge_mod_shift) << 32;
+
+       /*
+        * Not all commands require a data transfer. If no data, just return
+        * without constructing any sgl.
+        */
+       for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+               if (drv_buf_iter->data_dir == DMA_NONE)
+                       continue;
+               sgl_ptr = (u64)drv_buf_iter->kern_buf_dma;
+               length = drv_buf_iter->kern_buf_len;
+               break;
+       }
+       if (!length)
+               return 0;
+
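+       /*
+        * The SGE modifier value reported in IOC facts must be stamped into
+        * the masked upper address bits; a buffer whose DMA address already
+        * uses those bits cannot be described to the firmware.
+        */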
+       if (sgl_ptr & sgemod_mask) {
+               dprint_bsg_err(mrioc,
+                   "%s: SGL address collides with SGE modifier\n",
+                   __func__);
+               return -1;
+       }
+
+       sgl_ptr &= ~sgemod_mask;
+       sgl_ptr |= sgemod_val;
+       nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
+           ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
+       memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
+       nvme_sgl->base_addr = sgl_ptr;
+       nvme_sgl->length = length;
+       return 0;
+}
+
+/**
+ * mpi3mr_build_nvme_prp - PRP constructor for NVME
+ *                            encapsulated request
+ * @mrioc: Adapter instance reference
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in SGL
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as PRP entries in the given NVMe encapsulated
+ * request.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
+       struct mpi3_nvme_encapsulated_request *nvme_encap_request,
+       struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
+{
+       int prp_size = MPI3MR_NVME_PRP_SIZE;
+       __le64 *prp_entry, *prp1_entry, *prp2_entry;
+       __le64 *prp_page;
+       dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
+       u32 offset, entry_len, dev_pgsz;
+       u32 page_mask_result, page_mask;
+       size_t length = 0;
+       u8 count;
+       struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+       u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
+                           mrioc->facts.sge_mod_shift) << 32);
+       u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
+                         mrioc->facts.sge_mod_shift) << 32;
+       u16 dev_handle = nvme_encap_request->dev_handle;
+       struct mpi3mr_tgt_dev *tgtdev;
+
+       tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+       if (!tgtdev) {
+               dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
+                       __func__, dev_handle);
+               return -1;
+       }
+
+       if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
+               dprint_bsg_err(mrioc,
+                   "%s: NVMe device page size is zero for handle 0x%04x\n",
+                   __func__, dev_handle);
+               mpi3mr_tgtdev_put(tgtdev);
+               return -1;
+       }
+
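+       /* the device page size is reported as a power-of-two exponent */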
+       dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
+       mpi3mr_tgtdev_put(tgtdev);
+
+       /*
+        * Not all commands require a data transfer. If no data, just return
+        * without constructing any PRP.
+        */
+       for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+               if (drv_buf_iter->data_dir == DMA_NONE)
+                       continue;
+               dma_addr = drv_buf_iter->kern_buf_dma;
+               length = drv_buf_iter->kern_buf_len;
+               break;
+       }
+
+       if (!length)
+               return 0;
+
+       mrioc->prp_sz = 0;
+       mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+           dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
+
+       if (!mrioc->prp_list_virt)
+               return -1;
+       mrioc->prp_sz = dev_pgsz;
+
+       /*
+        * Set pointers to PRP1 and PRP2, which are in the NVMe command.
+        * PRP1 is located at a 24 byte offset from the start of the NVMe
+        * command.  Then set the current PRP entry pointer to PRP1.
+        */
+       prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
+           MPI3MR_NVME_CMD_PRP1_OFFSET);
+       prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
+           MPI3MR_NVME_CMD_PRP2_OFFSET);
+       prp_entry = prp1_entry;
+       /*
+        * For the PRP entries, use the specially allocated buffer of
+        * contiguous memory.
+        */
+       prp_page = (__le64 *)mrioc->prp_list_virt;
+       prp_page_dma = mrioc->prp_list_dma;
+
+       /*
+        * Check if we are within 1 entry of a page boundary; we don't
+        * want our first entry to be a PRP List entry.
+        */
+       page_mask = dev_pgsz - 1;
+       page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
+       if (!page_mask_result) {
+               dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
+                   __func__);
+               goto err_out;
+       }
+
+       /*
+        * Set PRP physical pointer, which initially points to the current PRP
+        * DMA memory page.
+        */
+       prp_entry_dma = prp_page_dma;
+
+       /* Loop while the length is not zero. */
+       while (length) {
+               page_mask_result = (prp_entry_dma + prp_size) & page_mask;
+               if (!page_mask_result && (length > dev_pgsz)) {
+                       dprint_bsg_err(mrioc,
+                           "%s: single PRP page is not sufficient\n",
+                           __func__);
+                       goto err_out;
+               }
+
+               /* Limit this entry to the end of the current device page. */
+               offset = dma_addr & page_mask;
+               entry_len = dev_pgsz - offset;
+
+               if (prp_entry == prp1_entry) {
+                       /*
+                        * Must fill in the first PRP pointer (PRP1) before
+                        * moving on.
+                        */
+                       *prp1_entry = cpu_to_le64(dma_addr);
+                       if (*prp1_entry & sgemod_mask) {
+                               dprint_bsg_err(mrioc,
+                                   "%s: PRP1 address collides with SGE modifier\n",
+                                   __func__);
+                               goto err_out;
+                       }
+                       *prp1_entry &= ~sgemod_mask;
+                       *prp1_entry |= sgemod_val;
+
+                       /*
+                        * Now point to the second PRP entry within the
+                        * command (PRP2).
+                        */
+                       prp_entry = prp2_entry;
+               } else if (prp_entry == prp2_entry) {
+                       /*
+                        * Should the PRP2 entry be a PRP List pointer or just
+                        * a regular PRP pointer?  If there is more than one
+                        * more page of data, must use a PRP List pointer.
+                        */
+                       if (length > dev_pgsz) {
+                               /*
+                                * PRP2 will contain a PRP List pointer because
+                                * more PRPs are needed with this command. The
+                                * list will start at the beginning of the
+                                * contiguous buffer.
+                                */
+                               *prp2_entry = cpu_to_le64(prp_entry_dma);
+                               if (*prp2_entry & sgemod_mask) {
+                                       dprint_bsg_err(mrioc,
+                                           "%s: PRP list address collides with SGE modifier\n",
+                                           __func__);
+                                       goto err_out;
+                               }
+                               *prp2_entry &= ~sgemod_mask;
+                               *prp2_entry |= sgemod_val;
+
+                               /*
+                                * The next PRP Entry will be the start of the
+                                * first PRP List.
+                                */
+                               prp_entry = prp_page;
+                               continue;
+                       } else {
+                               /*
+                                * After this, the PRP Entries are complete.
+                                * This command uses 2 PRPs and no PRP list.
+                                */
+                               *prp2_entry = cpu_to_le64(dma_addr);
+                               if (*prp2_entry & sgemod_mask) {
+                                       dprint_bsg_err(mrioc,
+                                           "%s: PRP2 collides with SGE modifier\n",
+                                           __func__);
+                                       goto err_out;
+                               }
+                               *prp2_entry &= ~sgemod_mask;
+                               *prp2_entry |= sgemod_val;
+                       }
+               } else {
+                       /*
+                        * Put entry in list and bump the addresses.
+                        *
+                        * After PRP1 and PRP2 are filled in, this will fill in
+                        * all remaining PRP entries in a PRP List, one per
+                        * each time through the loop.
+                        */
+                       *prp_entry = cpu_to_le64(dma_addr);
+                       if (*prp_entry & sgemod_mask) {
+                               dprint_bsg_err(mrioc,
+                                   "%s: PRP address collides with SGE modifier\n",
+                                   __func__);
+                               goto err_out;
+                       }
+                       *prp_entry &= ~sgemod_mask;
+                       *prp_entry |= sgemod_val;
+                       prp_entry++;
+                       prp_entry_dma += prp_size;
+               }
+
+               /*
+                * Bump the phys address of the command's data buffer by the
+                * entry_len.
+                */
+               dma_addr += entry_len;
+
+               /* decrement length accounting for last partial page. */
+               if (entry_len > length)
+                       length = 0;
+               else
+                       length -= entry_len;
+       }
+       return 0;
+err_out:
+       if (mrioc->prp_list_virt) {
+               dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
+                   mrioc->prp_list_virt, mrioc->prp_list_dma);
+               mrioc->prp_list_virt = NULL;
+       }
+       return -1;
+}
+
+/**
+ * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
+ * @job: BSG job reference
+ * @reply_payload_rcv_len: Pointer to return the length of the reply
+ *                         payload copied to the user buffer
+ *
+ * This function is the top level handler for MPI Pass through
+ * commands. It does basic validation of the input data buffers,
+ * identifies the given buffer types and the MPI command, allocates
+ * DMAable memory for the user given buffers, constructs the SGL
+ * properly and passes the command to the firmware.
+ *
+ * Once the MPI command is completed, the driver copies the data,
+ * if any, along with the reply and sense information to the user
+ * provided buffers. If the command times out, a controller reset
+ * is issued prior to returning.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len)
+{
+       long rval = -EINVAL;
+
+       struct mpi3mr_ioc *mrioc = NULL;
+       u8 *mpi_req = NULL, *sense_buff_k = NULL;
+       u8 mpi_msg_size = 0;
+       struct mpi3mr_bsg_packet *bsg_req = NULL;
+       struct mpi3mr_bsg_mptcmd *karg;
+       struct mpi3mr_buf_entry *buf_entries = NULL;
+       struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
+       u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
+       u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0;
+       u8 block_io = 0, resp_code = 0, nvme_fmt = 0;
+       struct mpi3_request_header *mpi_header = NULL;
+       struct mpi3_status_reply_descriptor *status_desc;
+       struct mpi3_scsi_task_mgmt_request *tm_req;
+       u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
+       u16 dev_handle;
+       struct mpi3mr_tgt_dev *tgtdev;
+       struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
+       struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
+       u32 din_size = 0, dout_size = 0;
+       u8 *din_buf = NULL, *dout_buf = NULL;
+       u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
+
+       bsg_req = job->request;
+       karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
+
+       mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
+       if (!mrioc)
+               return -ENODEV;
+
+       if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
+               karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
+
+       mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
+       if (!mpi_req)
+               return -ENOMEM;
+       mpi_header = (struct mpi3_request_header *)mpi_req;
+
+       bufcnt = karg->buf_entry_list.num_of_entries;
+       drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
+       if (!drv_bufs) {
+               rval = -ENOMEM;
+               goto out;
+       }
+
+       dout_buf = kzalloc(job->request_payload.payload_len,
+                                     GFP_KERNEL);
+       if (!dout_buf) {
+               rval = -ENOMEM;
+               goto out;
+       }
+
+       din_buf = kzalloc(job->reply_payload.payload_len,
+                                    GFP_KERNEL);
+       if (!din_buf) {
+               rval = -ENOMEM;
+               goto out;
+       }
+
+       sg_copy_to_buffer(job->request_payload.sg_list,
+                         job->request_payload.sg_cnt,
+                         dout_buf, job->request_payload.payload_len);
+
+       buf_entries = karg->buf_entry_list.buf_entry;
+       sgl_din_iter = din_buf;
+       sgl_dout_iter = dout_buf;
+       drv_buf_iter = drv_bufs;
+
+       for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
+
+               if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
+                       dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
+                               __func__);
+                       rval = -EINVAL;
+                       goto out;
+               }
+               if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
+                       dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
+                               __func__);
+                       rval = -EINVAL;
+                       goto out;
+               }
+
+               switch (buf_entries->buf_type) {
+               case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
+                       sgl_iter = sgl_dout_iter;
+                       sgl_dout_iter += buf_entries->buf_len;
+                       drv_buf_iter->data_dir = DMA_TO_DEVICE;
+                       is_rmcb = 1;
+                       if (count != 0)
+                               invalid_be = 1;
+                       break;
+               case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
+                       sgl_iter = sgl_din_iter;
+                       sgl_din_iter += buf_entries->buf_len;
+                       drv_buf_iter->data_dir = DMA_FROM_DEVICE;
+                       is_rmrb = 1;
+                       if (count != 1 || !is_rmcb)
+                               invalid_be = 1;
+                       break;
+               case MPI3MR_BSG_BUFTYPE_DATA_IN:
+                       sgl_iter = sgl_din_iter;
+                       sgl_din_iter += buf_entries->buf_len;
+                       drv_buf_iter->data_dir = DMA_FROM_DEVICE;
+                       din_cnt++;
+                       din_size += drv_buf_iter->bsg_buf_len;
+                       if ((din_cnt > 1) && !is_rmcb)
+                               invalid_be = 1;
+                       break;
+               case MPI3MR_BSG_BUFTYPE_DATA_OUT:
+                       sgl_iter = sgl_dout_iter;
+                       sgl_dout_iter += buf_entries->buf_len;
+                       drv_buf_iter->data_dir = DMA_TO_DEVICE;
+                       dout_cnt++;
+                       dout_size += drv_buf_iter->bsg_buf_len;
+                       if ((dout_cnt > 1) && !is_rmcb)
+                               invalid_be = 1;
+                       break;
+               case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
+                       sgl_iter = sgl_din_iter;
+                       sgl_din_iter += buf_entries->buf_len;
+                       drv_buf_iter->data_dir = DMA_NONE;
+                       mpirep_offset = count;
+                       break;
+               case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
+                       sgl_iter = sgl_din_iter;
+                       sgl_din_iter += buf_entries->buf_len;
+                       drv_buf_iter->data_dir = DMA_NONE;
+                       erb_offset = count;
+                       break;
+               case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
+                       sgl_iter = sgl_dout_iter;
+                       sgl_dout_iter += buf_entries->buf_len;
+                       drv_buf_iter->data_dir = DMA_NONE;
+                       mpi_msg_size = buf_entries->buf_len;
+                       if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
+                                       (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
+                               dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
+                                       __func__);
+                               rval = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
+                       break;
+               default:
+                       invalid_be = 1;
+                       break;
+               }
+               if (invalid_be) {
+                       dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
+                               __func__);
+                       rval = -EINVAL;
+                       goto out;
+               }
+
+               drv_buf_iter->bsg_buf = sgl_iter;
+               drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
+
+       }
+       if (!is_rmcb && (dout_cnt || din_cnt)) {
+               sg_entries = dout_cnt + din_cnt;
+               if (((mpi_msg_size) + (sg_entries *
+                     sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) {
+                       dprint_bsg_err(mrioc,
+                           "%s:%d: invalid message size passed\n",
+                           __func__, __LINE__);
+                       rval = -EINVAL;
+                       goto out;
+               }
+       }
+       if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
+               dprint_bsg_err(mrioc,
+                   "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
+                   __func__, __LINE__, mpi_header->function, din_size);
+               rval = -EINVAL;
+               goto out;
+       }
+       if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
+               dprint_bsg_err(mrioc,
+                   "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
+                   __func__, __LINE__, mpi_header->function, dout_size);
+               rval = -EINVAL;
+               goto out;
+       }
+
+       drv_buf_iter = drv_bufs;
+       for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+               if (drv_buf_iter->data_dir == DMA_NONE)
+                       continue;
+
+               drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
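+               /*
+                * The management command buffer also carries the data SGEs
+                * appended by mpi3mr_bsg_build_sgl(), so reserve extra room
+                * for them.
+                */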
+               if (is_rmcb && !count)
+                       drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) *
+                           sizeof(struct mpi3_sge_common));
+
+               if (!drv_buf_iter->kern_buf_len)
+                       continue;
+
+               drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev,
+                   drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma,
+                   GFP_KERNEL);
+               if (!drv_buf_iter->kern_buf) {
+                       rval = -ENOMEM;
+                       goto out;
+               }
+               if (drv_buf_iter->data_dir == DMA_TO_DEVICE) {
+                       tmplen = min(drv_buf_iter->kern_buf_len,
+                           drv_buf_iter->bsg_buf_len);
+                       memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
+               }
+       }
+
+       if (erb_offset != 0xFF) {
+               sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
+               if (!sense_buff_k) {
+                       rval = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
+               rval = -ERESTARTSYS;
+               goto out;
+       }
+       if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
+               rval = -EAGAIN;
+               dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+               mutex_unlock(&mrioc->bsg_cmds.mutex);
+               goto out;
+       }
+       if (mrioc->unrecoverable) {
+               dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+                   __func__);
+               rval = -EFAULT;
+               mutex_unlock(&mrioc->bsg_cmds.mutex);
+               goto out;
+       }
+       if (mrioc->reset_in_progress) {
+               dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+               rval = -EAGAIN;
+               mutex_unlock(&mrioc->bsg_cmds.mutex);
+               goto out;
+       }
+       if (mrioc->stop_bsgs) {
+               dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+               rval = -EAGAIN;
+               mutex_unlock(&mrioc->bsg_cmds.mutex);
+               goto out;
+       }
+
+       if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
+               nvme_fmt = mpi3mr_get_nvme_data_fmt(
+                       (struct mpi3_nvme_encapsulated_request *)mpi_req);
+               if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
+                       if (mpi3mr_build_nvme_prp(mrioc,
+                           (struct mpi3_nvme_encapsulated_request *)mpi_req,
+                           drv_bufs, bufcnt)) {
+                               rval = -ENOMEM;
+                               mutex_unlock(&mrioc->bsg_cmds.mutex);
+                               goto out;
+                       }
+               } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
+                       nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
+                       if (mpi3mr_build_nvme_sgl(mrioc,
+                           (struct mpi3_nvme_encapsulated_request *)mpi_req,
+                           drv_bufs, bufcnt)) {
+                               rval = -EINVAL;
+                               mutex_unlock(&mrioc->bsg_cmds.mutex);
+                               goto out;
+                       }
+               } else {
+                       dprint_bsg_err(mrioc,
+                           "%s:invalid NVMe command format\n", __func__);
+                       rval = -EINVAL;
+                       mutex_unlock(&mrioc->bsg_cmds.mutex);
+                       goto out;
+               }
+       } else {
+               mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size),
+                   drv_bufs, bufcnt, is_rmcb, is_rmrb,
+                   (dout_cnt + din_cnt));
+       }
+
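+       /*
+        * For task management functions other than ABORT TASK, block new I/O
+        * to the target device while the TM is outstanding.
+        */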
+       if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
+               tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
+               if (tm_req->task_type !=
+                   MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+                       dev_handle = tm_req->dev_handle;
+                       block_io = 1;
+               }
+       }
+       if (block_io) {
+               tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+               if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
+                       stgt_priv = (struct mpi3mr_stgt_priv_data *)
+                           tgtdev->starget->hostdata;
+                       atomic_inc(&stgt_priv->block_io);
+                       mpi3mr_tgtdev_put(tgtdev);
+               }
+       }
+
+       mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
+       mrioc->bsg_cmds.is_waiting = 1;
+       mrioc->bsg_cmds.callback = NULL;
+       mrioc->bsg_cmds.is_sense = 0;
+       mrioc->bsg_cmds.sensebuf = sense_buff_k;
+       memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
+       mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
+       if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
+               dprint_bsg_info(mrioc,
+                   "%s: posting bsg request to the controller\n", __func__);
+               dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+                   "bsg_mpi3_req");
+               if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+                       drv_buf_iter = &drv_bufs[0];
+                       dprint_dump(drv_buf_iter->kern_buf,
+                           drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+               }
+       }
+
+       init_completion(&mrioc->bsg_cmds.done);
+       rval = mpi3mr_admin_request_post(mrioc, mpi_req,
+           MPI3MR_ADMIN_REQ_FRAME_SZ, 0);
+
+       if (rval) {
+               mrioc->bsg_cmds.is_waiting = 0;
+               dprint_bsg_err(mrioc,
+                   "%s: posting bsg request failed\n", __func__);
+               rval = -EAGAIN;
+               goto out_unlock;
+       }
+       wait_for_completion_timeout(&mrioc->bsg_cmds.done,
+           (karg->timeout * HZ));
+       if (block_io && stgt_priv)
+               atomic_dec(&stgt_priv->block_io);
+       if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+               mrioc->bsg_cmds.is_waiting = 0;
+               rval = -EAGAIN;
+               if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
+                       goto out_unlock;
+               dprint_bsg_err(mrioc,
+                   "%s: bsg request timed out after %d seconds\n", __func__,
+                   karg->timeout);
+               if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) {
+                       dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+                           "bsg_mpi3_req");
+                       if (mpi_header->function ==
+                           MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+                               drv_buf_iter = &drv_bufs[0];
+                               dprint_dump(drv_buf_iter->kern_buf,
+                                   drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+                       }
+               }
+
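+               /*
+                * Try to recover the timed out command with a target reset
+                * first and escalate to a controller reset if it is still
+                * not completed.
+                */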
+               if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
+                   (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
+                       mpi3mr_issue_tm(mrioc,
+                           MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+                           mpi_header->function_dependent, 0,
+                           MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
+                           &mrioc->host_tm_cmds, &resp_code, NULL);
+               if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
+                   !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
+                       mpi3mr_soft_reset_handler(mrioc,
+                           MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
+               goto out_unlock;
+       }
+       dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);
+
+       if (mrioc->prp_list_virt) {
+               dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
+                   mrioc->prp_list_virt, mrioc->prp_list_dma);
+               mrioc->prp_list_virt = NULL;
+       }
+
+       if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+            != MPI3_IOCSTATUS_SUCCESS) {
+               dprint_bsg_info(mrioc,
+                   "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
+                   __func__,
+                   (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+                   mrioc->bsg_cmds.ioc_loginfo);
+       }
+
+       if ((mpirep_offset != 0xFF) &&
+           drv_bufs[mpirep_offset].bsg_buf_len) {
+               drv_buf_iter = &drv_bufs[mpirep_offset];
+               drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
+                                          mrioc->reply_sz);
+               bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
+
+               if (!bsg_reply_buf) {
+                       rval = -ENOMEM;
+                       goto out_unlock;
+               }
+               if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
+                       bsg_reply_buf->mpi_reply_type =
+                               MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
+                       memcpy(bsg_reply_buf->reply_buf,
+                           mrioc->bsg_cmds.reply, mrioc->reply_sz);
+               } else {
+                       bsg_reply_buf->mpi_reply_type =
+                               MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
+                       status_desc = (struct mpi3_status_reply_descriptor *)
+                           bsg_reply_buf->reply_buf;
+                       status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
+                       status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
+               }
+               tmplen = min(drv_buf_iter->kern_buf_len,
+                       drv_buf_iter->bsg_buf_len);
+               memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
+       }
+
+       if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
+           mrioc->bsg_cmds.is_sense) {
+               drv_buf_iter = &drv_bufs[erb_offset];
+               tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
+               memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
+       }
+
+       drv_buf_iter = drv_bufs;
+       for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+               if (drv_buf_iter->data_dir == DMA_NONE)
+                       continue;
+               if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
+                       tmplen = min(drv_buf_iter->kern_buf_len,
+                                    drv_buf_iter->bsg_buf_len);
+                       memcpy(drv_buf_iter->bsg_buf,
+                              drv_buf_iter->kern_buf, tmplen);
+               }
+       }
+
+out_unlock:
+       if (din_buf) {
+               *reply_payload_rcv_len =
+                       sg_copy_from_buffer(job->reply_payload.sg_list,
+                                           job->reply_payload.sg_cnt,
+                                           din_buf, job->reply_payload.payload_len);
+       }
+       mrioc->bsg_cmds.is_sense = 0;
+       mrioc->bsg_cmds.sensebuf = NULL;
+       mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
+       mutex_unlock(&mrioc->bsg_cmds.mutex);
+out:
+       kfree(sense_buff_k);
+       kfree(dout_buf);
+       kfree(din_buf);
+       kfree(mpi_req);
+       if (drv_bufs) {
+               drv_buf_iter = drv_bufs;
+               for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+                       if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma)
+                               dma_free_coherent(&mrioc->pdev->dev,
+                                   drv_buf_iter->kern_buf_len,
+                                   drv_buf_iter->kern_buf,
+                                   drv_buf_iter->kern_buf_dma);
+               }
+               kfree(drv_bufs);
+       }
+       kfree(bsg_reply_buf);
+       return rval;
+}
+
+/**
+ * mpi3mr_app_save_logdata - Save Log Data events
+ * @mrioc: Adapter instance reference
+ * @event_data: event data associated with log data event
+ * @event_data_size: event data size to copy
+ *
+ * If log data event caching is enabled by the applications,
+ * then this function saves the log data in the circular queue
+ * and sends the async signal SIGIO to indicate that there is an
+ * async event from the firmware to the event monitoring
+ * applications.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+       u16 event_data_size)
+{
+       u32 index = mrioc->logdata_buf_idx, sz;
+       struct mpi3mr_logdata_entry *entry;
+
+       if (!(mrioc->logdata_buf))
+               return;
+
+       entry = (struct mpi3mr_logdata_entry *)
+               (mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
+       entry->valid_entry = 1;
+       sz = min(mrioc->logdata_entry_sz, event_data_size);
+       memcpy(entry->data, event_data, sz);
+       mrioc->logdata_buf_idx =
+               ((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
+       atomic64_inc(&event_counter);
+}
+
+/**
+ * mpi3mr_bsg_request - bsg request entry point
+ * @job: BSG job reference
+ *
+ * This is the driver's entry point for bsg requests.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static int mpi3mr_bsg_request(struct bsg_job *job)
+{
+       long rval = -EINVAL;
+       unsigned int reply_payload_rcv_len = 0;
+
+       struct mpi3mr_bsg_packet *bsg_req = job->request;
+
+       switch (bsg_req->cmd_type) {
+       case MPI3MR_DRV_CMD:
+               rval = mpi3mr_bsg_process_drv_cmds(job);
+               break;
+       case MPI3MR_MPT_CMD:
+               rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len);
+               break;
+       default:
+               pr_err("%s: unsupported BSG command(0x%08x)\n",
+                   MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
+               break;
+       }
+
+       bsg_job_done(job, rval, reply_payload_rcv_len);
+
+       return 0;
+}
+
+/**
+ * mpi3mr_bsg_exit - de-registration from bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver unload and all
+ * bsg resources allocated during load will be freed.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
+{
+       if (!mrioc->bsg_queue)
+               return;
+
+       bsg_remove_queue(mrioc->bsg_queue);
+       mrioc->bsg_queue = NULL;
+
+       device_del(mrioc->bsg_dev);
+       put_device(mrioc->bsg_dev);
+       kfree(mrioc->bsg_dev);
+}
+
+/**
+ * mpi3mr_bsg_node_release - release bsg device node
+ * @dev: bsg device node
+ *
+ * Decrements the bsg device reference count.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_bsg_node_release(struct device *dev)
+{
+       put_device(dev);
+}
+
+/**
+ * mpi3mr_bsg_init - registration with bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver load and it will
+ * register the driver with the bsg layer.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
+{
+       mrioc->bsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+       if (!mrioc->bsg_dev) {
+               ioc_err(mrioc, "bsg device mem allocation failed\n");
+               return;
+       }
+
+       device_initialize(mrioc->bsg_dev);
+       dev_set_name(mrioc->bsg_dev, "mpi3mrctl%u", mrioc->id);
+
+       if (device_add(mrioc->bsg_dev)) {
+               ioc_err(mrioc, "%s: bsg device add failed\n",
+                   dev_name(mrioc->bsg_dev));
+               goto err_device_add;
+       }
+
+       mrioc->bsg_dev->release = mpi3mr_bsg_node_release;
+
+       mrioc->bsg_queue = bsg_setup_queue(mrioc->bsg_dev, dev_name(mrioc->bsg_dev),
+                       mpi3mr_bsg_request, NULL, 0);
+       if (IS_ERR(mrioc->bsg_queue)) {
+               ioc_err(mrioc, "%s: bsg registration failed\n",
+                   dev_name(mrioc->bsg_dev));
+               goto err_setup_queue;
+       }
+
+       blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
+       blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
+
+       return;
+
+err_setup_queue:
+       device_del(mrioc->bsg_dev);
+       put_device(mrioc->bsg_dev);
+err_device_add:
+       kfree(mrioc->bsg_dev);
+}
+
+/**
+ * version_fw_show - SysFS callback for firmware version read
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware version
+ */
+static ssize_t
+version_fw_show(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct mpi3mr_ioc *mrioc = shost_priv(shost);
+       struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
+
+       return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
+           fwver->gen_major, fwver->gen_minor, fwver->ph_major,
+           fwver->ph_minor, fwver->cust_id, fwver->build_num);
+}
+static DEVICE_ATTR_RO(version_fw);
+
+/**
+ * fw_queue_depth_show - SysFS callback for firmware max cmds
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware max commands
+ */
+static ssize_t
+fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+       return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
+}
+static DEVICE_ATTR_RO(fw_queue_depth);
+
+/**
+ * op_req_q_count_show - SysFS callback for request queue count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying request queue count
+ */
+static ssize_t
+op_req_q_count_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+       return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
+}
+static DEVICE_ATTR_RO(op_req_q_count);
+
+/**
+ * reply_queue_count_show - SysFS callback for reply queue count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying reply queue count
+ */
+static ssize_t
+reply_queue_count_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+       return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
+}
+
+static DEVICE_ATTR_RO(reply_queue_count);
+
+/**
+ * logging_level_show - Show controller debug level
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * A sysfs 'read/write' shost attribute, to show the current
+ * debug log level used by the driver for the specific
+ * controller.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+logging_level_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+       return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
+}
+
+/**
+ * logging_level_store- Change controller debug level
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ * @count: size of the buffer
+ *
+ * A sysfs 'read/write' shost attribute, to change the current
+ * debug log level used by the driver for the specific
+ * controller.
+ *
+ * Return: strlen() return
+ */
+static ssize_t
+logging_level_store(struct device *dev,
+       struct device_attribute *attr,
+       const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct mpi3mr_ioc *mrioc = shost_priv(shost);
+       int val = 0;
+
+       if (kstrtoint(buf, 0, &val) != 0)
+               return -EINVAL;
+
+       mrioc->logging_level = val;
+       ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
+       return strlen(buf);
+}
+static DEVICE_ATTR_RW(logging_level);
+
+/**
+ * adp_state_show - SysFS callback for adapter state show
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying adapter state
+ */
+static ssize_t
+adp_state_show(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct mpi3mr_ioc *mrioc = shost_priv(shost);
+       enum mpi3mr_iocstate ioc_state;
+       uint8_t adp_state;
+
+       ioc_state = mpi3mr_get_iocstate(mrioc);
+       if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+               adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+       else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+               adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+       else if (ioc_state == MRIOC_STATE_FAULT)
+               adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+       else
+               adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+       return sysfs_emit(buf, "%u\n", adp_state);
+}
+
+static DEVICE_ATTR_RO(adp_state);
+
+static struct attribute *mpi3mr_host_attrs[] = {
+       &dev_attr_version_fw.attr,
+       &dev_attr_fw_queue_depth.attr,
+       &dev_attr_op_req_q_count.attr,
+       &dev_attr_reply_queue_count.attr,
+       &dev_attr_logging_level.attr,
+       &dev_attr_adp_state.attr,
+       NULL,
+};
+
+static const struct attribute_group mpi3mr_host_attr_group = {
+       .attrs = mpi3mr_host_attrs
+};
+
+const struct attribute_group *mpi3mr_host_groups[] = {
+       &mpi3mr_host_attr_group,
+       NULL,
+};
+
+
+/*
+ * SCSI Device attributes under sysfs
+ */
+
+/**
+ * sas_address_show - SysFS callback for dev SAS address display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying SAS address of the
+ * specific SAS/SATA end device.
+ */
+static ssize_t
+sas_address_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct mpi3mr_sdev_priv_data *sdev_priv_data;
+       struct mpi3mr_stgt_priv_data *tgt_priv_data;
+       struct mpi3mr_tgt_dev *tgtdev;
+
+       sdev_priv_data = sdev->hostdata;
+       if (!sdev_priv_data)
+               return 0;
+
+       tgt_priv_data = sdev_priv_data->tgt_priv_data;
+       if (!tgt_priv_data)
+               return 0;
+       tgtdev = tgt_priv_data->tgt_dev;
+       if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
+               return 0;
+       return sysfs_emit(buf, "0x%016llx\n",
+           (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
+}
+
+static DEVICE_ATTR_RO(sas_address);
+
+/**
+ * device_handle_show - SysFS callback for device handle display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware internal
+ * device handle of the specific device.
+ */
+static ssize_t
+device_handle_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct mpi3mr_sdev_priv_data *sdev_priv_data;
+       struct mpi3mr_stgt_priv_data *tgt_priv_data;
+       struct mpi3mr_tgt_dev *tgtdev;
+
+       sdev_priv_data = sdev->hostdata;
+       if (!sdev_priv_data)
+               return 0;
+
+       tgt_priv_data = sdev_priv_data->tgt_priv_data;
+       if (!tgt_priv_data)
+               return 0;
+       tgtdev = tgt_priv_data->tgt_dev;
+       if (!tgtdev)
+               return 0;
+       return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
+}
+
+static DEVICE_ATTR_RO(device_handle);
+
+/**
+ * persistent_id_show - SysFS callback for persistent ID display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying the persistent ID of
+ * the specific device.
+ */
+static ssize_t
+persistent_id_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct mpi3mr_sdev_priv_data *sdev_priv_data;
+       struct mpi3mr_stgt_priv_data *tgt_priv_data;
+       struct mpi3mr_tgt_dev *tgtdev;
+
+       sdev_priv_data = sdev->hostdata;
+       if (!sdev_priv_data)
+               return 0;
+
+       tgt_priv_data = sdev_priv_data->tgt_priv_data;
+       if (!tgt_priv_data)
+               return 0;
+       tgtdev = tgt_priv_data->tgt_dev;
+       if (!tgtdev)
+               return 0;
+       return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
+}
+static DEVICE_ATTR_RO(persistent_id);
+
+static struct attribute *mpi3mr_dev_attrs[] = {
+       &dev_attr_sas_address.attr,
+       &dev_attr_device_handle.attr,
+       &dev_attr_persistent_id.attr,
+       NULL,
+};
+
+static const struct attribute_group mpi3mr_dev_attr_group = {
+       .attrs = mpi3mr_dev_attrs
+};
+
+const struct attribute_group *mpi3mr_dev_groups[] = {
+       &mpi3mr_dev_attr_group,
+       NULL,
+};
index c798244..2464c40 100644 (file)
@@ -23,8 +23,8 @@
 #define MPI3_DEBUG_RESET               0x00000020
 #define MPI3_DEBUG_SCSI_ERROR          0x00000040
 #define MPI3_DEBUG_REPLY               0x00000080
-#define MPI3_DEBUG_IOCTL_ERROR         0x00008000
-#define MPI3_DEBUG_IOCTL_INFO          0x00010000
+#define MPI3_DEBUG_BSG_ERROR           0x00008000
+#define MPI3_DEBUG_BSG_INFO            0x00010000
 #define MPI3_DEBUG_SCSI_INFO           0x00020000
 #define MPI3_DEBUG                     0x01000000
 #define MPI3_DEBUG_SG                  0x02000000
        } while (0)
 
 
-#define dprint_ioctl_info(ioc, fmt, ...) \
+#define dprint_bsg_info(ioc, fmt, ...) \
        do { \
-               if (ioc->logging_level & MPI3_DEBUG_IOCTL_INFO) \
+               if (ioc->logging_level & MPI3_DEBUG_BSG_INFO) \
                        pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
        } while (0)
 
-#define dprint_ioctl_err(ioc, fmt, ...) \
+#define dprint_bsg_err(ioc, fmt, ...) \
        do { \
-               if (ioc->logging_level & MPI3_DEBUG_IOCTL_ERROR) \
+               if (ioc->logging_level & MPI3_DEBUG_BSG_ERROR) \
                        pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
        } while (0)
 
 #endif /* MPT3SAS_DEBUG_H_INCLUDED */
 
 /**
+ * dprint_dump - print contents of a memory buffer
+ * @req: Pointer to a memory buffer
+ * @sz: Memory buffer size in bytes
+ * @name_string: Name string to identify the buffer type
+ */
+static inline void
+dprint_dump(void *req, int sz, const char *name_string)
+{
+       int i;
+       __le32 *mfp = (__le32 *)req;
+
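+       /* dump the buffer as 32-bit words, eight per line */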
+       sz = sz/4;
+       if (name_string)
+               pr_info("%s:\n\t", name_string);
+       else
+               pr_info("request:\n\t");
+       for (i = 0; i < sz; i++) {
+               if (i && ((i % 8) == 0))
+                       pr_info("\n\t");
+               pr_info("%08x ", le32_to_cpu(mfp[i]));
+       }
+       pr_info("\n");
+}
+
+/**
  * dprint_dump_req - print message frame contents
  * @req: pointer to message frame
  * @sz: number of dwords
index e25c024..f1d4ea8 100644 (file)
@@ -15,6 +15,8 @@ mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
        struct mpi3_ioc_facts_data *facts_data);
+static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_drv_cmd *drv_cmd);
 
 static int poll_queues;
 module_param(poll_queues, int, 0444);
@@ -297,8 +299,14 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
        switch (host_tag) {
        case MPI3MR_HOSTTAG_INITCMDS:
                return &mrioc->init_cmds;
+       case MPI3MR_HOSTTAG_BSG_CMDS:
+               return &mrioc->bsg_cmds;
        case MPI3MR_HOSTTAG_BLK_TMS:
                return &mrioc->host_tm_cmds;
+       case MPI3MR_HOSTTAG_PEL_ABORT:
+               return &mrioc->pel_abort_cmd;
+       case MPI3MR_HOSTTAG_PEL_WAIT:
+               return &mrioc->pel_cmds;
        case MPI3MR_HOSTTAG_INVALID:
                if (def_reply && def_reply->function ==
                    MPI3_FUNCTION_EVENT_NOTIFICATION)
@@ -865,10 +873,10 @@ static const struct {
 } mpi3mr_reset_reason_codes[] = {
        { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
        { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
-       { MPI3MR_RESET_FROM_IOCTL, "application invocation" },
+       { MPI3MR_RESET_FROM_APP, "application invocation" },
        { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
        { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
-       { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
+       { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
        { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
        { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
        { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
@@ -2813,6 +2821,10 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
        if (!mrioc->init_cmds.reply)
                goto out_failed;
 
+       mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+       if (!mrioc->bsg_cmds.reply)
+               goto out_failed;
+
        for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
                mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
                    GFP_KERNEL);
@@ -2831,6 +2843,14 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
        if (!mrioc->host_tm_cmds.reply)
                goto out_failed;
 
+       mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+       if (!mrioc->pel_cmds.reply)
+               goto out_failed;
+
+       mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+       if (!mrioc->pel_abort_cmd.reply)
+               goto out_failed;
+
        mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
        if (mrioc->facts.max_devhandle % 8)
                mrioc->dev_handle_bitmap_sz++;
@@ -3728,6 +3748,18 @@ retry_init:
                goto out_failed;
        }
 
+       if (!mrioc->pel_seqnum_virt) {
+               dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
+               mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
+               mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+                   mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
+                   GFP_KERNEL);
+               if (!mrioc->pel_seqnum_virt) {
+                       retval = -ENOMEM;
+                       goto out_failed_noretry;
+               }
+       }
+
        retval = mpi3mr_enable_events(mrioc);
        if (retval) {
                ioc_err(mrioc, "failed to enable events %d\n",
@@ -3837,6 +3869,18 @@ retry_init:
                goto out_failed;
        }
 
+       if (!mrioc->pel_seqnum_virt) {
+               dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
+               mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
+               mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+                   mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
+                   GFP_KERNEL);
+               if (!mrioc->pel_seqnum_virt) {
+                       retval = -ENOMEM;
+                       goto out_failed_noretry;
+               }
+       }
+
        if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
                ioc_err(mrioc,
                    "cannot create minimum number of operational queues expected:%d created:%d\n",
@@ -3948,8 +3992,14 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
 
        if (mrioc->init_cmds.reply) {
                memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+               memset(mrioc->bsg_cmds.reply, 0,
+                   sizeof(*mrioc->bsg_cmds.reply));
                memset(mrioc->host_tm_cmds.reply, 0,
                    sizeof(*mrioc->host_tm_cmds.reply));
+               memset(mrioc->pel_cmds.reply, 0,
+                   sizeof(*mrioc->pel_cmds.reply));
+               memset(mrioc->pel_abort_cmd.reply, 0,
+                   sizeof(*mrioc->pel_abort_cmd.reply));
                for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
                        memset(mrioc->dev_rmhs_cmds[i].reply, 0,
                            sizeof(*mrioc->dev_rmhs_cmds[i].reply));
@@ -4050,9 +4100,18 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
        kfree(mrioc->init_cmds.reply);
        mrioc->init_cmds.reply = NULL;
 
+       kfree(mrioc->bsg_cmds.reply);
+       mrioc->bsg_cmds.reply = NULL;
+
        kfree(mrioc->host_tm_cmds.reply);
        mrioc->host_tm_cmds.reply = NULL;
 
+       kfree(mrioc->pel_cmds.reply);
+       mrioc->pel_cmds.reply = NULL;
+
+       kfree(mrioc->pel_abort_cmd.reply);
+       mrioc->pel_abort_cmd.reply = NULL;
+
        for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
                kfree(mrioc->evtack_cmds[i].reply);
                mrioc->evtack_cmds[i].reply = NULL;
@@ -4101,6 +4160,16 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
                    mrioc->admin_req_base, mrioc->admin_req_dma);
                mrioc->admin_req_base = NULL;
        }
+
+       if (mrioc->pel_seqnum_virt) {
+               dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
+                   mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
+               mrioc->pel_seqnum_virt = NULL;
+       }
+
+       kfree(mrioc->logdata_buf);
+       mrioc->logdata_buf = NULL;
+
 }
 
 /**
@@ -4235,6 +4304,8 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
 
        cmdptr = &mrioc->init_cmds;
        mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+       cmdptr = &mrioc->bsg_cmds;
+       mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
        cmdptr = &mrioc->host_tm_cmds;
        mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
 
@@ -4247,6 +4318,254 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
                cmdptr = &mrioc->evtack_cmds[i];
                mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
        }
+
+       cmdptr = &mrioc->pel_cmds;
+       mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+       cmdptr = &mrioc->pel_abort_cmd;
+       mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+}
+
+/**
+ * mpi3mr_pel_wait_post - Issue PEL Wait
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issue PEL Wait MPI request through admin queue and return.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_drv_cmd *drv_cmd)
+{
+       struct mpi3_pel_req_action_wait pel_wait;
+
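+       /* clear any stale abort indication before arming a new wait */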
+       mrioc->pel_abort_requested = false;
+
+       memset(&pel_wait, 0, sizeof(pel_wait));
+       drv_cmd->state = MPI3MR_CMD_PENDING;
+       drv_cmd->is_waiting = 0;
+       drv_cmd->callback = mpi3mr_pel_wait_complete;
+       drv_cmd->ioc_status = 0;
+       drv_cmd->ioc_loginfo = 0;
+       pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+       pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+       pel_wait.action = MPI3_PEL_ACTION_WAIT;
+       pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
+       pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
+       pel_wait.class = cpu_to_le16(mrioc->pel_class);
+       pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
+       dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
+           mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
+
+       if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
+               dprint_bsg_err(mrioc,
+                           "Issuing PELWait: Admin post failed\n");
+               drv_cmd->state = MPI3MR_CMD_NOTUSED;
+               drv_cmd->callback = NULL;
+               drv_cmd->retry_count = 0;
+               mrioc->pel_enabled = false;
+       }
+}
+
+/**
+ * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issue PEL get sequence number MPI request through admin queue
+ * and return.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_drv_cmd *drv_cmd)
+{
+       struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
+       u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+       int retval = 0;
+
+       memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
+       mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
+       mrioc->pel_cmds.is_waiting = 0;
+       mrioc->pel_cmds.ioc_status = 0;
+       mrioc->pel_cmds.ioc_loginfo = 0;
+       mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
+       pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+       pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+       pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
+       mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
+           mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
+
+       retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
+                       sizeof(pel_getseq_req), 0);
+       if (retval) {
+               if (drv_cmd) {
+                       drv_cmd->state = MPI3MR_CMD_NOTUSED;
+                       drv_cmd->callback = NULL;
+                       drv_cmd->retry_count = 0;
+               }
+               mrioc->pel_enabled = false;
+       }
+
+       return retval;
+}
+
+/**
+ * mpi3mr_pel_wait_complete - PELWait Completion callback
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the callback handler for the PEL wait request; the
+ * firmware completes a PEL wait request when it is aborted or a
+ * new PEL entry is available. This sends an AEN to the application
+ * and, if the PEL wait completion is not due to a PEL abort, also
+ * issues a request for the newest PEL sequence number.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_drv_cmd *drv_cmd)
+{
+       struct mpi3_pel_reply *pel_reply = NULL;
+       u16 ioc_status, pe_log_status;
+       bool do_retry = false;
+
+       if (drv_cmd->state & MPI3MR_CMD_RESET)
+               goto cleanup_drv_cmd;
+
+       ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+       if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+               ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+                       __func__, ioc_status, drv_cmd->ioc_loginfo);
+               dprint_bsg_err(mrioc,
+                   "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
+                   ioc_status, drv_cmd->ioc_loginfo);
+               do_retry = true;
+       }
+
+       if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+               pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
+
+       if (!pel_reply) {
+               dprint_bsg_err(mrioc,
+                   "pel_wait: failed due to no reply\n");
+               goto out_failed;
+       }
+
+       pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
+       if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
+           (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
+               ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
+                       __func__, pe_log_status);
+               dprint_bsg_err(mrioc,
+                   "pel_wait: failed due to pel_log_status(0x%04x)\n",
+                   pe_log_status);
+               do_retry = true;
+       }
+
+       if (do_retry) {
+               if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
+                       drv_cmd->retry_count++;
+                       dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
+                           drv_cmd->retry_count);
+                       mpi3mr_pel_wait_post(mrioc, drv_cmd);
+                       return;
+               }
+               dprint_bsg_err(mrioc,
+                   "pel_wait: failed after all retries(%d)\n",
+                   drv_cmd->retry_count);
+               goto out_failed;
+       }
+       atomic64_inc(&event_counter);
+       if (!mrioc->pel_abort_requested) {
+               mrioc->pel_cmds.retry_count = 0;
+               mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
+       }
+
+       return;
+out_failed:
+       mrioc->pel_enabled = false;
+cleanup_drv_cmd:
+       drv_cmd->state = MPI3MR_CMD_NOTUSED;
+       drv_cmd->callback = NULL;
+       drv_cmd->retry_count = 0;
+}
+
+/**
+ * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is a callback handler for the PEL get sequence number
+ * request; a new PEL wait request will be issued to the
+ * firmware from this callback.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_drv_cmd *drv_cmd)
+{
+       struct mpi3_pel_reply *pel_reply = NULL;
+       struct mpi3_pel_seq *pel_seqnum_virt;
+       u16 ioc_status;
+       bool do_retry = false;
+
+       pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
+
+       if (drv_cmd->state & MPI3MR_CMD_RESET)
+               goto cleanup_drv_cmd;
+
+       ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+       if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+               dprint_bsg_err(mrioc,
+                   "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
+                   ioc_status, drv_cmd->ioc_loginfo);
+               do_retry = true;
+       }
+
+       if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+               pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
+       if (!pel_reply) {
+               dprint_bsg_err(mrioc,
+                   "pel_get_seqnum: failed due to no reply\n");
+               goto out_failed;
+       }
+
+       if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
+               dprint_bsg_err(mrioc,
+                   "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
+                   le16_to_cpu(pel_reply->pe_log_status));
+               do_retry = true;
+       }
+
+       if (do_retry) {
+               if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
+                       drv_cmd->retry_count++;
+                       dprint_bsg_err(mrioc,
+                           "pel_get_seqnum: retrying(%d)\n",
+                           drv_cmd->retry_count);
+                       mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
+                       return;
+               }
+
+               dprint_bsg_err(mrioc,
+                   "pel_get_seqnum: failed after all retries(%d)\n",
+                   drv_cmd->retry_count);
+               goto out_failed;
+       }
+       mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
+       drv_cmd->retry_count = 0;
+       mpi3mr_pel_wait_post(mrioc, drv_cmd);
+
+       return;
+out_failed:
+       mrioc->pel_enabled = false;
+cleanup_drv_cmd:
+       drv_cmd->state = MPI3MR_CMD_NOTUSED;
+       drv_cmd->callback = NULL;
+       drv_cmd->retry_count = 0;
 }
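Taken together, the two completion callbacks above form a simple loop: a PEL wait stays outstanding in the firmware until a new event arrives or the wait is aborted; on a normal completion the driver fetches the newest sequence number, bumps it by one and re-arms the wait, retrying a bounded number of times on errors. The sketch below is a minimal userspace model of that control flow, not driver code — the stubbed functions and the constants are assumptions made only for illustration.

/*
 * Minimal userspace model of the PEL wait / get-seqnum loop described
 * above.  post_wait() and get_newest_seqnum() are stand-ins for the
 * real admin-queue requests.
 */
#include <stdbool.h>
#include <stdio.h>

static bool post_wait(unsigned int from_seqnum)
{
	printf("PEL wait armed from seqnum %u\n", from_seqnum);
	return true;			/* pretend the admin post succeeded */
}

static bool get_newest_seqnum(unsigned int *newest)
{
	static unsigned int fw_seqnum = 41;

	*newest = ++fw_seqnum;		/* pretend firmware logged one event */
	return true;
}

int main(void)
{
	unsigned int seqnum = 0;

	if (!post_wait(seqnum))
		return 1;

	/* one iteration per PEL wait completion that was not an abort */
	for (int completions = 0; completions < 3; completions++) {
		unsigned int newest;

		if (!get_newest_seqnum(&newest))
			return 1;	/* the driver would retry here */
		seqnum = newest + 1;	/* mirrors pel_newest_seqnum + 1 */
		if (!post_wait(seqnum))
			return 1;
	}
	return 0;
}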
 
 /**
@@ -4258,7 +4577,7 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
  * This is a handler for recovering the controller by issuing a soft
  * reset or a diag fault reset.  This is a blocking function and
  * when one reset is executed if any other resets they will be
- * blocked. All IOCTLs/IO will be blocked during the reset. If
+ * blocked. All BSG requests will be blocked during the reset. If
  * controller reset is successful then the controller will be
  * reinitialized, otherwise the controller will be marked as not
  * recoverable
@@ -4305,6 +4624,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
            mpi3mr_reset_rc_name(reset_reason));
 
        mrioc->reset_in_progress = 1;
+       mrioc->stop_bsgs = 1;
        mrioc->prev_reset_result = -1;
 
        if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
@@ -4369,6 +4689,12 @@ out:
        if (!retval) {
                mrioc->diagsave_timeout = 0;
                mrioc->reset_in_progress = 0;
+               mrioc->pel_abort_requested = 0;
+               if (mrioc->pel_enabled) {
+                       mrioc->pel_cmds.retry_count = 0;
+                       mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
+               }
+
                mpi3mr_rfresh_tgtdevs(mrioc);
                mrioc->ts_update_counter = 0;
                spin_lock_irqsave(&mrioc->watchdog_lock, flags);
@@ -4377,6 +4703,9 @@ out:
                            &mrioc->watchdog_work,
                            msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
                spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+               mrioc->stop_bsgs = 0;
+               if (mrioc->pel_enabled)
+                       atomic64_inc(&event_counter);
        } else {
                mpi3mr_issue_reset(mrioc,
                    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
index f7cd70a..d8c195b 100644 (file)
@@ -14,6 +14,7 @@ LIST_HEAD(mrioc_list);
 DEFINE_SPINLOCK(mrioc_list_lock);
 static int mrioc_ids;
 static int warn_non_secure_ctlr;
+atomic64_t event_counter;
 
 MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
 MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
@@ -634,7 +635,7 @@ found_tgtdev:
  *
  * Return: Target device reference.
  */
-static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
        struct mpi3mr_ioc *mrioc, u16 handle)
 {
        struct mpi3mr_tgt_dev *tgtdev;
@@ -910,9 +911,11 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
 
        list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
            list) {
-               if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
-                   tgtdev->host_exposed) {
-                       mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+               if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
+                       dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
+                           tgtdev->perst_id);
+                       if (tgtdev->host_exposed)
+                               mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
                        mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
                        mpi3mr_tgtdev_put(tgtdev);
                }
@@ -1416,6 +1419,23 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
 }
 
 /**
+ * mpi3mr_logdata_evt_bh -  Log data event bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Extracts the event data and calls application interfacing
+ * function to process the event further.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_fwevt *fwevt)
+{
+       mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
+           fwevt->event_data_size);
+}
+
+/**
  * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
  * @mrioc: Adapter instance reference
  * @fwevt: Firmware event reference
@@ -1467,6 +1487,11 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
                mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
                break;
        }
+       case MPI3_EVENT_LOG_DATA:
+       {
+               mpi3mr_logdata_evt_bh(mrioc, fwevt);
+               break;
+       }
        default:
                break;
        }
@@ -2298,6 +2323,7 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
                break;
        }
        case MPI3_EVENT_DEVICE_INFO_CHANGED:
+       case MPI3_EVENT_LOG_DATA:
        {
                process_evt_bh = 1;
                break;
@@ -2996,7 +3022,7 @@ inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
  *
  * Return: 0 on success, non-zero on errors
  */
-static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
        u16 handle, uint lun, u16 htag, ulong timeout,
        struct mpi3mr_drv_cmd *drv_cmd,
        u8 *resp_code, struct scsi_cmnd *scmd)
@@ -3589,6 +3615,7 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost,
 
        mpi3mr_start_watchdog(mrioc);
        mrioc->is_driver_loading = 0;
+       mrioc->stop_bsgs = 0;
        return 1;
 }
 
@@ -3700,6 +3727,10 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
                return -ENXIO;
 
        mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
+
+       sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
+       blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
+
        switch (tgt_dev->dev_type) {
        case MPI3_DEVICE_DEVFORM_PCIE:
                /*The block layer hw sector size = 512*/
@@ -3971,6 +4002,12 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
        int iprio_class;
        u8 is_pcie_dev = 0;
 
+       if (mrioc->unrecoverable) {
+               scmd->result = DID_ERROR << 16;
+               scsi_done(scmd);
+               goto out;
+       }
+
        sdev_priv_data = scmd->device->hostdata;
        if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
                scmd->result = DID_NO_CONNECT << 16;
@@ -4109,6 +4146,8 @@ static struct scsi_host_template mpi3mr_driver_template = {
        .max_segment_size               = 0xffffffff,
        .track_queue_depth              = 1,
        .cmd_size                       = sizeof(struct scmd_priv),
+       .shost_groups                   = mpi3mr_host_groups,
+       .sdev_groups                    = mpi3mr_dev_groups,
 };
 
 /**
@@ -4259,6 +4298,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mutex_init(&mrioc->reset_mutex);
        mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
        mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
+       mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
 
        for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
                mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
@@ -4271,6 +4311,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mrioc->logging_level = logging_level;
        mrioc->shost = shost;
        mrioc->pdev = pdev;
+       mrioc->stop_bsgs = 1;
 
        /* init shost parameters */
        shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
@@ -4345,6 +4386,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        scsi_scan_host(shost);
+       mpi3mr_bsg_init(mrioc);
        return retval;
 
 addhost_failed:
@@ -4389,6 +4431,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
        while (mrioc->reset_in_progress || mrioc->is_driver_loading)
                ssleep(1);
 
+       mpi3mr_bsg_exit(mrioc);
        mrioc->stop_drv_processing = 1;
        mpi3mr_cleanup_fwevt_list(mrioc);
        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
@@ -4563,6 +4606,12 @@ static struct pci_driver mpi3mr_pci_driver = {
 #endif
 };
 
+static ssize_t event_counter_show(struct device_driver *dd, char *buf)
+{
+       return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
+}
+static DRIVER_ATTR_RO(event_counter);
+
 static int __init mpi3mr_init(void)
 {
        int ret_val;
@@ -4571,6 +4620,16 @@ static int __init mpi3mr_init(void)
            MPI3MR_DRIVER_VERSION);
 
        ret_val = pci_register_driver(&mpi3mr_pci_driver);
+       if (ret_val) {
+               pr_err("%s failed to load due to pci register driver failure\n",
+                   MPI3MR_DRIVER_NAME);
+               return ret_val;
+       }
+
+       ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
+                                    &driver_attr_event_counter);
+       if (ret_val)
+               pci_unregister_driver(&mpi3mr_pci_driver);
 
        return ret_val;
 }
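The event_counter attribute registered above is a driver-level sysfs file, so with the default driver name it should appear as /sys/bus/pci/drivers/mpi3mr/event_counter; the exact path is an assumption based on how PCI driver attributes are normally exposed, not something stated in the patch. A minimal userspace reader might look like this:

/* Hedged example: reads the assumed driver attribute path once and
 * prints the 64-bit event counter the driver exports. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/pci/drivers/mpi3mr/event_counter";
	unsigned long long events;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &events) == 1)
		printf("PEL events seen so far: %llu\n", events);
	fclose(f);
	return 0;
}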
@@ -4585,6 +4644,8 @@ static void __exit mpi3mr_exit(void)
                pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
                    MPI3MR_DRIVER_VERSION);
 
+       driver_remove_file(&mpi3mr_pci_driver.driver,
+                          &driver_attr_event_counter);
        pci_unregister_driver(&mpi3mr_pci_driver);
 }
 
index 538d2c0..37d46ae 100644 (file)
@@ -3692,10 +3692,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
                }
 
                for (i = 0; i < ioc->combined_reply_index_count; i++) {
-                       ioc->replyPostRegisterIndex[i] = (resource_size_t *)
-                            ((u8 __force *)&ioc->chip->Doorbell +
-                            MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
-                            (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
+                       ioc->replyPostRegisterIndex[i] =
+                               (resource_size_t __iomem *)
+                               ((u8 __force *)&ioc->chip->Doorbell +
+                                MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
+                                (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
                }
        }
 
@@ -4312,7 +4313,7 @@ _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
        descriptor.SMID = cpu_to_le16(smid);
 
-       writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+       writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
 }
 
 /**
@@ -4334,7 +4335,7 @@ _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
        descriptor.SMID = cpu_to_le16(smid);
 
-       writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+       writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
 }
 
 /**
@@ -4357,7 +4358,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        descriptor.MSIxIndex = msix_task;
        descriptor.SMID = cpu_to_le16(smid);
 
-       writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+       writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
 }
 
 /**
@@ -4378,7 +4379,7 @@ _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
        descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
        descriptor.SMID = cpu_to_le16(smid);
 
-       writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+       writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
 }
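The four hunks above all drop the cpu_to_le32() wrapper around the value handed to writel(). writel() itself stores the value in little-endian order, so pre-swapping is redundant on little-endian hosts and a double swap on big-endian ones; it also trips sparse, since writel() takes a plain u32 while cpu_to_le32() returns an __le32. The same reasoning applies to the le16_to_cpu() wrappers removed around the Doorbell reads further down: base_readl() already returns CPU-order data, which is then masked to 16 bits.

/* Sketch of the contract assumed here: writel() takes a CPU-order u32
 * and performs whatever byte swapping the little-endian register needs,
 * so the caller must not swap first.
 *
 *   writel(cpu_to_le32(*request), reg);	// swapped twice on big-endian
 *   writel(*request, reg);			// correct
 */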
 
 /**
@@ -4752,7 +4753,7 @@ static void
 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
 {
        int i = 0;
-       char desc[16];
+       char desc[17] = {0};
        u32 iounit_pg1_flags;
        u32 bios_version;
 
@@ -6893,7 +6894,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
        /* send message 32-bits at a time */
        for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
-               writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
+               writel(request[i], &ioc->chip->Doorbell);
                if ((_base_wait_for_doorbell_ack(ioc, 5)))
                        failed = 1;
        }
@@ -6912,16 +6913,16 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        }
 
        /* read the first two 16-bits, it gives the total length of the reply */
-       reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
-           & MPI2_DOORBELL_DATA_MASK);
+       reply[0] = ioc->base_readl(&ioc->chip->Doorbell)
+               & MPI2_DOORBELL_DATA_MASK;
        writel(0, &ioc->chip->HostInterruptStatus);
        if ((_base_wait_for_doorbell_int(ioc, 5))) {
                ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
                        __LINE__);
                return -EFAULT;
        }
-       reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
-           & MPI2_DOORBELL_DATA_MASK);
+       reply[1] = ioc->base_readl(&ioc->chip->Doorbell)
+               & MPI2_DOORBELL_DATA_MASK;
        writel(0, &ioc->chip->HostInterruptStatus);
 
        for (i = 2; i < default_reply->MsgLength * 2; i++)  {
@@ -6933,9 +6934,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
                if (i >=  reply_bytes/2) /* overflow case */
                        ioc->base_readl(&ioc->chip->Doorbell);
                else
-                       reply[i] = le16_to_cpu(
-                           ioc->base_readl(&ioc->chip->Doorbell)
-                           & MPI2_DOORBELL_DATA_MASK);
+                       reply[i] = ioc->base_readl(&ioc->chip->Doorbell)
+                               & MPI2_DOORBELL_DATA_MASK;
                writel(0, &ioc->chip->HostInterruptStatus);
        }
 
index 949e98d..e584cf0 100644 (file)
@@ -77,8 +77,8 @@
 #define MPT3SAS_DRIVER_NAME            "mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION    "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION         "40.100.00.00"
-#define MPT3SAS_MAJOR_VERSION          40
+#define MPT3SAS_DRIVER_VERSION         "42.100.00.00"
+#define MPT3SAS_MAJOR_VERSION          42
 #define MPT3SAS_MINOR_VERSION          100
 #define MPT3SAS_BUILD_VERSION          0
 #define MPT3SAS_RELEASE_VERSION        00
@@ -1588,7 +1588,7 @@ struct MPT3SAS_ADAPTER {
        u8              combined_reply_index_count;
        u8              smp_affinity_enable;
        /* reply post register index */
-       resource_size_t **replyPostRegisterIndex;
+       resource_size_t __iomem **replyPostRegisterIndex;
 
        struct list_head delayed_tr_list;
        struct list_head delayed_tr_volume_list;
index d92ca14..84c87c2 100644 (file)
@@ -578,7 +578,7 @@ static int
 _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
        Mpi2SCSITaskManagementRequest_t *tm_request)
 {
-       u8 found = 0;
+       bool found = false;
        u16 smid;
        u16 handle;
        struct scsi_cmnd *scmd;
@@ -600,6 +600,7 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
        handle = le16_to_cpu(tm_request->DevHandle);
        for (smid = ioc->scsiio_depth; smid && !found; smid--) {
                struct scsiio_tracker *st;
+               __le16 task_mid;
 
                scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
                if (!scmd)
@@ -618,10 +619,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
                 * first outstanding smid will be picked up.  Otherwise,
                 * targeted smid will be the one.
                 */
-               if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
-                       tm_request->TaskMID = cpu_to_le16(st->smid);
-                       found = 1;
-               }
+               task_mid = cpu_to_le16(st->smid);
+               if (!tm_request->TaskMID)
+                       tm_request->TaskMID = task_mid;
+               found = tm_request->TaskMID == task_mid;
        }
 
        if (!found) {
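The rewritten loop fixes a subtle endianness bug rather than just tidying the flag: tm_request->TaskMID is a little-endian wire field, but the old code compared it directly against the CPU-order st->smid and only converted on the assignment path. Converting the smid once into task_mid and then comparing wire order against wire order keeps the logic correct on big-endian hosts and lets found become a proper bool. A standalone illustration of the pattern, using the userspace htole16() helper instead of the kernel's cpu_to_le16() — the structure and names below are made up for the example:

/* Userspace sketch of "convert once, compare like with like". */
#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wire_req {
	uint16_t task_mid_le;		/* little-endian on the wire */
};

static bool match_or_claim(struct wire_req *req, uint16_t smid_cpu)
{
	uint16_t task_mid_le = htole16(smid_cpu);	/* convert exactly once */

	if (!req->task_mid_le)				/* 0 is 0 in any byte order */
		req->task_mid_le = task_mid_le;
	return req->task_mid_le == task_mid_le;		/* wire vs. wire */
}

int main(void)
{
	struct wire_req req = { .task_mid_le = 0 };

	printf("claimed smid 5: %d\n", match_or_claim(&req, 5));
	printf("matches smid 7: %d\n", match_or_claim(&req, 7));
	return 0;
}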
index 7e476f5..b519f4b 100644 (file)
@@ -10926,20 +10926,20 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
        case MPI2_EVENT_LOG_ENTRY_ADDED:
        {
                Mpi2EventDataLogEntryAdded_t *log_entry;
-               u32 *log_code;
+               u32 log_code;
 
                if (!ioc->is_warpdrive)
                        break;
 
                log_entry = (Mpi2EventDataLogEntryAdded_t *)
                    mpi_reply->EventData;
-               log_code = (u32 *)log_entry->LogData;
+               log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
 
                if (le16_to_cpu(log_entry->LogEntryQualifier)
                    != MPT2_WARPDRIVE_LOGENTRY)
                        break;
 
-               switch (le32_to_cpu(*log_code)) {
+               switch (log_code) {
                case MPT2_WARPDRIVE_LC_SSDT:
                        ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
                        break;
@@ -12588,20 +12588,18 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
  */
 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
 {
-       unsigned char *buf;
+       struct scsi_vpd *vpd;
        bool ncq_prio_supp = false;
 
-       if (!scsi_device_supports_vpd(sdev))
-               return ncq_prio_supp;
-
-       buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
-       if (!buf)
-               return ncq_prio_supp;
+       rcu_read_lock();
+       vpd = rcu_dereference(sdev->vpd_pg89);
+       if (!vpd || vpd->len < 214)
+               goto out;
 
-       if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
-               ncq_prio_supp = (buf[213] >> 4) & 1;
+       ncq_prio_supp = (vpd->data[213] >> 4) & 1;
+out:
+       rcu_read_unlock();
 
-       kfree(buf);
        return ncq_prio_supp;
 }
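scsih_ncq_prio_supp() now reads the cached ATA Information VPD page (0x89) under RCU instead of allocating a buffer and issuing a fresh INQUIRY. The byte it inspects is unchanged: assuming the IDENTIFY DEVICE data starts at byte 60 of the page, IDENTIFY word 76 occupies bytes 212-213, and its bit 12 (NCQ priority) lands in bit 4 of byte 213 — hence the length check against 214. A hypothetical standalone helper doing the same extraction:

/* Hypothetical helper mirroring the check above: vpd points at the full
 * page-0x89 payload, len is its cached length. */
#include <stdbool.h>
#include <stdio.h>

static bool ata_vpd_ncq_prio(const unsigned char *vpd, int len)
{
	if (len < 214)		/* need IDENTIFY word 76 (bytes 212-213) */
		return false;
	return (vpd[213] >> 4) & 1;
}

int main(void)
{
	unsigned char vpd[512] = { 0 };

	vpd[213] = 0x10;	/* pretend the device sets the priority bit */
	printf("ncq prio supported: %d\n", ata_vpd_ncq_prio(vpd, sizeof(vpd)));
	return 0;
}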
 /*
index fd674ed..3d5cd33 100644 (file)
@@ -4590,7 +4590,7 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
                        mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
                pint_regs->global_interrupt_mask_reg =
                        mapped_pci_addr + chip_cfg->global_intr_mask;
-       };
+       }
 
        pinstance->ioa_reset_attempts = 0;
        init_waitqueue_head(&pinstance->reset_wait_q);
index fdc66d2..8d8c760 100644 (file)
@@ -131,7 +131,6 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
        struct qedf_ctx *qedf = NULL;
        long reading;
        int ret = 0;
-       char msg[40];
 
        if (off != 0)
                return ret;
@@ -148,7 +147,6 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
                return ret;
        }
 
-       memset(msg, 0, sizeof(msg));
        switch (reading) {
        case 0:
                memset(qedf->grcdump, 0, qedf->grcdump_size);
index 2ec1f71..e57cc22 100644 (file)
@@ -804,7 +804,6 @@ static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
        struct qedf_io_log *io_log;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        unsigned long flags;
-       uint8_t op;
 
        spin_lock_irqsave(&qedf->io_trace_lock, flags);
 
@@ -813,7 +812,7 @@ static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
        io_log->task_id = io_req->xid;
        io_log->port_id = fcport->rdata->ids.port_id;
        io_log->lun = sc_cmd->device->lun;
-       io_log->op = op = sc_cmd->cmnd[0];
+       io_log->op = sc_cmd->cmnd[0];
        io_log->lba[0] = sc_cmd->cmnd[2];
        io_log->lba[1] = sc_cmd->cmnd[3];
        io_log->lba[2] = sc_cmd->cmnd[4];
index 18dc68d..3d6b137 100644 (file)
@@ -873,7 +873,7 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 
 bool qedf_wait_for_upload(struct qedf_ctx *qedf)
 {
-       struct qedf_rport *fcport = NULL;
+       struct qedf_rport *fcport;
        int wait_cnt = 120;
 
        while (wait_cnt--) {
@@ -888,7 +888,7 @@ bool qedf_wait_for_upload(struct qedf_ctx *qedf)
 
        rcu_read_lock();
        list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
-               if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
+               if (test_bit(QEDF_RPORT_SESSION_READY,
                                       &fcport->flags)) {
                        if (fcport->rdata)
                                QEDF_ERR(&qedf->dbg_ctx,
@@ -899,9 +899,9 @@ bool qedf_wait_for_upload(struct qedf_ctx *qedf)
                                         "Waiting for fcport %p.\n", fcport);
                        }
        }
+
        rcu_read_unlock();
        return false;
-
 }
 
 /* Performs soft reset of qedf_ctx by simulating a link down/up */
@@ -1067,7 +1067,6 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
        u32                     crc;
        unsigned int            hlen, tlen, elen;
        int                     wlen;
-       struct fc_stats         *stats;
        struct fc_lport *tmp_lport;
        struct fc_lport *vn_port = NULL;
        struct qedf_rport *fcport;
@@ -1215,10 +1214,8 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
        hp->fcoe_sof = sof;
 
        /*update tx stats */
-       stats = per_cpu_ptr(lport->stats, get_cpu());
-       stats->TxFrames++;
-       stats->TxWords += wlen;
-       put_cpu();
+       this_cpu_inc(lport->stats->TxFrames);
+       this_cpu_add(lport->stats->TxWords, wlen);
 
        /* Get VLAN ID from skb for printing purposes */
        __vlan_hwaccel_get_tag(skb, &vlan_tci);
index 0628633..cb8145a 100644 (file)
@@ -657,7 +657,6 @@ qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
 static int
 qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
 {
-       int32_t                 rval = 0;
        struct auth_complete_cmd appplogiok;
        struct app_plogi_reply  appplogireply = {0};
        struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
@@ -758,7 +757,7 @@ errstate_exit:
                                                               &appplogireply,
                                                               sizeof(struct app_plogi_reply));
 
-       return rval;
+       return 0;
 }
 
 /**
index 762229d..73073fb 100644 (file)
@@ -3933,7 +3933,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 
        /* Flush the work queue and remove it */
        if (ha->wq) {
-               flush_workqueue(ha->wq);
                destroy_workqueue(ha->wq);
                ha->wq = NULL;
        }
index 6dfcfd8..a02235a 100644 (file)
@@ -3866,8 +3866,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 
        BUG_ON(cmd->sg_mapped);
        cmd->jiffies_at_free = get_jiffies_64();
-       if (unlikely(cmd->free_sg))
-               kfree(cmd->sg);
 
        if (!sess || !sess->se_sess) {
                WARN_ON(1);
index 156b950..de3942b 100644 (file)
@@ -883,7 +883,6 @@ struct qla_tgt_cmd {
        /* to save extra sess dereferences */
        unsigned int conf_compl_supported:1;
        unsigned int sg_mapped:1;
-       unsigned int free_sg:1;
        unsigned int write_data_transferred:1;
        unsigned int q_full:1;
        unsigned int term_exchg:1;
index 3f6cb2a..9e849f6 100644 (file)
@@ -671,7 +671,6 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
                goto exit_chap_list;
        }
 
-       memset(ha->chap_list, 0, chap_size);
        memcpy(ha->chap_list, chap_flash_data, chap_size);
 
 exit_chap_list:
index 211aace..c59eac7 100644 (file)
@@ -200,11 +200,11 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 
 
 /*
- * 1024 is big enough for saturating fast SCSI LUNs.
+ * 4096 is big enough for saturating fast SCSI LUNs.
  */
 int scsi_device_max_queue_depth(struct scsi_device *sdev)
 {
-       return min_t(int, sdev->host->can_queue, 1024);
+       return min_t(int, sdev->host->can_queue, 4096);
 }
 
 /**
@@ -321,6 +321,31 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
        return get_unaligned_be16(&buffer[2]) + 4;
 }
 
+static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+{
+       unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+       int result;
+
+       /*
+        * Fetch the VPD page header to find out how big the page
+        * is. This is done to prevent problems on legacy devices
+        * which can not handle allocation lengths as large as
+        * potentially requested by the caller.
+        */
+       result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+       if (result < 0)
+               return 0;
+
+       if (result < SCSI_VPD_HEADER_SIZE) {
+               dev_warn_once(&sdev->sdev_gendev,
+                             "%s: short VPD page 0x%02x length: %d bytes\n",
+                             __func__, page, result);
+               return 0;
+       }
+
+       return result;
+}
+
 /**
  * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
  * @sdev: The device to ask
@@ -330,47 +355,38 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
  *
  * SCSI devices may optionally supply Vital Product Data.  Each 'page'
  * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
- * If the device supports this VPD page, this routine returns a pointer
- * to a buffer containing the data from that page.  The caller is
- * responsible for calling kfree() on this pointer when it is no longer
- * needed.  If we cannot retrieve the VPD page this routine returns %NULL.
+ * If the device supports this VPD page, this routine fills @buf
+ * with the data from that page and return 0. If the VPD page is not
+ * supported or its content cannot be retrieved, -EINVAL is returned.
  */
 int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
                      int buf_len)
 {
-       int i, result;
-
-       if (sdev->skip_vpd_pages)
-               goto fail;
-
-       /* Ask for all the pages supported by this device */
-       result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
-       if (result < 4)
-               goto fail;
+       int result, vpd_len;
 
-       /* If the user actually wanted this page, we can skip the rest */
-       if (page == 0)
-               return 0;
+       if (!scsi_device_supports_vpd(sdev))
+               return -EINVAL;
 
-       for (i = 4; i < min(result, buf_len); i++)
-               if (buf[i] == page)
-                       goto found;
+       vpd_len = scsi_get_vpd_size(sdev, page);
+       if (vpd_len <= 0)
+               return -EINVAL;
 
-       if (i < result && i >= buf_len)
-               /* ran off the end of the buffer, give us benefit of doubt */
-               goto found;
-       /* The device claims it doesn't support the requested page */
-       goto fail;
+       vpd_len = min(vpd_len, buf_len);
 
- found:
-       result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
+       /*
+        * Fetch the actual page. Since the appropriate size was reported
+        * by the device it is now safe to ask for something bigger.
+        */
+       memset(buf, 0, buf_len);
+       result = scsi_vpd_inquiry(sdev, buf, page, vpd_len);
        if (result < 0)
-               goto fail;
+               return -EINVAL;
+       else if (result > vpd_len)
+               dev_warn_once(&sdev->sdev_gendev,
+                             "%s: VPD page 0x%02x result %d > %d bytes\n",
+                             __func__, page, result, vpd_len);
 
        return 0;
-
- fail:
-       return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
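scsi_get_vpd_page() and scsi_get_vpd_buf() now share a two-step pattern: fetch just the VPD header first, derive the real page length from it, and only then ask the device for that many bytes, which keeps legacy devices from being handed huge allocation lengths. As scsi_vpd_inquiry() shows above, the full page length is the big-endian 16-bit field at bytes 2-3 plus the 4 header bytes. A small userspace sketch of that header parse, assuming SCSI_VPD_HEADER_SIZE is 4:

/* Parse a VPD page header and return the full page length in bytes:
 * the big-endian length field at offset 2 plus the 4 header bytes. */
#include <stdio.h>

#define VPD_HEADER_SIZE 4	/* assumed value of SCSI_VPD_HEADER_SIZE */

static int vpd_page_len(const unsigned char *hdr)
{
	return ((hdr[2] << 8) | hdr[3]) + VPD_HEADER_SIZE;
}

int main(void)
{
	/* e.g. a Block Limits (0xb0) header claiming 0x3c payload bytes */
	const unsigned char hdr[VPD_HEADER_SIZE] = { 0x00, 0xb0, 0x00, 0x3c };

	printf("full page length: %d bytes\n", vpd_page_len(hdr));
	return 0;
}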
 
@@ -384,9 +400,17 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
 static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
 {
        struct scsi_vpd *vpd_buf;
-       int vpd_len = SCSI_VPD_PG_LEN, result;
+       int vpd_len, result;
+
+       vpd_len = scsi_get_vpd_size(sdev, page);
+       if (vpd_len <= 0)
+               return NULL;
 
 retry_pg:
+       /*
+        * Fetch the actual page. Since the appropriate size was reported
+        * by the device it is now safe to ask for something bigger.
+        */
        vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
        if (!vpd_buf)
                return NULL;
@@ -397,6 +421,9 @@ retry_pg:
                return NULL;
        }
        if (result > vpd_len) {
+               dev_warn_once(&sdev->sdev_gendev,
+                             "%s: VPD page 0x%02x result %d > %d bytes\n",
+                             __func__, page, result, vpd_len);
                vpd_len = result;
                kfree(vpd_buf);
                goto retry_pg;
@@ -456,6 +483,12 @@ void scsi_attach_vpd(struct scsi_device *sdev)
                        scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
                if (vpd_buf->data[i] == 0x89)
                        scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
+               if (vpd_buf->data[i] == 0xb0)
+                       scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
+               if (vpd_buf->data[i] == 0xb1)
+                       scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
+               if (vpd_buf->data[i] == 0xb2)
+                       scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
        }
        kfree(vpd_buf);
 }
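The three pages added to the cache here are the Block Limits (0xb0), Block Device Characteristics (0xb1) and Logical Block Provisioning (0xb2) VPD pages. Consumers are expected to read them under rcu_read_lock() via the new sdev->vpd_pgb0/b1/b2 pointers instead of issuing their own INQUIRY, which is exactly the conversion applied to sd_read_block_limits() further down and to the pg89-based NCQ priority check in mpt3sas above.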
@@ -476,21 +509,30 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
 {
        unsigned char cmd[16];
        struct scsi_sense_hdr sshdr;
-       int result;
+       int result, request_len;
 
        if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
                return -EINVAL;
 
+       /* RSOC header + size of command we are asking about */
+       request_len = 4 + COMMAND_SIZE(opcode);
+       if (request_len > len) {
+               dev_warn_once(&sdev->sdev_gendev,
+                             "%s: len %u bytes, opcode 0x%02x needs %u\n",
+                             __func__, len, opcode, request_len);
+               return -EINVAL;
+       }
+
        memset(cmd, 0, 16);
        cmd[0] = MAINTENANCE_IN;
        cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
        cmd[2] = 1;             /* One command format */
        cmd[3] = opcode;
-       put_unaligned_be32(len, &cmd[6]);
+       put_unaligned_be32(request_len, &cmd[6]);
        memset(buffer, 0, len);
 
-       result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
-                                 &sshdr, 30 * HZ, 3, NULL);
+       result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
+                                 request_len, &sshdr, 30 * HZ, 3, NULL);
 
        if (result < 0)
                return result;
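Bounding the allocation length to the RSOC reply actually needed avoids asking the device to fill whatever oversized buffer the caller happened to pass. With the one-command reporting format the reply is a 4-byte header followed by the CDB usage data for that opcode, so, for example, asking about READ(16) (opcode 0x88, a 16-byte CDB by the usual group-code table) gives request_len = 4 + 16 = 20 bytes.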
index 592a290..1f423f7 100644 (file)
@@ -16,7 +16,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
 
 #include <linux/module.h>
-
+#include <linux/align.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/jiffies.h>
@@ -98,6 +98,7 @@ static const char *sdebug_version_date = "20210520";
 #define WRITE_BOUNDARY_ASCQ 0x5
 #define READ_INVDATA_ASCQ 0x6
 #define READ_BOUNDARY_ASCQ 0x7
+#define ATTEMPT_ACCESS_GAP 0x9
 #define INSUFF_ZONE_ASCQ 0xe
 
 /* Additional Sense Code Qualifier (ASCQ) */
@@ -251,9 +252,11 @@ static const char *sdebug_version_date = "20210520";
 
 /* Zone types (zbcr05 table 25) */
 enum sdebug_z_type {
-       ZBC_ZONE_TYPE_CNV       = 0x1,
-       ZBC_ZONE_TYPE_SWR       = 0x2,
-       ZBC_ZONE_TYPE_SWP       = 0x3,
+       ZBC_ZTYPE_CNV   = 0x1,
+       ZBC_ZTYPE_SWR   = 0x2,
+       ZBC_ZTYPE_SWP   = 0x3,
+       /* ZBC_ZTYPE_SOBR = 0x4, */
+       ZBC_ZTYPE_GAP   = 0x5,
 };
 
 /* enumeration names taken from table 26, zbcr05 */
@@ -291,10 +294,12 @@ struct sdebug_dev_info {
 
        /* For ZBC devices */
        enum blk_zoned_model zmodel;
+       unsigned int zcap;
        unsigned int zsize;
        unsigned int zsize_shift;
        unsigned int nr_zones;
        unsigned int nr_conv_zones;
+       unsigned int nr_seq_zones;
        unsigned int nr_imp_open;
        unsigned int nr_exp_open;
        unsigned int nr_closed;
@@ -829,6 +834,7 @@ static int dif_errors;
 
 /* ZBC global data */
 static bool sdeb_zbc_in_use;   /* true for host-aware and host-managed disks */
+static int sdeb_zbc_zone_cap_mb;
 static int sdeb_zbc_zone_size_mb;
 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
@@ -1559,6 +1565,12 @@ static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
                put_unaligned_be32(devip->max_open, &arr[12]);
        else
                put_unaligned_be32(0xffffffff, &arr[12]);
+       if (devip->zcap < devip->zsize) {
+               arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
+               put_unaligned_be64(devip->zsize, &arr[20]);
+       } else {
+               arr[19] = 0;
+       }
        return 0x3c;
 }
 
@@ -2711,12 +2723,38 @@ static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
                                        unsigned long long lba)
 {
-       return &devip->zstate[lba >> devip->zsize_shift];
+       u32 zno = lba >> devip->zsize_shift;
+       struct sdeb_zone_state *zsp;
+
+       if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
+               return &devip->zstate[zno];
+
+       /*
+        * If the zone capacity is less than the zone size, adjust for gap
+        * zones.
+        */
+       zno = 2 * zno - devip->nr_conv_zones;
+       WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
+       zsp = &devip->zstate[zno];
+       if (lba >= zsp->z_start + zsp->z_size)
+               zsp++;
+       WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
+       return zsp;
 }
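The index arithmetic in zbc_zone() deserves a worked example: when gap zones are present, every sequential zone occupies two consecutive slots in zstate[] (the sequential part followed by its gap), while conventional zones still take one, so a zone-size-granularity index zno maps to array slot 2 * zno - nr_conv_zones and the final compare picks the sequential or the gap half. Below is a small runnable model of that mapping; the geometry (one conventional zone, 128 MiB zone size, 96 MiB zone capacity, 512-byte sectors) is made up purely for illustration.

/* Userspace model of the zbc_zone() slot lookup above. */
#include <stdio.h>

#define SECT(mb)	((unsigned long long)(mb) * 2048ULL)

int main(void)
{
	unsigned long long zsize = SECT(128), zcap = SECT(96);
	unsigned int nr_conv = 1;
	unsigned long long lbas[] = { SECT(64), SECT(200), SECT(250) };

	for (int i = 0; i < 3; i++) {
		unsigned long long lba = lbas[i];
		unsigned int zno = lba / zsize;	/* zone-size granularity */
		unsigned int slot;
		const char *kind;

		if (zno < nr_conv) {
			slot = zno;
			kind = "conventional";
		} else {
			unsigned long long seq_start = zno * zsize;

			slot = 2 * zno - nr_conv;
			if (lba >= seq_start + zcap) {
				slot++;		/* falls in the gap zone */
				kind = "gap";
			} else {
				kind = "sequential";
			}
		}
		printf("lba %llu -> zstate[%u] (%s)\n", lba, slot, kind);
	}
	return 0;
}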
 
 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
 {
-       return zsp->z_type == ZBC_ZONE_TYPE_CNV;
+       return zsp->z_type == ZBC_ZTYPE_CNV;
+}
+
+static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
+{
+       return zsp->z_type == ZBC_ZTYPE_GAP;
+}
+
+static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
+{
+       return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
 }
 
 static void zbc_close_zone(struct sdebug_dev_info *devip,
@@ -2724,7 +2762,7 @@ static void zbc_close_zone(struct sdebug_dev_info *devip,
 {
        enum sdebug_z_cond zc;
 
-       if (zbc_zone_is_conv(zsp))
+       if (!zbc_zone_is_seq(zsp))
                return;
 
        zc = zsp->z_cond;
@@ -2762,7 +2800,7 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
 {
        enum sdebug_z_cond zc;
 
-       if (zbc_zone_is_conv(zsp))
+       if (!zbc_zone_is_seq(zsp))
                return;
 
        zc = zsp->z_cond;
@@ -2794,10 +2832,10 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
        struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
        unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
 
-       if (zbc_zone_is_conv(zsp))
+       if (!zbc_zone_is_seq(zsp))
                return;
 
-       if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+       if (zsp->z_type == ZBC_ZTYPE_SWR) {
                zsp->z_wp += num;
                if (zsp->z_wp >= zend)
                        zsp->z_cond = ZC5_FULL;
@@ -2842,9 +2880,7 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
                if (devip->zmodel == BLK_ZONED_HA)
                        return 0;
                /* For host-managed, reads cannot cross zone types boundaries */
-               if (zsp_end != zsp &&
-                   zbc_zone_is_conv(zsp) &&
-                   !zbc_zone_is_conv(zsp_end)) {
+               if (zsp->z_type != zsp_end->z_type) {
                        mk_sense_buffer(scp, ILLEGAL_REQUEST,
                                        LBA_OUT_OF_RANGE,
                                        READ_INVDATA_ASCQ);
@@ -2853,6 +2889,13 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
                return 0;
        }
 
+       /* Writing into a gap zone is not allowed */
+       if (zbc_zone_is_gap(zsp)) {
+               mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
+                               ATTEMPT_ACCESS_GAP);
+               return check_condition_result;
+       }
+
        /* No restrictions for writes within conventional zones */
        if (zbc_zone_is_conv(zsp)) {
                if (!zbc_zone_is_conv(zsp_end)) {
@@ -2864,7 +2907,7 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
                return 0;
        }
 
-       if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+       if (zsp->z_type == ZBC_ZTYPE_SWR) {
                /* Writes cannot cross sequential zone boundaries */
                if (zsp_end != zsp) {
                        mk_sense_buffer(scp, ILLEGAL_REQUEST,
@@ -4404,18 +4447,18 @@ cleanup:
 
 #define RZONES_DESC_HD 64
 
-/* Report zones depending on start LBA nad reporting options */
+/* Report zones depending on start LBA and reporting options */
 static int resp_report_zones(struct scsi_cmnd *scp,
                             struct sdebug_dev_info *devip)
 {
-       unsigned int i, max_zones, rep_max_zones, nrz = 0;
+       unsigned int rep_max_zones, nrz = 0;
        int ret = 0;
        u32 alloc_len, rep_opts, rep_len;
        bool partial;
        u64 lba, zs_lba;
        u8 *arr = NULL, *desc;
        u8 *cmd = scp->cmnd;
-       struct sdeb_zone_state *zsp;
+       struct sdeb_zone_state *zsp = NULL;
        struct sdeb_store_info *sip = devip2sip(devip, false);
 
        if (!sdebug_dev_is_zoned(devip)) {
@@ -4434,9 +4477,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
                return check_condition_result;
        }
 
-       max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
-       rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
-                           max_zones);
+       rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
 
        arr = kzalloc(alloc_len, GFP_ATOMIC);
        if (!arr) {
@@ -4448,9 +4489,9 @@ static int resp_report_zones(struct scsi_cmnd *scp,
        sdeb_read_lock(sip);
 
        desc = arr + 64;
-       for (i = 0; i < max_zones; i++) {
-               lba = zs_lba + devip->zsize * i;
-               if (lba > sdebug_capacity)
+       for (lba = zs_lba; lba < sdebug_capacity;
+            lba = zsp->z_start + zsp->z_size) {
+               if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
                        break;
                zsp = zbc_zone(devip, lba);
                switch (rep_opts) {
@@ -4495,9 +4536,14 @@ static int resp_report_zones(struct scsi_cmnd *scp,
                        if (!zsp->z_non_seq_resource)
                                continue;
                        break;
+               case 0x3e:
+                       /* All zones except gap zones. */
+                       if (zbc_zone_is_gap(zsp))
+                               continue;
+                       break;
                case 0x3f:
                        /* Not write pointer (conventional) zones */
-                       if (!zbc_zone_is_conv(zsp))
+                       if (zbc_zone_is_seq(zsp))
                                continue;
                        break;
                default:
@@ -4526,8 +4572,13 @@ static int resp_report_zones(struct scsi_cmnd *scp,
        }
 
        /* Report header */
+       /* Zone list length. */
        put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
+       /* Maximum LBA */
        put_unaligned_be64(sdebug_capacity - 1, arr + 8);
+       /* Zone starting LBA granularity. */
+       if (devip->zcap < devip->zsize)
+               put_unaligned_be64(devip->zsize, arr + 16);
 
        rep_len = (unsigned long)desc - (unsigned long)arr;
        ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
@@ -4752,7 +4803,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
        enum sdebug_z_cond zc;
        struct sdeb_store_info *sip = devip2sip(devip, false);
 
-       if (zbc_zone_is_conv(zsp))
+       if (!zbc_zone_is_seq(zsp))
                return;
 
        zc = zsp->z_cond;
@@ -4942,6 +4993,7 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
 {
        struct sdeb_zone_state *zsp;
        sector_t capacity = get_sdebug_capacity();
+       sector_t conv_capacity;
        sector_t zstart = 0;
        unsigned int i;
 
@@ -4976,11 +5028,30 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
        devip->zsize_shift = ilog2(devip->zsize);
        devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
 
-       if (sdeb_zbc_nr_conv >= devip->nr_zones) {
+       if (sdeb_zbc_zone_cap_mb == 0) {
+               devip->zcap = devip->zsize;
+       } else {
+               devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
+                             ilog2(sdebug_sector_size);
+               if (devip->zcap > devip->zsize) {
+                       pr_err("Zone capacity too large\n");
+                       return -EINVAL;
+               }
+       }
+
+       conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
+       if (conv_capacity >= capacity) {
                pr_err("Number of conventional zones too large\n");
                return -EINVAL;
        }
        devip->nr_conv_zones = sdeb_zbc_nr_conv;
+       devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
+                             devip->zsize_shift;
+       devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
+
+       /* Add gap zones if zone capacity is smaller than the zone size */
+       if (devip->zcap < devip->zsize)
+               devip->nr_zones += devip->nr_seq_zones;
 
        if (devip->zmodel == BLK_ZONED_HM) {
                /* zbc_max_open_zones can be 0, meaning "not reported" */
@@ -5001,23 +5072,29 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
                zsp->z_start = zstart;
 
                if (i < devip->nr_conv_zones) {
-                       zsp->z_type = ZBC_ZONE_TYPE_CNV;
+                       zsp->z_type = ZBC_ZTYPE_CNV;
                        zsp->z_cond = ZBC_NOT_WRITE_POINTER;
                        zsp->z_wp = (sector_t)-1;
-               } else {
+                       zsp->z_size =
+                               min_t(u64, devip->zsize, capacity - zstart);
+               } else if ((zstart & (devip->zsize - 1)) == 0) {
                        if (devip->zmodel == BLK_ZONED_HM)
-                               zsp->z_type = ZBC_ZONE_TYPE_SWR;
+                               zsp->z_type = ZBC_ZTYPE_SWR;
                        else
-                               zsp->z_type = ZBC_ZONE_TYPE_SWP;
+                               zsp->z_type = ZBC_ZTYPE_SWP;
                        zsp->z_cond = ZC1_EMPTY;
                        zsp->z_wp = zsp->z_start;
+                       zsp->z_size =
+                               min_t(u64, devip->zcap, capacity - zstart);
+               } else {
+                       zsp->z_type = ZBC_ZTYPE_GAP;
+                       zsp->z_cond = ZBC_NOT_WRITE_POINTER;
+                       zsp->z_wp = (sector_t)-1;
+                       zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
+                                           capacity - zstart);
                }
 
-               if (zsp->z_start + devip->zsize < capacity)
-                       zsp->z_size = devip->zsize;
-               else
-                       zsp->z_size = capacity - zsp->z_start;
-
+               WARN_ON_ONCE((int)zsp->z_size <= 0);
                zstart += zsp->z_size;
        }
 
@@ -5779,6 +5856,7 @@ module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
 module_param_named(write_same_length, sdebug_write_same_length, int,
                   S_IRUGO | S_IWUSR);
 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
+module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
@@ -5850,6 +5928,7 @@ MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique de
 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
+MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
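With the new zone_cap_mb parameter the zone bookkeeping changes shape: the sequential-zone count is derived from whatever capacity is left after the conventional zones, and when the zone capacity is smaller than the zone size every sequential zone gets a trailing gap zone, doubling that part of the count. The sketch below reproduces the same arithmetic in userspace; the numbers (1 GiB device, 4 MiB zones, 3 MiB zone capacity, one conventional zone) are illustrative assumptions, not defaults.

/* Reproduces the zone-count arithmetic from sdebug_device_create_zones(). */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long long capacity = 1024ULL * 2048;	/* 1 GiB in 512B sectors */
	unsigned long long zsize = 4ULL * 2048;		/* zone_size_mb=4 */
	unsigned long long zcap = 3ULL * 2048;		/* zone_cap_mb=3 */
	unsigned int nr_conv = 1;			/* zone_nr_conv=1 */

	unsigned long long conv_capacity = (unsigned long long)nr_conv * zsize;
	unsigned int nr_seq = ALIGN_UP(capacity - conv_capacity, zsize) / zsize;
	unsigned int nr_zones = nr_conv + nr_seq;

	if (zcap < zsize)		/* one gap zone per sequential zone */
		nr_zones += nr_seq;

	printf("%u conventional + %u sequential (+%u gap) = %u zones\n",
	       nr_conv, nr_seq, zcap < zsize ? nr_seq : 0, nr_zones);
	return 0;
}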
index 8d18cc7..e9db7da 100644 (file)
@@ -1977,7 +1977,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
        tag_set->nr_maps = shost->nr_maps ? : 1;
        tag_set->queue_depth = shost->can_queue;
        tag_set->cmd_size = cmd_size;
-       tag_set->numa_node = NUMA_NO_NODE;
+       tag_set->numa_node = dev_to_node(shost->dma_dev);
        tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
        tag_set->flags |=
                BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
index 2ef7808..91ac901 100644 (file)
@@ -733,7 +733,17 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
                if (pass == 1) {
                        if (BLIST_INQUIRY_36 & *bflags)
                                next_inquiry_len = 36;
-                       else if (sdev->inquiry_len)
+                       /*
+                        * LLD specified a maximum sdev->inquiry_len
+                        * but device claims it has more data. Capping
+                        * the length only makes sense for legacy
+                        * devices. If a device supports SPC-4 (2014)
+                        * or newer, assume that it is safe to ask for
+                        * as much as the device says it supports.
+                        */
+                       else if (sdev->inquiry_len &&
+                                response_len > sdev->inquiry_len &&
+                                (inq_result[2] & 0x7) < 6) /* SPC-4 */
                                next_inquiry_len = sdev->inquiry_len;
                        else
                                next_inquiry_len = response_len;
index dc6872e..546a9e3 100644 (file)
@@ -448,6 +448,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
        struct list_head *this, *tmp;
        struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
        struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
+       struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
        unsigned long flags;
        struct module *mod;
 
@@ -490,6 +491,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
                                       lockdep_is_held(&sdev->inquiry_mutex));
        vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
                                       lockdep_is_held(&sdev->inquiry_mutex));
+       vpd_pgb0 = rcu_replace_pointer(sdev->vpd_pgb0, vpd_pgb0,
+                                      lockdep_is_held(&sdev->inquiry_mutex));
+       vpd_pgb1 = rcu_replace_pointer(sdev->vpd_pgb1, vpd_pgb1,
+                                      lockdep_is_held(&sdev->inquiry_mutex));
+       vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2,
+                                      lockdep_is_held(&sdev->inquiry_mutex));
        mutex_unlock(&sdev->inquiry_mutex);
 
        if (vpd_pg0)
@@ -500,6 +507,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
                kfree_rcu(vpd_pg80, rcu);
        if (vpd_pg89)
                kfree_rcu(vpd_pg89, rcu);
+       if (vpd_pgb0)
+               kfree_rcu(vpd_pgb0, rcu);
+       if (vpd_pgb1)
+               kfree_rcu(vpd_pgb1, rcu);
+       if (vpd_pgb2)
+               kfree_rcu(vpd_pgb2, rcu);
        kfree(sdev->inquiry);
        kfree(sdev);
 
@@ -913,6 +926,9 @@ static struct bin_attribute dev_attr_vpd_##_page = {                \
 sdev_vpd_pg_attr(pg83);
 sdev_vpd_pg_attr(pg80);
 sdev_vpd_pg_attr(pg89);
+sdev_vpd_pg_attr(pgb0);
+sdev_vpd_pg_attr(pgb1);
+sdev_vpd_pg_attr(pgb2);
 sdev_vpd_pg_attr(pg0);
 
 static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
@@ -1250,6 +1266,15 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
        if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
                return 0;
 
+       if (attr == &dev_attr_vpd_pgb0 && !sdev->vpd_pgb0)
+               return 0;
+
+       if (attr == &dev_attr_vpd_pgb1 && !sdev->vpd_pgb1)
+               return 0;
+
+       if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2)
+               return 0;
+
        return S_IRUGO;
 }
 
@@ -1296,6 +1321,9 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
        &dev_attr_vpd_pg83,
        &dev_attr_vpd_pg80,
        &dev_attr_vpd_pg89,
+       &dev_attr_vpd_pgb0,
+       &dev_attr_vpd_pgb1,
+       &dev_attr_vpd_pgb2,
        &dev_attr_inquiry,
        NULL
 };
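
With the three new binary attributes wired into scsi_sdev_bin_attrs[], the cached Block Limits (0xb0), Block Device Characteristics (0xb1) and Logical Block Provisioning (0xb2) pages become readable from sysfs next to the existing vpd_pg80/pg83/pg89 files, and they only appear when the device actually provides the page (see scsi_sdev_bin_attr_is_visible() above). A small user-space sketch; the 0:0:0:0 path is only an example and must be replaced with the H:C:T:L of the device of interest:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Example path, not a fixed location. */
	const char *path = "/sys/class/scsi_device/0:0:0:0/device/vpd_pgb0";
	unsigned char buf[512];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n < 4)
		return 1;
	/* Byte 1 is the VPD page code, bytes 2-3 the page length. */
	printf("VPD page 0x%02x, %zd bytes read\n", buf[1], n);
	return 0;
}
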
index 9694e2c..7493164 100644 (file)
@@ -2174,40 +2174,48 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
 {
        struct scsi_device *sdp = sdkp->device;
        u8 type;
-       int ret = 0;
 
        if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
                sdkp->protection_type = 0;
-               return ret;
+               return 0;
        }
 
        type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
 
-       if (type > T10_PI_TYPE3_PROTECTION)
-               ret = -ENODEV;
-       else if (scsi_host_dif_capable(sdp->host, type))
-               ret = 1;
-
-       if (sdkp->first_scan || type != sdkp->protection_type)
-               switch (ret) {
-               case -ENODEV:
-                       sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
-                                 " protection type %u. Disabling disk!\n",
-                                 type);
-                       break;
-               case 1:
-                       sd_printk(KERN_NOTICE, sdkp,
-                                 "Enabling DIF Type %u protection\n", type);
-                       break;
-               case 0:
-                       sd_printk(KERN_NOTICE, sdkp,
-                                 "Disabling DIF Type %u protection\n", type);
-                       break;
-               }
+       if (type > T10_PI_TYPE3_PROTECTION) {
+               sd_printk(KERN_ERR, sdkp, "formatted with unsupported"  \
+                         " protection type %u. Disabling disk!\n",
+                         type);
+               sdkp->protection_type = 0;
+               return -ENODEV;
+       }
 
        sdkp->protection_type = type;
 
-       return ret;
+       return 0;
+}
+
+static void sd_config_protection(struct scsi_disk *sdkp)
+{
+       struct scsi_device *sdp = sdkp->device;
+
+       if (!sdkp->first_scan)
+               return;
+
+       sd_dif_config_host(sdkp);
+
+       if (!sdkp->protection_type)
+               return;
+
+       if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
+               sd_printk(KERN_NOTICE, sdkp,
+                         "Disabling DIF Type %u protection\n",
+                         sdkp->protection_type);
+               sdkp->protection_type = 0;
+       }
+
+       sd_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
+                 sdkp->protection_type);
 }
 
 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
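
The protection byte decoded above is byte 12 of the READ CAPACITY(16) response: bit 0 is PROT_EN and bits 3:1 are P_TYPE, with P_TYPE 0 mapping to DIF Type 1. A condensed sketch of that decode, with the helper name assumed and the host-capability check left out:

/*
 * Illustrative decode of the READ CAPACITY(16) protection byte, mirroring
 * the logic in sd_read_protection_type() above.
 */
static unsigned int decode_protection_type(unsigned char prot_byte)
{
	if (!(prot_byte & 1))			/* PROT_EN clear */
		return 0;			/* unprotected format */
	return ((prot_byte >> 1) & 7) + 1;	/* P_TYPE 0 -> Type 1 */
}
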
@@ -2841,40 +2849,37 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
  */
 static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
-       unsigned int sector_sz = sdkp->device->sector_size;
-       const int vpd_len = 64;
-       unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
+       struct scsi_vpd *vpd;
 
-       if (!buffer ||
-           /* Block Limits VPD */
-           scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
-               goto out;
+       rcu_read_lock();
 
-       blk_queue_io_min(sdkp->disk->queue,
-                        get_unaligned_be16(&buffer[6]) * sector_sz);
+       vpd = rcu_dereference(sdkp->device->vpd_pgb0);
+       if (!vpd || vpd->len < 16)
+               goto out;
 
-       sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
-       sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
+       sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
+       sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
+       sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
 
-       if (buffer[3] == 0x3c) {
+       if (vpd->len >= 64) {
                unsigned int lba_count, desc_count;
 
-               sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
+               sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
 
                if (!sdkp->lbpme)
                        goto out;
 
-               lba_count = get_unaligned_be32(&buffer[20]);
-               desc_count = get_unaligned_be32(&buffer[24]);
+               lba_count = get_unaligned_be32(&vpd->data[20]);
+               desc_count = get_unaligned_be32(&vpd->data[24]);
 
                if (lba_count && desc_count)
                        sdkp->max_unmap_blocks = lba_count;
 
-               sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
+               sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
 
-               if (buffer[32] & 0x80)
+               if (vpd->data[32] & 0x80)
                        sdkp->unmap_alignment =
-                               get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+                               get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
 
                if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
 
@@ -2896,7 +2901,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
        }
 
  out:
-       kfree(buffer);
+       rcu_read_unlock();
 }
 
 /**
@@ -2906,18 +2911,21 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 {
        struct request_queue *q = sdkp->disk->queue;
-       unsigned char *buffer;
+       struct scsi_vpd *vpd;
        u16 rot;
-       const int vpd_len = 64;
+       u8 zoned;
 
-       buffer = kmalloc(vpd_len, GFP_KERNEL);
+       rcu_read_lock();
+       vpd = rcu_dereference(sdkp->device->vpd_pgb1);
 
-       if (!buffer ||
-           /* Block Device Characteristics VPD */
-           scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
-               goto out;
+       if (!vpd || vpd->len < 8) {
+               rcu_read_unlock();
+               return;
+       }
 
-       rot = get_unaligned_be16(&buffer[4]);
+       rot = get_unaligned_be16(&vpd->data[4]);
+       zoned = (vpd->data[8] >> 4) & 3;
+       rcu_read_unlock();
 
        if (rot == 1) {
                blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
@@ -2928,7 +2936,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
                /* Host-managed */
                blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
        } else {
-               sdkp->zoned = (buffer[8] >> 4) & 3;
+               sdkp->zoned = zoned;
                if (sdkp->zoned == 1) {
                        /* Host-aware */
                        blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
@@ -2939,7 +2947,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
        }
 
        if (!sdkp->first_scan)
-               goto out;
+               return;
 
        if (blk_queue_is_zoned(q)) {
                sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
@@ -2952,9 +2960,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Drive-managed SMR disk\n");
        }
-
- out:
-       kfree(buffer);
 }
 
 /**
@@ -2963,24 +2968,24 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
  */
 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
 {
-       unsigned char *buffer;
-       const int vpd_len = 8;
+       struct scsi_vpd *vpd;
 
        if (sdkp->lbpme == 0)
                return;
 
-       buffer = kmalloc(vpd_len, GFP_KERNEL);
+       rcu_read_lock();
+       vpd = rcu_dereference(sdkp->device->vpd_pgb2);
 
-       if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
-               goto out;
+       if (!vpd || vpd->len < 8) {
+               rcu_read_unlock();
+               return;
+       }
 
        sdkp->lbpvpd    = 1;
-       sdkp->lbpu      = (buffer[5] >> 7) & 1; /* UNMAP */
-       sdkp->lbpws     = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
-       sdkp->lbpws10   = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
-
- out:
-       kfree(buffer);
+       sdkp->lbpu      = (vpd->data[5] >> 7) & 1; /* UNMAP */
+       sdkp->lbpws     = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
+       sdkp->lbpws10   = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
+       rcu_read_unlock();
 }
 
 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
@@ -2994,8 +2999,7 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
        }
 
        if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
-               /* too large values might cause issues with arcmsr */
-               int vpd_buf_len = 64;
+               struct scsi_vpd *vpd;
 
                sdev->no_report_opcodes = 1;
 
@@ -3003,8 +3007,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
                 * CODES is unsupported and the device has an ATA
                 * Information VPD page (SAT).
                 */
-               if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
+               rcu_read_lock();
+               vpd = rcu_dereference(sdev->vpd_pg89);
+               if (vpd)
                        sdev->no_write_same = 1;
+               rcu_read_unlock();
        }
 
        if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
@@ -3108,6 +3115,29 @@ out:
        kfree(buffer);
 }
 
+static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
+{
+       struct scsi_device *sdp = sdkp->device;
+       unsigned int min_xfer_bytes =
+               logical_to_bytes(sdp, sdkp->min_xfer_blocks);
+
+       if (sdkp->min_xfer_blocks == 0)
+               return false;
+
+       if (min_xfer_bytes & (sdkp->physical_block_size - 1)) {
+               sd_first_printk(KERN_WARNING, sdkp,
+                               "Preferred minimum I/O size %u bytes not a " \
+                               "multiple of physical block size (%u bytes)\n",
+                               min_xfer_bytes, sdkp->physical_block_size);
+               sdkp->min_xfer_blocks = 0;
+               return false;
+       }
+
+       sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
+                       min_xfer_bytes);
+       return true;
+}
+
 /*
  * Determine the device's preferred I/O size for reads and writes
  * unless the reported value is unreasonably small, large, not a
@@ -3119,6 +3149,8 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
        struct scsi_device *sdp = sdkp->device;
        unsigned int opt_xfer_bytes =
                logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+       unsigned int min_xfer_bytes =
+               logical_to_bytes(sdp, sdkp->min_xfer_blocks);
 
        if (sdkp->opt_xfer_blocks == 0)
                return false;
@@ -3147,6 +3179,15 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
                return false;
        }
 
+       if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
+               sd_first_printk(KERN_WARNING, sdkp,
+                               "Optimal transfer size %u bytes not a " \
+                               "multiple of preferred minimum block " \
+                               "size (%u bytes)\n",
+                               opt_xfer_bytes, min_xfer_bytes);
+               return false;
+       }
+
        if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
                sd_first_printk(KERN_WARNING, sdkp,
                                "Optimal transfer size %u bytes not a " \
@@ -3224,6 +3265,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
                sd_read_app_tag_own(sdkp, buffer);
                sd_read_write_same(sdkp, buffer);
                sd_read_security(sdkp, buffer);
+               sd_config_protection(sdkp);
        }
 
        /*
@@ -3239,6 +3281,12 @@ static int sd_revalidate_disk(struct gendisk *disk)
        dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
        q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
 
+       if (sd_validate_min_xfer_size(sdkp))
+               blk_queue_io_min(sdkp->disk->queue,
+                                logical_to_bytes(sdp, sdkp->min_xfer_blocks));
+       else
+               blk_queue_io_min(sdkp->disk->queue, 0);
+
        if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
                q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
                rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
@@ -3477,11 +3525,6 @@ static int sd_probe(struct device *dev)
                goto out;
        }
 
-       if (sdkp->capacity)
-               sd_dif_config_host(sdkp);
-
-       sd_revalidate_disk(gd);
-
        if (sdkp->security) {
                sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
                if (sdkp->opal_dev)
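
The sd.c hunks above drop the per-call scsi_get_vpd_page() buffers and instead read the VPD pages that the SCSI core now caches at scan time: take rcu_read_lock(), rcu_dereference() the cached page, copy out the needed fields, then drop the lock. A condensed sketch of that pattern, assuming kernel context; the function name is invented and the real code lives in sd_read_block_limits() and friends:

#include <linux/rcupdate.h>
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
#include "sd.h"

static void read_cached_block_limits(struct scsi_disk *sdkp)
{
	struct scsi_vpd *vpd;
	u32 min_blocks = 0, opt_blocks = 0;

	rcu_read_lock();
	vpd = rcu_dereference(sdkp->device->vpd_pgb0);	/* Block Limits */
	if (vpd && vpd->len >= 16) {
		min_blocks = get_unaligned_be16(&vpd->data[6]);
		opt_blocks = get_unaligned_be32(&vpd->data[12]);
	}
	rcu_read_unlock();

	/* Act on the copies only after leaving the RCU read section. */
	sdkp->min_xfer_blocks = min_blocks;
	sdkp->opt_xfer_blocks = opt_blocks;
}
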
index 0a33a4b..2abad54 100644 (file)
@@ -67,6 +67,20 @@ enum {
        SD_ZERO_WS10_UNMAP,     /* Use WRITE SAME(10) with UNMAP */
 };
 
+/**
+ * struct zoned_disk_info - Specific properties of a ZBC SCSI device.
+ * @nr_zones: number of zones.
+ * @zone_blocks: number of logical blocks per zone.
+ *
+ * This data structure holds the ZBC SCSI device properties that are retrieved
+ * twice: a first time before the gendisk capacity is known and a second time
+ * after the gendisk capacity is known.
+ */
+struct zoned_disk_info {
+       u32             nr_zones;
+       u32             zone_blocks;
+};
+
 struct scsi_disk {
        struct scsi_device *device;
 
@@ -78,13 +92,18 @@ struct scsi_disk {
        struct gendisk  *disk;
        struct opal_dev *opal_dev;
 #ifdef CONFIG_BLK_DEV_ZONED
-       u32             nr_zones;
-       u32             rev_nr_zones;
-       u32             zone_blocks;
-       u32             rev_zone_blocks;
+       /* Updated during revalidation before the gendisk capacity is known. */
+       struct zoned_disk_info  early_zone_info;
+       /* Updated during revalidation after the gendisk capacity is known. */
+       struct zoned_disk_info  zone_info;
        u32             zones_optimal_open;
        u32             zones_optimal_nonseq;
        u32             zones_max_open;
+       /*
+        * Either zero or a power of two. If not zero it means that the offset
+        * between zone starting LBAs is constant.
+        */
+       u32             zone_starting_lba_gran;
        u32             *zones_wp_offset;
        spinlock_t      zones_wp_offset_lock;
        u32             *rev_wp_offset;
@@ -95,6 +114,7 @@ struct scsi_disk {
        atomic_t        openers;
        sector_t        capacity;       /* size in logical blocks */
        int             max_retries;
+       u32             min_xfer_blocks;
        u32             max_xfer_blocks;
        u32             opt_xfer_blocks;
        u32             max_ws_blocks;
@@ -222,7 +242,7 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
 #ifdef CONFIG_BLK_DEV_ZONED
 
 void sd_zbc_release_disk(struct scsi_disk *sdkp);
-int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
+int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
 int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
 blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
                                         unsigned char op, bool all);
@@ -238,8 +258,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
 
 static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {}
 
-static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
-                                   unsigned char *buf)
+static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
 {
        return 0;
 }
index 3499506..968993e 100644 (file)
@@ -59,8 +59,6 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
                        bi.profile = &t10_pi_type1_crc;
 
        bi.tuple_size = sizeof(struct t10_pi_tuple);
-       sd_printk(KERN_NOTICE, sdkp,
-                 "Enabling DIX %s protection\n", bi.profile->name);
 
        if (dif && type) {
                bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
@@ -72,11 +70,11 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
                        bi.tag_size = sizeof(u16) + sizeof(u32);
                else
                        bi.tag_size = sizeof(u16);
-
-               sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
-                         bi.tag_size);
        }
 
+       sd_printk(KERN_NOTICE, sdkp,
+                 "Enabling DIX %s, application tag size %u bytes\n",
+                 bi.profile->name, bi.tag_size);
 out:
        blk_integrity_register(disk, &bi);
 }
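
The consolidated notice now reports the DIX profile name and the application tag size in a single line. For orientation, a condensed sketch of the registration the function performs, with error paths and the DIF/DIX capability checks omitted; header names assume a v5.16+ tree:

#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/t10-pi.h>

/* Illustrative only: register a Type 1 CRC integrity profile on a disk. */
static void register_type1_dix(struct gendisk *disk)
{
	struct blk_integrity bi = {
		.profile    = &t10_pi_type1_crc,
		.tuple_size = sizeof(struct t10_pi_tuple),
		.tag_size   = sizeof(u16),
		.flags      = BLK_INTEGRITY_DEVICE_CAPABLE,
	};

	blk_integrity_register(disk, &bi);
}
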
index 7f46628..5b9fad7 100644 (file)
 
 #include "sd.h"
 
+/**
+ * sd_zbc_get_zone_wp_offset - Get zone write pointer offset.
+ * @zone: Zone for which to return the write pointer offset.
+ *
+ * Return: offset of the write pointer from the start of the zone.
+ */
 static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
 {
        if (zone->type == ZBC_ZONE_TYPE_CONV)
@@ -44,13 +50,37 @@ static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
        }
 }
 
-static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
+/* Whether or not a SCSI zone descriptor describes a gap zone. */
+static bool sd_zbc_is_gap_zone(const u8 buf[64])
+{
+       return (buf[0] & 0xf) == ZBC_ZONE_TYPE_GAP;
+}
+
+/**
+ * sd_zbc_parse_report - Parse a SCSI zone descriptor
+ * @sdkp: SCSI disk pointer.
+ * @buf: SCSI zone descriptor.
+ * @idx: Index of the zone relative to the first zone reported by the current
+ *     sd_zbc_report_zones() call.
+ * @cb: Callback function pointer.
+ * @data: Second argument passed to @cb.
+ *
+ * Return: Value returned by @cb.
+ *
+ * Convert a SCSI zone descriptor into struct blk_zone format. Additionally,
+ * call @cb(blk_zone, @data).
+ */
+static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
                               unsigned int idx, report_zones_cb cb, void *data)
 {
        struct scsi_device *sdp = sdkp->device;
        struct blk_zone zone = { 0 };
+       sector_t start_lba, gran;
        int ret;
 
+       if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf)))
+               return -EINVAL;
+
        zone.type = buf[0] & 0x0f;
        zone.cond = (buf[1] >> 4) & 0xf;
        if (buf[1] & 0x01)
@@ -58,9 +88,27 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
        if (buf[1] & 0x02)
                zone.non_seq = 1;
 
-       zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
-       zone.capacity = zone.len;
-       zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
+       start_lba = get_unaligned_be64(&buf[16]);
+       zone.start = logical_to_sectors(sdp, start_lba);
+       zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+       zone.len = zone.capacity;
+       if (sdkp->zone_starting_lba_gran) {
+               gran = logical_to_sectors(sdp, sdkp->zone_starting_lba_gran);
+               if (zone.len > gran) {
+                       sd_printk(KERN_ERR, sdkp,
+                                 "Invalid zone at LBA %llu with capacity %llu and length %llu; granularity = %llu\n",
+                                 start_lba,
+                                 sectors_to_logical(sdp, zone.capacity),
+                                 sectors_to_logical(sdp, zone.len),
+                                 sectors_to_logical(sdp, gran));
+                       return -EINVAL;
+               }
+               /*
+                * Use the starting LBA granularity instead of the zone length
+                * obtained from the REPORT ZONES command.
+                */
+               zone.len = gran;
+       }
        if (zone.cond == ZBC_ZONE_COND_FULL)
                zone.wp = zone.start + zone.len;
        else
@@ -161,7 +209,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
         * sure that the allocated buffer can always be mapped by limiting the
         * number of pages allocated to the HBA max segments limit.
         */
-       nr_zones = min(nr_zones, sdkp->nr_zones);
+       nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
        bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
        bufsize = min_t(size_t, bufsize,
                        queue_max_hw_sectors(q) << SECTOR_SHIFT);
@@ -186,16 +234,28 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
  */
 static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
 {
-       return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+       return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks);
 }
 
+/**
+ * sd_zbc_report_zones - SCSI .report_zones() callback.
+ * @disk: Disk to report zones for.
+ * @sector: Start sector.
+ * @nr_zones: Maximum number of zones to report.
+ * @cb: Callback function called to report zone information.
+ * @data: Second argument passed to @cb.
+ *
+ * Called by the block layer to iterate over zone information. See also the
+ * disk->fops->report_zones() calls in block/blk-zoned.c.
+ */
 int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data)
 {
        struct scsi_disk *sdkp = scsi_disk(disk);
-       sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
+       sector_t lba = sectors_to_logical(sdkp->device, sector);
        unsigned int nr, i;
        unsigned char *buf;
+       u64 zone_length, start_lba;
        size_t offset, buflen = 0;
        int zone_idx = 0;
        int ret;
@@ -204,7 +264,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
                /* Not a zoned device */
                return -EOPNOTSUPP;
 
-       if (!capacity)
+       if (!sdkp->capacity)
                /* Device gone or invalid */
                return -ENODEV;
 
@@ -212,9 +272,8 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
        if (!buf)
                return -ENOMEM;
 
-       while (zone_idx < nr_zones && sector < capacity) {
-               ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
-                               sectors_to_logical(sdkp->device, sector), true);
+       while (zone_idx < nr_zones && lba < sdkp->capacity) {
+               ret = sd_zbc_do_report_zones(sdkp, buf, buflen, lba, true);
                if (ret)
                        goto out;
 
@@ -225,14 +284,36 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
 
                for (i = 0; i < nr && zone_idx < nr_zones; i++) {
                        offset += 64;
+                       start_lba = get_unaligned_be64(&buf[offset + 16]);
+                       zone_length = get_unaligned_be64(&buf[offset + 8]);
+                       if ((zone_idx == 0 &&
+                           (lba < start_lba ||
+                            lba >= start_lba + zone_length)) ||
+                           (zone_idx > 0 && start_lba != lba) ||
+                           start_lba + zone_length < start_lba) {
+                               sd_printk(KERN_ERR, sdkp,
+                                         "Zone %d at LBA %llu is invalid: %llu + %llu\n",
+                                         zone_idx, lba, start_lba, zone_length);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       lba = start_lba + zone_length;
+                       if (sd_zbc_is_gap_zone(&buf[offset])) {
+                               if (sdkp->zone_starting_lba_gran)
+                                       continue;
+                               sd_printk(KERN_ERR, sdkp,
+                                         "Gap zone without constant LBA offsets\n");
+                               ret = -EINVAL;
+                               goto out;
+                       }
+
                        ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
                                                  cb, data);
                        if (ret)
                                goto out;
+
                        zone_idx++;
                }
-
-               sector += sd_zbc_zone_sectors(sdkp) * i;
        }
 
        ret = zone_idx;
@@ -276,6 +357,10 @@ static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
        return 0;
 }
 
+/*
+ * An attempt to append a zone triggered an invalid write pointer error.
+ * Reread the write pointer of the zone(s) in which the append failed.
+ */
 static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
 {
        struct scsi_disk *sdkp;
@@ -286,14 +371,14 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
        sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
 
        spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
-       for (zno = 0; zno < sdkp->nr_zones; zno++) {
+       for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) {
                if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
                        continue;
 
                spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
                ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
                                             SD_BUF_SIZE,
-                                            zno * sdkp->zone_blocks, true);
+                                            zno * sdkp->zone_info.zone_blocks, true);
                spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
                if (!ret)
                        sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
@@ -360,7 +445,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
                break;
        default:
                wp_offset = sectors_to_logical(sdkp->device, wp_offset);
-               if (wp_offset + nr_blocks > sdkp->zone_blocks) {
+               if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) {
                        ret = BLK_STS_IOERR;
                        break;
                }
@@ -489,7 +574,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
                break;
        case REQ_OP_ZONE_RESET_ALL:
                memset(sdkp->zones_wp_offset, 0,
-                      sdkp->nr_zones * sizeof(unsigned int));
+                      sdkp->zone_info.nr_zones * sizeof(unsigned int));
                break;
        default:
                break;
@@ -545,6 +630,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
 static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
                                              unsigned char *buf)
 {
+       u64 zone_starting_lba_gran;
 
        if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
                sd_printk(KERN_NOTICE, sdkp,
@@ -558,12 +644,36 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
                sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
                sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
                sdkp->zones_max_open = 0;
-       } else {
-               /* Host-managed */
-               sdkp->urswrz = buf[4] & 1;
-               sdkp->zones_optimal_open = 0;
-               sdkp->zones_optimal_nonseq = 0;
-               sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
+               return 0;
+       }
+
+       /* Host-managed */
+       sdkp->urswrz = buf[4] & 1;
+       sdkp->zones_optimal_open = 0;
+       sdkp->zones_optimal_nonseq = 0;
+       sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
+       /* Check zone alignment method */
+       switch (buf[23] & 0xf) {
+       case 0:
+       case ZBC_CONSTANT_ZONE_LENGTH:
+               /* Use zone length */
+               break;
+       case ZBC_CONSTANT_ZONE_START_OFFSET:
+               zone_starting_lba_gran = get_unaligned_be64(&buf[24]);
+               if (zone_starting_lba_gran == 0 ||
+                   !is_power_of_2(zone_starting_lba_gran) ||
+                   logical_to_sectors(sdkp->device, zone_starting_lba_gran) >
+                   UINT_MAX) {
+                       sd_printk(KERN_ERR, sdkp,
+                                 "Invalid zone starting LBA granularity %llu\n",
+                                 zone_starting_lba_gran);
+                       return -ENODEV;
+               }
+               sdkp->zone_starting_lba_gran = zone_starting_lba_gran;
+               break;
+       default:
+               sd_printk(KERN_ERR, sdkp, "Invalid zone alignment method\n");
+               return -ENODEV;
        }
 
        /*
@@ -585,7 +695,7 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
  * sd_zbc_check_capacity - Check the device capacity
  * @sdkp: Target disk
  * @buf: command buffer
- * @zblocks: zone size in number of blocks
+ * @zblocks: zone size in logical blocks
  *
  * Get the device zone size and check that the device capacity as reported
  * by READ CAPACITY matches the max_lba value (plus one) of the report zones
@@ -619,14 +729,25 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
                }
        }
 
-       /* Get the size of the first reported zone */
-       rec = buf + 64;
-       zone_blocks = get_unaligned_be64(&rec[8]);
-       if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
-               if (sdkp->first_scan)
-                       sd_printk(KERN_NOTICE, sdkp,
-                                 "Zone size too large\n");
-               return -EFBIG;
+       if (sdkp->zone_starting_lba_gran == 0) {
+               /* Get the size of the first reported zone */
+               rec = buf + 64;
+               zone_blocks = get_unaligned_be64(&rec[8]);
+               if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+                       if (sdkp->first_scan)
+                               sd_printk(KERN_NOTICE, sdkp,
+                                         "Zone size too large\n");
+                       return -EFBIG;
+               }
+       } else {
+               zone_blocks = sdkp->zone_starting_lba_gran;
+       }
+
+       if (!is_power_of_2(zone_blocks)) {
+               sd_printk(KERN_ERR, sdkp,
+                         "Zone size %llu is not a power of two.\n",
+                         zone_blocks);
+               return -EINVAL;
        }
 
        *zblocks = zone_blocks;
@@ -639,16 +760,16 @@ static void sd_zbc_print_zones(struct scsi_disk *sdkp)
        if (!sd_is_zoned(sdkp) || !sdkp->capacity)
                return;
 
-       if (sdkp->capacity & (sdkp->zone_blocks - 1))
+       if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
                sd_printk(KERN_NOTICE, sdkp,
                          "%u zones of %u logical blocks + 1 runt zone\n",
-                         sdkp->nr_zones - 1,
-                         sdkp->zone_blocks);
+                         sdkp->zone_info.nr_zones - 1,
+                         sdkp->zone_info.zone_blocks);
        else
                sd_printk(KERN_NOTICE, sdkp,
                          "%u zones of %u logical blocks\n",
-                         sdkp->nr_zones,
-                         sdkp->zone_blocks);
+                         sdkp->zone_info.nr_zones,
+                         sdkp->zone_info.zone_blocks);
 }
 
 static int sd_zbc_init_disk(struct scsi_disk *sdkp)
@@ -675,10 +796,8 @@ static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
        kfree(sdkp->zone_wp_update_buf);
        sdkp->zone_wp_update_buf = NULL;
 
-       sdkp->nr_zones = 0;
-       sdkp->rev_nr_zones = 0;
-       sdkp->zone_blocks = 0;
-       sdkp->rev_zone_blocks = 0;
+       sdkp->early_zone_info = (struct zoned_disk_info){ };
+       sdkp->zone_info = (struct zoned_disk_info){ };
 
        mutex_unlock(&sdkp->rev_mutex);
 }
@@ -696,12 +815,17 @@ static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
        swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
 }
 
+/*
+ * Call blk_revalidate_disk_zones() if any of the zoned disk properties have
+ * changed that make it necessary to call that function. Called by
+ * sd_revalidate_disk() after the gendisk capacity has been set.
+ */
 int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
 {
        struct gendisk *disk = sdkp->disk;
        struct request_queue *q = disk->queue;
-       u32 zone_blocks = sdkp->rev_zone_blocks;
-       unsigned int nr_zones = sdkp->rev_nr_zones;
+       u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
+       unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
        u32 max_append;
        int ret = 0;
        unsigned int flags;
@@ -732,14 +856,14 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
         */
        mutex_lock(&sdkp->rev_mutex);
 
-       if (sdkp->zone_blocks == zone_blocks &&
-           sdkp->nr_zones == nr_zones &&
+       if (sdkp->zone_info.zone_blocks == zone_blocks &&
+           sdkp->zone_info.nr_zones == nr_zones &&
            disk->queue->nr_zones == nr_zones)
                goto unlock;
 
        flags = memalloc_noio_save();
-       sdkp->zone_blocks = zone_blocks;
-       sdkp->nr_zones = nr_zones;
+       sdkp->zone_info.zone_blocks = zone_blocks;
+       sdkp->zone_info.nr_zones = nr_zones;
        sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
        if (!sdkp->rev_wp_offset) {
                ret = -ENOMEM;
@@ -754,8 +878,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
        sdkp->rev_wp_offset = NULL;
 
        if (ret) {
-               sdkp->zone_blocks = 0;
-               sdkp->nr_zones = 0;
+               sdkp->zone_info = (struct zoned_disk_info){ };
                sdkp->capacity = 0;
                goto unlock;
        }
@@ -774,7 +897,16 @@ unlock:
        return ret;
 }
 
-int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
+/**
+ * sd_zbc_read_zones - Read zone information and update the request queue
+ * @sdkp: SCSI disk pointer.
+ * @buf: 512 byte buffer used for storing SCSI command output.
+ *
+ * Read zone information and update the request queue zone characteristics and
+ * also the zoned device information in *sdkp. Called by sd_revalidate_disk()
+ * before the gendisk capacity has been set.
+ */
+int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
 {
        struct gendisk *disk = sdkp->disk;
        struct request_queue *q = disk->queue;
@@ -832,8 +964,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
        if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
                blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
 
-       sdkp->rev_nr_zones = nr_zones;
-       sdkp->rev_zone_blocks = zone_blocks;
+       sdkp->early_zone_info.nr_zones = nr_zones;
+       sdkp->early_zone_info.zone_blocks = zone_blocks;
 
        return 0;
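
REPORT ZONES returns a 64-byte header followed by 64-byte zone descriptors: bits 3:0 of descriptor byte 0 give the zone type (including the new gap type), bits 7:4 of byte 1 the zone condition, and the zone length, start LBA and write pointer are big-endian 64-bit values at offsets 8, 16 and 24. That layout is what the contiguity checks added above validate. A stand-alone sketch of decoding one descriptor, with struct and helper names assumed:

#include <stdint.h>

struct zone_desc {
	uint8_t  type;		/* byte 0, bits 3:0 */
	uint8_t  cond;		/* byte 1, bits 7:4 */
	uint64_t length_lba;	/* bytes 8..15, big endian */
	uint64_t start_lba;	/* bytes 16..23, big endian */
	uint64_t wp_lba;	/* bytes 24..31, big endian */
};

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* Decode one 64-byte REPORT ZONES descriptor (illustrative only). */
static struct zone_desc decode_zone_desc(const uint8_t buf[64])
{
	struct zone_desc z = {
		.type       = buf[0] & 0x0f,
		.cond       = (buf[1] >> 4) & 0x0f,
		.length_lba = get_be64(&buf[8]),
		.start_lba  = get_be64(&buf[16]),
		.wp_lba     = get_be64(&buf[24]),
	};

	return z;
}
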
 
index cbd9289..32d3b82 100644 (file)
@@ -113,7 +113,7 @@ static int sr_open(struct cdrom_device_info *, int);
 static void sr_release(struct cdrom_device_info *);
 
 static void get_sectorsize(struct scsi_cd *);
-static void get_capabilities(struct scsi_cd *);
+static int get_capabilities(struct scsi_cd *);
 
 static unsigned int sr_check_events(struct cdrom_device_info *cdi,
                                    unsigned int clearing, int slot);
@@ -669,8 +669,9 @@ static int sr_probe(struct device *dev)
 
        sdev->sector_size = 2048;       /* A guess, just in case */
 
-       /* FIXME: need to handle a get_capabilities failure properly ?? */
-       get_capabilities(cd);
+       error = -ENOMEM;
+       if (get_capabilities(cd))
+               goto fail_minor;
        sr_vendor_init(cd);
 
        set_capacity(disk, cd->capacity);
@@ -794,7 +795,7 @@ static void get_sectorsize(struct scsi_cd *cd)
        return;
 }
 
-static void get_capabilities(struct scsi_cd *cd)
+static int get_capabilities(struct scsi_cd *cd)
 {
        unsigned char *buffer;
        struct scsi_mode_data data;
@@ -819,7 +820,7 @@ static void get_capabilities(struct scsi_cd *cd)
        buffer = kmalloc(512, GFP_KERNEL);
        if (!buffer) {
                sr_printk(KERN_ERR, cd, "out of memory.\n");
-               return;
+               return -ENOMEM;
        }
 
        /* eat unit attentions */
@@ -839,7 +840,7 @@ static void get_capabilities(struct scsi_cd *cd)
                                 CDC_MRW | CDC_MRW_W | CDC_RAM);
                kfree(buffer);
                sr_printk(KERN_INFO, cd, "scsi-1 drive");
-               return;
+               return 0;
        }
 
        n = data.header_length + data.block_descriptor_length;
@@ -898,6 +899,7 @@ static void get_capabilities(struct scsi_cd *cd)
        }
 
        kfree(buffer);
+       return 0;
 }
 
 /*
index 9fe27b0..393b9a0 100644 (file)
@@ -1,36 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Kernel configuration file for the UFS Host Controller
 #
-# This code is based on drivers/scsi/ufs/Kconfig
 # Copyright (C) 2011-2013 Samsung India Software Operations
 #
 # Authors:
 #      Santosh Yaraganavi <santosh.sy@samsung.com>
 #      Vinayak Holikatti <h.vinayak@samsung.com>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-# See the COPYING file in the top-level directory or visit
-# <http://www.gnu.org/licenses/gpl-2.0.html>
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# This program is provided "AS IS" and "WITH ALL FAULTS" and
-# without warranty of any kind. You are solely responsible for
-# determining the appropriateness of using and distributing
-# the program and assume all risks associated with your exercise
-# of rights with respect to the program, including but not limited
-# to infringement of third party rights, the risks and costs of
-# program errors, damage to or loss of data, programs or equipment,
-# and unavailability or interruption of operations. Under no
-# circumstances will the contributor of this Program be liable for
-# any damages of any kind arising from your use or distribution of
-# this program.
 
 config SCSI_UFSHCD
        tristate "Universal Flash Storage Controller Driver Core"
index 7da8be2..e05c0ae 100644 (file)
@@ -9,6 +9,7 @@
  *
  */
 
+#include <linux/clk.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -340,4 +341,3 @@ module_platform_driver(cdns_ufs_pltfrm_driver);
 MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
 MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
index 7b08e2e..e635c21 100644 (file)
@@ -11,6 +11,7 @@
 #include "ufshcd-dwc.h"
 #include "tc-dwc-g210.h"
 
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 
index 783ec43..f15a84d 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/delay.h>
+#include <linux/pm_runtime.h>
 
 #include "ufshcd-pltfrm.h"
 #include "ufshcd-dwc.h"
index f954a68..7ef67c9 100644 (file)
@@ -7,6 +7,8 @@
  * Authors: Joao Pinto <jpinto@synopsys.com>
  */
 
+#include <linux/module.h>
+
 #include "ufshcd.h"
 #include "unipro.h"
 
index 5a506da..f715401 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef _TC_DWC_G210_H
 #define _TC_DWC_G210_H
 
+struct ufs_hba;
+
 int tc_dwc_g210_config_40_bit(struct ufs_hba *hba);
 int tc_dwc_g210_config_20_bit(struct ufs_hba *hba);
 
index eafe0db..122d650 100644 (file)
@@ -29,11 +29,9 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
                return PTR_ERR(regbase);
 
        pm_runtime_enable(dev);
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(dev);
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0)
                goto disable_pm;
-       }
 
        /* Select MPHY refclk frequency */
        clk = devm_clk_get(dev, NULL);
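
pm_runtime_resume_and_get() bundles pm_runtime_get_sync() with the pm_runtime_put_noidle() that is otherwise required on failure, so the caller only holds a usage reference when the resume actually succeeded. A sketch of the resulting probe-path pattern; the function is invented for illustration and the hardware setup is elided:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Illustrative probe skeleton, not part of the patch. */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto disable_pm;	/* usage count already dropped */

	/* ... device setup while resumed ... */

	pm_runtime_put_sync(dev);
	return 0;

disable_pm:
	pm_runtime_disable(dev);
	return ret;
}
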
index 4a0bbcf..c10a8f0 100644 (file)
@@ -5,6 +5,7 @@
 
 #include "ufs-debugfs.h"
 #include "ufshcd.h"
+#include "ufshcd-priv.h"
 
 static struct dentry *ufs_debugfs_root;
 
index 474a4a0..ddb2d42 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -704,7 +705,7 @@ static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
 
        /* local unipro attributes */
        ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
-       ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), TRUE);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), true);
        ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
        ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
        ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
@@ -1028,7 +1029,7 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
 
        if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
                ufshcd_dme_set(hba,
-                       UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), TRUE);
+                       UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), true);
 
        if (attr->pa_granularity) {
                exynos_ufs_enable_dbg_mode(hba);
index 1c33e54..0b0a3d5 100644 (file)
@@ -248,22 +248,22 @@ long exynos_ufs_calc_time_cntr(struct exynos_ufs *, long);
 
 static inline void exynos_ufs_enable_ov_tm(struct ufs_hba *hba)
 {
-       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), TRUE);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), true);
 }
 
 static inline void exynos_ufs_disable_ov_tm(struct ufs_hba *hba)
 {
-       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), FALSE);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), false);
 }
 
 static inline void exynos_ufs_enable_dbg_mode(struct ufs_hba *hba)
 {
-       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), TRUE);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), true);
 }
 
 static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba)
 {
-       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), FALSE);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), false);
 }
 
 #endif /* _UFS_EXYNOS_H_ */
index ab1a7eb..7046143 100644 (file)
@@ -7,6 +7,8 @@
  */
 
 #include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/dma-mapping.h>
index 7485549..c38d9d9 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/units.h>
 
 #include "ufshcd.h"
+#include "ufshcd-priv.h"
 
 struct ufs_hwmon_data {
        struct ufs_hba *hba;
index 86a9380..083d6bd 100644 (file)
@@ -8,6 +8,9 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
@@ -19,7 +22,6 @@
 #include <linux/soc/mediatek/mtk_sip_svc.h>
 
 #include "ufshcd.h"
-#include "ufshcd-crypto.h"
 #include "ufshcd-pltfrm.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
 #define ufs_mtk_device_reset_ctrl(high, res) \
        ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
 
-static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
-       UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
-       UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
-               UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
-       END_FIX
+static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
+       { .wmanufacturerid = UFS_VENDOR_MICRON,
+         .model = UFS_ANY_MODEL,
+         .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
+       { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
+         .model = "H9HQ21AFAMZDAR",
+         .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
+       {}
 };
 
 static const struct of_device_id ufs_mtk_of_match[] = {
@@ -169,7 +173,6 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status status)
 {
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-       unsigned long flags;
 
        if (status == PRE_CHANGE) {
                if (host->unipro_lpm) {
@@ -183,12 +186,8 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
                        ufs_mtk_crypto_enable(hba);
 
                if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
-                       spin_lock_irqsave(hba->host->host_lock, flags);
                        ufshcd_writel(hba, 0,
                                      REG_AUTO_HIBERNATE_IDLE_TIMER);
-                       spin_unlock_irqrestore(hba->host->host_lock,
-                                              flags);
-
                        hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
                        hba->ahit = 0;
                }
@@ -860,7 +859,6 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
 
 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
 {
-       unsigned long flags;
        u32 ah_ms;
 
        if (ufshcd_is_clkgating_allowed(hba)) {
@@ -869,9 +867,7 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
                                          hba->ahit);
                else
                        ah_ms = 10;
-               spin_lock_irqsave(hba->host->host_lock, flags);
-               hba->clk_gating.delay_ms = ah_ms + 5;
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
        }
 }
 
@@ -992,13 +988,10 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
 
 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
 {
-       unsigned long flags;
        int ret;
 
        /* disable auto-hibern8 */
-       spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        /* wait host return to idle state when auto-hibern8 off */
        ufs_mtk_wait_idle_state(hba, 5);
index bbb0ad7..745e48e 100644 (file)
@@ -6,10 +6,10 @@
  * Copyright 2019 Google LLC
  */
 
+#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/qcom_scm.h>
 
-#include "ufshcd-crypto.h"
 #include "ufs-qcom.h"
 
 #define AES_256_XTS_KEY_SIZE                   64
index 586c0e5..4dcb232 100644 (file)
@@ -5,6 +5,9 @@
 
 #include <linux/acpi.h>
 #include <linux/time.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
@@ -18,6 +21,7 @@
 #include "ufs-qcom.h"
 #include "ufshci.h"
 #include "ufs_quirks.h"
+
 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN  \
        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
@@ -299,8 +303,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct phy *phy = host->generic_phy;
        int ret = 0;
-       bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
-                                                       ? true : false;
+       bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
 
        /* Reset UFS Host Controller and PHY */
        ret = ufs_qcom_host_reset(hba);
@@ -641,12 +644,7 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                        return err;
        }
 
-       err = ufs_qcom_ice_resume(host);
-       if (err)
-               return err;
-
-       hba->is_sys_suspended = false;
-       return 0;
+       return ufs_qcom_ice_resume(host);
 }
 
 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
@@ -687,8 +685,11 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
 
                writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
 
-               /* ensure that ref_clk is enabled/disabled before we return */
-               wmb();
+               /*
+                * Make sure the write to ref_clk reaches the destination and
+                * not stored in a Write Buffer (WB).
+                */
+               readl(host->dev_ref_clk_ctrl_mmio);
 
                /*
                 * If we call hibern8 exit after this, we need to make sure that
@@ -873,6 +874,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
        hba->caps |= UFSHCD_CAP_WB_EN;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
+       hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
 
        if (host->hw_ver.major >= 0x2) {
                host->caps = UFS_QCOM_CAP_QUNIPRO |
@@ -987,13 +989,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
        host->hba = hba;
        ufshcd_set_variant(hba, host);
 
-       /* Setup the reset control of HCI */
-       host->core_reset = devm_reset_control_get(hba->dev, "rst");
+       /* Setup the optional reset control of HCI */
+       host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
        if (IS_ERR(host->core_reset)) {
-               err = PTR_ERR(host->core_reset);
-               dev_warn(dev, "Failed to get reset control %d\n", err);
-               host->core_reset = NULL;
-               err = 0;
+               err = dev_err_probe(dev, PTR_ERR(host->core_reset),
+                                   "Failed to get reset control\n");
+               goto out_variant_clear;
        }
 
        /* Fire up the reset controller. Failure here is non-fatal. */
@@ -1007,28 +1008,10 @@ static int ufs_qcom_init(struct ufs_hba *hba)
                err = 0;
        }
 
-       /*
-        * voting/devoting device ref_clk source is time consuming hence
-        * skip devoting it during aggressive clock gating. This clock
-        * will still be gated off during runtime suspend.
-        */
-       host->generic_phy = devm_phy_get(dev, "ufsphy");
-
-       if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
-               /*
-                * UFS driver might be probed before the phy driver does.
-                * In that case we would like to return EPROBE_DEFER code.
-                */
-               err = -EPROBE_DEFER;
-               dev_warn(dev, "%s: required phy device. hasn't probed yet. err = %d\n",
-                       __func__, err);
-               goto out_variant_clear;
-       } else if (IS_ERR(host->generic_phy)) {
-               if (has_acpi_companion(dev)) {
-                       host->generic_phy = NULL;
-               } else {
-                       err = PTR_ERR(host->generic_phy);
-                       dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
+       if (!has_acpi_companion(dev)) {
+               host->generic_phy = devm_phy_get(dev, "ufsphy");
+               if (IS_ERR(host->generic_phy)) {
+                       err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
                        goto out_variant_clear;
                }
        }
@@ -1449,23 +1432,17 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
 
 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
-                                         struct devfreq_dev_profile *p,
-                                         void *data)
+                                       struct devfreq_dev_profile *p,
+                                       struct devfreq_simple_ondemand_data *d)
 {
-       static struct devfreq_simple_ondemand_data *d;
-
-       if (!data)
-               return;
-
-       d = (struct devfreq_simple_ondemand_data *)data;
        p->polling_ms = 60;
        d->upthreshold = 70;
        d->downdifferential = 5;
 }
 #else
 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
-                                         struct devfreq_dev_profile *p,
-                                         void *data)
+               struct devfreq_dev_profile *p,
+               struct devfreq_simple_ondemand_data *data)
 {
 }
 #endif
index 8208e3a..771bc95 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/reset-controller.h>
 #include <linux/reset.h>
+#include "ufshcd.h"
 
 #define MAX_UFS_QCOM_HOSTS     1
 #define MAX_U32                 (~(u32)0)
@@ -239,10 +240,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
 
 static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
 {
-       if (host->caps & UFS_QCOM_CAP_QUNIPRO)
-               return true;
-       else
-               return false;
+       return host->caps & UFS_QCOM_CAP_QUNIPRO;
 }
 
 /* ufs-qcom-ice.c */
index 5c405ff..8a3c644 100644 (file)
@@ -8,6 +8,7 @@
 
 #include "ufs.h"
 #include "ufs-sysfs.h"
+#include "ufshcd-priv.h"
 
 static const char *ufshcd_uic_link_state_to_string(
                        enum uic_link_state state)
index 0f4e750..8d94af3 100644 (file)
@@ -7,11 +7,12 @@
 
 #include <linux/sysfs.h>
 
-#include "ufshcd.h"
+struct device;
 
 void ufs_sysfs_add_nodes(struct device *dev);
 void ufs_sysfs_remove_nodes(struct device *dev);
 
 extern const struct attribute_group ufs_sysfs_unit_descriptor_group;
 extern const struct attribute_group ufs_sysfs_lun_attributes_group;
+
 #endif
index 4a00c24..1bba3fe 100644 (file)
@@ -415,11 +415,6 @@ enum ufs_ref_clk_freq {
        REF_CLK_FREQ_INVAL      = -1,
 };
 
-struct ufs_ref_clk {
-       unsigned long freq_hz;
-       enum ufs_ref_clk_freq val;
-};
-
 /* Query response result code */
 enum {
        QUERY_RESULT_SUCCESS                    = 0x00,
@@ -562,15 +557,6 @@ struct ufs_query_res {
        struct utp_upiu_query upiu_res;
 };
 
-#define UFS_VREG_VCC_MIN_UV       2700000 /* uV */
-#define UFS_VREG_VCC_MAX_UV       3600000 /* uV */
-#define UFS_VREG_VCC_1P8_MIN_UV    1700000 /* uV */
-#define UFS_VREG_VCC_1P8_MAX_UV    1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV      1140000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV      1260000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV     1700000 /* uV */
-#define UFS_VREG_VCCQ2_MAX_UV     1950000 /* uV */
-
 /*
  * VCCQ & VCCQ2 current requirement when UFS device is in sleep state
  * and link is in Hibern8 state.
@@ -582,8 +568,6 @@ struct ufs_vreg {
        const char *name;
        bool always_on;
        bool enabled;
-       int min_uV;
-       int max_uV;
        int max_uA;
 };
 
@@ -636,23 +620,4 @@ enum ufs_trace_tsf_t {
        UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
 };
 
-/**
- * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
- * @dev_info: pointer of instance of struct ufs_dev_info
- * @lun: LU number to check
- * @return: true if the lun has a matching unit descriptor, false otherwise
- */
-static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
-               u8 lun, u8 param_offset)
-{
-       if (!dev_info || !dev_info->max_lu_supported) {
-               pr_err("Max General LU supported by UFS isn't initialized\n");
-               return false;
-       }
-       /* WB is available only for the logical unit from 0 to 7 */
-       if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS)
-               return lun < UFS_UPIU_MAX_WB_LUN_ID;
-       return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
-}
-
 #endif /* End of Header */
index 39bf204..9e9b938 100644 (file)
@@ -4,7 +4,13 @@
  *
  * Copyright (C) 2018 Western Digital Corporation
  */
+
+#include <linux/bsg-lib.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
 #include "ufs_bsg.h"
+#include "ufshcd.h"
+#include "ufshcd-priv.h"
 
 static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
                                       struct utp_upiu_query *qr)
index d099187..57712d2 100644 (file)
@@ -5,12 +5,7 @@
 #ifndef UFS_BSG_H
 #define UFS_BSG_H
 
-#include <linux/bsg-lib.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-
-#include "ufshcd.h"
-#include "ufs.h"
+struct ufs_hba;
 
 #ifdef CONFIG_SCSI_UFS_BSG
 void ufs_bsg_remove(struct ufs_hba *hba);
index 35ec9ea..bcb4f00 100644 (file)
 #define UFS_VENDOR_WDC         0x145
 
 /**
- * ufs_dev_fix - ufs device quirk info
+ * ufs_dev_quirk - ufs device quirk info
  * @card: ufs card details
  * @quirk: device quirk
  */
-struct ufs_dev_fix {
+struct ufs_dev_quirk {
        u16 wmanufacturerid;
-       u8 *model;
+       const u8 *model;
        unsigned int quirk;
 };
 
-#define END_FIX { }
-
-/* add specific device quirk */
-#define UFS_FIX(_vendor, _model, _quirk) { \
-       .wmanufacturerid = (_vendor),\
-       .model = (_model),                 \
-       .quirk = (_quirk),                 \
-}
-
 /*
  * Some vendor's UFS device sends back to back NACs for the DL data frames
  * causing the host controller to raise the DFES error status. Sometimes
index e18c012..9f98f18 100644 (file)
@@ -6,10 +6,13 @@
 #ifndef _UFSHCD_CRYPTO_H
 #define _UFSHCD_CRYPTO_H
 
-#ifdef CONFIG_SCSI_UFS_CRYPTO
+#include <scsi/scsi_cmnd.h>
 #include "ufshcd.h"
+#include "ufshcd-priv.h"
 #include "ufshci.h"
 
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+
 static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
                                              struct ufshcd_lrb *lrbp)
 {
index 5bb9d3a..a57973c 100644 (file)
@@ -7,6 +7,8 @@
  * Authors: Joao Pinto <jpinto@synopsys.com>
  */
 
+#include <linux/module.h>
+
 #include "ufshcd.h"
 #include "unipro.h"
 
index 4268ca2..43b7079 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef _UFSHCD_DWC_H
 #define _UFSHCD_DWC_H
 
+#include "ufshcd.h"
+
 struct ufshcd_dme_attr_val {
        u32 attr_sel;
        u32 mib_val;
index e892b9f..20af2fb 100644 (file)
@@ -2,7 +2,6 @@
 /*
  * Universal Flash Storage Host controller PCI glue driver
  *
- * This code is based on drivers/scsi/ufs/ufshcd-pci.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
  *
  * Authors:
@@ -11,6 +10,8 @@
  */
 
 #include "ufshcd.h"
+#include <linux/delay.h>
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_qos.h>
@@ -618,4 +619,3 @@ MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
 MODULE_DESCRIPTION("UFS host controller PCI glue driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
index 87975d1..f5313f4 100644 (file)
@@ -8,6 +8,7 @@
  *     Vinayak Holikatti <h.vinayak@samsung.com>
  */
 
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
@@ -297,18 +298,20 @@ EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
 
 void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
 {
-       dev_param->tx_lanes = 2;
-       dev_param->rx_lanes = 2;
-       dev_param->hs_rx_gear = UFS_HS_G3;
-       dev_param->hs_tx_gear = UFS_HS_G3;
-       dev_param->pwm_rx_gear = UFS_PWM_G4;
-       dev_param->pwm_tx_gear = UFS_PWM_G4;
-       dev_param->rx_pwr_pwm = SLOW_MODE;
-       dev_param->tx_pwr_pwm = SLOW_MODE;
-       dev_param->rx_pwr_hs = FAST_MODE;
-       dev_param->tx_pwr_hs = FAST_MODE;
-       dev_param->hs_rate = PA_HS_MODE_B;
-       dev_param->desired_working_mode = UFS_HS_MODE;
+       *dev_param = (struct ufs_dev_params){
+               .tx_lanes = 2,
+               .rx_lanes = 2,
+               .hs_rx_gear = UFS_HS_G3,
+               .hs_tx_gear = UFS_HS_G3,
+               .pwm_rx_gear = UFS_PWM_G4,
+               .pwm_tx_gear = UFS_PWM_G4,
+               .rx_pwr_pwm = SLOW_MODE,
+               .tx_pwr_pwm = SLOW_MODE,
+               .rx_pwr_hs = FAST_MODE,
+               .tx_pwr_hs = FAST_MODE,
+               .hs_rate = PA_HS_MODE_B,
+               .desired_working_mode = UFS_HS_MODE,
+       };
 }
 EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
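
The rewrite above collapses a run of field-by-field assignments into a single compound-literal assignment, which also zero-initializes any member not named explicitly. A small standalone sketch of the idiom; the struct and values are made up for illustration:

#include <stdio.h>

struct link_params {
        int tx_lanes;
        int rx_lanes;
        int hs_gear;
        int pwm_gear;
};

static void link_params_init(struct link_params *p)
{
        /* One assignment from a compound literal: named members get the
         * listed values, every other member is implicitly zeroed. */
        *p = (struct link_params){
                .tx_lanes = 2,
                .rx_lanes = 2,
                .hs_gear  = 3,
        };
}

int main(void)
{
        struct link_params p;

        link_params_init(&p);
        printf("tx=%d rx=%d hs=%d pwm=%d\n",
               p.tx_lanes, p.rx_lanes, p.hs_gear, p.pwm_gear); /* pwm is 0 */
        return 0;
}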
 
@@ -341,7 +344,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 
        err = ufshcd_alloc_host(dev, &hba);
        if (err) {
-               dev_err(&pdev->dev, "Allocation failed\n");
+               dev_err(dev, "Allocation failed\n");
                goto out;
        }
 
@@ -349,13 +352,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 
        err = ufshcd_parse_clock_info(hba);
        if (err) {
-               dev_err(&pdev->dev, "%s: clock parse failed %d\n",
+               dev_err(dev, "%s: clock parse failed %d\n",
                                __func__, err);
                goto dealloc_host;
        }
        err = ufshcd_parse_regulator_info(hba);
        if (err) {
-               dev_err(&pdev->dev, "%s: regulator init failed %d\n",
+               dev_err(dev, "%s: regulator init failed %d\n",
                                __func__, err);
                goto dealloc_host;
        }
@@ -368,8 +371,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
                goto dealloc_host;
        }
 
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
 
        return 0;
 
@@ -384,4 +387,3 @@ MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
 MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd-priv.h b/drivers/scsi/ufs/ufshcd-priv.h
new file mode 100644 (file)
index 0000000..38bc77d
--- /dev/null
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _UFSHCD_PRIV_H_
+#define _UFSHCD_PRIV_H_
+
+#include <linux/pm_runtime.h>
+#include "ufshcd.h"
+
+static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
+{
+       return !hba->shutting_down;
+}
+
+void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+
+static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
+                                                       struct ufs_hba *hba)
+{
+       return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
+}
+
+static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
+{
+       if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
+               return hba->dev_info.wb_dedicated_lu;
+       return 0;
+}
+
+#ifdef CONFIG_SCSI_UFS_HWMON
+void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
+void ufs_hwmon_remove(struct ufs_hba *hba);
+void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask);
+#else
+static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {}
+static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
+static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
+#endif
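
The block above follows the usual pattern for optional subsystems: real prototypes when CONFIG_SCSI_UFS_HWMON is enabled, empty static inline stubs otherwise, so callers never need their own #ifdef guards. A compilable sketch of that pattern with invented names (HAVE_SENSOR stands in for the Kconfig symbol):

#include <stdio.h>

/* #define HAVE_SENSOR 1    -- define to pull in the real implementation */

#ifdef HAVE_SENSOR
void sensor_probe(int id);
void sensor_remove(int id);
#else
/* Stubs compile away entirely; call sites stay free of #ifdef clutter. */
static inline void sensor_probe(int id)  { (void)id; }
static inline void sensor_remove(int id) { (void)id; }
#endif

int main(void)
{
        sensor_probe(1);     /* no-op unless HAVE_SENSOR is defined */
        sensor_remove(1);
        printf("done\n");
        return 0;
}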
+
+int ufshcd_read_desc_param(struct ufs_hba *hba,
+                          enum desc_idn desc_id,
+                          int desc_index,
+                          u8 param_offset,
+                          u8 *param_read_buf,
+                          u8 param_size);
+int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
+                           enum attr_idn idn, u8 index, u8 selector,
+                           u32 *attr_val);
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+                     enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+       enum flag_idn idn, u8 index, bool *flag_res);
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
+
+#define SD_ASCII_STD true
+#define SD_RAW false
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+                           u8 **buf, bool ascii);
+
+int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_release(struct ufs_hba *hba);
+
+void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+                                 int *desc_length);
+
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+                            struct utp_upiu_req *req_upiu,
+                            struct utp_upiu_req *rsp_upiu,
+                            int msgcode,
+                            u8 *desc_buff, int *buff_len,
+                            enum query_opcode desc_op);
+
+int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+
+/* Wrapper functions for safely calling variant operations */
+static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
+{
+       if (hba->vops)
+               return hba->vops->name;
+       return "";
+}
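
The wrappers that follow all share one shape: check that the variant ops table and the specific callback exist before calling it, and fall back to a sane default otherwise. A minimal standalone sketch of that optional-callback pattern; struct and function names are invented:

#include <stdio.h>

struct dev_ops {
        const char *name;
        int (*resume)(void *ctx);        /* optional callback */
};

struct dev {
        const struct dev_ops *ops;       /* may be NULL */
        void *ctx;
};

/* Safe wrapper: only dereference the callback when both the ops table
 * and the specific hook are present; otherwise report success. */
static int dev_resume(struct dev *d)
{
        if (d->ops && d->ops->resume)
                return d->ops->resume(d->ctx);
        return 0;
}

static int demo_resume(void *ctx) { (void)ctx; puts("variant resume"); return 0; }

int main(void)
{
        struct dev plain = { .ops = NULL };
        const struct dev_ops demo_ops = { .name = "demo", .resume = demo_resume };
        struct dev variant = { .ops = &demo_ops };

        printf("plain:   %d\n", dev_resume(&plain));    /* default path */
        printf("variant: %d\n", dev_resume(&variant));  /* callback path */
        return 0;
}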
+
+static inline void ufshcd_vops_exit(struct ufs_hba *hba)
+{
+       if (hba->vops && hba->vops->exit)
+               return hba->vops->exit(hba);
+}
+
+static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
+{
+       if (hba->vops && hba->vops->get_ufs_hci_version)
+               return hba->vops->get_ufs_hci_version(hba);
+
+       return ufshcd_readl(hba, REG_UFS_VERSION);
+}
+
+static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
+                       bool up, enum ufs_notify_change_status status)
+{
+       if (hba->vops && hba->vops->clk_scale_notify)
+               return hba->vops->clk_scale_notify(hba, up, status);
+       return 0;
+}
+
+static inline void ufshcd_vops_event_notify(struct ufs_hba *hba,
+                                           enum ufs_event_type evt,
+                                           void *data)
+{
+       if (hba->vops && hba->vops->event_notify)
+               hba->vops->event_notify(hba, evt, data);
+}
+
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+                                       enum ufs_notify_change_status status)
+{
+       if (hba->vops && hba->vops->setup_clocks)
+               return hba->vops->setup_clocks(hba, on, status);
+       return 0;
+}
+
+static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
+                                               bool status)
+{
+       if (hba->vops && hba->vops->hce_enable_notify)
+               return hba->vops->hce_enable_notify(hba, status);
+
+       return 0;
+}
+static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
+                                               bool status)
+{
+       if (hba->vops && hba->vops->link_startup_notify)
+               return hba->vops->link_startup_notify(hba, status);
+
+       return 0;
+}
+
+static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
+                                 enum ufs_notify_change_status status,
+                                 struct ufs_pa_layer_attr *dev_max_params,
+                                 struct ufs_pa_layer_attr *dev_req_params)
+{
+       if (hba->vops && hba->vops->pwr_change_notify)
+               return hba->vops->pwr_change_notify(hba, status,
+                                       dev_max_params, dev_req_params);
+
+       return -ENOTSUPP;
+}
+
+static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
+                                       int tag, u8 tm_function)
+{
+       if (hba->vops && hba->vops->setup_task_mgmt)
+               return hba->vops->setup_task_mgmt(hba, tag, tm_function);
+}
+
+static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
+                                       enum uic_cmd_dme cmd,
+                                       enum ufs_notify_change_status status)
+{
+       if (hba->vops && hba->vops->hibern8_notify)
+               return hba->vops->hibern8_notify(hba, cmd, status);
+}
+
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+       if (hba->vops && hba->vops->apply_dev_quirks)
+               return hba->vops->apply_dev_quirks(hba);
+       return 0;
+}
+
+static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
+{
+       if (hba->vops && hba->vops->fixup_dev_quirks)
+               hba->vops->fixup_dev_quirks(hba);
+}
+
+static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op,
+                               enum ufs_notify_change_status status)
+{
+       if (hba->vops && hba->vops->suspend)
+               return hba->vops->suspend(hba, op, status);
+
+       return 0;
+}
+
+static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+       if (hba->vops && hba->vops->resume)
+               return hba->vops->resume(hba, op);
+
+       return 0;
+}
+
+static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
+{
+       if (hba->vops && hba->vops->dbg_register_dump)
+               hba->vops->dbg_register_dump(hba);
+}
+
+static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
+{
+       if (hba->vops && hba->vops->device_reset)
+               return hba->vops->device_reset(hba);
+
+       return -EOPNOTSUPP;
+}
+
+static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
+               struct devfreq_dev_profile *p,
+               struct devfreq_simple_ondemand_data *data)
+{
+       if (hba->vops && hba->vops->config_scaling_param)
+               hba->vops->config_scaling_param(hba, p, data);
+}
+
+extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
+
+/**
+ * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
+ * @scsi_lun: scsi LUN id
+ *
+ * Returns UPIU LUN id
+ */
+static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
+{
+       if (scsi_is_wlun(scsi_lun))
+               return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
+                       | UFS_UPIU_WLUN_ID;
+       else
+               return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
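
ufshcd_scsi_to_upiu_lun() above folds the SCSI LUN space back into the 8-bit UPIU LUN field, keeping bit 7 as the well-known-LUN flag. A standalone sketch of the bit manipulation; the 0x7f mask, 0x80 W-LUN flag and the 0xc1xx SCSI well-known LUN range are stated as assumptions rather than quoted from the headers:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define UPIU_UNIT_NUM_MASK  0x7fu   /* assumed UFS_UPIU_MAX_UNIT_NUM_ID */
#define UPIU_WLUN_FLAG      0x80u   /* assumed UFS_UPIU_WLUN_ID */
#define SCSI_W_LUN_BASE     0xc100u /* SCSI well-known LUN range */

static bool lun_is_wlun(unsigned int lun)
{
        return (lun & 0xff00u) == SCSI_W_LUN_BASE;
}

static uint8_t scsi_to_upiu_lun(unsigned int scsi_lun)
{
        if (lun_is_wlun(scsi_lun))
                return (scsi_lun & UPIU_UNIT_NUM_MASK) | UPIU_WLUN_FLAG;
        return scsi_lun & UPIU_UNIT_NUM_MASK;
}

int main(void)
{
        /* A regular LUN keeps its number; a well-known LUN keeps its unit
         * number and gains the W-LUN flag in bit 7. */
        printf("LUN 2      -> 0x%02x\n", scsi_to_upiu_lun(2));        /* 0x02 */
        printf("LUN 0xc150 -> 0x%02x\n", scsi_to_upiu_lun(0xc150));   /* 0xd0 */
        return 0;
}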
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
+int ufshcd_write_ee_control(struct ufs_hba *hba);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
+                            u16 set, u16 clr);
+
+static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
+                                           u16 set, u16 clr)
+{
+       return ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
+                                       &hba->ee_usr_mask, set, clr);
+}
+
+static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
+                                           u16 set, u16 clr)
+{
+       return ufshcd_update_ee_control(hba, &hba->ee_usr_mask,
+                                       &hba->ee_drv_mask, set, clr);
+}
+
+static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
+{
+       return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
+{
+       return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba)
+{
+       pm_runtime_get_noresume(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_resume(struct ufs_hba *hba)
+{
+       return pm_runtime_resume(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put(struct ufs_hba *hba)
+{
+       return pm_runtime_put(&hba->ufs_device_wlun->sdev_gendev);
+}
+
+/**
+ * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @dev_info: pointer of instance of struct ufs_dev_info
+ * @lun: LU number to check
+ * @return: true if the lun has a matching unit descriptor, false otherwise
+ */
+static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
+               u8 lun, u8 param_offset)
+{
+       if (!dev_info || !dev_info->max_lu_supported) {
+               pr_err("Max General LU supported by UFS isn't initialized\n");
+               return false;
+       }
+       /* WB is available only for the logical unit from 0 to 7 */
+       if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS)
+               return lun < UFS_UPIU_MAX_WB_LUN_ID;
+       return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
+}
+
+#endif /* _UFSHCD_PRIV_H_ */
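
The new private header closes with ufs_is_valid_unit_desc_lun(), whose answer depends on which descriptor parameter is being read: WriteBooster buffer allocation units exist only for LUNs 0..7, while other parameters are valid for the RPMB W-LUN or any LUN below the device's reported maximum. A standalone restatement of that gating; the 8-LUN WriteBooster limit and the 0xc4 RPMB W-LUN id are assumptions based on the usual UFS constants:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_WB_LUN_ID   8       /* assumed UFS_UPIU_MAX_WB_LUN_ID */
#define RPMB_WLUN       0xc4    /* assumed UFS_UPIU_RPMB_WLUN */

enum param { PARAM_GENERIC, PARAM_WB_BUF_ALLOC_UNITS };

static bool lun_has_unit_desc(uint8_t lun, uint8_t max_lu, enum param p)
{
        if (!max_lu)
                return false;                   /* device limits not read yet */
        if (p == PARAM_WB_BUF_ALLOC_UNITS)
                return lun < MAX_WB_LUN_ID;     /* WB only on LUNs 0..7 */
        return lun == RPMB_WLUN || lun < max_lu;
}

int main(void)
{
        printf("%d %d %d\n",
               lun_has_unit_desc(3, 8, PARAM_WB_BUF_ALLOC_UNITS),   /* 1 */
               lun_has_unit_desc(9, 8, PARAM_WB_BUF_ALLOC_UNITS),   /* 0 */
               lun_has_unit_desc(RPMB_WLUN, 8, PARAM_GENERIC));     /* 1 */
        return 0;
}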
index 3f9caaf..1fb3a8b 100644 (file)
 #include <linux/bitfield.h>
 #include <linux/blk-pm.h>
 #include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
 #include <scsi/scsi_driver.h>
-#include "ufshcd.h"
+#include <scsi/scsi_eh.h>
+#include "ufshcd-priv.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
 #include "ufs-sysfs.h"
@@ -113,8 +121,13 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
        if (!regs)
                return -ENOMEM;
 
-       for (pos = 0; pos < len; pos += 4)
+       for (pos = 0; pos < len; pos += 4) {
+               if (offset == 0 &&
+                   pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
+                   pos <= REG_UIC_ERROR_CODE_DME)
+                       continue;
                regs[pos / 4] = ufshcd_readl(hba, offset + pos);
+       }
 
        ufshcd_hex_dump(prefix, regs, len);
        kfree(regs);
@@ -204,26 +217,33 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
        return UFS_PM_LVL_0;
 }
 
-static struct ufs_dev_fix ufs_fixups[] = {
+static const struct ufs_dev_quirk ufs_fixups[] = {
        /* UFS cards deviations table */
-       UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
-               UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
-               UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
-               UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
-       UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
-       UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
-               UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
-               UFS_DEVICE_QUIRK_PA_TACTIVATE),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
-               UFS_DEVICE_QUIRK_PA_TACTIVATE),
-       END_FIX
+       { .wmanufacturerid = UFS_VENDOR_MICRON,
+         .model = UFS_ANY_MODEL,
+         .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+                  UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
+       { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
+         .model = UFS_ANY_MODEL,
+         .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+                  UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+                  UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
+       { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
+         .model = UFS_ANY_MODEL,
+         .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
+       { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
+         .model = "hB8aL1" /*H28U62301AMR*/,
+         .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
+       { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
+         .model = UFS_ANY_MODEL,
+         .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
+       { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
+         .model = "THGLF2G9C8KBADG",
+         .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
+       { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
+         .model = "THGLF2G9D8KBADG",
+         .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
+       {}
 };
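
With the UFS_FIX()/END_FIX macros gone, the table above is written with plain designated initializers and terminated by an empty entry, and ufshcd_fixup_dev_quirks() walks it until that sentinel. A hedged standalone model of the same match-and-accumulate walk; the vendor ids, the empty-string "any model" wildcard and the prefix match are simplified assumptions, not the kernel's exact rules:

#include <stdio.h>
#include <string.h>

#define ANY_MODEL ""                    /* stand-in for UFS_ANY_MODEL */

struct dev_quirk {
        unsigned short vendor;
        const char *model;
        unsigned int quirk;
};

/* Sentinel-terminated: the final {} entry has quirk == 0. */
static const struct dev_quirk fixups[] = {
        { .vendor = 0x1ad, .model = ANY_MODEL,  .quirk = 1u << 0 },
        { .vendor = 0x198, .model = "THGLF2G9", .quirk = 1u << 1 },
        {}
};

static unsigned int collect_quirks(unsigned short vendor, const char *model)
{
        const struct dev_quirk *f;
        unsigned int quirks = 0;

        for (f = fixups; f->quirk; f++) {
                if (f->vendor != vendor)
                        continue;
                /* Empty model acts as a wildcard; otherwise prefix match. */
                if (f->model[0] == '\0' ||
                    strncmp(model, f->model, strlen(f->model)) == 0)
                        quirks |= f->quirk;
        }
        return quirks;
}

int main(void)
{
        printf("quirks=0x%x\n", collect_quirks(0x198, "THGLF2G9C8KBADG"));
        return 0;
}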
 
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
@@ -533,7 +553,7 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 
 static void ufshcd_print_host_state(struct ufs_hba *hba)
 {
-       struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
+       struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
 
        dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
        dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
@@ -639,7 +659,7 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us);
  * Return:
  * -ETIMEDOUT on error, zero on success.
  */
-int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
                                u32 val, unsigned long interval_us,
                                unsigned long timeout_ms)
 {
@@ -712,8 +732,7 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
  */
 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
 {
-       return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
-                                               DEVICE_PRESENT) ? true : false;
+       return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
 }
 
 /**
@@ -840,7 +859,7 @@ ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
 {
        return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
-                       MASK_RSP_EXCEPTION_EVENT ? true : false;
+                       MASK_RSP_EXCEPTION_EVENT;
 }
 
 /**
@@ -911,12 +930,11 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba)
  * ufshcd_is_hba_active - Get controller state
  * @hba: per adapter instance
  *
- * Returns false if controller is active, true otherwise
+ * Returns true if and only if the controller is active.
  */
 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
 {
-       return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
-               ? false : true;
+       return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
 }
 
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
@@ -940,10 +958,7 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
         * logic simple, we will only do manual tuning if local unipro version
         * doesn't support ver1.6 or later.
         */
-       if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
-               return true;
-       else
-               return false;
+       return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
 }
 
 /**
@@ -1350,7 +1365,7 @@ static int ufshcd_devfreq_target(struct device *dev,
        }
 
        /* Decide based on the rounded-off frequency and update */
-       scale_up = (*freq == clki->max_freq) ? true : false;
+       scale_up = *freq == clki->max_freq;
        if (!scale_up)
                *freq = clki->min_freq;
        /* Update the frequency */
@@ -1862,18 +1877,26 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
        return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
 }
 
+void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->clk_gating.delay_ms = value;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
+
 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct ufs_hba *hba = dev_get_drvdata(dev);
-       unsigned long flags, value;
+       unsigned long value;
 
        if (kstrtoul(buf, 0, &value))
                return -EINVAL;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->clk_gating.delay_ms = value;
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ufshcd_clkgate_delay_set(dev, value);
        return count;
 }
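
Splitting the sysfs store into parse-then-call ufshcd_clkgate_delay_set() lets other kernel code, such as vendor glue drivers, change the gating delay through the same locked path. A small standalone sketch of that store/setter split, with invented names and a pthread mutex standing in for the host lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long gate_delay_ms = 150;

/* Single locked setter, callable from the sysfs parsing code and from
 * any other in-tree user alike. */
static void gate_delay_set(unsigned long value)
{
        pthread_mutex_lock(&cfg_lock);
        gate_delay_ms = value;
        pthread_mutex_unlock(&cfg_lock);
}

/* "store" path: validate user input, then reuse the setter. */
static long gate_delay_store(const char *buf)
{
        char *end;
        unsigned long value = strtoul(buf, &end, 0);

        if (end == buf)
                return -1;              /* reject non-numeric input */
        gate_delay_set(value);
        return end - buf;
}

int main(void)
{
        gate_delay_store("30");          /* user writes to the sysfs node */
        gate_delay_set(10);              /* another driver calls the setter */
        printf("delay=%lu ms\n", gate_delay_ms);
        return 0;
}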
 
@@ -2120,9 +2143,6 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
-       /* Make sure that doorbell is committed immediately */
-       wmb();
 }
 
 /**
@@ -2131,15 +2151,17 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
  */
 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
 {
+       u8 *const sense_buffer = lrbp->cmd->sense_buffer;
        int len;
-       if (lrbp->sense_buffer &&
+
+       if (sense_buffer &&
            ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
                int len_to_copy;
 
                len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
                len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
 
-               memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
+               memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
                       len_to_copy);
        }
 }
@@ -2217,10 +2239,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
  */
 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
 {
-       if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
-               return true;
-       else
-               return false;
+       return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
 }
 
 /**
@@ -2796,11 +2815,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        lrbp = &hba->lrb[tag];
        WARN_ON(lrbp->cmd);
        lrbp->cmd = cmd;
-       lrbp->sense_bufflen = UFS_SENSE_SIZE;
-       lrbp->sense_buffer = cmd->sense_buffer;
        lrbp->task_tag = tag;
        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
-       lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
+       lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
 
        ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
 
@@ -2837,8 +2854,6 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
                struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
 {
        lrbp->cmd = NULL;
-       lrbp->sense_bufflen = 0;
-       lrbp->sense_buffer = NULL;
        lrbp->task_tag = tag;
        lrbp->lun = 0; /* device management cmd is not specific to any LUN */
        lrbp->intr_cmd = true; /* No interrupt aggregation */
@@ -4198,7 +4213,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        if (update &&
-           !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
+           !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
                ufshcd_rpm_get_sync(hba);
                ufshcd_hold(hba, false);
                ufshcd_auto_hibern8_enable(hba);
@@ -4210,14 +4225,10 @@ EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
 
 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
 {
-       unsigned long flags;
-
        if (!ufshcd_is_auto_hibern8_supported(hba))
                return;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
  /**
@@ -4328,18 +4339,18 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
                        pwr_mode->lane_rx);
        if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
                        pwr_mode->pwr_rx == FAST_MODE)
-               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
        else
-               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
 
        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
                        pwr_mode->lane_tx);
        if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
                        pwr_mode->pwr_tx == FAST_MODE)
-               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
        else
-               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
+               ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
 
        if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
            pwr_mode->pwr_tx == FASTAUTO_MODE ||
@@ -4438,7 +4449,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
                                        QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
                if (!flag_res)
                        break;
-               usleep_range(5000, 10000);
+               usleep_range(500, 1000);
        } while (ktime_before(ktime_get(), timeout));
 
        if (err) {
@@ -4554,7 +4565,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
        int retry_inner;
 
 start:
-       if (!ufshcd_is_hba_active(hba))
+       if (ufshcd_is_hba_active(hba))
                /* change controller state to "reset state" */
                ufshcd_hba_stop(hba);
 
@@ -4580,7 +4591,7 @@ start:
 
        /* wait for the host controller to complete initialization */
        retry_inner = 50;
-       while (ufshcd_is_hba_active(hba)) {
+       while (!ufshcd_is_hba_active(hba)) {
                if (retry_inner) {
                        retry_inner--;
                } else {
@@ -4914,13 +4925,13 @@ static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
         * Device wlun is the supplier & rest of the luns are consumers.
         * This ensures that device wlun suspends after all other luns.
         */
-       if (hba->sdev_ufs_device) {
+       if (hba->ufs_device_wlun) {
                link = device_link_add(&sdev->sdev_gendev,
-                                      &hba->sdev_ufs_device->sdev_gendev,
+                                      &hba->ufs_device_wlun->sdev_gendev,
                                       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
                if (!link) {
                        dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
-                               dev_name(&hba->sdev_ufs_device->sdev_gendev));
+                               dev_name(&hba->ufs_device_wlun->sdev_gendev));
                        return;
                }
                hba->luns_avail--;
@@ -5056,15 +5067,15 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
        /* Drop the reference as it won't be needed anymore */
        if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
                spin_lock_irqsave(hba->host->host_lock, flags);
-               hba->sdev_ufs_device = NULL;
+               hba->ufs_device_wlun = NULL;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
-       } else if (hba->sdev_ufs_device) {
+       } else if (hba->ufs_device_wlun) {
                struct device *supplier = NULL;
 
                /* Ensure UFS Device WLUN exists and does not disappear */
                spin_lock_irqsave(hba->host->host_lock, flags);
-               if (hba->sdev_ufs_device) {
-                       supplier = &hba->sdev_ufs_device->sdev_gendev;
+               if (hba->ufs_device_wlun) {
+                       supplier = &hba->ufs_device_wlun->sdev_gendev;
                        get_device(supplier);
                }
                spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -5782,10 +5793,7 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
                return false;
        }
        /* Let it continue to flush when available buffer exceeds threshold */
-       if (avail_buf < hba->vps->wb_flush_threshold)
-               return true;
-
-       return false;
+       return avail_buf < hba->vps->wb_flush_threshold;
 }
 
 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
@@ -5864,11 +5872,8 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
                return false;
        }
 
-       if (!hba->dev_info.b_presrv_uspc_en) {
-               if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
-                       return true;
-               return false;
-       }
+       if (!hba->dev_info.b_presrv_uspc_en)
+               return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
 
        return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
 }
@@ -6046,7 +6051,7 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 {
        ufshcd_rpm_get_sync(hba);
-       if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
+       if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
            hba->is_sys_suspended) {
                enum ufs_pm_op pm_op;
 
@@ -6091,7 +6096,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
 {
        return (!hba->is_powered || hba->shutting_down ||
-               !hba->sdev_ufs_device ||
+               !hba->ufs_device_wlun ||
                hba->ufshcd_state == UFSHCD_STATE_ERROR ||
                (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
                   ufshcd_is_link_broken(hba))));
@@ -6110,7 +6115,7 @@ static void ufshcd_recover_pm_error(struct ufs_hba *hba)
         * Set RPM status of wlun device to RPM_ACTIVE,
         * this also clears its runtime error.
         */
-       ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
+       ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
 
        /* hba device might have a runtime error otherwise */
        if (ret)
@@ -6815,8 +6820,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
        lrbp = &hba->lrb[tag];
        WARN_ON(lrbp->cmd);
        lrbp->cmd = NULL;
-       lrbp->sense_bufflen = 0;
-       lrbp->sense_buffer = NULL;
        lrbp->task_tag = tag;
        lrbp->lun = 0;
        lrbp->intr_cmd = true;
@@ -7223,7 +7226,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
         * Stop the host controller and complete the requests
         * cleared by h/w
         */
-       ufshpb_reset_host(hba);
+       ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
        ufshcd_hba_stop(hba);
        hba->silence_err_logs = true;
        ufshcd_complete_requests(hba);
@@ -7351,7 +7354,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
        u16 unit;
 
        for (i = start_scan; i >= 0; i--) {
-               data = be16_to_cpup((__be16 *)&buff[2 * i]);
+               data = get_unaligned_be16(&buff[2 * i]);
                unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
                                                ATTR_ICC_LVL_UNIT_OFFSET;
                curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
@@ -7506,20 +7509,20 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
        int ret = 0;
        struct scsi_device *sdev_boot, *sdev_rpmb;
 
-       hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
+       hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
-       if (IS_ERR(hba->sdev_ufs_device)) {
-               ret = PTR_ERR(hba->sdev_ufs_device);
-               hba->sdev_ufs_device = NULL;
+       if (IS_ERR(hba->ufs_device_wlun)) {
+               ret = PTR_ERR(hba->ufs_device_wlun);
+               hba->ufs_device_wlun = NULL;
                goto out;
        }
-       scsi_device_put(hba->sdev_ufs_device);
+       scsi_device_put(hba->ufs_device_wlun);
 
        sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
                ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
        if (IS_ERR(sdev_rpmb)) {
                ret = PTR_ERR(sdev_rpmb);
-               goto remove_sdev_ufs_device;
+               goto remove_ufs_device_wlun;
        }
        ufshcd_blk_pm_runtime_init(sdev_rpmb);
        scsi_device_put(sdev_rpmb);
@@ -7534,8 +7537,8 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
        }
        goto out;
 
-remove_sdev_ufs_device:
-       scsi_remove_device(hba->sdev_ufs_device);
+remove_ufs_device_wlun:
+       scsi_remove_device(hba->ufs_device_wlun);
 out:
        return ret;
 }
@@ -7634,9 +7637,10 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
        }
 }
 
-void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
+                            const struct ufs_dev_quirk *fixups)
 {
-       struct ufs_dev_fix *f;
+       const struct ufs_dev_quirk *f;
        struct ufs_dev_info *dev_info = &hba->dev_info;
 
        if (!fixups)
@@ -7956,6 +7960,11 @@ out:
        return err;
 }
 
+struct ufs_ref_clk {
+       unsigned long freq_hz;
+       enum ufs_ref_clk_freq val;
+};
+
 static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
        {19200000, REF_CLK_FREQ_19_2_MHZ},
        {26000000, REF_CLK_FREQ_26_MHZ},
@@ -8184,7 +8193,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
        /* Enable Auto-Hibernate if configured */
        ufshcd_auto_hibern8_enable(hba);
 
-       ufshpb_reset(hba);
+       ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
 out:
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (ret)
@@ -8319,33 +8328,10 @@ static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 static int ufshcd_config_vreg(struct device *dev,
                struct ufs_vreg *vreg, bool on)
 {
-       int ret = 0;
-       struct regulator *reg;
-       const char *name;
-       int min_uV, uA_load;
-
-       BUG_ON(!vreg);
-
-       reg = vreg->reg;
-       name = vreg->name;
-
-       if (regulator_count_voltages(reg) > 0) {
-               uA_load = on ? vreg->max_uA : 0;
-               ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
-               if (ret)
-                       goto out;
+       if (regulator_count_voltages(vreg->reg) <= 0)
+               return 0;
 
-               if (vreg->min_uV && vreg->max_uV) {
-                       min_uV = on ? vreg->min_uV : 0;
-                       ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
-                       if (ret)
-                               dev_err(dev,
-                                       "%s: %s set voltage failed, err=%d\n",
-                                       __func__, name, ret);
-               }
-       }
-out:
-       return ret;
+       return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
 }
 
 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
@@ -8693,7 +8679,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
        int ret, retries;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       sdp = hba->sdev_ufs_device;
+       sdp = hba->ufs_device_wlun;
        if (sdp) {
                ret = scsi_device_get(sdp);
                if (!ret && !scsi_device_online(sdp)) {
@@ -9257,7 +9243,7 @@ static void ufshcd_wl_shutdown(struct device *dev)
        ufshcd_rpm_get_sync(hba);
        scsi_device_quiesce(sdev);
        shost_for_each_device(sdev, hba->host) {
-               if (sdev == hba->sdev_ufs_device)
+               if (sdev == hba->ufs_device_wlun)
                        continue;
                scsi_device_quiesce(sdev);
        }
@@ -9478,7 +9464,7 @@ EXPORT_SYMBOL(ufshcd_shutdown);
  */
 void ufshcd_remove(struct ufs_hba *hba)
 {
-       if (hba->sdev_ufs_device)
+       if (hba->ufs_device_wlun)
                ufshcd_rpm_get_sync(hba);
        ufs_hwmon_remove(hba);
        ufs_bsg_remove(hba);
@@ -9806,7 +9792,7 @@ EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
 
 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
 {
-       struct device *dev = &hba->sdev_ufs_device->sdev_gendev;
+       struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
        enum ufs_dev_pwr_mode dev_pwr_mode;
        enum uic_link_state link_state;
        unsigned long flags;
@@ -9835,7 +9821,7 @@ int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
         * if it's runtime suspended. But ufs doesn't follow that.
         * Refer ufshcd_resume_complete()
         */
-       if (hba->sdev_ufs_device) {
+       if (hba->ufs_device_wlun) {
                /* Prevent runtime suspend */
                ufshcd_rpm_get_noresume(hba);
                /*
@@ -9956,4 +9942,3 @@ MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
index 94f545b..2b0f344 100644 (file)
 #ifndef _UFSHCD_H
 #define _UFSHCD_H
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/regulator/consumer.h>
 #include <linux/bitfield.h>
-#include <linux/devfreq.h>
 #include <linux/blk-crypto-profile.h>
+#include <linux/blk-mq.h>
+#include <linux/devfreq.h>
+#include <linux/pm_runtime.h>
+#include <scsi/scsi_device.h>
 #include "unipro.h"
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
 #include "ufs.h"
 #include "ufs_quirks.h"
 #include "ufshci.h"
 
 #define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.2"
 
 struct ufs_hba;
 
@@ -181,8 +155,6 @@ struct ufs_pm_lvl_states {
  * @ucd_rsp_dma_addr: UPIU response dma address for debug
  * @ucd_req_dma_addr: UPIU request dma address for debug
  * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
  * @scsi_status: SCSI status of the command
  * @command_type: SCSI, UFS, Query.
  * @task_tag: Task tag of the command
@@ -206,8 +178,6 @@ struct ufshcd_lrb {
        dma_addr_t ucd_prdt_dma_addr;
 
        struct scsi_cmnd *cmd;
-       u8 *sense_buffer;
-       unsigned int sense_bufflen;
        int scsi_status;
 
        int command_type;
@@ -241,6 +211,7 @@ struct ufs_query {
  * @type: device management command type - Query, NOP OUT
  * @lock: lock to allow one command at a time
  * @complete: internal commands completion
+ * @query: Device management query information
  */
 struct ufs_dev_cmd {
        enum dev_cmd_type type;
@@ -258,7 +229,7 @@ struct ufs_dev_cmd {
  * @min_freq: min frequency that can be used for clock scaling
  * @curr_freq: indicates the current frequency that it is set to
  * @keep_link_active: indicates that the clk should not be disabled if
-                   link is active
+ *                 link is active
  * @enabled: variable to check against multiple enable/disable
  */
 struct ufs_clk_info {
@@ -313,11 +284,13 @@ struct ufs_pwr_mode_info {
  *                  to set some things
  * @hibern8_notify: called around hibern8 enter/exit
  * @apply_dev_quirks: called to apply device specific quirks
+ * @fixup_dev_quirks: called to modify device specific quirks
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
  * @dbg_register_dump: used to dump controller debug information
  * @phy_initialization: used to initialize phys
  * @device_reset: called to issue a reset pulse on the UFS device
+ * @config_scaling_param: called to configure clock scaling parameters
  * @program_key: program or evict an inline encryption key
  * @event_notify: called to notify important events
  */
@@ -352,8 +325,8 @@ struct ufs_hba_variant_ops {
        int     (*phy_initialization)(struct ufs_hba *);
        int     (*device_reset)(struct ufs_hba *hba);
        void    (*config_scaling_param)(struct ufs_hba *hba,
-                                       struct devfreq_dev_profile *profile,
-                                       void *data);
+                               struct devfreq_dev_profile *profile,
+                               struct devfreq_simple_ondemand_data *data);
        int     (*program_key)(struct ufs_hba *hba,
                               const union ufs_crypto_cfg_entry *cfg, int slot);
        void    (*event_notify)(struct ufs_hba *hba,
@@ -384,6 +357,7 @@ enum clk_gating_state {
  * @is_initialized: Indicates whether clock gating is initialized or not
  * @active_reqs: number of requests that are pending and should be waited for
  * completion before gating clocks.
+ * @clk_gating_workq: workqueue for clock gating work.
  */
 struct ufs_clk_gating {
        struct delayed_work gate_work;
@@ -420,9 +394,9 @@ struct ufs_saved_pwr_info {
  * @resume_work: worker to resume devfreq
  * @min_gear: lowest HS gear to scale down to
  * @is_enabled: tracks if scaling is currently enabled or not, controlled by
-              clkscale_enable sysfs node
+ *            clkscale_enable sysfs node
  * @is_allowed: tracks if scaling is currently allowed or not, used to block
-              clock scaling which is not invoked from devfreq governor
+ *            clock scaling which is not invoked from devfreq governor
  * @is_initialized: Indicates whether clock scaling is initialized or not
  * @is_busy_started: tracks if busy period has started or not
  * @is_suspended: tracks if devfreq is suspended or not
@@ -449,7 +423,7 @@ struct ufs_clk_scaling {
 /**
  * struct ufs_event_hist - keeps history of errors
  * @pos: index to indicate cyclic buffer position
- * @reg: cyclic buffer for registers value
+ * @val: cyclic buffer for registers value
  * @tstamp: cyclic buffer for time stamp
  * @cnt: error counter
  */
@@ -468,6 +442,7 @@ struct ufs_event_hist {
  *             reset this after link-startup.
  * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
  *             Clear after the first successful command completion.
+ * @event: array with event history.
  */
 struct ufs_stats {
        u32 last_intr_status;
@@ -737,6 +712,14 @@ struct ufs_hba_monitor {
  * @utmrdl_dma_addr: UTMRDL DMA address
  * @host: Scsi_Host instance of the driver
  * @dev: device handle
+ * @ufs_device_wlun: WLUN that controls the entire UFS device.
+ * @hwmon_device: device instance registered with the hwmon core.
+ * @curr_dev_pwr_mode: active UFS device power mode.
+ * @uic_link_state: active state of the link to the UFS device.
+ * @rpm_lvl: desired UFS power management level during runtime PM.
+ * @spm_lvl: desired UFS power management level during system PM.
+ * @pm_op_in_progress: whether or not a PM operation is in progress.
+ * @ahit: value of Auto-Hibernate Idle Timer register.
  * @lrb: local reference block
  * @outstanding_tasks: Bits representing outstanding task requests
  * @outstanding_lock: Protects @outstanding_reqs.
@@ -747,17 +730,26 @@ struct ufs_hba_monitor {
  * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
  * @ufs_version: UFS Version to which controller complies
  * @vops: pointer to variant specific operations
+ * @vps: pointer to variant specific parameters
  * @priv: pointer to variant specific private data
  * @irq: Irq number of the controller
- * @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for UIC command
+ * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
+ * @dev_ref_clk_freq: reference clock frequency
+ * @quirks: bitmask with information about deviations from the UFSHCI standard.
+ * @dev_quirks: bitmask with information about deviations from the UFS standard.
  * @tmf_tag_set: TMF tag set.
  * @tmf_queue: Used to allocate TMF tags.
- * @pwr_done: completion for power mode change
+ * @tmf_rqs: array with pointers to TMF requests while these are in progress.
+ * @active_uic_cmd: handle of active UIC command
+ * @uic_cmd_mutex: mutex for UIC command
+ * @uic_async_done: completion used during UIC processing
  * @ufshcd_state: UFSHCD state
  * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
  * @ee_ctrl_mask: Exception event control mask
+ * @ee_drv_mask: Exception event mask for driver
+ * @ee_usr_mask: Exception event mask for user (set via debugfs)
+ * @ee_ctrl_mutex: Used to serialize exception event information.
  * @is_powered: flag to check if HBA is powered
  * @shutting_down: flag to check if shutdown has been invoked
  * @host_sem: semaphore used to serialize concurrent contexts
@@ -768,26 +760,52 @@ struct ufs_hba_monitor {
  * @uic_error: UFS interconnect layer error status
  * @saved_err: sticky error mask
  * @saved_uic_err: sticky UIC error mask
+ * @ufs_stats: various error counters
  * @force_reset: flag to force eh_work perform a full reset
  * @force_pmc: flag to force a power mode change
  * @silence_err_logs: flag to silence error logs
  * @dev_cmd: ufs device management command information
  * @last_dme_cmd_tstamp: time stamp of the last completed DME command
+ * @nop_out_timeout: NOP OUT timeout value
+ * @dev_info: information about the UFS device
  * @auto_bkops_enabled: to track whether bkops is enabled in device
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
+ * @req_abort_count: number of times ufshcd_abort() has been called
+ * @lanes_per_direction: number of lanes per data direction between the UFS
+ *     controller and the UFS device.
  * @pwr_info: holds current power mode
  * @max_pwr_info: keeps the device max valid pwm
- * @clk_scaling_lock: used to serialize device commands and clock scaling
- * @desc_size: descriptor sizes reported by device
+ * @clk_gating: information related to clock gating
+ * @caps: bitmask with information about UFS controller capabilities
+ * @devfreq: frequency scaling information owned by the devfreq core
+ * @clk_scaling: frequency scaling information owned by the UFS driver
+ * @is_sys_suspended: whether or not the entire system has been suspended
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
+ * @clk_scaling_lock: used to serialize device commands and clock scaling
+ * @desc_size: descriptor sizes reported by device
  * @scsi_block_reqs_cnt: reference counting for scsi block requests
+ * @bsg_dev: struct device associated with the BSG queue
+ * @bsg_queue: BSG queue associated with the UFS controller
+ * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
+ *     management) after the UFS device has finished a WriteBooster buffer
+ *     flush or auto BKOP.
+ * @ufshpb_dev: information related to HPB (Host Performance Booster).
+ * @monitor: statistics about UFS commands
  * @crypto_capabilities: Content of crypto capabilities register (0x100)
  * @crypto_cap_array: Array of crypto capabilities
  * @crypto_cfg_register: Start of the crypto cfg array
  * @crypto_profile: the crypto profile of this hba (if applicable)
+ * @debugfs_root: UFS controller debugfs root directory
+ * @debugfs_ee_work: used to restore ee_ctrl_mask after a delay
+ * @debugfs_ee_rate_limit_ms: user configurable delay after which to restore
+ *     ee_ctrl_mask
+ * @luns_avail: number of regular and well known LUNs supported by the UFS
+ *     device
+ * @complete_put: whether or not to call ufshcd_rpm_put() from inside
+ *     ufshcd_resume_complete()
  */
 struct ufs_hba {
        void __iomem *mmio_base;
@@ -804,11 +822,7 @@ struct ufs_hba {
 
        struct Scsi_Host *host;
        struct device *dev;
-       /*
-        * This field is to keep a reference to "scsi_device" corresponding to
-        * "UFS device" W-LU.
-        */
-       struct scsi_device *sdev_ufs_device;
+       struct scsi_device *ufs_device_wlun;
 
 #ifdef CONFIG_SCSI_UFS_HWMON
        struct device *hwmon_device;
@@ -859,9 +873,9 @@ struct ufs_hba {
        enum ufshcd_state ufshcd_state;
        u32 eh_flags;
        u32 intr_mask;
-       u16 ee_ctrl_mask; /* Exception event mask */
-       u16 ee_drv_mask;  /* Exception event mask for driver */
-       u16 ee_usr_mask;  /* Exception event mask for user (via debugfs) */
+       u16 ee_ctrl_mask;
+       u16 ee_drv_mask;
+       u16 ee_usr_mask;
        struct mutex ee_ctrl_mutex;
        bool is_powered;
        bool shutting_down;
@@ -983,7 +997,7 @@ static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
 
 static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
 {
-       return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
+       return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit);
 }
 
 static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
@@ -991,22 +1005,17 @@ static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
        return hba->caps & UFSHCD_CAP_WB_EN;
 }
 
-static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
-{
-       return !hba->shutting_down;
-}
-
 #define ufshcd_writel(hba, val, reg)   \
        writel((val), (hba)->mmio_base + (reg))
 #define ufshcd_readl(hba, reg) \
        readl((hba)->mmio_base + (reg))
 
 /**
- * ufshcd_rmwl - read modify write into a register
- * @hba - per adapter instance
- * @mask - mask to apply on read value
- * @val - actual value to write
- * @reg - register address
+ * ufshcd_rmwl - perform read/modify/write for a controller register
+ * @hba: per adapter instance
+ * @mask: mask to apply on read value
+ * @val: actual value to write
+ * @reg: register address
  */
 static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
 {
@@ -1028,9 +1037,6 @@ void ufshcd_remove(struct ufs_hba *);
 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
-int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
-                               u32 val, unsigned long interval_us,
-                               unsigned long timeout_ms);
 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
 void ufshcd_hba_stop(struct ufs_hba *hba);
@@ -1044,8 +1050,8 @@ static inline void check_upiu_size(void)
 
 /**
  * ufshcd_set_variant - set variant specific data to the hba
- * @hba - per adapter instance
- * @variant - pointer to variant specific data
+ * @hba: per adapter instance
+ * @variant: pointer to variant specific data
  */
 static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
 {
@@ -1055,35 +1061,13 @@ static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
 
 /**
  * ufshcd_get_variant - get variant specific data from the hba
- * @hba - per adapter instance
+ * @hba: per adapter instance
  */
 static inline void *ufshcd_get_variant(struct ufs_hba *hba)
 {
        BUG_ON(!hba);
        return hba->priv;
 }
-static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
-                                                       struct ufs_hba *hba)
-{
-       return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
-}
-
-static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
-{
-       if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
-               return hba->dev_info.wb_dedicated_lu;
-       return 0;
-}
-
-#ifdef CONFIG_SCSI_UFS_HWMON
-void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
-void ufs_hwmon_remove(struct ufs_hba *hba);
-void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask);
-#else
-static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {}
-static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
-static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
-#endif
 
 #ifdef CONFIG_PM
 extern int ufshcd_runtime_suspend(struct device *dev);
@@ -1185,7 +1169,8 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
+                            const struct ufs_dev_quirk *fixups);
 #define SD_ASCII_STD true
 #define SD_RAW false
 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -1194,6 +1179,8 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
 
+void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
+
 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
                                  int *desc_length);
 
@@ -1214,13 +1201,6 @@ int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
 void ufshcd_resume_complete(struct device *dev);
 
 /* Wrapper functions for safely calling variant operations */
-static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
-{
-       if (hba->vops)
-               return hba->vops->name;
-       return "";
-}
-
 static inline int ufshcd_vops_init(struct ufs_hba *hba)
 {
        if (hba->vops && hba->vops->init)
@@ -1229,61 +1209,6 @@ static inline int ufshcd_vops_init(struct ufs_hba *hba)
        return 0;
 }
 
-static inline void ufshcd_vops_exit(struct ufs_hba *hba)
-{
-       if (hba->vops && hba->vops->exit)
-               return hba->vops->exit(hba);
-}
-
-static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
-{
-       if (hba->vops && hba->vops->get_ufs_hci_version)
-               return hba->vops->get_ufs_hci_version(hba);
-
-       return ufshcd_readl(hba, REG_UFS_VERSION);
-}
-
-static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
-                       bool up, enum ufs_notify_change_status status)
-{
-       if (hba->vops && hba->vops->clk_scale_notify)
-               return hba->vops->clk_scale_notify(hba, up, status);
-       return 0;
-}
-
-static inline void ufshcd_vops_event_notify(struct ufs_hba *hba,
-                                           enum ufs_event_type evt,
-                                           void *data)
-{
-       if (hba->vops && hba->vops->event_notify)
-               hba->vops->event_notify(hba, evt, data);
-}
-
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
-                                       enum ufs_notify_change_status status)
-{
-       if (hba->vops && hba->vops->setup_clocks)
-               return hba->vops->setup_clocks(hba, on, status);
-       return 0;
-}
-
-static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
-                                               bool status)
-{
-       if (hba->vops && hba->vops->hce_enable_notify)
-               return hba->vops->hce_enable_notify(hba, status);
-
-       return 0;
-}
-static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
-                                               bool status)
-{
-       if (hba->vops && hba->vops->link_startup_notify)
-               return hba->vops->link_startup_notify(hba, status);
-
-       return 0;
-}
-
 static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
 {
        if (hba->vops && hba->vops->phy_initialization)
@@ -1292,102 +1217,8 @@ static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
        return 0;
 }
 
-static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
-                                 enum ufs_notify_change_status status,
-                                 struct ufs_pa_layer_attr *dev_max_params,
-                                 struct ufs_pa_layer_attr *dev_req_params)
-{
-       if (hba->vops && hba->vops->pwr_change_notify)
-               return hba->vops->pwr_change_notify(hba, status,
-                                       dev_max_params, dev_req_params);
-
-       return -ENOTSUPP;
-}
-
-static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
-                                       int tag, u8 tm_function)
-{
-       if (hba->vops && hba->vops->setup_task_mgmt)
-               return hba->vops->setup_task_mgmt(hba, tag, tm_function);
-}
-
-static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
-                                       enum uic_cmd_dme cmd,
-                                       enum ufs_notify_change_status status)
-{
-       if (hba->vops && hba->vops->hibern8_notify)
-               return hba->vops->hibern8_notify(hba, cmd, status);
-}
-
-static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
-{
-       if (hba->vops && hba->vops->apply_dev_quirks)
-               return hba->vops->apply_dev_quirks(hba);
-       return 0;
-}
-
-static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
-{
-       if (hba->vops && hba->vops->fixup_dev_quirks)
-               hba->vops->fixup_dev_quirks(hba);
-}
-
-static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op,
-                               enum ufs_notify_change_status status)
-{
-       if (hba->vops && hba->vops->suspend)
-               return hba->vops->suspend(hba, op, status);
-
-       return 0;
-}
-
-static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
-{
-       if (hba->vops && hba->vops->resume)
-               return hba->vops->resume(hba, op);
-
-       return 0;
-}
-
-static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
-{
-       if (hba->vops && hba->vops->dbg_register_dump)
-               hba->vops->dbg_register_dump(hba);
-}
-
-static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
-{
-       if (hba->vops && hba->vops->device_reset)
-               return hba->vops->device_reset(hba);
-
-       return -EOPNOTSUPP;
-}
-
-static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
-                                                   struct devfreq_dev_profile
-                                                   *profile, void *data)
-{
-       if (hba->vops && hba->vops->config_scaling_param)
-               hba->vops->config_scaling_param(hba, profile, data);
-}
-
 extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
 
-/*
- * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
- * @scsi_lun: scsi LUN id
- *
- * Returns UPIU LUN id
- */
-static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
-{
-       if (scsi_is_wlun(scsi_lun))
-               return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
-                       | UFS_UPIU_WLUN_ID;
-       else
-               return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
-}
-
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
                     const char *prefix);
 
@@ -1396,43 +1227,4 @@ int ufshcd_write_ee_control(struct ufs_hba *hba);
 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
                             u16 set, u16 clr);
 
-static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
-                                           u16 set, u16 clr)
-{
-       return ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
-                                       &hba->ee_usr_mask, set, clr);
-}
-
-static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
-                                           u16 set, u16 clr)
-{
-       return ufshcd_update_ee_control(hba, &hba->ee_usr_mask,
-                                       &hba->ee_drv_mask, set, clr);
-}
-
-static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
-{
-       return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
-}
-
-static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
-{
-       return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
-}
-
-static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba)
-{
-       pm_runtime_get_noresume(&hba->sdev_ufs_device->sdev_gendev);
-}
-
-static inline int ufshcd_rpm_resume(struct ufs_hba *hba)
-{
-       return pm_runtime_resume(&hba->sdev_ufs_device->sdev_gendev);
-}
-
-static inline int ufshcd_rpm_put(struct ufs_hba *hba)
-{
-       return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
-}
-
 #endif /* End of Header */
index a7ff0e5..f81aa95 100644 (file)
@@ -11,6 +11,8 @@
 #ifndef _UFSHCI_H
 #define _UFSHCI_H
 
+#include <scsi/scsi_host.h>
+
 enum {
        TASK_REQ_UPIU_SIZE_DWORDS       = 8,
        TASK_RSP_UPIU_SIZE_DWORDS       = 8,
index 588c032..8882b47 100644 (file)
  */
 
 #include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <scsi/scsi_cmnd.h>
 
-#include "ufshcd.h"
+#include "ufshcd-priv.h"
 #include "ufshpb.h"
 #include "../sd.h"
 
@@ -90,12 +94,8 @@ static bool ufshpb_is_general_lun(int lun)
 
 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
 {
-       if (hpb->lu_pinned_end != PINNED_NOT_SET &&
-           rgn_idx >= hpb->lu_pinned_start &&
-           rgn_idx <= hpb->lu_pinned_end)
-               return true;
-
-       return false;
+       return hpb->lu_pinned_end != PINNED_NOT_SET &&
+              rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end;
 }
 
 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
@@ -563,7 +563,7 @@ static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
        if (list_empty(&srgn->list_act_srgn))
                list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
 
-       hpb->stats.rb_active_cnt++;
+       hpb->stats.rcmd_active_cnt++;
 }
 
 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
@@ -580,7 +580,7 @@ static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
        if (list_empty(&rgn->list_inact_rgn))
                list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
 
-       hpb->stats.rb_inactive_cnt++;
+       hpb->stats.rcmd_inactive_cnt++;
 }
 
 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
@@ -930,11 +930,6 @@ static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
        return ufshpb_issue_umap_req(hpb, rgn, true);
 }
 
-static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
-{
-       return ufshpb_issue_umap_req(hpb, NULL, false);
-}
-
 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
                                 struct ufshpb_region *rgn)
 {
@@ -1142,6 +1137,39 @@ out:
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
        return ret;
 }
+/**
+ * ufshpb_submit_region_inactive() - submit a region to be inactivated later
+ * @hpb: per-LU HPB instance
+ * @region_index: the index of the region that will be inactivated later
+ */
+static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index)
+{
+       int subregion_index;
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn;
+
+       /*
+        * Remove this region from the active region list and add it to the inactive list
+        */
+       spin_lock(&hpb->rsp_list_lock);
+       ufshpb_update_inactive_info(hpb, region_index);
+       spin_unlock(&hpb->rsp_list_lock);
+
+       rgn = hpb->rgn_tbl + region_index;
+
+       /*
+        * Set the subregion state to HPB_SRGN_INVALID; there will be no HPB read on this subregion
+        */
+       spin_lock(&hpb->rgn_state_lock);
+       if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+               for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) {
+                       srgn = rgn->srgn_tbl + subregion_index;
+                       if (srgn->srgn_state == HPB_SRGN_VALID)
+                               srgn->srgn_state = HPB_SRGN_INVALID;
+               }
+       }
+       spin_unlock(&hpb->rgn_state_lock);
+}
 
 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
                                         struct utp_hpb_rsp *rsp_field)
@@ -1201,25 +1229,8 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
 
        for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
                rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
-               dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
-                       "inactivate(%d) region %d\n", i, rgn_i);
-
-               spin_lock(&hpb->rsp_list_lock);
-               ufshpb_update_inactive_info(hpb, rgn_i);
-               spin_unlock(&hpb->rsp_list_lock);
-
-               rgn = hpb->rgn_tbl + rgn_i;
-
-               spin_lock(&hpb->rgn_state_lock);
-               if (rgn->rgn_state != HPB_RGN_INACTIVE) {
-                       for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
-                               srgn = rgn->srgn_tbl + srgn_i;
-                               if (srgn->srgn_state == HPB_SRGN_VALID)
-                                       srgn->srgn_state = HPB_SRGN_INVALID;
-                       }
-               }
-               spin_unlock(&hpb->rgn_state_lock);
-
+               dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i);
+               ufshpb_submit_region_inactive(hpb, rgn_i);
        }
 
 out:
@@ -1230,7 +1241,10 @@ out:
                queue_work(ufshpb_wq, &hpb->map_work);
 }
 
-static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
+/*
+ * Set the flags of all active regions to RGN_FLAG_UPDATE so that the host side reloads L2P entries later
+ */
+static void ufshpb_set_regions_update(struct ufshpb_lu *hpb)
 {
        struct victim_select_info *lru_info = &hpb->lru_info;
        struct ufshpb_region *rgn;
@@ -1244,6 +1258,42 @@ static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
 }
 
+static void ufshpb_dev_reset_handler(struct ufs_hba *hba)
+{
+       struct scsi_device *sdev;
+       struct ufshpb_lu *hpb;
+
+       __shost_for_each_device(sdev, hba->host) {
+               hpb = ufshpb_get_hpb_data(sdev);
+               if (!hpb)
+                       continue;
+
+               if (hpb->is_hcm) {
+                       /*
+                        * For HPB host control mode: if the device powered up and lost its HPB
+                        * information, set the region flag to RGN_FLAG_UPDATE so that the host
+                        * reloads its L2P entries (reactivates the region in the UFS device).
+                        */
+                       ufshpb_set_regions_update(hpb);
+               } else {
+                       /*
+                        * For HPB device control mode: receiving 02h (HPB Operation) in the UPIU
+                        * response means the device recommends that the host inactivate all active
+                        * regions. Add all active regions to the inactive list here; they will be
+                        * inactivated later in ufshpb_map_work_handler().
+                        */
+                       struct victim_select_info *lru_info = &hpb->lru_info;
+                       struct ufshpb_region *rgn;
+
+                       list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
+                               ufshpb_submit_region_inactive(hpb, rgn->rgn_idx);
+
+                       if (ufshpb_get_state(hpb) == HPB_PRESENT)
+                               queue_work(ufshpb_wq, &hpb->map_work);
+               }
+       }
+}
+
 /*
  * This function will parse recommended active subregion information in sense
  * data field of response UPIU with SAM_STAT_GOOD state.
@@ -1300,7 +1350,7 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
                return;
 
-       hpb->stats.rb_noti_cnt++;
+       hpb->stats.rcmd_noti_cnt++;
 
        switch (rsp_field->hpb_op) {
        case HPB_RSP_REQ_REGION_UPDATE:
@@ -1313,17 +1363,7 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        case HPB_RSP_DEV_RESET:
                dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
                         "UFS device lost HPB information during PM.\n");
-
-               if (hpb->is_hcm) {
-                       struct scsi_device *sdev;
-
-                       __shost_for_each_device(sdev, hba->host) {
-                               struct ufshpb_lu *h = sdev->hostdata;
-
-                               if (h)
-                                       ufshpb_dev_reset_handler(h);
-                       }
-               }
+               ufshpb_dev_reset_handler(hba);
 
                break;
        default:
@@ -1713,18 +1753,18 @@ static DEVICE_ATTR_RO(__name)
 
 ufshpb_sysfs_attr_show_func(hit_cnt);
 ufshpb_sysfs_attr_show_func(miss_cnt);
-ufshpb_sysfs_attr_show_func(rb_noti_cnt);
-ufshpb_sysfs_attr_show_func(rb_active_cnt);
-ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
+ufshpb_sysfs_attr_show_func(rcmd_noti_cnt);
+ufshpb_sysfs_attr_show_func(rcmd_active_cnt);
+ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt);
 ufshpb_sysfs_attr_show_func(map_req_cnt);
 ufshpb_sysfs_attr_show_func(umap_req_cnt);
 
 static struct attribute *hpb_dev_stat_attrs[] = {
        &dev_attr_hit_cnt.attr,
        &dev_attr_miss_cnt.attr,
-       &dev_attr_rb_noti_cnt.attr,
-       &dev_attr_rb_active_cnt.attr,
-       &dev_attr_rb_inactive_cnt.attr,
+       &dev_attr_rcmd_noti_cnt.attr,
+       &dev_attr_rcmd_active_cnt.attr,
+       &dev_attr_rcmd_inactive_cnt.attr,
        &dev_attr_map_req_cnt.attr,
        &dev_attr_umap_req_cnt.attr,
        NULL,
@@ -2087,9 +2127,9 @@ static void ufshpb_stat_init(struct ufshpb_lu *hpb)
 {
        hpb->stats.hit_cnt = 0;
        hpb->stats.miss_cnt = 0;
-       hpb->stats.rb_noti_cnt = 0;
-       hpb->stats.rb_active_cnt = 0;
-       hpb->stats.rb_inactive_cnt = 0;
+       hpb->stats.rcmd_noti_cnt = 0;
+       hpb->stats.rcmd_active_cnt = 0;
+       hpb->stats.rcmd_inactive_cnt = 0;
        hpb->stats.map_req_cnt = 0;
        hpb->stats.umap_req_cnt = 0;
 }
@@ -2272,38 +2312,28 @@ out:
        return flag_res;
 }
 
-void ufshpb_reset(struct ufs_hba *hba)
+/**
+ * ufshpb_toggle_state - switch HPB state of all LUs
+ * @hba: per-adapter instance
+ * @src: expected current HPB state
+ * @dest: target HPB state to switch to
+ */
+void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest)
 {
        struct ufshpb_lu *hpb;
        struct scsi_device *sdev;
 
        shost_for_each_device(sdev, hba->host) {
                hpb = ufshpb_get_hpb_data(sdev);
-               if (!hpb)
-                       continue;
 
-               if (ufshpb_get_state(hpb) != HPB_RESET)
+               if (!hpb || ufshpb_get_state(hpb) != src)
                        continue;
+               ufshpb_set_state(hpb, dest);
 
-               ufshpb_set_state(hpb, HPB_PRESENT);
-       }
-}
-
-void ufshpb_reset_host(struct ufs_hba *hba)
-{
-       struct ufshpb_lu *hpb;
-       struct scsi_device *sdev;
-
-       shost_for_each_device(sdev, hba->host) {
-               hpb = ufshpb_get_hpb_data(sdev);
-               if (!hpb)
-                       continue;
-
-               if (ufshpb_get_state(hpb) != HPB_PRESENT)
-                       continue;
-               ufshpb_set_state(hpb, HPB_RESET);
-               ufshpb_cancel_jobs(hpb);
-               ufshpb_discard_rsp_lists(hpb);
+               if (dest == HPB_RESET) {
+                       ufshpb_cancel_jobs(hpb);
+                       ufshpb_discard_rsp_lists(hpb);
+               }
        }
 }
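A sketch of how the removed ufshpb_reset()/ufshpb_reset_host() helpers map onto the merged ufshpb_toggle_state(); the call sites are assumed to sit in the host reset path, but the state pairs follow directly from the removed bodies above:

/* was ufshpb_reset_host(hba): quiesce HPB before the host reset */
ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
/* ... reset work ... */
/* was ufshpb_reset(hba): bring HPB back once the reset completes */
ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);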
 
@@ -2314,11 +2344,9 @@ void ufshpb_suspend(struct ufs_hba *hba)
 
        shost_for_each_device(sdev, hba->host) {
                hpb = ufshpb_get_hpb_data(sdev);
-               if (!hpb)
+               if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT)
                        continue;
 
-               if (ufshpb_get_state(hpb) != HPB_PRESENT)
-                       continue;
                ufshpb_set_state(hpb, HPB_SUSPEND);
                ufshpb_cancel_jobs(hpb);
        }
@@ -2331,20 +2359,15 @@ void ufshpb_resume(struct ufs_hba *hba)
 
        shost_for_each_device(sdev, hba->host) {
                hpb = ufshpb_get_hpb_data(sdev);
-               if (!hpb)
+               if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND)
                        continue;
 
-               if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
-                   (ufshpb_get_state(hpb) != HPB_SUSPEND))
-                       continue;
                ufshpb_set_state(hpb, HPB_PRESENT);
                ufshpb_kick_map_work(hpb);
                if (hpb->is_hcm) {
-                       unsigned int poll =
-                               hpb->params.timeout_polling_interval_ms;
+                       unsigned int poll = hpb->params.timeout_polling_interval_ms;
 
-                       schedule_delayed_work(&hpb->ufshpb_read_to_work,
-                               msecs_to_jiffies(poll));
+                       schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll));
                }
        }
 }
@@ -2450,8 +2473,6 @@ static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
                        ufshpb_set_state(hpb, HPB_PRESENT);
                        if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
                                queue_work(ufshpb_wq, &hpb->map_work);
-                       if (!hpb->is_hcm)
-                               ufshpb_issue_umap_all_req(hpb);
                } else {
                        dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
                        ufshpb_destroy_lu(hba, sdev);
index b475dbd..0d6e600 100644 (file)
@@ -59,8 +59,8 @@ enum UFSHPB_MODE {
 };
 
 enum UFSHPB_STATE {
-       HPB_INIT = 0,
-       HPB_PRESENT = 1,
+       HPB_INIT,
+       HPB_PRESENT,
        HPB_SUSPEND,
        HPB_FAILED,
        HPB_RESET,
@@ -211,9 +211,9 @@ struct ufshpb_params {
 struct ufshpb_stats {
        u64 hit_cnt;
        u64 miss_cnt;
-       u64 rb_noti_cnt;
-       u64 rb_active_cnt;
-       u64 rb_inactive_cnt;
+       u64 rcmd_noti_cnt;
+       u64 rcmd_active_cnt;
+       u64 rcmd_inactive_cnt;
        u64 map_req_cnt;
        u64 pre_req_cnt;
        u64 umap_req_cnt;
@@ -288,8 +288,7 @@ static int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0;
 static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
 static void ufshpb_resume(struct ufs_hba *hba) {}
 static void ufshpb_suspend(struct ufs_hba *hba) {}
-static void ufshpb_reset(struct ufs_hba *hba) {}
-static void ufshpb_reset_host(struct ufs_hba *hba) {}
+static void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest) {}
 static void ufshpb_init(struct ufs_hba *hba) {}
 static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
 static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
@@ -303,8 +302,7 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
 void ufshpb_resume(struct ufs_hba *hba);
 void ufshpb_suspend(struct ufs_hba *hba);
-void ufshpb_reset(struct ufs_hba *hba);
-void ufshpb_reset_host(struct ufs_hba *hba);
+void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest);
 void ufshpb_init(struct ufs_hba *hba);
 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
index 8e9e486..0521f88 100644 (file)
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
- * drivers/scsi/ufs/unipro.h
- *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  */
 
 #define UNIPRO_CB_OFFSET(x)                    (0x8000 | x)
 
 /*
- * PHY Adpater attributes
+ * PHY Adapter attributes
  */
 #define PA_ACTIVETXDATALANES   0x1560
 #define PA_ACTIVERXDATALANES   0x1580
@@ -300,20 +298,6 @@ enum ufs_unipro_ver {
 #define T_TC0TXMAXSDUSIZE      0x4060
 #define T_TC1TXMAXSDUSIZE      0x4061
 
-#ifdef FALSE
-#undef FALSE
-#endif
-
-#ifdef TRUE
-#undef TRUE
-#endif
-
-/* Boolean attribute values */
-enum {
-       FALSE = 0,
-       TRUE,
-};
-
 /* CPort setting */
 #define E2EFC_ON       (1 << 0)
 #define E2EFC_OFF      (0 << 0)
index 1f037b8..f88ecdb 100644 (file)
@@ -1324,7 +1324,6 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
         * indicate success.
         */
        header = config_page;
-       memset(header, 0, sizeof *header);
        header->hostStatus = BTSTAT_INVPARAM;
        header->scsiStatus = SDSTAT_CHECK;
 
index 4069033..aff7276 100644 (file)
@@ -189,7 +189,7 @@ struct cxgbit_np {
 struct cxgbit_sock {
        struct cxgbit_sock_common com;
        struct cxgbit_np *cnp;
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct list_head list;
@@ -316,32 +316,32 @@ typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
 
 int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
 int cxgbit_setup_conn_digest(struct cxgbit_sock *);
-int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+int cxgbit_accept_np(struct iscsi_np *, struct iscsit_conn *);
 void cxgbit_free_np(struct iscsi_np *);
 void cxgbit_abort_conn(struct cxgbit_sock *csk);
-void cxgbit_free_conn(struct iscsi_conn *);
+void cxgbit_free_conn(struct iscsit_conn *);
 extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
-int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+int cxgbit_get_login_rx(struct iscsit_conn *, struct iscsi_login *);
 int cxgbit_rx_data_ack(struct cxgbit_sock *);
 int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
                    struct l2t_entry *);
 void cxgbit_push_tx_frames(struct cxgbit_sock *);
-int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
-int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
+int cxgbit_put_login_tx(struct iscsit_conn *, struct iscsi_login *, u32);
+int cxgbit_xmit_pdu(struct iscsit_conn *, struct iscsit_cmd *,
                    struct iscsi_datain_req *, const void *, u32);
-void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
+void cxgbit_get_r2t_ttt(struct iscsit_conn *, struct iscsit_cmd *,
                        struct iscsi_r2t *);
 u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
 int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
-void cxgbit_get_rx_pdu(struct iscsi_conn *);
-int cxgbit_validate_params(struct iscsi_conn *);
+void cxgbit_get_rx_pdu(struct iscsit_conn *);
+int cxgbit_validate_params(struct iscsit_conn *);
 struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
 
 /* DDP */
 int cxgbit_ddp_init(struct cxgbit_device *);
 int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
-int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
-void cxgbit_unmap_cmd(struct iscsi_conn *, struct iscsi_cmd *);
+int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsit_cmd *);
+void cxgbit_unmap_cmd(struct iscsit_conn *, struct iscsit_cmd *);
 
 static inline
 struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
index da31a30..3336d2b 100644 (file)
@@ -465,7 +465,7 @@ int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
 }
 
 static void
-cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+cxgbit_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
                     struct cxgbit_sock *csk)
 {
        conn->login_family = np->np_sockaddr.ss_family;
@@ -473,7 +473,7 @@ cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
        conn->local_sockaddr = csk->com.local_addr;
 }
 
-int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+int cxgbit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
 {
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk;
@@ -717,7 +717,7 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
 
 static void __cxgbit_free_conn(struct cxgbit_sock *csk)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        bool release = false;
 
        pr_debug("%s: state %d\n",
@@ -751,7 +751,7 @@ static void __cxgbit_free_conn(struct cxgbit_sock *csk)
                cxgbit_put_csk(csk);
 }
 
-void cxgbit_free_conn(struct iscsi_conn *conn)
+void cxgbit_free_conn(struct iscsit_conn *conn)
 {
        __cxgbit_free_conn(conn->context);
 }
index 072afd0..17fd0d8 100644 (file)
@@ -227,7 +227,7 @@ rel_ppods:
 }
 
 void
-cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+cxgbit_get_r2t_ttt(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                   struct iscsi_r2t *r2t)
 {
        struct cxgbit_sock *csk = conn->context;
@@ -260,7 +260,7 @@ out:
        r2t->targ_xfer_tag = ttinfo->tag;
 }
 
-void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+void cxgbit_unmap_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
        struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
 
index c6678dc..2c1950d 100644 (file)
@@ -657,7 +657,7 @@ cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
 }
 #endif
 
-static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
+static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsit_conn *conn)
 {
        return TARGET_PROT_NORMAL;
 }
index d314ee1..acfc396 100644 (file)
@@ -337,7 +337,7 @@ unlock:
 }
 
 static int
-cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
+cxgbit_map_skb(struct iscsit_cmd *cmd, struct sk_buff *skb, u32 data_offset,
               u32 data_length)
 {
        u32 i = 0, nr_frags = MAX_SKB_FRAGS;
@@ -390,10 +390,10 @@ cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
 }
 
 static int
-cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
+cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
                     struct iscsi_datain_req *dr)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct sk_buff *skb;
        struct iscsi_datain datain;
        struct cxgbit_iso_info iso_info;
@@ -481,7 +481,7 @@ out:
 }
 
 static int
-cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
+cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
                 const struct iscsi_datain *datain)
 {
        struct sk_buff *skb;
@@ -510,7 +510,7 @@ cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
 }
 
 static int
-cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+cxgbit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                       struct iscsi_datain_req *dr,
                       const struct iscsi_datain *datain)
 {
@@ -530,7 +530,7 @@ cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }
 
 static int
-cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+cxgbit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                          const void *data_buf, u32 data_buf_len)
 {
        struct cxgbit_sock *csk = conn->context;
@@ -560,7 +560,7 @@ cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }
 
 int
-cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+cxgbit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
 {
        if (dr)
@@ -569,7 +569,7 @@ cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
 }
 
-int cxgbit_validate_params(struct iscsi_conn *conn)
+int cxgbit_validate_params(struct iscsit_conn *conn)
 {
        struct cxgbit_sock *csk = conn->context;
        struct cxgbit_device *cdev = csk->com.cdev;
@@ -595,7 +595,7 @@ int cxgbit_validate_params(struct iscsi_conn *conn)
 
 static int cxgbit_set_digest(struct cxgbit_sock *csk)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct iscsi_param *param;
 
        param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
@@ -627,7 +627,7 @@ static int cxgbit_set_digest(struct cxgbit_sock *csk)
 
 static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct iscsi_conn_ops *conn_ops = conn->conn_ops;
        struct iscsi_param *param;
        u32 mrdsl, mbl;
@@ -678,7 +678,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
  */
 static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct iscsi_param *param;
 
        if (conn->login->leading_connection) {
@@ -712,7 +712,7 @@ static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
        return 0;
 }
 
-static int cxgbit_set_params(struct iscsi_conn *conn)
+static int cxgbit_set_params(struct iscsit_conn *conn)
 {
        struct cxgbit_sock *csk = conn->context;
        struct cxgbit_device *cdev = csk->com.cdev;
@@ -771,7 +771,7 @@ enable_ddp:
 }
 
 int
-cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+cxgbit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
                    u32 length)
 {
        struct cxgbit_sock *csk = conn->context;
@@ -832,16 +832,16 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
        }
 }
 
-static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
+static struct iscsit_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
        struct cxgbit_cmd *ccmd;
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
 
        cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd) {
-               pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n");
+               pr_err("Unable to allocate iscsit_cmd + cxgbit_cmd\n");
                return NULL;
        }
 
@@ -853,10 +853,10 @@ static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
 }
 
 static int
-cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+cxgbit_handle_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
                             u32 length)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct cxgbit_sock *csk = conn->context;
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
 
@@ -910,10 +910,10 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 }
 
 static int
-cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+cxgbit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
                          bool dump_payload)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
        /*
         * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
@@ -964,9 +964,9 @@ after_immediate_data:
 }
 
 static int
-cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
        int rc;
@@ -995,8 +995,8 @@ cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
 static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
 {
        struct scatterlist *sg_start;
-       struct iscsi_conn *conn = csk->conn;
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_conn *conn = csk->conn;
+       struct iscsit_cmd *cmd = NULL;
        struct cxgbit_cmd *ccmd;
        struct cxgbi_task_tag_info *ttinfo;
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
@@ -1084,9 +1084,9 @@ check_payload:
        return 0;
 }
 
-static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
        struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
        unsigned char *ping_data = NULL;
@@ -1134,7 +1134,7 @@ static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
 
                ping_data[payload_length] = '\0';
                /*
-                * Attach ping data to struct iscsi_cmd->buf_ptr.
+                * Attach ping data to struct iscsit_cmd->buf_ptr.
                 */
                cmd->buf_ptr = ping_data;
                cmd->buf_ptr_size = payload_length;
@@ -1152,9 +1152,9 @@ out:
 }
 
 static int
-cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
        struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
        u32 payload_length = pdu_cb->dlen;
@@ -1209,8 +1209,8 @@ static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
 {
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
        struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
-       struct iscsi_conn *conn = csk->conn;
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_conn *conn = csk->conn;
+       struct iscsit_cmd *cmd = NULL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
        int ret = -EINVAL;
 
@@ -1286,7 +1286,7 @@ reject:
 static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
 {
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct iscsi_hdr *hdr = pdu_cb->hdr;
        u8 opcode;
 
@@ -1321,7 +1321,7 @@ transport_err:
 
 static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
 {
-       struct iscsi_conn *conn = csk->conn;
+       struct iscsit_conn *conn = csk->conn;
        struct iscsi_login *login = conn->login;
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
        struct iscsi_login_req *login_req;
@@ -1626,7 +1626,7 @@ out:
        return -1;
 }
 
-int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+int cxgbit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
 {
        struct cxgbit_sock *csk = conn->context;
        int ret = -1;
@@ -1642,7 +1642,7 @@ int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
        return ret;
 }
 
-void cxgbit_get_rx_pdu(struct iscsi_conn *conn)
+void cxgbit_get_rx_pdu(struct iscsit_conn *conn)
 {
        struct cxgbit_sock *csk = conn->context;
 
index ddf6c2a..e368f03 100644 (file)
@@ -59,7 +59,7 @@ struct kmem_cache *lio_dr_cache;
 struct kmem_cache *lio_ooo_cache;
 struct kmem_cache *lio_r2t_cache;
 
-static int iscsit_handle_immediate_data(struct iscsi_cmd *,
+static int iscsit_handle_immediate_data(struct iscsit_cmd *,
                        struct iscsi_scsi_req *, u32);
 
 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
@@ -472,15 +472,15 @@ int iscsit_del_np(struct iscsi_np *np)
        return 0;
 }
 
-static void iscsit_get_rx_pdu(struct iscsi_conn *);
+static void iscsit_get_rx_pdu(struct iscsit_conn *);
 
-int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
        return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 EXPORT_SYMBOL(iscsit_queue_rsp);
 
-void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
 {
        spin_lock_bh(&conn->cmd_lock);
        if (!list_empty(&cmd->i_conn_node))
@@ -493,10 +493,10 @@ EXPORT_SYMBOL(iscsit_aborted_task);
 
 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
                                      u32, u32, const void *, void *);
-static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
+static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
 
 static int
-iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                          const void *data_buf, u32 data_buf_len)
 {
        struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
@@ -564,13 +564,13 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        return 0;
 }
 
-static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
+static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
                            u32 data_offset, u32 data_length);
-static void iscsit_unmap_iovec(struct iscsi_cmd *);
-static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
+static void iscsit_unmap_iovec(struct iscsit_cmd *);
+static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
                                    u32, u32, u32, u8 *);
 static int
-iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                       const struct iscsi_datain *datain)
 {
        struct kvec *iov;
@@ -644,7 +644,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        return 0;
 }
 
-static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                           struct iscsi_datain_req *dr, const void *buf,
                           u32 buf_len)
 {
@@ -654,7 +654,7 @@ static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
 }
 
-static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
 {
        return TARGET_PROT_NORMAL;
 }
@@ -796,11 +796,11 @@ static void __exit iscsi_target_cleanup_module(void)
 }
 
 int iscsit_add_reject(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        u8 reason,
        unsigned char *buf)
 {
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
 
        cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd)
@@ -828,12 +828,12 @@ int iscsit_add_reject(
 EXPORT_SYMBOL(iscsit_add_reject);
 
 static int iscsit_add_reject_from_cmd(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u8 reason,
        bool add_to_conn,
        unsigned char *buf)
 {
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
        const bool do_put = cmd->se_cmd.se_tfo != NULL;
 
        if (!cmd->conn) {
@@ -872,13 +872,13 @@ static int iscsit_add_reject_from_cmd(
        return -1;
 }
 
-static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
+static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
                                 unsigned char *buf)
 {
        return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
 }
 
-int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
+int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
 {
        return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
 }
@@ -888,7 +888,7 @@ EXPORT_SYMBOL(iscsit_reject_cmd);
  * Map some portion of the allocated scatterlist to an iovec, suitable for
  * kernel sockets to copy data in/out.
  */
-static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
+static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
                            u32 data_offset, u32 data_length)
 {
        u32 i = 0, orig_data_length = data_length;
@@ -946,7 +946,7 @@ overflow:
        return -1;
 }
 
-static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
+static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
 {
        u32 i;
        struct scatterlist *sg;
@@ -957,10 +957,10 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
                kunmap(sg_page(&sg[i]));
 }
 
-static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
 {
        LIST_HEAD(ack_list);
-       struct iscsi_cmd *cmd, *cmd_p;
+       struct iscsit_cmd *cmd, *cmd_p;
 
        conn->exp_statsn = exp_statsn;
 
@@ -987,7 +987,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
        }
 }
 
-static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
+static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
 {
        u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
 
@@ -1000,7 +1000,7 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
        return 0;
 }
 
-int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                          unsigned char *buf)
 {
        int data_direction, payload_length;
@@ -1215,7 +1215,7 @@ attach_cmd:
 }
 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
 
-void iscsit_set_unsolicited_dataout(struct iscsi_cmd *cmd)
+void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
 {
        iscsit_set_dataout_sequence_values(cmd);
 
@@ -1225,7 +1225,7 @@ void iscsit_set_unsolicited_dataout(struct iscsi_cmd *cmd)
 }
 EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
 
-int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                            struct iscsi_scsi_req *hdr)
 {
        int cmdsn_ret = 0;
@@ -1285,7 +1285,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 EXPORT_SYMBOL(iscsit_process_scsi_cmd);
 
 static int
-iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
                          bool dump_payload)
 {
        int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
@@ -1349,7 +1349,7 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 }
 
 static int
-iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                           unsigned char *buf)
 {
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
@@ -1383,7 +1383,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 static u32 iscsit_do_crypto_hash_sg(
        struct ahash_request *hash,
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 data_offset,
        u32 data_length,
        u32 padding,
@@ -1455,8 +1455,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
 }
 
 int
-__iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
-                          struct iscsi_cmd *cmd, u32 payload_length,
+__iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
+                          struct iscsit_cmd *cmd, u32 payload_length,
                           bool *success)
 {
        struct iscsi_data *hdr = buf;
@@ -1559,11 +1559,11 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
 EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
 
 int
-iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
-                        struct iscsi_cmd **out_cmd)
+iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
+                        struct iscsit_cmd **out_cmd)
 {
        struct iscsi_data *hdr = buf;
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
        bool success = false;
@@ -1594,7 +1594,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
 EXPORT_SYMBOL(iscsit_check_dataout_hdr);
 
 static int
-iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                   struct iscsi_data *hdr)
 {
        struct kvec *iov;
@@ -1662,10 +1662,10 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }
 
 int
-iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
+iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
                             bool data_crc_failed)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        int rc, ooo_cmdsn;
        /*
         * Increment post receive data and CRC values or perform
@@ -1700,9 +1700,9 @@ iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
 }
 EXPORT_SYMBOL(iscsit_check_dataout_payload);
 
-static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
 {
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        int rc;
        bool data_crc_failed = false;
@@ -1722,7 +1722,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
        return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
 }
 
-int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                         struct iscsi_nopout *hdr)
 {
        u32 payload_length = ntoh24(hdr->dlength);
@@ -1770,7 +1770,7 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         * This is not a response to a Unsolicited NopIN, which means
         * it can either be a NOPOUT ping request (with a valid ITT),
         * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
-        * Either way, make sure we allocate an struct iscsi_cmd, as both
+        * Either way, make sure we allocate a struct iscsit_cmd, as both
         * can contain ping data.
         */
        if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
@@ -1789,10 +1789,10 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }
 EXPORT_SYMBOL(iscsit_setup_nop_out);
 
-int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                           struct iscsi_nopout *hdr)
 {
-       struct iscsi_cmd *cmd_p = NULL;
+       struct iscsit_cmd *cmd_p = NULL;
        int cmdsn_ret = 0;
        /*
         * Initiator is expecting a NopIN ping reply..
@@ -1851,7 +1851,7 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }
 EXPORT_SYMBOL(iscsit_process_nop_out);
 
-static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                                 unsigned char *buf)
 {
        unsigned char *ping_data = NULL;
@@ -1936,7 +1936,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
                ping_data[payload_length] = '\0';
                /*
-                * Attach ping data to struct iscsi_cmd->buf_ptr.
+                * Attach ping data to struct iscsit_cmd->buf_ptr.
                 */
                cmd->buf_ptr = ping_data;
                cmd->buf_ptr_size = payload_length;
@@ -1978,7 +1978,7 @@ static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
 }
 
 int
-iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                           unsigned char *buf)
 {
        struct se_tmr_req *se_tmr;
@@ -2159,7 +2159,7 @@ EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
 
 /* #warning FIXME: Support Text Command parameters besides SendTargets */
 int
-iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                      struct iscsi_text *hdr)
 {
        u32 payload_length = ntoh24(hdr->dlength);
@@ -2199,7 +2199,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 EXPORT_SYMBOL(iscsit_setup_text_cmd);
 
 int
-iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                        struct iscsi_text *hdr)
 {
        unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
@@ -2258,7 +2258,7 @@ reject:
 EXPORT_SYMBOL(iscsit_process_text_cmd);
 
 static int
-iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                       unsigned char *buf)
 {
        struct iscsi_text *hdr = (struct iscsi_text *)buf;
@@ -2347,10 +2347,10 @@ reject:
        return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
 }
 
-int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
-       struct iscsi_conn *conn_p;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_conn *conn_p;
+       struct iscsit_session *sess = conn->sess;
 
        pr_debug("Received logout request CLOSESESSION on CID: %hu"
                " for SID: %u.\n", conn->cid, conn->sess->sid);
@@ -2377,10 +2377,10 @@ int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        return 0;
 }
 
-int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
-       struct iscsi_conn *l_conn;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_conn *l_conn;
+       struct iscsit_session *sess = conn->sess;
 
        pr_debug("Received logout request CLOSECONNECTION for CID:"
                " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
@@ -2425,9 +2425,9 @@ int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn
        return 0;
 }
 
-int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
 
        pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
                " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
@@ -2455,7 +2455,7 @@ int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn
 }
 
 int
-iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                        unsigned char *buf)
 {
        int cmdsn_ret, logout_remove = 0;
@@ -2536,7 +2536,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 EXPORT_SYMBOL(iscsit_handle_logout_cmd);
 
 int iscsit_handle_snack(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        unsigned char *buf)
 {
        struct iscsi_snack *hdr;
@@ -2590,7 +2590,7 @@ int iscsit_handle_snack(
 }
 EXPORT_SYMBOL(iscsit_handle_snack);
 
-static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
+static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn)
 {
        if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
            (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
@@ -2601,13 +2601,13 @@ static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
 }
 
 static int iscsit_handle_immediate_data(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_scsi_req *hdr,
        u32 length)
 {
        int iov_ret, rx_got = 0, rx_size = 0;
        u32 checksum, iov_count = 0, padding = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct kvec *iov;
        void *overflow_buf = NULL;
 
@@ -2708,10 +2708,10 @@ static int iscsit_handle_immediate_data(
 
 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections
        with active network interface */
-static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn)
 {
-       struct iscsi_cmd *cmd;
-       struct iscsi_conn *conn_p;
+       struct iscsit_cmd *cmd;
+       struct iscsit_conn *conn_p;
        bool found = false;
 
        lockdep_assert_held(&conn->sess->conn_lock);
@@ -2750,8 +2750,8 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 }
 
 static int iscsit_send_conn_drop_async_message(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        struct iscsi_async *hdr;
 
@@ -2779,7 +2779,7 @@ static int iscsit_send_conn_drop_async_message(
        return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
 }
 
-static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn)
 {
        if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
            (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
@@ -2790,7 +2790,7 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
 }
 
 void
-iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
                        struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
                        bool set_statsn)
 {
@@ -2835,7 +2835,7 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 }
 EXPORT_SYMBOL(iscsit_build_datain_pdu);
 
-static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
        struct iscsi_datain datain;
@@ -2896,12 +2896,12 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 int
-iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
                        struct iscsi_logout_rsp *hdr)
 {
-       struct iscsi_conn *logout_conn = NULL;
+       struct iscsit_conn *logout_conn = NULL;
        struct iscsi_conn_recovery *cr = NULL;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        /*
         * The actual shutting down of Sessions and/or Connections
         * for CLOSESESSION and CLOSECONNECTION Logout Requests
@@ -2991,7 +2991,7 @@ iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 EXPORT_SYMBOL(iscsit_build_logout_rsp);
 
 static int
-iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        int rc;
 
@@ -3004,7 +3004,7 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 void
-iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
                       struct iscsi_nopin *hdr, bool nopout_response)
 {
        hdr->opcode             = ISCSI_OP_NOOP_IN;
@@ -3035,8 +3035,8 @@ EXPORT_SYMBOL(iscsit_build_nopin_rsp);
  *     Unsolicited NOPIN, either requesting a response or not.
  */
 static int iscsit_send_unsolicited_nopin(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn,
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn,
        int want_response)
 {
        struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
@@ -3060,15 +3060,15 @@ static int iscsit_send_unsolicited_nopin(
 }
 
 static int
-iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
 
        iscsit_build_nopin_rsp(cmd, conn, hdr, true);
 
        /*
-        * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
-        * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
+        * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr.
+        * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size.
         */
        pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
 
@@ -3078,8 +3078,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 
 static int iscsit_send_r2t(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        struct iscsi_r2t *r2t;
        struct iscsi_r2t_rsp *hdr;
@@ -3135,8 +3135,8 @@ static int iscsit_send_r2t(
  *             connection recovery.
  */
 int iscsit_build_r2ts_for_cmd(
-       struct iscsi_conn *conn,
-       struct iscsi_cmd *cmd,
+       struct iscsit_conn *conn,
+       struct iscsit_cmd *cmd,
        bool recovery)
 {
        int first_r2t = 1;
@@ -3218,7 +3218,7 @@ int iscsit_build_r2ts_for_cmd(
 }
 EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
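
iscsit_build_r2ts_for_cmd() walks the outstanding write data and offers it back to the initiator in bursts. A minimal stand-alone sketch of that carving step; max_burst_len is only a stand-in for the negotiated MaxBurstLength and nothing here is the driver's own API:

#include <stdint.h>
#include <stdio.h>

static void offer_r2ts(uint32_t data_length, uint32_t max_burst_len)
{
	uint32_t offset = 0;
	uint32_t r2tsn = 0;

	while (offset < data_length) {
		uint32_t xfer_len = data_length - offset;

		if (xfer_len > max_burst_len)
			xfer_len = max_burst_len;
		/* A real target would queue an R2T PDU here. */
		printf("R2TSN %u: offset %u, length %u\n",
		       r2tsn++, offset, xfer_len);
		offset += xfer_len;
	}
}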
 
-void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
                        bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
 {
        if (inc_stat_sn)
@@ -3252,7 +3252,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 }
 EXPORT_SYMBOL(iscsit_build_rsp_pdu);
 
-static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
        bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
@@ -3309,7 +3309,7 @@ static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
 }
 
 void
-iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
                          struct iscsi_tm_rsp *hdr)
 {
        struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
@@ -3332,7 +3332,7 @@ iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
 
 static int
-iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
 
@@ -3344,12 +3344,12 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 #define SENDTARGETS_BUF_LIMIT 32768U
 
 static int
-iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
                                  enum iscsit_transport_type network_transport,
                                  int skip_bytes, bool *completed)
 {
        char *payload = NULL;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_portal_group *tpg;
        struct iscsi_tiqn *tiqn;
        struct iscsi_tpg_np *tpg_np;
@@ -3494,7 +3494,7 @@ eob:
 }
 
 int
-iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
                      struct iscsi_text_rsp *hdr,
                      enum iscsit_transport_type network_transport)
 {
@@ -3544,8 +3544,8 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 EXPORT_SYMBOL(iscsit_build_text_rsp);
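
The SendTargets/text path above builds its payload in resumable chunks: skip_bytes records how much has already been sent and *completed flags the final chunk. A rough user-space sketch of that contract, where RSP_BUF_LIMIT mirrors SENDTARGETS_BUF_LIMIT and the buffer handling is simplified:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define RSP_BUF_LIMIT 32768U	/* stands in for SENDTARGETS_BUF_LIMIT */

/* "out" must have room for at least RSP_BUF_LIMIT bytes. */
static size_t build_text_chunk(const char *full, size_t full_len,
			       size_t skip_bytes, char *out, bool *completed)
{
	size_t remaining, chunk;

	if (skip_bytes >= full_len) {
		*completed = true;
		return 0;
	}
	remaining = full_len - skip_bytes;
	chunk = remaining > RSP_BUF_LIMIT ? RSP_BUF_LIMIT : remaining;
	memcpy(out, full + skip_bytes, chunk);
	*completed = (skip_bytes + chunk == full_len);
	return chunk;		/* caller advances skip_bytes by this much */
}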
 
 static int iscsit_send_text_rsp(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
        int text_length;
@@ -3561,7 +3561,7 @@ static int iscsit_send_text_rsp(
 }
 
 void
-iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
                    struct iscsi_reject *hdr)
 {
        hdr->opcode             = ISCSI_OP_REJECT;
@@ -3578,8 +3578,8 @@ iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 EXPORT_SYMBOL(iscsit_build_reject);
 
 static int iscsit_send_reject(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
 
@@ -3593,7 +3593,7 @@ static int iscsit_send_reject(
                                                     ISCSI_HDR_LEN);
 }
 
-void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+void iscsit_thread_get_cpumask(struct iscsit_conn *conn)
 {
        int ord, cpu;
        cpumask_var_t conn_allowed_cpumask;
@@ -3636,7 +3636,7 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
        cpumask_setall(conn->conn_cpumask);
 }
 
-static void iscsit_thread_reschedule(struct iscsi_conn *conn)
+static void iscsit_thread_reschedule(struct iscsit_conn *conn)
 {
        /*
         * If iscsit_global->allowed_cpumask modified, reschedule iSCSI
@@ -3653,7 +3653,7 @@ static void iscsit_thread_reschedule(struct iscsi_conn *conn)
 }
 
 void iscsit_thread_check_cpumask(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct task_struct *p,
        int mode)
 {
@@ -3693,7 +3693,7 @@ void iscsit_thread_check_cpumask(
 EXPORT_SYMBOL(iscsit_thread_check_cpumask);
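
The cpumask helpers above pick a CPU for the connection's RX/TX threads from an administrator-supplied allowed mask, falling back to all CPUs when nothing usable is set. A simplified stand-in for the "Nth allowed CPU" selection, using a 64-bit word instead of cpumask_var_t and the kernel helpers:

#include <stdint.h>

static int nth_allowed_cpu(uint64_t allowed, unsigned int ord)
{
	int count = -1;

	if (!allowed)
		return -1;	/* caller falls back to "all CPUs" */

	ord %= (unsigned int)__builtin_popcountll(allowed);
	for (int cpu = 0; cpu < 64; cpu++) {
		if (allowed & (UINT64_C(1) << cpu)) {
			if ((unsigned int)++count == ord)
				return cpu;
		}
	}
	return -1;		/* unreachable when allowed != 0 */
}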
 
 int
-iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
 {
        int ret;
 
@@ -3737,11 +3737,11 @@ err:
 EXPORT_SYMBOL(iscsit_immediate_queue);
 
 static int
-iscsit_handle_immediate_queue(struct iscsi_conn *conn)
+iscsit_handle_immediate_queue(struct iscsit_conn *conn)
 {
        struct iscsit_transport *t = conn->conn_transport;
        struct iscsi_queue_req *qr;
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
        u8 state;
        int ret;
 
@@ -3760,7 +3760,7 @@ iscsit_handle_immediate_queue(struct iscsi_conn *conn)
 }
 
 int
-iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
 {
        int ret;
 
@@ -3866,11 +3866,11 @@ err:
 }
 EXPORT_SYMBOL(iscsit_response_queue);
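
Both queue handlers follow the same shape: pop a queued (command, state) pair and hand it to the per-state send routine, tearing the connection down on failure. A bare-bones model of that drain loop; the state names and queueing below are invented, not the driver's:

#include <stdio.h>

enum tx_state { SEND_DATAIN, SEND_STATUS, SEND_NOPIN };

struct queued_rsp {
	int cmd_id;
	enum tx_state state;
};

static int send_one(const struct queued_rsp *qr)
{
	switch (qr->state) {
	case SEND_DATAIN:
		printf("cmd %d: Data-In PDU(s)\n", qr->cmd_id);
		return 0;
	case SEND_STATUS:
		printf("cmd %d: SCSI Response PDU\n", qr->cmd_id);
		return 0;
	case SEND_NOPIN:
		printf("cmd %d: NOP-In PDU\n", qr->cmd_id);
		return 0;
	}
	return -1;
}

static int drain_response_queue(const struct queued_rsp *q, int n)
{
	for (int i = 0; i < n; i++)
		if (send_one(&q[i]) < 0)
			return -1;	/* real code tears the connection down */
	return 0;
}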
 
-static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+static int iscsit_handle_response_queue(struct iscsit_conn *conn)
 {
        struct iscsit_transport *t = conn->conn_transport;
        struct iscsi_queue_req *qr;
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
        u8 state;
        int ret;
 
@@ -3890,7 +3890,7 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
 int iscsi_target_tx_thread(void *arg)
 {
        int ret = 0;
-       struct iscsi_conn *conn = arg;
+       struct iscsit_conn *conn = arg;
        bool conn_freed = false;
 
        /*
@@ -3945,10 +3945,10 @@ out:
        return 0;
 }
 
-static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
+static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf)
 {
        struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
        int ret = 0;
 
        switch (hdr->opcode & ISCSI_OPCODE_MASK) {
@@ -4022,7 +4022,7 @@ reject:
        return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 }
 
-static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
 {
        bool ret;
 
@@ -4033,7 +4033,7 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
        return ret;
 }
 
-static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
+static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
 {
        int ret;
        u8 *buffer, opcode;
@@ -4118,7 +4118,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
 int iscsi_target_rx_thread(void *arg)
 {
        int rc;
-       struct iscsi_conn *conn = arg;
+       struct iscsit_conn *conn = arg;
        bool conn_freed = false;
 
        /*
@@ -4153,11 +4153,11 @@ out:
        return 0;
 }
 
-static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
 {
        LIST_HEAD(tmp_list);
-       struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL;
+       struct iscsit_session *sess = conn->sess;
        /*
         * We expect this function to only ever be called from either RX or TX
         * thread context via iscsit_close_connection() once the other context
@@ -4197,9 +4197,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 }
 
 static void iscsit_stop_timers_for_cmds(
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
 
        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
@@ -4210,10 +4210,10 @@ static void iscsit_stop_timers_for_cmds(
 }
 
 int iscsit_close_connection(
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
        int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
-       struct iscsi_session    *sess = conn->sess;
+       struct iscsit_session   *sess = conn->sess;
 
        pr_debug("Closing iSCSI connection CID %hu on SID:"
                " %u\n", conn->cid, sess->sid);
@@ -4226,7 +4226,7 @@ int iscsit_close_connection(
         * However for iser-target, isert_wait4logout() is using conn_logout_comp
         * to signal logout response TX interrupt completion.  Go ahead and skip
         * this for iser since isert_rx_opcode() does not wait on logout failure,
-        * and to avoid iscsi_conn pointer dereference in iser-target code.
+        * and to avoid iscsit_conn pointer dereference in iser-target code.
         */
        if (!conn->conn_transport->rdma_shutdown)
                complete(&conn->conn_logout_comp);
@@ -4264,7 +4264,7 @@ int iscsit_close_connection(
         *
         * During normal operation clear the out of order commands (but
         * do not free the struct iscsi_ooo_cmdsn's) and release all
-        * struct iscsi_cmds.
+        * struct iscsit_cmds.
         */
        if (atomic_read(&conn->connection_recovery)) {
                iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
@@ -4450,7 +4450,7 @@ int iscsit_close_connection(
  * If the iSCSI Session for the iSCSI Initiator Node exists,
  * forcefully shutdown the iSCSI NEXUS.
  */
-int iscsit_close_session(struct iscsi_session *sess, bool can_sleep)
+int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
 {
        struct iscsi_portal_group *tpg = sess->tpg;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
@@ -4518,9 +4518,9 @@ int iscsit_close_session(struct iscsi_session *sess, bool can_sleep)
 }
 
 static void iscsit_logout_post_handler_closesession(
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        int sleep = 1;
        /*
         * Traditional iscsi/tcp will invoke this logic from TX thread
@@ -4547,7 +4547,7 @@ static void iscsit_logout_post_handler_closesession(
 }
 
 static void iscsit_logout_post_handler_samecid(
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
        int sleep = 1;
 
@@ -4565,11 +4565,11 @@ static void iscsit_logout_post_handler_samecid(
 }
 
 static void iscsit_logout_post_handler_diffcid(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        u16 cid)
 {
-       struct iscsi_conn *l_conn;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_conn *l_conn;
+       struct iscsit_session *sess = conn->sess;
        bool conn_found = false;
 
        if (!sess)
@@ -4604,8 +4604,8 @@ static void iscsit_logout_post_handler_diffcid(
  *     Return of 0 causes the TX thread to restart.
  */
 int iscsit_logout_post_handler(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        int ret = 0;
 
@@ -4661,9 +4661,9 @@ int iscsit_logout_post_handler(
 }
 EXPORT_SYMBOL(iscsit_logout_post_handler);
 
-void iscsit_fail_session(struct iscsi_session *sess)
+void iscsit_fail_session(struct iscsit_session *sess)
 {
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
 
        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
@@ -4677,12 +4677,12 @@ void iscsit_fail_session(struct iscsi_session *sess)
 }
 
 void iscsit_stop_session(
-       struct iscsi_session *sess,
+       struct iscsit_session *sess,
        int session_sleep,
        int connection_sleep)
 {
        u16 conn_count = atomic_read(&sess->nconn);
-       struct iscsi_conn *conn, *conn_tmp = NULL;
+       struct iscsit_conn *conn, *conn_tmp = NULL;
        int is_last;
 
        spin_lock_bh(&sess->conn_lock);
@@ -4724,7 +4724,7 @@ void iscsit_stop_session(
 
 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
 {
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
        struct se_session *se_sess, *se_sess_tmp;
        LIST_HEAD(free_list);
@@ -4738,7 +4738,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
 
        list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
                        sess_list) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
 
                spin_lock(&sess->conn_lock);
                if (atomic_read(&sess->session_fall_back_to_erl0) ||
@@ -4759,7 +4759,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
        spin_unlock_bh(&se_tpg->session_lock);
 
        list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
 
                list_del_init(&se_sess->sess_list);
                iscsit_stop_session(sess, 1, 1);
index b35a96d..0c997a0 100644
@@ -5,11 +5,11 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 
-struct iscsi_cmd;
-struct iscsi_conn;
+struct iscsit_cmd;
+struct iscsit_conn;
 struct iscsi_np;
 struct iscsi_portal_group;
-struct iscsi_session;
+struct iscsit_session;
 struct iscsi_tpg_np;
 struct kref;
 struct sockaddr_storage;
@@ -30,20 +30,20 @@ extern struct iscsi_np *iscsit_add_np(struct sockaddr_storage *,
 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
                                struct iscsi_portal_group *, bool);
 extern int iscsit_del_np(struct iscsi_np *);
-extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
-extern void iscsit_set_unsolicited_dataout(struct iscsi_cmd *);
-extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery);
-extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
+extern int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8, unsigned char *);
+extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *);
+extern int iscsit_logout_closesession(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_logout_closeconnection(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_send_async_msg(struct iscsit_conn *, u16, u8, u8);
+extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *, bool recovery);
+extern void iscsit_thread_get_cpumask(struct iscsit_conn *);
 extern int iscsi_target_tx_thread(void *);
 extern int iscsi_target_rx_thread(void *);
-extern int iscsit_close_connection(struct iscsi_conn *);
-extern int iscsit_close_session(struct iscsi_session *, bool can_sleep);
-extern void iscsit_fail_session(struct iscsi_session *);
-extern void iscsit_stop_session(struct iscsi_session *, int, int);
+extern int iscsit_close_connection(struct iscsit_conn *);
+extern int iscsit_close_session(struct iscsit_session *, bool can_sleep);
+extern void iscsit_fail_session(struct iscsit_session *);
+extern void iscsit_stop_session(struct iscsit_session *, int, int);
 extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
 
 extern struct iscsit_global *iscsit_global;
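
Every prototype in this header takes the objects by pointer, so forward declarations are all it needs and the rename is a pure spelling change at each declaration site. The same pattern in miniature (the names below are invented):

#ifndef EXAMPLE_TARGET_API_H
#define EXAMPLE_TARGET_API_H

struct iscsit_cmd;			/* opaque to header consumers */
struct iscsit_conn;

extern int example_queue_rsp(struct iscsit_conn *, struct iscsit_cmd *);

#endif /* EXAMPLE_TARGET_API_H */
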
index 62d912b..6e5611d 100644
@@ -35,7 +35,7 @@ static char *chap_get_digest_name(const int digest_type)
 }
 
 static int chap_gen_challenge(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        int caller,
        char *c_str,
        unsigned int *c_len)
@@ -128,14 +128,14 @@ out:
        return r;
 }
 
-static void chap_close(struct iscsi_conn *conn)
+static void chap_close(struct iscsit_conn *conn)
 {
        kfree(conn->auth_protocol);
        conn->auth_protocol = NULL;
 }
 
 static struct iscsi_chap *chap_server_open(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_node_auth *auth,
        const char *a_str,
        char *aic_str,
@@ -206,7 +206,7 @@ static struct iscsi_chap *chap_server_open(
 }
 
 static int chap_server_compute_hash(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_node_auth *auth,
        char *nr_in_ptr,
        char *nr_out_ptr,
@@ -497,7 +497,7 @@ out:
 }
 
 u32 chap_main_loop(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_node_auth *auth,
        char *in_text,
        char *out_text,
index fc75c1c..ceb9b77 100644
@@ -27,9 +27,9 @@
 #define CHAP_STAGE_SERVER_NR   5
 
 struct iscsi_node_auth;
-struct iscsi_conn;
+struct iscsit_conn;
 
-extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
+extern u32 chap_main_loop(struct iscsit_conn *, struct iscsi_node_auth *, char *, char *,
                                int *, int *);
 
 struct iscsi_chap {
index 57b4fd5..ce14540 100644
@@ -443,7 +443,7 @@ static ssize_t iscsi_nacl_param_##name##_show(struct config_item *item,     \
                char *page)                                             \
 {                                                                      \
        struct se_node_acl *se_nacl = param_to_nacl(item);              \
-       struct iscsi_session *sess;                                     \
+       struct iscsit_session *sess;                                    \
        struct se_session *se_sess;                                     \
        ssize_t rb;                                                     \
                                                                        \
@@ -498,8 +498,8 @@ static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
 static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
 {
        struct se_node_acl *se_nacl = acl_to_nacl(item);
-       struct iscsi_session *sess;
-       struct iscsi_conn *conn;
+       struct iscsit_session *sess;
+       struct iscsit_conn *conn;
        struct se_session *se_sess;
        ssize_t rb = 0;
        u32 max_cmd_sn;
@@ -1344,14 +1344,14 @@ static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
 
 static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
 {
-       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
 
        return cmd->i_state;
 }
 
 static u32 lio_sess_get_index(struct se_session *se_sess)
 {
-       struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+       struct iscsit_session *sess = se_sess->fabric_sess_ptr;
 
        return sess->session_index;
 }
@@ -1361,7 +1361,7 @@ static u32 lio_sess_get_initiator_sid(
        unsigned char *buf,
        u32 size)
 {
-       struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+       struct iscsit_session *sess = se_sess->fabric_sess_ptr;
        /*
         * iSCSI Initiator Session Identifier from RFC-3720.
         */
@@ -1370,8 +1370,8 @@ static u32 lio_sess_get_initiator_sid(
 
 static int lio_queue_data_in(struct se_cmd *se_cmd)
 {
-       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
+       struct iscsit_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_DATAIN;
        return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
@@ -1379,8 +1379,8 @@ static int lio_queue_data_in(struct se_cmd *se_cmd)
 
 static int lio_write_pending(struct se_cmd *se_cmd)
 {
-       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
+       struct iscsit_conn *conn = cmd->conn;
 
        if (!cmd->immediate_data && !cmd->unsolicited_data)
                return conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
@@ -1390,8 +1390,8 @@ static int lio_write_pending(struct se_cmd *se_cmd)
 
 static int lio_queue_status(struct se_cmd *se_cmd)
 {
-       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
+       struct iscsit_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_STATUS;
 
@@ -1403,7 +1403,7 @@ static int lio_queue_status(struct se_cmd *se_cmd)
 
 static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
 {
-       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
 
        cmd->i_state = ISTATE_SEND_TASKMGTRSP;
        iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
@@ -1411,7 +1411,7 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
 
 static void lio_aborted_task(struct se_cmd *se_cmd)
 {
-       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
 
        cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
 }
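
Each of the fabric callbacks above recovers its iscsit_cmd from the embedded se_cmd with container_of(). A self-contained toy version of that step, with stand-in struct layouts rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd_stub { int tag; };

struct iscsit_cmd_stub {
	int i_state;
	struct se_cmd_stub se_cmd;	/* embedded core descriptor */
};

static void queue_data_in(struct se_cmd_stub *se_cmd)
{
	struct iscsit_cmd_stub *cmd =
		container_of(se_cmd, struct iscsit_cmd_stub, se_cmd);

	printf("fabric cmd state %d, core tag %d\n", cmd->i_state, se_cmd->tag);
}

int main(void)
{
	struct iscsit_cmd_stub cmd = { .i_state = 1, .se_cmd = { .tag = 42 } };

	queue_data_in(&cmd.se_cmd);
	return 0;
}
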
@@ -1472,11 +1472,11 @@ static int lio_tpg_check_prot_fabric_only(
 
 /*
  * This function calls iscsit_inc_session_usage_count() on the
- * struct iscsi_session in question.
+ * struct iscsit_session in question.
  */
 static void lio_tpg_close_session(struct se_session *se_sess)
 {
-       struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+       struct iscsit_session *sess = se_sess->fabric_sess_ptr;
        struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
 
        spin_lock_bh(&se_tpg->session_lock);
@@ -1526,7 +1526,7 @@ static int lio_check_stop_free(struct se_cmd *se_cmd)
 
 static void lio_release_cmd(struct se_cmd *se_cmd)
 {
-       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
 
        pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
        iscsit_release_cmd(cmd);
index 07a22cd..2d44781 100644
@@ -32,14 +32,14 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void)
        return dr;
 }
 
-void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+void iscsit_attach_datain_req(struct iscsit_cmd *cmd, struct iscsi_datain_req *dr)
 {
        spin_lock(&cmd->datain_lock);
        list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
        spin_unlock(&cmd->datain_lock);
 }
 
-void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+void iscsit_free_datain_req(struct iscsit_cmd *cmd, struct iscsi_datain_req *dr)
 {
        spin_lock(&cmd->datain_lock);
        list_del(&dr->cmd_datain_node);
@@ -48,7 +48,7 @@ void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
        kmem_cache_free(lio_dr_cache, dr);
 }
 
-void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
+void iscsit_free_all_datain_reqs(struct iscsit_cmd *cmd)
 {
        struct iscsi_datain_req *dr, *dr_tmp;
 
@@ -60,7 +60,7 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
        spin_unlock(&cmd->datain_lock);
 }
 
-struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
+struct iscsi_datain_req *iscsit_get_datain_req(struct iscsit_cmd *cmd)
 {
        if (list_empty(&cmd->datain_list)) {
                pr_err("cmd->datain_list is empty for ITT:"
@@ -76,11 +76,11 @@ struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
  *     For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
  */
 static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_datain *datain)
 {
        u32 next_burst_len, read_data_done, read_data_left;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_datain_req *dr;
 
        dr = iscsit_get_datain_req(cmd);
@@ -174,11 +174,11 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
  *     For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
  */
 static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_datain *datain)
 {
        u32 offset, read_data_done, read_data_left, seq_send_order;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_datain_req *dr;
        struct iscsi_seq *seq;
 
@@ -295,11 +295,11 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
  *     For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
  */
 static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_datain *datain)
 {
        u32 next_burst_len, read_data_done, read_data_left;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_datain_req *dr;
        struct iscsi_pdu *pdu;
 
@@ -394,11 +394,11 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
  *     For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
  */
 static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_datain *datain)
 {
        u32 read_data_done, read_data_left, seq_send_order;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_datain_req *dr;
        struct iscsi_pdu *pdu;
        struct iscsi_seq *seq = NULL;
@@ -496,10 +496,10 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
 }
 
 struct iscsi_datain_req *iscsit_get_datain_values(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_datain *datain)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        if (conn->sess->sess_ops->DataSequenceInOrder &&
            conn->sess->sess_ops->DataPDUInOrder)
index a420fbd..b28df88 100644
@@ -2,15 +2,15 @@
 #ifndef ISCSI_TARGET_DATAIN_VALUES_H
 #define ISCSI_TARGET_DATAIN_VALUES_H
 
-struct iscsi_cmd;
+struct iscsit_cmd;
 struct iscsi_datain;
 
 extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
-extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
-extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
-extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
-extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
-extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
+extern void iscsit_attach_datain_req(struct iscsit_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_datain_req(struct iscsit_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_all_datain_reqs(struct iscsit_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsit_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsit_cmd *,
                        struct iscsi_datain *);
 
 #endif   /*** ISCSI_TARGET_DATAIN_VALUES_H ***/
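
iscsit_get_datain_values() picks one of four DataIN strategies from the negotiated DataSequenceInOrder/DataPDUInOrder pair. A sketch of just that selection, with empty placeholder handlers:

#include <stdbool.h>
#include <stdio.h>

typedef void (*datain_fn)(void);

static void yes_and_yes(void) { puts("in-order sequences, in-order PDUs"); }
static void yes_and_no(void)  { puts("in-order sequences, out-of-order PDUs"); }
static void no_and_yes(void)  { puts("out-of-order sequences, in-order PDUs"); }
static void no_and_no(void)   { puts("out-of-order sequences and PDUs"); }

static datain_fn pick_datain_strategy(bool seq_in_order, bool pdu_in_order)
{
	static const datain_fn table[2][2] = {
		[0][0] = no_and_no,   [0][1] = no_and_yes,
		[1][0] = yes_and_no,  [1][1] = yes_and_yes,
	};

	return table[seq_in_order][pdu_in_order];
}
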
index 8bf36ec..b565ce3 100644
@@ -17,7 +17,7 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 
-void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
+void iscsit_determine_maxcmdsn(struct iscsit_session *sess)
 {
        struct se_node_acl *se_nacl;
 
@@ -42,7 +42,7 @@ void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
        atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
 }
 
-void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
+void iscsit_increment_maxcmdsn(struct iscsit_cmd *cmd, struct iscsit_session *sess)
 {
        u32 max_cmd_sn;
 
index ab2166f..3663401 100644
@@ -2,10 +2,10 @@
 #ifndef ISCSI_TARGET_DEVICE_H
 #define ISCSI_TARGET_DEVICE_H
 
-struct iscsi_cmd;
-struct iscsi_session;
+struct iscsit_cmd;
+struct iscsit_session;
 
-extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
-extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+extern void iscsit_determine_maxcmdsn(struct iscsit_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsit_cmd *, struct iscsit_session *);
 
 #endif /* ISCSI_TARGET_DEVICE_H */
index 102c9cb..07e9cf4 100644
 #include "iscsi_target.h"
 
 /*
- *     Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
+ *     Used to set values in struct iscsit_cmd that iscsit_dataout_check_sequence()
  *     checks against to determine a PDU's Offset+Length is within the current
  *     DataOUT Sequence.  Used for DataSequenceInOrder=Yes only.
  */
 void iscsit_set_dataout_sequence_values(
-       struct iscsi_cmd *cmd)
+       struct iscsit_cmd *cmd)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        /*
         * Still set seq_start_offset and seq_end_offset for Unsolicited
         * DataOUT, even if DataSequenceInOrder=No.
@@ -63,10 +63,10 @@ void iscsit_set_dataout_sequence_values(
 }
 
 static int iscsit_dataout_within_command_recovery_check(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);
 
@@ -129,11 +129,11 @@ dump:
 }
 
 static int iscsit_dataout_check_unsolicited_sequence(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
        u32 first_burst_len;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);
 
@@ -204,11 +204,11 @@ out:
 }
 
 static int iscsit_dataout_check_sequence(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
        u32 next_burst_len;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_seq *seq = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);
@@ -333,11 +333,11 @@ out:
 }
 
 static int iscsit_dataout_check_datasn(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
        u32 data_sn = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);
 
@@ -384,17 +384,17 @@ dump:
 }
 
 static int iscsit_dataout_pre_datapduinorder_yes(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
        int dump = 0, recovery = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);
 
        /*
         * For DataSequenceInOrder=Yes: If the offset is greater than the global
-        * DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protcol error has
+        * DataPDUInOrder=Yes offset counter in struct iscsit_cmd a protocol error has
         * occurred and fail the connection.
         *
         * For DataSequenceInOrder=No: If the offset is greater than the per
@@ -446,7 +446,7 @@ dump:
 }
 
 static int iscsit_dataout_pre_datapduinorder_no(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
        struct iscsi_pdu *pdu;
@@ -477,7 +477,7 @@ static int iscsit_dataout_pre_datapduinorder_no(
        return DATAOUT_NORMAL;
 }
 
-static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
+static int iscsit_dataout_update_r2t(struct iscsit_cmd *cmd, u32 offset, u32 length)
 {
        struct iscsi_r2t *r2t;
 
@@ -497,7 +497,7 @@ static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 leng
 }
 
 static int iscsit_dataout_update_datapduinorder_no(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 data_sn,
        int f_bit)
 {
@@ -530,11 +530,11 @@ static int iscsit_dataout_update_datapduinorder_no(
 }
 
 static int iscsit_dataout_post_crc_passed(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
        int ret, send_r2t = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_seq *seq = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);
@@ -641,10 +641,10 @@ static int iscsit_dataout_post_crc_passed(
 }
 
 static int iscsit_dataout_post_crc_failed(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_pdu *pdu;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);
@@ -679,11 +679,11 @@ recover:
  *     and CRC computed.
  */
 int iscsit_check_pre_dataout(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
        int ret;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
        if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
@@ -717,11 +717,11 @@ int iscsit_check_pre_dataout(
  *     and CRC computed.
  */
 int iscsit_check_post_dataout(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf,
        u8 data_crc_failed)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        cmd->dataout_timeout_retries = 0;
 
@@ -743,7 +743,7 @@ int iscsit_check_post_dataout(
 
 void iscsit_handle_time2retain_timeout(struct timer_list *t)
 {
-       struct iscsi_session *sess = from_timer(sess, t, time2retain_timer);
+       struct iscsit_session *sess = from_timer(sess, t, time2retain_timer);
        struct iscsi_portal_group *tpg = sess->tpg;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 
@@ -768,7 +768,7 @@ void iscsit_handle_time2retain_timeout(struct timer_list *t)
        iscsit_close_session(sess, false);
 }
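
The from_timer() call above is container_of() over the embedded timer: the expiry callback receives the timer pointer and recovers the session that owns it. A toy, self-contained version of the same recovery (simplified types, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_stub { void (*fn)(struct timer_stub *); };

struct session_stub {
	unsigned int sid;
	struct timer_stub time2retain_timer;
};

static void time2retain_timeout(struct timer_stub *t)
{
	struct session_stub *sess =
		container_of(t, struct session_stub, time2retain_timer);

	printf("Time2Retain expired for SID %u\n", sess->sid);
}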
 
-void iscsit_start_time2retain_handler(struct iscsi_session *sess)
+void iscsit_start_time2retain_handler(struct iscsit_session *sess)
 {
        int tpg_active;
        /*
@@ -794,7 +794,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
                  jiffies + sess->sess_ops->DefaultTime2Retain * HZ);
 }
 
-int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
+int iscsit_stop_time2retain_timer(struct iscsit_session *sess)
 {
        struct iscsi_portal_group *tpg = sess->tpg;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
@@ -819,7 +819,7 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
        return 0;
 }
 
-void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+void iscsit_connection_reinstatement_rcfr(struct iscsit_conn *conn)
 {
        spin_lock_bh(&conn->state_lock);
        if (atomic_read(&conn->connection_exit)) {
@@ -843,7 +843,7 @@ sleep:
        complete(&conn->conn_post_wait_comp);
 }
 
-void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+void iscsit_cause_connection_reinstatement(struct iscsit_conn *conn, int sleep)
 {
        spin_lock_bh(&conn->state_lock);
        if (atomic_read(&conn->connection_exit)) {
@@ -880,7 +880,7 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
 }
 EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
 
-void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
+void iscsit_fall_back_to_erl0(struct iscsit_session *sess)
 {
        pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
                        " %u\n", sess->sid);
@@ -888,9 +888,9 @@ void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
        atomic_set(&sess->session_fall_back_to_erl0, 1);
 }
 
-static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+static void iscsit_handle_connection_cleanup(struct iscsit_conn *conn)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
 
        if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
            !atomic_read(&sess->session_reinstatement) &&
@@ -904,7 +904,7 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
        }
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
+void iscsit_take_action_for_connection_exit(struct iscsit_conn *conn, bool *conn_freed)
 {
        *conn_freed = false;
 
index 883ebf6..2a877d1 100644
@@ -4,19 +4,19 @@
 
 #include <linux/types.h>
 
-struct iscsi_cmd;
-struct iscsi_conn;
-struct iscsi_session;
+struct iscsit_cmd;
+struct iscsit_conn;
+struct iscsit_session;
 
-extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
-extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
-extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
-extern void iscsit_start_time2retain_handler(struct iscsi_session *);
+extern void iscsit_set_dataout_sequence_values(struct iscsit_cmd *);
+extern int iscsit_check_pre_dataout(struct iscsit_cmd *, unsigned char *);
+extern int iscsit_check_post_dataout(struct iscsit_cmd *, unsigned char *, u8);
+extern void iscsit_start_time2retain_handler(struct iscsit_session *);
 extern void iscsit_handle_time2retain_timeout(struct timer_list *t);
-extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
-extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
-extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
-extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
+extern int iscsit_stop_time2retain_timer(struct iscsit_session *);
+extern void iscsit_connection_reinstatement_rcfr(struct iscsit_conn *);
+extern void iscsit_cause_connection_reinstatement(struct iscsit_conn *, int);
+extern void iscsit_fall_back_to_erl0(struct iscsit_session *);
+extern void iscsit_take_action_for_connection_exit(struct iscsit_conn *, bool *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
index 0dd52f4..f460a66 100644
@@ -36,7 +36,7 @@
  *     to be dumped.
  */
 int iscsit_dump_data_payload(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        u32 buf_len,
        int dump_padding_digest)
 {
@@ -87,7 +87,7 @@ int iscsit_dump_data_payload(
  *     Used for retransmitting R2Ts from a R2T SNACK request.
  */
 static int iscsit_send_recovery_r2t_for_snack(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_r2t *r2t)
 {
        /*
@@ -109,7 +109,7 @@ static int iscsit_send_recovery_r2t_for_snack(
 }
 
 static int iscsit_handle_r2t_snack(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf,
        u32 begrun,
        u32 runlength)
@@ -167,13 +167,13 @@ static int iscsit_handle_r2t_snack(
  *     FIXME: How is this handled for a RData SNACK?
  */
 int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_datain_req *dr)
 {
        u32 data_sn = 0, data_sn_count = 0;
        u32 pdu_start = 0, seq_no = 0;
        u32 begrun = dr->begrun;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        while (begrun > data_sn++) {
                data_sn_count++;
@@ -213,18 +213,18 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
  *     FIXME: How is this handled for a RData SNACK?
  */
 int iscsit_create_recovery_datain_values_datasequenceinorder_no(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_datain_req *dr)
 {
        int found_seq = 0, i;
        u32 data_sn, read_data_done = 0, seq_send_order = 0;
        u32 begrun = dr->begrun;
        u32 runlength = dr->runlength;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_seq *first_seq = NULL, *seq = NULL;
 
        if (!cmd->seq_list) {
-               pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+               pr_err("struct iscsit_cmd->seq_list is NULL!\n");
                return -1;
        }
 
@@ -371,12 +371,12 @@ done:
 }
 
 static int iscsit_handle_recovery_datain(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf,
        u32 begrun,
        u32 runlength)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_datain_req *dr;
        struct se_cmd *se_cmd = &cmd->se_cmd;
 
@@ -432,14 +432,14 @@ static int iscsit_handle_recovery_datain(
 }
 
 int iscsit_handle_recovery_datain_or_r2t(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        unsigned char *buf,
        itt_t init_task_tag,
        u32 targ_xfer_tag,
        u32 begrun,
        u32 runlength)
 {
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
 
        cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
        if (!cmd)
@@ -465,13 +465,13 @@ int iscsit_handle_recovery_datain_or_r2t(
 
 /* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
 int iscsit_handle_status_snack(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        itt_t init_task_tag,
        u32 targ_xfer_tag,
        u32 begrun,
        u32 runlength)
 {
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_cmd *cmd = NULL;
        u32 last_statsn;
        int found_cmd;
 
@@ -529,12 +529,12 @@ int iscsit_handle_status_snack(
 }
 
 int iscsit_handle_data_ack(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        u32 targ_xfer_tag,
        u32 begrun,
        u32 runlength)
 {
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_cmd *cmd = NULL;
 
        cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
        if (!cmd) {
@@ -565,7 +565,7 @@ int iscsit_handle_data_ack(
 }
 
 static int iscsit_send_recovery_r2t(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 offset,
        u32 xfer_len)
 {
@@ -579,12 +579,12 @@ static int iscsit_send_recovery_r2t(
 }
 
 int iscsit_dataout_datapduinorder_no_fbit(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_pdu *pdu)
 {
        int i, send_recovery_r2t = 0, recovery = 0;
        u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_pdu *first_pdu = NULL;
 
        /*
@@ -655,14 +655,14 @@ int iscsit_dataout_datapduinorder_no_fbit(
 }
 
 static int iscsit_recalculate_dataout_values(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 pdu_offset,
        u32 pdu_length,
        u32 *r2t_offset,
        u32 *r2t_length)
 {
        int i;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_pdu *pdu = NULL;
 
        if (conn->sess->sess_ops->DataSequenceInOrder) {
@@ -732,7 +732,7 @@ static int iscsit_recalculate_dataout_values(
 }
 
 int iscsit_recover_dataout_sequence(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 pdu_offset,
        u32 pdu_length)
 {
@@ -767,7 +767,7 @@ static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
 }
 
 static int iscsit_attach_ooo_cmdsn(
-       struct iscsi_session *sess,
+       struct iscsit_session *sess,
        struct iscsi_ooo_cmdsn *ooo_cmdsn)
 {
        struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
@@ -815,20 +815,20 @@ static int iscsit_attach_ooo_cmdsn(
 
 /*
  *     Removes an struct iscsi_ooo_cmdsn from a session's list,
- *     called with struct iscsi_session->cmdsn_mutex held.
+ *     called with struct iscsit_session->cmdsn_mutex held.
  */
 void iscsit_remove_ooo_cmdsn(
-       struct iscsi_session *sess,
+       struct iscsit_session *sess,
        struct iscsi_ooo_cmdsn *ooo_cmdsn)
 {
        list_del(&ooo_cmdsn->ooo_list);
        kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
 }
 
-void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+void iscsit_clear_ooo_cmdsns_for_conn(struct iscsit_conn *conn)
 {
        struct iscsi_ooo_cmdsn *ooo_cmdsn;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
 
        mutex_lock(&sess->cmdsn_mutex);
        list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
@@ -840,10 +840,10 @@ void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
        mutex_unlock(&sess->cmdsn_mutex);
 }
 
-int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
+int iscsit_execute_ooo_cmdsns(struct iscsit_session *sess)
 {
        int ooo_count = 0;
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_cmd *cmd = NULL;
        struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
 
        lockdep_assert_held(&sess->cmdsn_mutex);
@@ -884,10 +884,10 @@ int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
  *     2. With no locks held directly from iscsi_handle_XXX_pdu() functions
  *     for immediate commands.
  */
-int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
+int iscsit_execute_cmd(struct iscsit_cmd *cmd, int ooo)
 {
        struct se_cmd *se_cmd = &cmd->se_cmd;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        int lr = 0;
 
        spin_lock_bh(&cmd->istate_lock);
@@ -994,7 +994,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
        return 0;
 }
 
-void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
+void iscsit_free_all_ooo_cmdsns(struct iscsit_session *sess)
 {
        struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
 
@@ -1009,8 +1009,8 @@ void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
 }
 
 int iscsit_handle_ooo_cmdsn(
-       struct iscsi_session *sess,
-       struct iscsi_cmd *cmd,
+       struct iscsit_session *sess,
+       struct iscsit_cmd *cmd,
        u32 cmdsn)
 {
        int batch = 0;
@@ -1049,11 +1049,11 @@ int iscsit_handle_ooo_cmdsn(
 }
 
 static int iscsit_set_dataout_timeout_values(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 *offset,
        u32 *length)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_r2t *r2t;
 
        if (cmd->unsolicited_data) {
@@ -1095,9 +1095,9 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
 {
        u32 pdu_length = 0, pdu_offset = 0;
        u32 r2t_length = 0, r2t_offset = 0;
-       struct iscsi_cmd *cmd = from_timer(cmd, t, dataout_timer);
-       struct iscsi_conn *conn = cmd->conn;
-       struct iscsi_session *sess = NULL;
+       struct iscsit_cmd *cmd = from_timer(cmd, t, dataout_timer);
+       struct iscsit_conn *conn = cmd->conn;
+       struct iscsit_session *sess = NULL;
        struct iscsi_node_attrib *na;
 
        iscsit_inc_conn_usage_count(conn);
@@ -1179,10 +1179,10 @@ failure:
        iscsit_dec_conn_usage_count(conn);
 }
 
-void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
+void iscsit_mod_dataout_timer(struct iscsit_cmd *cmd)
 {
-       struct iscsi_conn *conn = cmd->conn;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_conn *conn = cmd->conn;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        spin_lock_bh(&cmd->dataout_timeout_lock);
@@ -1199,10 +1199,10 @@ void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
 }
 
 void iscsit_start_dataout_timer(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        lockdep_assert_held(&cmd->dataout_timeout_lock);
@@ -1218,7 +1218,7 @@ void iscsit_start_dataout_timer(
        mod_timer(&cmd->dataout_timer, jiffies + na->dataout_timeout * HZ);
 }
 
-void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
+void iscsit_stop_dataout_timer(struct iscsit_cmd *cmd)
 {
        spin_lock_bh(&cmd->dataout_timeout_lock);
        if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
index 1f6973f..12472ee 100644
@@ -5,34 +5,34 @@
 #include <linux/types.h>
 #include <scsi/iscsi_proto.h> /* itt_t */
 
-struct iscsi_cmd;
-struct iscsi_conn;
+struct iscsit_cmd;
+struct iscsit_conn;
 struct iscsi_datain_req;
 struct iscsi_ooo_cmdsn;
 struct iscsi_pdu;
-struct iscsi_session;
+struct iscsit_session;
 
-extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
+extern int iscsit_dump_data_payload(struct iscsit_conn *, u32, int);
 extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
-                       struct iscsi_cmd *, struct iscsi_datain_req *);
+                       struct iscsit_cmd *, struct iscsi_datain_req *);
 extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
-                       struct iscsi_cmd *, struct iscsi_datain_req *);
-extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
+                       struct iscsit_cmd *, struct iscsi_datain_req *);
+extern int iscsit_handle_recovery_datain_or_r2t(struct iscsit_conn *, unsigned char *,
                        itt_t, u32, u32, u32);
-extern int iscsit_handle_status_snack(struct iscsi_conn *, itt_t, u32,
+extern int iscsit_handle_status_snack(struct iscsit_conn *, itt_t, u32,
                        u32, u32);
-extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
-extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
-extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
-extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
-extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
-extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
-extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
-extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
-extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
+extern int iscsit_handle_data_ack(struct iscsit_conn *, u32, u32, u32);
+extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsit_cmd *, struct iscsi_pdu *);
+extern int iscsit_recover_dataout_sequence(struct iscsit_cmd *, u32, u32);
+extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsit_conn *);
+extern void iscsit_free_all_ooo_cmdsns(struct iscsit_session *);
+extern int iscsit_execute_ooo_cmdsns(struct iscsit_session *);
+extern int iscsit_execute_cmd(struct iscsit_cmd *, int);
+extern int iscsit_handle_ooo_cmdsn(struct iscsit_session *, struct iscsit_cmd *, u32);
+extern void iscsit_remove_ooo_cmdsn(struct iscsit_session *, struct iscsi_ooo_cmdsn *);
 extern void iscsit_handle_dataout_timeout(struct timer_list *t);
-extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
-extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
-extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_mod_dataout_timer(struct iscsit_cmd *);
+extern void iscsit_start_dataout_timer(struct iscsit_cmd *, struct iscsit_conn *);
+extern void iscsit_stop_dataout_timer(struct iscsit_cmd *);
 
 #endif /* ISCSI_TARGET_ERL1_H */
index b1b7db9..18e88d2 100644
  *     FIXME: Does RData SNACK apply here as well?
  */
 void iscsit_create_conn_recovery_datain_values(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        __be32 exp_data_sn)
 {
        u32 data_sn = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        cmd->next_burst_len = 0;
        cmd->read_data_done = 0;
@@ -54,10 +54,10 @@ void iscsit_create_conn_recovery_datain_values(
 }
 
 void iscsit_create_conn_recovery_dataout_values(
-       struct iscsi_cmd *cmd)
+       struct iscsit_cmd *cmd)
 {
        u32 write_data_done = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        cmd->data_sn = 0;
        cmd->next_burst_len = 0;
@@ -74,7 +74,7 @@ void iscsit_create_conn_recovery_dataout_values(
 }
 
 static int iscsit_attach_active_connection_recovery_entry(
-       struct iscsi_session *sess,
+       struct iscsit_session *sess,
        struct iscsi_conn_recovery *cr)
 {
        spin_lock(&sess->cr_a_lock);
@@ -85,7 +85,7 @@ static int iscsit_attach_active_connection_recovery_entry(
 }
 
 static int iscsit_attach_inactive_connection_recovery_entry(
-       struct iscsi_session *sess,
+       struct iscsit_session *sess,
        struct iscsi_conn_recovery *cr)
 {
        spin_lock(&sess->cr_i_lock);
@@ -100,7 +100,7 @@ static int iscsit_attach_inactive_connection_recovery_entry(
 }
 
 struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
-       struct iscsi_session *sess,
+       struct iscsit_session *sess,
        u16 cid)
 {
        struct iscsi_conn_recovery *cr;
@@ -117,9 +117,9 @@ struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
        return NULL;
 }
 
-void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
+void iscsit_free_connection_recovery_entries(struct iscsit_session *sess)
 {
-       struct iscsi_cmd *cmd, *cmd_tmp;
+       struct iscsit_cmd *cmd, *cmd_tmp;
        struct iscsi_conn_recovery *cr, *cr_tmp;
 
        spin_lock(&sess->cr_a_lock);
@@ -169,7 +169,7 @@ void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
 
 int iscsit_remove_active_connection_recovery_entry(
        struct iscsi_conn_recovery *cr,
-       struct iscsi_session *sess)
+       struct iscsit_session *sess)
 {
        spin_lock(&sess->cr_a_lock);
        list_del(&cr->cr_list);
@@ -186,7 +186,7 @@ int iscsit_remove_active_connection_recovery_entry(
 
 static void iscsit_remove_inactive_connection_recovery_entry(
        struct iscsi_conn_recovery *cr,
-       struct iscsi_session *sess)
+       struct iscsit_session *sess)
 {
        spin_lock(&sess->cr_i_lock);
        list_del(&cr->cr_list);
@@ -197,8 +197,8 @@ static void iscsit_remove_inactive_connection_recovery_entry(
  *     Called with cr->conn_recovery_cmd_lock help.
  */
 int iscsit_remove_cmd_from_connection_recovery(
-       struct iscsi_cmd *cmd,
-       struct iscsi_session *sess)
+       struct iscsit_cmd *cmd,
+       struct iscsit_session *sess)
 {
        struct iscsi_conn_recovery *cr;
 
@@ -218,8 +218,8 @@ void iscsit_discard_cr_cmds_by_expstatsn(
        u32 exp_statsn)
 {
        u32 dropped_count = 0;
-       struct iscsi_cmd *cmd, *cmd_tmp;
-       struct iscsi_session *sess = cr->sess;
+       struct iscsit_cmd *cmd, *cmd_tmp;
+       struct iscsit_session *sess = cr->sess;
 
        spin_lock(&cr->conn_recovery_cmd_lock);
        list_for_each_entry_safe(cmd, cmd_tmp,
@@ -263,12 +263,12 @@ void iscsit_discard_cr_cmds_by_expstatsn(
        }
 }
 
-int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsit_conn *conn)
 {
        u32 dropped_count = 0;
-       struct iscsi_cmd *cmd, *cmd_tmp;
+       struct iscsit_cmd *cmd, *cmd_tmp;
        struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
 
        mutex_lock(&sess->cmdsn_mutex);
        list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
@@ -304,16 +304,16 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
        return 0;
 }
 
-int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *conn)
+int iscsit_prepare_cmds_for_reallegiance(struct iscsit_conn *conn)
 {
        u32 cmd_count = 0;
-       struct iscsi_cmd *cmd, *cmd_tmp;
+       struct iscsit_cmd *cmd, *cmd_tmp;
        struct iscsi_conn_recovery *cr;
 
        /*
         * Allocate an struct iscsi_conn_recovery for this connection.
-        * Each struct iscsi_cmd contains an struct iscsi_conn_recovery pointer
-        * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
+        * Each struct iscsit_cmd contains an struct iscsi_conn_recovery pointer
+        * (struct iscsit_cmd->cr) so we need to allocate this before preparing the
         * connection's command list for connection recovery.
         */
        cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *conn)
 
                transport_wait_for_tasks(&cmd->se_cmd);
                /*
-                * Add the struct iscsi_cmd to the connection recovery cmd list
+                * Add the struct iscsit_cmd to the connection recovery cmd list
                 */
                spin_lock(&cr->conn_recovery_cmd_lock);
                list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
@@ -418,7 +418,7 @@ int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *conn)
        return 0;
 }
 
-int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
+int iscsit_connection_recovery_transport_reset(struct iscsit_conn *conn)
 {
        atomic_set(&conn->connection_recovery, 1);
 
index a39b0ca..6655e4b 100644
@@ -4,23 +4,23 @@
 
 #include <linux/types.h>
 
-struct iscsi_cmd;
-struct iscsi_conn;
+struct iscsit_cmd;
+struct iscsit_conn;
 struct iscsi_conn_recovery;
-struct iscsi_session;
+struct iscsit_session;
 
-extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
-extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
+extern void iscsit_create_conn_recovery_datain_values(struct iscsit_cmd *, __be32);
+extern void iscsit_create_conn_recovery_dataout_values(struct iscsit_cmd *);
 extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
-                       struct iscsi_session *, u16);
-extern void iscsit_free_connection_recovery_entries(struct iscsi_session *);
+                       struct iscsit_session *, u16);
+extern void iscsit_free_connection_recovery_entries(struct iscsit_session *);
 extern int iscsit_remove_active_connection_recovery_entry(
-                       struct iscsi_conn_recovery *, struct iscsi_session *);
-extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
-                       struct iscsi_session *);
+                       struct iscsi_conn_recovery *, struct iscsit_session *);
+extern int iscsit_remove_cmd_from_connection_recovery(struct iscsit_cmd *,
+                       struct iscsit_session *);
 extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
-extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
-extern int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *);
-extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
+extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsit_conn *);
+extern int iscsit_prepare_cmds_for_reallegiance(struct iscsit_conn *);
+extern int iscsit_connection_recovery_transport_reset(struct iscsit_conn *);
 
 #endif /*** ISCSI_TARGET_ERL2_H ***/
index 9c01fb8..6b94eec 100644
@@ -35,7 +35,7 @@
 
 #include <target/iscsi/iscsi_transport.h>
 
-static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
+static struct iscsi_login *iscsi_login_init_conn(struct iscsit_conn *conn)
 {
        struct iscsi_login *login;
 
@@ -73,9 +73,9 @@ out_login:
 
 /*
  * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
- * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
+ * per struct iscsit_conn libcrypto contexts for crc32c and crc32-intel
  */
-int iscsi_login_setup_crypto(struct iscsi_conn *conn)
+int iscsi_login_setup_crypto(struct iscsit_conn *conn)
 {
        struct crypto_ahash *tfm;
 
@@ -112,7 +112,7 @@ int iscsi_login_setup_crypto(struct iscsi_conn *conn)
 }
 
 static int iscsi_login_check_initiator_version(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        u8 version_max,
        u8 version_min)
 {
@@ -128,12 +128,12 @@ static int iscsi_login_check_initiator_version(
        return 0;
 }
 
-int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+int iscsi_check_for_session_reinstatement(struct iscsit_conn *conn)
 {
        int sessiontype;
        struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
        struct iscsi_portal_group *tpg = conn->tpg;
-       struct iscsi_session *sess = NULL, *sess_p = NULL;
+       struct iscsit_session *sess = NULL, *sess_p = NULL;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
        struct se_session *se_sess, *se_sess_tmp;
 
@@ -204,8 +204,8 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
 }
 
 static int iscsi_login_set_conn_values(
-       struct iscsi_session *sess,
-       struct iscsi_conn *conn,
+       struct iscsit_session *sess,
+       struct iscsit_conn *conn,
        __be16 cid)
 {
        int ret;
@@ -226,7 +226,7 @@ static int iscsi_login_set_conn_values(
 }
 
 __printf(2, 3) int iscsi_change_param_sprintf(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        const char *fmt, ...)
 {
        va_list args;
@@ -253,14 +253,14 @@ EXPORT_SYMBOL(iscsi_change_param_sprintf);
  *     or session reinstatement.
  */
 static int iscsi_login_zero_tsih_s1(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        unsigned char *buf)
 {
-       struct iscsi_session *sess = NULL;
+       struct iscsit_session *sess = NULL;
        struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
        int ret;
 
-       sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
+       sess = kzalloc(sizeof(struct iscsit_session), GFP_KERNEL);
        if (!sess) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -337,10 +337,10 @@ free_sess:
 }
 
 static int iscsi_login_zero_tsih_s2(
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
        struct iscsi_node_attrib *na;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        bool iser = false;
 
        sess->tpg = conn->tpg;
@@ -458,7 +458,7 @@ check_prot:
 }
 
 static int iscsi_login_non_zero_tsih_s1(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        unsigned char *buf)
 {
        struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
@@ -470,11 +470,11 @@ static int iscsi_login_non_zero_tsih_s1(
  *     Add a new connection to an existing session.
  */
 static int iscsi_login_non_zero_tsih_s2(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        unsigned char *buf)
 {
        struct iscsi_portal_group *tpg = conn->tpg;
-       struct iscsi_session *sess = NULL, *sess_p = NULL;
+       struct iscsit_session *sess = NULL, *sess_p = NULL;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
        struct se_session *se_sess, *se_sess_tmp;
        struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
@@ -484,7 +484,7 @@ static int iscsi_login_non_zero_tsih_s2(
        list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
                        sess_list) {
 
-               sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess_p = (struct iscsit_session *)se_sess->fabric_sess_ptr;
                if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
                    atomic_read(&sess_p->session_logout) ||
                    atomic_read(&sess_p->session_close) ||
@@ -546,13 +546,13 @@ static int iscsi_login_non_zero_tsih_s2(
 }
 
 int iscsi_login_post_auth_non_zero_tsih(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        u16 cid,
        u32 exp_statsn)
 {
-       struct iscsi_conn *conn_ptr = NULL;
+       struct iscsit_conn *conn_ptr = NULL;
        struct iscsi_conn_recovery *cr = NULL;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
 
        /*
         * By following item 5 in the login table,  if we have found
@@ -612,9 +612,9 @@ int iscsi_login_post_auth_non_zero_tsih(
        return 0;
 }
 
-static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+static void iscsi_post_login_start_timers(struct iscsit_conn *conn)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        /*
         * FIXME: Unsolicited NopIN support for ISER
         */
@@ -625,7 +625,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
                iscsit_start_nopin_timer(conn);
 }
 
-int iscsit_start_kthreads(struct iscsi_conn *conn)
+int iscsit_start_kthreads(struct iscsit_conn *conn)
 {
        int ret = 0;
 
@@ -673,11 +673,11 @@ out_bitmap:
 
 void iscsi_post_login_handler(
        struct iscsi_np *np,
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        u8 zero_tsih)
 {
        int stop_timer = 0;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct se_session *se_sess = sess->se_sess;
        struct iscsi_portal_group *tpg = sess->tpg;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
@@ -730,7 +730,7 @@ void iscsi_post_login_handler(
                conn->conn_tx_reset_cpumask = 1;
                /*
                 * Wakeup the sleeping iscsi_target_rx_thread() now that
-                * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+                * iscsit_conn is in TARG_CONN_STATE_LOGGED_IN state.
                 */
                complete(&conn->rx_login_comp);
                iscsit_dec_conn_usage_count(conn);
@@ -792,7 +792,7 @@ void iscsi_post_login_handler(
        conn->conn_tx_reset_cpumask = 1;
        /*
         * Wakeup the sleeping iscsi_target_rx_thread() now that
-        * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+        * iscsit_conn is in TARG_CONN_STATE_LOGGED_IN state.
         */
        complete(&conn->rx_login_comp);
        iscsit_dec_conn_usage_count(conn);
@@ -944,7 +944,7 @@ int iscsi_target_setup_login_socket(
        return 0;
 }
 
-int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+int iscsit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
 {
        struct socket *new_sock, *sock = np->np_socket;
        struct sockaddr_in sock_in;
@@ -1005,7 +1005,7 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
        return 0;
 }
 
-int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+int iscsit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
 {
        struct iscsi_login_req *login_req;
        u32 padding = 0, payload_length;
@@ -1050,7 +1050,7 @@ int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
        return 0;
 }
 
-int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+int iscsit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
                        u32 length)
 {
        if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0)
@@ -1060,7 +1060,7 @@ int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 }
 
 static int
-iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
+iscsit_conn_set_transport(struct iscsit_conn *conn, struct iscsit_transport *t)
 {
        int rc;
 
@@ -1079,11 +1079,11 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
        return 0;
 }
 
-static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
+static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
 {
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
 
-       conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+       conn = kzalloc(sizeof(struct iscsit_conn), GFP_KERNEL);
        if (!conn) {
                pr_err("Could not allocate memory for new connection\n");
                return NULL;
@@ -1147,7 +1147,7 @@ free_conn:
        return NULL;
 }
 
-void iscsit_free_conn(struct iscsi_conn *conn)
+void iscsit_free_conn(struct iscsit_conn *conn)
 {
        free_cpumask_var(conn->allowed_cpumask);
        free_cpumask_var(conn->conn_cpumask);
@@ -1156,7 +1156,7 @@ void iscsit_free_conn(struct iscsi_conn *conn)
        kfree(conn);
 }
 
-void iscsi_target_login_sess_out(struct iscsi_conn *conn,
+void iscsi_target_login_sess_out(struct iscsit_conn *conn,
                                 bool zero_tsih, bool new_sess)
 {
        if (!new_sess)
@@ -1228,7 +1228,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
        u8 *buffer, zero_tsih = 0;
        int ret = 0, rc;
-       struct iscsi_conn *conn = NULL;
+       struct iscsit_conn *conn = NULL;
        struct iscsi_login *login;
        struct iscsi_portal_group *tpg = NULL;
        struct iscsi_login_req *pdu;
@@ -1371,7 +1371,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 
        tpg = conn->tpg;
        if (!tpg) {
-               pr_err("Unable to locate struct iscsi_conn->tpg\n");
+               pr_err("Unable to locate struct iscsit_conn->tpg\n");
                goto new_sess_out;
        }
 
index fc95e61..3ca2f23 100644
@@ -4,25 +4,25 @@
 
 #include <linux/types.h>
 
-struct iscsi_conn;
+struct iscsit_conn;
 struct iscsi_login;
 struct iscsi_np;
 struct sockaddr_storage;
 
-extern int iscsi_login_setup_crypto(struct iscsi_conn *);
-extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
-extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+extern int iscsi_login_setup_crypto(struct iscsit_conn *);
+extern int iscsi_check_for_session_reinstatement(struct iscsit_conn *);
+extern int iscsi_login_post_auth_non_zero_tsih(struct iscsit_conn *, u16, u32);
 extern int iscsit_setup_np(struct iscsi_np *,
                                struct sockaddr_storage *);
 extern int iscsi_target_setup_login_socket(struct iscsi_np *,
                                struct sockaddr_storage *);
-extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
-extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
-extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
-extern void iscsit_free_conn(struct iscsi_conn *);
-extern int iscsit_start_kthreads(struct iscsi_conn *);
-extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
-extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool);
+extern int iscsit_accept_np(struct iscsi_np *, struct iscsit_conn *);
+extern int iscsit_get_login_rx(struct iscsit_conn *, struct iscsi_login *);
+extern int iscsit_put_login_tx(struct iscsit_conn *, struct iscsi_login *, u32);
+extern void iscsit_free_conn(struct iscsit_conn *);
+extern int iscsit_start_kthreads(struct iscsit_conn *);
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsit_conn *, u8);
+extern void iscsi_target_login_sess_out(struct iscsit_conn *, bool, bool);
 extern int iscsi_target_login_thread(void *);
 extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
 
index c0ed6f8..b34ac9e 100644
@@ -95,14 +95,14 @@ int extract_param(
 }
 
 static u32 iscsi_handle_authentication(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        char *in_buf,
        char *out_buf,
        int in_length,
        int *out_length,
        unsigned char *authtype)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_auth *auth;
        struct iscsi_node_acl *iscsi_nacl;
        struct iscsi_portal_group *iscsi_tpg;
@@ -151,13 +151,13 @@ static u32 iscsi_handle_authentication(
        return 2;
 }
 
-static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
+static void iscsi_remove_failed_auth_entry(struct iscsit_conn *conn)
 {
        kfree(conn->auth_protocol);
 }
 
 int iscsi_target_check_login_request(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_login *login)
 {
        int req_csg, req_nsg;
@@ -248,7 +248,7 @@ int iscsi_target_check_login_request(
 EXPORT_SYMBOL(iscsi_target_check_login_request);
 
 static int iscsi_target_check_first_request(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_login *login)
 {
        struct iscsi_param *param = NULL;
@@ -315,7 +315,7 @@ static int iscsi_target_check_first_request(
        return 0;
 }
 
-static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+static int iscsi_target_do_tx_login_io(struct iscsit_conn *conn, struct iscsi_login *login)
 {
        u32 padding = 0;
        struct iscsi_login_rsp *login_rsp;
@@ -382,7 +382,7 @@ err:
 
 static void iscsi_target_sk_data_ready(struct sock *sk)
 {
-       struct iscsi_conn *conn = sk->sk_user_data;
+       struct iscsit_conn *conn = sk->sk_user_data;
        bool rc;
 
        pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
@@ -421,7 +421,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
 
 static void iscsi_target_sk_state_change(struct sock *);
 
-static void iscsi_target_set_sock_callbacks(struct iscsi_conn *conn)
+static void iscsi_target_set_sock_callbacks(struct iscsit_conn *conn)
 {
        struct sock *sk;
 
@@ -443,7 +443,7 @@ static void iscsi_target_set_sock_callbacks(struct iscsi_conn *conn)
        sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ;
 }
 
-static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
+static void iscsi_target_restore_sock_callbacks(struct iscsit_conn *conn)
 {
        struct sock *sk;
 
@@ -467,7 +467,7 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
        sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
 }
 
-static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
+static int iscsi_target_do_login(struct iscsit_conn *, struct iscsi_login *);
 
 static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
@@ -479,7 +479,7 @@ static bool __iscsi_target_sk_check_close(struct sock *sk)
        return false;
 }
 
-static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+static bool iscsi_target_sk_check_close(struct iscsit_conn *conn)
 {
        bool state = false;
 
@@ -494,7 +494,7 @@ static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
        return state;
 }
 
-static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+static bool iscsi_target_sk_check_flag(struct iscsit_conn *conn, unsigned int flag)
 {
        bool state = false;
 
@@ -508,7 +508,7 @@ static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int fla
        return state;
 }
 
-static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+static bool iscsi_target_sk_check_and_clear(struct iscsit_conn *conn, unsigned int flag)
 {
        bool state = false;
 
@@ -525,7 +525,7 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in
        return state;
 }
 
-static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
+static void iscsi_target_login_drop(struct iscsit_conn *conn, struct iscsi_login *login)
 {
        bool zero_tsih = login->zero_tsih;
 
@@ -536,13 +536,13 @@ static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login
 
 struct conn_timeout {
        struct timer_list timer;
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
 };
 
 static void iscsi_target_login_timeout(struct timer_list *t)
 {
        struct conn_timeout *timeout = from_timer(timeout, t, timer);
-       struct iscsi_conn *conn = timeout->conn;
+       struct iscsit_conn *conn = timeout->conn;
 
        pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
 
@@ -555,8 +555,8 @@ static void iscsi_target_login_timeout(struct timer_list *t)
 
 static void iscsi_target_do_login_rx(struct work_struct *work)
 {
-       struct iscsi_conn *conn = container_of(work,
-                               struct iscsi_conn, login_work.work);
+       struct iscsit_conn *conn = container_of(work,
+                               struct iscsit_conn, login_work.work);
        struct iscsi_login *login = conn->login;
        struct iscsi_np *np = login->np;
        struct iscsi_portal_group *tpg = conn->tpg;
@@ -662,7 +662,7 @@ err:
 
 static void iscsi_target_sk_state_change(struct sock *sk)
 {
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
        void (*orig_state_change)(struct sock *);
        bool state;
 
@@ -741,7 +741,7 @@ static void iscsi_target_sk_state_change(struct sock *sk)
  *     ISID/TSIH combinations.
  */
 static int iscsi_target_check_for_existing_instances(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_login *login)
 {
        if (login->checked_for_existing)
@@ -757,7 +757,7 @@ static int iscsi_target_check_for_existing_instances(
 }
 
 static int iscsi_target_do_authentication(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_login *login)
 {
        int authret;
@@ -816,7 +816,7 @@ static int iscsi_target_do_authentication(
 }
 
 static int iscsi_target_handle_csg_zero(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_login *login)
 {
        int ret;
@@ -906,7 +906,7 @@ do_auth:
        return iscsi_target_do_authentication(conn, login);
 }
 
-static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
+static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_login *login)
 {
        int ret;
        u32 payload_length;
@@ -968,7 +968,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
        return 0;
 }
 
-static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
+static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *login)
 {
        int pdu_count = 0;
        struct iscsi_login_req *login_req;
@@ -1054,12 +1054,12 @@ static void iscsi_initiatorname_tolower(
  */
 int iscsi_target_locate_portal(
        struct iscsi_np *np,
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_login *login)
 {
        char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
        char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_tiqn *tiqn;
        struct iscsi_tpg_np *tpg_np = NULL;
        struct iscsi_login_req *login_req;
@@ -1232,7 +1232,7 @@ get_target:
 
        /*
         * conn->sess->node_acl will be set when the referenced
-        * struct iscsi_session is located from received ISID+TSIH in
+        * struct iscsit_session is located from received ISID+TSIH in
         * iscsi_login_non_zero_tsih_s2().
         */
        if (!login->leading_connection) {
@@ -1272,7 +1272,7 @@ get_target:
 alloc_tags:
        tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
        tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
-       tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
+       tag_size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
 
        ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
        if (ret < 0) {
@@ -1287,7 +1287,7 @@ out:
 
 int iscsi_target_start_negotiation(
        struct iscsi_login *login,
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
        int ret;
 
@@ -1323,7 +1323,7 @@ int iscsi_target_start_negotiation(
        return ret;
 }
 
-void iscsi_target_nego_release(struct iscsi_conn *conn)
+void iscsi_target_nego_release(struct iscsit_conn *conn)
 {
        struct iscsi_login *login = conn->conn_login;
 
index 835e1b7..ed30b9e 100644
@@ -5,21 +5,21 @@
 #define DECIMAL         0
 #define HEX             1
 
-struct iscsi_conn;
+struct iscsit_conn;
 struct iscsi_login;
 struct iscsi_np;
 
 extern void convert_null_to_semi(char *, int);
 extern int extract_param(const char *, const char *, unsigned int, char *,
                unsigned char *);
-extern int iscsi_target_check_login_request(struct iscsi_conn *,
+extern int iscsi_target_check_login_request(struct iscsit_conn *,
                struct iscsi_login *);
-extern int iscsi_target_get_initial_payload(struct iscsi_conn *,
+extern int iscsi_target_get_initial_payload(struct iscsit_conn *,
                struct iscsi_login *);
-extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *,
+extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *,
                struct iscsi_login *);
 extern int iscsi_target_start_negotiation(
-               struct iscsi_login *, struct iscsi_conn *);
-extern void iscsi_target_nego_release(struct iscsi_conn *);
+               struct iscsi_login *, struct iscsit_conn *);
+extern void iscsi_target_nego_release(struct iscsit_conn *);
 
 #endif /* ISCSI_TARGET_NEGO_H */
index e3ac247..874cb33 100644
@@ -96,8 +96,8 @@ int iscsit_na_nopin_timeout(
        u32 nopin_timeout)
 {
        struct iscsi_node_attrib *a = &acl->node_attrib;
-       struct iscsi_session *sess;
-       struct iscsi_conn *conn;
+       struct iscsit_session *sess;
+       struct iscsit_conn *conn;
        struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
        struct se_session *se_sess;
        u32 orig_nopin_timeout = a->nopin_timeout;
index 6bc3aaf..2317fb0 100644
@@ -15,7 +15,7 @@
 #include "iscsi_target_parameters.h"
 
 int iscsi_login_rx_data(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        char *buf,
        int length)
 {
@@ -37,7 +37,7 @@ int iscsi_login_rx_data(
 }
 
 int iscsi_login_tx_data(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        char *pdu_buf,
        char *text_buf,
        int text_length)
@@ -955,7 +955,7 @@ out:
 }
 
 static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
-                               struct iscsi_conn *conn)
+                               struct iscsit_conn *conn)
 {
        u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
        char *negotiated_value = NULL;
@@ -1352,7 +1352,7 @@ int iscsi_decode_text_input(
        u8 sender,
        char *textbuf,
        u32 length,
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
        struct iscsi_param_list *param_list = conn->param_list;
        char *tmpbuf, *start = NULL, *end = NULL;
index 240c4c4..00fbbeb 100644
@@ -25,13 +25,13 @@ struct iscsi_param {
        struct list_head p_list;
 } ____cacheline_aligned;
 
-struct iscsi_conn;
+struct iscsit_conn;
 struct iscsi_conn_ops;
 struct iscsi_param_list;
 struct iscsi_sess_ops;
 
-extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
-extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
+extern int iscsi_login_rx_data(struct iscsit_conn *, char *, int);
+extern int iscsi_login_tx_data(struct iscsit_conn *, char *, char *, int);
 extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
 extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
 extern void iscsi_print_params(struct iscsi_param_list *);
@@ -45,7 +45,7 @@ extern void iscsi_release_param_list(struct iscsi_param_list *);
 extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
 extern int iscsi_extract_key_value(char *, char **, char **);
 extern int iscsi_update_param_value(struct iscsi_param *, char *);
-extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsit_conn *);
 extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
                        struct iscsi_param_list *, bool);
 extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
index ea2b02a..66de2b8 100644
@@ -18,7 +18,7 @@
 #include "iscsi_target_seq_pdu_list.h"
 
 #ifdef DEBUG
-static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+static void iscsit_dump_seq_list(struct iscsit_cmd *cmd)
 {
        int i;
        struct iscsi_seq *seq;
@@ -36,7 +36,7 @@ static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
        }
 }
 
-static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+static void iscsit_dump_pdu_list(struct iscsit_cmd *cmd)
 {
        int i;
        struct iscsi_pdu *pdu;
@@ -52,12 +52,12 @@ static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
        }
 }
 #else
-static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {}
-static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {}
+static void iscsit_dump_seq_list(struct iscsit_cmd *cmd) {}
+static void iscsit_dump_pdu_list(struct iscsit_cmd *cmd) {}
 #endif
 
 static void iscsit_ordered_seq_lists(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u8 type)
 {
        u32 i, seq_count = 0;
@@ -70,7 +70,7 @@ static void iscsit_ordered_seq_lists(
 }
 
 static void iscsit_ordered_pdu_lists(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u8 type)
 {
        u32 i, pdu_send_order = 0, seq_no = 0;
@@ -117,7 +117,7 @@ redo:
 }
 
 static int iscsit_randomize_pdu_lists(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u8 type)
 {
        int i = 0;
@@ -167,7 +167,7 @@ redo:
 }
 
 static int iscsit_randomize_seq_lists(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u8 type)
 {
        int i, j = 0;
@@ -199,7 +199,7 @@ static int iscsit_randomize_seq_lists(
 }
 
 static void iscsit_determine_counts_for_list(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_build_list *bl,
        u32 *seq_count,
        u32 *pdu_count)
@@ -208,7 +208,7 @@ static void iscsit_determine_counts_for_list(
        u32 burstlength = 0, offset = 0;
        u32 unsolicited_data_length = 0;
        u32 mdsl;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
                mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
@@ -283,13 +283,13 @@ static void iscsit_determine_counts_for_list(
  *     or DataPDUInOrder=No.
  */
 static int iscsit_do_build_pdu_and_seq_lists(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_build_list *bl)
 {
        int check_immediate = 0, datapduinorder, datasequenceinorder;
        u32 burstlength = 0, offset = 0, i = 0, mdsl;
        u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_pdu *pdu = cmd->pdu_list;
        struct iscsi_seq *seq = cmd->seq_list;
 
@@ -484,16 +484,16 @@ static int iscsit_do_build_pdu_and_seq_lists(
 }
 
 int iscsit_build_pdu_and_seq_lists(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 immediate_data_length)
 {
        struct iscsi_build_list bl;
        u32 pdu_count = 0, seq_count = 1;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_pdu *pdu = NULL;
        struct iscsi_seq *seq = NULL;
 
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na;
 
        /*
@@ -559,7 +559,7 @@ int iscsit_build_pdu_and_seq_lists(
 }
 
 struct iscsi_pdu *iscsit_get_pdu_holder(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 offset,
        u32 length)
 {
@@ -567,7 +567,7 @@ struct iscsi_pdu *iscsit_get_pdu_holder(
        struct iscsi_pdu *pdu = NULL;
 
        if (!cmd->pdu_list) {
-               pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+               pr_err("struct iscsit_cmd->pdu_list is NULL!\n");
                return NULL;
        }
 
@@ -583,15 +583,15 @@ struct iscsi_pdu *iscsit_get_pdu_holder(
 }
 
 struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_seq *seq)
 {
        u32 i;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_pdu *pdu = NULL;
 
        if (!cmd->pdu_list) {
-               pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+               pr_err("struct iscsit_cmd->pdu_list is NULL!\n");
                return NULL;
        }
 
@@ -660,14 +660,14 @@ redo:
 }
 
 struct iscsi_seq *iscsit_get_seq_holder(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 offset,
        u32 length)
 {
        u32 i;
 
        if (!cmd->seq_list) {
-               pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+               pr_err("struct iscsit_cmd->seq_list is NULL!\n");
                return NULL;
        }
 
index 5a09070..288298f 100644
@@ -82,11 +82,11 @@ struct iscsi_seq {
        u32             xfer_len;
 } ____cacheline_aligned;
 
-struct iscsi_cmd;
+struct iscsit_cmd;
 
-extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
-extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
-extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
-extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
+extern int iscsit_build_pdu_and_seq_lists(struct iscsit_cmd *, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsit_cmd *, u32, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsit_cmd *, struct iscsi_seq *);
+extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsit_cmd *, u32, u32);
 
 #endif /* ISCSI_SEQ_AND_PDU_LIST_H */
index cce3a82..367c646 100644
@@ -599,7 +599,7 @@ static ssize_t iscsi_stat_sess_node_show(struct config_item *item, char *page)
 {
        struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
        struct se_node_acl *se_nacl = &acl->se_node_acl;
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_session *se_sess;
        ssize_t ret = 0;
 
@@ -620,7 +620,7 @@ static ssize_t iscsi_stat_sess_indx_show(struct config_item *item, char *page)
 {
        struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
        struct se_node_acl *se_nacl = &acl->se_node_acl;
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_session *se_sess;
        ssize_t ret = 0;
 
@@ -642,7 +642,7 @@ static ssize_t iscsi_stat_sess_cmd_pdus_show(struct config_item *item,
 {
        struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
        struct se_node_acl *se_nacl = &acl->se_node_acl;
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_session *se_sess;
        ssize_t ret = 0;
 
@@ -664,7 +664,7 @@ static ssize_t iscsi_stat_sess_rsp_pdus_show(struct config_item *item,
 {
        struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
        struct se_node_acl *se_nacl = &acl->se_node_acl;
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_session *se_sess;
        ssize_t ret = 0;
 
@@ -686,7 +686,7 @@ static ssize_t iscsi_stat_sess_txdata_octs_show(struct config_item *item,
 {
        struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
        struct se_node_acl *se_nacl = &acl->se_node_acl;
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_session *se_sess;
        ssize_t ret = 0;
 
@@ -708,7 +708,7 @@ static ssize_t iscsi_stat_sess_rxdata_octs_show(struct config_item *item,
 {
        struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
        struct se_node_acl *se_nacl = &acl->se_node_acl;
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_session *se_sess;
        ssize_t ret = 0;
 
@@ -730,7 +730,7 @@ static ssize_t iscsi_stat_sess_conn_digest_errors_show(struct config_item *item,
 {
        struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
        struct se_node_acl *se_nacl = &acl->se_node_acl;
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_session *se_sess;
        ssize_t ret = 0;
 
@@ -752,7 +752,7 @@ static ssize_t iscsi_stat_sess_conn_timeout_errors_show(
 {
        struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
        struct se_node_acl *se_nacl = &acl->se_node_acl;
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_session *se_sess;
        ssize_t ret = 0;
 
index 7d618db..afc801f 100644
 #include "iscsi_target.h"
 
 u8 iscsit_tmr_abort_task(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
-       struct iscsi_cmd *ref_cmd;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_cmd *ref_cmd;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
        struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
        struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
@@ -63,11 +63,11 @@ u8 iscsit_tmr_abort_task(
  *     Called from iscsit_handle_task_mgt_cmd().
  */
 int iscsit_tmr_task_warm_reset(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_tmr_req *tmr_req,
        unsigned char *buf)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        if (!na->tmr_warm_reset) {
@@ -83,11 +83,11 @@ int iscsit_tmr_task_warm_reset(
 }
 
 int iscsit_tmr_task_cold_reset(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct iscsi_tmr_req *tmr_req,
        unsigned char *buf)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        if (!na->tmr_cold_reset) {
@@ -103,11 +103,11 @@ int iscsit_tmr_task_cold_reset(
 }
 
 u8 iscsit_tmr_task_reassign(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        unsigned char *buf)
 {
-       struct iscsi_cmd *ref_cmd = NULL;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_cmd *ref_cmd = NULL;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_conn_recovery *cr = NULL;
        struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
        struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
@@ -175,9 +175,9 @@ u8 iscsit_tmr_task_reassign(
 }
 
 static void iscsit_task_reassign_remove_cmd(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_conn_recovery *cr,
-       struct iscsi_session *sess)
+       struct iscsit_session *sess)
 {
        int ret;
 
@@ -193,9 +193,9 @@ static void iscsit_task_reassign_remove_cmd(
 
 static int iscsit_task_reassign_complete_nop_out(
        struct iscsi_tmr_req *tmr_req,
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
-       struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+       struct iscsit_cmd *cmd = tmr_req->ref_cmd;
        struct iscsi_conn_recovery *cr;
 
        if (!cmd->cr) {
@@ -224,12 +224,12 @@ static int iscsit_task_reassign_complete_nop_out(
 }
 
 static int iscsit_task_reassign_complete_write(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_tmr_req *tmr_req)
 {
        int no_build_r2ts = 0;
        u32 length = 0, offset = 0;
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        /*
         * The Initiator must not send a R2T SNACK with a Begrun less than
@@ -296,10 +296,10 @@ static int iscsit_task_reassign_complete_write(
 }
 
 static int iscsit_task_reassign_complete_read(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_tmr_req *tmr_req)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct iscsi_datain_req *dr;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        /*
@@ -349,10 +349,10 @@ static int iscsit_task_reassign_complete_read(
 }
 
 static int iscsit_task_reassign_complete_none(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        struct iscsi_tmr_req *tmr_req)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_STATUS;
        iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
@@ -361,9 +361,9 @@ static int iscsit_task_reassign_complete_none(
 
 static int iscsit_task_reassign_complete_scsi_cmnd(
        struct iscsi_tmr_req *tmr_req,
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
-       struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+       struct iscsit_cmd *cmd = tmr_req->ref_cmd;
        struct iscsi_conn_recovery *cr;
 
        if (!cmd->cr) {
@@ -410,13 +410,13 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
 
 static int iscsit_task_reassign_complete(
        struct iscsi_tmr_req *tmr_req,
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
        int ret = 0;
 
        if (!tmr_req->ref_cmd) {
-               pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
+               pr_err("TMR Request is missing a RefCmd struct iscsit_cmd.\n");
                return -1;
        }
        cmd = tmr_req->ref_cmd;
@@ -451,7 +451,7 @@ static int iscsit_task_reassign_complete(
  *     Right now the only one that its really needed for is
  *     connection recovery releated TASK_REASSIGN.
  */
-int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_tmr_post_handler(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
 {
        struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
        struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
@@ -469,14 +469,14 @@ EXPORT_SYMBOL(iscsit_tmr_post_handler);
  */
 static int iscsit_task_reassign_prepare_read(
        struct iscsi_tmr_req *tmr_req,
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
        return 0;
 }
 
 static void iscsit_task_reassign_prepare_unsolicited_dataout(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        int i, j;
        struct iscsi_pdu *pdu = NULL;
@@ -544,9 +544,9 @@ static void iscsit_task_reassign_prepare_unsolicited_dataout(
 
 static int iscsit_task_reassign_prepare_write(
        struct iscsi_tmr_req *tmr_req,
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
-       struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+       struct iscsit_cmd *cmd = tmr_req->ref_cmd;
        struct iscsi_pdu *pdu = NULL;
        struct iscsi_r2t *r2t = NULL, *r2t_tmp;
        int first_incomplete_r2t = 1, i = 0;
@@ -575,7 +575,7 @@ static int iscsit_task_reassign_prepare_write(
         *
         * If we have not received all DataOUT in question,  we must
         * make sure to make the appropriate changes to values in
-        * struct iscsi_cmd (and elsewhere depending on session parameters)
+        * struct iscsit_cmd (and elsewhere depending on session parameters)
         * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
         * will resend a new R2T for the DataOUT sequences in question.
         */
@@ -708,7 +708,7 @@ next:
         * to check that the Initiator is not requesting R2Ts for DataOUT
         * sequences it has already completed.
         *
-        * Free each R2T in question and adjust values in struct iscsi_cmd
+        * Free each R2T in question and adjust values in struct iscsit_cmd
         * accordingly so iscsit_build_r2ts_for_cmd() do the rest of
         * the work after the TMR TASK_REASSIGN Response is sent.
         */
@@ -773,13 +773,13 @@ drop_unacknowledged_r2ts:
 
 /*
  *     Performs sanity checks TMR TASK_REASSIGN's ExpDataSN for
- *     a given struct iscsi_cmd.
+ *     a given struct iscsit_cmd.
  */
 int iscsit_check_task_reassign_expdatasn(
        struct iscsi_tmr_req *tmr_req,
-       struct iscsi_conn *conn)
+       struct iscsit_conn *conn)
 {
-       struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;
+       struct iscsit_cmd *ref_cmd = tmr_req->ref_cmd;
 
        if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
                return 0;
index 301f093..3413d0f 100644
@@ -4,18 +4,18 @@
 
 #include <linux/types.h>
 
-struct iscsi_cmd;
-struct iscsi_conn;
+struct iscsit_cmd;
+struct iscsit_conn;
 struct iscsi_tmr_req;
 
-extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
-extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+extern u8 iscsit_tmr_abort_task(struct iscsit_cmd *, unsigned char *);
+extern int iscsit_tmr_task_warm_reset(struct iscsit_conn *, struct iscsi_tmr_req *,
                        unsigned char *);
-extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+extern int iscsit_tmr_task_cold_reset(struct iscsit_conn *, struct iscsi_tmr_req *,
                        unsigned char *);
-extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
-extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern u8 iscsit_tmr_task_reassign(struct iscsit_cmd *, unsigned char *);
+extern int iscsit_tmr_post_handler(struct iscsit_cmd *, struct iscsit_conn *);
 extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
-                       struct iscsi_conn *);
+                       struct iscsit_conn *);
 
 #endif /* ISCSI_TARGET_TMR_H */
index 2d5cf17..4339ee5 100644
@@ -390,7 +390,7 @@ int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
 }
 
 struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
-       struct iscsi_session *sess)
+       struct iscsit_session *sess)
 {
        struct se_session *se_sess = sess->se_sess;
        struct se_node_acl *se_nacl = se_sess->se_node_acl;
index 88576f5..839e453 100644
@@ -5,7 +5,7 @@
 #include <linux/types.h>
 
 struct iscsi_np;
-struct iscsi_session;
+struct iscsit_session;
 struct iscsi_tiqn;
 struct iscsi_tpg_np;
 struct se_node_acl;
@@ -28,7 +28,7 @@ extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
                        struct iscsi_portal_group *, const char *, u32);
 extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
                        struct se_node_acl *);
-extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
+extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsit_session *);
 extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
 extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
 extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
index 6dd5810..8d9f213 100644
@@ -32,7 +32,7 @@ extern struct list_head g_tiqn_list;
 extern spinlock_t tiqn_lock;
 
 int iscsit_add_r2t_to_list(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 offset,
        u32 xfer_len,
        int recovery,
@@ -65,7 +65,7 @@ int iscsit_add_r2t_to_list(
 }
 
 struct iscsi_r2t *iscsit_get_r2t_for_eos(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 offset,
        u32 length)
 {
@@ -86,7 +86,7 @@ struct iscsi_r2t *iscsit_get_r2t_for_eos(
        return NULL;
 }
 
-struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
+struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *cmd)
 {
        struct iscsi_r2t *r2t;
 
@@ -104,7 +104,7 @@ struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
        return NULL;
 }
 
-void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
+void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsit_cmd *cmd)
 {
        lockdep_assert_held(&cmd->r2t_lock);
 
@@ -112,7 +112,7 @@ void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
        kmem_cache_free(lio_r2t_cache, r2t);
 }
 
-void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
+void iscsit_free_r2ts_from_list(struct iscsit_cmd *cmd)
 {
        struct iscsi_r2t *r2t, *r2t_tmp;
 
@@ -152,9 +152,9 @@ static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
  * May be called from software interrupt (timer) context for allocating
  * iSCSI NopINs.
  */
-struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
+struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *conn, int state)
 {
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
        struct se_session *se_sess = conn->sess->se_sess;
        int size, tag, cpu;
 
@@ -164,8 +164,8 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
        if (tag < 0)
                return NULL;
 
-       size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
-       cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
+       size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
+       cmd = (struct iscsit_cmd *)(se_sess->sess_cmd_map + (tag * size));
        memset(cmd, 0, size);
 
        cmd->se_cmd.map_tag = tag;
@@ -187,7 +187,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
 EXPORT_SYMBOL(iscsit_allocate_cmd);
 
 struct iscsi_seq *iscsit_get_seq_holder_for_datain(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 seq_send_order)
 {
        u32 i;
@@ -199,12 +199,12 @@ struct iscsi_seq *iscsit_get_seq_holder_for_datain(
        return NULL;
 }
 
-struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
+struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *cmd)
 {
        u32 i;
 
        if (!cmd->seq_list) {
-               pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+               pr_err("struct iscsit_cmd->seq_list is NULL!\n");
                return NULL;
        }
 
@@ -221,7 +221,7 @@ struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
 }
 
 struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
-       struct iscsi_cmd *cmd,
+       struct iscsit_cmd *cmd,
        u32 r2t_sn)
 {
        struct iscsi_r2t *r2t;
@@ -238,7 +238,7 @@ struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
        return NULL;
 }
 
-static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
+static inline int iscsit_check_received_cmdsn(struct iscsit_session *sess, u32 cmdsn)
 {
        u32 max_cmdsn;
        int ret;
@@ -282,7 +282,7 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
  * Commands may be received out of order if MC/S is in use.
  * Ensure they are executed in CmdSN order.
  */
-int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                        unsigned char *buf, __be32 cmdsn)
 {
        int ret, cmdsn_ret;
@@ -333,9 +333,9 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }
 EXPORT_SYMBOL(iscsit_sequence_cmd);
 
-int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
+int iscsit_check_unsolicited_dataout(struct iscsit_cmd *cmd, unsigned char *buf)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);
@@ -377,11 +377,11 @@ int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
        return 0;
 }
 
-struct iscsi_cmd *iscsit_find_cmd_from_itt(
-       struct iscsi_conn *conn,
+struct iscsit_cmd *iscsit_find_cmd_from_itt(
+       struct iscsit_conn *conn,
        itt_t init_task_tag)
 {
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
 
        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
@@ -398,12 +398,12 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
 }
 EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
 
-struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
-       struct iscsi_conn *conn,
+struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(
+       struct iscsit_conn *conn,
        itt_t init_task_tag,
        u32 length)
 {
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
 
        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
@@ -425,11 +425,11 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
 }
 EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
 
-struct iscsi_cmd *iscsit_find_cmd_from_ttt(
-       struct iscsi_conn *conn,
+struct iscsit_cmd *iscsit_find_cmd_from_ttt(
+       struct iscsit_conn *conn,
        u32 targ_xfer_tag)
 {
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_cmd *cmd = NULL;
 
        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
@@ -446,12 +446,12 @@ struct iscsi_cmd *iscsit_find_cmd_from_ttt(
 }
 
 int iscsit_find_cmd_for_recovery(
-       struct iscsi_session *sess,
-       struct iscsi_cmd **cmd_ptr,
+       struct iscsit_session *sess,
+       struct iscsit_cmd **cmd_ptr,
        struct iscsi_conn_recovery **cr_ptr,
        itt_t init_task_tag)
 {
-       struct iscsi_cmd *cmd = NULL;
+       struct iscsit_cmd *cmd = NULL;
        struct iscsi_conn_recovery *cr;
        /*
         * Scan through the inactive connection recovery list's command list.
@@ -498,8 +498,8 @@ int iscsit_find_cmd_for_recovery(
 }
 
 void iscsit_add_cmd_to_immediate_queue(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn,
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn,
        u8 state)
 {
        struct iscsi_queue_req *qr;
@@ -524,7 +524,7 @@ void iscsit_add_cmd_to_immediate_queue(
 }
 EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
 
-struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *conn)
 {
        struct iscsi_queue_req *qr;
 
@@ -545,8 +545,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
 }
 
 static void iscsit_remove_cmd_from_immediate_queue(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        struct iscsi_queue_req *qr, *qr_tmp;
 
@@ -574,8 +574,8 @@ static void iscsit_remove_cmd_from_immediate_queue(
 }
 
 int iscsit_add_cmd_to_response_queue(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn,
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn,
        u8 state)
 {
        struct iscsi_queue_req *qr;
@@ -599,7 +599,7 @@ int iscsit_add_cmd_to_response_queue(
        return 0;
 }
 
-struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *conn)
 {
        struct iscsi_queue_req *qr;
 
@@ -621,8 +621,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
 }
 
 static void iscsit_remove_cmd_from_response_queue(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        struct iscsi_queue_req *qr, *qr_tmp;
 
@@ -650,7 +650,7 @@ static void iscsit_remove_cmd_from_response_queue(
        }
 }
 
-bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
+bool iscsit_conn_all_queues_empty(struct iscsit_conn *conn)
 {
        bool empty;
 
@@ -668,7 +668,7 @@ bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
        return empty;
 }
 
-void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *conn)
 {
        struct iscsi_queue_req *qr, *qr_tmp;
 
@@ -694,9 +694,9 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->response_queue_lock);
 }
 
-void iscsit_release_cmd(struct iscsi_cmd *cmd)
+void iscsit_release_cmd(struct iscsit_cmd *cmd)
 {
-       struct iscsi_session *sess;
+       struct iscsit_session *sess;
        struct se_cmd *se_cmd = &cmd->se_cmd;
 
        WARN_ON(!list_empty(&cmd->i_conn_node));
@@ -720,9 +720,9 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
 }
 EXPORT_SYMBOL(iscsit_release_cmd);
 
-void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
+void __iscsit_free_cmd(struct iscsit_cmd *cmd, bool check_queues)
 {
-       struct iscsi_conn *conn = cmd->conn;
+       struct iscsit_conn *conn = cmd->conn;
 
        WARN_ON(!list_empty(&cmd->i_conn_node));
 
@@ -742,7 +742,7 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
                conn->conn_transport->iscsit_unmap_cmd(conn, cmd);
 }
 
-void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+void iscsit_free_cmd(struct iscsit_cmd *cmd, bool shutdown)
 {
        struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
        int rc;
@@ -762,7 +762,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 }
 EXPORT_SYMBOL(iscsit_free_cmd);
 
-bool iscsit_check_session_usage_count(struct iscsi_session *sess,
+bool iscsit_check_session_usage_count(struct iscsit_session *sess,
                                      bool can_sleep)
 {
        spin_lock_bh(&sess->session_usage_lock);
@@ -780,7 +780,7 @@ bool iscsit_check_session_usage_count(struct iscsi_session *sess,
        return false;
 }
 
-void iscsit_dec_session_usage_count(struct iscsi_session *sess)
+void iscsit_dec_session_usage_count(struct iscsit_session *sess)
 {
        spin_lock_bh(&sess->session_usage_lock);
        sess->session_usage_count--;
@@ -791,16 +791,16 @@ void iscsit_dec_session_usage_count(struct iscsi_session *sess)
        spin_unlock_bh(&sess->session_usage_lock);
 }
 
-void iscsit_inc_session_usage_count(struct iscsi_session *sess)
+void iscsit_inc_session_usage_count(struct iscsit_session *sess)
 {
        spin_lock_bh(&sess->session_usage_lock);
        sess->session_usage_count++;
        spin_unlock_bh(&sess->session_usage_lock);
 }
 
-struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
+struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *sess, u16 cid)
 {
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
 
        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
@@ -816,9 +816,9 @@ struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
        return NULL;
 }
 
-struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
+struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *sess, u16 cid)
 {
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
 
        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
@@ -836,7 +836,7 @@ struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16
        return NULL;
 }
 
-void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
+void iscsit_check_conn_usage_count(struct iscsit_conn *conn)
 {
        spin_lock_bh(&conn->conn_usage_lock);
        if (conn->conn_usage_count != 0) {
@@ -849,7 +849,7 @@ void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->conn_usage_lock);
 }
 
-void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
+void iscsit_dec_conn_usage_count(struct iscsit_conn *conn)
 {
        spin_lock_bh(&conn->conn_usage_lock);
        conn->conn_usage_count--;
@@ -860,17 +860,17 @@ void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->conn_usage_lock);
 }
 
-void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
+void iscsit_inc_conn_usage_count(struct iscsit_conn *conn)
 {
        spin_lock_bh(&conn->conn_usage_lock);
        conn->conn_usage_count++;
        spin_unlock_bh(&conn->conn_usage_lock);
 }
 
-static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
+static int iscsit_add_nopin(struct iscsit_conn *conn, int want_response)
 {
        u8 state;
-       struct iscsi_cmd *cmd;
+       struct iscsit_cmd *cmd;
 
        cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
        if (!cmd)
@@ -895,8 +895,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
 
 void iscsit_handle_nopin_response_timeout(struct timer_list *t)
 {
-       struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_conn *conn = from_timer(conn, t, nopin_response_timer);
+       struct iscsit_session *sess = conn->sess;
 
        iscsit_inc_conn_usage_count(conn);
 
@@ -919,9 +919,9 @@ void iscsit_handle_nopin_response_timeout(struct timer_list *t)
        iscsit_dec_conn_usage_count(conn);
 }
 
-void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
+void iscsit_mod_nopin_response_timer(struct iscsit_conn *conn)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        spin_lock_bh(&conn->nopin_timer_lock);
@@ -935,9 +935,9 @@ void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->nopin_timer_lock);
 }
 
-void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
+void iscsit_start_nopin_response_timer(struct iscsit_conn *conn)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        spin_lock_bh(&conn->nopin_timer_lock);
@@ -956,7 +956,7 @@ void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
        spin_unlock_bh(&conn->nopin_timer_lock);
 }
 
-void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
+void iscsit_stop_nopin_response_timer(struct iscsit_conn *conn)
 {
        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
@@ -975,7 +975,7 @@ void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
 
 void iscsit_handle_nopin_timeout(struct timer_list *t)
 {
-       struct iscsi_conn *conn = from_timer(conn, t, nopin_timer);
+       struct iscsit_conn *conn = from_timer(conn, t, nopin_timer);
 
        iscsit_inc_conn_usage_count(conn);
 
@@ -992,9 +992,9 @@ void iscsit_handle_nopin_timeout(struct timer_list *t)
        iscsit_dec_conn_usage_count(conn);
 }
 
-void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
+void __iscsit_start_nopin_timer(struct iscsit_conn *conn)
 {
-       struct iscsi_session *sess = conn->sess;
+       struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        lockdep_assert_held(&conn->nopin_timer_lock);
@@ -1016,14 +1016,14 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
                " interval\n", conn->cid, na->nopin_timeout);
 }
 
-void iscsit_start_nopin_timer(struct iscsi_conn *conn)
+void iscsit_start_nopin_timer(struct iscsit_conn *conn)
 {
        spin_lock_bh(&conn->nopin_timer_lock);
        __iscsit_start_nopin_timer(conn);
        spin_unlock_bh(&conn->nopin_timer_lock);
 }
 
-void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
+void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
 {
        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
@@ -1041,8 +1041,8 @@ void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
 }
 
 int iscsit_send_tx_data(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn,
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn,
        int use_misc)
 {
        int tx_sent, tx_size;
@@ -1074,8 +1074,8 @@ send_data:
 }
 
 int iscsit_fe_sendpage_sg(
-       struct iscsi_cmd *cmd,
-       struct iscsi_conn *conn)
+       struct iscsit_cmd *cmd,
+       struct iscsit_conn *conn)
 {
        struct scatterlist *sg = cmd->first_data_sg;
        struct kvec iov;
@@ -1179,7 +1179,7 @@ send_datacrc:
  *      Parameters:     iSCSI Connection, Status Class, Status Detail.
  *      Returns:        0 on success, -1 on error.
  */
-int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
+int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_detail)
 {
        struct iscsi_login_rsp *hdr;
        struct iscsi_login *login = conn->conn_login;
@@ -1198,9 +1198,9 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
        return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
 }
 
-void iscsit_print_session_params(struct iscsi_session *sess)
+void iscsit_print_session_params(struct iscsit_session *sess)
 {
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
 
        pr_debug("-----------------------------[Session Params for"
                " SID: %u]-----------------------------\n", sess->sid);
@@ -1213,7 +1213,7 @@ void iscsit_print_session_params(struct iscsi_session *sess)
 }
 
 int rx_data(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct kvec *iov,
        int iov_count,
        int data)
@@ -1243,7 +1243,7 @@ int rx_data(
 }
 
 int tx_data(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        struct kvec *iov,
        int iov_count,
        int data)
@@ -1279,7 +1279,7 @@ int tx_data(
 }
 
 void iscsit_collect_login_stats(
-       struct iscsi_conn *conn,
+       struct iscsit_conn *conn,
        u8 status_class,
        u8 status_detail)
 {
@@ -1334,7 +1334,7 @@ void iscsit_collect_login_stats(
        spin_unlock(&ls->lock);
 }
 
-struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
+struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *conn)
 {
        struct iscsi_portal_group *tpg;
 
@@ -1351,7 +1351,7 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
        return tpg->tpg_tiqn;
 }
 
-void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *sess)
+void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess)
 {
        struct iscsi_portal_group *tpg = sess->tpg;
        struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
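The iscsit_allocate_cmd() hunk above carves each command out of a pre-sized per-session slot: sizeof(struct iscsit_cmd) plus the transport's priv_size, indexed by the allocated tag. The following is a minimal userspace sketch of that layout; iscsit_cmd_stub, my_transport_priv and priv_of() are hypothetical stand-ins, not kernel structures, but the pointer arithmetic mirrors what iscsit_priv_cmd() does with (void *)(cmd + 1).

/*
 * Userspace sketch only; iscsit_cmd_stub and my_transport_priv are
 * hypothetical stand-ins, not the kernel structures.
 */
#include <stdio.h>
#include <stdlib.h>

struct iscsit_cmd_stub { int i_state; };            /* stand-in for struct iscsit_cmd */
struct my_transport_priv { int in_flight_pdus; };   /* stand-in for a transport's private data */

/* Mirrors iscsit_priv_cmd(): the private area sits directly after the cmd. */
static void *priv_of(struct iscsit_cmd_stub *cmd)
{
	return (void *)(cmd + 1);
}

int main(void)
{
	size_t slot = sizeof(struct iscsit_cmd_stub) + sizeof(struct my_transport_priv);
	struct iscsit_cmd_stub *cmd = calloc(1, slot);  /* one pre-sized command slot */
	struct my_transport_priv *priv;

	if (!cmd)
		return 1;
	priv = priv_of(cmd);
	priv->in_flight_pdus = 3;
	printf("slot is %zu bytes, priv starts %zu bytes in, value %d\n",
	       slot, (size_t)((char *)priv - (char *)cmd), priv->in_flight_pdus);
	free(cmd);
	return 0;
}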
index 8ee1c13..33ea799 100644 (file)
@@ -7,66 +7,66 @@
 
 #define MARKER_SIZE    8
 
-struct iscsi_cmd;
-struct iscsi_conn;
+struct iscsit_cmd;
+struct iscsit_conn;
 struct iscsi_conn_recovery;
-struct iscsi_session;
+struct iscsit_session;
 
-extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
-extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
-extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
-extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
-extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
-extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
-extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
-extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
-extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
-extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+extern int iscsit_add_r2t_to_list(struct iscsit_cmd *, u32, u32, int, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsit_cmd *, u32, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *);
+extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsit_cmd *);
+extern void iscsit_free_r2ts_from_list(struct iscsit_cmd *);
+extern struct iscsit_cmd *iscsit_alloc_cmd(struct iscsit_conn *, gfp_t);
+extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsit_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *);
+extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsit_cmd *, u32);
+extern int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                               unsigned char * ,__be32 cmdsn);
-extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
+extern int iscsit_check_unsolicited_dataout(struct iscsit_cmd *, unsigned char *);
+extern struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsit_conn *,
                        itt_t, u32);
-extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
-extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
+extern struct iscsit_cmd *iscsit_find_cmd_from_ttt(struct iscsit_conn *, u32);
+extern int iscsit_find_cmd_for_recovery(struct iscsit_session *, struct iscsit_cmd **,
                        struct iscsi_conn_recovery **, itt_t);
-extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
-extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
-extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
-extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
-extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
-extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
-extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void __iscsit_free_cmd(struct iscsi_cmd *, bool);
-extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
-extern bool iscsit_check_session_usage_count(struct iscsi_session *sess, bool can_sleep);
-extern void iscsit_dec_session_usage_count(struct iscsi_session *);
-extern void iscsit_inc_session_usage_count(struct iscsi_session *);
-extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
-extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
-extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
-extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
-extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *, struct iscsit_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *);
+extern int iscsit_add_cmd_to_response_queue(struct iscsit_cmd *, struct iscsit_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *);
+extern void iscsit_remove_cmd_from_tx_queues(struct iscsit_cmd *, struct iscsit_conn *);
+extern bool iscsit_conn_all_queues_empty(struct iscsit_conn *);
+extern void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *);
+extern void iscsit_release_cmd(struct iscsit_cmd *);
+extern void __iscsit_free_cmd(struct iscsit_cmd *, bool);
+extern void iscsit_free_cmd(struct iscsit_cmd *, bool);
+extern bool iscsit_check_session_usage_count(struct iscsit_session *sess, bool can_sleep);
+extern void iscsit_dec_session_usage_count(struct iscsit_session *);
+extern void iscsit_inc_session_usage_count(struct iscsit_session *);
+extern struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *, u16);
+extern struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *, u16);
+extern void iscsit_check_conn_usage_count(struct iscsit_conn *);
+extern void iscsit_dec_conn_usage_count(struct iscsit_conn *);
+extern void iscsit_inc_conn_usage_count(struct iscsit_conn *);
 extern void iscsit_handle_nopin_response_timeout(struct timer_list *t);
-extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
-extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
-extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_mod_nopin_response_timer(struct iscsit_conn *);
+extern void iscsit_start_nopin_response_timer(struct iscsit_conn *);
+extern void iscsit_stop_nopin_response_timer(struct iscsit_conn *);
 extern void iscsit_handle_nopin_timeout(struct timer_list *t);
-extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
-extern void iscsit_start_nopin_timer(struct iscsi_conn *);
-extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
-extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
-extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
-extern void iscsit_print_session_params(struct iscsi_session *);
+extern void __iscsit_start_nopin_timer(struct iscsit_conn *);
+extern void iscsit_start_nopin_timer(struct iscsit_conn *);
+extern void iscsit_stop_nopin_timer(struct iscsit_conn *);
+extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
+extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
+extern void iscsit_print_session_params(struct iscsit_session *);
 extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
 extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
 extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
-extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
-extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
-extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
-extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
-extern void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *);
+extern int rx_data(struct iscsit_conn *, struct kvec *, int, int);
+extern int tx_data(struct iscsit_conn *, struct kvec *, int, int);
+extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8);
+extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *);
+extern void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *);
 
 #endif /*** ISCSI_TARGET_UTIL_H ***/
index 4c86697..bbcbbfa 100644 (file)
@@ -72,6 +72,9 @@ static struct config_group target_core_hbagroup;
 static struct config_group alua_group;
 static struct config_group alua_lu_gps_group;
 
+static unsigned int target_devices;
+static DEFINE_MUTEX(target_devices_lock);
+
 static inline struct se_hba *
 item_to_hba(struct config_item *item)
 {
@@ -105,51 +108,48 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
 {
        ssize_t read_bytes;
        struct file *fp;
+       ssize_t r = -EINVAL;
 
-       mutex_lock(&g_tf_lock);
-       if (!list_empty(&g_tf_list)) {
-               mutex_unlock(&g_tf_lock);
-               pr_err("db_root: cannot be changed: target drivers registered");
-               return -EINVAL;
+       mutex_lock(&target_devices_lock);
+       if (target_devices) {
+               pr_err("db_root: cannot be changed because it's in use\n");
+               goto unlock;
        }
 
        if (count > (DB_ROOT_LEN - 1)) {
-               mutex_unlock(&g_tf_lock);
                pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
                       (int)count, DB_ROOT_LEN - 1);
-               return -EINVAL;
+               goto unlock;
        }
 
        read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
-       if (!read_bytes) {
-               mutex_unlock(&g_tf_lock);
-               return -EINVAL;
-       }
+       if (!read_bytes)
+               goto unlock;
+
        if (db_root_stage[read_bytes - 1] == '\n')
                db_root_stage[read_bytes - 1] = '\0';
 
        /* validate new db root before accepting it */
        fp = filp_open(db_root_stage, O_RDONLY, 0);
        if (IS_ERR(fp)) {
-               mutex_unlock(&g_tf_lock);
                pr_err("db_root: cannot open: %s\n", db_root_stage);
-               return -EINVAL;
+               goto unlock;
        }
        if (!S_ISDIR(file_inode(fp)->i_mode)) {
                filp_close(fp, NULL);
-               mutex_unlock(&g_tf_lock);
                pr_err("db_root: not a directory: %s\n", db_root_stage);
-               return -EINVAL;
+               goto unlock;
        }
        filp_close(fp, NULL);
 
        strncpy(db_root, db_root_stage, read_bytes);
-
-       mutex_unlock(&g_tf_lock);
-
        pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
 
-       return read_bytes;
+       r = read_bytes;
+
+unlock:
+       mutex_unlock(&target_devices_lock);
+       return r;
 }
 
 CONFIGFS_ATTR(target_core_item_, dbroot);
@@ -3316,6 +3316,10 @@ static struct config_group *target_core_make_subdev(
         */
        target_stat_setup_dev_default_groups(dev);
 
+       mutex_lock(&target_devices_lock);
+       target_devices++;
+       mutex_unlock(&target_devices_lock);
+
        mutex_unlock(&hba->hba_access_mutex);
        return &dev->dev_group;
 
@@ -3354,6 +3358,11 @@ static void target_core_drop_subdev(
         * se_dev is released from target_core_dev_item_ops->release()
         */
        config_item_put(item);
+
+       mutex_lock(&target_devices_lock);
+       target_devices--;
+       mutex_unlock(&target_devices_lock);
+
        mutex_unlock(&hba->hba_access_mutex);
 }
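
The reworked dbroot store above replaces the scattered unlock-and-return error paths with a single exit label under the new target_devices_lock. Below is a small userspace sketch of the same control flow; the pthread mutex, store_db_root() and the buffer size are illustrative assumptions, not the driver's API.

/*
 * Userspace sketch only; pthread mutex and store_db_root() stand in for
 * target_devices_lock and the configfs store handler.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t devices_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int target_devices;     /* incremented per configured device */
static char db_root[64];

static int store_db_root(const char *page)
{
	int ret = -1;                    /* assume failure until proven otherwise */

	pthread_mutex_lock(&devices_lock);
	if (target_devices) {
		fprintf(stderr, "db_root: cannot be changed because it's in use\n");
		goto unlock;
	}
	if (strlen(page) >= sizeof(db_root))
		goto unlock;             /* too long; same single exit path */

	strcpy(db_root, page);
	ret = (int)strlen(page);
unlock:
	pthread_mutex_unlock(&devices_lock);
	return ret;
}

int main(void)
{
	printf("first store:  %d\n", store_db_root("/var/target"));
	target_devices = 1;              /* a device now exists */
	printf("second store: %d\n", store_db_root("/tmp"));
	return 0;
}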
 
index 4c76498..c14441c 100644 (file)
@@ -769,7 +769,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
                }
        }
 
-       pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+       pr_debug("Unknown VPD Code: 0x%02x\n", cdb[2]);
        ret = TCM_INVALID_CDB_FIELD;
 
 out:
index fd7267b..3deaeec 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/configfs.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
+#include <linux/pagemap.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -1660,17 +1661,37 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
                                unsigned long last)
 {
-       XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk);
        struct page *page;
+       unsigned long dpi;
        u32 pages_freed = 0;
 
-       xas_lock(&xas);
-       xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) {
-               xas_store(&xas, NULL);
+       first = first * udev->data_pages_per_blk;
+       last = (last + 1) * udev->data_pages_per_blk - 1;
+       xa_for_each_range(&udev->data_pages, dpi, page, first, last) {
+               xa_erase(&udev->data_pages, dpi);
+               /*
+                * While reaching here there may be page faults occurring on
+                * the to-be-released pages. A race condition may occur if
+                * unmap_mapping_range() is called before page faults on these
+                * pages have completed; a valid but stale map is created.
+                *
+                * If another command subsequently runs and needs to extend
+                * dbi_thresh, it may reuse the slot corresponding to the
+                * previous page in data_bitmap. Though we will allocate a new
+                * page for the slot in data_area, no page fault will happen
+                * because we have a valid map. Therefore the command's data
+                * will be lost.
+                *
+                * We lock and unlock pages that are to be released to ensure
+                * all page faults have completed. This way
+                * unmap_mapping_range() can ensure stale maps are cleanly
+                * removed.
+                */
+               lock_page(page);
+               unlock_page(page);
                __free_page(page);
                pages_freed++;
        }
-       xas_unlock(&xas);
 
        atomic_sub(pages_freed, &global_page_count);
 
@@ -1822,6 +1843,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
        page = xa_load(&udev->data_pages, dpi);
        if (likely(page)) {
                get_page(page);
+               lock_page(page);
                mutex_unlock(&udev->cmdr_lock);
                return page;
        }
@@ -1863,6 +1885,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
        struct page *page;
        unsigned long offset;
        void *addr;
+       vm_fault_t ret = 0;
 
        int mi = tcmu_find_mem_index(vmf->vma);
        if (mi < 0)
@@ -1887,10 +1910,11 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
                page = tcmu_try_get_data_page(udev, dpi);
                if (!page)
                        return VM_FAULT_SIGBUS;
+               ret = VM_FAULT_LOCKED;
        }
 
        vmf->page = page;
-       return 0;
+       return ret;
 }
 
 static const struct vm_operations_struct tcmu_vm_ops = {
@@ -3205,12 +3229,22 @@ static void find_free_blocks(void)
                        udev->dbi_max = block;
                }
 
+               /*
+                * Release the block pages.
+                *
+                * Also note that since tcmu_vma_fault() gets an extra page
+                * refcount, tcmu_blocks_release() won't free pages if pages
+                * are mapped. This means it is safe to call
+                * tcmu_blocks_release() before unmap_mapping_range() which
+                * drops the refcount of any pages it unmaps and thus releases
+                * them.
+                */
+               pages_freed = tcmu_blocks_release(udev, start, end - 1);
+
                /* Here will truncate the data area from off */
                off = udev->data_off + (loff_t)start * udev->data_blk_size;
                unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
 
-               /* Release the block pages */
-               pages_freed = tcmu_blocks_release(udev, start, end - 1);
                mutex_unlock(&udev->cmdr_lock);
 
                total_pages_freed += pages_freed;
index 310e0db..2797821 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
+#include <linux/local_lock.h>
 #include <linux/random.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/libfc.h>
@@ -327,6 +328,7 @@ struct fcoe_percpu_s {
        struct sk_buff_head fcoe_rx_list;
        struct page *crc_eof_page;
        int crc_eof_offset;
+       local_lock_t lock;
 };
 
 /**
index d0a2477..c0703cd 100644 (file)
@@ -54,9 +54,9 @@ enum {
 #define ISID_SIZE                      6
 
 /* Connection flags */
-#define ISCSI_CONN_FLAG_SUSPEND_TX     BIT(0)
-#define ISCSI_CONN_FLAG_SUSPEND_RX     BIT(1)
-#define ISCSI_CONN_FLAG_BOUND          BIT(2)
+#define ISCSI_CONN_FLAG_SUSPEND_TX     0
+#define ISCSI_CONN_FLAG_SUSPEND_RX     1
+#define ISCSI_CONN_FLAG_BOUND          2
 
 #define ISCSI_ITT_MASK                 0x1fff
 #define ISCSI_TOTAL_CMDS_MAX           4096
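
The hunk above turns the ISCSI_CONN_FLAG_* definitions from BIT() masks into plain bit numbers, presumably because they are consumed by the kernel's set_bit()/test_bit()/clear_bit() helpers, which take a bit index rather than a mask. A compilable userspace sketch (set_bit_ul() and test_bit_ul() are simplified stand-ins for those helpers) shows why passing a mask where a bit number is expected would silently target the wrong flag.

/*
 * Userspace sketch only; set_bit_ul()/test_bit_ul() are simplified stand-ins
 * for the kernel's set_bit()/test_bit(), which take bit numbers.
 */
#include <stdio.h>

#define ISCSI_CONN_FLAG_SUSPEND_TX	0	/* bit number, no longer BIT(0) */
#define ISCSI_CONN_FLAG_SUSPEND_RX	1

static void set_bit_ul(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_bit_ul(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

int main(void)
{
	unsigned long flags = 0;

	/* With the old BIT(0) == 1 value, this call would have set bit 1,
	 * i.e. the SUSPEND_RX flag, instead of SUSPEND_TX. */
	set_bit_ul(ISCSI_CONN_FLAG_SUSPEND_TX, &flags);

	printf("tx suspended: %d\n", test_bit_ul(ISCSI_CONN_FLAG_SUSPEND_TX, &flags));
	printf("rx suspended: %d\n", test_bit_ul(ISCSI_CONN_FLAG_SUSPEND_RX, &flags));
	return 0;
}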
index d47dea7..a1df4f9 100644 (file)
@@ -34,6 +34,7 @@ void sas_resume_sata(struct asd_sas_port *port);
 void sas_ata_end_eh(struct ata_port *ap);
 int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
                        int force_phy_id);
+int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline);
 #else
 
 
@@ -91,6 +92,12 @@ static inline int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
 {
        return 0;
 }
+
+static inline int sas_ata_wait_after_reset(struct domain_device *dev,
+                                          unsigned long deadline)
+{
+       return -ETIMEDOUT;
+}
 #endif
 
 #endif /* _SAS_ATA_H_ */
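
The sas_ata.h hunk above follows the usual kernel header pattern: a real prototype when the SAS ATA support is built in, and a static inline stub returning -ETIMEDOUT otherwise, so callers need no #ifdefs of their own. Below is a standalone sketch of that pattern; CONFIG_HAS_FEATURE and feature_wait_after_reset() are made-up names used purely for illustration.

/*
 * Standalone sketch only; CONFIG_HAS_FEATURE and feature_wait_after_reset()
 * are made-up names illustrating the declaration-or-stub header pattern.
 */
#include <stdio.h>
#include <errno.h>

#ifdef CONFIG_HAS_FEATURE
int feature_wait_after_reset(int dev, unsigned long deadline);
#else
static inline int feature_wait_after_reset(int dev, unsigned long deadline)
{
	(void)dev;
	(void)deadline;
	return -ETIMEDOUT;	/* same fallback the sas_ata stub uses */
}
#endif

int main(void)
{
	/* Callers are written once; the stub keeps them compiling when the
	 * feature is configured out. */
	printf("wait returned %d\n", feature_wait_after_reset(0, 100));
	return 0;
}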
index 57e3e23..7cf5f3b 100644 (file)
@@ -100,6 +100,10 @@ struct scsi_vpd {
        unsigned char   data[];
 };
 
+enum scsi_vpd_parameters {
+       SCSI_VPD_HEADER_SIZE = 4,
+};
+
 struct scsi_device {
        struct Scsi_Host *host;
        struct request_queue *request_queue;
@@ -141,11 +145,14 @@ struct scsi_device {
        const char * model;             /* ... after scan; point to static string */
        const char * rev;               /* ... "nullnullnullnull" before scan */
 
-#define SCSI_VPD_PG_LEN                255
        struct scsi_vpd __rcu *vpd_pg0;
        struct scsi_vpd __rcu *vpd_pg83;
        struct scsi_vpd __rcu *vpd_pg80;
        struct scsi_vpd __rcu *vpd_pg89;
+       struct scsi_vpd __rcu *vpd_pgb0;
+       struct scsi_vpd __rcu *vpd_pgb1;
+       struct scsi_vpd __rcu *vpd_pgb2;
+
        struct scsi_target      *sdev_target;
 
        blist_flags_t           sdev_bflags; /* black/white flags as also found in
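
The scsi_device.h hunk above adds cached copies of VPD pages B0, B1 and B2 next to the existing ones and introduces SCSI_VPD_HEADER_SIZE = 4, which covers the standard SPC VPD header: device type byte, page code byte, and a big-endian page length in bytes 2 and 3. A short userspace sketch of parsing that header follows; the buffer contents are fabricated for illustration.

/*
 * Userspace sketch only; the buffer contents are fabricated and stand in
 * for a cached Block Limits (0xB0) VPD page.
 */
#include <stdio.h>

#define SCSI_VPD_HEADER_SIZE 4

int main(void)
{
	/* Byte 0: peripheral qualifier/device type, byte 1: page code,
	 * bytes 2-3: big-endian page length (payload bytes that follow). */
	unsigned char vpd[] = { 0x00, 0xb0, 0x00, 0x3c };
	unsigned int page_len = (vpd[2] << 8) | vpd[3];

	printf("page 0x%02x, payload %u bytes, %u bytes including header\n",
	       vpd[1], page_len, page_len + SCSI_VPD_HEADER_SIZE);
	return 0;
}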
index f017843..c03e35f 100644 (file)
@@ -307,7 +307,9 @@ enum zbc_zone_type {
        ZBC_ZONE_TYPE_CONV              = 0x1,
        ZBC_ZONE_TYPE_SEQWRITE_REQ      = 0x2,
        ZBC_ZONE_TYPE_SEQWRITE_PREF     = 0x3,
-       /* 0x4 to 0xf are reserved */
+       ZBC_ZONE_TYPE_SEQ_OR_BEFORE_REQ = 0x4,
+       ZBC_ZONE_TYPE_GAP               = 0x5,
+       /* 0x6 to 0xf are reserved */
 };
 
 /* Zone conditions of REPORT ZONES zone descriptors */
@@ -323,6 +325,11 @@ enum zbc_zone_cond {
        ZBC_ZONE_COND_OFFLINE           = 0xf,
 };
 
+enum zbc_zone_alignment_method {
+       ZBC_CONSTANT_ZONE_LENGTH        = 0x1,
+       ZBC_CONSTANT_ZONE_START_OFFSET  = 0x8,
+};
+
 /* Version descriptor values for INQUIRY */
 enum scsi_version_descriptor {
        SCSI_VERSION_DESCRIPTOR_FCP4    = 0x0a40,
index adc87de..8e68ace 100644 (file)
@@ -143,7 +143,7 @@ enum tiqn_state_table {
        TIQN_STATE_SHUTDOWN                     = 2,
 };
 
-/* struct iscsi_cmd->cmd_flags */
+/* struct iscsit_cmd->cmd_flags */
 enum cmd_flags_table {
        ICF_GOT_LAST_DATAOUT                    = 0x00000001,
        ICF_GOT_DATACK_SNACK                    = 0x00000002,
@@ -157,7 +157,7 @@ enum cmd_flags_table {
        ICF_SENDTARGETS_SINGLE                  = 0x00000200,
 };
 
-/* struct iscsi_cmd->i_state */
+/* struct iscsit_cmd->i_state */
 enum cmd_i_state_table {
        ISTATE_NO_STATE                 = 0,
        ISTATE_NEW_CMD                  = 1,
@@ -297,7 +297,7 @@ struct iscsi_sess_ops {
 
 struct iscsi_queue_req {
        int                     state;
-       struct iscsi_cmd        *cmd;
+       struct iscsit_cmd       *cmd;
        struct list_head        qr_list;
 };
 
@@ -327,7 +327,7 @@ struct iscsi_ooo_cmdsn {
        u32                     batch_count;
        u32                     cmdsn;
        u32                     exp_cmdsn;
-       struct iscsi_cmd        *cmd;
+       struct iscsit_cmd       *cmd;
        struct list_head        ooo_list;
 } ____cacheline_aligned;
 
@@ -349,7 +349,7 @@ struct iscsi_r2t {
        struct list_head        r2t_list;
 } ____cacheline_aligned;
 
-struct iscsi_cmd {
+struct iscsit_cmd {
        enum iscsi_timer_flags_table dataout_timer_flags;
        /* DataOUT timeout retries */
        u8                      dataout_timeout_retries;
@@ -405,22 +405,22 @@ struct iscsi_cmd {
        u32                     outstanding_r2ts;
        /* Next R2T Offset when DataSequenceInOrder=Yes */
        u32                     r2t_offset;
-       /* Iovec current and orig count for iscsi_cmd->iov_data */
+       /* Iovec current and orig count for iscsit_cmd->iov_data */
        u32                     iov_data_count;
        u32                     orig_iov_data_count;
        /* Number of miscellaneous iovecs used for IP stack calls */
        u32                     iov_misc_count;
-       /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+       /* Number of struct iscsi_pdu in struct iscsit_cmd->pdu_list */
        u32                     pdu_count;
-       /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+       /* Next struct iscsi_pdu to send in struct iscsit_cmd->pdu_list */
        u32                     pdu_send_order;
-       /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+       /* Current struct iscsi_pdu in struct iscsit_cmd->pdu_list */
        u32                     pdu_start;
-       /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+       /* Next struct iscsi_seq to send in struct iscsit_cmd->seq_list */
        u32                     seq_send_order;
-       /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+       /* Number of struct iscsi_seq in struct iscsit_cmd->seq_list */
        u32                     seq_count;
-       /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+       /* Current struct iscsi_seq in struct iscsit_cmd->seq_list */
        u32                     seq_no;
        /* Lowest offset in current DataOUT sequence */
        u32                     seq_start_offset;
@@ -444,12 +444,12 @@ struct iscsi_cmd {
        enum dma_data_direction data_direction;
        /* iSCSI PDU Header + CRC */
        unsigned char           pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
-       /* Number of times struct iscsi_cmd is present in immediate queue */
+       /* Number of times struct iscsit_cmd is present in immediate queue */
        atomic_t                immed_queue_count;
        atomic_t                response_queue_count;
        spinlock_t              datain_lock;
        spinlock_t              dataout_timeout_lock;
-       /* spinlock for protecting struct iscsi_cmd->i_state */
+       /* spinlock for protecting struct iscsit_cmd->i_state */
        spinlock_t              istate_lock;
        /* spinlock for adding within command recovery entries */
        spinlock_t              error_lock;
@@ -478,11 +478,11 @@ struct iscsi_cmd {
        /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
        struct iscsi_tmr_req    *tmr_req;
        /* Connection this command is alligient to */
-       struct iscsi_conn       *conn;
+       struct iscsit_conn      *conn;
        /* Pointer to connection recovery entry */
        struct iscsi_conn_recovery *cr;
        /* Session the command is part of,  used for connection recovery */
-       struct iscsi_session    *sess;
+       struct iscsit_session   *sess;
        /* list_head for connection list */
        struct list_head        i_conn_node;
        /* The TCM I/O descriptor that is accessed via container_of() */
@@ -503,12 +503,12 @@ struct iscsi_cmd {
 struct iscsi_tmr_req {
        bool                    task_reassign:1;
        u32                     exp_data_sn;
-       struct iscsi_cmd        *ref_cmd;
+       struct iscsit_cmd       *ref_cmd;
        struct iscsi_conn_recovery *conn_recovery;
        struct se_tmr_req       *se_tmr_req;
 };
 
-struct iscsi_conn {
+struct iscsit_conn {
        wait_queue_head_t       queues_wq;
        /* Authentication Successful for this connection */
        u8                      auth_complete;
@@ -583,7 +583,7 @@ struct iscsi_conn {
        cpumask_var_t           allowed_cpumask;
        unsigned int            conn_rx_reset_cpumask:1;
        unsigned int            conn_tx_reset_cpumask:1;
-       /* list_head of struct iscsi_cmd for this connection */
+       /* list_head of struct iscsit_cmd for this connection */
        struct list_head        conn_cmd_list;
        struct list_head        immed_queue_list;
        struct list_head        response_queue_list;
@@ -598,7 +598,7 @@ struct iscsi_conn {
        struct iscsi_portal_group *tpg;
        struct iscsi_tpg_np     *tpg_np;
        /* Pointer to parent session */
-       struct iscsi_session    *sess;
+       struct iscsit_session   *sess;
        int                     bitmap_id;
        int                     rx_thread_active;
        struct task_struct      *rx_thread;
@@ -618,11 +618,11 @@ struct iscsi_conn_recovery {
        struct list_head        conn_recovery_cmd_list;
        spinlock_t              conn_recovery_cmd_lock;
        struct timer_list       time2retain_timer;
-       struct iscsi_session    *sess;
+       struct iscsit_session   *sess;
        struct list_head        cr_list;
 }  ____cacheline_aligned;
 
-struct iscsi_session {
+struct iscsit_session {
        u8                      initiator_vendor;
        u8                      isid[6];
        enum iscsi_timer_flags_table time2retain_timer_flags;
@@ -710,7 +710,7 @@ struct iscsi_login {
        char rsp[ISCSI_HDR_LEN];
        char *req_buf;
        char *rsp_buf;
-       struct iscsi_conn *conn;
+       struct iscsit_conn *conn;
        struct iscsi_np *np;
 } ____cacheline_aligned;
 
@@ -885,7 +885,7 @@ struct iscsit_global {
        struct iscsi_portal_group       *discovery_tpg;
 };
 
-static inline u32 session_get_next_ttt(struct iscsi_session *session)
+static inline u32 session_get_next_ttt(struct iscsit_session *session)
 {
        u32 ttt;
 
@@ -898,9 +898,9 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session)
        return ttt;
 }
 
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+extern struct iscsit_cmd *iscsit_find_cmd_from_itt(struct iscsit_conn *, itt_t);
 
-extern void iscsit_thread_check_cpumask(struct iscsi_conn *conn,
+extern void iscsit_thread_check_cpumask(struct iscsit_conn *conn,
                                        struct task_struct *p,
                                        int mode);
 
index b8feba7..42cfe02 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#include "iscsi_target_core.h" /* struct iscsi_cmd */
+#include "iscsi_target_core.h" /* struct iscsit_cmd */
 
 struct sockaddr_storage;
 
@@ -12,29 +12,29 @@ struct iscsit_transport {
        struct module *owner;
        struct list_head t_node;
        int (*iscsit_setup_np)(struct iscsi_np *, struct sockaddr_storage *);
-       int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
+       int (*iscsit_accept_np)(struct iscsi_np *, struct iscsit_conn *);
        void (*iscsit_free_np)(struct iscsi_np *);
-       void (*iscsit_wait_conn)(struct iscsi_conn *);
-       void (*iscsit_free_conn)(struct iscsi_conn *);
-       int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
-       int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
-       int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
-       int (*iscsit_response_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
-       int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
-       int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
-       int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
-       void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
-       int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
+       void (*iscsit_wait_conn)(struct iscsit_conn *);
+       void (*iscsit_free_conn)(struct iscsit_conn *);
+       int (*iscsit_get_login_rx)(struct iscsit_conn *, struct iscsi_login *);
+       int (*iscsit_put_login_tx)(struct iscsit_conn *, struct iscsi_login *, u32);
+       int (*iscsit_immediate_queue)(struct iscsit_conn *, struct iscsit_cmd *, int);
+       int (*iscsit_response_queue)(struct iscsit_conn *, struct iscsit_cmd *, int);
+       int (*iscsit_get_dataout)(struct iscsit_conn *, struct iscsit_cmd *, bool);
+       int (*iscsit_queue_data_in)(struct iscsit_conn *, struct iscsit_cmd *);
+       int (*iscsit_queue_status)(struct iscsit_conn *, struct iscsit_cmd *);
+       void (*iscsit_aborted_task)(struct iscsit_conn *, struct iscsit_cmd *);
+       int (*iscsit_xmit_pdu)(struct iscsit_conn *, struct iscsit_cmd *,
                               struct iscsi_datain_req *, const void *, u32);
-       void (*iscsit_unmap_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
-       void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
-       int (*iscsit_validate_params)(struct iscsi_conn *);
-       void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
+       void (*iscsit_unmap_cmd)(struct iscsit_conn *, struct iscsit_cmd *);
+       void (*iscsit_get_rx_pdu)(struct iscsit_conn *);
+       int (*iscsit_validate_params)(struct iscsit_conn *);
+       void (*iscsit_get_r2t_ttt)(struct iscsit_conn *, struct iscsit_cmd *,
                                   struct iscsi_r2t *);
-       enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
+       enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsit_conn *);
 };
 
-static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
+static inline void *iscsit_priv_cmd(struct iscsit_cmd *cmd)
 {
        return (void *)(cmd + 1);
 }
@@ -51,100 +51,100 @@ extern void iscsit_put_transport(struct iscsit_transport *);
 /*
  * From iscsi_target.c
  */
-extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_scsi_cmd(struct iscsit_conn *, struct iscsit_cmd *,
                                unsigned char *);
-extern void iscsit_set_unsolicited_dataout(struct iscsi_cmd *);
-extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *);
+extern int iscsit_process_scsi_cmd(struct iscsit_conn *, struct iscsit_cmd *,
                                struct iscsi_scsi_req *);
 extern int
-__iscsit_check_dataout_hdr(struct iscsi_conn *, void *,
-                          struct iscsi_cmd *, u32, bool *);
+__iscsit_check_dataout_hdr(struct iscsit_conn *, void *,
+                          struct iscsit_cmd *, u32, bool *);
 extern int
-iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
-                        struct iscsi_cmd **out_cmd);
-extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *,
+iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
+                        struct iscsit_cmd **out_cmd);
+extern int iscsit_check_dataout_payload(struct iscsit_cmd *, struct iscsi_data *,
                                bool);
-extern int iscsit_setup_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_nop_out(struct iscsit_conn *, struct iscsit_cmd *,
                                struct iscsi_nopout *);
-extern int iscsit_process_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_process_nop_out(struct iscsit_conn *, struct iscsit_cmd *,
                                struct iscsi_nopout *);
-extern int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_handle_logout_cmd(struct iscsit_conn *, struct iscsit_cmd *,
                                unsigned char *);
-extern int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_handle_task_mgt_cmd(struct iscsit_conn *, struct iscsit_cmd *,
                                unsigned char *);
-extern int iscsit_setup_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_setup_text_cmd(struct iscsit_conn *, struct iscsit_cmd *,
                                 struct iscsi_text *);
-extern int iscsit_process_text_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_process_text_cmd(struct iscsit_conn *, struct iscsit_cmd *,
                                   struct iscsi_text *);
-extern void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_rsp_pdu(struct iscsit_cmd *, struct iscsit_conn *,
                                bool, struct iscsi_scsi_rsp *);
-extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_nopin_rsp(struct iscsit_cmd *, struct iscsit_conn *,
                                struct iscsi_nopin *, bool);
-extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_task_mgt_rsp(struct iscsit_cmd *, struct iscsit_conn *,
                                struct iscsi_tm_rsp *);
-extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_build_text_rsp(struct iscsit_cmd *, struct iscsit_conn *,
                                struct iscsi_text_rsp *,
                                enum iscsit_transport_type);
-extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
+extern void iscsit_build_reject(struct iscsit_cmd *, struct iscsit_conn *,
                                struct iscsi_reject *);
-extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_build_logout_rsp(struct iscsit_cmd *, struct iscsit_conn *,
                                struct iscsi_logout_rsp *);
-extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *);
-extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *);
-extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *);
-extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *);
-extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *);
-extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+extern int iscsit_logout_post_handler(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_queue_rsp(struct iscsit_conn *, struct iscsit_cmd *);
+extern void iscsit_aborted_task(struct iscsit_conn *, struct iscsit_cmd *);
+extern int iscsit_add_reject(struct iscsit_conn *, u8, unsigned char *);
+extern int iscsit_reject_cmd(struct iscsit_cmd *, u8, unsigned char *);
+extern int iscsit_handle_snack(struct iscsit_conn *, unsigned char *);
+extern void iscsit_build_datain_pdu(struct iscsit_cmd *, struct iscsit_conn *,
                                    struct iscsi_datain *,
                                    struct iscsi_data_rsp *, bool);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *,
                                     bool);
-extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
-extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+extern int iscsit_immediate_queue(struct iscsit_conn *, struct iscsit_cmd *, int);
+extern int iscsit_response_queue(struct iscsit_conn *, struct iscsit_cmd *, int);
 /*
  * From iscsi_target_device.c
  */
-extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsit_cmd *, struct iscsit_session *);
 /*
  * From iscsi_target_erl0.c
  */
-extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+extern void iscsit_cause_connection_reinstatement(struct iscsit_conn *, int);
 /*
  * From iscsi_target_erl1.c
  */
-extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_stop_dataout_timer(struct iscsit_cmd *);
 
 /*
  * From iscsi_target_tmr.c
  */
-extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_tmr_post_handler(struct iscsit_cmd *, struct iscsit_conn *);
 
 /*
  * From iscsi_target_util.c
  */
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
-extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int);
+extern int iscsit_sequence_cmd(struct iscsit_conn *, struct iscsit_cmd *,
                               unsigned char *, __be32);
-extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
-extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *,
-                                             struct iscsi_conn *, u8);
-extern struct iscsi_cmd *
-iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *conn,
+extern void iscsit_release_cmd(struct iscsit_cmd *);
+extern void iscsit_free_cmd(struct iscsit_cmd *, bool);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *,
+                                             struct iscsit_conn *, u8);
+extern struct iscsit_cmd *
+iscsit_find_cmd_from_itt_or_dump(struct iscsit_conn *conn,
                                 itt_t init_task_tag, u32 length);
 
 /*
  * From iscsi_target_nego.c
  */
-extern int iscsi_target_check_login_request(struct iscsi_conn *,
+extern int iscsi_target_check_login_request(struct iscsit_conn *,
                                            struct iscsi_login *);
 
 /*
  * From iscsi_target_login.c
  */
 extern __printf(2, 3) int iscsi_change_param_sprintf(
-       struct iscsi_conn *, const char *, ...);
+       struct iscsit_conn *, const char *, ...);
 
 /*
  * From iscsi_target_parameters.c
diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
new file mode 100644 (file)
index 0000000..fdc3517
--- /dev/null
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef SCSI_BSG_MPI3MR_H_INCLUDED
+#define SCSI_BSG_MPI3MR_H_INCLUDED
+
+#include <linux/types.h>
+
+/* Definitions for BSG commands */
+#define MPI3MR_IOCTL_VERSION                   0x06
+
+#define MPI3MR_APP_DEFAULT_TIMEOUT             (60) /*seconds*/
+
+#define MPI3MR_BSG_ADPTYPE_UNKNOWN             0
+#define MPI3MR_BSG_ADPTYPE_AVGFAMILY           1
+
+#define MPI3MR_BSG_ADPSTATE_UNKNOWN            0
+#define MPI3MR_BSG_ADPSTATE_OPERATIONAL                1
+#define MPI3MR_BSG_ADPSTATE_FAULT              2
+#define MPI3MR_BSG_ADPSTATE_IN_RESET           3
+#define MPI3MR_BSG_ADPSTATE_UNRECOVERABLE      4
+
+#define MPI3MR_BSG_ADPRESET_UNKNOWN            0
+#define MPI3MR_BSG_ADPRESET_SOFT               1
+#define MPI3MR_BSG_ADPRESET_DIAG_FAULT         2
+
+#define MPI3MR_BSG_LOGDATA_MAX_ENTRIES         400
+#define MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ     4
+
+#define MPI3MR_DRVBSG_OPCODE_UNKNOWN           0
+#define MPI3MR_DRVBSG_OPCODE_ADPINFO           1
+#define MPI3MR_DRVBSG_OPCODE_ADPRESET          2
+#define MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO     4
+#define MPI3MR_DRVBSG_OPCODE_GETCHGCNT         5
+#define MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE     6
+#define MPI3MR_DRVBSG_OPCODE_PELENABLE         7
+#define MPI3MR_DRVBSG_OPCODE_GETLOGDATA                8
+#define MPI3MR_DRVBSG_OPCODE_QUERY_HDB         9
+#define MPI3MR_DRVBSG_OPCODE_REPOST_HDB                10
+#define MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB                11
+#define MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS      12
+
+
+#define MPI3MR_BSG_BUFTYPE_UNKNOWN             0
+#define MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD                1
+#define MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP       2
+#define MPI3MR_BSG_BUFTYPE_DATA_IN             3
+#define MPI3MR_BSG_BUFTYPE_DATA_OUT            4
+#define MPI3MR_BSG_BUFTYPE_MPI_REPLY           5
+#define MPI3MR_BSG_BUFTYPE_ERR_RESPONSE                6
+#define MPI3MR_BSG_BUFTYPE_MPI_REQUEST         0xFE
+
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_UNKNOWN   0
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS    1
+#define MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS   2
+
+#define MPI3MR_HDB_BUFTYPE_UNKNOWN             0
+#define MPI3MR_HDB_BUFTYPE_TRACE               1
+#define MPI3MR_HDB_BUFTYPE_FIRMWARE            2
+#define MPI3MR_HDB_BUFTYPE_RESERVED            3
+
+#define MPI3MR_HDB_BUFSTATUS_UNKNOWN           0
+#define MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED     1
+#define MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED   2
+#define MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED     3
+#define MPI3MR_HDB_BUFSTATUS_RELEASED          4
+
+#define MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN                0
+#define MPI3MR_HDB_TRIGGER_TYPE_DIAGFAULT      1
+#define MPI3MR_HDB_TRIGGER_TYPE_ELEMENT                2
+#define MPI3MR_HDB_TRIGGER_TYPE_MASTER         3
+
+
+/* Supported BSG commands */
+enum command {
+       MPI3MR_DRV_CMD = 1,
+       MPI3MR_MPT_CMD = 2,
+};
+
+/**
+ * struct mpi3_driver_info_layout - Information about the driver
+ *
+ * @information_length: Length of this structure in bytes
+ * @driver_signature: Driver Vendor name
+ * @os_name: Operating System Name
+ * @os_version: Operating System Version
+ * @driver_name: Driver name
+ * @driver_version: Driver version
+ * @driver_release_date: Driver release date
+ * @driver_capabilities: Driver capabilities
+ */
+struct mpi3_driver_info_layout {
+       __le32  information_length;
+       __u8    driver_signature[12];
+       __u8    os_name[16];
+       __u8    os_version[12];
+       __u8    driver_name[20];
+       __u8    driver_version[32];
+       __u8    driver_release_date[20];
+       __le32  driver_capabilities;
+};
+
+/**
+ * struct mpi3mr_bsg_in_adpinfo - Adapter information returned
+ * by the driver for the adapter information request.
+ *
+ * @adp_type: Adapter type
+ * @rsvd1: Reserved
+ * @pci_dev_id: PCI device ID of the adapter
+ * @pci_dev_hw_rev: PCI revision of the adapter
+ * @pci_subsys_dev_id: PCI subsystem device ID of the adapter
+ * @pci_subsys_ven_id: PCI subsystem vendor ID of the adapter
+ * @pci_dev: PCI device
+ * @pci_func: PCI function
+ * @pci_bus: PCI bus
+ * @rsvd2: Reserved
+ * @pci_seg_id: PCI segment ID
+ * @app_intfc_ver: Version of the application interface definition
+ * @adp_state: Operational state of the adapter
+ * @rsvd3: Reserved
+ * @rsvd4: Reserved
+ * @rsvd5: Reserved
+ * @driver_info: Driver Information (Version/Name)
+ */
+struct mpi3mr_bsg_in_adpinfo {
+       __u32   adp_type;
+       __u32   rsvd1;
+       __u32   pci_dev_id;
+       __u32   pci_dev_hw_rev;
+       __u32   pci_subsys_dev_id;
+       __u32   pci_subsys_ven_id;
+       __u32   pci_dev:5;
+       __u32   pci_func:3;
+       __u32   pci_bus:8;
+       __u16   rsvd2;
+       __u32   pci_seg_id;
+       __u32   app_intfc_ver;
+       __u8    adp_state;
+       __u8    rsvd3;
+       __u16   rsvd4;
+       __u32   rsvd5[2];
+       struct mpi3_driver_info_layout driver_info;
+};
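
The PCI location above is packed into bitfields plus a separate segment ID; the following is a small illustrative helper, not part of this patch, that renders it in the usual seg:bus:dev.func form (it assumes the structure was filled in by an MPI3MR_DRVBSG_OPCODE_ADPINFO request and that this header installs as <scsi/scsi_bsg_mpi3mr.h>):

#include <stdio.h>
#include <scsi/scsi_bsg_mpi3mr.h>	/* assumed install path for this header */

/* Format the adapter's PCI address from the bitfields above. */
static void print_pci_addr(const struct mpi3mr_bsg_in_adpinfo *info)
{
	printf("%04x:%02x:%02x.%x\n", info->pci_seg_id, info->pci_bus,
	       info->pci_dev, info->pci_func);
}
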
+
+/**
+ * struct mpi3mr_bsg_adp_reset - Adapter reset request
+ * payload data to the driver.
+ *
+ * @reset_type: Reset type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_adp_reset {
+       __u8    reset_type;
+       __u8    rsvd1;
+       __u16   rsvd2;
+};
+
+/**
+ * struct mpi3mr_change_count - Topology change count
+ * returned by the driver.
+ *
+ * @change_count: Topology change count
+ * @rsvd: Reserved
+ */
+struct mpi3mr_change_count {
+       __u16   change_count;
+       __u16   rsvd;
+};
+
+/**
+ * struct mpi3mr_device_map_info - Target device mapping
+ * information
+ *
+ * @handle: Firmware device handle
+ * @perst_id: Persistent ID assigned by the firmware
+ * @target_id: Target ID assigned by the driver
+ * @bus_id: Bus ID assigned by the driver
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_device_map_info {
+       __u16   handle;
+       __u16   perst_id;
+       __u32   target_id;
+       __u8    bus_id;
+       __u8    rsvd1;
+       __u16   rsvd2;
+};
+
+/**
+ * struct mpi3mr_all_tgt_info - Target device mapping
+ * information returned by the driver
+ *
+ * @num_devices: The number of devices in the driver's inventory
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @dmi: Variable length array of mapping information of targets
+ */
+struct mpi3mr_all_tgt_info {
+       __u16   num_devices;
+       __u16   rsvd1;
+       __u32   rsvd2;
+       struct mpi3mr_device_map_info dmi[1];
+};
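
Because dmi[] is declared with the old one-element-array idiom rather than a flexible array member, consumers must size and walk it using num_devices. Below is a hedged sketch, assuming the buffer returned for MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO is laid out exactly as this structure:

#include <stdio.h>
#include <scsi/scsi_bsg_mpi3mr.h>	/* assumed install path for this header */

/* Walk the variable-length device map returned for ALLTGTDEVINFO. */
static void print_target_map(const struct mpi3mr_all_tgt_info *info)
{
	__u16 i;

	for (i = 0; i < info->num_devices; i++)
		printf("handle 0x%04x persist_id %u -> %u:%u\n",
		       info->dmi[i].handle, info->dmi[i].perst_id,
		       info->dmi[i].bus_id, info->dmi[i].target_id);
}
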
+
+/**
+ * struct mpi3mr_logdata_enable - Number of log data entries
+ * cached by the driver, returned as payload data for the
+ * enable-logdata BSG request.
+ *
+ * @max_entries: Number of log data entries cached by the driver
+ * @rsvd: Reserved
+ */
+struct mpi3mr_logdata_enable {
+       __u16   max_entries;
+       __u16   rsvd;
+};
+
+/**
+ * struct mpi3mr_bsg_out_pel_enable - PEL enable request payload
+ * data to the driver.
+ *
+ * @pel_locale: PEL locale to the firmware
+ * @pel_class: PEL class to the firmware
+ * @rsvd: Reserved
+ */
+struct mpi3mr_bsg_out_pel_enable {
+       __u16   pel_locale;
+       __u8    pel_class;
+       __u8    rsvd;
+};
+
+/**
+ * struct mpi3mr_logdata_entry - Log data entry cached by the
+ * driver.
+ *
+ * @valid_entry: Is the entry valid
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @data: Variable length Log entry data
+ */
+struct mpi3mr_logdata_entry {
+       __u8    valid_entry;
+       __u8    rsvd1;
+       __u16   rsvd2;
+       __u8    data[1]; /* Variable length Array */
+};
+
+/**
+ * struct mpi3mr_bsg_in_log_data - Log data entries cached by
+ * the driver, returned as payload data for the get-logdata
+ * BSG request.
+ *
+ * @entry: Variable length Log data entry array
+ */
+struct mpi3mr_bsg_in_log_data {
+       struct mpi3mr_logdata_entry entry[1];
+};
+
+/**
+ * struct mpi3mr_hdb_entry - host diag buffer entry.
+ *
+ * @buf_type: Buffer type
+ * @status: Buffer status
+ * @trigger_type: Trigger type
+ * @rsvd1: Reserved
+ * @size: Buffer size
+ * @rsvd2: Reserved
+ * @trigger_data: Trigger specific data
+ * @rsvd3: Reserved
+ * @rsvd4: Reserved
+ */
+struct mpi3mr_hdb_entry {
+       __u8    buf_type;
+       __u8    status;
+       __u8    trigger_type;
+       __u8    rsvd1;
+       __u16   size;
+       __u16   rsvd2;
+       __u64   trigger_data;
+       __u32   rsvd3;
+       __u32   rsvd4;
+};
+
+
+/**
+ * struct mpi3mr_bsg_in_hdb_status - This structure contains
+ * return data for the BSG request to retrieve the number of host
+ * diagnostic buffers supported by the driver and their current
+ * status and additional status specific data if any in forms of
+ * multiple hdb entries.
+ *
+ * @num_hdb_types: Number of host diag buffer types supported
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @rsvd3: Reserved
+ * @entry: Variable length Diag buffer status entry array
+ */
+struct mpi3mr_bsg_in_hdb_status {
+       __u8    num_hdb_types;
+       __u8    rsvd1;
+       __u16   rsvd2;
+       __u32   rsvd3;
+       struct mpi3mr_hdb_entry entry[1];
+};
+
+/**
+ * struct mpi3mr_bsg_out_repost_hdb - Repost host diagnostic
+ * buffer request payload data to the driver.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_out_repost_hdb {
+       __u8    buf_type;
+       __u8    rsvd1;
+       __u16   rsvd2;
+};
+
+/**
+ * struct mpi3mr_bsg_out_upload_hdb - Upload host diagnostic
+ * buffer request payload data to the driver.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @start_offset: Start offset of the buffer from where to copy
+ * @length: Length of the buffer to copy
+ */
+struct mpi3mr_bsg_out_upload_hdb {
+       __u8    buf_type;
+       __u8    rsvd1;
+       __u16   rsvd2;
+       __u32   start_offset;
+       __u32   length;
+};
+
+/**
+ * struct mpi3mr_bsg_out_refresh_hdb_triggers - Refresh host
+ * diagnostic buffer triggers request payload data to the driver.
+ *
+ * @page_type: Page type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_out_refresh_hdb_triggers {
+       __u8    page_type;
+       __u8    rsvd1;
+       __u16   rsvd2;
+};
+/**
+ * struct mpi3mr_bsg_drv_cmd - Generic bsg data
+ * structure for all driver-specific requests.
+ *
+ * @mrioc_id: Controller ID
+ * @opcode: Driver specific opcode
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ */
+struct mpi3mr_bsg_drv_cmd {
+       __u8    mrioc_id;
+       __u8    opcode;
+       __u16   rsvd1;
+       __u32   rsvd2[4];
+};
+/**
+ * struct mpi3mr_bsg_in_reply_buf - MPI reply buffer returned
+ * for an MPI Passthrough request.
+ *
+ * @mpi_reply_type: Type of MPI reply
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @reply_buf: Variable-length buffer whose layout depends on the MPI reply type
+ */
+struct mpi3mr_bsg_in_reply_buf {
+       __u8    mpi_reply_type;
+       __u8    rsvd1;
+       __u16   rsvd2;
+       __u8    reply_buf[1];
+};
+
+/**
+ * struct mpi3mr_buf_entry - User buffer descriptor for MPI
+ * Passthrough requests.
+ *
+ * @buf_type: Buffer type
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @buf_len: Buffer length
+ */
+struct mpi3mr_buf_entry {
+       __u8    buf_type;
+       __u8    rsvd1;
+       __u16   rsvd2;
+       __u32   buf_len;
+};
+/**
+ * struct mpi3mr_buf_entry_list - List of user buffer
+ * descriptors for MPI Passthrough requests.
+ *
+ * @num_of_entries: Number of buffer descriptors
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @rsvd3: Reserved
+ * @buf_entry: Variable length array of buffer descriptors
+ */
+struct mpi3mr_buf_entry_list {
+       __u8    num_of_entries;
+       __u8    rsvd1;
+       __u16   rsvd2;
+       __u32   rsvd3;
+       struct mpi3mr_buf_entry buf_entry[1];
+};
+/**
+ * struct mpi3mr_bsg_mptcmd - Generic bsg data
+ * structure for all MPI Passthrough requests.
+ *
+ * @mrioc_id: Controller ID
+ * @rsvd1: Reserved
+ * @timeout: MPI request timeout
+ * @rsvd2: Reserved
+ * @buf_entry_list: Buffer descriptor list
+ */
+struct mpi3mr_bsg_mptcmd {
+       __u8    mrioc_id;
+       __u8    rsvd1;
+       __u16   timeout;
+       __u32   rsvd2;
+       struct mpi3mr_buf_entry_list buf_entry_list;
+};
+
+/**
+ * struct mpi3mr_bsg_packet - Generic bsg data
+ * structure for all supported requests.
+ *
+ * @cmd_type: Command type, MPI3MR_DRV_CMD or MPI3MR_MPT_CMD
+ * @rsvd1: Reserved
+ * @rsvd2: Reserved
+ * @rsvd3: Reserved
+ * @cmd.drvrcmd: Driver request structure
+ * @cmd.mptcmd: MPI Passthrough request structure
+ */
+struct mpi3mr_bsg_packet {
+       __u8    cmd_type;
+       __u8    rsvd1;
+       __u16   rsvd2;
+       __u32   rsvd3;
+       union {
+               struct mpi3mr_bsg_drv_cmd drvrcmd;
+               struct mpi3mr_bsg_mptcmd mptcmd;
+       } cmd;
+};
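
To make the request model concrete, here is a minimal, hedged userspace sketch of issuing an MPI3MR_DRVBSG_OPCODE_ADPINFO request. It assumes the driver exposes a bsg node (the /dev/bsg/mpi3mr0 path is illustrative), that this header installs as <scsi/scsi_bsg_mpi3mr.h>, and that the adapter-information payload is returned through the sg_io_v4 data-in buffer; none of these assumptions come from this patch, so verify them against the driver's bsg handling before relying on the sketch.

/* Hedged sketch: query adapter info through the mpi3mr bsg interface. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_mpi3mr.h>	/* assumed install path for this header */

int main(void)
{
	struct mpi3mr_bsg_packet pkt;
	struct mpi3mr_bsg_in_adpinfo adpinfo;
	struct sg_io_v4 io;
	int fd, ret;

	fd = open("/dev/bsg/mpi3mr0", O_RDWR);	/* node name is an assumption */
	if (fd < 0)
		return 1;

	memset(&pkt, 0, sizeof(pkt));
	pkt.cmd_type = MPI3MR_DRV_CMD;
	pkt.cmd.drvrcmd.mrioc_id = 0;
	pkt.cmd.drvrcmd.opcode = MPI3MR_DRVBSG_OPCODE_ADPINFO;

	memset(&adpinfo, 0, sizeof(adpinfo));
	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&pkt;
	io.request_len = sizeof(pkt);
	io.din_xferp = (uintptr_t)&adpinfo;	/* assumed: reply lands in the data-in buffer */
	io.din_xfer_len = sizeof(adpinfo);
	io.timeout = MPI3MR_APP_DEFAULT_TIMEOUT * 1000;	/* sg_io_v4 timeout is in ms */

	ret = ioctl(fd, SG_IO, &io);
	if (!ret)
		printf("adapter state %u, interface version 0x%x\n",
		       adpinfo.adp_state, adpinfo.app_intfc_ver);
	close(fd);
	return ret ? 1 : 0;
}
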
+
+
+/* MPI3: NVMe Encapsulation related definitions */
+#ifndef MPI3_NVME_ENCAP_CMD_MAX
+#define MPI3_NVME_ENCAP_CMD_MAX               (1)
+#endif
+
+struct mpi3_nvme_encapsulated_request {
+       __le16  host_tag;
+       __u8    ioc_use_only02;
+       __u8    function;
+       __le16  ioc_use_only04;
+       __u8    ioc_use_only06;
+       __u8    msg_flags;
+       __le16  change_count;
+       __le16  dev_handle;
+       __le16  encapsulated_command_length;
+       __le16  flags;
+       __le32  data_length;
+       __le32  reserved14[3];
+       __le32  command[MPI3_NVME_ENCAP_CMD_MAX];
+};
+
+struct mpi3_nvme_encapsulated_error_reply {
+       __le16  host_tag;
+       __u8    ioc_use_only02;
+       __u8    function;
+       __le16  ioc_use_only04;
+       __u8    ioc_use_only06;
+       __u8    msg_flags;
+       __le16  ioc_use_only08;
+       __le16  ioc_status;
+       __le32  ioc_log_info;
+       __le32  nvme_completion_entry[4];
+};
+
+#define        MPI3MR_NVME_PRP_SIZE            8 /* PRP size */
+#define        MPI3MR_NVME_CMD_PRP1_OFFSET     24 /* PRP1 offset in NVMe cmd */
+#define        MPI3MR_NVME_CMD_PRP2_OFFSET     32 /* PRP2 offset in NVMe cmd */
+#define        MPI3MR_NVME_CMD_SGL_OFFSET      24 /* SGL offset in NVMe cmd */
+#define MPI3MR_NVME_DATA_FORMAT_PRP    0
+#define MPI3MR_NVME_DATA_FORMAT_SGL1   1
+#define MPI3MR_NVME_DATA_FORMAT_SGL2   2
+
+/* MPI3: task management related definitions */
+struct mpi3_scsi_task_mgmt_request {
+       __le16  host_tag;
+       __u8    ioc_use_only02;
+       __u8    function;
+       __le16  ioc_use_only04;
+       __u8    ioc_use_only06;
+       __u8    msg_flags;
+       __le16  change_count;
+       __le16  dev_handle;
+       __le16  task_host_tag;
+       __u8    task_type;
+       __u8    reserved0f;
+       __le16  task_request_queue_id;
+       __le16  reserved12;
+       __le32  reserved14;
+       __u8    lun[8];
+};
+
+#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU      (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK               (0x01)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET           (0x02)
+#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET             (0x03)
+#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET       (0x05)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET           (0x06)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK               (0x07)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA                (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET           (0x09)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT        (0x0a)
+#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET          (0x0b)
+struct mpi3_scsi_task_mgmt_reply {
+       __le16  host_tag;
+       __u8    ioc_use_only02;
+       __u8    function;
+       __le16  ioc_use_only04;
+       __u8    ioc_use_only06;
+       __u8    msg_flags;
+       __le16  ioc_use_only08;
+       __le16  ioc_status;
+       __le32  ioc_log_info;
+       __le32  termination_count;
+       __le32  response_data;
+       __le32  reserved18;
+};
+
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE                (0x00)
+#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME              (0x02)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED  (0x04)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED                  (0x05)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED               (0x08)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN             (0x09)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG          (0x0a)
+#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC           (0x80)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED             (0x81)
+
+/* MPI3: PEL related definitions */
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT   (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT       (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE                      (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION             (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLER                 (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS                       (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK                     (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE                 (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD                        (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD                        (0x0001)
+#define MPI3_PEL_CLASS_DEBUG                            (0x00)
+#define MPI3_PEL_CLASS_PROGRESS                         (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL                    (0x02)
+#define MPI3_PEL_CLASS_WARNING                          (0x03)
+#define MPI3_PEL_CLASS_CRITICAL                         (0x04)
+#define MPI3_PEL_CLASS_FATAL                            (0x05)
+#define MPI3_PEL_CLASS_FAULT                            (0x06)
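
The pel_locale field of struct mpi3mr_bsg_out_pel_enable is a bitmask of the locale flags above, and pel_class takes one of the class values. A hedged illustration follows; whether the class acts as a severity threshold or an exact match is firmware policy and is not stated in this header:

#include <scsi/scsi_bsg_mpi3mr.h>	/* assumed install path for this header */

/* Illustrative PEL-enable payload: controller plus PD/VD events, warning class. */
static const struct mpi3mr_bsg_out_pel_enable pel_enable_example = {
	.pel_locale = MPI3_PEL_LOCALE_FLAGS_CONTROLER |
		      MPI3_PEL_LOCALE_FLAGS_PD |
		      MPI3_PEL_LOCALE_FLAGS_VD,
	.pel_class = MPI3_PEL_CLASS_WARNING,
};
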
+
+/* MPI3: Function definitions */
+#define MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH              (0x0a)
+#define MPI3_BSG_FUNCTION_SCSI_IO                       (0x20)
+#define MPI3_BSG_FUNCTION_SCSI_TASK_MGMT                (0x21)
+#define MPI3_BSG_FUNCTION_SMP_PASSTHROUGH               (0x22)
+#define MPI3_BSG_FUNCTION_NVME_ENCAPSULATED             (0x24)
+
+#endif