Merge remote-tracking branch 'mkp-scsi/fixes' into fixes
author    James Bottomley <James.Bottomley@HansenPartnership.com>
          Tue, 7 Mar 2017 23:13:02 +0000 (15:13 -0800)
committer James Bottomley <James.Bottomley@HansenPartnership.com>
          Tue, 7 Mar 2017 23:13:02 +0000 (15:13 -0800)
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/scsi_lib.c

diff --combined drivers/scsi/libiscsi.c
@@@ -26,7 -26,6 +26,7 @@@
  #include <linux/delay.h>
  #include <linux/log2.h>
  #include <linux/slab.h>
 +#include <linux/sched/signal.h>
  #include <linux/module.h>
  #include <asm/unaligned.h>
  #include <net/tcp.h>
@@@ -561,8 -560,12 +561,12 @@@ static void iscsi_complete_task(struct 
        WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
        task->state = state;
  
-       if (!list_empty(&task->running))
+       spin_lock_bh(&conn->taskqueuelock);
+       if (!list_empty(&task->running)) {
+               pr_debug_once("%s while task on list", __func__);
                list_del_init(&task->running);
+       }
+       spin_unlock_bh(&conn->taskqueuelock);
  
        if (conn->task == task)
                conn->task = NULL;
@@@ -784,7 -787,9 +788,9 @@@ __iscsi_conn_send_pdu(struct iscsi_con
                if (session->tt->xmit_task(task))
                        goto free_task;
        } else {
+               spin_lock_bh(&conn->taskqueuelock);
                list_add_tail(&task->running, &conn->mgmtqueue);
+               spin_unlock_bh(&conn->taskqueuelock);
                iscsi_conn_queue_work(conn);
        }
  
@@@ -1475,8 -1480,10 +1481,10 @@@ void iscsi_requeue_task(struct iscsi_ta
         * this may be on the requeue list already if the xmit_task callout
         * is handling the r2ts while we are adding new ones
         */
+       spin_lock_bh(&conn->taskqueuelock);
        if (list_empty(&task->running))
                list_add_tail(&task->running, &conn->requeue);
+       spin_unlock_bh(&conn->taskqueuelock);
        iscsi_conn_queue_work(conn);
  }
  EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@@ -1513,22 -1520,26 +1521,26 @@@ static int iscsi_data_xmit(struct iscsi
         * only have one nop-out as a ping from us and targets should not
         * overflow us with nop-ins
         */
+       spin_lock_bh(&conn->taskqueuelock);
  check_mgmt:
        while (!list_empty(&conn->mgmtqueue)) {
                conn->task = list_entry(conn->mgmtqueue.next,
                                         struct iscsi_task, running);
                list_del_init(&conn->task->running);
+               spin_unlock_bh(&conn->taskqueuelock);
                if (iscsi_prep_mgmt_task(conn, conn->task)) {
                        /* regular RX path uses back_lock */
                        spin_lock_bh(&conn->session->back_lock);
                        __iscsi_put_task(conn->task);
                        spin_unlock_bh(&conn->session->back_lock);
                        conn->task = NULL;
+                       spin_lock_bh(&conn->taskqueuelock);
                        continue;
                }
                rc = iscsi_xmit_task(conn);
                if (rc)
                        goto done;
+               spin_lock_bh(&conn->taskqueuelock);
        }
  
        /* process pending command queue */
                conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
                                        running);
                list_del_init(&conn->task->running);
+               spin_unlock_bh(&conn->taskqueuelock);
                if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
                        fail_scsi_task(conn->task, DID_IMM_RETRY);
+                       spin_lock_bh(&conn->taskqueuelock);
                        continue;
                }
                rc = iscsi_prep_scsi_cmd_pdu(conn->task);
                if (rc) {
                        if (rc == -ENOMEM || rc == -EACCES) {
+                               spin_lock_bh(&conn->taskqueuelock);
                                list_add_tail(&conn->task->running,
                                              &conn->cmdqueue);
                                conn->task = NULL;
+                               spin_unlock_bh(&conn->taskqueuelock);
                                goto done;
                        } else
                                fail_scsi_task(conn->task, DID_ABORT);
+                       spin_lock_bh(&conn->taskqueuelock);
                        continue;
                }
                rc = iscsi_xmit_task(conn);
                 * we need to check the mgmt queue for nops that need to
                * be sent to avoid starvation
                 */
+               spin_lock_bh(&conn->taskqueuelock);
                if (!list_empty(&conn->mgmtqueue))
                        goto check_mgmt;
        }
                conn->task = task;
                list_del_init(&conn->task->running);
                conn->task->state = ISCSI_TASK_RUNNING;
+               spin_unlock_bh(&conn->taskqueuelock);
                rc = iscsi_xmit_task(conn);
                if (rc)
                        goto done;
+               spin_lock_bh(&conn->taskqueuelock);
                if (!list_empty(&conn->mgmtqueue))
                        goto check_mgmt;
        }
+       spin_unlock_bh(&conn->taskqueuelock);
        spin_unlock_bh(&conn->session->frwd_lock);
        return -ENODATA;
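
The iscsi_data_xmit() hunks above all follow one locking shape: the new
conn->taskqueuelock is held only while a queue is inspected and a task
unlinked, and is dropped before iscsi_prep_mgmt_task()/iscsi_xmit_task()
run, then retaken before the next list check. A minimal sketch of that
drain loop, using the field names from the diff (transmit_one() is an
illustrative placeholder, not a libiscsi function):

	spin_lock_bh(&conn->taskqueuelock);
	while (!list_empty(&conn->mgmtqueue)) {
		task = list_first_entry(&conn->mgmtqueue,
					struct iscsi_task, running);
		list_del_init(&task->running);	/* unlink under the lock */
		spin_unlock_bh(&conn->taskqueuelock);
		rc = transmit_one(task);	/* slow work, lock dropped */
		spin_lock_bh(&conn->taskqueuelock);
		if (rc)
			break;			/* lock still held here */
	}
	spin_unlock_bh(&conn->taskqueuelock);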
  
@@@ -1739,7 -1759,9 +1760,9 @@@ int iscsi_queuecommand(struct Scsi_Hos
                        goto prepd_reject;
                }
        } else {
+               spin_lock_bh(&conn->taskqueuelock);
                list_add_tail(&task->running, &conn->cmdqueue);
+               spin_unlock_bh(&conn->taskqueuelock);
                iscsi_conn_queue_work(conn);
        }
  
@@@ -2897,6 -2919,7 +2920,7 @@@ iscsi_conn_setup(struct iscsi_cls_sessi
        INIT_LIST_HEAD(&conn->mgmtqueue);
        INIT_LIST_HEAD(&conn->cmdqueue);
        INIT_LIST_HEAD(&conn->requeue);
+       spin_lock_init(&conn->taskqueuelock);
        INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
  
        /* allocate login_task used for the login/text sequences */
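
The setup hunk above initializes taskqueuelock before INIT_WORK() makes the
xmit worker runnable. Every producer in this diff (__iscsi_conn_send_pdu(),
iscsi_requeue_task(), iscsi_queuecommand()) then uses the same shape: link
the task under the lock, drop it, kick the worker. A hedged sketch of that
producer side (queue_task_and_kick() is an illustrative name, not a
libiscsi symbol):

	static void queue_task_and_kick(struct iscsi_conn *conn,
					struct iscsi_task *task)
	{
		spin_lock_bh(&conn->taskqueuelock);
		if (list_empty(&task->running))	/* avoid double-queueing */
			list_add_tail(&task->running, &conn->cmdqueue);
		spin_unlock_bh(&conn->taskqueuelock);
		iscsi_conn_queue_work(conn);	/* schedule iscsi_xmitworker */
	}

The _bh variant is used on both sides because iscsi_complete_task() can run
from the network RX bottom half and takes the same lock.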
diff --combined drivers/scsi/lpfc/lpfc_attr.c
@@@ -3010,6 -3010,12 +3010,12 @@@ MODULE_PARM_DESC(lpfc_poll, "FCP ring p
  static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
                   lpfc_poll_show, lpfc_poll_store);
  
+ int lpfc_no_hba_reset_cnt;
+ unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
 + MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
 +
  LPFC_ATTR(sli_mode, 0, 0, 3,
        "SLI mode selector:"
        " 0 - auto (SLI-3 if supported),"
@@@ -3631,7 -3637,7 +3637,7 @@@ static DEVICE_ATTR(lpfc_static_vport, S
   * @buf: Data buffer.
   * @count: Size of the data buffer.
   *
 - * This function get called when an user write to the lpfc_stat_data_ctrl
 + * This function gets called when a user writes to the lpfc_stat_data_ctrl
   * sysfs file. This function parses the command written to the sysfs file
   * and takes appropriate action. These commands are used for controlling
   * driver statistical data collection.
@@@ -4451,7 -4457,8 +4457,8 @@@ lpfc_fcp_imax_store(struct device *dev
                return -EINVAL;
  
        phba->cfg_fcp_imax = (uint32_t)val;
-       for (i = 0; i < phba->io_channel_irqs; i++)
+       for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
                lpfc_modify_hba_eq_delay(phba, i);
  
        return strlen(buf);
diff --combined drivers/scsi/lpfc/lpfc_sli.c
@@@ -1,3 -1,4 +1,4 @@@
  /*******************************************************************
   * This file is part of the Emulex Linux Device Driver for         *
   * Fibre Channel Host Bus Adapters.                                *
@@@ -952,7 -953,7 +953,7 @@@ __lpfc_sli_get_els_sglq(struct lpfc_hb
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
-                       return NULL;
+                       break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                    ndlp->active_rrqs_xri_bitmap)) {
@@@ -4673,7 -4674,7 +4674,7 @@@ lpfc_sli4_rb_setup(struct lpfc_hba *phb
   * @phba: Pointer to HBA context object.
   * @sli_mode: sli mode - 2/3
   *
 - * This function is called by the sli intialization code path
 + * This function is called by the sli initialization code path
   * to issue config_port mailbox command. This function restarts the
   * HBA firmware and issues a config_port mailbox command to configure
   * the SLI interface in the sli mode specified by sli_mode
@@@ -4813,11 -4814,11 +4814,11 @@@ do_prep_failed
  
  
  /**
 - * lpfc_sli_hba_setup - SLI intialization function
 + * lpfc_sli_hba_setup - SLI initialization function
   * @phba: Pointer to HBA context object.
   *
 - * This function is the main SLI intialization function. This function
 - * is called by the HBA intialization code, HBA reset code and HBA
 + * This function is the main SLI initialization function. This function
 + * is called by the HBA initialization code, HBA reset code and HBA
   * error attention handler code. Caller is not required to hold any
   * locks. This function issues config_port mailbox command to configure
   * the SLI, setup iocb rings and HBQ rings. In the end the function
@@@ -6507,11 -6508,11 +6508,11 @@@ lpfc_set_host_data(struct lpfc_hba *phb
  }
  
  /**
 - * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
 + * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
   * @phba: Pointer to HBA context object.
   *
 - * This function is the main SLI4 device intialization PCI function. This
 - * function is called by the HBA intialization code, HBA reset code and
 + * This function is the main SLI4 device initialization PCI function. This
 + * function is called by the HBA initialization code, HBA reset code and
   * HBA error attention handler code. Caller is not required to hold any
   * locks.
   **/
@@@ -12213,6 -12214,41 +12214,41 @@@ void lpfc_sli4_fcp_xri_abort_event_proc
  }
  
  /**
+  * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
+  * @phba: pointer to lpfc hba data structure.
+  *
+  * This routine is invoked by the worker thread to process all the pending
+  * SLI4 NVME abort XRI events.
+  **/
+ void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
+ {
+       struct lpfc_cq_event *cq_event;
 +       /* First, declare the nvme xri abort event has been handled */
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
+       spin_unlock_irq(&phba->hbalock);
 +       /* Now, handle all the nvme xri abort events */
+       while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
+               /* Get the first event from the head of the event queue */
+               spin_lock_irq(&phba->hbalock);
+               list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+                                cq_event, struct lpfc_cq_event, list);
+               spin_unlock_irq(&phba->hbalock);
+               /* Notify aborted XRI for NVME work queue */
+               if (phba->nvmet_support) {
+                       lpfc_sli4_nvmet_xri_aborted(phba,
+                                                   &cq_event->cqe.wcqe_axri);
+               } else {
+                       lpfc_sli4_nvme_xri_aborted(phba,
+                                                  &cq_event->cqe.wcqe_axri);
+               }
+               /* Free the event processed back to the free pool */
+               lpfc_sli4_cq_event_release(phba, cq_event);
+       }
 + }
 +
+ /**
   * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
   * @phba: pointer to lpfc hba data structure.
   *
@@@ -12639,7 -12675,7 +12675,7 @@@ lpfc_sli4_sp_handle_els_wcqe(struct lpf
   * @phba: Pointer to HBA context object.
   * @wcqe: Pointer to work-queue completion queue entry.
   *
 - * This routine handles slow-path WQ entry comsumed event by invoking the
 + * This routine handles slow-path WQ entry consumed event by invoking the
   * proper WQ release routine to the slow-path WQ.
   **/
  static void
@@@ -12709,10 -12745,22 +12745,22 @@@ lpfc_sli4_sp_handle_abort_xri_wcqe(stru
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                workposted = true;
                break;
+       case LPFC_NVME:
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               list_add_tail(&cq_event->list,
+                             &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+               /* Set the nvme xri abort event flag */
+               phba->hba_flag |= NVME_XRI_ABORT_EVENT;
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               workposted = true;
+               break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "0603 Invalid work queue CQE subtype (x%x)\n",
-                               cq->subtype);
+                               "0603 Invalid CQ subtype %d: "
+                               "%08x %08x %08x %08x\n",
+                               cq->subtype, wcqe->word0, wcqe->parameter,
+                               wcqe->word2, wcqe->word3);
+               lpfc_sli4_cq_event_release(phba, cq_event);
                workposted = false;
                break;
        }
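
The LPFC_NVME case added above is the producer half of
lpfc_sli4_nvme_xri_abort_event_proc() from the earlier hunk. A condensed
sketch of the handoff (field and flag names as in the diff; a summary, not
a verbatim excerpt):

	/* producer: CQ handler, IRQ context */
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list,
		      &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
	phba->hba_flag |= NVME_XRI_ABORT_EVENT;	/* marks work pending */
	spin_unlock_irqrestore(&phba->hbalock, iflags);

The worker clears NVME_XRI_ABORT_EVENT under hbalock before it starts
draining, so an event posted mid-drain re-sets the flag and re-arms the
worker instead of being stranded on the list.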
@@@ -13036,7 -13084,7 +13084,7 @@@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpf
   * @cq: Pointer to completion queue.
   * @wcqe: Pointer to work-queue completion queue entry.
   *
 - * This routine handles an fast-path WQ entry comsumed event by invoking the
 + * This routine handles a fast-path WQ entry consumed event by invoking the
   * proper WQ release routine to the slow-path WQ.
   **/
  static void
@@@ -13827,6 -13875,8 +13875,8 @@@ lpfc_dual_chute_pci_bar_map(struct lpfc
   * @startq: The starting FCP EQ to modify
   *
   * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
+  * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
+  * updated in one mailbox command.
   *
   * The @phba struct is used to send mailbox command to HBA. The @startq
   * is used to get the starting FCP EQ to change.
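
Since a single MODIFY_EQ_DELAY mailbox now programs up to
LPFC_MAX_EQ_DELAY_EQID_CNT EQ IDs, callers stride by that count rather than
issuing one mailbox per EQ; this is exactly the loop change in the
lpfc_fcp_imax_store() hunk earlier:

	for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, i);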
@@@ -13879,7 -13929,7 +13929,7 @@@ lpfc_modify_hba_eq_delay(struct lpfc_hb
                eq_delay->u.request.eq[cnt].phase = 0;
                eq_delay->u.request.eq[cnt].delay_multi = dmult;
                cnt++;
-               if (cnt >= LPFC_MAX_EQ_DELAY)
+               if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
                        break;
        }
        eq_delay->u.request.num_eq = cnt;
@@@ -15185,17 -15235,17 +15235,17 @@@ lpfc_mrq_create(struct lpfc_hba *phba, 
                drq = drqp[idx];
                cq  = cqp[idx];
  
-               if (hrq->entry_count != drq->entry_count) {
-                       status = -EINVAL;
-                       goto out;
-               }
                /* sanity check on queue memory */
                if (!hrq || !drq || !cq) {
                        status = -ENODEV;
                        goto out;
                }
  
+               if (hrq->entry_count != drq->entry_count) {
+                       status = -EINVAL;
+                       goto out;
+               }
                if (idx == 0) {
                        bf_set(lpfc_mbx_rq_create_num_pages,
                               &rq_create->u.request,
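
The reorder above is a use-before-check fix: the old code compared
hrq->entry_count with drq->entry_count before the !hrq/!drq/!cq sanity
test, dereferencing pointers it had not yet validated. The general shape
(illustrative, not lpfc code):

	if (!hrq || !drq || !cq)			/* validate first */
		return -ENODEV;
	if (hrq->entry_count != drq->entry_count)	/* then dereference */
		return -EINVAL;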
diff --combined drivers/scsi/scsi_lib.c
@@@ -1141,7 -1141,7 +1141,7 @@@ void scsi_init_command(struct scsi_devi
  
        /* zero out the cmd, except for the embedded scsi_request */
        memset((char *)cmd + sizeof(cmd->req), 0,
 -              sizeof(*cmd) - sizeof(cmd->req));
 +              sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
  
        cmd->device = dev;
        cmd->sense_buffer = buf;
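
The widened memset clears the LLD-private area the midlayer allocates
directly after struct scsi_cmnd, sized by the host template's cmd_size,
while still skipping the embedded scsi_request at the front. A hedged
sketch of a driver that relies on this (demo_* names are illustrative):

	struct demo_cmd_priv {
		u32 state;		/* now zeroed for every command */
	};

	static struct scsi_host_template demo_sht = {
		.name		= "demo",
		.cmd_size	= sizeof(struct demo_cmd_priv),
		/* ... */
	};

	/* the private area sits immediately past the scsi_cmnd */
	struct demo_cmd_priv *priv = scsi_cmd_priv(cmd);

(scsi_cmd_priv() simply returns cmd + 1.)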
@@@ -2932,6 -2932,8 +2932,8 @@@ EXPORT_SYMBOL(scsi_target_resume)
  /**
   * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
   * @sdev:     device to block
+  * @wait:     Whether or not to wait until ongoing .queuecommand() /
+  *            .queue_rq() calls have finished.
   *
   * Block request made by scsi lld's to temporarily stop all
   * scsi commands on the specified device. May sleep.
   * remove the rport mutex lock and unlock calls from srp_queuecommand().
   */
  int
- scsi_internal_device_block(struct scsi_device *sdev)
+ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
  {
        struct request_queue *q = sdev->request_queue;
        unsigned long flags;
         * request queue. 
         */
        if (q->mq_ops) {
-               blk_mq_quiesce_queue(q);
+               if (wait)
+                       blk_mq_quiesce_queue(q);
+               else
+                       blk_mq_stop_hw_queues(q);
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
-               scsi_wait_for_queuecommand(sdev);
+               if (wait)
+                       scsi_wait_for_queuecommand(sdev);
        }
  
        return 0;
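
With the new @wait flag, callers that cannot sleep pass false and take the
non-blocking blk_mq_stop_hw_queues() path; passing true keeps the old
behavior of also waiting for in-flight ->queuecommand() / .queue_rq() calls
to finish (blk_mq_quiesce_queue() or scsi_wait_for_queuecommand()). Hedged
usage sketch:

	/* sleepable context: block and drain in-flight commands */
	scsi_internal_device_block(sdev, true);

	/* atomic context, e.g. an abort/timeout path: just stop the queues */
	scsi_internal_device_block(sdev, false);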
@@@ -3036,7 -3042,7 +3042,7 @@@ EXPORT_SYMBOL_GPL(scsi_internal_device_
  static void
  device_block(struct scsi_device *sdev, void *data)
  {
-       scsi_internal_device_block(sdev);
+       scsi_internal_device_block(sdev, true);
  }
  
  static int