Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 Apr 2021 00:22:10 +0000 (17:22 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 Apr 2021 00:22:10 +0000 (17:22 -0700)
Pull SCSI updates from James Bottomley:
 "This consists of the usual driver updates (ufs, target, tcmu,
  smartpqi, lpfc, zfcp, qla2xxx, mpt3sas, pm80xx).

  The major core change is using a sbitmap instead of an atomic for
  queue tracking"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (412 commits)
  scsi: target: tcm_fc: Fix a kernel-doc header
  scsi: target: Shorten ALUA error messages
  scsi: target: Fix two format specifiers
  scsi: target: Compare explicitly with SAM_STAT_GOOD
  scsi: sd: Introduce a new local variable in sd_check_events()
  scsi: dc395x: Open-code status_byte(u8) calls
  scsi: 53c700: Open-code status_byte(u8) calls
  scsi: smartpqi: Remove unused functions
  scsi: qla4xxx: Remove an unused function
  scsi: myrs: Remove unused functions
  scsi: myrb: Remove unused functions
  scsi: mpt3sas: Fix two kernel-doc headers
  scsi: fcoe: Suppress a compiler warning
  scsi: libfc: Fix a format specifier
  scsi: aacraid: Remove an unused function
  scsi: core: Introduce enum scsi_disposition
  scsi: core: Modify the scsi_send_eh_cmnd() return value for the SDEV_BLOCK case
  scsi: core: Rename scsi_softirq_done() into scsi_complete()
  scsi: core: Remove an incorrect comment
  scsi: core: Make the scsi_alloc_sgtables() documentation more accurate
  ...

19 files changed:
block/blk-mq-sched.c
block/blk-mq.c
drivers/ata/libata-eh.c
drivers/s390/scsi/zfcp_qdio.c
drivers/scsi/BusLogic.c
drivers/scsi/advansys.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sg.c
drivers/scsi/smartpqi/smartpqi_init.c
drivers/target/target_core_pscsi.c
include/linux/hyperv.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_host.h

diff --combined block/blk-mq-sched.c
@@@ -75,8 -75,7 +75,8 @@@ void blk_mq_sched_restart(struct blk_mq
        blk_mq_run_hw_queue(hctx, true);
  }
  
 -static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 +static int sched_rq_cmp(void *priv, const struct list_head *a,
 +                      const struct list_head *b)
  {
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);
@@@ -132,6 -131,7 +132,7 @@@ static int __blk_mq_do_dispatch_sched(s
  
        do {
                struct request *rq;
+               int budget_token;
  
                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;
                        break;
                }
  
-               if (!blk_mq_get_dispatch_budget(q))
+               budget_token = blk_mq_get_dispatch_budget(q);
+               if (budget_token < 0)
                        break;
  
                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
-                       blk_mq_put_dispatch_budget(q);
+                       blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                        break;
                }
  
+               blk_mq_set_rq_budget_token(rq, budget_token);
                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
@@@ -231,6 -234,8 +235,8 @@@ static int blk_mq_do_dispatch_ctx(struc
        struct request *rq;
  
        do {
+               int budget_token;
                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;
  
-               if (!blk_mq_get_dispatch_budget(q))
+               budget_token = blk_mq_get_dispatch_budget(q);
+               if (budget_token < 0)
                        break;
  
                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
-                       blk_mq_put_dispatch_budget(q);
+                       blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                        break;
                }
  
+               blk_mq_set_rq_budget_token(rq, budget_token);
                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
@@@ -386,6 -394,7 +395,6 @@@ bool blk_mq_sched_try_insert_merge(stru
  EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
  
  static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 -                                     bool has_sched,
                                       struct request *rq)
  {
        /*
        if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
                return true;
  
 -      if (has_sched)
 -              rq->rq_flags |= RQF_SORTED;
 -
        return false;
  }
  
@@@ -415,7 -427,7 +424,7 @@@ void blk_mq_sched_insert_request(struc
  
        WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));
  
 -      if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
 +      if (blk_mq_sched_bypass_insert(hctx, rq)) {
                /*
                 * Firstly normal IO request is inserted to scheduler queue or
                 * sw queue, meantime we add flush request to dispatch queue(
diff --combined block/blk-mq.c
@@@ -361,12 -361,11 +361,12 @@@ static struct request *__blk_mq_alloc_r
  
        if (e) {
                /*
 -               * Flush requests are special and go directly to the
 +               * Flush/passthrough requests are special and go directly to the
                 * dispatch list. Don't include reserved tags in the
                 * limiting, as it isn't useful.
                 */
                if (!op_is_flush(data->cmd_flags) &&
 +                  !blk_op_is_passthrough(data->cmd_flags) &&
                    e->type->ops.limit_depth &&
                    !(data->flags & BLK_MQ_REQ_RESERVED))
                        e->type->ops.limit_depth(data->cmd_flags, data);
@@@ -1278,10 -1277,15 +1278,15 @@@ static enum prep_dispatch blk_mq_prep_d
                                                  bool need_budget)
  {
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+       int budget_token = -1;
  
-       if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) {
-               blk_mq_put_driver_tag(rq);
-               return PREP_DISPATCH_NO_BUDGET;
+       if (need_budget) {
+               budget_token = blk_mq_get_dispatch_budget(rq->q);
+               if (budget_token < 0) {
+                       blk_mq_put_driver_tag(rq);
+                       return PREP_DISPATCH_NO_BUDGET;
+               }
+               blk_mq_set_rq_budget_token(rq, budget_token);
        }
  
        if (!blk_mq_get_driver_tag(rq)) {
                         * together during handling partial dispatch
                         */
                        if (need_budget)
-                               blk_mq_put_dispatch_budget(rq->q);
+                               blk_mq_put_dispatch_budget(rq->q, budget_token);
                        return PREP_DISPATCH_NO_TAG;
                }
        }
  
  /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
  static void blk_mq_release_budgets(struct request_queue *q,
-               unsigned int nr_budgets)
+               struct list_head *list)
  {
-       int i;
+       struct request *rq;
  
-       for (i = 0; i < nr_budgets; i++)
-               blk_mq_put_dispatch_budget(q);
+       list_for_each_entry(rq, list, queuelist) {
+               int budget_token = blk_mq_get_rq_budget_token(rq);
+               if (budget_token >= 0)
+                       blk_mq_put_dispatch_budget(q, budget_token);
+       }
  }
  
  /*
@@@ -1411,7 -1419,8 +1420,8 @@@ out
                        (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
                bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
  
-               blk_mq_release_budgets(q, nr_budgets);
+               if (nr_budgets)
+                       blk_mq_release_budgets(q, list);
  
                spin_lock(&hctx->lock);
                list_splice_tail_init(list, &hctx->dispatch);
@@@ -1896,8 -1905,7 +1906,8 @@@ void blk_mq_insert_requests(struct blk_
        spin_unlock(&ctx->lock);
  }
  
 -static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 +static int plug_rq_cmp(void *priv, const struct list_head *a,
 +                     const struct list_head *b)
  {
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);
@@@ -2011,6 -2019,7 +2021,7 @@@ static blk_status_t __blk_mq_try_issue_
  {
        struct request_queue *q = rq->q;
        bool run_queue = true;
+       int budget_token;
  
        /*
         * RCU or SRCU read lock is needed before checking quiesced flag.
        if (q->elevator && !bypass_insert)
                goto insert;
  
-       if (!blk_mq_get_dispatch_budget(q))
+       budget_token = blk_mq_get_dispatch_budget(q);
+       if (budget_token < 0)
                goto insert;
  
+       blk_mq_set_rq_budget_token(rq, budget_token);
        if (!blk_mq_get_driver_tag(rq)) {
-               blk_mq_put_dispatch_budget(q);
+               blk_mq_put_dispatch_budget(q, budget_token);
                goto insert;
        }
  
@@@ -2704,7 -2716,7 +2718,7 @@@ blk_mq_alloc_hctx(struct request_queue 
                goto free_cpumask;
  
        if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
-                               gfp, node))
+                               gfp, node, false, false))
                goto free_ctxs;
        hctx->nr_ctx = 0;
  
diff --combined drivers/ata/libata-eh.c
@@@ -1599,7 -1599,7 +1599,7 @@@ static unsigned int ata_eh_analyze_tf(s
        }
  
        if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-               int ret = scsi_check_sense(qc->scsicmd);
+               enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
                /*
                 * SUCCESS here means that the sense code could be
                 * evaluated and should be passed to the upper layers
@@@ -2613,7 -2613,6 +2613,7 @@@ int ata_eh_reset(struct ata_link *link
                        switch (tmp) {
                        case -EAGAIN:
                                rc = -EAGAIN;
 +                              break;
                        case 0:
                                break;
                        default:
@@@ -20,6 -20,9 +20,9 @@@ static bool enable_multibuffer = true
  module_param_named(datarouter, enable_multibuffer, bool, 0400);
  MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
  
+ #define ZFCP_QDIO_REQUEST_RESCAN_MSECS        (MSEC_PER_SEC * 10)
+ #define ZFCP_QDIO_REQUEST_SCAN_MSECS  MSEC_PER_SEC
  static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
                                    unsigned int qdio_err)
  {
@@@ -70,15 -73,41 +73,41 @@@ static void zfcp_qdio_int_req(struct cc
                zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
                return;
        }
+ }
  
-       /* cleanup all SBALs being program-owned now */
-       zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
+ static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
+ {
+       struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
+       struct ccw_device *cdev = qdio->adapter->ccw_device;
+       unsigned int start, error;
+       int completed;
  
-       spin_lock_irq(&qdio->stat_lock);
-       zfcp_qdio_account(qdio);
-       spin_unlock_irq(&qdio->stat_lock);
-       atomic_add(count, &qdio->req_q_free);
-       wake_up(&qdio->req_q_wq);
+       completed = qdio_inspect_queue(cdev, 0, false, &start, &error);
+       if (completed > 0) {
+               if (error) {
+                       zfcp_qdio_handler_error(qdio, "qdreqt1", error);
+               } else {
+                       /* cleanup all SBALs being program-owned now */
+                       zfcp_qdio_zero_sbals(qdio->req_q, start, completed);
+                       spin_lock_irq(&qdio->stat_lock);
+                       zfcp_qdio_account(qdio);
+                       spin_unlock_irq(&qdio->stat_lock);
+                       atomic_add(completed, &qdio->req_q_free);
+                       wake_up(&qdio->req_q_wq);
+               }
+       }
+       if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
+               timer_reduce(&qdio->request_timer,
+                            jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
+ }
+ static void zfcp_qdio_request_timer(struct timer_list *timer)
+ {
+       struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);
+       tasklet_schedule(&qdio->request_tasklet);
  }
  
  static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
        /*
         * put SBALs back to response queue
         */
 -      if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
 +      if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
                zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
  }
  
@@@ -139,8 -168,11 +168,11 @@@ static void zfcp_qdio_irq_tasklet(struc
        unsigned int start, error;
        int completed;
  
-       /* Check the Response Queue, and kick off the Request Queue tasklet: */
-       completed = qdio_get_next_buffers(cdev, 0, &start, &error);
+       if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
+               tasklet_schedule(&qdio->request_tasklet);
+       /* Check the Response Queue: */
+       completed = qdio_inspect_queue(cdev, 0, true, &start, &error);
        if (completed < 0)
                return;
        if (completed > 0)
@@@ -286,7 -318,7 +318,7 @@@ int zfcp_qdio_send(struct zfcp_qdio *qd
  
        /*
         * This should actually be a spin_lock_bh(stat_lock), to protect against
-        * zfcp_qdio_int_req() in tasklet context.
+        * Request Queue completion processing in tasklet context.
         * But we can't do so (and are safe), as we always get called with IRQs
         * disabled by spin_lock_irq[save](req_q_lock).
         */
        atomic_sub(sbal_number, &qdio->req_q_free);
  
        retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
 -                       q_req->sbal_first, sbal_number);
 +                       q_req->sbal_first, sbal_number, NULL);
  
        if (unlikely(retval)) {
                /* Failed to submit the IO, roll back our modifications. */
                return retval;
        }
  
+       if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
+               tasklet_schedule(&qdio->request_tasklet);
+       else
+               timer_reduce(&qdio->request_timer,
+                            jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));
        /* account for transferred buffers */
        qdio->req_q_idx += sbal_number;
        qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
@@@ -368,6 -406,8 +406,8 @@@ void zfcp_qdio_close(struct zfcp_qdio *
        wake_up(&qdio->req_q_wq);
  
        tasklet_disable(&qdio->irq_tasklet);
+       tasklet_disable(&qdio->request_tasklet);
+       del_timer_sync(&qdio->request_timer);
        qdio_stop_irq(adapter->ccw_device);
        qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
  
@@@ -428,8 -468,6 +468,6 @@@ int zfcp_qdio_open(struct zfcp_qdio *qd
        init_data.int_parm = (unsigned long) qdio;
        init_data.input_sbal_addr_array = input_sbals;
        init_data.output_sbal_addr_array = output_sbals;
-       init_data.scan_threshold =
-               QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
  
        if (qdio_establish(cdev, &init_data))
                goto failed_establish;
                sbale->addr = 0;
        }
  
 -      if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
 +      if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
 +                  NULL))
                goto failed_qdio;
  
        /* set index of first available SBALS / number of available SBALS */
        atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
        atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
  
+       /* Enable processing for Request Queue completions: */
+       tasklet_enable(&qdio->request_tasklet);
        /* Enable processing for QDIO interrupts: */
        tasklet_enable(&qdio->irq_tasklet);
        /* This results in a qdio_start_irq(): */
@@@ -495,6 -534,7 +535,7 @@@ void zfcp_qdio_destroy(struct zfcp_qdi
                return;
  
        tasklet_kill(&qdio->irq_tasklet);
+       tasklet_kill(&qdio->request_tasklet);
  
        if (qdio->adapter->ccw_device)
                qdio_free(qdio->adapter->ccw_device);
@@@ -521,8 -561,11 +562,11 @@@ int zfcp_qdio_setup(struct zfcp_adapte
  
        spin_lock_init(&qdio->req_q_lock);
        spin_lock_init(&qdio->stat_lock);
+       timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
        tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
+       tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
        tasklet_disable(&qdio->irq_tasklet);
+       tasklet_disable(&qdio->request_tasklet);
  
        adapter->qdio = qdio;
        return 0;
diff --combined drivers/scsi/BusLogic.c
@@@ -562,6 -562,60 +562,6 @@@ done
  
  
  /*
 -  blogic_add_probeaddr_isa appends a single ISA I/O Address to the list
 -  of I/O Address and Bus Probe Information to be checked for potential BusLogic
 -  Host Adapters.
 -*/
 -
 -static void __init blogic_add_probeaddr_isa(unsigned long io_addr)
 -{
 -      struct blogic_probeinfo *probeinfo;
 -      if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
 -              return;
 -      probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++];
 -      probeinfo->adapter_type = BLOGIC_MULTIMASTER;
 -      probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
 -      probeinfo->io_addr = io_addr;
 -      probeinfo->pci_device = NULL;
 -}
 -
 -
 -/*
 -  blogic_init_probeinfo_isa initializes the list of I/O Address and
 -  Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters
 -  only from the list of standard BusLogic MultiMaster ISA I/O Addresses.
 -*/
 -
 -static void __init blogic_init_probeinfo_isa(struct blogic_adapter *adapter)
 -{
 -      /*
 -         If BusLogic Driver Options specifications requested that ISA
 -         Bus Probes be inhibited, do not proceed further.
 -       */
 -      if (blogic_probe_options.noprobe_isa)
 -              return;
 -      /*
 -         Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
 -       */
 -      if (!blogic_probe_options.limited_isa || blogic_probe_options.probe330)
 -              blogic_add_probeaddr_isa(0x330);
 -      if (!blogic_probe_options.limited_isa || blogic_probe_options.probe334)
 -              blogic_add_probeaddr_isa(0x334);
 -      if (!blogic_probe_options.limited_isa || blogic_probe_options.probe230)
 -              blogic_add_probeaddr_isa(0x230);
 -      if (!blogic_probe_options.limited_isa || blogic_probe_options.probe234)
 -              blogic_add_probeaddr_isa(0x234);
 -      if (!blogic_probe_options.limited_isa || blogic_probe_options.probe130)
 -              blogic_add_probeaddr_isa(0x130);
 -      if (!blogic_probe_options.limited_isa || blogic_probe_options.probe134)
 -              blogic_add_probeaddr_isa(0x134);
 -}
 -
 -
 -#ifdef CONFIG_PCI
 -
 -
 -/*
    blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order
    of increasing PCI Bus and Device Number.
  */
@@@ -613,11 -667,14 +613,11 @@@ static int __init blogic_init_mm_probei
        int nonpr_mmcount = 0, mmcount = 0;
        bool force_scan_order = false;
        bool force_scan_order_checked = false;
 -      bool addr_seen[6];
        struct pci_dev *pci_device = NULL;
        int i;
        if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
                return 0;
        blogic_probeinfo_count++;
 -      for (i = 0; i < 6; i++)
 -              addr_seen[i] = false;
        /*
           Iterate over the MultiMaster PCI Host Adapters.  For each
           enumerated host adapter, determine whether its ISA Compatible
                host_adapter->io_addr = io_addr;
                blogic_intreset(host_adapter);
                if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
 -                              &adapter_info, sizeof(adapter_info)) ==
 -                              sizeof(adapter_info)) {
 -                      if (adapter_info.isa_port < 6)
 -                              addr_seen[adapter_info.isa_port] = true;
 -              } else
 +                              &adapter_info, sizeof(adapter_info)) !=
 +                              sizeof(adapter_info))
                        adapter_info.isa_port = BLOGIC_IO_DISABLE;
                /*
                   Issue the Modify I/O Address command to disable the
                blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex],
                                        nonpr_mmcount);
        /*
 -         If no PCI MultiMaster Host Adapter is assigned the Primary
 -         I/O Address, then the Primary I/O Address must be probed
 -         explicitly before any PCI host adapters are probed.
 -       */
 -      if (!blogic_probe_options.noprobe_isa)
 -              if (pr_probeinfo->io_addr == 0 &&
 -                              (!blogic_probe_options.limited_isa ||
 -                               blogic_probe_options.probe330)) {
 -                      pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
 -                      pr_probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
 -                      pr_probeinfo->io_addr = 0x330;
 -              }
 -      /*
 -         Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
 -         omitting the Primary I/O Address which has already been handled.
 -       */
 -      if (!blogic_probe_options.noprobe_isa) {
 -              if (!addr_seen[1] &&
 -                              (!blogic_probe_options.limited_isa ||
 -                               blogic_probe_options.probe334))
 -                      blogic_add_probeaddr_isa(0x334);
 -              if (!addr_seen[2] &&
 -                              (!blogic_probe_options.limited_isa ||
 -                               blogic_probe_options.probe230))
 -                      blogic_add_probeaddr_isa(0x230);
 -              if (!addr_seen[3] &&
 -                              (!blogic_probe_options.limited_isa ||
 -                               blogic_probe_options.probe234))
 -                      blogic_add_probeaddr_isa(0x234);
 -              if (!addr_seen[4] &&
 -                              (!blogic_probe_options.limited_isa ||
 -                               blogic_probe_options.probe130))
 -                      blogic_add_probeaddr_isa(0x130);
 -              if (!addr_seen[5] &&
 -                              (!blogic_probe_options.limited_isa ||
 -                               blogic_probe_options.probe134))
 -                      blogic_add_probeaddr_isa(0x134);
 -      }
 -      /*
           Iterate over the older non-compliant MultiMaster PCI Host Adapters,
           noting the PCI bus location and assigned IRQ Channel.
         */
@@@ -979,10 -1078,18 +979,10 @@@ static void __init blogic_init_probeinf
                                }
                        }
                }
 -      } else {
 -              blogic_init_probeinfo_isa(adapter);
        }
  }
  
  
 -#else
 -#define blogic_init_probeinfo_list(adapter) \
 -              blogic_init_probeinfo_isa(adapter)
 -#endif                                /* CONFIG_PCI */
 -
 -
  /*
    blogic_failure prints a standardized error message, and then returns false.
  */
@@@ -1432,6 -1539,14 +1432,6 @@@ static bool __init blogic_rdconfig(stru
                else if (config.irq_ch15)
                        adapter->irq_ch = 15;
        }
 -      if (adapter->adapter_bus_type == BLOGIC_ISA_BUS) {
 -              if (config.dma_ch5)
 -                      adapter->dma_ch = 5;
 -              else if (config.dma_ch6)
 -                      adapter->dma_ch = 6;
 -              else if (config.dma_ch7)
 -                      adapter->dma_ch = 7;
 -      }
        /*
           Determine whether Extended Translation is enabled and save it in
           the Host Adapter structure.
        if (adapter->fw_ver[0] == '5')
                adapter->adapter_qdepth = 192;
        else if (adapter->fw_ver[0] == '4')
 -              adapter->adapter_qdepth = (adapter->adapter_bus_type !=
 -                                              BLOGIC_ISA_BUS ? 100 : 50);
 +              adapter->adapter_qdepth = 100;
        else
                adapter->adapter_qdepth = 30;
        if (strcmp(adapter->fw_ver, "3.31") >= 0) {
         */
        adapter->bios_addr = ext_setupinfo.bios_addr << 12;
        /*
 -         ISA Host Adapters require Bounce Buffers if there is more than
 -         16MB memory.
 -       */
 -      if (adapter->adapter_bus_type == BLOGIC_ISA_BUS &&
 -                      (void *) high_memory > (void *) MAX_DMA_ADDRESS)
 -              adapter->need_bouncebuf = true;
 -      /*
           BusLogic BT-445S Host Adapters prior to board revision E have a
           hardware bug whereby when the BIOS is enabled, transfers to/from
           the same address range the BIOS occupies modulo 16MB are handled
           incorrectly.  Only properly functioning BT-445S Host Adapters
 -         have firmware version 3.37, so require that ISA Bounce Buffers
 -         be used for the buggy BT-445S models if there is more than 16MB
 -         memory.
 +         have firmware version 3.37.
         */
 -      if (adapter->bios_addr > 0 && strcmp(adapter->model, "BT-445S") == 0 &&
 -                      strcmp(adapter->fw_ver, "3.37") < 0 &&
 -                      (void *) high_memory > (void *) MAX_DMA_ADDRESS)
 -              adapter->need_bouncebuf = true;
 +      if (adapter->bios_addr > 0 &&
 +          strcmp(adapter->model, "BT-445S") == 0 &&
 +          strcmp(adapter->fw_ver, "3.37") < 0)
 +              return blogic_failure(adapter, "Too old firmware");
        /*
           Initialize parameters common to MultiMaster and FlashPoint
           Host Adapters.
@@@ -1644,9 -1769,14 +1644,9 @@@ common
                if (adapter->drvr_opts != NULL &&
                                adapter->drvr_opts->qdepth[tgt_id] > 0)
                        qdepth = adapter->drvr_opts->qdepth[tgt_id];
 -              else if (adapter->need_bouncebuf)
 -                      qdepth = BLOGIC_TAG_DEPTH_BB;
                adapter->qdepth[tgt_id] = qdepth;
        }
 -      if (adapter->need_bouncebuf)
 -              adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH_BB;
 -      else
 -              adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
 +      adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
        if (adapter->drvr_opts != NULL)
                adapter->common_qdepth = adapter->drvr_opts->common_qdepth;
        if (adapter->common_qdepth > 0 &&
@@@ -1709,7 -1839,11 +1709,7 @@@ static bool __init blogic_reportconfig(
        blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
        blogic_info("  Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
        if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
 -              blogic_info("  DMA Channel: ", adapter);
 -              if (adapter->dma_ch > 0)
 -                      blogic_info("%d, ", adapter, adapter->dma_ch);
 -              else
 -                      blogic_info("None, ", adapter);
 +              blogic_info("  DMA Channel: None, ", adapter);
                if (adapter->bios_addr > 0)
                        blogic_info("BIOS Address: 0x%lX, ", adapter,
                                        adapter->bios_addr);
@@@ -1862,6 -1996,18 +1862,6 @@@ static bool __init blogic_getres(struc
        }
        adapter->irq_acquired = true;
        /*
 -         Acquire exclusive access to the DMA Channel.
 -       */
 -      if (adapter->dma_ch > 0) {
 -              if (request_dma(adapter->dma_ch, adapter->full_model) < 0) {
 -                      blogic_err("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n", adapter, adapter->dma_ch);
 -                      return false;
 -              }
 -              set_dma_mode(adapter->dma_ch, DMA_MODE_CASCADE);
 -              enable_dma(adapter->dma_ch);
 -              adapter->dma_chan_acquired = true;
 -      }
 -      /*
           Indicate the System Resource Acquisition completed successfully,
         */
        return true;
@@@ -1881,6 -2027,11 +1881,6 @@@ static void blogic_relres(struct blogic
        if (adapter->irq_acquired)
                free_irq(adapter->irq_ch, adapter);
        /*
 -         Release exclusive access to the DMA Channel.
 -       */
 -      if (adapter->dma_chan_acquired)
 -              free_dma(adapter->dma_ch);
 -      /*
           Release any allocated memory structs not released elsewhere
         */
        if (adapter->mbox_space)
@@@ -2148,6 -2299,7 +2148,6 @@@ static void __init blogic_inithoststruc
        host->this_id = adapter->scsi_id;
        host->can_queue = adapter->drvr_qdepth;
        host->sg_tablesize = adapter->drvr_sglimit;
 -      host->unchecked_isa_dma = adapter->need_bouncebuf;
        host->cmd_per_lun = adapter->untag_qdepth;
  }
  
@@@ -3426,7 -3578,7 +3426,7 @@@ Target  Requested Completed  Requested C
  /*
    blogic_msg prints Driver Messages.
  */
+ __printf(2, 4)
  static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
                        struct blogic_adapter *adapter, ...)
  {
@@@ -3514,7 -3666,37 +3514,7 @@@ static int __init blogic_parseopts(cha
  
                memset(drvr_opts, 0, sizeof(struct blogic_drvr_options));
                while (*options != '\0' && *options != ';') {
 -                      /* Probing Options. */
 -                      if (blogic_parse(&options, "IO:")) {
 -                              unsigned long io_addr = simple_strtoul(options,
 -                                                              &options, 0);
 -                              blogic_probe_options.limited_isa = true;
 -                              switch (io_addr) {
 -                              case 0x330:
 -                                      blogic_probe_options.probe330 = true;
 -                                      break;
 -                              case 0x334:
 -                                      blogic_probe_options.probe334 = true;
 -                                      break;
 -                              case 0x230:
 -                                      blogic_probe_options.probe230 = true;
 -                                      break;
 -                              case 0x234:
 -                                      blogic_probe_options.probe234 = true;
 -                                      break;
 -                              case 0x130:
 -                                      blogic_probe_options.probe130 = true;
 -                                      break;
 -                              case 0x134:
 -                                      blogic_probe_options.probe134 = true;
 -                                      break;
 -                              default:
 -                                      blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
 -                                      return 0;
 -                              }
 -                      } else if (blogic_parse(&options, "NoProbeISA"))
 -                              blogic_probe_options.noprobe_isa = true;
 -                      else if (blogic_parse(&options, "NoProbePCI"))
 +                      if (blogic_parse(&options, "NoProbePCI"))
                                blogic_probe_options.noprobe_pci = true;
                        else if (blogic_parse(&options, "NoProbe"))
                                blogic_probe_options.noprobe = true;
@@@ -3669,6 -3851,7 +3669,6 @@@ static struct scsi_host_template blogic
  #if 0
        .eh_abort_handler = blogic_abort,
  #endif
 -      .unchecked_isa_dma = 1,
        .max_sectors = 128,
  };
  
diff --combined drivers/scsi/advansys.c
@@@ -84,6 -84,8 +84,6 @@@ typedef unsigned char uchar
  
  #define ASC_CS_TYPE  unsigned short
  
 -#define ASC_IS_ISA          (0x0001)
 -#define ASC_IS_ISAPNP       (0x0081)
  #define ASC_IS_EISA         (0x0002)
  #define ASC_IS_PCI          (0x0004)
  #define ASC_IS_PCI_ULTRA    (0x0104)
  #define ASC_CHIP_MIN_VER_PCI     (0x09)
  #define ASC_CHIP_MAX_VER_PCI     (0x0F)
  #define ASC_CHIP_VER_PCI_BIT     (0x08)
 -#define ASC_CHIP_MIN_VER_ISA     (0x11)
 -#define ASC_CHIP_MIN_VER_ISA_PNP (0x21)
 -#define ASC_CHIP_MAX_VER_ISA     (0x27)
 -#define ASC_CHIP_VER_ISA_BIT     (0x30)
 -#define ASC_CHIP_VER_ISAPNP_BIT  (0x20)
  #define ASC_CHIP_VER_ASYN_BUG    (0x21)
  #define ASC_CHIP_VER_PCI             0x08
  #define ASC_CHIP_VER_PCI_ULTRA_3150  (ASC_CHIP_VER_PCI | 0x02)
  #define ASC_CHIP_LATEST_VER_EISA   ((ASC_CHIP_MIN_VER_EISA - 1) + 3)
  #define ASC_MAX_VL_DMA_COUNT    (0x07FFFFFFL)
  #define ASC_MAX_PCI_DMA_COUNT   (0xFFFFFFFFL)
 -#define ASC_MAX_ISA_DMA_COUNT   (0x00FFFFFFL)
  
  #define ASC_SCSI_ID_BITS  3
  #define ASC_SCSI_TIX_TYPE     uchar
  #define ASC_FLAG_SRB_LINEAR_ADDR  0x08
  #define ASC_FLAG_WIN16            0x10
  #define ASC_FLAG_WIN32            0x20
 -#define ASC_FLAG_ISA_OVER_16MB    0x40
  #define ASC_FLAG_DOS_VM_CALLBACK  0x80
  #define ASC_TAG_FLAG_EXTRA_BYTES               0x10
  #define ASC_TAG_FLAG_DISABLE_DISCONNECT        0x04
@@@ -455,6 -464,8 +455,6 @@@ typedef struct asc_dvc_cfg 
        ASC_SCSI_BIT_ID_TYPE disc_enable;
        ASC_SCSI_BIT_ID_TYPE sdtr_enable;
        uchar chip_scsi_id;
 -      uchar isa_dma_speed;
 -      uchar isa_dma_channel;
        uchar chip_version;
        ushort mcode_date;
        ushort mcode_version;
@@@ -561,8 -572,10 +561,8 @@@ typedef struct asc_cap_info_array 
  #define ASC_EEP_MAX_RETRY        20
  
  /*
 - * These macros keep the chip SCSI id and ISA DMA speed
 - * bitfields in board order. C bitfields aren't portable
 - * between big and little-endian platforms so they are
 - * not used.
 + * These macros keep the chip SCSI id  bitfields in board order. C bitfields
 + * aren't portable between big and little-endian platforms so they are not used.
   */
  
  #define ASC_EEP_GET_CHIP_ID(cfg)    ((cfg)->id_speed & 0x0f)
@@@ -1799,7 -1812,7 +1799,7 @@@ typedef struct adv_req 
   * Field naming convention:
   *
   *  *_able indicates both whether a feature should be enabled or disabled
-  *  and whether a device isi capable of the feature. At initialization
+  *  and whether a device is capable of the feature. At initialization
   *  this field may be set, but later if a device is found to be incapable
   *  of the feature, the field is cleared.
   */
@@@ -2327,8 -2340,9 +2327,8 @@@ static void asc_prt_asc_dvc_cfg(ASC_DVC
        printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n",
               h->disc_enable, h->sdtr_enable);
  
 -      printk(" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, "
 -              "chip_version %d,\n", h->chip_scsi_id, h->isa_dma_speed,
 -              h->isa_dma_channel, h->chip_version);
 +      printk(" chip_scsi_id %d, chip_version %d,\n",
 +             h->chip_scsi_id, h->chip_version);
  
        printk(" mcode_date 0x%x, mcode_version %d\n",
                h->mcode_date, h->mcode_version);
@@@ -2401,8 -2415,8 +2401,8 @@@ static void asc_prt_scsi_host(struct Sc
        printk(" dma_channel %d, this_id %d, can_queue %d,\n",
               s->dma_channel, s->this_id, s->can_queue);
  
 -      printk(" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d\n",
 -             s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma);
 +      printk(" cmd_per_lun %d, sg_tablesize %d\n",
 +             s->cmd_per_lun, s->sg_tablesize);
  
        if (ASC_NARROW_BOARD(boardp)) {
                asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var);
@@@ -2618,28 -2632,42 +2618,28 @@@ static const char *advansys_info(struc
        if (ASC_NARROW_BOARD(boardp)) {
                asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
                ASC_DBG(1, "begin\n");
 -              if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
 -                      if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) ==
 -                          ASC_IS_ISAPNP) {
 -                              busname = "ISA PnP";
 +
 +              if (asc_dvc_varp->bus_type & ASC_IS_VL) {
 +                      busname = "VL";
 +              } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
 +                      busname = "EISA";
 +              } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
 +                      if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
 +                          == ASC_IS_PCI_ULTRA) {
 +                              busname = "PCI Ultra";
                        } else {
 -                              busname = "ISA";
 +                              busname = "PCI";
                        }
 -                      sprintf(info,
 -                              "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X, DMA 0x%X",
 -                              ASC_VERSION, busname,
 -                              (ulong)shost->io_port,
 -                              (ulong)shost->io_port + ASC_IOADR_GAP - 1,
 -                              boardp->irq, shost->dma_channel);
                } else {
 -                      if (asc_dvc_varp->bus_type & ASC_IS_VL) {
 -                              busname = "VL";
 -                      } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
 -                              busname = "EISA";
 -                      } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
 -                              if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
 -                                  == ASC_IS_PCI_ULTRA) {
 -                                      busname = "PCI Ultra";
 -                              } else {
 -                                      busname = "PCI";
 -                              }
 -                      } else {
 -                              busname = "?";
 -                              shost_printk(KERN_ERR, shost, "unknown bus "
 -                                      "type %d\n", asc_dvc_varp->bus_type);
 -                      }
 -                      sprintf(info,
 -                              "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
 -                              ASC_VERSION, busname, (ulong)shost->io_port,
 -                              (ulong)shost->io_port + ASC_IOADR_GAP - 1,
 -                              boardp->irq);
 +                      busname = "?";
 +                      shost_printk(KERN_ERR, shost, "unknown bus "
 +                              "type %d\n", asc_dvc_varp->bus_type);
                }
 +              sprintf(info,
 +                      "AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
 +                      ASC_VERSION, busname, (ulong)shost->io_port,
 +                      (ulong)shost->io_port + ASC_IOADR_GAP - 1,
 +                      boardp->irq);
        } else {
                /*
                 * Wide Adapter Information
@@@ -2845,7 -2873,12 +2845,7 @@@ static void asc_prt_asc_board_eeprom(st
        ASCEEP_CONFIG *ep;
        int i;
        uchar serialstr[13];
 -#ifdef CONFIG_ISA
 -      ASC_DVC_VAR *asc_dvc_varp;
 -      int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
  
 -      asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
 -#endif /* CONFIG_ISA */
        ep = &boardp->eep_config.asc_eep;
  
        seq_printf(m,
                seq_printf(m, " %c",
                           (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
        seq_putc(m, '\n');
 -
 -#ifdef CONFIG_ISA
 -      if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
 -              seq_printf(m,
 -                         " Host ISA DMA speed:   %d MB/S\n",
 -                         isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
 -      }
 -#endif /* CONFIG_ISA */
  }
  
  /*
@@@ -3140,6 -3181,10 +3140,6 @@@ static void asc_prt_driver_conf(struct 
                   shost->sg_tablesize, shost->cmd_per_lun);
  
        seq_printf(m,
 -                 " unchecked_isa_dma %d\n",
 -                 shost->unchecked_isa_dma);
 -
 -      seq_printf(m,
                   " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
                   boardp->flags, shost->last_reset, jiffies,
                   boardp->asc_n_io_port);
@@@ -8518,6 -8563,12 +8518,6 @@@ static unsigned short AscGetChipBiosAdd
        }
  
        cfg_lsw = AscGetChipCfgLsw(iop_base);
 -
 -      /*
 -       *  ISA PnP uses the top bit as the 32K BIOS flag
 -       */
 -      if (bus_type == ASC_IS_ISAPNP)
 -              cfg_lsw &= 0x7FFF;
        bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE;
        return bios_addr;
  }
@@@ -8560,6 -8611,19 +8560,6 @@@ static unsigned char AscGetChipVersion(
        return AscGetChipVerNo(iop_base);
  }
  
 -#ifdef CONFIG_ISA
 -static void AscEnableIsaDma(uchar dma_channel)
 -{
 -      if (dma_channel < 4) {
 -              outp(0x000B, (ushort)(0xC0 | dma_channel));
 -              outp(0x000A, dma_channel);
 -      } else if (dma_channel < 8) {
 -              outp(0x00D6, (ushort)(0xC0 | (dma_channel - 4)));
 -              outp(0x00D4, (ushort)(dma_channel - 4));
 -      }
 -}
 -#endif /* CONFIG_ISA */
 -
  static int AscStopQueueExe(PortAddr iop_base)
  {
        int count = 0;
  
  static unsigned int AscGetMaxDmaCount(ushort bus_type)
  {
 -      if (bus_type & ASC_IS_ISA)
 -              return ASC_MAX_ISA_DMA_COUNT;
 -      else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
 +      if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
                return ASC_MAX_VL_DMA_COUNT;
        return ASC_MAX_PCI_DMA_COUNT;
  }
  
 -#ifdef CONFIG_ISA
 -static ushort AscGetIsaDmaChannel(PortAddr iop_base)
 -{
 -      ushort channel;
 -
 -      channel = AscGetChipCfgLsw(iop_base) & 0x0003;
 -      if (channel == 0x03)
 -              return (0);
 -      else if (channel == 0x00)
 -              return (7);
 -      return (channel + 4);
 -}
 -
 -static ushort AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
 -{
 -      ushort cfg_lsw;
 -      uchar value;
 -
 -      if ((dma_channel >= 5) && (dma_channel <= 7)) {
 -              if (dma_channel == 7)
 -                      value = 0x00;
 -              else
 -                      value = dma_channel - 4;
 -              cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
 -              cfg_lsw |= value;
 -              AscSetChipCfgLsw(iop_base, cfg_lsw);
 -              return (AscGetIsaDmaChannel(iop_base));
 -      }
 -      return 0;
 -}
 -
 -static uchar AscGetIsaDmaSpeed(PortAddr iop_base)
 -{
 -      uchar speed_value;
 -
 -      AscSetBank(iop_base, 1);
 -      speed_value = AscReadChipDmaSpeed(iop_base);
 -      speed_value &= 0x07;
 -      AscSetBank(iop_base, 0);
 -      return speed_value;
 -}
 -
 -static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
 -{
 -      speed_value &= 0x07;
 -      AscSetBank(iop_base, 1);
 -      AscWriteChipDmaSpeed(iop_base, speed_value);
 -      AscSetBank(iop_base, 0);
 -      return AscGetIsaDmaSpeed(iop_base);
 -}
 -#endif /* CONFIG_ISA */
 -
  static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
  {
        int i;
        iop_base = asc_dvc->iop_base;
        asc_dvc->err_code = 0;
        if ((asc_dvc->bus_type &
 -           (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
 +           (ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
                asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
        }
        AscSetChipControl(iop_base, CC_HALT);
                                   (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
        }
  
 -      asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
 -#ifdef CONFIG_ISA
 -      if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
 -              if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) {
 -                      AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
 -                      asc_dvc->bus_type = ASC_IS_ISAPNP;
 -              }
 -              asc_dvc->cfg->isa_dma_channel =
 -                  (uchar)AscGetIsaDmaChannel(iop_base);
 -      }
 -#endif /* CONFIG_ISA */
        for (i = 0; i <= ASC_MAX_TID; i++) {
                asc_dvc->cur_dvc_qng[i] = 0;
                asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
@@@ -9012,6 -9141,7 +9012,6 @@@ static int AscInitFromEEP(ASC_DVC_VAR *
        asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr;
        asc_dvc->cfg->disc_enable = eep_config->disc_enable;
        asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng;
 -      asc_dvc->cfg->isa_dma_speed = ASC_EEP_GET_DMA_SPD(eep_config);
        asc_dvc->start_motor = eep_config->start_motor;
        asc_dvc->dvc_cntl = eep_config->cntl;
        asc_dvc->no_scam = eep_config->no_scam;
@@@ -9184,10 -9314,22 +9184,10 @@@ static int AscInitSetConfig(struct pci_
                }
        } else
  #endif /* CONFIG_PCI */
 -      if (asc_dvc->bus_type == ASC_IS_ISAPNP) {
 -              if (AscGetChipVersion(iop_base, asc_dvc->bus_type)
 -                  == ASC_CHIP_VER_ASYN_BUG) {
 -                      asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
 -              }
 -      }
        if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
            asc_dvc->cfg->chip_scsi_id) {
                asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
        }
 -#ifdef CONFIG_ISA
 -      if (asc_dvc->bus_type & ASC_IS_ISA) {
 -              AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
 -              AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
 -      }
 -#endif /* CONFIG_ISA */
  
        asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;
  
@@@ -10610,6 -10752,12 +10610,6 @@@ static struct scsi_host_template advans
        .eh_host_reset_handler = advansys_reset,
        .bios_param = advansys_biosparam,
        .slave_configure = advansys_slave_configure,
 -      /*
 -       * Because the driver may control an ISA adapter 'unchecked_isa_dma'
 -       * must be set. The flag will be cleared in advansys_board_found
 -       * for non-ISA adapters.
 -       */
 -      .unchecked_isa_dma = true,
  };
  
  static int advansys_wide_init_chip(struct Scsi_Host *shost)
@@@ -10775,21 -10923,29 +10775,21 @@@ static int advansys_board_found(struct 
                 */
                switch (asc_dvc_varp->bus_type) {
  #ifdef CONFIG_ISA
 -              case ASC_IS_ISA:
 -                      shost->unchecked_isa_dma = true;
 -                      share_irq = 0;
 -                      break;
                case ASC_IS_VL:
 -                      shost->unchecked_isa_dma = false;
                        share_irq = 0;
                        break;
                case ASC_IS_EISA:
 -                      shost->unchecked_isa_dma = false;
                        share_irq = IRQF_SHARED;
                        break;
  #endif /* CONFIG_ISA */
  #ifdef CONFIG_PCI
                case ASC_IS_PCI:
 -                      shost->unchecked_isa_dma = false;
                        share_irq = IRQF_SHARED;
                        break;
  #endif /* CONFIG_PCI */
                default:
                        shost_printk(KERN_ERR, shost, "unknown adapter type: "
                                        "%d\n", asc_dvc_varp->bus_type);
 -                      shost->unchecked_isa_dma = false;
                        share_irq = 0;
                        break;
                }
                 * For Wide boards set PCI information before calling
                 * AdvInitGetConfig().
                 */
 -              shost->unchecked_isa_dma = false;
                share_irq = IRQF_SHARED;
                ASC_DBG(2, "AdvInitGetConfig()\n");
  
                ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable;
                ep->disc_enable = asc_dvc_varp->cfg->disc_enable;
                ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled;
 -              ASC_EEP_SET_DMA_SPD(ep, asc_dvc_varp->cfg->isa_dma_speed);
 +              ASC_EEP_SET_DMA_SPD(ep, ASC_DEF_ISA_DMA_SPEED);
                ep->start_motor = asc_dvc_varp->start_motor;
                ep->cntl = asc_dvc_varp->dvc_cntl;
                ep->no_scam = asc_dvc_varp->no_scam;
  
        /* Register DMA Channel for Narrow boards. */
        shost->dma_channel = NO_ISA_DMA;        /* Default to no ISA DMA. */
 -#ifdef CONFIG_ISA
 -      if (ASC_NARROW_BOARD(boardp)) {
 -              /* Register DMA channel for ISA bus. */
 -              if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
 -                      shost->dma_channel = asc_dvc_varp->cfg->isa_dma_channel;
 -                      ret = request_dma(shost->dma_channel, DRV_NAME);
 -                      if (ret) {
 -                              shost_printk(KERN_ERR, shost, "request_dma() "
 -                                              "%d failed %d\n",
 -                                              shost->dma_channel, ret);
 -                              goto err_unmap;
 -                      }
 -                      AscEnableIsaDma(shost->dma_channel);
 -              }
 -      }
 -#endif /* CONFIG_ISA */
  
        /* Register IRQ Number. */
        ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost);
                        shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
                                        "failed with %d\n", boardp->irq, ret);
                }
 -              goto err_free_dma;
 +              goto err_unmap;
        }
  
        /*
                advansys_wide_free_mem(boardp);
   err_free_irq:
        free_irq(boardp->irq, shost);
 - err_free_dma:
 -#ifdef CONFIG_ISA
 -      if (shost->dma_channel != NO_ISA_DMA)
 -              free_dma(shost->dma_channel);
 -#endif
   err_unmap:
        if (boardp->ioremap_addr)
                iounmap(boardp->ioremap_addr);
@@@ -11161,7 -11339,12 +11161,7 @@@ static int advansys_release(struct Scsi
        ASC_DBG(1, "begin\n");
        scsi_remove_host(shost);
        free_irq(board->irq, shost);
 -#ifdef CONFIG_ISA
 -      if (shost->dma_channel != NO_ISA_DMA) {
 -              ASC_DBG(1, "free_dma()\n");
 -              free_dma(shost->dma_channel);
 -      }
 -#endif
 +
        if (ASC_NARROW_BOARD(board)) {
                dma_unmap_single(board->dev,
                                        board->dvc_var.asc_dvc_var.overrun_dma,
@@@ -11183,13 -11366,79 +11183,13 @@@ static PortAddr _asc_def_iop_base[ASC_I
        0x0210, 0x0230, 0x0250, 0x0330
  };
  
 -/*
 - * The ISA IRQ number is found in bits 2 and 3 of the CfgLsw.  It decodes as:
 - * 00: 10
 - * 01: 11
 - * 10: 12
 - * 11: 15
 - */
 -static unsigned int advansys_isa_irq_no(PortAddr iop_base)
 -{
 -      unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
 -      unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10;
 -      if (chip_irq == 13)
 -              chip_irq = 15;
 -      return chip_irq;
 -}
 -
 -static int advansys_isa_probe(struct device *dev, unsigned int id)
 -{
 -      int err = -ENODEV;
 -      PortAddr iop_base = _asc_def_iop_base[id];
 -      struct Scsi_Host *shost;
 -      struct asc_board *board;
 -
 -      if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) {
 -              ASC_DBG(1, "I/O port 0x%x busy\n", iop_base);
 -              return -ENODEV;
 -      }
 -      ASC_DBG(1, "probing I/O port 0x%x\n", iop_base);
 -      if (!AscFindSignature(iop_base))
 -              goto release_region;
 -      if (!(AscGetChipVersion(iop_base, ASC_IS_ISA) & ASC_CHIP_VER_ISA_BIT))
 -              goto release_region;
 -
 -      err = -ENOMEM;
 -      shost = scsi_host_alloc(&advansys_template, sizeof(*board));
 -      if (!shost)
 -              goto release_region;
 -
 -      board = shost_priv(shost);
 -      board->irq = advansys_isa_irq_no(iop_base);
 -      board->dev = dev;
 -      board->shost = shost;
 -
 -      err = advansys_board_found(shost, iop_base, ASC_IS_ISA);
 -      if (err)
 -              goto free_host;
 -
 -      dev_set_drvdata(dev, shost);
 -      return 0;
 -
 - free_host:
 -      scsi_host_put(shost);
 - release_region:
 -      release_region(iop_base, ASC_IOADR_GAP);
 -      return err;
 -}
 -
 -static void advansys_isa_remove(struct device *dev, unsigned int id)
 +static void advansys_vlb_remove(struct device *dev, unsigned int id)
  {
        int ioport = _asc_def_iop_base[id];
        advansys_release(dev_get_drvdata(dev));
        release_region(ioport, ASC_IOADR_GAP);
  }
  
 -static struct isa_driver advansys_isa_driver = {
 -      .probe          = advansys_isa_probe,
 -      .remove         = advansys_isa_remove,
 -      .driver = {
 -              .owner  = THIS_MODULE,
 -              .name   = DRV_NAME,
 -      },
 -};
 -
  /*
   * The VLB IRQ number is found in bits 2 to 4 of the CfgLsw.  It decodes as:
   * 000: invalid
@@@ -11258,7 -11507,7 +11258,7 @@@ static int advansys_vlb_probe(struct de
  
  static struct isa_driver advansys_vlb_driver = {
        .probe          = advansys_vlb_probe,
 -      .remove         = advansys_isa_remove,
 +      .remove         = advansys_vlb_remove,
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "advansys_vlb",
@@@ -11508,10 -11757,15 +11508,10 @@@ static int __init advansys_init(void
  {
        int error;
  
 -      error = isa_register_driver(&advansys_isa_driver,
 -                                  ASC_IOADR_TABLE_MAX_IX);
 -      if (error)
 -              goto fail;
 -
        error = isa_register_driver(&advansys_vlb_driver,
                                    ASC_IOADR_TABLE_MAX_IX);
        if (error)
 -              goto unregister_isa;
 +              goto fail;
  
        error = eisa_driver_register(&advansys_eisa_driver);
        if (error)
        eisa_driver_unregister(&advansys_eisa_driver);
   unregister_vlb:
        isa_unregister_driver(&advansys_vlb_driver);
 - unregister_isa:
 -      isa_unregister_driver(&advansys_isa_driver);
   fail:
        return error;
  }
@@@ -11536,6 -11792,7 +11536,6 @@@ static void __exit advansys_exit(void
        pci_unregister_driver(&advansys_pci_driver);
        eisa_driver_unregister(&advansys_eisa_driver);
        isa_unregister_driver(&advansys_vlb_driver);
 -      isa_unregister_driver(&advansys_isa_driver);
  }
  
  module_init(advansys_init);
@@@ -326,6 -326,7 +326,7 @@@ static const char *ibmvfc_get_cmd_error
  
  /**
   * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+  * @vhost:      ibmvfc host struct
   * @vfc_cmd:  ibmvfc command struct
   *
   * Return value:
@@@ -603,8 -604,17 +604,17 @@@ static void ibmvfc_set_host_action(stru
                if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
                        vhost->action = action;
                break;
+       case IBMVFC_HOST_ACTION_REENABLE:
+       case IBMVFC_HOST_ACTION_RESET:
+               vhost->action = action;
+               break;
        case IBMVFC_HOST_ACTION_INIT:
        case IBMVFC_HOST_ACTION_TGT_DEL:
+       case IBMVFC_HOST_ACTION_LOGO:
+       case IBMVFC_HOST_ACTION_QUERY_TGTS:
+       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+       case IBMVFC_HOST_ACTION_NONE:
+       default:
                switch (vhost->action) {
                case IBMVFC_HOST_ACTION_RESET:
                case IBMVFC_HOST_ACTION_REENABLE:
                        break;
                }
                break;
-       case IBMVFC_HOST_ACTION_LOGO:
-       case IBMVFC_HOST_ACTION_QUERY_TGTS:
-       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
-       case IBMVFC_HOST_ACTION_NONE:
-       case IBMVFC_HOST_ACTION_RESET:
-       case IBMVFC_HOST_ACTION_REENABLE:
-       default:
-               vhost->action = action;
-               break;
        }
  }
  
@@@ -650,8 -651,6 +651,6 @@@ static void ibmvfc_reinit_host(struct i
  /**
   * ibmvfc_del_tgt - Schedule cleanup and removal of the target
   * @tgt:              ibmvfc target struct
-  * @job_step: job step to perform
-  *
   **/
  static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
  {
@@@ -768,6 -767,8 +767,8 @@@ static int ibmvfc_send_crq_init_complet
  /**
   * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
   * @vhost:    ibmvfc host who owns the event pool
+  * @queue:      ibmvfc queue struct
+  * @size:       pool size
   *
   * Returns zero on success.
   **/
@@@ -820,6 -821,7 +821,7 @@@ static int ibmvfc_init_event_pool(struc
  /**
   * ibmvfc_free_event_pool - Frees memory of the event pool of a host
   * @vhost:    ibmvfc host who owns the event pool
+  * @queue:      ibmvfc queue struct
   *
   **/
  static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
@@@ -1414,6 -1416,7 +1416,7 @@@ static int ibmvfc_issue_fc_host_lip(str
  
  /**
   * ibmvfc_gather_partition_info - Gather info about the LPAR
+  * @vhost:      ibmvfc host struct
   *
   * Return value:
   *    none
@@@ -1484,7 -1487,7 +1487,7 @@@ static void ibmvfc_set_login_info(struc
  
  /**
   * ibmvfc_get_event - Gets the next free event in pool
-  * @vhost:    ibmvfc host struct
+  * @queue:      ibmvfc queue struct
   *
   * Returns a free event from the pool.
   **/
@@@ -1631,7 -1634,7 +1634,7 @@@ static int ibmvfc_map_sg_data(struct sc
  
  /**
   * ibmvfc_timeout - Internal command timeout handler
-  * @evt:      struct ibmvfc_event that timed out
+  * @t:        struct ibmvfc_event that timed out
   *
   * Called when an internally generated command times out
   **/
@@@ -1892,8 -1895,8 +1895,8 @@@ static struct ibmvfc_cmd *ibmvfc_init_v
  
  /**
   * ibmvfc_queuecommand - The queuecommand function of the scsi template
+  * @shost:    scsi host struct
   * @cmnd:     struct scsi_cmnd to be executed
-  * @done:     Callback function to be called when cmnd is completed
   *
   * Returns:
   *    0 on success / other on failure
@@@ -2324,7 -2327,7 +2327,7 @@@ static int ibmvfc_reset_device(struct s
  /**
   * ibmvfc_match_rport - Match function for specified remote port
   * @evt:      ibmvfc event struct
-  * @device:   device to match (rport)
+  * @rport:    device to match
   *
   * Returns:
   *    1 if event matches rport / 0 if event does not match rport
@@@ -3176,8 -3179,9 +3179,9 @@@ static void ibmvfc_handle_async(struct 
   * ibmvfc_handle_crq - Handles and frees received events in the CRQ
   * @crq:      Command/Response queue
   * @vhost:    ibmvfc host struct
+  * @evt_doneq:        Event done queue
   *
- **/
+ **/
  static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
                              struct list_head *evt_doneq)
  {
@@@ -3358,7 -3362,6 +3362,6 @@@ static int ibmvfc_slave_configure(struc
   * ibmvfc_change_queue_depth - Change the device's queue depth
   * @sdev:     scsi device struct
   * @qdepth:   depth to set
-  * @reason:   calling context
   *
   * Return value:
   *    actual depth set
@@@ -3430,6 -3433,7 +3433,7 @@@ static ssize_t ibmvfc_show_host_capabil
  /**
   * ibmvfc_show_log_level - Show the adapter's error logging level
   * @dev:      class device struct
+  * @attr:     unused
   * @buf:      buffer
   *
   * Return value:
@@@ -3452,7 -3456,9 +3456,9 @@@ static ssize_t ibmvfc_show_log_level(st
  /**
   * ibmvfc_store_log_level - Change the adapter's error logging level
   * @dev:      class device struct
+  * @attr:     unused
   * @buf:      buffer
+  * @count:      buffer size
   *
   * Return value:
   *    number of bytes printed to buffer
@@@ -3530,7 -3536,7 +3536,7 @@@ static ssize_t ibmvfc_read_trace(struc
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t off, size_t count)
  {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ibmvfc_host *vhost = shost_priv(shost);
        unsigned long flags = 0;
@@@ -4162,6 -4168,7 +4168,7 @@@ static void ibmvfc_tgt_implicit_logout_
  /**
   * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
   * @tgt:              ibmvfc target struct
+  * @done:             Routine to call when the event is responded to
   *
   * Returns:
   *    Allocated and initialized ibmvfc_event struct
@@@ -4478,7 -4485,7 +4485,7 @@@ static void ibmvfc_tgt_adisc_cancel_don
  
  /**
   * ibmvfc_adisc_timeout - Handle an ADISC timeout
-  * @tgt:              ibmvfc target struct
+  * @t:                ibmvfc target struct
   *
   * If an ADISC times out, send a cancel. If the cancel times
   * out, reset the CRQ. When the ADISC comes back as cancelled,
@@@ -4681,7 -4688,7 +4688,7 @@@ static void ibmvfc_tgt_query_target(str
  /**
   * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
   * @vhost:            ibmvfc host struct
-  * @scsi_id:  SCSI ID to allocate target for
+  * @target:           Holds SCSI ID to allocate target for and the WWPN
   *
   * Returns:
   *    0 on success / other on failure
@@@ -5111,7 -5118,7 +5118,7 @@@ static void ibmvfc_npiv_login(struct ib
  
  /**
   * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
-  * @vhost:            ibmvfc host struct
+  * @evt:              ibmvfc event struct
   *
   **/
  static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
@@@ -5373,30 -5380,49 +5380,49 @@@ static void ibmvfc_do_work(struct ibmvf
        case IBMVFC_HOST_ACTION_INIT_WAIT:
                break;
        case IBMVFC_HOST_ACTION_RESET:
-               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
                list_splice_init(&vhost->purge, &purge);
                spin_unlock_irqrestore(vhost->host->host_lock, flags);
                ibmvfc_complete_purge(&purge);
                rc = ibmvfc_reset_crq(vhost);
                spin_lock_irqsave(vhost->host->host_lock, flags);
-               if (rc == H_CLOSED)
+               if (!rc || rc == H_CLOSED)
                        vio_enable_interrupts(to_vio_dev(vhost->dev));
-               if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
-                   (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
-                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
-                       dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+               if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
+                       /*
+                        * The only action we could have changed to would have
+                        * been reenable, in which case, we skip the rest of
+                        * this path and wait until we've done the re-enable
+                        * before sending the crq init.
+                        */
+                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+                       if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+                           (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+                               dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+                       }
                }
                break;
        case IBMVFC_HOST_ACTION_REENABLE:
-               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
                list_splice_init(&vhost->purge, &purge);
                spin_unlock_irqrestore(vhost->host->host_lock, flags);
                ibmvfc_complete_purge(&purge);
                rc = ibmvfc_reenable_crq_queue(vhost);
                spin_lock_irqsave(vhost->host->host_lock, flags);
-               if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
-                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
-                       dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+               if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
+                       /*
+                        * The only action we could have changed to would have
+                        * been reset, in which case, we skip the rest of this
+                        * path and wait until we've done the reset before
+                        * sending the crq init.
+                        */
+                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+                       if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+                               dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+                       }
                }
                break;
        case IBMVFC_HOST_ACTION_LOGO:
@@@ -6091,7 -6117,7 +6117,7 @@@ out
   * Return value:
   *    0
   **/
 -static int ibmvfc_remove(struct vio_dev *vdev)
 +static void ibmvfc_remove(struct vio_dev *vdev)
  {
        struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
        LIST_HEAD(purge);
        spin_unlock(&ibmvfc_driver_lock);
        scsi_host_put(vhost->host);
        LEAVE;
 -      return 0;
  }
  
  /**
@@@ -130,9 -130,10 +130,10 @@@ static irqreturn_t ibmvscsi_handle_even
  }
  
  /**
-  * release_crq_queue: - Deallocates data and unregisters CRQ
-  * @queue:    crq_queue to initialize and register
-  * @host_data:        ibmvscsi_host_data of host
+  * ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ
+  * @queue:            crq_queue to initialize and register
+  * @hostdata:         ibmvscsi_host_data of host
+  * @max_requests:     maximum requests (unused)
   *
   * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
   * the crq with the hypervisor.
@@@ -276,10 -277,9 +277,9 @@@ static void set_adapter_info(struct ibm
  }
  
  /**
-  * reset_crq_queue: - resets a crq after a failure
+  * ibmvscsi_reset_crq_queue() - resets a crq after a failure
   * @queue:    crq_queue to initialize and register
   * @hostdata: ibmvscsi_host_data of host
-  *
   */
  static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
                                    struct ibmvscsi_host_data *hostdata)
  }
  
  /**
-  * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
-  * @queue:    crq_queue to initialize and register
-  * @hostdata: ibmvscsi_host_data of host
+  * ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor
+  * @queue:            crq_queue to initialize and register
+  * @hostdata:         ibmvscsi_host_data of host
+  * @max_requests:     maximum requests (unused)
   *
   * Allocates a page for messages, maps it for dma, and registers
   * the crq with the hypervisor.
@@@ -404,10 -405,9 +405,9 @@@ static int ibmvscsi_init_crq_queue(stru
  }
  
  /**
-  * reenable_crq_queue: - reenables a crq after
+  * ibmvscsi_reenable_crq_queue() - reenables a crq after a failure
   * @queue:    crq_queue to initialize and register
   * @hostdata: ibmvscsi_host_data of host
-  *
   */
  static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata)
   * @hostdata: ibmvscsi_host_data who owns the event pool
   *
   * Returns zero on success.
- */
+  */
  static int initialize_event_pool(struct event_pool *pool,
                                 int size, struct ibmvscsi_host_data *hostdata)
  {
  }
  
  /**
-  * release_event_pool: - Frees memory of an event pool of a host
+  * release_event_pool() - Frees memory of an event pool of a host
   * @pool:     event_pool to be released
  * @hostdata: ibmvscsi_host_data who owns the event pool
   *
   * Returns zero on success.
- */
+  */
  static void release_event_pool(struct event_pool *pool,
                               struct ibmvscsi_host_data *hostdata)
  {
@@@ -526,11 -526,10 +526,10 @@@ static int valid_event_struct(struct ev
  }
  
  /**
-  * ibmvscsi_free-event_struct: - Changes status of event to "free"
+  * free_event_struct() - Changes status of event to "free"
   * @pool:     event_pool that contains the event
   * @evt:      srp_event_struct to be modified
-  *
- */
+  */
  static void free_event_struct(struct event_pool *pool,
                                       struct srp_event_struct *evt)
  {
  }
  
  /**
-  * get_evt_struct: - Gets the next free event in pool
+  * get_event_struct() - Gets the next free event in pool
   * @pool:     event_pool that contains the events to be searched
   *
   * Returns the next event in "free" state, and NULL if none are free.
@@@ -575,7 -574,7 +574,7 @@@ static struct srp_event_struct *get_eve
  /**
   * init_event_struct: Initialize fields in an event struct that are always 
   *                    required.
-  * @evt:        The event
+  * @evt_struct: The event
   * @done:       Routine to call when the event is responded to
   * @format:     SRP or MAD format
   * @timeout:    timeout value set in the CRQ
@@@ -597,7 -596,7 +596,7 @@@ static void init_event_struct(struct sr
   * Routines for receiving SCSI responses from the hosting partition
   */
  
- /**
+ /*
   * set_srp_direction: Set the fields in the srp related to data
   *     direction and number of buffers based on the direction in
   *     the scsi_cmnd and the number of buffers
@@@ -632,9 -631,9 +631,9 @@@ static void set_srp_direction(struct sc
  /**
   * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
   * @cmd:      srp_cmd whose additional_data member will be unmapped
+  * @evt_struct: the event
   * @dev:      device for which the memory is mapped
-  *
- */
+  */
  static void unmap_cmd_data(struct srp_cmd *cmd,
                           struct srp_event_struct *evt_struct,
                           struct device *dev)
@@@ -671,6 -670,7 +670,7 @@@ static int map_sg_list(struct scsi_cmn
  /**
   * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
   * @cmd:      struct scsi_cmnd with the scatterlist
+  * @evt_struct:       struct srp_event_struct to map
   * @srp_cmd:  srp_cmd that contains the memory descriptor
   * @dev:      device for which to map dma memory
   *
@@@ -717,8 -717,7 +717,7 @@@ static int map_sg_data(struct scsi_cmn
  
        /* get indirect table */
        if (!evt_struct->ext_list) {
-               evt_struct->ext_list = (struct srp_direct_buf *)
-                       dma_alloc_coherent(dev,
+               evt_struct->ext_list = dma_alloc_coherent(dev,
                                           SG_ALL * sizeof(struct srp_direct_buf),
                                           &evt_struct->ext_list_token, 0);
                if (!evt_struct->ext_list) {
  /**
   * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
   * @cmd:      struct scsi_cmnd with the memory to be mapped
+  * @evt_struct:       struct srp_event_struct to map
   * @srp_cmd:  srp_cmd that contains the memory descriptor
   * @dev:      dma device for which to map dma memory
   *
@@@ -778,6 -778,7 +778,7 @@@ static int map_data_for_srp_cmd(struct 
  /**
   * purge_requests: Our virtual adapter just shut down.  purge any sent requests
   * @hostdata:    the adapter
+  * @error_code:  error code to return as the 'result'
   */
  static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
  {
@@@ -838,7 -839,7 +839,7 @@@ static void ibmvscsi_reset_host(struct 
  
  /**
   * ibmvscsi_timeout - Internal command timeout handler
-  * @evt_struct:       struct srp_event_struct that timed out
+  * @t:        struct srp_event_struct that timed out
   *
   * Called when an internally generated command times out
  */
@@@ -1034,8 -1035,8 +1035,8 @@@ static inline u16 lun_from_dev(struct s
  }
  
  /**
-  * ibmvscsi_queue: - The queuecommand function of the scsi template 
-  * @cmd:      struct scsi_cmnd to be executed
+  * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi template
+  * @cmnd:     struct scsi_cmnd to be executed
   * @done:     Callback function to be called when cmd is completed
  */
  static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
@@@ -1342,7 -1343,7 +1343,7 @@@ static void fast_fail_rsp(struct srp_ev
  }
  
  /**
-  * init_host - Start host initialization
+  * enable_fast_fail() - Start host initialization
   * @hostdata: ibmvscsi_host_data of host
   *
   * Returns zero if successful.
@@@ -1456,16 -1457,15 +1457,15 @@@ static void send_mad_adapter_info(struc
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
  };
  
- /**
-  * init_adapter: Start virtual adapter initialization sequence
-  *
+ /*
+  * init_adapter() - Start virtual adapter initialization sequence
   */
  static void init_adapter(struct ibmvscsi_host_data *hostdata)
  {
        send_mad_adapter_info(hostdata);
  }
  
- /**
+ /*
   * sync_completion: Signal that a synchronous command has completed
   * Note that after returning from this call, the evt_struct is freed.
   * the caller waiting on this completion shouldn't touch the evt_struct
@@@ -1480,8 -1480,8 +1480,8 @@@ static void sync_completion(struct srp_
        complete(&evt_struct->comp);
  }
  
- /**
-  * ibmvscsi_abort: Abort a command...from scsi host template
+ /*
+  * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
   * send this over to the server and wait synchronously for the response
   */
  static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
        return SUCCESS;
  }
  
- /**
+ /*
   * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host 
   * template send this over to the server and wait synchronously for the 
   * response
@@@ -1884,7 -1884,6 +1884,6 @@@ static int ibmvscsi_slave_configure(str
   * ibmvscsi_change_queue_depth - Change the device's queue depth
   * @sdev:     scsi device struct
   * @qdepth:   depth to set
-  * @reason:   calling context
   *
   * Return value:
   *    actual depth set
@@@ -2214,7 -2213,7 +2213,7 @@@ static int ibmvscsi_work(void *data
        return 0;
  }
  
- /**
+ /*
   * Called by bus code for each adapter
   */
  static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        return -1;
  }
  
 -static int ibmvscsi_remove(struct vio_dev *vdev)
 +static void ibmvscsi_remove(struct vio_dev *vdev)
  {
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
  
        spin_unlock(&ibmvscsi_driver_lock);
  
        scsi_host_put(hostdata->host);
 -
 -      return 0;
  }
  
  /**
@@@ -2374,7 -2375,7 +2373,7 @@@ static int ibmvscsi_resume(struct devic
        return 0;
  }
  
- /**
+ /*
   * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
   * support.
   */
@@@ -128,10 -128,10 +128,10 @@@ static bool connection_broken(struct sc
   * This function calls h_free_q then frees the interrupt bit etc.
   * It must release the lock before doing so because of the time it can take
   * for h_free_crq in PHYP
-  * NOTE: the caller must make sure that state and or flags will prevent
-  *     interrupt handler from scheduling work.
-  * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
-  *     we can't do it here, because we don't have the lock
+  * NOTE: the caller must make sure that state and or flags will prevent
+  *       interrupt handler from scheduling work.
+  *       * anyone calling this function may need to set the CRQ_CLOSED flag
+  *       we can't do it here, because we don't have the lock
   *
   * EXECUTION ENVIRONMENT:
   *    Process level
@@@ -2670,7 -2670,6 +2670,6 @@@ static void ibmvscsis_parse_cmd(struct 
        u64 data_len = 0;
        enum dma_data_direction dir;
        int attr = 0;
-       int rc = 0;
  
        nexus = vscsi->tport.ibmv_nexus;
        /*
  
        srp->lun.scsi_lun[0] &= 0x3f;
  
-       rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
-                              cmd->sense_buf, scsilun_to_int(&srp->lun),
-                              data_len, attr, dir, 0);
-       if (rc) {
-               dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
-               spin_lock_bh(&vscsi->intr_lock);
-               list_del(&cmd->list);
-               ibmvscsis_free_cmd_resources(vscsi, cmd);
-               spin_unlock_bh(&vscsi->intr_lock);
-               goto fail;
-       }
+       target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
+                         cmd->sense_buf, scsilun_to_int(&srp->lun),
+                         data_len, attr, dir, 0);
        return;
  
  fail:
@@@ -3595,7 -3586,7 +3586,7 @@@ free_adapter
        return rc;
  }
  
 -static int ibmvscsis_remove(struct vio_dev *vdev)
 +static void ibmvscsis_remove(struct vio_dev *vdev)
  {
        struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
  
        list_del(&vscsi->list);
        spin_unlock_bh(&ibmvscsis_dev_lock);
        kfree(vscsi);
 -
 -      return 0;
  }
  
  static ssize_t system_id_show(struct device *dev,
@@@ -35,46 -35,40 +35,40 @@@ static enum ata_completion_errors sas_t
        /* ts->resp == SAS_TASK_COMPLETE */
        /* task delivered, what happened afterwards? */
        switch (ts->stat) {
-               case SAS_DEV_NO_RESPONSE:
-                       return AC_ERR_TIMEOUT;
-               case SAS_INTERRUPTED:
-               case SAS_PHY_DOWN:
-               case SAS_NAK_R_ERR:
-                       return AC_ERR_ATA_BUS;
-               case SAS_DATA_UNDERRUN:
-                       /*
-                        * Some programs that use the taskfile interface
-                        * (smartctl in particular) can cause underrun
-                        * problems.  Ignore these errors, perhaps at our
-                        * peril.
-                        */
-                       return 0;
-               case SAS_DATA_OVERRUN:
-               case SAS_QUEUE_FULL:
-               case SAS_DEVICE_UNKNOWN:
-               case SAS_SG_ERR:
-                       return AC_ERR_INVALID;
-               case SAS_OPEN_TO:
-               case SAS_OPEN_REJECT:
-                       pr_warn("%s: Saw error %d.  What to do?\n",
-                               __func__, ts->stat);
-                       return AC_ERR_OTHER;
-               case SAM_STAT_CHECK_CONDITION:
-               case SAS_ABORTED_TASK:
-                       return AC_ERR_DEV;
-               case SAS_PROTO_RESPONSE:
-                       /* This means the ending_fis has the error
-                        * value; return 0 here to collect it */
-                       return 0;
-               default:
-                       return 0;
+       case SAS_DEV_NO_RESPONSE:
+               return AC_ERR_TIMEOUT;
+       case SAS_INTERRUPTED:
+       case SAS_PHY_DOWN:
+       case SAS_NAK_R_ERR:
+               return AC_ERR_ATA_BUS;
+       case SAS_DATA_UNDERRUN:
+               /*
+                * Some programs that use the taskfile interface
+                * (smartctl in particular) can cause underrun
+                * problems.  Ignore these errors, perhaps at our
+                * peril.
+                */
+               return 0;
+       case SAS_DATA_OVERRUN:
+       case SAS_QUEUE_FULL:
+       case SAS_DEVICE_UNKNOWN:
+       case SAS_SG_ERR:
+               return AC_ERR_INVALID;
+       case SAS_OPEN_TO:
+       case SAS_OPEN_REJECT:
+               pr_warn("%s: Saw error %d.  What to do?\n",
+                       __func__, ts->stat);
+               return AC_ERR_OTHER;
+       case SAM_STAT_CHECK_CONDITION:
+       case SAS_ABORTED_TASK:
+               return AC_ERR_DEV;
+       case SAS_PROTO_RESPONSE:
+               /* This means the ending_fis has the error
+                * value; return 0 here to collect it
+                */
+               return 0;
+       default:
+               return 0;
        }
  }
  
@@@ -201,17 -195,18 +195,17 @@@ static unsigned int sas_ata_qc_issue(st
                memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
                task->total_xfer_len = qc->nbytes;
                task->num_scatter = qc->n_elem;
 +              task->data_dir = qc->dma_dir;
 +      } else if (qc->tf.protocol == ATA_PROT_NODATA) {
 +              task->data_dir = DMA_NONE;
        } else {
                for_each_sg(qc->sg, sg, qc->n_elem, si)
                        xfer += sg_dma_len(sg);
  
                task->total_xfer_len = xfer;
                task->num_scatter = si;
 -      }
 -
 -      if (qc->tf.protocol == ATA_PROT_NODATA)
 -              task->data_dir = DMA_NONE;
 -      else
                task->data_dir = qc->dma_dir;
 +      }
        task->scatter = qc->sg;
        task->ata_task.retry_count = 1;
        task->task_state_flags = SAS_TASK_STATE_PENDING;
diff --combined drivers/scsi/scsi_lib.c
  #endif
  
  static struct kmem_cache *scsi_sense_cache;
 -static struct kmem_cache *scsi_sense_isadma_cache;
  static DEFINE_MUTEX(scsi_sense_cache_mutex);
  
  static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
  
 -static inline struct kmem_cache *
 -scsi_select_sense_cache(bool unchecked_isa_dma)
 -{
 -      return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
 -}
 -
 -static void scsi_free_sense_buffer(bool unchecked_isa_dma,
 -                                 unsigned char *sense_buffer)
 -{
 -      kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
 -                      sense_buffer);
 -}
 -
 -static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
 -      gfp_t gfp_mask, int numa_node)
 -{
 -      return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
 -                                   gfp_mask, numa_node);
 -}
 -
  int scsi_init_sense_cache(struct Scsi_Host *shost)
  {
 -      struct kmem_cache *cache;
        int ret = 0;
  
        mutex_lock(&scsi_sense_cache_mutex);
 -      cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
 -      if (cache)
 -              goto exit;
 -
 -      if (shost->unchecked_isa_dma) {
 -              scsi_sense_isadma_cache =
 -                      kmem_cache_create("scsi_sense_cache(DMA)",
 -                              SCSI_SENSE_BUFFERSIZE, 0,
 -                              SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
 -              if (!scsi_sense_isadma_cache)
 -                      ret = -ENOMEM;
 -      } else {
 +      if (!scsi_sense_cache) {
                scsi_sense_cache =
                        kmem_cache_create_usercopy("scsi_sense_cache",
                                SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
                if (!scsi_sense_cache)
                        ret = -ENOMEM;
        }
 - exit:
        mutex_unlock(&scsi_sense_cache_mutex);
        return ret;
  }
@@@ -294,7 -328,8 +294,8 @@@ void scsi_device_unbusy(struct scsi_dev
        if (starget->can_queue > 0)
                atomic_dec(&starget->target_busy);
  
-       atomic_dec(&sdev->device_busy);
+       sbitmap_put(&sdev->budget_map, cmd->budget_token);
+       cmd->budget_token = -1;
  }
  
  static void scsi_kick_queue(struct request_queue *q)
@@@ -350,7 -385,7 +351,7 @@@ static void scsi_single_lun_run(struct 
  
  static inline bool scsi_device_is_busy(struct scsi_device *sdev)
  {
-       if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
+       if (scsi_device_busy(sdev) >= sdev->queue_depth)
                return true;
        if (atomic_read(&sdev->device_blocked) > 0)
                return true;
@@@ -964,8 -999,11 +965,11 @@@ static inline bool scsi_cmd_needs_dma_d
  }
  
  /**
-  * scsi_alloc_sgtables - allocate S/G tables for a command
-  * @cmd:  command descriptor we wish to initialize
+  * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
+  * @cmd: SCSI command data structure to initialize.
+  *
+  * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
+  * for @cmd.
   *
   * Returns:
   * * BLK_STS_OK       - on success
@@@ -1109,6 -1147,7 +1113,7 @@@ void scsi_init_command(struct scsi_devi
        unsigned long jiffies_at_alloc;
        int retries, to_clear;
        bool in_flight;
+       int budget_token = cmd->budget_token;
  
        if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
                flags |= SCMD_INITIALIZED;
        cmd->retries = retries;
        if (in_flight)
                __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+       cmd->budget_token = budget_token;
  
  }
  
@@@ -1220,19 -1260,20 +1226,20 @@@ scsi_device_state_check(struct scsi_dev
  }
  
  /*
-  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
-  * return 0.
-  *
-  * Called with the queue_lock held.
+  * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
+  * and return the token, otherwise return -1.
   */
  static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
  {
-       unsigned int busy;
+       int token;
  
-       busy = atomic_inc_return(&sdev->device_busy) - 1;
+       token = sbitmap_get(&sdev->budget_map);
        if (atomic_read(&sdev->device_blocked)) {
-               if (busy)
+               if (token < 0)
+                       goto out;
+               if (scsi_device_busy(sdev) > 1)
                        goto out_dec;
  
                /*
                                   "unblocking device at zero depth\n"));
        }
  
-       if (busy >= sdev->queue_depth)
-               goto out_dec;
-       return 1;
+       return token;
  out_dec:
-       atomic_dec(&sdev->device_busy);
-       return 0;
+       if (token >= 0)
+               sbitmap_put(&sdev->budget_map, token);
+ out:
+       return -1;
  }
  
  /*
@@@ -1394,10 -1434,14 +1400,14 @@@ static bool scsi_mq_lld_busy(struct req
        return false;
  }
  
- static void scsi_softirq_done(struct request *rq)
+ /*
+  * Block layer request completion callback. May be called from interrupt
+  * context.
+  */
+ static void scsi_complete(struct request *rq)
  {
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-       int disposition;
+       enum scsi_disposition disposition;
  
        INIT_LIST_HEAD(&cmd->eh_entry);
  
@@@ -1571,19 -1615,20 +1581,20 @@@ static void scsi_mq_done(struct scsi_cm
        blk_mq_complete_request(cmd->request);
  }
  
- static void scsi_mq_put_budget(struct request_queue *q)
+ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
  {
        struct scsi_device *sdev = q->queuedata;
  
-       atomic_dec(&sdev->device_busy);
+       sbitmap_put(&sdev->budget_map, budget_token);
  }
  
- static bool scsi_mq_get_budget(struct request_queue *q)
+ static int scsi_mq_get_budget(struct request_queue *q)
  {
        struct scsi_device *sdev = q->queuedata;
+       int token = scsi_dev_queue_ready(q, sdev);
  
-       if (scsi_dev_queue_ready(q, sdev))
-               return true;
+       if (token >= 0)
+               return token;
  
        atomic_inc(&sdev->restarts);
  
         * the .restarts flag, and the request queue will be run for handling
         * this request, see scsi_end_request().
         */
-       if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
+       if (unlikely(scsi_device_busy(sdev) == 0 &&
                                !scsi_device_blocked(sdev)))
                blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
-       return false;
+       return -1;
+ }
+ static void scsi_mq_set_rq_budget_token(struct request *req, int token)
+ {
+       struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+       cmd->budget_token = token;
+ }
+ static int scsi_mq_get_rq_budget_token(struct request *req)
+ {
+       struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+       return cmd->budget_token;
  }
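
As an illustration of the budget-token flow above, here is a rough, hypothetical sketch of how these callbacks are meant to be used for a single request (the example_dispatch_one() name is made up; in practice the blk-mq core, not driver code, makes these calls):

	static void example_dispatch_one(struct request_queue *q, struct request *rq)
	{
		int token;

		/* Reserve one bit of the device's budget sbitmap. */
		token = scsi_mq_get_budget(q);
		if (token < 0)
			return;	/* device already at its queue depth */

		/* Remember which bit this request owns ... */
		scsi_mq_set_rq_budget_token(rq, token);

		/*
		 * ... so that after completion (or a failed dispatch) the same
		 * bit can be returned to the bitmap exactly once.
		 */
		scsi_mq_put_budget(q, scsi_mq_get_rq_budget_token(rq));
	}
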
  
  static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_status_t ret;
        int reason;
  
+       WARN_ON_ONCE(cmd->budget_token < 0);
        /*
         * If the device is not in running state we will reject some or all
         * commands.
@@@ -1670,7 -1731,8 +1697,8 @@@ out_dec_target_busy
        if (scsi_target(sdev)->can_queue > 0)
                atomic_dec(&scsi_target(sdev)->target_busy);
  out_put_budget:
-       scsi_mq_put_budget(q);
+       scsi_mq_put_budget(q, cmd->budget_token);
+       cmd->budget_token = -1;
        switch (ret) {
        case BLK_STS_OK:
                break;
@@@ -1714,12 -1776,15 +1742,12 @@@ static int scsi_mq_init_request(struct 
                                unsigned int hctx_idx, unsigned int numa_node)
  {
        struct Scsi_Host *shost = set->driver_data;
 -      const bool unchecked_isa_dma = shost->unchecked_isa_dma;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
        struct scatterlist *sg;
        int ret = 0;
  
 -      if (unchecked_isa_dma)
 -              cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
 -      cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
 -                                                  GFP_KERNEL, numa_node);
 +      cmd->sense_buffer =
 +              kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
        if (!cmd->sense_buffer)
                return -ENOMEM;
        cmd->req.sense = cmd->sense_buffer;
        if (shost->hostt->init_cmd_priv) {
                ret = shost->hostt->init_cmd_priv(shost, cmd);
                if (ret < 0)
 -                      scsi_free_sense_buffer(unchecked_isa_dma,
 -                                             cmd->sense_buffer);
 +                      kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
        }
  
        return ret;
@@@ -1747,9 -1813,30 +1775,29 @@@ static void scsi_mq_exit_request(struc
  
        if (shost->hostt->exit_cmd_priv)
                shost->hostt->exit_cmd_priv(shost, cmd);
 -      scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
 -                             cmd->sense_buffer);
 +      kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
  }
  
+ static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx)
+ {
+       struct Scsi_Host *shost = hctx->driver_data;
+       if (shost->hostt->mq_poll)
+               return shost->hostt->mq_poll(shost, hctx->queue_num);
+       return 0;
+ }
+ static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                         unsigned int hctx_idx)
+ {
+       struct Scsi_Host *shost = data;
+       hctx->driver_data = shost;
+       return 0;
+ }
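
For illustration, a hypothetical low-level driver hook-up for the polling path above (all example_* names are made up; only the ->mq_poll signature is taken from the call in scsi_mq_poll()). scsi_init_hctx() stashes the Scsi_Host in hctx->driver_data so that scsi_mq_poll() can forward a blk-mq poll request to the driver:

	static int example_lld_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
	{
		/*
		 * Reap completions on the polled hardware queue and return how
		 * many requests were completed (0 if none were found).
		 */
		return 0;
	}

	static struct scsi_host_template example_lld_template = {
		.name		= "example_lld",
		.mq_poll	= example_lld_mq_poll,
		/* ... */
	};
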
  static int scsi_map_queues(struct blk_mq_tag_set *set)
  {
        struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
@@@ -1782,6 -1869,8 +1830,6 @@@ void __scsi_init_queue(struct Scsi_Hos
                                dma_max_mapping_size(dev) >> SECTOR_SHIFT);
        }
        blk_queue_max_hw_sectors(q, shost->max_sectors);
 -      if (shost->unchecked_isa_dma)
 -              blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
        blk_queue_segment_boundary(q, shost->dma_boundary);
        dma_set_seg_boundary(dev, shost->dma_boundary);
  
@@@ -1804,7 -1893,7 +1852,7 @@@ static const struct blk_mq_ops scsi_mq_
        .get_budget     = scsi_mq_get_budget,
        .put_budget     = scsi_mq_put_budget,
        .queue_rq       = scsi_queue_rq,
-       .complete       = scsi_softirq_done,
+       .complete       = scsi_complete,
        .timeout        = scsi_timeout,
  #ifdef CONFIG_BLK_DEBUG_FS
        .show_rq        = scsi_show_rq,
        .cleanup_rq     = scsi_cleanup_rq,
        .busy           = scsi_mq_lld_busy,
        .map_queues     = scsi_map_queues,
+       .init_hctx      = scsi_init_hctx,
+       .poll           = scsi_mq_poll,
+       .set_rq_budget_token = scsi_mq_set_rq_budget_token,
+       .get_rq_budget_token = scsi_mq_get_rq_budget_token,
  };
  
  
  static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
  {
-       struct request_queue *q = hctx->queue;
-       struct scsi_device *sdev = q->queuedata;
-       struct Scsi_Host *shost = sdev->host;
+       struct Scsi_Host *shost = hctx->driver_data;
  
        shost->hostt->commit_rqs(shost, hctx->queue_num);
  }
@@@ -1832,7 -1923,7 +1882,7 @@@ static const struct blk_mq_ops scsi_mq_
        .put_budget     = scsi_mq_put_budget,
        .queue_rq       = scsi_queue_rq,
        .commit_rqs     = scsi_commit_rqs,
-       .complete       = scsi_softirq_done,
+       .complete       = scsi_complete,
        .timeout        = scsi_timeout,
  #ifdef CONFIG_BLK_DEBUG_FS
        .show_rq        = scsi_show_rq,
        .cleanup_rq     = scsi_cleanup_rq,
        .busy           = scsi_mq_lld_busy,
        .map_queues     = scsi_map_queues,
+       .init_hctx      = scsi_init_hctx,
+       .poll           = scsi_mq_poll,
+       .set_rq_budget_token = scsi_mq_set_rq_budget_token,
+       .get_rq_budget_token = scsi_mq_get_rq_budget_token,
  };
  
  struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@@ -1875,6 -1970,7 +1929,7 @@@ int scsi_mq_setup_tags(struct Scsi_Hos
        else
                tag_set->ops = &scsi_mq_ops_no_commit;
        tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+       tag_set->nr_maps = shost->nr_maps ? : 1;
        tag_set->queue_depth = shost->can_queue;
        tag_set->cmd_size = cmd_size;
        tag_set->numa_node = NUMA_NO_NODE;
@@@ -1947,6 -2043,7 +2002,6 @@@ EXPORT_SYMBOL(scsi_unblock_requests)
  void scsi_exit_queue(void)
  {
        kmem_cache_destroy(scsi_sense_cache);
 -      kmem_cache_destroy(scsi_sense_isadma_cache);
  }
  
  /**
diff --combined drivers/scsi/scsi_scan.c
@@@ -215,6 -215,7 +215,7 @@@ static void scsi_unlock_floptical(struc
  static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
                                           u64 lun, void *hostdata)
  {
+       unsigned int depth;
        struct scsi_device *sdev;
        int display_failure_msg = 1, ret;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
        sdev->request_queue->queuedata = sdev;
  
-       scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
-                                       sdev->host->cmd_per_lun : 1);
+       depth = sdev->host->cmd_per_lun ?: 1;
+       /*
+        * Use .can_queue as the budget map's depth because we have to
+        * support adjusting the queue depth from sysfs. Meanwhile, use the
+        * default device queue depth to figure out the sbitmap shift,
+        * since that is the depth used most of the time.
+        */
+       if (sbitmap_init_node(&sdev->budget_map,
+                               scsi_device_max_queue_depth(sdev),
+                               sbitmap_calculate_shift(depth),
+                               GFP_KERNEL, sdev->request_queue->node,
+                               false, true)) {
+               put_device(&starget->dev);
+               kfree(sdev);
+               goto out;
+       }
+       scsi_change_queue_depth(sdev, depth);
  
        scsi_sysfs_device_initialize(sdev);
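
As a side note on the sizing comment above, a hedged sketch of why the budget map is allocated at its maximum depth up front (the example_change_queue_depth() helper is hypothetical, not part of this patch): a later queue-depth change, for instance through the sysfs queue_depth attribute, then only has to clamp and store the new value, and the sbitmap itself never needs to be resized:

	static int example_change_queue_depth(struct scsi_device *sdev, int depth)
	{
		/* Never allow more outstanding commands than budget bits. */
		if (depth > (int)sdev->budget_map.depth)
			depth = sdev->budget_map.depth;
		if (depth > 0)
			sdev->queue_depth = depth;
		return sdev->queue_depth;
	}
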
  
@@@ -979,6 -997,7 +997,7 @@@ static int scsi_add_lun(struct scsi_dev
                scsi_attach_vpd(sdev);
  
        sdev->max_queue_depth = sdev->queue_depth;
+       WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
        sdev->sdev_bflags = *bflags;
  
        /*
@@@ -1078,7 -1097,8 +1097,7 @@@ static int scsi_probe_and_add_lun(struc
        if (!sdev)
                goto out;
  
 -      result = kmalloc(result_len, GFP_KERNEL |
 -                      ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
 +      result = kmalloc(result_len, GFP_KERNEL);
        if (!result)
                goto out_free_sdev;
  
@@@ -1335,7 -1355,8 +1354,7 @@@ static int scsi_report_lun_scan(struct 
         */
        length = (511 + 1) * sizeof(struct scsi_lun);
  retry:
 -      lun_data = kmalloc(length, GFP_KERNEL |
 -                         (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
 +      lun_data = kmalloc(length, GFP_KERNEL);
        if (!lun_data) {
                printk(ALLOC_FAILURE_MSG, __func__);
                goto out;
@@@ -373,6 -373,7 +373,6 @@@ shost_rd_attr(cmd_per_lun, "%hd\n")
  shost_rd_attr(can_queue, "%d\n");
  shost_rd_attr(sg_tablesize, "%hu\n");
  shost_rd_attr(sg_prot_tablesize, "%hu\n");
 -shost_rd_attr(unchecked_isa_dma, "%d\n");
  shost_rd_attr(prot_capabilities, "%u\n");
  shost_rd_attr(prot_guard_type, "%hd\n");
  shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
@@@ -410,6 -411,7 +410,6 @@@ static struct attribute *scsi_sysfs_sho
        &dev_attr_can_queue.attr,
        &dev_attr_sg_tablesize.attr,
        &dev_attr_sg_prot_tablesize.attr,
 -      &dev_attr_unchecked_isa_dma.attr,
        &dev_attr_proc_name.attr,
        &dev_attr_scan.attr,
        &dev_attr_hstate.attr,
@@@ -475,6 -477,8 +475,8 @@@ static void scsi_device_dev_release_use
        /* NULL queue means the device can't be used */
        sdev->request_queue = NULL;
  
+       sbitmap_free(&sdev->budget_map);
        mutex_lock(&sdev->inquiry_mutex);
        vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
                                       lockdep_is_held(&sdev->inquiry_mutex));
@@@ -668,7 -672,7 +670,7 @@@ sdev_show_device_busy(struct device *de
                char *buf)
  {
        struct scsi_device *sdev = to_scsi_device(dev);
-       return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
+       return snprintf(buf, 20, "%d\n", scsi_device_busy(sdev));
  }
  static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
  
@@@ -1456,7 -1460,7 +1458,7 @@@ void __scsi_remove_device(struct scsi_d
  
        /*
         * Paired with the kref_get() in scsi_sysfs_initialize().  We have
-        * remoed sysfs visibility from the device, so make the target
+        * removed sysfs visibility from the device, so make the target
         * invisible if this was the last device underneath it.
         */
        scsi_target_reap(scsi_target(sdev));
diff --combined drivers/scsi/sg.c
@@@ -974,7 -974,7 +974,7 @@@ sg_ioctl_common(struct file *filp, Sg_d
                 */
                return 0;
        case SG_GET_LOW_DMA:
 -              return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
 +              return put_user(0, ip);
        case SG_GET_SCSI_ID:
                {
                        sg_scsi_id_t v;
@@@ -1777,6 -1777,7 +1777,6 @@@ sg_start_req(Sg_request *srp, unsigned 
  
        if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
            dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
 -          !sfp->parentdp->device->host->unchecked_isa_dma &&
            blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
                md = NULL;
        else
@@@ -1892,6 -1893,7 +1892,6 @@@ sg_build_indirect(Sg_scatter_hold * sch
        int sg_tablesize = sfp->parentdp->sg_tablesize;
        int blk_size = buff_size, order;
        gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
 -      struct sg_device *sdp = sfp->parentdp;
  
        if (blk_size < 0)
                return -EFAULT;
                        scatter_elem_sz_prev = num;
        }
  
 -      if (sdp->device->host->unchecked_isa_dma)
 -              gfp_mask |= GFP_DMA;
 -
        order = get_order(num);
  retry:
        ret_sz = 1 << (PAGE_SHIFT + order);
@@@ -2498,7 -2503,7 +2498,7 @@@ static int sg_proc_seq_show_dev(struct 
                              scsidp->id, scsidp->lun, (int) scsidp->type,
                              1,
                              (int) scsidp->queue_depth,
-                             (int) atomic_read(&scsidp->device_busy),
+                             (int) scsi_device_busy(scsidp),
                              (int) scsi_device_online(scsidp));
        }
        read_unlock_irqrestore(&sg_index_lock, iflags);
@@@ -2542,7 -2547,8 +2542,7 @@@ static void sg_proc_debug_helper(struc
                           "(res)sgat=%d low_dma=%d\n", k,
                           jiffies_to_msecs(fp->timeout),
                           fp->reserve.bufflen,
 -                         (int) fp->reserve.k_use_sg,
 -                         (int) sdp->device->host->unchecked_isa_dma);
 +                         (int) fp->reserve.k_use_sg, 0);
                seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
                           (int) fp->cmd_q, (int) fp->force_packid,
                           (int) fp->keep_orphan);
  #define BUILD_TIMESTAMP
  #endif
  
- #define DRIVER_VERSION                "1.2.16-012"
- #define DRIVER_MAJOR          1
- #define DRIVER_MINOR          2
- #define DRIVER_RELEASE                16
- #define DRIVER_REVISION               12
+ #define DRIVER_VERSION                "2.1.8-045"
+ #define DRIVER_MAJOR          2
+ #define DRIVER_MINOR          1
+ #define DRIVER_RELEASE                8
+ #define DRIVER_REVISION               45
  
  #define DRIVER_NAME           "Microsemi PQI Driver (v" \
                                DRIVER_VERSION BUILD_TIMESTAMP ")"
  
  #define PQI_EXTRA_SGL_MEMORY  (12 * sizeof(struct pqi_sg_descriptor))
  
+ #define PQI_POST_RESET_DELAY_SECS                     5
+ #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS    10
  MODULE_AUTHOR("Microsemi");
  MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
        DRIVER_VERSION);
 -MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
  MODULE_VERSION(DRIVER_VERSION);
  MODULE_LICENSE("GPL");
  
  static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
  static void pqi_ctrl_offline_worker(struct work_struct *work);
- static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
  static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
  static void pqi_scan_start(struct Scsi_Host *shost);
  static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
        struct pqi_io_request *io_request);
  static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
        struct pqi_iu_header *request, unsigned int flags,
-       struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
+       struct pqi_raid_error_info *error_info);
  static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
        unsigned int cdb_length, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info, bool raid_bypass);
+ static  int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+       struct pqi_scsi_dev_raid_map_data *rmd);
+ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+       struct pqi_scsi_dev_raid_map_data *rmd);
  static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
  static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
- static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
- static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
-       u32 bytes_requested);
+ static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
+ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
  static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
  static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
  static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device, unsigned long timeout_secs);
+       struct pqi_scsi_dev *device, unsigned long timeout_msecs);
  
  /* for flags argument to pqi_submit_raid_request_synchronous() */
  #define PQI_SYNC_FLAGS_INTERRUPTABLE  0x1
@@@ -147,14 -157,12 +156,12 @@@ MODULE_PARM_DESC(lockup_action, "Actio
  static int pqi_expose_ld_first;
  module_param_named(expose_ld_first,
        pqi_expose_ld_first, int, 0644);
- MODULE_PARM_DESC(expose_ld_first,
-       "Expose logical drives before physical drives.");
+ MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
  
  static int pqi_hide_vsep;
  module_param_named(hide_vsep,
        pqi_hide_vsep, int, 0644);
- MODULE_PARM_DESC(hide_vsep,
-       "Hide the virtual SEP for direct attached drives.");
+ MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
  
  static char *raid_levels[] = {
        "RAID-0",
        "RAID-1(1+0)",
        "RAID-5",
        "RAID-5+1",
-       "RAID-ADG",
-       "RAID-1(ADM)",
+       "RAID-6",
+       "RAID-1(Triple)",
  };
  
  static char *pqi_raid_level_to_string(u8 raid_level)
  #define SA_RAID_5             3       /* also used for RAID 50 */
  #define SA_RAID_51            4
  #define SA_RAID_6             5       /* also used for RAID 60 */
- #define SA_RAID_ADM           6       /* also used for RAID 1+0 ADM */
- #define SA_RAID_MAX           SA_RAID_ADM
+ #define SA_RAID_TRIPLE                6       /* also used for RAID 1+0 Triple */
+ #define SA_RAID_MAX           SA_RAID_TRIPLE
  #define SA_RAID_UNKNOWN               0xff
  
  static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
@@@ -227,8 -235,7 +234,7 @@@ static inline bool pqi_is_hba_lunid(u8 
        return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
  }
  
- static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
-       struct pqi_ctrl_info *ctrl_info)
+ static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
  {
        return sis_read_driver_scratch(ctrl_info);
  }
@@@ -239,14 -246,66 +245,66 @@@ static inline void pqi_save_ctrl_mode(s
        sis_write_driver_scratch(ctrl_info, mode);
  }
  
+ static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
+ {
+       ctrl_info->scan_blocked = true;
+       mutex_lock(&ctrl_info->scan_mutex);
+ }
+ static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
+ {
+       ctrl_info->scan_blocked = false;
+       mutex_unlock(&ctrl_info->scan_mutex);
+ }
+ static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
+ {
+       return ctrl_info->scan_blocked;
+ }
  static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
  {
-       ctrl_info->block_device_reset = true;
+       mutex_lock(&ctrl_info->lun_reset_mutex);
+ }
+ static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
+ {
+       mutex_unlock(&ctrl_info->lun_reset_mutex);
+ }
+ static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
+ {
+       struct Scsi_Host *shost;
+       unsigned int num_loops;
+       int msecs_sleep;
+       shost = ctrl_info->scsi_host;
+       scsi_block_requests(shost);
+       num_loops = 0;
+       msecs_sleep = 20;
+       while (scsi_host_busy(shost)) {
+               num_loops++;
+               if (num_loops == 10)
+                       msecs_sleep = 500;
+               msleep(msecs_sleep);
+       }
+ }
+ static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
+ {
+       scsi_unblock_requests(ctrl_info->scsi_host);
+ }
+ static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+ {
+       atomic_inc(&ctrl_info->num_busy_threads);
  }
  
- static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+ static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
  {
-       return ctrl_info->block_device_reset;
+       atomic_dec(&ctrl_info->num_busy_threads);
  }
  
  static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
  static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
  {
        ctrl_info->block_requests = true;
-       scsi_block_requests(ctrl_info->scsi_host);
  }
  
  static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
  {
        ctrl_info->block_requests = false;
        wake_up_all(&ctrl_info->block_requests_wait);
-       pqi_retry_raid_bypass_requests(ctrl_info);
-       scsi_unblock_requests(ctrl_info->scsi_host);
  }
  
- static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
-       unsigned long timeout_msecs)
+ static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
  {
-       unsigned long remaining_msecs;
        if (!pqi_ctrl_blocked(ctrl_info))
-               return timeout_msecs;
+               return;
  
        atomic_inc(&ctrl_info->num_blocked_threads);
-       if (timeout_msecs == NO_TIMEOUT) {
-               wait_event(ctrl_info->block_requests_wait,
-                       !pqi_ctrl_blocked(ctrl_info));
-               remaining_msecs = timeout_msecs;
-       } else {
-               unsigned long remaining_jiffies;
-               remaining_jiffies =
-                       wait_event_timeout(ctrl_info->block_requests_wait,
-                               !pqi_ctrl_blocked(ctrl_info),
-                               msecs_to_jiffies(timeout_msecs));
-               remaining_msecs = jiffies_to_msecs(remaining_jiffies);
-       }
+       wait_event(ctrl_info->block_requests_wait,
+               !pqi_ctrl_blocked(ctrl_info));
        atomic_dec(&ctrl_info->num_blocked_threads);
-       return remaining_msecs;
  }
  
+ #define PQI_QUIESCE_WARNING_TIMEOUT_SECS              10
  static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
  {
+       unsigned long start_jiffies;
+       unsigned long warning_timeout;
+       bool displayed_warning;
+       displayed_warning = false;
+       start_jiffies = jiffies;
+       warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
        while (atomic_read(&ctrl_info->num_busy_threads) >
-               atomic_read(&ctrl_info->num_blocked_threads))
+               atomic_read(&ctrl_info->num_blocked_threads)) {
+               if (time_after(jiffies, warning_timeout)) {
+                       dev_warn(&ctrl_info->pci_dev->dev,
+                               "waiting %u seconds for driver activity to quiesce\n",
+                               jiffies_to_msecs(jiffies - start_jiffies) / 1000);
+                       displayed_warning = true;
+                       warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+               }
                usleep_range(1000, 2000);
+       }
+       if (displayed_warning)
+               dev_warn(&ctrl_info->pci_dev->dev,
+                       "driver activity quiesced after waiting for %u seconds\n",
+                       jiffies_to_msecs(jiffies - start_jiffies) / 1000);
  }
  
  static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
        return device->device_offline;
  }
  
- static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
- {
-       device->in_reset = true;
- }
- static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
- {
-       device->in_reset = false;
- }
- static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
+ static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
  {
-       return device->in_reset;
+       mutex_lock(&ctrl_info->ofa_mutex);
  }
  
- static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+ static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
  {
-       ctrl_info->in_ofa = true;
+       mutex_unlock(&ctrl_info->ofa_mutex);
  }
  
- static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+ static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
  {
-       ctrl_info->in_ofa = false;
+       mutex_lock(&ctrl_info->ofa_mutex);
+       mutex_unlock(&ctrl_info->ofa_mutex);
  }
  
- static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+ static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
  {
-       return ctrl_info->in_ofa;
+       return mutex_is_locked(&ctrl_info->ofa_mutex);
  }
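
A rough sketch of the intended usage of the mutex-based OFA tracking above (assumed usage; the example_* functions are not part of this patch): the OFA path brackets the online firmware activation with pqi_ctrl_ofa_start()/pqi_ctrl_ofa_done(), while any path that must not run concurrently simply waits for the mutex to become free:

	static void example_ofa_path(struct pqi_ctrl_info *ctrl_info)
	{
		pqi_ctrl_ofa_start(ctrl_info);		/* takes ofa_mutex */
		/* ... quiesce I/O, hand memory to firmware, soft reset ... */
		pqi_ctrl_ofa_done(ctrl_info);		/* releases ofa_mutex */
	}

	static void example_waiter(struct pqi_ctrl_info *ctrl_info)
	{
		/* Blocks until any in-progress OFA has finished. */
		pqi_wait_until_ofa_finished(ctrl_info);
	}
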
  
  static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
@@@ -349,23 -401,27 +400,27 @@@ static inline bool pqi_device_in_remove
        return device->in_remove;
  }
  
- static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+ static inline int pqi_event_type_to_event_index(unsigned int event_type)
  {
-       ctrl_info->in_shutdown = true;
+       int index;
+       for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
+               if (event_type == pqi_supported_event_types[index])
+                       return index;
+       return -1;
  }
  
- static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+ static inline bool pqi_is_supported_event(unsigned int event_type)
  {
-       return ctrl_info->in_shutdown;
+       return pqi_event_type_to_event_index(event_type) != -1;
  }
  
- static inline void pqi_schedule_rescan_worker_with_delay(
-       struct pqi_ctrl_info *ctrl_info, unsigned long delay)
+ static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
+       unsigned long delay)
  {
        if (pqi_ctrl_offline(ctrl_info))
                return;
-       if (pqi_ctrl_in_ofa(ctrl_info))
-               return;
  
        schedule_delayed_work(&ctrl_info->rescan_work, delay);
  }
@@@ -377,8 -433,7 +432,7 @@@ static inline void pqi_schedule_rescan_
  
  #define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
  
- static inline void pqi_schedule_rescan_worker_delayed(
-       struct pqi_ctrl_info *ctrl_info)
+ static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
  {
        pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
  }
@@@ -388,11 -443,6 +442,6 @@@ static inline void pqi_cancel_rescan_wo
        cancel_delayed_work_sync(&ctrl_info->rescan_work);
  }
  
- static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
- {
-       cancel_work_sync(&ctrl_info->event_work);
- }
  static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
  {
        if (!ctrl_info->heartbeat_counter)
  
  static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
  {
-       if (!ctrl_info->soft_reset_status)
-               return 0;
        return readb(ctrl_info->soft_reset_status);
  }
  
- static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
-       u8 clear)
+ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
  {
        u8 status;
  
-       if (!ctrl_info->soft_reset_status)
-               return;
        status = pqi_read_soft_reset_status(ctrl_info);
-       status &= ~clear;
+       status &= ~PQI_SOFT_RESET_ABORT;
        writeb(status, ctrl_info->soft_reset_status);
  }
  
@@@ -497,7 -540,7 +539,7 @@@ static int pqi_build_raid_path_request(
                if (cmd == CISS_REPORT_PHYS)
                        cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
                else
-                       cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+                       cdb[1] = ctrl_info->ciss_report_log_flags;
                put_unaligned_be32(cdb_length, &cdb[6]);
                break;
        case CISS_GET_RAID_MAP:
                put_unaligned_be32(cdb_length, &cdb[6]);
                break;
        case SA_FLUSH_CACHE:
+               request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
                request->data_direction = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = BMIC_FLUSH_CACHE;
        case BMIC_IDENTIFY_CONTROLLER:
        case BMIC_IDENTIFY_PHYSICAL_DEVICE:
        case BMIC_SENSE_SUBSYSTEM_INFORMATION:
+       case BMIC_SENSE_FEATURE:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = BMIC_READ;
                cdb[6] = cmd;
@@@ -600,20 -645,18 +644,18 @@@ static void pqi_free_io_request(struct 
  
  static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
        u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
-       struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+       struct pqi_raid_error_info *error_info)
  {
        int rc;
        struct pqi_raid_path_request request;
        enum dma_data_direction dir;
  
-       rc = pqi_build_raid_path_request(ctrl_info, &request,
-               cmd, scsi3addr, buffer,
-               buffer_length, vpd_page, &dir);
+       rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
+               buffer, buffer_length, vpd_page, &dir);
        if (rc)
                return rc;
  
-       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-               error_info, timeout_msecs);
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
  
        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
  
@@@ -626,7 -669,7 +668,7 @@@ static inline int pqi_send_ctrl_raid_re
        u8 cmd, void *buffer, size_t buffer_length)
  {
        return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
-               buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+               buffer, buffer_length, 0, NULL);
  }
  
  static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
        struct pqi_raid_error_info *error_info)
  {
        return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
-               buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+               buffer, buffer_length, 0, error_info);
  }
  
  static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
@@@ -656,7 -699,7 +698,7 @@@ static inline int pqi_scsi_inquiry(stru
        u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
  {
        return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
-               buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
+               buffer, buffer_length, vpd_page, NULL);
  }
  
  static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
        request.cdb[2] = (u8)bmic_device_index;
        request.cdb[9] = (u8)(bmic_device_index >> 8);
  
-       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               0, NULL, NO_TIMEOUT);
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+       return rc;
+ }
+ 
+ static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
+ {
+       u32 bytes;
+       bytes = get_unaligned_le16(limit);
+       if (bytes == 0)
+               bytes = ~0;
+       else
+               bytes *= 1024;
+       return bytes;
+ }
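
pqi_aio_limit_to_bytes() interprets the 16-bit limits returned by BMIC_SENSE_FEATURE: the value is in kibibytes, and 0 means "no restriction", which the driver widens to ~0 (all ones). A tiny stand-alone check of that conversion (plain C, with the unaligned-read helper dropped; purely illustrative):

#include <assert.h>
#include <stdint.h>

/* Same conversion as pqi_aio_limit_to_bytes(), minus the unaligned-read helper. */
static uint32_t aio_limit_to_bytes(uint16_t limit_kib)
{
	return limit_kib == 0 ? ~0U : (uint32_t)limit_kib * 1024;
}

int main(void)
{
	assert(aio_limit_to_bytes(0) == 0xFFFFFFFFu);         /* 0 == unrestricted */
	assert(aio_limit_to_bytes(1) == 1024u);               /* 1 KiB */
	assert(aio_limit_to_bytes(65535) == 65535u * 1024u);  /* largest encodable limit */
	return 0;
}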
+ #pragma pack(1)
+ 
+ struct bmic_sense_feature_buffer {
+       struct bmic_sense_feature_buffer_header header;
+       struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
+ };
+ 
+ #pragma pack()
+ 
+ #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH     \
+       offsetofend(struct bmic_sense_feature_buffer, \
+               aio_subpage.max_write_raid_1_10_3drive)
+ #define MINIMUM_AIO_SUBPAGE_LENGTH    \
+       (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
+               max_write_raid_1_10_3drive) - \
+               sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
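
The two minimum-length macros are built from offsetofend() and sizeof_field() so that the buffer and page length checks only require the fields this driver actually reads, ending at max_write_raid_1_10_3drive. For readers unfamiliar with those kernel helpers, the sketch below defines user-space equivalents and applies them to a made-up struct (the struct is hypothetical, not the real BMIC layout):

#include <stddef.h>
#include <stdio.h>

/* User-space stand-ins for the kernel's sizeof_field()/offsetofend() macros. */
#define sizeof_field(TYPE, MEMBER)  (sizeof(((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER)   (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

struct toy_subpage {
	unsigned short header;     /* offset 0, size 2 */
	unsigned short limit_a;    /* offset 2, size 2 */
	unsigned short limit_b;    /* offset 4, size 2 -- last field we care about */
	unsigned short reserved;   /* trailing fields may be absent in older firmware */
};

int main(void)
{
	/* Everything up to and including limit_b: 6 bytes. */
	printf("offsetofend(limit_b) = %zu\n", offsetofend(struct toy_subpage, limit_b));
	/* Same span, excluding the leading header: 6 - 2 = 4 bytes. */
	printf("minimum page length  = %zu\n",
	       offsetofend(struct toy_subpage, limit_b) -
	       sizeof_field(struct toy_subpage, header));
	return 0;
}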
+ static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
+ {
+       int rc;
+       enum dma_data_direction dir;
+       struct pqi_raid_path_request request;
+       struct bmic_sense_feature_buffer *buffer;
+       buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+       rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
+               buffer, sizeof(*buffer), 0, &dir);
+       if (rc)
+               goto error;
+       request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
+       request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
  
        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
  
+       if (rc)
+               goto error;
+       if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
+               buffer->header.subpage_code !=
+                       BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+               get_unaligned_le16(&buffer->header.buffer_length) <
+                       MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
+               buffer->aio_subpage.header.page_code !=
+                       BMIC_SENSE_FEATURE_IO_PAGE ||
+               buffer->aio_subpage.header.subpage_code !=
+                       BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+               get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
+                       MINIMUM_AIO_SUBPAGE_LENGTH) {
+               goto error;
+       }
+       ctrl_info->max_transfer_encrypted_sas_sata =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
+       ctrl_info->max_transfer_encrypted_nvme =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_transfer_encrypted_nvme);
+       ctrl_info->max_write_raid_5_6 =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_write_raid_5_6);
+       ctrl_info->max_write_raid_1_10_2drive =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_write_raid_1_10_2drive);
+       ctrl_info->max_write_raid_1_10_3drive =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_write_raid_1_10_3drive);
+ error:
+       kfree(buffer);
        return rc;
  }
  
@@@ -692,13 -831,6 +830,6 @@@ static int pqi_flush_cache(struct pqi_c
        int rc;
        struct bmic_flush_cache *flush_cache;
  
-       /*
-        * Don't bother trying to flush the cache if the controller is
-        * locked up.
-        */
-       if (pqi_ctrl_offline(ctrl_info))
-               return -ENXIO;
        flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
        if (!flush_cache)
                return -ENOMEM;
@@@ -877,9 -1009,6 +1008,6 @@@ static void pqi_update_time_worker(stru
        ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
                update_time_work);
  
-       if (pqi_ctrl_offline(ctrl_info))
-               return;
        rc = pqi_write_current_time_to_host_wellness(ctrl_info);
        if (rc)
                dev_warn(&ctrl_info->pci_dev->dev,
                PQI_UPDATE_TIME_WORK_INTERVAL);
  }
  
- static inline void pqi_schedule_update_time_worker(
-       struct pqi_ctrl_info *ctrl_info)
+ static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
  {
        schedule_delayed_work(&ctrl_info->update_time_work, 0);
  }
  
- static inline void pqi_cancel_update_time_worker(
-       struct pqi_ctrl_info *ctrl_info)
+ static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
  {
        cancel_delayed_work_sync(&ctrl_info->update_time_work);
  }
  
- static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
-       void *buffer, size_t buffer_length)
+ static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
+       size_t buffer_length)
  {
-       return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
-               buffer_length);
+       return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
  }
  
- static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
-       void **buffer)
+ static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
  {
        int rc;
        size_t lun_list_length;
                goto out;
        }
  
-       rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
-               sizeof(*report_lun_header));
+       rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
        if (rc)
                goto out;
  
@@@ -949,8 -1073,8 +1072,8 @@@ again
        if (rc)
                goto out;
  
-       new_lun_list_length = get_unaligned_be32(
-               &((struct report_lun_header *)lun_data)->list_length);
+       new_lun_list_length =
+               get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
  
        if (new_lun_list_length > lun_list_length) {
                lun_list_length = new_lun_list_length;
        return rc;
  }
  
- static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
-       void **buffer)
+ static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
  {
-       return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
-               buffer);
+       return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer);
  }
  
- static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
-       void **buffer)
+ static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
  {
        return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
  }
@@@ -1136,9 -1257,9 +1256,9 @@@ static int pqi_validate_raid_map(struc
                        err_msg = "invalid RAID-1 map";
                        goto bad_raid_map;
                }
-       } else if (device->raid_level == SA_RAID_ADM) {
+       } else if (device->raid_level == SA_RAID_TRIPLE) {
                if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
-                       err_msg = "invalid RAID-1(ADM) map";
+                       err_msg = "invalid RAID-1(Triple) map";
                        goto bad_raid_map;
                }
        } else if ((device->raid_level == SA_RAID_5 ||
@@@ -1177,9 -1298,7 +1297,7 @@@ static int pqi_get_raid_map(struct pqi_
                return -ENOMEM;
  
        rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
-               device->scsi3addr, raid_map, sizeof(*raid_map),
-               0, NULL, NO_TIMEOUT);
+               device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
        if (rc)
                goto error;
  
                        return -ENOMEM;
  
                rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
-                       device->scsi3addr, raid_map, raid_map_size,
-                       0, NULL, NO_TIMEOUT);
+                       device->scsi3addr, raid_map, raid_map_size, 0, NULL);
                if (rc)
                        goto error;
  
                if (get_unaligned_le32(&raid_map->structure_size)
                        != raid_map_size) {
                        dev_warn(&ctrl_info->pci_dev->dev,
-                               "Requested %d bytes, received %d bytes",
+                               "requested %u bytes, received %u bytes\n",
                                raid_map_size,
                                get_unaligned_le32(&raid_map->structure_size));
                        goto error;
@@@ -1223,6 -1341,39 +1340,39 @@@ error
        return rc;
  }
  
+ static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+ {
+       if (!ctrl_info->lv_drive_type_mix_valid) {
+               device->max_transfer_encrypted = ~0;
+               return;
+       }
+       switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
+       case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SAS_ONLY:
+       case LV_DRIVE_TYPE_MIX_SATA_ONLY:
+               device->max_transfer_encrypted =
+                       ctrl_info->max_transfer_encrypted_sas_sata;
+               break;
+       case LV_DRIVE_TYPE_MIX_NVME_ONLY:
+               device->max_transfer_encrypted =
+                       ctrl_info->max_transfer_encrypted_nvme;
+               break;
+       case LV_DRIVE_TYPE_MIX_UNKNOWN:
+       case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
+       default:
+               device->max_transfer_encrypted =
+                       min(ctrl_info->max_transfer_encrypted_sas_sata,
+                               ctrl_info->max_transfer_encrypted_nvme);
+               break;
+       }
+ }
+ 
  static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
  {
                (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
        if (device->raid_bypass_configured &&
                (bypass_status & RAID_BYPASS_ENABLED) &&
-               pqi_get_raid_map(ctrl_info, device) == 0)
+               pqi_get_raid_map(ctrl_info, device) == 0) {
                device->raid_bypass_enabled = true;
+               if (get_unaligned_le16(&device->raid_map->flags) &
+                       RAID_MAP_ENCRYPTION_ENABLED)
+                       pqi_set_max_transfer_encrypted(ctrl_info, device);
+       }
  
  out:
        kfree(buffer);
@@@ -1297,6 -1452,8 +1451,8 @@@ no_buffer
        device->volume_offline = volume_offline;
  }
  
+ #define PQI_DEVICE_PHY_MAP_SUPPORTED  0x10
+ 
  static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device,
        struct bmic_identify_physical_device *id_phys)
                sizeof(device->phys_connector));
        device->bay = id_phys->phys_bay_in_box;
  
+       memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
+               sizeof(device->page_83_identifier));
+       if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
+               id_phys->phy_count)
+               device->phy_id =
+                       id_phys->phy_to_phy_map[device->active_path_index];
+       else
+               device->phy_id = 0xFF;
        return 0;
  }
  
@@@ -1520,16 -1687,16 +1686,16 @@@ static int pqi_add_device(struct pqi_ct
        return rc;
  }
  
- #define PQI_PENDING_IO_TIMEOUT_SECS   20
+ #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS    (20 * 1000)
  
- static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device)
+ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
  {
        int rc;
  
        pqi_device_remove_start(device);
  
-       rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
+       rc = pqi_device_wait_for_pending_io(ctrl_info, device,
+               PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
@@@ -1557,8 -1724,7 +1723,7 @@@ static struct pqi_scsi_dev *pqi_find_sc
        return NULL;
  }
  
- static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
-       struct pqi_scsi_dev *dev2)
+ static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
  {
        if (dev1->is_physical_device != dev2->is_physical_device)
                return false;
        if (dev1->is_physical_device)
                return dev1->wwid == dev2->wwid;
  
-       return memcmp(dev1->volume_id, dev2->volume_id,
-               sizeof(dev1->volume_id)) == 0;
+       return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
  }
  
  enum pqi_find_result {
@@@ -1612,7 -1777,7 +1776,7 @@@ static void pqi_dev_info(struct pqi_ctr
        ssize_t count;
        char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
  
-       count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
+       count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
                "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
  
        if (device->target_lun_valid)
  static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
        struct pqi_scsi_dev *new_device)
  {
-       existing_device->devtype = new_device->devtype;
        existing_device->device_type = new_device->device_type;
        existing_device->bus = new_device->bus;
        if (new_device->target_lun_valid) {
        existing_device->aio_handle = new_device->aio_handle;
        existing_device->volume_status = new_device->volume_status;
        existing_device->active_path_index = new_device->active_path_index;
+       existing_device->phy_id = new_device->phy_id;
        existing_device->path_map = new_device->path_map;
        existing_device->bay = new_device->bay;
        existing_device->box_index = new_device->box_index;
        existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
-       existing_device->phy_connected_dev_type =
-               new_device->phy_connected_dev_type;
+       existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
        memcpy(existing_device->box, new_device->box,
                sizeof(existing_device->box));
        memcpy(existing_device->phys_connector, new_device->phys_connector,
                sizeof(existing_device->phys_connector));
-       existing_device->offload_to_mirror = 0;
+       existing_device->next_bypass_group = 0;
        kfree(existing_device->raid_map);
        existing_device->raid_map = new_device->raid_map;
        existing_device->raid_bypass_configured =
@@@ -1843,8 -2007,18 +2006,18 @@@ static void pqi_update_device_list(stru
  
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  
-       if (pqi_ctrl_in_ofa(ctrl_info))
-               pqi_ctrl_ofa_done(ctrl_info);
+       /*
+        * If OFA is in progress and there are devices that need to be deleted,
+        * allow any pending reset operations to continue and unblock any SCSI
+        * requests before removal.
+        */
+       if (pqi_ofa_in_progress(ctrl_info)) {
+               list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
+                       if (pqi_is_device_added(device))
+                               pqi_device_remove_start(device);
+               pqi_ctrl_unblock_device_reset(ctrl_info);
+               pqi_scsi_unblock_requests(ctrl_info);
+       }
  
        /* Remove all devices that have gone away. */
        list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
         * Notify the SCSI ML if the queue depth of any existing device has
         * changed.
         */
-       list_for_each_entry(device, &ctrl_info->scsi_device_list,
-               scsi_device_list_entry) {
-               if (device->sdev) {
-                       if (device->queue_depth !=
-                               device->advertised_queue_depth) {
-                               device->advertised_queue_depth = device->queue_depth;
-                               scsi_change_queue_depth(device->sdev,
-                                       device->advertised_queue_depth);
-                       }
+       list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+               if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
+                       device->advertised_queue_depth = device->queue_depth;
+                       scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
                        if (device->rescan) {
                                scsi_rescan_device(&device->sdev->sdev_gendev);
                                device->rescan = false;
@@@ -1910,7 -2079,7 +2078,7 @@@ static inline bool pqi_is_supported_dev
         */
        if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
                !pqi_is_hba_lunid(device->scsi3addr))
-               return false;
+                       return false;
  
        return true;
  }
@@@ -1943,8 -2112,17 +2111,17 @@@ static inline bool pqi_is_device_with_s
  
  static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
  {
-       return !device->is_physical_device ||
-               !pqi_skip_device(device->scsi3addr);
+       return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
+ }
+ 
+ static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+ {
+       if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
+               pqi_is_device_with_sas_address(device))
+               device->wwid = phys_lun_ext_entry->wwid;
+       else
+               device->wwid = cpu_to_be64(get_unaligned_be64(&device->page_83_identifier));
  }
  
  static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
                        for (i = num_physicals - 1; i >= 0; i--) {
                                phys_lun_ext_entry =
                                                &physdev_list->lun_entries[i];
-                               if (CISS_GET_DRIVE_NUMBER(
-                                       phys_lun_ext_entry->lunid) ==
-                                               PQI_VSEP_CISS_BTL) {
-                                       pqi_mask_device(
-                                               phys_lun_ext_entry->lunid);
+                               if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) {
+                                       pqi_mask_device(phys_lun_ext_entry->lunid);
                                        break;
                                }
                        }
                }
        }
  
+       if (num_logicals &&
+               (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
+               ctrl_info->lv_drive_type_mix_valid = true;
        num_new_devices = num_physicals + num_logicals;
  
        new_device_list = kmalloc_array(num_new_devices,
                        if (device->is_physical_device)
                                dev_warn(&ctrl_info->pci_dev->dev,
                                        "obtaining device info failed, skipping physical device %016llx\n",
-                                       get_unaligned_be64(
-                                               &phys_lun_ext_entry->wwid));
+                                       get_unaligned_be64(&phys_lun_ext_entry->wwid));
                        else
                                dev_warn(&ctrl_info->pci_dev->dev,
                                        "obtaining device info failed, skipping logical device %08x%08x\n",
                pqi_assign_bus_target_lun(device);
  
                if (device->is_physical_device) {
-                       device->wwid = phys_lun_ext_entry->wwid;
+                       pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry);
                        if ((phys_lun_ext_entry->device_flags &
                                CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
                                phys_lun_ext_entry->aio_handle) {
-                               device->aio_enabled = true;
-                               device->aio_handle =
-                                       phys_lun_ext_entry->aio_handle;
+                                       device->aio_enabled = true;
+                                       device->aio_handle =
+                                               phys_lun_ext_entry->aio_handle;
                        }
                } else {
                        memcpy(device->volume_id, log_lun_ext_entry->volume_id,
  
  static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
  {
-       int rc = 0;
+       int rc;
+       int mutex_acquired;
  
        if (pqi_ctrl_offline(ctrl_info))
                return -ENXIO;
  
-       if (!mutex_trylock(&ctrl_info->scan_mutex)) {
+       mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+       if (!mutex_acquired) {
+               if (pqi_ctrl_scan_blocked(ctrl_info))
+                       return -EBUSY;
                pqi_schedule_rescan_worker_delayed(ctrl_info);
-               rc = -EINPROGRESS;
-       } else {
-               rc = pqi_update_scsi_devices(ctrl_info);
-               if (rc)
-                       pqi_schedule_rescan_worker_delayed(ctrl_info);
-               mutex_unlock(&ctrl_info->scan_mutex);
+               return -EINPROGRESS;
        }
  
+       rc = pqi_update_scsi_devices(ctrl_info);
+       if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
+               pqi_schedule_rescan_worker_delayed(ctrl_info);
+       mutex_unlock(&ctrl_info->scan_mutex);
        return rc;
  }
  
@@@ -2175,8 -2359,6 +2358,6 @@@ static void pqi_scan_start(struct Scsi_
        struct pqi_ctrl_info *ctrl_info;
  
        ctrl_info = shost_to_hba(shost);
-       if (pqi_ctrl_in_ofa(ctrl_info))
-               return;
  
        pqi_scan_scsi_devices(ctrl_info);
  }
@@@ -2193,27 -2375,8 +2374,8 @@@ static int pqi_scan_finished(struct Scs
        return !mutex_is_locked(&ctrl_info->scan_mutex);
  }
  
- static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
- {
-       mutex_lock(&ctrl_info->scan_mutex);
-       mutex_unlock(&ctrl_info->scan_mutex);
- }
- static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
- {
-       mutex_lock(&ctrl_info->lun_reset_mutex);
-       mutex_unlock(&ctrl_info->lun_reset_mutex);
- }
- static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
- {
-       mutex_lock(&ctrl_info->ofa_mutex);
-       mutex_unlock(&ctrl_info->ofa_mutex);
- }
- static inline void pqi_set_encryption_info(
-       struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
-       u64 first_block)
+ static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
+       struct raid_map *raid_map, u64 first_block)
  {
        u32 volume_blk_size;
  
   * Attempt to perform RAID bypass mapping for a logical volume I/O.
   */
  
+ static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev_raid_map_data *rmd)
+ {
+       bool is_supported = true;
+       switch (rmd->raid_level) {
+       case SA_RAID_0:
+               break;
+       case SA_RAID_1:
+               if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+                       rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
+                       is_supported = false;
+               break;
+       case SA_RAID_TRIPLE:
+               if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+                       rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
+                       is_supported = false;
+               break;
+       case SA_RAID_5:
+               if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
+                       rmd->data_length > ctrl_info->max_write_raid_5_6))
+                       is_supported = false;
+               break;
+       case SA_RAID_6:
+               if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
+                       rmd->data_length > ctrl_info->max_write_raid_5_6))
+                       is_supported = false;
+               break;
+       default:
+               is_supported = false;
+               break;
+       }
+       return is_supported;
+ }
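
pqi_aio_raid_level_supported() is the new gate on the bypass path: reads of the listed RAID levels remain eligible, while writes must also have the matching enable_r*_writes knob set and must not exceed the corresponding max_write_* limit learned from the controller. A condensed, stand-alone restatement of the RAID 5/6 write rule (field names copied for readability; values invented):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirrors the SA_RAID_5/SA_RAID_6 write case in the switch above. */
static bool r56_write_bypass_ok(bool enable_writes, uint32_t data_length,
				uint32_t max_write_raid_5_6)
{
	return enable_writes && data_length <= max_write_raid_5_6;
}

int main(void)
{
	assert(!r56_write_bypass_ok(false, 4096, 65536));   /* knob off -> normal path */
	assert(!r56_write_bypass_ok(true, 131072, 65536));  /* too large -> normal path */
	assert(r56_write_bypass_ok(true, 4096, 65536));     /* small enough -> bypass */
	return 0;
}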
  #define PQI_RAID_BYPASS_INELIGIBLE    1
  
- static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
-       struct pqi_queue_group *queue_group)
+ static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
+       struct pqi_scsi_dev_raid_map_data *rmd)
  {
-       struct raid_map *raid_map;
-       bool is_write = false;
-       u32 map_index;
-       u64 first_block;
-       u64 last_block;
-       u32 block_cnt;
-       u32 blocks_per_row;
-       u64 first_row;
-       u64 last_row;
-       u32 first_row_offset;
-       u32 last_row_offset;
-       u32 first_column;
-       u32 last_column;
-       u64 r0_first_row;
-       u64 r0_last_row;
-       u32 r5or6_blocks_per_row;
-       u64 r5or6_first_row;
-       u64 r5or6_last_row;
-       u32 r5or6_first_row_offset;
-       u32 r5or6_last_row_offset;
-       u32 r5or6_first_column;
-       u32 r5or6_last_column;
-       u16 data_disks_per_row;
-       u32 total_disks_per_row;
-       u16 layout_map_count;
-       u32 stripesize;
-       u16 strip_size;
-       u32 first_group;
-       u32 last_group;
-       u32 current_group;
-       u32 map_row;
-       u32 aio_handle;
-       u64 disk_block;
-       u32 disk_block_cnt;
-       u8 cdb[16];
-       u8 cdb_length;
-       int offload_to_mirror;
-       struct pqi_encryption_info *encryption_info_ptr;
-       struct pqi_encryption_info encryption_info;
- #if BITS_PER_LONG == 32
-       u64 tmpdiv;
- #endif
        /* Check for valid opcode, get LBA and block count. */
        switch (scmd->cmnd[0]) {
        case WRITE_6:
-               is_write = true;
+               rmd->is_write = true;
                fallthrough;
        case READ_6:
-               first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
+               rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
                        (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
-               block_cnt = (u32)scmd->cmnd[4];
-               if (block_cnt == 0)
-                       block_cnt = 256;
+               rmd->block_cnt = (u32)scmd->cmnd[4];
+               if (rmd->block_cnt == 0)
+                       rmd->block_cnt = 256;
                break;
        case WRITE_10:
-               is_write = true;
+               rmd->is_write = true;
                fallthrough;
        case READ_10:
-               first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
-               block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+               rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+               rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
                break;
        case WRITE_12:
-               is_write = true;
+               rmd->is_write = true;
                fallthrough;
        case READ_12:
-               first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
-               block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+               rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+               rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
                break;
        case WRITE_16:
-               is_write = true;
+               rmd->is_write = true;
                fallthrough;
        case READ_16:
-               first_block = get_unaligned_be64(&scmd->cmnd[2]);
-               block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
+               rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
+               rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
                break;
        default:
                /* Process via normal I/O path. */
                return PQI_RAID_BYPASS_INELIGIBLE;
        }
  
-       /* Check for write to non-RAID-0. */
-       if (is_write && device->raid_level != SA_RAID_0)
-               return PQI_RAID_BYPASS_INELIGIBLE;
+       put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
  
-       if (unlikely(block_cnt == 0))
-               return PQI_RAID_BYPASS_INELIGIBLE;
+       return 0;
+ }
  
-       last_block = first_block + block_cnt - 1;
-       raid_map = device->raid_map;
+ static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
+ {
+ #if BITS_PER_LONG == 32
+       u64 tmpdiv;
+ #endif
+       rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
  
        /* Check for invalid block or wraparound. */
-       if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
-               last_block < first_block)
+       if (rmd->last_block >=
+               get_unaligned_le64(&raid_map->volume_blk_cnt) ||
+               rmd->last_block < rmd->first_block)
                return PQI_RAID_BYPASS_INELIGIBLE;
  
-       data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
-       strip_size = get_unaligned_le16(&raid_map->strip_size);
-       layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
+       rmd->data_disks_per_row =
+               get_unaligned_le16(&raid_map->data_disks_per_row);
+       rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
+       rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
  
        /* Calculate stripe information for the request. */
-       blocks_per_row = data_disks_per_row * strip_size;
+       rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
+       if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
+               return PQI_RAID_BYPASS_INELIGIBLE;
  #if BITS_PER_LONG == 32
-       tmpdiv = first_block;
-       do_div(tmpdiv, blocks_per_row);
-       first_row = tmpdiv;
-       tmpdiv = last_block;
-       do_div(tmpdiv, blocks_per_row);
-       last_row = tmpdiv;
-       first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
-       last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
-       tmpdiv = first_row_offset;
-       do_div(tmpdiv, strip_size);
-       first_column = tmpdiv;
-       tmpdiv = last_row_offset;
-       do_div(tmpdiv, strip_size);
-       last_column = tmpdiv;
+       tmpdiv = rmd->first_block;
+       do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->first_row = tmpdiv;
+       tmpdiv = rmd->last_block;
+       do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->last_row = tmpdiv;
+       rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
+       rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
+       tmpdiv = rmd->first_row_offset;
+       do_div(tmpdiv, rmd->strip_size);
+       rmd->first_column = tmpdiv;
+       tmpdiv = rmd->last_row_offset;
+       do_div(tmpdiv, rmd->strip_size);
+       rmd->last_column = tmpdiv;
  #else
-       first_row = first_block / blocks_per_row;
-       last_row = last_block / blocks_per_row;
-       first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
-       last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
-       first_column = first_row_offset / strip_size;
-       last_column = last_row_offset / strip_size;
+       rmd->first_row = rmd->first_block / rmd->blocks_per_row;
+       rmd->last_row = rmd->last_block / rmd->blocks_per_row;
+       rmd->first_row_offset = (u32)(rmd->first_block -
+               (rmd->first_row * rmd->blocks_per_row));
+       rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
+               rmd->blocks_per_row));
+       rmd->first_column = rmd->first_row_offset / rmd->strip_size;
+       rmd->last_column = rmd->last_row_offset / rmd->strip_size;
  #endif
  
        /* If this isn't a single row/column then give to the controller. */
-       if (first_row != last_row || first_column != last_column)
+       if (rmd->first_row != rmd->last_row ||
+               rmd->first_column != rmd->last_column)
                return PQI_RAID_BYPASS_INELIGIBLE;
  
        /* Proceeding with driver mapping. */
-       total_disks_per_row = data_disks_per_row +
+       rmd->total_disks_per_row = rmd->data_disks_per_row +
                get_unaligned_le16(&raid_map->metadata_disks_per_row);
-       map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+       rmd->map_row = ((u32)(rmd->first_row >>
+               raid_map->parity_rotation_shift)) %
                get_unaligned_le16(&raid_map->row_cnt);
-       map_index = (map_row * total_disks_per_row) + first_column;
+       rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
+               rmd->first_column;
  
-       /* RAID 1 */
-       if (device->raid_level == SA_RAID_1) {
-               if (device->offload_to_mirror)
-                       map_index += data_disks_per_row;
-               device->offload_to_mirror = !device->offload_to_mirror;
-       } else if (device->raid_level == SA_RAID_ADM) {
-               /* RAID ADM */
-               /*
-                * Handles N-way mirrors  (R1-ADM) and R10 with # of drives
-                * divisible by 3.
-                */
-               offload_to_mirror = device->offload_to_mirror;
-               if (offload_to_mirror == 0)  {
-                       /* use physical disk in the first mirrored group. */
-                       map_index %= data_disks_per_row;
-               } else {
-                       do {
-                               /*
-                                * Determine mirror group that map_index
-                                * indicates.
-                                */
-                               current_group = map_index / data_disks_per_row;
-                               if (offload_to_mirror != current_group) {
-                                       if (current_group <
-                                               layout_map_count - 1) {
-                                               /*
-                                                * Select raid index from
-                                                * next group.
-                                                */
-                                               map_index += data_disks_per_row;
-                                               current_group++;
-                                       } else {
-                                               /*
-                                                * Select raid index from first
-                                                * group.
-                                                */
-                                               map_index %= data_disks_per_row;
-                                               current_group = 0;
-                                       }
-                               }
-                       } while (offload_to_mirror != current_group);
-               }
+       return 0;
+ }
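
pci_get_aio_common_raid_map_values() reduces the request's LBA range to a (row, column) position in the RAID map: blocks_per_row = data_disks_per_row * strip_size, row = LBA / blocks_per_row, and column = (LBA mod blocks_per_row) / strip_size; the request stays bypass-eligible only if its first and last block land on the same row and column. A small numeric walk-through of that arithmetic (geometry and LBAs invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical volume geometry: 4 data disks per row, 128-block strips. */
	uint32_t data_disks_per_row = 4;
	uint32_t strip_size = 128;
	uint32_t blocks_per_row = data_disks_per_row * strip_size;     /* 512 */

	uint64_t first_block = 1000, block_cnt = 8;
	uint64_t last_block = first_block + block_cnt - 1;             /* 1007 */

	uint64_t first_row = first_block / blocks_per_row;             /* 1 */
	uint32_t first_row_offset = (uint32_t)(first_block - first_row * blocks_per_row); /* 488 */
	uint32_t first_column = first_row_offset / strip_size;         /* 3 */

	uint64_t last_row = last_block / blocks_per_row;               /* 1 */
	uint32_t last_row_offset = (uint32_t)(last_block - last_row * blocks_per_row);    /* 495 */
	uint32_t last_column = last_row_offset / strip_size;           /* 3 */

	/* Same row and same column -> the request may take the bypass path. */
	printf("row %llu/%llu column %u/%u -> %s\n",
	       (unsigned long long)first_row, (unsigned long long)last_row,
	       (unsigned)first_column, (unsigned)last_column,
	       (first_row == last_row && first_column == last_column) ?
			"eligible" : "ineligible");
	return 0;
}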
  
-               /* Set mirror group to use next time. */
-               offload_to_mirror =
-                       (offload_to_mirror >= layout_map_count - 1) ?
-                               0 : offload_to_mirror + 1;
-               device->offload_to_mirror = offload_to_mirror;
-               /*
-                * Avoid direct use of device->offload_to_mirror within this
-                * function since multiple threads might simultaneously
-                * increment it beyond the range of device->layout_map_count -1.
-                */
-       } else if ((device->raid_level == SA_RAID_5 ||
-               device->raid_level == SA_RAID_6) && layout_map_count > 1) {
-               /* RAID 50/60 */
-               /* Verify first and last block are in same RAID group */
-               r5or6_blocks_per_row = strip_size * data_disks_per_row;
-               stripesize = r5or6_blocks_per_row * layout_map_count;
+ static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
+       struct raid_map *raid_map)
+ {
  #if BITS_PER_LONG == 32
-               tmpdiv = first_block;
-               first_group = do_div(tmpdiv, stripesize);
-               tmpdiv = first_group;
-               do_div(tmpdiv, r5or6_blocks_per_row);
-               first_group = tmpdiv;
-               tmpdiv = last_block;
-               last_group = do_div(tmpdiv, stripesize);
-               tmpdiv = last_group;
-               do_div(tmpdiv, r5or6_blocks_per_row);
-               last_group = tmpdiv;
- #else
-               first_group = (first_block % stripesize) / r5or6_blocks_per_row;
-               last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+       u64 tmpdiv;
  #endif
-               if (first_group != last_group)
-                       return PQI_RAID_BYPASS_INELIGIBLE;
  
-               /* Verify request is in a single row of RAID 5/6 */
+       if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
+               return PQI_RAID_BYPASS_INELIGIBLE;
+       /* RAID 50/60 */
+       /* Verify first and last block are in same RAID group. */
+       rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
  #if BITS_PER_LONG == 32
-               tmpdiv = first_block;
-               do_div(tmpdiv, stripesize);
-               first_row = r5or6_first_row = r0_first_row = tmpdiv;
-               tmpdiv = last_block;
-               do_div(tmpdiv, stripesize);
-               r5or6_last_row = r0_last_row = tmpdiv;
+       tmpdiv = rmd->first_block;
+       rmd->first_group = do_div(tmpdiv, rmd->stripesize);
+       tmpdiv = rmd->first_group;
+       do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->first_group = tmpdiv;
+       tmpdiv = rmd->last_block;
+       rmd->last_group = do_div(tmpdiv, rmd->stripesize);
+       tmpdiv = rmd->last_group;
+       do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->last_group = tmpdiv;
  #else
-               first_row = r5or6_first_row = r0_first_row =
-                       first_block / stripesize;
-               r5or6_last_row = r0_last_row = last_block / stripesize;
+       rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
+       rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
  #endif
-               if (r5or6_first_row != r5or6_last_row)
-                       return PQI_RAID_BYPASS_INELIGIBLE;
+       if (rmd->first_group != rmd->last_group)
+               return PQI_RAID_BYPASS_INELIGIBLE;
+       /* Verify request is in a single row of RAID 5/6. */
+ #if BITS_PER_LONG == 32
+       tmpdiv = rmd->first_block;
+       do_div(tmpdiv, rmd->stripesize);
+       rmd->first_row = tmpdiv;
+       rmd->r5or6_first_row = tmpdiv;
+       tmpdiv = rmd->last_block;
+       do_div(tmpdiv, rmd->stripesize);
+       rmd->r5or6_last_row = tmpdiv;
+ #else
+       rmd->first_row = rmd->r5or6_first_row =
+               rmd->first_block / rmd->stripesize;
+       rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
+ #endif
+       if (rmd->r5or6_first_row != rmd->r5or6_last_row)
+               return PQI_RAID_BYPASS_INELIGIBLE;
  
-               /* Verify request is in a single column */
+       /* Verify request is in a single column. */
  #if BITS_PER_LONG == 32
-               tmpdiv = first_block;
-               first_row_offset = do_div(tmpdiv, stripesize);
-               tmpdiv = first_row_offset;
-               first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
-               r5or6_first_row_offset = first_row_offset;
-               tmpdiv = last_block;
-               r5or6_last_row_offset = do_div(tmpdiv, stripesize);
-               tmpdiv = r5or6_last_row_offset;
-               r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
-               tmpdiv = r5or6_first_row_offset;
-               do_div(tmpdiv, strip_size);
-               first_column = r5or6_first_column = tmpdiv;
-               tmpdiv = r5or6_last_row_offset;
-               do_div(tmpdiv, strip_size);
-               r5or6_last_column = tmpdiv;
+       tmpdiv = rmd->first_block;
+       rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
+       tmpdiv = rmd->first_row_offset;
+       rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->r5or6_first_row_offset = rmd->first_row_offset;
+       tmpdiv = rmd->last_block;
+       rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
+       tmpdiv = rmd->r5or6_last_row_offset;
+       rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
+       tmpdiv = rmd->r5or6_first_row_offset;
+       do_div(tmpdiv, rmd->strip_size);
+       rmd->first_column = rmd->r5or6_first_column = tmpdiv;
+       tmpdiv = rmd->r5or6_last_row_offset;
+       do_div(tmpdiv, rmd->strip_size);
+       rmd->r5or6_last_column = tmpdiv;
  #else
-               first_row_offset = r5or6_first_row_offset =
-                       (u32)((first_block % stripesize) %
-                       r5or6_blocks_per_row);
+       rmd->first_row_offset = rmd->r5or6_first_row_offset =
+               (u32)((rmd->first_block % rmd->stripesize) %
+               rmd->blocks_per_row);
+       rmd->r5or6_last_row_offset =
+               (u32)((rmd->last_block % rmd->stripesize) %
+               rmd->blocks_per_row);
+       rmd->first_column =
+               rmd->r5or6_first_row_offset / rmd->strip_size;
+       rmd->r5or6_first_column = rmd->first_column;
+       rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
+ #endif
+       if (rmd->r5or6_first_column != rmd->r5or6_last_column)
+               return PQI_RAID_BYPASS_INELIGIBLE;
+       /* Request is eligible. */
+       rmd->map_row =
+               ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
+               get_unaligned_le16(&raid_map->row_cnt);
+       rmd->map_index = (rmd->first_group *
+               (get_unaligned_le16(&raid_map->row_cnt) *
+               rmd->total_disks_per_row)) +
+               (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
  
-               r5or6_last_row_offset =
-                       (u32)((last_block % stripesize) %
-                       r5or6_blocks_per_row);
+       if (rmd->is_write) {
+               u32 index;
  
-               first_column = r5or6_first_row_offset / strip_size;
-               r5or6_first_column = first_column;
-               r5or6_last_column = r5or6_last_row_offset / strip_size;
+               /*
+                * p_parity_it_nexus and q_parity_it_nexus are pointers to the
+                * parity entries inside the device's raid_map.
+                *
+                * A device's RAID map is bounded by the number of RAID disks squared.
+                *
+                * The device's RAID map size is checked during device
+                * initialization.
+                */
+               index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
+               index *= rmd->total_disks_per_row;
+               index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
+               rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
+               if (rmd->raid_level == SA_RAID_6) {
+                       rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
+                       rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
+               }
+ #if BITS_PER_LONG == 32
+               tmpdiv = rmd->first_block;
+               do_div(tmpdiv, rmd->blocks_per_row);
+               rmd->row = tmpdiv;
+ #else
+               rmd->row = rmd->first_block / rmd->blocks_per_row;
  #endif
-               if (r5or6_first_column != r5or6_last_column)
-                       return PQI_RAID_BYPASS_INELIGIBLE;
+       }
+       return 0;
+ }
+ 
+ static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
+ {
+       /* Build the new CDB for the physical disk I/O. */
+       if (rmd->disk_block > 0xffffffff) {
+               rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
+               rmd->cdb[1] = 0;
+               put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
+               put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
+               rmd->cdb[14] = 0;
+               rmd->cdb[15] = 0;
+               rmd->cdb_length = 16;
+       } else {
+               rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
+               rmd->cdb[1] = 0;
+               put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
+               rmd->cdb[6] = 0;
+               put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
+               rmd->cdb[9] = 0;
+               rmd->cdb_length = 10;
+       }
+ }
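
pqi_set_aio_cdb() emits a 10-byte READ/WRITE CDB while the remapped disk LBA still fits in 32 bits and falls back to a 16-byte CDB otherwise; the LBA and transfer length are stored big-endian, which is all put_unaligned_be32()/put_unaligned_be16() do here. A stand-alone rendering of the 10-byte case (SCSI opcodes written as their numeric values, helpers open-coded; illustrative only):

#include <stdint.h>
#include <stdio.h>

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be16(uint16_t v, uint8_t *p)
{
	p[0] = v >> 8; p[1] = v;
}

int main(void)
{
	uint8_t cdb[10] = { 0 };
	uint32_t disk_block = 0x12345678;   /* fits in 32 bits -> 10-byte CDB */
	uint16_t disk_block_cnt = 8;
	int is_write = 0;

	cdb[0] = is_write ? 0x2A /* WRITE_10 */ : 0x28 /* READ_10 */;
	put_be32(disk_block, &cdb[2]);      /* bytes 2..5: big-endian LBA */
	put_be16(disk_block_cnt, &cdb[7]);  /* bytes 7..8: big-endian length */

	for (int i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");                       /* 28 00 12 34 56 78 00 00 08 00 */
	return 0;
}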
+ static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
+       struct pqi_scsi_dev_raid_map_data *rmd)
+ {
+       u32 index;
+       u32 group;
+ 
+       group = rmd->map_index / rmd->data_disks_per_row;
+       index = rmd->map_index - (group * rmd->data_disks_per_row);
+       rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
+       index += rmd->data_disks_per_row;
+       rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
+       if (rmd->layout_map_count > 2) {
+               index += rmd->data_disks_per_row;
+               rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
+       }
+       rmd->num_it_nexus_entries = rmd->layout_map_count;
+ }
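
For RAID 1 and RAID Triple writes, pqi_calc_aio_r1_nexus() records one AIO handle per copy of the data: the mirror copies of a given drive sit data_disks_per_row entries apart in raid_map->disk_data[], and layout_map_count (2 or 3) says how many copies exist. A short numeric illustration of the index progression (layout and map_index invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical RAID 10 layout: 2 data disks per row, 2 mirror groups. */
	uint32_t data_disks_per_row = 2;
	uint32_t layout_map_count = 2;     /* would be 3 for RAID Triple */
	uint32_t map_index = 3;            /* entry chosen by the common mapping code */

	uint32_t group = map_index / data_disks_per_row;          /* 1 */
	uint32_t index = map_index - group * data_disks_per_row;  /* 1: same drive, group 0 */

	/* One it_nexus entry per copy, stepping one row of disks at a time. */
	for (uint32_t copy = 0; copy < layout_map_count; copy++, index += data_disks_per_row)
		printf("it_nexus[%u] <- raid_map->disk_data[%u].aio_handle\n",
		       (unsigned)copy, (unsigned)index);
	return 0;
}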
+ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+       struct pqi_queue_group *queue_group)
+ {
+       int rc;
+       struct raid_map *raid_map;
+       u32 group;
+       u32 next_bypass_group;
+       struct pqi_encryption_info *encryption_info_ptr;
+       struct pqi_encryption_info encryption_info;
+       struct pqi_scsi_dev_raid_map_data rmd = { 0 };
+ 
+       rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+       if (rc)
+               return PQI_RAID_BYPASS_INELIGIBLE;
+       rmd.raid_level = device->raid_level;
  
-               /* Request is eligible */
-               map_row =
-                       ((u32)(first_row >> raid_map->parity_rotation_shift)) %
-                       get_unaligned_le16(&raid_map->row_cnt);
+       if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+       if (unlikely(rmd.block_cnt == 0))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+       raid_map = device->raid_map;
+       rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
+       if (rc)
+               return PQI_RAID_BYPASS_INELIGIBLE;
  
-               map_index = (first_group *
-                       (get_unaligned_le16(&raid_map->row_cnt) *
-                       total_disks_per_row)) +
-                       (map_row * total_disks_per_row) + first_column;
+       if (device->raid_level == SA_RAID_1 ||
+               device->raid_level == SA_RAID_TRIPLE) {
+               if (rmd.is_write) {
+                       pqi_calc_aio_r1_nexus(raid_map, &rmd);
+               } else {
+                       group = device->next_bypass_group;
+                       next_bypass_group = group + 1;
+                       if (next_bypass_group >= rmd.layout_map_count)
+                               next_bypass_group = 0;
+                       device->next_bypass_group = next_bypass_group;
+                       rmd.map_index += group * rmd.data_disks_per_row;
+               }
+       } else if ((device->raid_level == SA_RAID_5 ||
+               device->raid_level == SA_RAID_6) &&
+               (rmd.layout_map_count > 1 || rmd.is_write)) {
+               rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
+               if (rc)
+                       return PQI_RAID_BYPASS_INELIGIBLE;
        }
  
-       aio_handle = raid_map->disk_data[map_index].aio_handle;
-       disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
-               first_row * strip_size +
-               (first_row_offset - first_column * strip_size);
-       disk_block_cnt = block_cnt;
+       if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+       rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
+       rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
+               rmd.first_row * rmd.strip_size +
+               (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
+       rmd.disk_block_cnt = rmd.block_cnt;
  
        /* Handle differing logical/physical block sizes. */
        if (raid_map->phys_blk_shift) {
-               disk_block <<= raid_map->phys_blk_shift;
-               disk_block_cnt <<= raid_map->phys_blk_shift;
+               rmd.disk_block <<= raid_map->phys_blk_shift;
+               rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
        }
  
-       if (unlikely(disk_block_cnt > 0xffff))
+       if (unlikely(rmd.disk_block_cnt > 0xffff))
                return PQI_RAID_BYPASS_INELIGIBLE;
  
-       /* Build the new CDB for the physical disk I/O. */
-       if (disk_block > 0xffffffff) {
-               cdb[0] = is_write ? WRITE_16 : READ_16;
-               cdb[1] = 0;
-               put_unaligned_be64(disk_block, &cdb[2]);
-               put_unaligned_be32(disk_block_cnt, &cdb[10]);
-               cdb[14] = 0;
-               cdb[15] = 0;
-               cdb_length = 16;
-       } else {
-               cdb[0] = is_write ? WRITE_10 : READ_10;
-               cdb[1] = 0;
-               put_unaligned_be32((u32)disk_block, &cdb[2]);
-               cdb[6] = 0;
-               put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
-               cdb[9] = 0;
-               cdb_length = 10;
-       }
-       if (get_unaligned_le16(&raid_map->flags) &
-               RAID_MAP_ENCRYPTION_ENABLED) {
-               pqi_set_encryption_info(&encryption_info, raid_map,
-                       first_block);
+       pqi_set_aio_cdb(&rmd);
+       if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
+               if (rmd.data_length > device->max_transfer_encrypted)
+                       return PQI_RAID_BYPASS_INELIGIBLE;
+               pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
                encryption_info_ptr = &encryption_info;
        } else {
                encryption_info_ptr = NULL;
        }
  
-       return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
-               cdb, cdb_length, queue_group, encryption_info_ptr, true);
+       if (rmd.is_write) {
+               switch (device->raid_level) {
+               case SA_RAID_1:
+               case SA_RAID_TRIPLE:
+                       return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
+                               encryption_info_ptr, device, &rmd);
+               case SA_RAID_5:
+               case SA_RAID_6:
+                       return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
+                               encryption_info_ptr, device, &rmd);
+               }
+       }
+       return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
+               rmd.cdb, rmd.cdb_length, queue_group,
+               encryption_info_ptr, true);
  }
  
  #define PQI_STATUS_IDLE               0x0
@@@ -2858,7 -3108,7 +3107,7 @@@ static void pqi_process_io_error(unsign
        }
  }
  
- static int pqi_interpret_task_management_response(
+ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
        struct pqi_task_management_response *response)
  {
        int rc;
                break;
        }
  
+       if (rc)
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
        return rc;
  }
  
@@@ -2941,13 -3195,11 +3194,11 @@@ static int pqi_process_io_intr(struct p
                case PQI_RESPONSE_IU_VENDOR_GENERAL:
                        io_request->status =
                                get_unaligned_le16(
-                               &((struct pqi_vendor_general_response *)
-                                       response)->status);
+                               &((struct pqi_vendor_general_response *)response)->status);
                        break;
                case PQI_RESPONSE_IU_TASK_MANAGEMENT:
-                       io_request->status =
-                               pqi_interpret_task_management_response(
-                                       (void *)response);
+                       io_request->status = pqi_interpret_task_management_response(ctrl_info,
+                               (void *)response);
                        break;
                case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
                        pqi_aio_path_disabled(io_request);
@@@ -3055,8 -3307,8 +3306,8 @@@ static void pqi_acknowledge_event(struc
        put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
                &request.header.iu_length);
        request.event_type = event->event_type;
-       request.event_id = event->event_id;
-       request.additional_event_id = event->additional_event_id;
+       put_unaligned_le16(event->event_id, &request.event_id);
+       put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
  
        pqi_send_event_ack(ctrl_info, &request, sizeof(request));
  }
  static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
        struct pqi_ctrl_info *ctrl_info)
  {
-       unsigned long timeout;
        u8 status;
+       unsigned long timeout;
  
        timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
  
                if (status & PQI_SOFT_RESET_ABORT)
                        return RESET_ABORT;
  
+               if (!sis_is_firmware_running(ctrl_info))
+                       return RESET_NORESPONSE;
                if (time_after(jiffies, timeout)) {
-                       dev_err(&ctrl_info->pci_dev->dev,
+                       dev_warn(&ctrl_info->pci_dev->dev,
                                "timed out waiting for soft reset status\n");
                        return RESET_TIMEDOUT;
                }
  
-               if (!sis_is_firmware_running(ctrl_info))
-                       return RESET_NORESPONSE;
                ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
        }
  }
  
- static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
-       enum pqi_soft_reset_status reset_status)
+ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
  {
        int rc;
+       unsigned int delay_secs;
+       enum pqi_soft_reset_status reset_status;
+       if (ctrl_info->soft_reset_handshake_supported)
+               reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
+       else
+               reset_status = RESET_INITIATE_FIRMWARE;
+       delay_secs = PQI_POST_RESET_DELAY_SECS;
  
        switch (reset_status) {
-       case RESET_INITIATE_DRIVER:
        case RESET_TIMEDOUT:
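+               /*
+                * A timed-out soft reset handshake still falls through to a
+                * driver-initiated reset, using the timeout-specific
+                * post-reset delay.
+                */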
+               delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
+               fallthrough;
+       case RESET_INITIATE_DRIVER:
                dev_info(&ctrl_info->pci_dev->dev,
-                       "resetting controller %u\n", ctrl_info->ctrl_id);
+                               "Online Firmware Activation: resetting controller\n");
                sis_soft_reset(ctrl_info);
                fallthrough;
        case RESET_INITIATE_FIRMWARE:
-               rc = pqi_ofa_ctrl_restart(ctrl_info);
+               ctrl_info->pqi_mode_enabled = false;
+               pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+               rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
                pqi_ofa_free_host_buffer(ctrl_info);
+               pqi_ctrl_ofa_done(ctrl_info);
                dev_info(&ctrl_info->pci_dev->dev,
-                       "Online Firmware Activation for controller %u: %s\n",
-                       ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+                               "Online Firmware Activation: %s\n",
+                               rc == 0 ? "SUCCESS" : "FAILED");
                break;
        case RESET_ABORT:
-               pqi_ofa_ctrl_unquiesce(ctrl_info);
                dev_info(&ctrl_info->pci_dev->dev,
-                       "Online Firmware Activation for controller %u: %s\n",
-                       ctrl_info->ctrl_id, "ABORTED");
+                               "Online Firmware Activation ABORTED\n");
+               if (ctrl_info->soft_reset_handshake_supported)
+                       pqi_clear_soft_reset_status(ctrl_info);
+               pqi_ofa_free_host_buffer(ctrl_info);
+               pqi_ctrl_ofa_done(ctrl_info);
+               pqi_ofa_ctrl_unquiesce(ctrl_info);
                break;
        case RESET_NORESPONSE:
+               fallthrough;
+       default:
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "unexpected Online Firmware Activation reset status: 0x%x\n",
+                       reset_status);
                pqi_ofa_free_host_buffer(ctrl_info);
+               pqi_ctrl_ofa_done(ctrl_info);
+               pqi_ofa_ctrl_unquiesce(ctrl_info);
                pqi_take_ctrl_offline(ctrl_info);
                break;
        }
  }
  
- static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_event *event)
+ static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
  {
-       u16 event_id;
-       enum pqi_soft_reset_status status;
+       struct pqi_ctrl_info *ctrl_info;
  
-       event_id = get_unaligned_le16(&event->event_id);
+       ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
  
-       mutex_lock(&ctrl_info->ofa_mutex);
+       pqi_ctrl_ofa_start(ctrl_info);
+       pqi_ofa_setup_host_buffer(ctrl_info);
+       pqi_ofa_host_memory_update(ctrl_info);
+ }
  
-       if (event_id == PQI_EVENT_OFA_QUIESCE) {
-               dev_info(&ctrl_info->pci_dev->dev,
-                       "Received Online Firmware Activation quiesce event for controller %u\n",
-                       ctrl_info->ctrl_id);
-               pqi_ofa_ctrl_quiesce(ctrl_info);
-               pqi_acknowledge_event(ctrl_info, event);
-               if (ctrl_info->soft_reset_handshake_supported) {
-                       status = pqi_poll_for_soft_reset_status(ctrl_info);
-                       pqi_process_soft_reset(ctrl_info, status);
-               } else {
-                       pqi_process_soft_reset(ctrl_info,
-                                       RESET_INITIATE_FIRMWARE);
-               }
+ static void pqi_ofa_quiesce_worker(struct work_struct *work)
+ {
+       struct pqi_ctrl_info *ctrl_info;
+       struct pqi_event *event;
  
-       } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
-               pqi_acknowledge_event(ctrl_info, event);
-               pqi_ofa_setup_host_buffer(ctrl_info,
-                       le32_to_cpu(event->ofa_bytes_requested));
-               pqi_ofa_host_memory_update(ctrl_info);
-       } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
-               pqi_ofa_free_host_buffer(ctrl_info);
-               pqi_acknowledge_event(ctrl_info, event);
+       ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
+       event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
+       pqi_ofa_ctrl_quiesce(ctrl_info);
+       pqi_acknowledge_event(ctrl_info, event);
+       pqi_process_soft_reset(ctrl_info);
+ }
+
+ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_event *event)
+ {
+       bool ack_event;
+       ack_event = true;
+       switch (event->event_id) {
+       case PQI_EVENT_OFA_MEMORY_ALLOCATION:
+               dev_info(&ctrl_info->pci_dev->dev,
+                       "received Online Firmware Activation memory allocation request\n");
+               schedule_work(&ctrl_info->ofa_memory_alloc_work);
+               break;
+       case PQI_EVENT_OFA_QUIESCE:
                dev_info(&ctrl_info->pci_dev->dev,
-                       "Online Firmware Activation(%u) cancel reason : %u\n",
-                       ctrl_info->ctrl_id, event->ofa_cancel_reason);
+                       "received Online Firmware Activation quiesce request\n");
+               schedule_work(&ctrl_info->ofa_quiesce_work);
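+               /*
+                * The quiesce event is acknowledged later, from
+                * pqi_ofa_quiesce_worker(), once the controller has actually
+                * been quiesced.
+                */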
+               ack_event = false;
+               break;
+       case PQI_EVENT_OFA_CANCELED:
+               dev_info(&ctrl_info->pci_dev->dev,
+                       "received Online Firmware Activation cancel request: reason: %u\n",
+                       ctrl_info->ofa_cancel_reason);
+               pqi_ofa_free_host_buffer(ctrl_info);
+               pqi_ctrl_ofa_done(ctrl_info);
+               break;
+       default:
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "received unknown Online Firmware Activation request: event ID: %u\n",
+                       event->event_id);
+               break;
        }
  
-       mutex_unlock(&ctrl_info->ofa_mutex);
+       return ack_event;
  }
  
  static void pqi_event_worker(struct work_struct *work)
  {
        unsigned int i;
+       bool rescan_needed;
        struct pqi_ctrl_info *ctrl_info;
        struct pqi_event *event;
+       bool ack_event;
  
        ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
  
        pqi_ctrl_busy(ctrl_info);
-       pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
+       pqi_wait_if_ctrl_blocked(ctrl_info);
        if (pqi_ctrl_offline(ctrl_info))
                goto out;
  
-       pqi_schedule_rescan_worker_delayed(ctrl_info);
+       rescan_needed = false;
        event = ctrl_info->events;
        for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
                if (event->pending) {
                        event->pending = false;
                        if (event->event_type == PQI_EVENT_TYPE_OFA) {
-                               pqi_ctrl_unbusy(ctrl_info);
-                               pqi_ofa_process_event(ctrl_info, event);
-                               return;
+                               ack_event = pqi_ofa_process_event(ctrl_info, event);
+                       } else {
+                               ack_event = true;
+                               rescan_needed = true;
                        }
-                       pqi_acknowledge_event(ctrl_info, event);
+                       if (ack_event)
+                               pqi_acknowledge_event(ctrl_info, event);
                }
                event++;
        }
  
+       if (rescan_needed)
+               pqi_schedule_rescan_worker_delayed(ctrl_info);
  out:
        pqi_ctrl_unbusy(ctrl_info);
  }
@@@ -3204,8 -3506,7 +3505,7 @@@ static void pqi_heartbeat_timer_handler
  {
        int num_interrupts;
        u32 heartbeat_count;
-       struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
-                                                    heartbeat_timer);
+       struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
  
        pqi_check_ctrl_health(ctrl_info);
        if (pqi_ctrl_offline(ctrl_info))
@@@ -3251,37 -3552,18 +3551,18 @@@ static inline void pqi_stop_heartbeat_t
        del_timer_sync(&ctrl_info->heartbeat_timer);
  }
  
- static inline int pqi_event_type_to_event_index(unsigned int event_type)
- {
-       int index;
-       for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
-               if (event_type == pqi_supported_event_types[index])
-                       return index;
-       return -1;
- }
- static inline bool pqi_is_supported_event(unsigned int event_type)
- {
-       return pqi_event_type_to_event_index(event_type) != -1;
- }
- static void pqi_ofa_capture_event_payload(struct pqi_event *event,
-       struct pqi_event_response *response)
+ static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_event *event, struct pqi_event_response *response)
  {
-       u16 event_id;
-       event_id = get_unaligned_le16(&event->event_id);
-       if (event->event_type == PQI_EVENT_TYPE_OFA) {
-               if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
-                       event->ofa_bytes_requested =
-                       response->data.ofa_memory_allocation.bytes_requested;
-               } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
-                       event->ofa_cancel_reason =
-                       response->data.ofa_cancelled.reason;
-               }
+       switch (event->event_id) {
+       case PQI_EVENT_OFA_MEMORY_ALLOCATION:
+               ctrl_info->ofa_bytes_requested =
+                       get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
+               break;
+       case PQI_EVENT_OFA_CANCELED:
+               ctrl_info->ofa_cancel_reason =
+                       get_unaligned_le16(&response->data.ofa_cancelled.reason);
+               break;
        }
  }
  
@@@ -3315,17 -3597,17 +3596,17 @@@ static int pqi_process_event_intr(struc
                num_events++;
                response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
  
-               event_index =
-                       pqi_event_type_to_event_index(response->event_type);
+               event_index = pqi_event_type_to_event_index(response->event_type);
  
                if (event_index >= 0 && response->request_acknowledge) {
                        event = &ctrl_info->events[event_index];
                        event->pending = true;
                        event->event_type = response->event_type;
-                       event->event_id = response->event_id;
-                       event->additional_event_id = response->additional_event_id;
+                       event->event_id = get_unaligned_le16(&response->event_id);
+                       event->additional_event_id =
+                               get_unaligned_le32(&response->additional_event_id);
                        if (event->event_type == PQI_EVENT_TYPE_OFA)
-                               pqi_ofa_capture_event_payload(event, response);
+                               pqi_ofa_capture_event_payload(ctrl_info, event, response);
                }
  
                oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
  
  #define PQI_LEGACY_INTX_MASK  0x1
  
- static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
-       bool enable_intx)
+ static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
  {
        u32 intx_mask;
        struct pqi_device_registers __iomem *pqi_registers;
@@@ -3420,8 -3701,7 +3700,7 @@@ static inline bool pqi_is_valid_irq(str
                valid_irq = true;
                break;
        case IRQ_MODE_INTX:
-               intx_status =
-                       readl(&ctrl_info->pqi_registers->legacy_intx_status);
+               intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
                if (intx_status & PQI_LEGACY_INTX_PENDING)
                        valid_irq = true;
                else
@@@ -3742,7 -4022,8 +4021,8 @@@ static int pqi_alloc_admin_queues(struc
                &admin_queues_aligned->iq_element_array;
        admin_queues->oq_element_array =
                &admin_queues_aligned->oq_element_array;
-       admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
+       admin_queues->iq_ci =
+               (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
        admin_queues->oq_pi =
                (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
  
                ctrl_info->admin_queue_memory_base);
        admin_queues->iq_ci_bus_addr =
                ctrl_info->admin_queue_memory_base_dma_handle +
-               ((void *)admin_queues->iq_ci -
-               ctrl_info->admin_queue_memory_base);
+               ((void __iomem *)admin_queues->iq_ci -
+               (void __iomem *)ctrl_info->admin_queue_memory_base);
        admin_queues->oq_pi_bus_addr =
                ctrl_info->admin_queue_memory_base_dma_handle +
                ((void __iomem *)admin_queues->oq_pi -
@@@ -3793,6 -4074,7 +4073,7 @@@ static int pqi_create_admin_queues(stru
                (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
                (admin_queues->int_msg_num << 16);
        writel(reg, &pqi_registers->admin_iq_num_elements);
        writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
                &pqi_registers->function_and_status_code);
  
@@@ -4020,59 -4302,40 +4301,40 @@@ static int pqi_process_raid_io_error_sy
        return rc;
  }
  
+ static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
+ {
+       return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
+ }
+
  static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
        struct pqi_iu_header *request, unsigned int flags,
-       struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+       struct pqi_raid_error_info *error_info)
  {
        int rc = 0;
        struct pqi_io_request *io_request;
-       unsigned long start_jiffies;
-       unsigned long msecs_blocked;
        size_t iu_length;
        DECLARE_COMPLETION_ONSTACK(wait);
  
-       /*
-        * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
-        * are mutually exclusive.
-        */
        if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
                if (down_interruptible(&ctrl_info->sync_request_sem))
                        return -ERESTARTSYS;
        } else {
-               if (timeout_msecs == NO_TIMEOUT) {
-                       down(&ctrl_info->sync_request_sem);
-               } else {
-                       start_jiffies = jiffies;
-                       if (down_timeout(&ctrl_info->sync_request_sem,
-                               msecs_to_jiffies(timeout_msecs)))
-                               return -ETIMEDOUT;
-                       msecs_blocked =
-                               jiffies_to_msecs(jiffies - start_jiffies);
-                       if (msecs_blocked >= timeout_msecs) {
-                               rc = -ETIMEDOUT;
-                               goto out;
-                       }
-                       timeout_msecs -= msecs_blocked;
-               }
+               down(&ctrl_info->sync_request_sem);
        }
  
        pqi_ctrl_busy(ctrl_info);
-       timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
-       if (timeout_msecs == 0) {
-               pqi_ctrl_unbusy(ctrl_info);
-               rc = -ETIMEDOUT;
-               goto out;
-       }
+       /*
+        * Wait for other admin queue updates such as:
+        * config table changes, OFA memory updates, ...
+        */
+       if (pqi_is_blockable_request(request))
+               pqi_wait_if_ctrl_blocked(ctrl_info);
  
        if (pqi_ctrl_offline(ctrl_info)) {
-               pqi_ctrl_unbusy(ctrl_info);
                rc = -ENXIO;
                goto out;
        }
  
-       atomic_inc(&ctrl_info->sync_cmds_outstanding);
        io_request = pqi_alloc_io_request(ctrl_info);
  
        put_unaligned_le16(io_request->index,
        io_request->io_complete_callback = pqi_raid_synchronous_complete;
        io_request->context = &wait;
  
-       pqi_start_io(ctrl_info,
-               &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+       pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
                io_request);
  
-       pqi_ctrl_unbusy(ctrl_info);
-       if (timeout_msecs == NO_TIMEOUT) {
-               pqi_wait_for_completion_io(ctrl_info, &wait);
-       } else {
-               if (!wait_for_completion_io_timeout(&wait,
-                       msecs_to_jiffies(timeout_msecs))) {
-                       dev_warn(&ctrl_info->pci_dev->dev,
-                               "command timed out\n");
-                       rc = -ETIMEDOUT;
-               }
-       }
+       pqi_wait_for_completion_io(ctrl_info, &wait);
  
        if (error_info) {
                if (io_request->error_info)
-                       memcpy(error_info, io_request->error_info,
-                               sizeof(*error_info));
+                       memcpy(error_info, io_request->error_info, sizeof(*error_info));
                else
                        memset(error_info, 0, sizeof(*error_info));
        } else if (rc == 0 && io_request->error_info) {
-               rc = pqi_process_raid_io_error_synchronous(
-                       io_request->error_info);
+               rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
        }
  
        pqi_free_io_request(io_request);
  
-       atomic_dec(&ctrl_info->sync_cmds_outstanding);
  out:
+       pqi_ctrl_unbusy(ctrl_info);
        up(&ctrl_info->sync_request_sem);
  
        return rc;
@@@ -4157,8 -4406,7 +4405,7 @@@ static int pqi_submit_admin_request_syn
        rc = pqi_poll_for_admin_response(ctrl_info, response);
  
        if (rc == 0)
-               rc = pqi_validate_admin_response(response,
-                       request->function_code);
+               rc = pqi_validate_admin_response(response, request->function_code);
  
        return rc;
  }
@@@ -4192,8 -4440,7 +4439,7 @@@ static int pqi_report_device_capability
        if (rc)
                goto out;
  
-       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
-               &response);
+       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
  
        pqi_pci_unmap(ctrl_info->pci_dev,
                &request.data.report_device_capability.sg_descriptor, 1,
@@@ -4528,8 -4775,7 +4774,7 @@@ static int pqi_configure_events(struct 
        if (rc)
                goto out;
  
-       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               0, NULL, NO_TIMEOUT);
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
  
        pqi_pci_unmap(ctrl_info->pci_dev,
                request.data.report_event_configuration.sg_descriptors, 1,
                event_descriptor = &event_config->descriptors[i];
                if (enable_events &&
                        pqi_is_supported_event(event_descriptor->event_type))
-                       put_unaligned_le16(ctrl_info->event_queue.oq_id,
+                               put_unaligned_le16(ctrl_info->event_queue.oq_id,
                                        &event_descriptor->oq_id);
                else
                        put_unaligned_le16(0, &event_descriptor->oq_id);
        if (rc)
                goto out;
  
-       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-               NULL, NO_TIMEOUT);
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
  
        pqi_pci_unmap(ctrl_info->pci_dev,
                request.data.report_event_configuration.sg_descriptors, 1,
@@@ -4582,11 -4827,6 +4826,6 @@@ static inline int pqi_enable_events(str
        return pqi_configure_events(ctrl_info, true);
  }
  
- static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
- {
-       return pqi_configure_events(ctrl_info, false);
- }
  static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
  {
        unsigned int i;
  
  static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
  {
        ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
                                     ctrl_info->error_buffer_length,
                                     &ctrl_info->error_buffer_dma_handle,
@@@ -4637,9 -4876,8 +4875,8 @@@ static int pqi_alloc_io_resources(struc
        struct device *dev;
        struct pqi_io_request *io_request;
  
-       ctrl_info->io_request_pool =
-               kcalloc(ctrl_info->max_io_slots,
-                       sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+       ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
+               sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
  
        if (!ctrl_info->io_request_pool) {
                dev_err(&ctrl_info->pci_dev->dev,
        io_request = ctrl_info->io_request_pool;
  
        for (i = 0; i < ctrl_info->max_io_slots; i++) {
-               io_request->iu =
-                       kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
+               io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
  
                if (!io_request->iu) {
                        dev_err(&ctrl_info->pci_dev->dev,
  
                io_request->index = i;
                io_request->sg_chain_buffer = sg_chain_buffer;
-               io_request->sg_chain_buffer_dma_handle =
-                       sg_chain_buffer_dma_handle;
+               io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
                io_request++;
        }
  
@@@ -4781,10 -5017,16 +5016,16 @@@ static void pqi_calculate_queue_resourc
                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
                sizeof(struct pqi_sg_descriptor)) +
                PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+       ctrl_info->max_sg_per_r56_iu =
+               ((ctrl_info->max_inbound_iu_length -
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+               sizeof(struct pqi_sg_descriptor)) +
+               PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
  }
  
- static inline void pqi_set_sg_descriptor(
-       struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
+ static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
+       struct scatterlist *sg)
  {
        u64 address = (u64)sg_dma_address(sg);
        unsigned int length = sg_dma_len(sg);
        put_unaligned_le32(0, &sg_descriptor->flags);
  }
  
- static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
-       struct pqi_io_request *io_request)
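+ /*
+  * Common SG list builder shared by the RAID and AIO submission paths: fill
+  * the IU's embedded SG descriptors and chain into the I/O request's SG chain
+  * buffer when they do not all fit. Returns the number of descriptors placed
+  * in the IU itself, including the chain descriptor if one was needed.
+  */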
+ static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
+       struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
+       int max_sg_per_iu, bool *chained)
  {
        int i;
-       u16 iu_length;
-       int sg_count;
-       bool chained;
        unsigned int num_sg_in_iu;
-       unsigned int max_sg_per_iu;
-       struct scatterlist *sg;
-       struct pqi_sg_descriptor *sg_descriptor;
-       sg_count = scsi_dma_map(scmd);
-       if (sg_count < 0)
-               return sg_count;
-       iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
-               PQI_REQUEST_HEADER_LENGTH;
  
-       if (sg_count == 0)
-               goto out;
-       sg = scsi_sglist(scmd);
-       sg_descriptor = request->sg_descriptors;
-       max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
-       chained = false;
-       num_sg_in_iu = 0;
+       *chained = false;
        i = 0;
+       num_sg_in_iu = 0;
+       max_sg_per_iu--;        /* Subtract 1 to leave room for chain marker. */
  
        while (1) {
                pqi_set_sg_descriptor(sg_descriptor, sg);
-               if (!chained)
+               if (!*chained)
                        num_sg_in_iu++;
                i++;
                if (i == sg_count)
                        break;
                sg_descriptor++;
                if (i == max_sg_per_iu) {
-                       put_unaligned_le64(
-                               (u64)io_request->sg_chain_buffer_dma_handle,
+                       put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
                                &sg_descriptor->address);
-                       put_unaligned_le32((sg_count - num_sg_in_iu)
-                               * sizeof(*sg_descriptor),
+                       put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
                                &sg_descriptor->length);
-                       put_unaligned_le32(CISS_SG_CHAIN,
-                               &sg_descriptor->flags);
-                       chained = true;
+                       put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
+                       *chained = true;
                        num_sg_in_iu++;
                        sg_descriptor = io_request->sg_chain_buffer;
                }
        }
  
        put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
-       request->partial = chained;
-       iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
- out:
-       put_unaligned_le16(iu_length, &request->header.iu_length);
  
-       return 0;
+       return num_sg_in_iu;
  }
  
- static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
+ static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
        struct pqi_io_request *io_request)
  {
-       int i;
        u16 iu_length;
        int sg_count;
        bool chained;
        unsigned int num_sg_in_iu;
-       unsigned int max_sg_per_iu;
        struct scatterlist *sg;
        struct pqi_sg_descriptor *sg_descriptor;
  
        if (sg_count < 0)
                return sg_count;
  
-       iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
+       iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
                PQI_REQUEST_HEADER_LENGTH;
-       num_sg_in_iu = 0;
  
        if (sg_count == 0)
                goto out;
  
        sg = scsi_sglist(scmd);
        sg_descriptor = request->sg_descriptors;
-       max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
-       chained = false;
-       i = 0;
  
-       while (1) {
-               pqi_set_sg_descriptor(sg_descriptor, sg);
-               if (!chained)
-                       num_sg_in_iu++;
-               i++;
-               if (i == sg_count)
-                       break;
-               sg_descriptor++;
-               if (i == max_sg_per_iu) {
-                       put_unaligned_le64(
-                               (u64)io_request->sg_chain_buffer_dma_handle,
-                               &sg_descriptor->address);
-                       put_unaligned_le32((sg_count - num_sg_in_iu)
-                               * sizeof(*sg_descriptor),
-                               &sg_descriptor->length);
-                       put_unaligned_le32(CISS_SG_CHAIN,
-                               &sg_descriptor->flags);
-                       chained = true;
-                       num_sg_in_iu++;
-                       sg_descriptor = io_request->sg_chain_buffer;
-               }
-               sg = sg_next(sg);
-       }
+       num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+               ctrl_info->max_sg_per_iu, &chained);
  
-       put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
        request->partial = chained;
        iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
  
  out:
        put_unaligned_le16(iu_length, &request->header.iu_length);
-       request->num_sg_descriptors = num_sg_in_iu;
  
        return 0;
  }
  
- static void pqi_raid_io_complete(struct pqi_io_request *io_request,
-       void *context)
+ static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
+       struct pqi_io_request *io_request)
  {
-       struct scsi_cmnd *scmd;
+       u16 iu_length;
+       int sg_count;
+       bool chained;
+       unsigned int num_sg_in_iu;
+       struct scatterlist *sg;
+       struct pqi_sg_descriptor *sg_descriptor;
+       sg_count = scsi_dma_map(scmd);
+       if (sg_count < 0)
+               return sg_count;
+       iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
+               PQI_REQUEST_HEADER_LENGTH;
+       num_sg_in_iu = 0;
+       if (sg_count == 0)
+               goto out;
+       sg = scsi_sglist(scmd);
+       sg_descriptor = request->sg_descriptors;
+       num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+               ctrl_info->max_sg_per_iu, &chained);
+       request->partial = chained;
+       iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+ out:
+       put_unaligned_le16(iu_length, &request->header.iu_length);
+       request->num_sg_descriptors = num_sg_in_iu;
+       return 0;
+ }
+
+ static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
+       struct pqi_io_request *io_request)
+ {
+       u16 iu_length;
+       int sg_count;
+       bool chained;
+       unsigned int num_sg_in_iu;
+       struct scatterlist *sg;
+       struct pqi_sg_descriptor *sg_descriptor;
+       sg_count = scsi_dma_map(scmd);
+       if (sg_count < 0)
+               return sg_count;
+       iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
+               PQI_REQUEST_HEADER_LENGTH;
+       num_sg_in_iu = 0;
+       if (sg_count != 0) {
+               sg = scsi_sglist(scmd);
+               sg_descriptor = request->sg_descriptors;
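+               /* RAID 5/6 write IUs have their own embedded SG descriptor limit. */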
+               num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+                       ctrl_info->max_sg_per_r56_iu, &chained);
+               request->partial = chained;
+               iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+       }
+       put_unaligned_le16(iu_length, &request->header.iu_length);
+       request->num_sg_descriptors = num_sg_in_iu;
+       return 0;
+ }
+
+ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
+       struct pqi_io_request *io_request)
+ {
+       u16 iu_length;
+       int sg_count;
+       bool chained;
+       unsigned int num_sg_in_iu;
+       struct scatterlist *sg;
+       struct pqi_sg_descriptor *sg_descriptor;
+       sg_count = scsi_dma_map(scmd);
+       if (sg_count < 0)
+               return sg_count;
+       iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
+               PQI_REQUEST_HEADER_LENGTH;
+       num_sg_in_iu = 0;
+       if (sg_count == 0)
+               goto out;
+       sg = scsi_sglist(scmd);
+       sg_descriptor = request->sg_descriptors;
+       num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+               ctrl_info->max_sg_per_iu, &chained);
+       request->partial = chained;
+       iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+ out:
+       put_unaligned_le16(iu_length, &request->header.iu_length);
+       request->num_sg_descriptors = num_sg_in_iu;
+       return 0;
+ }
+
+ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
+       void *context)
+ {
+       struct scsi_cmnd *scmd;
        scmd = io_request->scmd;
        pqi_free_io_request(io_request);
        scsi_dma_unmap(scmd);
@@@ -4947,16 -5246,14 +5245,14 @@@ static int pqi_raid_submit_scsi_cmd_wit
        io_request->scmd = scmd;
  
        request = io_request->iu;
-       memset(request, 0,
-               offsetof(struct pqi_raid_path_request, sg_descriptors));
+       memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
  
        request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
        put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
        put_unaligned_le16(io_request->index, &request->request_id);
        request->error_index = request->request_id;
-       memcpy(request->lun_number, device->scsi3addr,
-               sizeof(request->lun_number));
+       memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
  
        cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
        memcpy(request->cdb, scmd->cmnd, cdb_length);
        case 10:
        case 12:
        case 16:
-               /* No bytes in the Additional CDB bytes field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_0;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
                break;
        case 20:
-               /* 4 bytes in the Additional cdb field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_4;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
                break;
        case 24:
-               /* 8 bytes in the Additional cdb field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_8;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
                break;
        case 28:
-               /* 12 bytes in the Additional cdb field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_12;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
                break;
        case 32:
        default:
-               /* 16 bytes in the Additional cdb field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_16;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
                break;
        }
  
@@@ -5036,12 -5323,6 +5322,6 @@@ static inline int pqi_raid_submit_scsi_
                device, scmd, queue_group);
  }
  
- static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
- {
-       if (!pqi_ctrl_blocked(ctrl_info))
-               schedule_work(&ctrl_info->raid_bypass_retry_work);
- }
  static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
  {
        struct scsi_cmnd *scmd;
                return false;
  
        device = scmd->device->hostdata;
-       if (pqi_device_offline(device))
+       if (pqi_device_offline(device) || pqi_device_in_remove(device))
                return false;
  
        ctrl_info = shost_to_hba(scmd->device->host);
        return true;
  }
  
- static inline void pqi_add_to_raid_bypass_retry_list(
-       struct pqi_ctrl_info *ctrl_info,
-       struct pqi_io_request *io_request, bool at_head)
- {
-       unsigned long flags;
-       spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
-       if (at_head)
-               list_add(&io_request->request_list_entry,
-                       &ctrl_info->raid_bypass_retry_list);
-       else
-               list_add_tail(&io_request->request_list_entry,
-                       &ctrl_info->raid_bypass_retry_list);
-       spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
- }
- static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
-       void *context)
- {
-       struct scsi_cmnd *scmd;
-       scmd = io_request->scmd;
-       pqi_free_io_request(io_request);
-       pqi_scsi_done(scmd);
- }
- static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
- {
-       struct scsi_cmnd *scmd;
-       struct pqi_ctrl_info *ctrl_info;
-       io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
-       scmd = io_request->scmd;
-       scmd->result = 0;
-       ctrl_info = shost_to_hba(scmd->device->host);
-       pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
-       pqi_schedule_bypass_retry(ctrl_info);
- }
- static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
- {
-       struct scsi_cmnd *scmd;
-       struct pqi_scsi_dev *device;
-       struct pqi_ctrl_info *ctrl_info;
-       struct pqi_queue_group *queue_group;
-       scmd = io_request->scmd;
-       device = scmd->device->hostdata;
-       if (pqi_device_in_reset(device)) {
-               pqi_free_io_request(io_request);
-               set_host_byte(scmd, DID_RESET);
-               pqi_scsi_done(scmd);
-               return 0;
-       }
-       ctrl_info = shost_to_hba(scmd->device->host);
-       queue_group = io_request->queue_group;
-       pqi_reinit_io_request(io_request);
-       return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
-               device, scmd, queue_group);
- }
- static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
-       struct pqi_ctrl_info *ctrl_info)
- {
-       unsigned long flags;
-       struct pqi_io_request *io_request;
-       spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
-       io_request = list_first_entry_or_null(
-               &ctrl_info->raid_bypass_retry_list,
-               struct pqi_io_request, request_list_entry);
-       if (io_request)
-               list_del(&io_request->request_list_entry);
-       spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-       return io_request;
- }
- static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
- {
-       int rc;
-       struct pqi_io_request *io_request;
-       pqi_ctrl_busy(ctrl_info);
-       while (1) {
-               if (pqi_ctrl_blocked(ctrl_info))
-                       break;
-               io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
-               if (!io_request)
-                       break;
-               rc = pqi_retry_raid_bypass(io_request);
-               if (rc) {
-                       pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
-                               true);
-                       pqi_schedule_bypass_retry(ctrl_info);
-                       break;
-               }
-       }
-       pqi_ctrl_unbusy(ctrl_info);
- }
- static void pqi_raid_bypass_retry_worker(struct work_struct *work)
- {
-       struct pqi_ctrl_info *ctrl_info;
-       ctrl_info = container_of(work, struct pqi_ctrl_info,
-               raid_bypass_retry_work);
-       pqi_retry_raid_bypass_requests(ctrl_info);
- }
- static void pqi_clear_all_queued_raid_bypass_retries(
-       struct pqi_ctrl_info *ctrl_info)
- {
-       unsigned long flags;
-       spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
-       INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
-       spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
- }
  static void pqi_aio_io_complete(struct pqi_io_request *io_request,
        void *context)
  {
  
        scmd = io_request->scmd;
        scsi_dma_unmap(scmd);
-       if (io_request->status == -EAGAIN)
+       if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
                set_host_byte(scmd, DID_IMM_RETRY);
-       else if (pqi_raid_bypass_retry_needed(io_request)) {
-               pqi_queue_raid_bypass_retry(io_request);
-               return;
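+               /*
+                * Bump SCp.this_residual so the retried command is not sent
+                * down the RAID bypass path again (see
+                * pqi_is_bypass_eligible_request()).
+                */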
+               scmd->SCp.this_residual++;
        }
        pqi_free_io_request(io_request);
        pqi_scsi_done(scmd);
  }
@@@ -5234,8 -5388,7 +5387,7 @@@ static int pqi_aio_submit_io(struct pqi
        io_request->raid_bypass = raid_bypass;
  
        request = io_request->iu;
-       memset(request, 0,
-               offsetof(struct pqi_raid_path_request, sg_descriptors));
+       memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
  
        request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
        put_unaligned_le32(aio_handle, &request->nexus_id);
        return 0;
  }
  
+ static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+       struct pqi_scsi_dev_raid_map_data *rmd)
+ {
+       int rc;
+       struct pqi_io_request *io_request;
+       struct pqi_aio_r1_path_request *r1_request;
+       io_request = pqi_alloc_io_request(ctrl_info);
+       io_request->io_complete_callback = pqi_aio_io_complete;
+       io_request->scmd = scmd;
+       io_request->raid_bypass = true;
+       r1_request = io_request->iu;
+       memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
+       r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
+       put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
+       r1_request->num_drives = rmd->num_it_nexus_entries;
+       put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
+       put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
+       if (rmd->num_it_nexus_entries == 3)
+               put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
+       put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
+       r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+       put_unaligned_le16(io_request->index, &r1_request->request_id);
+       r1_request->error_index = r1_request->request_id;
+       if (rmd->cdb_length > sizeof(r1_request->cdb))
+               rmd->cdb_length = sizeof(r1_request->cdb);
+       r1_request->cdb_length = rmd->cdb_length;
+       memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
+       /* The direction is always write. */
+       r1_request->data_direction = SOP_READ_FLAG;
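+       /*
+        * SOP presumably expresses data direction from the controller's point
+        * of view: for a host write the controller reads the data from host
+        * memory, hence SOP_READ_FLAG.
+        */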
+       if (encryption_info) {
+               r1_request->encryption_enable = true;
+               put_unaligned_le16(encryption_info->data_encryption_key_index,
+                               &r1_request->data_encryption_key_index);
+               put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+                               &r1_request->encrypt_tweak_lower);
+               put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+                               &r1_request->encrypt_tweak_upper);
+       }
+       rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
+       if (rc) {
+               pqi_free_io_request(io_request);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+       pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+       return 0;
+ }
+
+ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+       struct pqi_scsi_dev_raid_map_data *rmd)
+ {
+       int rc;
+       struct pqi_io_request *io_request;
+       struct pqi_aio_r56_path_request *r56_request;
+       io_request = pqi_alloc_io_request(ctrl_info);
+       io_request->io_complete_callback = pqi_aio_io_complete;
+       io_request->scmd = scmd;
+       io_request->raid_bypass = true;
+       r56_request = io_request->iu;
+       memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
+       if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
+               r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
+       else
+               r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
+       put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
+       put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
+       put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
+       if (rmd->raid_level == SA_RAID_6) {
+               put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
+               r56_request->xor_multiplier = rmd->xor_mult;
+       }
+       put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
+       r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+       put_unaligned_le64(rmd->row, &r56_request->row);
+       put_unaligned_le16(io_request->index, &r56_request->request_id);
+       r56_request->error_index = r56_request->request_id;
+       if (rmd->cdb_length > sizeof(r56_request->cdb))
+               rmd->cdb_length = sizeof(r56_request->cdb);
+       r56_request->cdb_length = rmd->cdb_length;
+       memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
+       /* The direction is always write. */
+       r56_request->data_direction = SOP_READ_FLAG;
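+       /* As above: SOP direction is presumably from the controller's point of view. */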
+       if (encryption_info) {
+               r56_request->encryption_enable = true;
+               put_unaligned_le16(encryption_info->data_encryption_key_index,
+                               &r56_request->data_encryption_key_index);
+               put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+                               &r56_request->encrypt_tweak_lower);
+               put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+                               &r56_request->encrypt_tweak_upper);
+       }
+       rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
+       if (rc) {
+               pqi_free_io_request(io_request);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+       pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+       return 0;
+ }
+
  static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd)
  {
        return hw_queue;
  }
  
+ static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
+ {
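+       /*
+        * Pass-through requests never use the bypass path; neither do commands
+        * that already failed it once (SCp.this_residual is bumped in
+        * pqi_aio_io_complete() when a bypass request must be retried).
+        */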
+       if (blk_rq_is_passthrough(scmd->request))
+               return false;
+       return scmd->SCp.this_residual == 0;
+ }
+
  /*
   * This function gets called just before we hand the completed SCSI request
   * back to the SML.
@@@ -5324,9 -5608,83 +5607,83 @@@ void pqi_prep_for_scsi_done(struct scsi
        atomic_dec(&device->scsi_cmds_outstanding);
  }
  
- static int pqi_scsi_queue_command(struct Scsi_Host *shost,
+ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd)
  {
+       u32 oldest_jiffies;
+       u8 lru_index;
+       int i;
+       int rc;
+       struct pqi_scsi_dev *device;
+       struct pqi_stream_data *pqi_stream_data;
+       struct pqi_scsi_dev_raid_map_data rmd;
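+       /*
+        * Returning true sends this write down the normal RAID path: either
+        * AIO RAID 5/6 writes are not enabled on the controller, or the write
+        * extends a detected sequential write stream.
+        */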
+       if (!ctrl_info->enable_stream_detection)
+               return false;
+       rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+       if (rc)
+               return false;
+       /* Check writes only. */
+       if (!rmd.is_write)
+               return false;
+       device = scmd->device->hostdata;
+       /* Check for RAID 5/6 streams. */
+       if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
+               return false;
+       /*
+        * If the controller does not support AIO RAID 5/6 writes, send these
+        * requests down the non-AIO path.
+        */
+       if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
+               (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
+               return true;
+       lru_index = 0;
+       oldest_jiffies = INT_MAX;
+       for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
+               pqi_stream_data = &device->stream_data[i];
+               /*
+                * Check whether this request is adjacent to, or falls within,
+                * the previous request.
+                */
+               if ((pqi_stream_data->next_lba &&
+                       rmd.first_block >= pqi_stream_data->next_lba) &&
+                       rmd.first_block <= pqi_stream_data->next_lba +
+                               rmd.block_cnt) {
+                       pqi_stream_data->next_lba = rmd.first_block +
+                               rmd.block_cnt;
+                       pqi_stream_data->last_accessed = jiffies;
+                       return true;
+               }
+               /* unused entry */
+               if (pqi_stream_data->last_accessed == 0) {
+                       lru_index = i;
+                       break;
+               }
+               /* Find entry with oldest last accessed time. */
+               if (pqi_stream_data->last_accessed <= oldest_jiffies) {
+                       oldest_jiffies = pqi_stream_data->last_accessed;
+                       lru_index = i;
+               }
+       }
+       /* Set LRU entry. */
+       pqi_stream_data = &device->stream_data[lru_index];
+       pqi_stream_data->last_accessed = jiffies;
+       pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
+       return false;
+ }
+
+ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ {
        int rc;
        struct pqi_ctrl_info *ctrl_info;
        struct pqi_scsi_dev *device;
        bool raid_bypassed;
  
        device = scmd->device->hostdata;
-       ctrl_info = shost_to_hba(shost);
  
        if (!device) {
                set_host_byte(scmd, DID_NO_CONNECT);
  
        atomic_inc(&device->scsi_cmds_outstanding);
  
+       ctrl_info = shost_to_hba(shost);
        if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
                set_host_byte(scmd, DID_NO_CONNECT);
                pqi_scsi_done(scmd);
                return 0;
        }
  
-       pqi_ctrl_busy(ctrl_info);
-       if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
-           pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
+       if (pqi_ctrl_blocked(ctrl_info)) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        if (pqi_is_logical_device(device)) {
                raid_bypassed = false;
                if (device->raid_bypass_enabled &&
-                       !blk_rq_is_passthrough(scmd->request)) {
-                       rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
-                               scmd, queue_group);
+                       pqi_is_bypass_eligible_request(scmd) &&
+                       !pqi_is_parity_write_stream(ctrl_info, scmd)) {
+                       rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
                        if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
                                raid_bypassed = true;
                                atomic_inc(&device->raid_bypass_cnt);
        }
  
  out:
-       pqi_ctrl_unbusy(ctrl_info);
        if (rc)
                atomic_dec(&device->scsi_cmds_outstanding);
  
@@@ -5478,6 -5834,7 +5833,7 @@@ static void pqi_fail_io_queued_for_devi
                        list_for_each_entry_safe(io_request, next,
                                &queue_group->request_list[path],
                                request_list_entry) {
                                scmd = io_request->scmd;
                                if (!scmd)
                                        continue;
  
                                list_del(&io_request->request_list_entry);
                                set_host_byte(scmd, DID_RESET);
+                               pqi_free_io_request(io_request);
+                               scsi_dma_unmap(scmd);
                                pqi_scsi_done(scmd);
                        }
  
        }
  }
  
- static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
- {
-       unsigned int i;
-       unsigned int path;
-       struct pqi_queue_group *queue_group;
-       unsigned long flags;
-       struct pqi_io_request *io_request;
-       struct pqi_io_request *next;
-       struct scsi_cmnd *scmd;
-       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
-               queue_group = &ctrl_info->queue_groups[i];
-               for (path = 0; path < 2; path++) {
-                       spin_lock_irqsave(&queue_group->submit_lock[path],
-                                               flags);
-                       list_for_each_entry_safe(io_request, next,
-                               &queue_group->request_list[path],
-                               request_list_entry) {
-                               scmd = io_request->scmd;
-                               if (!scmd)
-                                       continue;
-                               list_del(&io_request->request_list_entry);
-                               set_host_byte(scmd, DID_RESET);
-                               pqi_scsi_done(scmd);
-                       }
-                       spin_unlock_irqrestore(
-                               &queue_group->submit_lock[path], flags);
-               }
-       }
- }
+ #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS   10
  
  static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device, unsigned long timeout_secs)
+       struct pqi_scsi_dev *device, unsigned long timeout_msecs)
  {
-       unsigned long timeout;
+       int cmds_outstanding;
+       unsigned long start_jiffies;
+       unsigned long warning_timeout;
+       unsigned long msecs_waiting;
  
-       timeout = (timeout_secs * PQI_HZ) + jiffies;
+       start_jiffies = jiffies;
+       warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
  
-       while (atomic_read(&device->scsi_cmds_outstanding)) {
+       while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
                pqi_check_ctrl_health(ctrl_info);
                if (pqi_ctrl_offline(ctrl_info))
                        return -ENXIO;
-               if (timeout_secs != NO_TIMEOUT) {
-                       if (time_after(jiffies, timeout)) {
-                               dev_err(&ctrl_info->pci_dev->dev,
-                                       "timed out waiting for pending IO\n");
-                               return -ETIMEDOUT;
-                       }
-               }
-               usleep_range(1000, 2000);
-       }
-       return 0;
- }
- static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-       unsigned long timeout_secs)
- {
-       bool io_pending;
-       unsigned long flags;
-       unsigned long timeout;
-       struct pqi_scsi_dev *device;
-       timeout = (timeout_secs * PQI_HZ) + jiffies;
-       while (1) {
-               io_pending = false;
-               spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-               list_for_each_entry(device, &ctrl_info->scsi_device_list,
-                       scsi_device_list_entry) {
-                       if (atomic_read(&device->scsi_cmds_outstanding)) {
-                               io_pending = true;
-                               break;
-                       }
+               msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
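+               /* Fail the wait once the caller's timeout expires; otherwise only warn periodically below. */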
+               if (msecs_waiting > timeout_msecs) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
+                               ctrl_info->scsi_host->host_no, device->bus, device->target,
+                               device->lun, msecs_waiting / 1000, cmds_outstanding);
+                       return -ETIMEDOUT;
                }
-               spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
-                                       flags);
-               if (!io_pending)
-                       break;
-               pqi_check_ctrl_health(ctrl_info);
-               if (pqi_ctrl_offline(ctrl_info))
-                       return -ENXIO;
-               if (timeout_secs != NO_TIMEOUT) {
-                       if (time_after(jiffies, timeout)) {
-                               dev_err(&ctrl_info->pci_dev->dev,
-                                       "timed out waiting for pending IO\n");
-                               return -ETIMEDOUT;
-                       }
+               if (time_after(jiffies, warning_timeout)) {
+                       dev_warn(&ctrl_info->pci_dev->dev,
+                               "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
+                               ctrl_info->scsi_host->host_no, device->bus, device->target,
+                               device->lun, msecs_waiting / 1000, cmds_outstanding);
+                       warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
                }
                usleep_range(1000, 2000);
        }
        return 0;
  }
  
- static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
- {
-       while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
-               pqi_check_ctrl_health(ctrl_info);
-               if (pqi_ctrl_offline(ctrl_info))
-                       return -ENXIO;
-               usleep_range(1000, 2000);
-       }
-       return 0;
- }
  static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
        void *context)
  {
        complete(waiting);
  }
  
- #define PQI_LUN_RESET_TIMEOUT_SECS            30
  #define PQI_LUN_RESET_POLL_COMPLETION_SECS    10
  
  static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device, struct completion *wait)
  {
        int rc;
+       unsigned int wait_secs;
+       wait_secs = 0;
  
        while (1) {
                if (wait_for_completion_io_timeout(wait,
                        rc = -ENXIO;
                        break;
                }
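+               /* Reset has not completed yet; warn after each polling interval. */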
+               wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
+               dev_warn(&ctrl_info->pci_dev->dev,
+                       "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n",
+                       ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun,
+                       wait_secs);
        }
  
        return rc;
  }
  
- static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device)
+ #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS   30
+
+ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
  {
        int rc;
        struct pqi_io_request *io_request;
                sizeof(request->lun_number));
        request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
        if (ctrl_info->tmf_iu_timeout_supported)
-               put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
-                                       &request->timeout);
+               put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
  
-       pqi_start_io(ctrl_info,
-               &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+       pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
                io_request);
  
        rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
        return rc;
  }
  
- /* Performs a reset at the LUN level. */
- #define PQI_LUN_RESET_RETRIES                 3
- #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS    10000
- #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
+ #define PQI_LUN_RESET_RETRIES                         3
+ #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS            (10 * 1000)
+ #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS                (10 * 60 * 1000)
+ #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
  
- static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device)
+ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
  {
-       int rc;
+       int reset_rc;
+       int wait_rc;
        unsigned int retries;
-       unsigned long timeout_secs;
+       unsigned long timeout_msecs;
  
        for (retries = 0;;) {
-               rc = pqi_lun_reset(ctrl_info, device);
-               if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+               reset_rc = pqi_lun_reset(ctrl_info, device);
+               if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
                        break;
                msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
        }
  
-       timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
+       timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
+               PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
  
-       rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
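+       /* Report a pending-I/O wait failure only if the reset itself succeeded. */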
+       wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
+       if (wait_rc && reset_rc == 0)
+               reset_rc = wait_rc;
  
-       return rc == 0 ? SUCCESS : FAILED;
+       return reset_rc == 0 ? SUCCESS : FAILED;
  }
  
  static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
  {
        int rc;
  
-       mutex_lock(&ctrl_info->lun_reset_mutex);
        pqi_ctrl_block_requests(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
        pqi_fail_io_queued_for_device(ctrl_info, device);
        rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
-       pqi_device_reset_start(device);
-       pqi_ctrl_unblock_requests(ctrl_info);
        if (rc)
                rc = FAILED;
        else
-               rc = _pqi_device_reset(ctrl_info, device);
-       pqi_device_reset_done(device);
-       mutex_unlock(&ctrl_info->lun_reset_mutex);
+               rc = pqi_lun_reset_with_retries(ctrl_info, device);
+       pqi_ctrl_unblock_requests(ctrl_info);
  
        return rc;
  }
@@@ -5748,29 -6032,25 +6031,25 @@@ static int pqi_eh_device_reset_handler(
        ctrl_info = shost_to_hba(shost);
        device = scmd->device->hostdata;
  
+       mutex_lock(&ctrl_info->lun_reset_mutex);
        dev_err(&ctrl_info->pci_dev->dev,
                "resetting scsi %d:%d:%d:%d\n",
                shost->host_no, device->bus, device->target, device->lun);
  
        pqi_check_ctrl_health(ctrl_info);
-       if (pqi_ctrl_offline(ctrl_info) ||
-               pqi_device_reset_blocked(ctrl_info)) {
+       if (pqi_ctrl_offline(ctrl_info))
                rc = FAILED;
-               goto out;
-       }
-       pqi_wait_until_ofa_finished(ctrl_info);
-       atomic_inc(&ctrl_info->sync_cmds_outstanding);
-       rc = pqi_device_reset(ctrl_info, device);
-       atomic_dec(&ctrl_info->sync_cmds_outstanding);
+       else
+               rc = pqi_device_reset(ctrl_info, device);
  
- out:
        dev_err(&ctrl_info->pci_dev->dev,
                "reset of scsi %d:%d:%d:%d: %s\n",
                shost->host_no, device->bus, device->target, device->lun,
                rc == SUCCESS ? "SUCCESS" : "FAILED");
  
+       mutex_unlock(&ctrl_info->lun_reset_mutex);
        return rc;
  }
  
@@@ -5808,10 -6088,13 +6087,13 @@@ static int pqi_slave_alloc(struct scsi_
                        scsi_change_queue_depth(sdev,
                                device->advertised_queue_depth);
                }
-               if (pqi_is_logical_device(device))
+               if (pqi_is_logical_device(device)) {
                        pqi_disable_write_same(sdev);
-               else
+               } else {
                        sdev->allow_restart = 1;
+                       if (device->device_type == SA_DEVICE_TYPE_NVME)
+                               pqi_disable_write_same(sdev);
+               }
        }
  
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@@ -5985,6 -6268,8 +6267,8 @@@ static int pqi_passthru_ioctl(struct pq
  
        if (pqi_ctrl_offline(ctrl_info))
                return -ENXIO;
+       if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
+               return -EBUSY;
        if (!arg)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
  
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
+               PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
  
        if (iocommand.buf_size > 0)
                pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
@@@ -6121,9 -6406,6 +6405,6 @@@ static int pqi_ioctl(struct scsi_devic
  
        ctrl_info = shost_to_hba(sdev->host);
  
-       if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
-               return -EBUSY;
        switch (cmd) {
        case CCISS_DEREGDISK:
        case CCISS_REGNEWDISK:
@@@ -6156,14 -6438,13 +6437,13 @@@ static ssize_t pqi_firmware_version_sho
        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);
  
-       return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
  }
  
  static ssize_t pqi_driver_version_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
  {
-       return snprintf(buffer, PAGE_SIZE, "%s\n",
-                       DRIVER_VERSION BUILD_TIMESTAMP);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
  }
  
  static ssize_t pqi_serial_number_show(struct device *dev,
        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);
  
-       return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
  }
  
  static ssize_t pqi_model_show(struct device *dev,
        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);
  
-       return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
  }
  
  static ssize_t pqi_vendor_show(struct device *dev,
        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);
  
-       return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
  }
  
  static ssize_t pqi_host_rescan_store(struct device *dev,
@@@ -6252,14 -6533,103 +6532,103 @@@ static ssize_t pqi_lockup_action_store(
        return -EINVAL;
  }
  
+ static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+ {
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       return scnprintf(buffer, 10, "%x\n",
+                       ctrl_info->enable_stream_detection);
+ }
+
+ static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
+       struct device_attribute *attr, const char *buffer, size_t count)
+ {
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       u8 set_stream_detection = 0;
+       if (kstrtou8(buffer, 0, &set_stream_detection))
+               return -EINVAL;
+       if (set_stream_detection > 0)
+               set_stream_detection = 1;
+       ctrl_info->enable_stream_detection = set_stream_detection;
+       return count;
+ }
+
+ static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+ {
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
+ }
+
+ static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
+       struct device_attribute *attr, const char *buffer, size_t count)
+ {
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       u8 set_r5_writes = 0;
+       if (kstrtou8(buffer, 0, &set_r5_writes))
+               return -EINVAL;
+       if (set_r5_writes > 0)
+               set_r5_writes = 1;
+       ctrl_info->enable_r5_writes = set_r5_writes;
+       return count;
+ }
+
+ static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+ {
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
+ }
+
+ static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
+       struct device_attribute *attr, const char *buffer, size_t count)
+ {
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       u8 set_r6_writes = 0;
+       if (kstrtou8(buffer, 0, &set_r6_writes))
+               return -EINVAL;
+       if (set_r6_writes > 0)
+               set_r6_writes = 1;
+       ctrl_info->enable_r6_writes = set_r6_writes;
+       return count;
+ }
+
  static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
  static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
  static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
  static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
  static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
  static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
- static DEVICE_ATTR(lockup_action, 0644,
-       pqi_lockup_action_show, pqi_lockup_action_store);
+ static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
+       pqi_lockup_action_store);
+ static DEVICE_ATTR(enable_stream_detection, 0644,
+       pqi_host_enable_stream_detection_show,
+       pqi_host_enable_stream_detection_store);
+ static DEVICE_ATTR(enable_r5_writes, 0644,
+       pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
+ static DEVICE_ATTR(enable_r6_writes, 0644,
+       pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
  
  static struct device_attribute *pqi_shost_attrs[] = {
        &dev_attr_driver_version,
        &dev_attr_vendor,
        &dev_attr_rescan,
        &dev_attr_lockup_action,
+       &dev_attr_enable_stream_detection,
+       &dev_attr_enable_r5_writes,
+       &dev_attr_enable_r6_writes,
        NULL
  };
  
@@@ -6301,8 -6674,9 +6673,9 @@@ static ssize_t pqi_unique_id_show(struc
  
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  
-       return snprintf(buffer, PAGE_SIZE,
-               "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+       return scnprintf(buffer, PAGE_SIZE,
+               "%02X%02X%02X%02X%02X%02X%02X%02X"
+               "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                unique_id[0], unique_id[1], unique_id[2], unique_id[3],
                unique_id[4], unique_id[5], unique_id[6], unique_id[7],
                unique_id[8], unique_id[9], unique_id[10], unique_id[11],
@@@ -6333,7 -6707,7 +6706,7 @@@ static ssize_t pqi_lunid_show(struct de
  
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  
-       return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+       return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
  }
  
  #define MAX_PATHS     8
@@@ -6445,7 -6819,7 +6818,7 @@@ static ssize_t pqi_sas_address_show(str
  
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  
-       return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
+       return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
  }
  
  static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
@@@ -6503,7 -6877,7 +6876,7 @@@ static ssize_t pqi_raid_level_show(stru
  
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  
-       return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
  }
  
  static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
  
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  
-       return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+       return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
  }
  
  static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
@@@ -6577,9 -6951,7 +6950,7 @@@ static int pqi_register_scsi(struct pqi
  
        shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
        if (!shost) {
-               dev_err(&ctrl_info->pci_dev->dev,
-                       "scsi_host_alloc failed for controller %u\n",
-                       ctrl_info->ctrl_id);
+               dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
                return -ENOMEM;
        }
  
        shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
        shost->unique_id = shost->irq;
        shost->nr_hw_queues = ctrl_info->num_queue_groups;
+       shost->host_tagset = 1;
        shost->hostdata[0] = (unsigned long)ctrl_info;
  
        rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
        if (rc) {
-               dev_err(&ctrl_info->pci_dev->dev,
-                       "scsi_add_host failed for controller %u\n",
-                       ctrl_info->ctrl_id);
+               dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
                goto free_host;
        }
  
        rc = pqi_add_sas_host(shost, ctrl_info);
        if (rc) {
-               dev_err(&ctrl_info->pci_dev->dev,
-                       "add SAS host failed for controller %u\n",
-                       ctrl_info->ctrl_id);
+               dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
                goto remove_host;
        }
  
@@@ -6682,8 -7051,7 +7050,7 @@@ static int pqi_reset(struct pqi_ctrl_in
                rc = sis_pqi_reset_quiesce(ctrl_info);
                if (rc) {
                        dev_err(&ctrl_info->pci_dev->dev,
-                               "PQI reset failed during quiesce with error %d\n",
-                               rc);
+                               "PQI reset failed during quiesce with error %d\n", rc);
                        return rc;
                }
        }
@@@ -6738,13 -7106,24 +7105,24 @@@ static int pqi_get_ctrl_product_details
        if (rc)
                goto out;
  
-       memcpy(ctrl_info->firmware_version, identify->firmware_version,
-               sizeof(identify->firmware_version));
-       ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
-       snprintf(ctrl_info->firmware_version +
-               strlen(ctrl_info->firmware_version),
-               sizeof(ctrl_info->firmware_version),
-               "-%u", get_unaligned_le16(&identify->firmware_build_number));
+       if (get_unaligned_le32(&identify->extra_controller_flags) &
+               BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
+               memcpy(ctrl_info->firmware_version,
+                       identify->firmware_version_long,
+                       sizeof(identify->firmware_version_long));
+       } else {
+               memcpy(ctrl_info->firmware_version,
+                       identify->firmware_version_short,
+                       sizeof(identify->firmware_version_short));
+               ctrl_info->firmware_version
+                       [sizeof(identify->firmware_version_short)] = '\0';
+               snprintf(ctrl_info->firmware_version +
+                       strlen(ctrl_info->firmware_version),
+                       sizeof(ctrl_info->firmware_version) -
+                       sizeof(identify->firmware_version_short),
+                       "-%u",
+                       get_unaligned_le16(&identify->firmware_build_number));
+       }
  
        memcpy(ctrl_info->model, identify->product_id,
                sizeof(identify->product_id));
@@@ -6831,8 -7210,7 +7209,7 @@@ static int pqi_config_table_update(stru
        put_unaligned_le16(last_section,
                &request.data.config_table_update.last_section);
  
-       return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               0, NULL, NO_TIMEOUT);
+       return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
  }
  
  static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
  {
        void *features_requested;
        void __iomem *features_requested_iomem_addr;
+       void __iomem *host_max_known_feature_iomem_addr;
  
        features_requested = firmware_features->features_supported +
                le16_to_cpu(firmware_features->num_elements);
        memcpy_toio(features_requested_iomem_addr, features_requested,
                le16_to_cpu(firmware_features->num_elements));
  
+       if (pqi_is_firmware_feature_supported(firmware_features,
+               PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
+               host_max_known_feature_iomem_addr =
+                       features_requested_iomem_addr +
+                       (le16_to_cpu(firmware_features->num_elements) * 2) +
+                       sizeof(__le16);
+               writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
+                       host_max_known_feature_iomem_addr);
+       }
        return pqi_config_table_update(ctrl_info,
                PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
                PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
@@@ -6888,16 -7277,28 +7276,28 @@@ static void pqi_ctrl_update_feature_fla
        struct pqi_firmware_feature *firmware_feature)
  {
        switch (firmware_feature->feature_bit) {
+       case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
+               ctrl_info->enable_r1_writes = firmware_feature->enabled;
+               break;
+       case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
+               ctrl_info->enable_r5_writes = firmware_feature->enabled;
+               break;
+       case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
+               ctrl_info->enable_r6_writes = firmware_feature->enabled;
+               break;
        case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
                ctrl_info->soft_reset_handshake_supported =
-                       firmware_feature->enabled;
+                       firmware_feature->enabled &&
+                       pqi_read_soft_reset_status(ctrl_info);
                break;
        case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
-               ctrl_info->raid_iu_timeout_supported =
-                       firmware_feature->enabled;
+               ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
                break;
        case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
-               ctrl_info->tmf_iu_timeout_supported =
+               ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
+               break;
+       case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
+               ctrl_info->unique_wwid_in_report_phys_lun_supported =
                        firmware_feature->enabled;
                break;
        }
@@@ -6916,16 -7317,61 +7316,61 @@@ static DEFINE_MUTEX(pqi_firmware_featur
  
  static struct pqi_firmware_feature pqi_firmware_features[] = {
        {
-               .feature_name = "Online Firmware Activation",
-               .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
+               .feature_name = "Online Firmware Activation",
+               .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "Serial Management Protocol",
+               .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "Maximum Known Feature",
+               .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "RAID 0 Read Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
                .feature_status = pqi_firmware_feature_status,
        },
        {
-               .feature_name = "Serial Management Protocol",
-               .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
+               .feature_name = "RAID 1 Read Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "RAID 5 Read Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
                .feature_status = pqi_firmware_feature_status,
        },
        {
+               .feature_name = "RAID 6 Read Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "RAID 0 Write Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "RAID 1 Write Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
+               .feature_status = pqi_ctrl_update_feature_flags,
+       },
+       {
+               .feature_name = "RAID 5 Write Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
+               .feature_status = pqi_ctrl_update_feature_flags,
+       },
+       {
+               .feature_name = "RAID 6 Write Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
+               .feature_status = pqi_ctrl_update_feature_flags,
+       },
+       {
                .feature_name = "New Soft Reset Handshake",
                .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
                .feature_status = pqi_ctrl_update_feature_flags,
                .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
                .feature_status = pqi_ctrl_update_feature_flags,
        },
+       {
+               .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "Unique WWID in Report Physical LUN",
+               .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
+               .feature_status = pqi_ctrl_update_feature_flags,
+       },
  };
  
  static void pqi_process_firmware_features(
                if (pqi_is_firmware_feature_enabled(firmware_features,
                        firmware_features_iomem_addr,
                        pqi_firmware_features[i].feature_bit)) {
-                       pqi_firmware_features[i].enabled = true;
+                               pqi_firmware_features[i].enabled = true;
                }
                pqi_firmware_feature_update(ctrl_info,
                        &pqi_firmware_features[i]);
@@@ -7024,14 -7480,34 +7479,34 @@@ static void pqi_process_firmware_featur
        mutex_unlock(&pqi_firmware_features_mutex);
  }
  
+ /*
+  * Reset all controller settings that can be initialized during the processing
+  * of the PQI Configuration Table.
+  */
+ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
+ {
+       ctrl_info->heartbeat_counter = NULL;
+       ctrl_info->soft_reset_status = NULL;
+       ctrl_info->soft_reset_handshake_supported = false;
+       ctrl_info->enable_r1_writes = false;
+       ctrl_info->enable_r5_writes = false;
+       ctrl_info->enable_r6_writes = false;
+       ctrl_info->raid_iu_timeout_supported = false;
+       ctrl_info->tmf_iu_timeout_supported = false;
+       ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
+ }
+
  static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
  {
        u32 table_length;
        u32 section_offset;
+       bool firmware_feature_section_present;
        void __iomem *table_iomem_addr;
        struct pqi_config_table *config_table;
        struct pqi_config_table_section_header *section;
        struct pqi_config_table_section_info section_info;
+       struct pqi_config_table_section_info feature_section_info;
  
        table_length = ctrl_info->config_table_length;
        if (table_length == 0)
         * Copy the config table contents from I/O memory space into the
         * temporary buffer.
         */
-       table_iomem_addr = ctrl_info->iomem_base +
-               ctrl_info->config_table_offset;
+       table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
        memcpy_fromio(config_table, table_iomem_addr, table_length);
  
+       firmware_feature_section_present = false;
        section_info.ctrl_info = ctrl_info;
-       section_offset =
-               get_unaligned_le32(&config_table->first_section_offset);
+       section_offset = get_unaligned_le32(&config_table->first_section_offset);
  
        while (section_offset) {
                section = (void *)config_table + section_offset;
  
                section_info.section = section;
                section_info.section_offset = section_offset;
-               section_info.section_iomem_addr =
-                       table_iomem_addr + section_offset;
+               section_info.section_iomem_addr = table_iomem_addr + section_offset;
  
                switch (get_unaligned_le16(&section->section_id)) {
                case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
-                       pqi_process_firmware_features_section(&section_info);
+                       firmware_feature_section_present = true;
+                       feature_section_info = section_info;
                        break;
                case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
                        if (pqi_disable_heartbeat)
                                ctrl_info->heartbeat_counter =
                                        table_iomem_addr +
                                        section_offset +
-                                       offsetof(
-                                       struct pqi_config_table_heartbeat,
+                                       offsetof(struct pqi_config_table_heartbeat,
                                                heartbeat_counter);
                        break;
                case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
                                table_iomem_addr +
                                section_offset +
                                offsetof(struct pqi_config_table_soft_reset,
-                                               soft_reset_status);
+                                       soft_reset_status);
                        break;
                }
  
-               section_offset =
-                       get_unaligned_le16(&section->next_section_offset);
+               section_offset = get_unaligned_le16(&section->next_section_offset);
        }
  
+       /*
+        * We process the firmware feature section after all other sections
+        * have been processed so that the feature bit callbacks can take
+        * into account the settings configured by other sections.
+        */
+       if (firmware_feature_section_present)
+               pqi_process_firmware_features_section(&feature_section_info);
        kfree(config_table);
  
        return 0;
@@@ -7140,15 -7621,14 +7620,14 @@@ static int pqi_force_sis_mode(struct pq
        return pqi_revert_to_sis_mode(ctrl_info);
  }
  
- #define PQI_POST_RESET_DELAY_B4_MSGU_READY    5000
  static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
  {
        int rc;
+       u32 product_id;
  
        if (reset_devices) {
                sis_soft_reset(ctrl_info);
-               msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+               msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
        } else {
                rc = pqi_force_sis_mode(ctrl_info);
                if (rc)
                return rc;
        }
  
+       product_id = sis_get_product_id(ctrl_info);
+       ctrl_info->product_id = (u8)product_id;
+       ctrl_info->product_revision = (u8)(product_id >> 8);
        if (reset_devices) {
                if (ctrl_info->max_outstanding_requests >
                        PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
-                       ctrl_info->max_outstanding_requests =
+                               ctrl_info->max_outstanding_requests =
                                        PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
        } else {
                if (ctrl_info->max_outstanding_requests >
                        PQI_MAX_OUTSTANDING_REQUESTS)
-                       ctrl_info->max_outstanding_requests =
+                               ctrl_info->max_outstanding_requests =
                                        PQI_MAX_OUTSTANDING_REQUESTS;
        }
  
  
        pqi_start_heartbeat_timer(ctrl_info);
  
+       if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+               rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+               if (rc) { /* Supported features not returned correctly. */
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "error obtaining advanced RAID bypass configuration\n");
+                       return rc;
+               }
+               ctrl_info->ciss_report_log_flags |=
+                       CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+       }
        rc = pqi_enable_events(ctrl_info);
        if (rc) {
                dev_err(&ctrl_info->pci_dev->dev,
@@@ -7443,12 -7938,25 +7937,25 @@@ static int pqi_ctrl_init_resume(struct 
        ctrl_info->controller_online = true;
        pqi_ctrl_unblock_requests(ctrl_info);
  
+       pqi_ctrl_reset_config(ctrl_info);
        rc = pqi_process_config_table(ctrl_info);
        if (rc)
                return rc;
  
        pqi_start_heartbeat_timer(ctrl_info);
  
+       if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+               rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+               if (rc) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "error obtaining advanced RAID bypass configuration\n");
+                       return rc;
+               }
+               ctrl_info->ciss_report_log_flags |=
+                       CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+       }
        rc = pqi_enable_events(ctrl_info);
        if (rc) {
                dev_err(&ctrl_info->pci_dev->dev,
                return rc;
        }
  
-       pqi_schedule_update_time_worker(ctrl_info);
+       if (pqi_ofa_in_progress(ctrl_info))
+               pqi_ctrl_unblock_scan(ctrl_info);
  
        pqi_scan_scsi_devices(ctrl_info);
  
        return 0;
  }
  
- static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
-       u16 timeout)
+ static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
  {
        int rc;
  
@@@ -7591,7 -8099,6 +8098,6 @@@ static struct pqi_ctrl_info *pqi_alloc_
  
        INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
        atomic_set(&ctrl_info->num_interrupts, 0);
-       atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
  
        INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
        INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
        timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
        INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
  
+       INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
+       INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
        sema_init(&ctrl_info->sync_request_sem,
                PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
        init_waitqueue_head(&ctrl_info->block_requests_wait);
  
-       INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
-       spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
-       INIT_WORK(&ctrl_info->raid_bypass_retry_work,
-               pqi_raid_bypass_retry_worker);
        ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
        ctrl_info->irq_mode = IRQ_MODE_NONE;
        ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
  
+       ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+       ctrl_info->max_transfer_encrypted_sas_sata =
+               PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
+       ctrl_info->max_transfer_encrypted_nvme =
+               PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
+       ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
+       ctrl_info->max_write_raid_1_10_2drive = ~0;
+       ctrl_info->max_write_raid_1_10_3drive = ~0;
        return ctrl_info;
  }
  
@@@ -7663,81 -8177,57 +8176,57 @@@ static void pqi_remove_ctrl(struct pqi_
  
  static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
  {
-       pqi_cancel_update_time_worker(ctrl_info);
-       pqi_cancel_rescan_worker(ctrl_info);
-       pqi_wait_until_lun_reset_finished(ctrl_info);
-       pqi_wait_until_scan_finished(ctrl_info);
-       pqi_ctrl_ofa_start(ctrl_info);
+       pqi_ctrl_block_scan(ctrl_info);
+       pqi_scsi_block_requests(ctrl_info);
+       pqi_ctrl_block_device_reset(ctrl_info);
        pqi_ctrl_block_requests(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
-       pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
-       pqi_fail_io_queued_for_all_devices(ctrl_info);
-       pqi_wait_until_inbound_queues_empty(ctrl_info);
        pqi_stop_heartbeat_timer(ctrl_info);
-       ctrl_info->pqi_mode_enabled = false;
-       pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
  }
  
  static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
  {
-       pqi_ofa_free_host_buffer(ctrl_info);
-       ctrl_info->pqi_mode_enabled = true;
-       pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
-       ctrl_info->controller_online = true;
-       pqi_ctrl_unblock_requests(ctrl_info);
        pqi_start_heartbeat_timer(ctrl_info);
-       pqi_schedule_update_time_worker(ctrl_info);
-       pqi_clear_soft_reset_status(ctrl_info,
-               PQI_SOFT_RESET_ABORT);
-       pqi_scan_scsi_devices(ctrl_info);
+       pqi_ctrl_unblock_requests(ctrl_info);
+       pqi_ctrl_unblock_device_reset(ctrl_info);
+       pqi_scsi_unblock_requests(ctrl_info);
+       pqi_ctrl_unblock_scan(ctrl_info);
  }
  
- static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
-       u32 total_size, u32 chunk_size)
+ static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
  {
-       u32 sg_count;
-       u32 size;
        int i;
-       struct pqi_sg_descriptor *mem_descriptor = NULL;
+       u32 sg_count;
        struct device *dev;
        struct pqi_ofa_memory *ofap;
-       dev = &ctrl_info->pci_dev->dev;
-       sg_count = (total_size + chunk_size - 1);
-       sg_count /= chunk_size;
+       struct pqi_sg_descriptor *mem_descriptor;
+       dma_addr_t dma_handle;
  
        ofap = ctrl_info->pqi_ofa_mem_virt_addr;
  
-       if (sg_count*chunk_size < total_size)
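+       /* Give up if the buffer cannot be covered by the available SG descriptors. */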
+       sg_count = DIV_ROUND_UP(total_size, chunk_size);
+       if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
                goto out;
  
-       ctrl_info->pqi_ofa_chunk_virt_addr =
-                               kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
+       ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
        if (!ctrl_info->pqi_ofa_chunk_virt_addr)
                goto out;
  
-       for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
-               dma_addr_t dma_handle;
+       dev = &ctrl_info->pci_dev->dev;
  
+       for (i = 0; i < sg_count; i++) {
                ctrl_info->pqi_ofa_chunk_virt_addr[i] =
-                       dma_alloc_coherent(dev, chunk_size, &dma_handle,
-                                          GFP_KERNEL);
+                       dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
                if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
-                       break;
+                       goto out_free_chunks;
                mem_descriptor = &ofap->sg_descriptor[i];
-               put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
-               put_unaligned_le32 (chunk_size, &mem_descriptor->length);
+               put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+               put_unaligned_le32(chunk_size, &mem_descriptor->length);
        }
  
-       if (!size || size < total_size)
-               goto out_free_chunks;
        put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
        put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
-       put_unaligned_le32(size, &ofap->bytes_allocated);
+       put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
  
        return 0;
  
@@@ -7745,82 -8235,87 +8234,87 @@@ out_free_chunks
        while (--i >= 0) {
                mem_descriptor = &ofap->sg_descriptor[i];
                dma_free_coherent(dev, chunk_size,
-                               ctrl_info->pqi_ofa_chunk_virt_addr[i],
-                               get_unaligned_le64(&mem_descriptor->address));
+                       ctrl_info->pqi_ofa_chunk_virt_addr[i],
+                       get_unaligned_le64(&mem_descriptor->address));
        }
        kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
  
  out:
-       put_unaligned_le32 (0, &ofap->bytes_allocated);
        return -ENOMEM;
  }
  
  static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
  {
        u32 total_size;
+       u32 chunk_size;
        u32 min_chunk_size;
-       u32 chunk_sz;
  
-       total_size = le32_to_cpu(
-                       ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
-       min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
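+       /* Nothing to allocate if the firmware did not request an OFA buffer. */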
+       if (ctrl_info->ofa_bytes_requested == 0)
+               return 0;
+       total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
+       min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+       min_chunk_size = PAGE_ALIGN(min_chunk_size);
  
-       for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
-               if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
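+       /* Halve the (page-aligned) chunk size until an allocation succeeds or it falls below the minimum. */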
+       for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
+               if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
                        return 0;
+               chunk_size /= 2;
+               chunk_size = PAGE_ALIGN(chunk_size);
+       }
  
        return -ENOMEM;
  }
  
- static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
-       u32 bytes_requested)
+ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
  {
-       struct pqi_ofa_memory *pqi_ofa_memory;
        struct device *dev;
+       struct pqi_ofa_memory *ofap;
  
        dev = &ctrl_info->pci_dev->dev;
-       pqi_ofa_memory = dma_alloc_coherent(dev,
-                                           PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
-                                           &ctrl_info->pqi_ofa_mem_dma_handle,
-                                           GFP_KERNEL);
  
-       if (!pqi_ofa_memory)
+       ofap = dma_alloc_coherent(dev, sizeof(*ofap),
+               &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
+       if (!ofap)
                return;
  
-       put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
-       memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
-                                       sizeof(pqi_ofa_memory->signature));
-       pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
-       ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
+       ctrl_info->pqi_ofa_mem_virt_addr = ofap;
  
        if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
-               dev_err(dev, "Failed to allocate host buffer of size = %u",
-                       bytes_requested);
+               dev_err(dev,
+                       "failed to allocate host buffer for Online Firmware Activation\n");
+               dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
+               ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+               return;
        }
  
-       return;
+       put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
+       memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
  }
  
  static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
  {
-       int i;
-       struct pqi_sg_descriptor *mem_descriptor;
+       unsigned int i;
+       struct device *dev;
        struct pqi_ofa_memory *ofap;
+       struct pqi_sg_descriptor *mem_descriptor;
+       unsigned int num_memory_descriptors;
  
        ofap = ctrl_info->pqi_ofa_mem_virt_addr;
        if (!ofap)
                return;
  
-       if (!ofap->bytes_allocated)
+       dev = &ctrl_info->pci_dev->dev;
+       if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
                goto out;
  
        mem_descriptor = ofap->sg_descriptor;
+       num_memory_descriptors =
+               get_unaligned_le16(&ofap->num_memory_descriptors);
  
-       for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
-               i++) {
-               dma_free_coherent(&ctrl_info->pci_dev->dev,
+       for (i = 0; i < num_memory_descriptors; i++) {
+               dma_free_coherent(dev,
                        get_unaligned_le32(&mem_descriptor[i].length),
                        ctrl_info->pqi_ofa_chunk_virt_addr[i],
                        get_unaligned_le64(&mem_descriptor[i].address));
        kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
  
  out:
-       dma_free_coherent(&ctrl_info->pci_dev->dev,
-                       PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
-                       ctrl_info->pqi_ofa_mem_dma_handle);
+       dma_free_coherent(dev, sizeof(*ofap), ofap,
+               ctrl_info->pqi_ofa_mem_dma_handle);
        ctrl_info->pqi_ofa_mem_virt_addr = NULL;
  }
  
  static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
  {
+       u32 buffer_length;
        struct pqi_vendor_general_request request;
-       size_t size;
        struct pqi_ofa_memory *ofap;
  
        memset(&request, 0, sizeof(request));
  
-       ofap = ctrl_info->pqi_ofa_mem_virt_addr;
        request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
        put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
                &request.header.iu_length);
        put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
                &request.function_code);
  
+       ofap = ctrl_info->pqi_ofa_mem_virt_addr;
        if (ofap) {
-               size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
+               buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
                        get_unaligned_le16(&ofap->num_memory_descriptors) *
                        sizeof(struct pqi_sg_descriptor);
  
                put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
                        &request.data.ofa_memory_allocation.buffer_address);
-               put_unaligned_le32(size,
+               put_unaligned_le32(buffer_length,
                        &request.data.ofa_memory_allocation.buffer_length);
        }
  
-       return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               0, NULL, NO_TIMEOUT);
+       return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
  }
  
- static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
+ static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
  {
-       msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+       ssleep(delay_secs);
        return pqi_ctrl_init_resume(ctrl_info);
  }
  
@@@ -7926,7 -8419,6 +8418,6 @@@ static void pqi_take_ctrl_offline_defer
        pqi_cancel_update_time_worker(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
        pqi_fail_all_outstanding_requests(ctrl_info);
-       pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
        pqi_ctrl_unblock_requests(ctrl_info);
  }
  
@@@ -8059,24 -8551,12 +8550,12 @@@ static void pqi_shutdown(struct pci_de
                return;
        }
  
-       pqi_disable_events(ctrl_info);
        pqi_wait_until_ofa_finished(ctrl_info);
-       pqi_cancel_update_time_worker(ctrl_info);
-       pqi_cancel_rescan_worker(ctrl_info);
-       pqi_cancel_event_worker(ctrl_info);
-       pqi_ctrl_shutdown_start(ctrl_info);
-       pqi_ctrl_wait_until_quiesced(ctrl_info);
-       rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
-       if (rc) {
-               dev_err(&pci_dev->dev,
-                       "wait for pending I/O failed\n");
-               return;
-       }
  
+       pqi_scsi_block_requests(ctrl_info);
        pqi_ctrl_block_device_reset(ctrl_info);
-       pqi_wait_until_lun_reset_finished(ctrl_info);
+       pqi_ctrl_block_requests(ctrl_info);
+       pqi_ctrl_wait_until_quiesced(ctrl_info);
  
        /*
         * Write all data in the controller's battery-backed cache to
                dev_err(&pci_dev->dev,
                        "unable to flush controller cache\n");
  
-       pqi_ctrl_block_requests(ctrl_info);
-       rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
-       if (rc) {
-               dev_err(&pci_dev->dev,
-                       "wait for pending sync cmds failed\n");
-               return;
-       }
        pqi_crash_if_pending_command(ctrl_info);
        pqi_reset(ctrl_info);
  }
@@@ -8130,19 -8601,18 +8600,18 @@@ static __maybe_unused int pqi_suspend(s
  
        ctrl_info = pci_get_drvdata(pci_dev);
  
-       pqi_disable_events(ctrl_info);
-       pqi_cancel_update_time_worker(ctrl_info);
-       pqi_cancel_rescan_worker(ctrl_info);
-       pqi_wait_until_scan_finished(ctrl_info);
-       pqi_wait_until_lun_reset_finished(ctrl_info);
        pqi_wait_until_ofa_finished(ctrl_info);
-       pqi_flush_cache(ctrl_info, SUSPEND);
+       pqi_ctrl_block_scan(ctrl_info);
+       pqi_scsi_block_requests(ctrl_info);
+       pqi_ctrl_block_device_reset(ctrl_info);
        pqi_ctrl_block_requests(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
-       pqi_wait_until_inbound_queues_empty(ctrl_info);
-       pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+       pqi_flush_cache(ctrl_info, SUSPEND);
        pqi_stop_heartbeat_timer(ctrl_info);
  
+       pqi_crash_if_pending_command(ctrl_info);
        if (state.event == PM_EVENT_FREEZE)
                return 0;
  
@@@ -8175,14 -8645,21 +8644,21 @@@ static __maybe_unused int pqi_resume(st
                                pci_dev->irq, rc);
                        return rc;
                }
-               pqi_start_heartbeat_timer(ctrl_info);
+               pqi_ctrl_unblock_device_reset(ctrl_info);
                pqi_ctrl_unblock_requests(ctrl_info);
+               pqi_scsi_unblock_requests(ctrl_info);
+               pqi_ctrl_unblock_scan(ctrl_info);
                return 0;
        }
  
        pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);
  
+       pqi_ctrl_unblock_device_reset(ctrl_info);
+       pqi_ctrl_unblock_requests(ctrl_info);
+       pqi_scsi_unblock_requests(ctrl_info);
+       pqi_ctrl_unblock_scan(ctrl_info);
        return pqi_ctrl_init_resume(ctrl_info);
  }
  
@@@ -8218,6 -8695,10 +8694,10 @@@ static const struct pci_device_id pqi_p
        },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x193d, 0x8460)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x193d, 0x1104)
        },
        {
        },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1bd4, 0x0051)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1bd4, 0x0052)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1bd4, 0x0053)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1bd4, 0x0054)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x19e5, 0xd227)
        },
        {
        },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1400)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1402)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1410)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1411)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1412)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1420)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1430)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1440)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1441)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1450)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1452)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1460)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1461)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1462)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1470)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1471)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1472)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1480)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1490)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1491)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_VENDOR_ID_ADVANTECH, 0x8312)
        },
        {
        },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x1002)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_VENDOR_ID_HP, 0x1100)
        },
        {
        },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1590, 0x0294)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1590, 0x02db)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1590, 0x02dc)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1590, 0x032e)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x1d8d, 0x0800)
        },
        {
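The long run of additions above only grows the driver's PCI ID table: each new controller variant is matched on the same Adaptec vendor/device pair (device 0x028f) plus a board-specific subsystem vendor/device pair. The pattern in isolation, with an illustrative table name (PCI_DEVICE_SUB and PCI_ANY_ID are the standard <linux/pci.h> helpers):

        #include <linux/module.h>
        #include <linux/pci.h>

        static const struct pci_device_id example_pci_id_table[] = {
                {
                        /* exact match: device 0x028f, subsystem 0x9005:0x1400 */
                        PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                                       PCI_VENDOR_ID_ADAPTEC2, 0x1400)
                },
                {
                        /* catch-all: same device, any subsystem IDs */
                        PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                                       PCI_ANY_ID, PCI_ANY_ID)
                },
                { 0 }
        };
        MODULE_DEVICE_TABLE(pci, example_pci_id_table);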
@@@ -8602,6 -9235,8 +9234,8 @@@ static void __attribute__((unused)) ver
        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
                sis_driver_scratch) != 0xb0);
        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_product_identifier) != 0xb4);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
                sis_firmware_status) != 0xbc);
        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
                sis_mailbox) != 0x1000);
        BUILD_BUG_ON(offsetof(struct pqi_iu_header,
                response_queue_id) != 0x4);
        BUILD_BUG_ON(offsetof(struct pqi_iu_header,
-               work_area) != 0x6);
+               driver_flags) != 0x6);
        BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
  
        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
-               header.work_area) != 6);
+               header.driver_flags) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
-               header.work_area) != 6);
+               header.driver_flags) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                header.response_queue_id) != 4);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
-               header.work_area) != 6);
+               header.driver_flags) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                header.response_queue_id) != 4);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
-               header.work_area) != 6);
+               header.driver_flags) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                configuration_signature) != 1);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
-               firmware_version) != 5);
+               firmware_version_short) != 5);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                extended_logical_unit_count) != 154);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                firmware_build_number) != 190);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               vendor_id) != 200);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               product_id) != 208);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               extra_controller_flags) != 286);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                controller_mode) != 292);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               spare_part_number) != 293);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               firmware_version_long) != 325);
  
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                phys_bay_in_box) != 115);
                current_queue_depth_limit) != 1796);
        BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
  
+       BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+               page_code) != 0);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+               subpage_code) != 1);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+               buffer_length) != 2);
+       BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+               page_code) != 0);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+               subpage_code) != 1);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+               page_length) != 2);
+       BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
+               != 18);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               header) != 0);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               firmware_read_support) != 4);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               driver_read_support) != 5);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               firmware_write_support) != 6);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               driver_write_support) != 7);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_transfer_encrypted_sas_sata) != 8);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_transfer_encrypted_nvme) != 10);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_write_raid_5_6) != 12);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_write_raid_1_10_2drive) != 14);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_write_raid_1_10_3drive) != 16);
        BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
        BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
        BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
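The block of assertions above pins the on-wire layout of the PQI and BMIC structures at compile time: if a field is ever moved or resized, BUILD_BUG_ON() turns the mismatch into a build failure instead of silent firmware-interface corruption. The same pattern on a made-up structure (the field names and offsets below are illustrative, not taken from the driver):

        #include <linux/bug.h>
        #include <linux/stddef.h>
        #include <linux/types.h>

        struct example_iu_header {
                u8      iu_type;                /* expected at offset 0 */
                u8      reserved;               /* expected at offset 1 */
                __le16  iu_length;              /* expected at offset 2 */
                __le16  response_queue_id;      /* expected at offset 4 */
                __le16  driver_flags;           /* expected at offset 6 */
        };

        static void example_verify_structures(void)
        {
                /* Each check fails the build if the compiler lays the struct out differently. */
                BUILD_BUG_ON(offsetof(struct example_iu_header, iu_length) != 2);
                BUILD_BUG_ON(offsetof(struct example_iu_header, driver_flags) != 6);
                BUILD_BUG_ON(sizeof(struct example_iu_header) != 8);
        }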
diff --combined drivers/target/target_core_pscsi.c
@@@ -34,8 -34,6 +34,6 @@@
  #include "target_core_internal.h"
  #include "target_core_pscsi.h"
  
- #define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
  static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
  {
        return container_of(dev, struct pscsi_dev_virt, dev);
@@@ -620,8 -618,9 +618,9 @@@ static void pscsi_complete_cmd(struct s
                        unsigned char *buf;
  
                        buf = transport_kmap_data_sg(cmd);
-                       if (!buf)
+                       if (!buf) {
                                ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
+                       }
  
                        if (cdb[0] == MODE_SENSE_10) {
                                if (!(buf[3] & 0x80))
@@@ -910,7 -909,7 +909,7 @@@ new_bio
                                        " %d i: %d bio: %p, allocating another"
                                        " bio\n", bio->bi_vcnt, i, bio);
  
 -                              rc = blk_rq_append_bio(req, &bio);
 +                              rc = blk_rq_append_bio(req, bio);
                                if (rc) {
                                        pr_err("pSCSI: failed to append bio\n");
                                        goto fail;
        }
  
        if (bio) {
 -              rc = blk_rq_append_bio(req, &bio);
 +              rc = blk_rq_append_bio(req, bio);
                if (rc) {
                        pr_err("pSCSI: failed to append bio\n");
                        goto fail;
@@@ -1047,7 -1046,7 +1046,7 @@@ static void pscsi_req_done(struct reque
        int result = scsi_req(req)->result;
        u8 scsi_status = status_byte(result) << 1;
  
-       if (scsi_status) {
+       if (scsi_status != SAM_STAT_GOOD) {
                pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
                        " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
                        result);
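In the hunk above, status_byte(result) << 1 recovers the SAM status value from the packed SCSI result word, so comparing against SAM_STAT_GOOD states the intent explicitly rather than relying on GOOD being zero. A small sketch of the same extraction, assuming the classic status_byte() definition of that era ((result >> 1) & 0x7f); the helper names are illustrative:

        #include <linux/types.h>
        #include <scsi/scsi_proto.h>            /* SAM_STAT_* values */

        /* Assumed classic helper: the status byte is stored shifted right by one. */
        #define example_status_byte(result)     (((result) >> 1) & 0x7f)

        static bool example_cmd_has_status_exception(int result)
        {
                u8 sam_status = example_status_byte(result) << 1;

                /* Explicit comparison instead of an implicit non-zero test. */
                return sam_status != SAM_STAT_GOOD;
        }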
diff --combined include/linux/hyperv.h
@@@ -234,7 -234,6 +234,7 @@@ static inline u32 hv_get_avail_to_write
   * 5 . 0  (Newer Windows 10)
   * 5 . 1  (Windows 10 RS4)
   * 5 . 2  (Windows Server 2019, RS5)
 + * 5 . 3  (Windows Server 2022)
   */
  
  #define VERSION_WS2008  ((0 << 16) | (13))
  #define VERSION_WIN10_V5 ((5 << 16) | (0))
  #define VERSION_WIN10_V5_1 ((5 << 16) | (1))
  #define VERSION_WIN10_V5_2 ((5 << 16) | (2))
 +#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
  
  /* Make maximum size of pipe payload of 16K */
  #define MAX_PIPE_DATA_PAYLOAD         (sizeof(u8) * 16384)
@@@ -286,7 -284,7 +286,7 @@@ struct vmbus_channel_offer 
  
                /*
                 * Pipes:
 -               * The following sructure is an integrated pipe protocol, which
 +               * The following structure is an integrated pipe protocol, which
                 * is implemented on top of standard user-defined data. Pipe
                 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
                 * use.
@@@ -477,7 -475,6 +477,7 @@@ enum vmbus_channel_message_type 
        CHANNELMSG_TL_CONNECT_REQUEST           = 21,
        CHANNELMSG_MODIFYCHANNEL                = 22,
        CHANNELMSG_TL_CONNECT_RESULT            = 23,
 +      CHANNELMSG_MODIFYCHANNEL_RESPONSE       = 24,
        CHANNELMSG_COUNT
  };
  
@@@ -591,13 -588,6 +591,13 @@@ struct vmbus_channel_open_result 
        u32 status;
  } __packed;
  
 +/* Modify Channel Result parameters */
 +struct vmbus_channel_modifychannel_response {
 +      struct vmbus_channel_message_header header;
 +      u32 child_relid;
 +      u32 status;
 +} __packed;
 +
  /* Close channel parameters; */
  struct vmbus_channel_close_channel {
        struct vmbus_channel_message_header header;
@@@ -730,7 -720,6 +730,7 @@@ struct vmbus_channel_msginfo 
                struct vmbus_channel_gpadl_torndown gpadl_torndown;
                struct vmbus_channel_gpadl_created gpadl_created;
                struct vmbus_channel_version_response version_response;
 +              struct vmbus_channel_modifychannel_response modify_response;
        } response;
  
        u32 msgsize;
@@@ -894,11 -883,11 +894,11 @@@ struct vmbus_channel 
         * Support for sub-channels. For high performance devices,
         * it will be useful to have multiple sub-channels to support
         * a scalable communication infrastructure with the host.
 -       * The support for sub-channels is implemented as an extention
 +       * The support for sub-channels is implemented as an extension
         * to the current infrastructure.
         * The initial offer is considered the primary channel and this
         * offer message will indicate if the host supports sub-channels.
 -       * The guest is free to ask for sub-channels to be offerred and can
 +       * The guest is free to ask for sub-channels to be offered and can
         * open these sub-channels as a normal "primary" channel. However,
         * all sub-channels will have the same type and instance guids as the
         * primary channel. Requests sent on a given channel will result in a
         * Clearly, these optimizations improve throughput at the expense of
         * latency. Furthermore, since the channel is shared for both
         * control and data messages, control messages currently suffer
 -       * unnecessary latency adversley impacting performance and boot
 +       * unnecessary latency adversely impacting performance and boot
         * time. To fix this issue, permit tagging the channel as being
         * in "low latency" mode. In this mode, we will bypass the monitor
         * mechanism.
@@@ -1605,7 -1594,7 +1605,7 @@@ extern __u32 vmbus_proto_version
  
  int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
                                  const guid_t *shv_host_servie_id);
 -int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
 +int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
  void vmbus_set_event(struct vmbus_channel *channel);
  
  /* Get the start of the ring buffer. */
@@@ -1737,6 -1726,7 +1737,7 @@@ static inline unsigned long virt_to_hvp
  #define NR_HV_HYP_PAGES_IN_PAGE       (PAGE_SIZE / HV_HYP_PAGE_SIZE)
  #define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
  #define HVPFN_UP(x)   (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+ #define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT)
  #define page_to_hvpfn(page)   (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
  
  #endif /* _HYPERV_H */
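HVPFN_UP() and HVPFN_DOWN() round a byte count or address up or down to Hyper-V page frame numbers; combined with offset_in_hvpage() they tell a driver how many fixed-size hypervisor pages a guest buffer spans, independent of the guest's own PAGE_SIZE. A standalone sketch of the arithmetic (the macros are restated here with the usual HV_HYP_PAGE_SHIFT of 12, i.e. 4 KiB hypervisor pages):

        #include <stdio.h>

        #define HV_HYP_PAGE_SHIFT       12
        #define HV_HYP_PAGE_SIZE        (1UL << HV_HYP_PAGE_SHIFT)
        #define HV_HYP_PAGE_MASK        (~(HV_HYP_PAGE_SIZE - 1))

        #define offset_in_hvpage(ptr)   ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
        #define HVPFN_UP(x)             (((x) + HV_HYP_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT)
        #define HVPFN_DOWN(x)           ((x) >> HV_HYP_PAGE_SHIFT)

        int main(void)
        {
                unsigned long addr = 0x1000f00; /* example guest buffer address */
                unsigned long len = 10000;      /* example buffer length in bytes */

                /* Pages touched = round up (offset into first page + length). */
                unsigned long pages = HVPFN_UP(offset_in_hvpage(addr) + len);

                printf("first hvpfn %lu, hv pages spanned %lu\n",
                       HVPFN_DOWN(addr), pages);
                return 0;
        }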
diff --combined include/scsi/scsi_cmnd.h
@@@ -10,6 -10,7 +10,7 @@@
  #include <linux/timer.h>
  #include <linux/scatterlist.h>
  #include <scsi/scsi_device.h>
+ #include <scsi/scsi_host.h>
  #include <scsi/scsi_request.h>
  
  struct Scsi_Host;
@@@ -55,10 -56,11 +56,10 @@@ struct scsi_pointer 
  
  /* for scmd->flags */
  #define SCMD_TAGGED           (1 << 0)
 -#define SCMD_UNCHECKED_ISA_DMA        (1 << 1)
 -#define SCMD_INITIALIZED      (1 << 2)
 -#define SCMD_LAST             (1 << 3)
 +#define SCMD_INITIALIZED      (1 << 1)
 +#define SCMD_LAST             (1 << 2)
  /* flags preserved across unprep / reprep */
 -#define SCMD_PRESERVED_FLAGS  (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
 +#define SCMD_PRESERVED_FLAGS  (SCMD_INITIALIZED)
  
  /* for scmd->state */
  #define SCMD_STATE_COMPLETE   0
@@@ -74,6 -76,8 +75,8 @@@ struct scsi_cmnd 
  
        int eh_eflags;          /* Used by error handlr */
  
+       int budget_token;
        /*
         * This is set to jiffies as it was when the command was first
         * allocated.  It is used to time how long the command has
diff --combined include/scsi/scsi_host.h
@@@ -19,7 -19,6 +19,6 @@@ struct scsi_device
  struct scsi_host_cmd_pool;
  struct scsi_target;
  struct Scsi_Host;
- struct scsi_host_cmd_pool;
  struct scsi_transport_template;
  
  
  #define MODE_TARGET 0x02
  
  struct scsi_host_template {
-       struct module *module;
-       const char *name;
        /*
-        * The info function will return whatever useful information the
-        * developer sees fit.  If not provided, then the name field will
-        * be used instead.
-        *
-        * Status: OPTIONAL
+        * Put fields referenced in IO submission path together in
+        * same cacheline
         */
-       const char *(* info)(struct Scsi_Host *);
  
        /*
-        * Ioctl interface
-        *
-        * Status: OPTIONAL
-        */
-       int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
-                    void __user *arg);
- #ifdef CONFIG_COMPAT
-       /* 
-        * Compat handler. Handle 32bit ABI.
-        * When unknown ioctl is passed return -ENOIOCTLCMD.
-        *
-        * Status: OPTIONAL
+        * Additional per-command data allocated for the driver.
         */
-       int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
-                           void __user *arg);
- #endif
-       int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
-       int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+       unsigned int cmd_size;
  
        /*
         * The queuecommand function is used to queue up a scsi
         */
        void (*commit_rqs)(struct Scsi_Host *, u16);
  
+       struct module *module;
+       const char *name;
+       /*
+        * The info function will return whatever useful information the
+        * developer sees fit.  If not provided, then the name field will
+        * be used instead.
+        *
+        * Status: OPTIONAL
+        */
+       const char *(*info)(struct Scsi_Host *);
+       /*
+        * Ioctl interface
+        *
+        * Status: OPTIONAL
+        */
+       int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
+                    void __user *arg);
+ #ifdef CONFIG_COMPAT
+       /*
+        * Compat handler. Handle 32bit ABI.
+        * When unknown ioctl is passed return -ENOIOCTLCMD.
+        *
+        * Status: OPTIONAL
+        */
+       int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
+                           void __user *arg);
+ #endif
+       int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+       int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - there is a default
        int (* map_queues)(struct Scsi_Host *shost);
  
        /*
+        * SCSI interface of blk_poll - poll for IO completions.
+        * Only applicable if SCSI LLD exposes multiple h/w queues.
+        *
+        * Return value: Number of completed entries found.
+        *
+        * Status: OPTIONAL
+        */
+       int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);
+       /*
         * Check if scatterlists need to be padded for DMA draining.
         *
         * Status: OPTIONAL
        unsigned supported_mode:2;
  
        /*
 -       * True if this host adapter uses unchecked DMA onto an ISA bus.
 -       */
 -      unsigned unchecked_isa_dma:1;
 -
 -      /*
         * True for emulated SCSI host adapters (e.g. ATAPI).
         */
        unsigned emulated:1;
         */
        u64 vendor_id;
  
-       /*
-        * Additional per-command data allocated for the driver.
-        */
-       unsigned int cmd_size;
        struct scsi_host_cmd_pool *cmd_pool;
  
        /* Delay for runtime autosuspend */
@@@ -611,7 -631,9 +626,8 @@@ struct Scsi_Host 
         * the total queue depth is can_queue.
         */
        unsigned nr_hw_queues;
+       unsigned nr_maps;
        unsigned active_mode:2;
 -      unsigned unchecked_isa_dma:1;
  
        /*
         * Host has requested that no further requests come through for the