isci: combine request flags
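
Collapse the driver's scattered per-device and per-request state
tracking into single flag words.  Device readiness is now expressed
with the IDEV_IO_READY / IDEV_IO_NCQERROR / IDEV_EH bits and request
state with IREQ_COMPLETE_IN_TARGET / IREQ_TERMINATED, all manipulated
through set_bit()/test_bit().  isci_task_execute_task() looks up the
device and allocates a tag under scic_lock, refusing the task with
SAS_DEVICE_UNKNOWN or SAS_QUEUE_FULL when either is unavailable, and
the isci_host_can_queue()/isci_host_can_dequeue() accounting gives way
to isci_alloc_tag()/isci_tci_free().  The TMF path drops its private
timeout timer in favor of wait_for_completion_timeout(), and
isci_terminate_pending_requests() absorbs the old
isci_terminate_request() loop.

The readiness test reduces to a flag check; the snippet below is an
illustrative userspace sketch of that pattern (not part of the patch).
It mirrors isci_device_io_ready() with plain bit masks standing in for
the kernel's atomic set_bit()/test_bit() helpers, and struct idev is a
simplified stand-in for the driver's remote device object.

    #include <stdbool.h>
    #include <stdio.h>

    enum idev_flags {
            IDEV_IO_READY    = 1u << 0,  /* device accepts new I/O */
            IDEV_IO_NCQERROR = 1u << 1,  /* NCQ error recovery in progress */
    };

    struct idev {
            unsigned long flags;         /* replaces the old status enum */
    };

    /* ready, or in NCQ recovery and the task is an NCQ recovery command */
    static bool idev_io_ready(const struct idev *idev, bool ncq_recovery_task)
    {
            if (!idev)
                    return false;
            return (idev->flags & IDEV_IO_READY) ||
                   ((idev->flags & IDEV_IO_NCQERROR) && ncq_recovery_task);
    }

    int main(void)
    {
            struct idev dev = { .flags = IDEV_IO_NCQERROR };

            printf("%d\n", idev_io_ready(&dev, false)); /* 0: refuse, QUEUE_FULL */
            printf("%d\n", idev_io_ready(&dev, true));  /* 1: NCQ recovery I/O may run */

            dev.flags |= IDEV_IO_READY;
            printf("%d\n", idev_io_ready(&dev, false)); /* 1: normal I/O proceeds */
            return 0;
    }
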
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 338f08e..d1a4671 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
 
 #include <linux/completion.h>
 #include <linux/irqflags.h>
-#include <scsi/sas_ata.h>
-#include "scic_task_request.h"
-#include "scic_remote_device.h"
-#include "scic_io_request.h"
-#include "scic_sds_remote_device.h"
-#include "scic_sds_remote_node_context.h"
+#include "sas.h"
+#include <scsi/libsas.h>
+#include "remote_device.h"
+#include "remote_node_context.h"
 #include "isci.h"
 #include "request.h"
 #include "sata.h"
 #include "task.h"
+#include "host.h"
 
 /**
 * isci_task_refuse() - complete the request to the upper layer driver in
@@ -93,26 +92,13 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
                case isci_perform_normal_io_completion:
                        /* Normal notification (task_done) */
                        dev_dbg(&ihost->pdev->dev,
-                               "%s: Normal - task = %p, response=%d, status=%d\n",
+                               "%s: Normal - task = %p, response=%d, "
+                               "status=%d\n",
                                __func__, task, response, status);
 
                        task->lldd_task = NULL;
-                       if (dev_is_sata(task->dev)) {
-                               /* Since we are still in the submit path, and since
-                               * libsas takes the host lock on behalf of SATA
-                               * devices before I/O starts, we need to unlock
-                               * before we can call back and report the I/O
-                               * submission error.
-                               */
-                               unsigned long flags;
-
-                               raw_local_irq_save(flags);
-                               spin_unlock(ihost->shost->host_lock);
-                               task->task_done(task);
-                               spin_lock(ihost->shost->host_lock);
-                               raw_local_irq_restore(flags);
-                       } else
-                               task->task_done(task);
+
+                       isci_execpath_callback(ihost, task, task->task_done);
                        break;
 
                case isci_perform_aborted_io_completion:
@@ -120,16 +106,19 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
                        * abort path.
                        */
                        dev_warn(&ihost->pdev->dev,
-                                "%s: Aborted - task = %p, response=%d, status=%d\n",
+                                "%s: Aborted - task = %p, response=%d, "
+                               "status=%d\n",
                                 __func__, task, response, status);
                        break;
 
                case isci_perform_error_io_completion:
                        /* Use sas_task_abort */
                        dev_warn(&ihost->pdev->dev,
-                                "%s: Error - task = %p, response=%d, status=%d\n",
+                                "%s: Error - task = %p, response=%d, "
+                               "status=%d\n",
                                 __func__, task, response, status);
-                       sas_task_abort(task);
+
+                       isci_execpath_callback(ihost, task, sas_task_abort);
                        break;
 
                default:
@@ -145,6 +134,15 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
        for (; num > 0; num--,\
             task = list_entry(task->list.next, struct sas_task, list))
 
+
+static inline int isci_device_io_ready(struct isci_remote_device *idev,
+                                      struct sas_task *task)
+{
+       return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
+                     (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
+                      isci_task_is_ncq_recovery(task))
+                   : 0;
+}
 /**
  * isci_task_execute_task() - This function is one of the SAS Domain Template
  *    functions. This function is called by libsas to send a task down to
@@ -157,88 +155,55 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
  */
 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 {
-       struct isci_host *ihost = task->dev->port->ha->lldd_ha;
-       struct isci_request *request = NULL;
-       struct isci_remote_device *device;
+       struct isci_host *ihost = dev_to_ihost(task->dev);
+       struct isci_remote_device *idev;
        unsigned long flags;
-       int ret;
-       enum sci_status status;
-       enum isci_status device_status;
+       bool io_ready;
+       u16 tag;
 
        dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
 
-       /* Check if we have room for more tasks */
-       ret = isci_host_can_queue(ihost, num);
-
-       if (ret) {
-               dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
-               return ret;
-       }
-
        for_each_sas_task(num, task) {
-               dev_dbg(&ihost->pdev->dev,
-                       "task = %p, num = %d; dev = %p; cmd = %p\n",
-                           task, num, task->dev, task->uldd_task);
+               enum sci_status status = SCI_FAILURE;
 
-               device = isci_dev_from_domain_dev(task->dev);
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               idev = isci_lookup_device(task->dev);
+               io_ready = isci_device_io_ready(idev, task);
+               tag = isci_alloc_tag(ihost);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-               if (device)
-                       device_status = device->status;
-               else
-                       device_status = isci_freed;
-
-               /* From this point onward, any process that needs to guarantee
-                * that there is no kernel I/O being started will have to wait
-                * for the quiesce spinlock.
-                */
-
-               if (device_status != isci_ready_for_io) {
-
-                       /* Forces a retry from scsi mid layer. */
-                       dev_warn(&ihost->pdev->dev,
-                                "%s: task %p: isci_host->status = %d, "
-                                "device = %p; device_status = 0x%x\n\n",
-                                __func__,
-                                task,
-                                isci_host_get_state(ihost),
-                                device, device_status);
-
-                       if (device_status == isci_ready) {
-                               /* Indicate QUEUE_FULL so that the scsi midlayer
-                               * retries.
-                               */
-                               isci_task_refuse(ihost, task,
-                                                SAS_TASK_COMPLETE,
-                                                SAS_QUEUE_FULL);
-                       } else {
-                               /* Else, the device is going down. */
-                               isci_task_refuse(ihost, task,
-                                                SAS_TASK_UNDELIVERED,
-                                                SAS_DEVICE_UNKNOWN);
-                       }
-                       isci_host_can_dequeue(ihost, 1);
+               dev_dbg(&ihost->pdev->dev,
+                       "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
+                       task, num, task->dev, idev, idev ? idev->flags : 0,
+                       task->uldd_task);
+
+               if (!idev) {
+                       isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+                                        SAS_DEVICE_UNKNOWN);
+               } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+                       /* Indicate QUEUE_FULL so that the scsi midlayer
+                        * retries.
+                        */
+                       isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+                                        SAS_QUEUE_FULL);
                } else {
                        /* There is a device and it's ready for I/O. */
                        spin_lock_irqsave(&task->task_state_lock, flags);
 
                        if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
-
+                               /* The I/O was aborted. */
                                spin_unlock_irqrestore(&task->task_state_lock,
                                                       flags);
 
                                isci_task_refuse(ihost, task,
                                                 SAS_TASK_UNDELIVERED,
                                                 SAM_STAT_TASK_ABORTED);
-
-                               /* The I/O was aborted. */
-
                        } else {
                                task->task_state_flags |= SAS_TASK_AT_INITIATOR;
                                spin_unlock_irqrestore(&task->task_state_lock, flags);
 
                                /* build and send the request. */
-                               status = isci_request_execute(ihost, task, &request,
-                                                             gfp_flags);
+                               status = isci_request_execute(ihost, idev, task, tag, gfp_flags);
 
                                if (status != SCI_SUCCESS) {
 
@@ -257,68 +222,46 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                                        isci_task_refuse(ihost, task,
                                                         SAS_TASK_COMPLETE,
                                                         SAS_QUEUE_FULL);
-                                       isci_host_can_dequeue(ihost, 1);
                                }
                        }
                }
+               if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+                       spin_lock_irqsave(&ihost->scic_lock, flags);
+                       /* command never hit the device, so just free
+                        * the tci and skip the sequence increment
+                        */
+                       isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+                       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               }
+               isci_put_device(idev);
        }
        return 0;
 }
 
-
-
-/**
- * isci_task_request_build() - This function builds the task request object.
- * @isci_host: This parameter specifies the ISCI host object
- * @request: This parameter points to the isci_request object allocated in the
- *    request construct function.
- * @tmf: This parameter is the task management struct to be built
- *
- * SCI_SUCCESS on successfull completion, or specific failure code.
- */
-static enum sci_status isci_task_request_build(
-       struct isci_host *isci_host,
-       struct isci_request **isci_request,
-       struct isci_tmf *isci_tmf)
+static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+                                                   struct isci_remote_device *idev,
+                                                   u16 tag, struct isci_tmf *isci_tmf)
 {
-       struct scic_sds_remote_device *sci_device;
        enum sci_status status = SCI_FAILURE;
-       struct isci_request *request;
-       struct isci_remote_device *isci_device;
-/*     struct sci_sas_identify_address_frame_protocols dev_protocols; */
-       struct smp_discover_response_protocols dev_protocols;
+       struct isci_request *ireq = NULL;
+       struct domain_device *dev;
 
-
-       dev_dbg(&isci_host->pdev->dev,
+       dev_dbg(&ihost->pdev->dev,
                "%s: isci_tmf = %p\n", __func__, isci_tmf);
 
-       isci_device = isci_tmf->device;
-       sci_device = to_sci_dev(isci_device);
+       dev = idev->domain_dev;
 
        /* do common allocation and init of request object. */
-       status = isci_request_alloc_tmf(
-               isci_host,
-               isci_tmf,
-               &request,
-               isci_device,
-               GFP_ATOMIC
-               );
-
-       if (status != SCI_SUCCESS)
-               goto out;
+       ireq = isci_request_alloc_tmf(ihost, isci_tmf, GFP_ATOMIC);
+       if (!ireq)
+               return NULL;
 
        /* let the core do it's construct. */
-       status = scic_task_request_construct(
-               isci_host->core_controller,
-               sci_device,
-               SCI_CONTROLLER_INVALID_IO_TAG,
-               request,
-               request->sci_request_mem_ptr,
-               &request->sci_request_handle
-               );
+       status = scic_task_request_construct(&ihost->sci, &idev->sci, tag,
+                                            &ireq->sci);
 
        if (status != SCI_SUCCESS) {
-               dev_warn(&isci_host->pdev->dev,
+               dev_warn(&ihost->pdev->dev,
                         "%s: scic_task_request_construct failed - "
                         "status = 0x%x\n",
                         __func__,
@@ -326,209 +269,124 @@ static enum sci_status isci_task_request_build(
                goto errout;
        }
 
-       sci_object_set_association(
-               request->sci_request_handle,
-               request
-               );
-
-       scic_remote_device_get_protocols(
-               sci_device,
-               &dev_protocols
-               );
-
-       /* let the core do it's protocol
-        * specific construction.
-        */
-       if (dev_protocols.u.bits.attached_ssp_target) {
-
+       /* XXX convert to get this from task->tproto like other drivers */
+       if (dev->dev_type == SAS_END_DEV) {
                isci_tmf->proto = SAS_PROTOCOL_SSP;
-               status = scic_task_request_construct_ssp(
-                       request->sci_request_handle
-                       );
+               status = scic_task_request_construct_ssp(&ireq->sci);
                if (status != SCI_SUCCESS)
                        goto errout;
        }
 
-       if (dev_protocols.u.bits.attached_stp_target) {
-
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
                isci_tmf->proto = SAS_PROTOCOL_SATA;
-               status = isci_sata_management_task_request_build(request);
+               status = isci_sata_management_task_request_build(ireq);
 
                if (status != SCI_SUCCESS)
                        goto errout;
        }
-
-       goto out;
-
+       return ireq;
  errout:
-
-       /* release the dma memory if we fail. */
-       isci_request_free(isci_host, request);
-       request = NULL;
-
- out:
-       *isci_request = request;
-       return status;
+       isci_request_free(ihost, ireq);
+       return NULL;
 }
 
-/**
- * isci_tmf_timeout_cb() - This function is called as a kernel callback when
- *    the timeout period for the TMF has expired.
- *
- *
- */
-static void isci_tmf_timeout_cb(void *tmf_request_arg)
-{
-       struct isci_request *request = (struct isci_request *)tmf_request_arg;
-       struct isci_tmf *tmf = isci_request_access_tmf(request);
-       enum sci_status status;
-
-       BUG_ON(request->ttype != tmf_task);
-
-       /* This task management request has timed-out.  Terminate the request
-        * so that the request eventually completes to the requestor in the
-        * request completion callback path.
-        */
-       /* Note - the timer callback function itself has provided spinlock
-        * exclusion from the start and completion paths.  No need to take
-        * the request->isci_host->scic_lock here.
-        */
-
-       if (tmf->timeout_timer != NULL) {
-               /* Call the users callback, if any. */
-               if (tmf->cb_state_func != NULL)
-                       tmf->cb_state_func(isci_tmf_timed_out, tmf,
-                                          tmf->cb_data);
-
-               /* Terminate the TMF transmit request. */
-               status = scic_controller_terminate_request(
-                       request->isci_host->core_controller,
-                       to_sci_dev(request->isci_device),
-                       request->sci_request_handle
-                       );
-
-               dev_dbg(&request->isci_host->pdev->dev,
-                       "%s: tmf_request = %p; tmf = %p; status = %d\n",
-                       __func__, request, tmf, status);
-       } else
-               dev_dbg(&request->isci_host->pdev->dev,
-                       "%s: timer already canceled! "
-                       "tmf_request = %p; tmf = %p\n",
-                       __func__, request, tmf);
-
-       /* No need to unlock since the caller to this callback is doing it for
-        * us.
-        * request->isci_host->scic_lock
-        */
-}
-
-/**
- * isci_task_execute_tmf() - This function builds and sends a task request,
- *    then waits for the completion.
- * @isci_host: This parameter specifies the ISCI host object
- * @tmf: This parameter is the pointer to the task management structure for
- *    this request.
- * @timeout_ms: This parameter specifies the timeout period for the task
- *    management request.
- *
- * TMF_RESP_FUNC_COMPLETE on successful completion of the TMF (this includes
- * error conditions reported in the IU status), or TMF_RESP_FUNC_FAILED.
- */
-int isci_task_execute_tmf(
-       struct isci_host *isci_host,
-       struct isci_tmf *tmf,
-       unsigned long timeout_ms)
+int isci_task_execute_tmf(struct isci_host *ihost,
+                         struct isci_remote_device *isci_device,
+                         struct isci_tmf *tmf, unsigned long timeout_ms)
 {
        DECLARE_COMPLETION_ONSTACK(completion);
-       enum sci_status status = SCI_FAILURE;
+       enum sci_task_status status = SCI_TASK_FAILURE;
        struct scic_sds_remote_device *sci_device;
-       struct isci_remote_device *isci_device = tmf->device;
-       struct isci_request *request;
+       struct isci_request *ireq;
        int ret = TMF_RESP_FUNC_FAILED;
        unsigned long flags;
+       unsigned long timeleft;
+       u16 tag;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       tag = isci_alloc_tag(ihost);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+               return ret;
 
        /* sanity check, return TMF_RESP_FUNC_FAILED
         * if the device is not there and ready.
         */
-       if (!isci_device || isci_device->status != isci_ready_for_io) {
-               dev_dbg(&isci_host->pdev->dev,
-                       "%s: isci_device = %p not ready (%d)\n",
+       if (!isci_device ||
+           (!test_bit(IDEV_IO_READY, &isci_device->flags) &&
+            !test_bit(IDEV_IO_NCQERROR, &isci_device->flags))) {
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: isci_device = %p not ready (%#lx)\n",
                        __func__,
-                       isci_device, isci_device->status);
-               return TMF_RESP_FUNC_FAILED;
+                       isci_device, isci_device ? isci_device->flags : 0);
+               goto err_tci;
        } else
-               dev_dbg(&isci_host->pdev->dev,
+               dev_dbg(&ihost->pdev->dev,
                        "%s: isci_device = %p\n",
                        __func__, isci_device);
 
-       sci_device = to_sci_dev(isci_device);
+       sci_device = &isci_device->sci;
 
        /* Assign the pointer to the TMF's completion kernel wait structure. */
        tmf->complete = &completion;
 
-       isci_task_request_build(
-               isci_host,
-               &request,
-               tmf
-               );
+       ireq = isci_task_request_build(ihost, isci_device, tag, tmf);
+       if (!ireq)
+               goto err_tci;
 
-       if (!request) {
-               dev_warn(&isci_host->pdev->dev,
-                       "%s: isci_task_request_build failed\n",
-                       __func__);
-               return TMF_RESP_FUNC_FAILED;
-       }
-
-       /* Allocate the TMF timeout timer. */
-       spin_lock_irqsave(&isci_host->scic_lock, flags);
-       tmf->timeout_timer = isci_timer_create(isci_host, request, isci_tmf_timeout_cb);
-
-       /* Start the timer. */
-       if (tmf->timeout_timer)
-               isci_timer_start(tmf->timeout_timer, timeout_ms);
-       else
-               dev_warn(&isci_host->pdev->dev,
-                        "%s: isci_timer_create failed!!!!\n",
-                        __func__);
+       spin_lock_irqsave(&ihost->scic_lock, flags);
 
        /* start the TMF io. */
-       status = scic_controller_start_task(
-               isci_host->core_controller,
-               sci_device,
-               request->sci_request_handle,
-               SCI_CONTROLLER_INVALID_IO_TAG
-               );
+       status = scic_controller_start_task(&ihost->sci,
+                                           sci_device,
+                                           &ireq->sci);
 
-       if (status != SCI_SUCCESS) {
-               dev_warn(&isci_host->pdev->dev,
+       if (status != SCI_TASK_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
                         "%s: start_io failed - status = 0x%x, request = %p\n",
                         __func__,
                         status,
-                        request);
-               goto cleanup_request;
+                        ireq);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+               goto err_ireq;
        }
 
-       /* Call the users callback, if any. */
        if (tmf->cb_state_func != NULL)
                tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
 
-       /* Change the state of the TMF-bearing request to "started". */
-       isci_request_change_state(request, started);
+       isci_request_change_state(ireq, started);
 
        /* add the request to the remote device request list. */
-       list_add(&request->dev_node, &isci_device->reqs_in_process);
+       list_add(&ireq->dev_node, &isci_device->reqs_in_process);
 
-       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
        /* Wait for the TMF to complete, or a timeout. */
-       wait_for_completion(&completion);
+       timeleft = wait_for_completion_timeout(&completion,
+                                              msecs_to_jiffies(timeout_ms));
+
+       if (timeleft == 0) {
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+
+               if (tmf->cb_state_func != NULL)
+                       tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+
+               scic_controller_terminate_request(&ihost->sci,
+                                                 &isci_device->sci,
+                                                 &ireq->sci);
+
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+               wait_for_completion(tmf->complete);
+       }
 
        isci_print_tmf(tmf);
 
        if (tmf->status == SCI_SUCCESS)
                ret =  TMF_RESP_FUNC_COMPLETE;
        else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
-               dev_dbg(&isci_host->pdev->dev,
+               dev_dbg(&ihost->pdev->dev,
                        "%s: tmf.status == "
                        "SCI_FAILURE_IO_RESPONSE_VALID\n",
                        __func__);
@@ -536,102 +394,51 @@ int isci_task_execute_tmf(
        }
        /* Else - leave the default "failed" status alone. */
 
-       dev_dbg(&isci_host->pdev->dev,
+       dev_dbg(&ihost->pdev->dev,
                "%s: completed request = %p\n",
                __func__,
-               request);
-
-       if (request->io_request_completion != NULL) {
-
-               /* The fact that this is non-NULL for a TMF request
-                * means there is a thread waiting for this TMF to
-                * finish.
-                */
-               complete(request->io_request_completion);
-       }
-
-       spin_lock_irqsave(&isci_host->scic_lock, flags);
+               ireq);
 
- cleanup_request:
-
-       /* Clean up the timer if needed. */
-       if (tmf->timeout_timer) {
-               isci_del_timer(isci_host, tmf->timeout_timer);
-               tmf->timeout_timer = NULL;
-       }
-
-       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+       return ret;
 
-       isci_request_free(isci_host, request);
+ err_ireq:
+       isci_request_free(ihost, ireq);
+ err_tci:
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
        return ret;
 }
 
 void isci_task_build_tmf(
        struct isci_tmf *tmf,
-       struct isci_remote_device *isci_device,
        enum isci_tmf_function_codes code,
        void (*tmf_sent_cb)(enum isci_tmf_cb_state,
                            struct isci_tmf *,
                            void *),
        void *cb_data)
 {
-       dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
-               "%s: isci_device = %p\n", __func__, isci_device);
-
        memset(tmf, 0, sizeof(*tmf));
 
-       tmf->device        = isci_device;
        tmf->tmf_code      = code;
-       tmf->timeout_timer = NULL;
        tmf->cb_state_func = tmf_sent_cb;
        tmf->cb_data       = cb_data;
 }
 
 static void isci_task_build_abort_task_tmf(
        struct isci_tmf *tmf,
-       struct isci_remote_device *isci_device,
        enum isci_tmf_function_codes code,
        void (*tmf_sent_cb)(enum isci_tmf_cb_state,
                            struct isci_tmf *,
                            void *),
        struct isci_request *old_request)
 {
-       isci_task_build_tmf(tmf, isci_device, code, tmf_sent_cb,
+       isci_task_build_tmf(tmf, code, tmf_sent_cb,
                            (void *)old_request);
        tmf->io_tag = old_request->io_tag;
 }
 
-static struct isci_request *isci_task_get_request_from_task(
-       struct sas_task *task,
-       struct isci_host **isci_host,
-       struct isci_remote_device **isci_device)
-{
-
-       struct isci_request *request = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&task->task_state_lock, flags);
-
-       request = task->lldd_task;
-
-       /* If task is already done, the request isn't valid */
-       if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
-           (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
-           (request != NULL)) {
-
-               if (isci_host != NULL)
-                       *isci_host = request->isci_host;
-
-               if (isci_device != NULL)
-                       *isci_device = request->isci_device;
-       }
-
-       spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-       return request;
-}
-
 /**
  * isci_task_validate_request_to_abort() - This function checks the given I/O
  *    against the "started" state.  If the request is still "started", it's
@@ -667,31 +474,33 @@ static enum isci_request_status isci_task_validate_request_to_abort(
        return old_state;
 }
 
+/**
+ * isci_request_cleanup_completed_loiterer() - This function will take care of
+ *    the final cleanup on any request which has been explicitly terminated.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @isci_request: This parameter specifies the terminated request object.
+ * @task: This parameter is the libsas I/O request.
+ */
 static void isci_request_cleanup_completed_loiterer(
-       struct isci_host *isci_host,
+       struct isci_host          *isci_host,
        struct isci_remote_device *isci_device,
-       struct isci_request *isci_request)
+       struct isci_request       *isci_request,
+       struct sas_task           *task)
 {
-       struct sas_task     *task;
-       unsigned long       flags;
-
-       task = (isci_request->ttype == io_task)
-               ? isci_request_access_task(isci_request)
-               : NULL;
+       unsigned long flags;
 
        dev_dbg(&isci_host->pdev->dev,
                "%s: isci_device=%p, request=%p, task=%p\n",
                __func__, isci_device, isci_request, task);
 
-       spin_lock_irqsave(&isci_host->scic_lock, flags);
-       list_del_init(&isci_request->dev_node);
-       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
        if (task != NULL) {
 
                spin_lock_irqsave(&task->task_state_lock, flags);
                task->lldd_task = NULL;
 
+               task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+
                isci_set_task_doneflags(task);
 
                /* If this task is not in the abort path, call task_done. */
@@ -702,61 +511,16 @@ static void isci_request_cleanup_completed_loiterer(
                } else
                        spin_unlock_irqrestore(&task->task_state_lock, flags);
        }
-       isci_request_free(isci_host, isci_request);
-}
-
-/**
-* @isci_termination_timed_out(): this function will deal with a request for
-* which the wait for termination has timed-out.
-*
-* @isci_host    This SCU.
-* @isci_request The I/O request being terminated.
-*/
-static void
-isci_termination_timed_out(
-       struct isci_host    * host,
-       struct isci_request * request
-       )
-{
-       unsigned long state_flags;
-
-       dev_warn(&host->pdev->dev,
-               "%s: host = %p; request = %p\n",
-               __func__, host, request);
-
-       /* At this point, the request to terminate
-       * has timed out. The best we can do is to
-       * have the request die a silent death
-       * if it ever completes.
-       */
-       spin_lock_irqsave(&request->state_lock, state_flags);
-
-       if (request->status == started) {
-
-               /* Set the request state to "dead",
-               * and clear the task pointer so that an actual
-               * completion event callback doesn't do
-               * anything.
-               */
-               request->status = dead;
-
-               /* Clear the timeout completion event pointer.*/
-               request->io_request_completion = NULL;
 
-               if (request->ttype == io_task) {
-
-                       /* Break links with the sas_task. */
-                       if (request->ttype_ptr.io_task_ptr != NULL) {
+       if (isci_request != NULL) {
+               spin_lock_irqsave(&isci_host->scic_lock, flags);
+               list_del_init(&isci_request->dev_node);
+               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
-                               request->ttype_ptr.io_task_ptr->lldd_task = NULL;
-                               request->ttype_ptr.io_task_ptr            = NULL;
-                       }
-               }
+               isci_request_free(isci_host, isci_request);
        }
-       spin_unlock_irqrestore(&request->state_lock, state_flags);
 }
 
-
 /**
  * isci_terminate_request_core() - This function will terminate the given
  *    request, and wait for it to complete.  This function must only be called
@@ -766,7 +530,6 @@ isci_termination_timed_out(
  * @isci_device: The target.
  * @isci_request: The I/O request to be terminated.
  *
- *
  */
 static void isci_terminate_request_core(
        struct isci_host *isci_host,
@@ -777,9 +540,10 @@ static void isci_terminate_request_core(
        bool was_terminated         = false;
        bool needs_cleanup_handling = false;
        enum isci_request_status request_status;
-       unsigned long flags;
-       unsigned long timeout_remaining;
-
+       unsigned long     flags;
+       unsigned long     termination_completed = 1;
+       struct completion *io_request_completion;
+       struct sas_task   *task;
 
        dev_dbg(&isci_host->pdev->dev,
                "%s: device = %p; request = %p\n",
@@ -787,23 +551,28 @@ static void isci_terminate_request_core(
 
        spin_lock_irqsave(&isci_host->scic_lock, flags);
 
+       io_request_completion = isci_request->io_request_completion;
+
+       task = (isci_request->ttype == io_task)
+               ? isci_request_access_task(isci_request)
+               : NULL;
+
        /* Note that we are not going to control
-       * the target to abort the request.
-       */
-       isci_request->complete_in_target = true;
+        * the target to abort the request.
+        */
+       set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
 
        /* Make sure the request wasn't just sitting around signalling
         * device condition (if the request handle is NULL, then the
         * request completed but needed additional handling here).
         */
-       if (isci_request->sci_request_handle != NULL) {
+       if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
                was_terminated = true;
                needs_cleanup_handling = true;
                status = scic_controller_terminate_request(
-                       isci_host->core_controller,
-                       to_sci_dev(isci_device),
-                       isci_request->sci_request_handle
-                       );
+                       &isci_host->sci,
+                       &isci_device->sci,
+                       &isci_request->sci);
        }
        spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
@@ -816,96 +585,172 @@ static void isci_terminate_request_core(
                dev_err(&isci_host->pdev->dev,
                        "%s: scic_controller_terminate_request"
                        " returned = 0x%x\n",
-                       __func__,
-                       status);
-               /* Clear the completion pointer from the request. */
+                       __func__, status);
+
                isci_request->io_request_completion = NULL;
 
        } else {
                if (was_terminated) {
                        dev_dbg(&isci_host->pdev->dev,
-                               "%s: before completion wait (%p)\n",
-                               __func__,
-                               isci_request->io_request_completion);
+                               "%s: before completion wait (%p/%p)\n",
+                               __func__, isci_request, io_request_completion);
 
                        /* Wait here for the request to complete. */
-                       #define TERMINATION_TIMEOUT_MSEC 50
-                       timeout_remaining
+                       #define TERMINATION_TIMEOUT_MSEC 500
+                       termination_completed
                                = wait_for_completion_timeout(
-                                  isci_request->io_request_completion,
+                                  io_request_completion,
                                   msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
 
-                       if (!timeout_remaining) {
+                       if (!termination_completed) {
+
+                               /* The request to terminate has timed out.  */
+                               spin_lock_irqsave(&isci_host->scic_lock,
+                                                 flags);
+
+                               /* Check for state changes. */
+                               if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+
+                                       /* The best we can do is to have the
+                                        * request die a silent death if it
+                                        * ever really completes.
+                                        *
+                                        * Set the request state to "dead",
+                                        * and clear the task pointer so that
+                                        * an actual completion event callback
+                                        * doesn't do anything.
+                                        */
+                                       isci_request->status = dead;
+                                       isci_request->io_request_completion
+                                               = NULL;
+
+                                       if (isci_request->ttype == io_task) {
+
+                                               /* Break links with the
+                                               * sas_task.
+                                               */
+                                               isci_request->ttype_ptr.io_task_ptr
+                                                       = NULL;
+                                       }
+                               } else
+                                       termination_completed = 1;
+
+                               spin_unlock_irqrestore(&isci_host->scic_lock,
+                                                      flags);
 
-                               isci_termination_timed_out(isci_host,
-                                                          isci_request);
+                               if (!termination_completed) {
 
-                               dev_err(&isci_host->pdev->dev,
-                                       "%s: *** Timeout waiting for "
-                                       "termination(%p/%p)\n",
-                                       __func__,
-                                       isci_request->io_request_completion,
-                                       isci_request);
+                                       dev_err(&isci_host->pdev->dev,
+                                               "%s: *** Timeout waiting for "
+                                               "termination(%p/%p)\n",
+                                               __func__, io_request_completion,
+                                               isci_request);
 
-                       } else
+                                       /* The request can no longer be referenced
+                                        * safely since it may go away if the
+                                        * termination every really does complete.
+                                        */
+                                       isci_request = NULL;
+                               }
+                       }
+                       if (termination_completed)
                                dev_dbg(&isci_host->pdev->dev,
-                                       "%s: after completion wait (%p)\n",
-                                       __func__,
-                                       isci_request->io_request_completion);
+                                       "%s: after completion wait (%p/%p)\n",
+                                       __func__, isci_request, io_request_completion);
                }
-               /* Clear the completion pointer from the request. */
-               isci_request->io_request_completion = NULL;
 
-               /* Peek at the status of the request.  This will tell
-               * us if there was special handling on the request such that it
-               * needs to be detached and freed here.
-               */
-               spin_lock_irqsave(&isci_request->state_lock, flags);
-               request_status = isci_request_get_state(isci_request);
-
-               if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
-                   && ((request_status == aborted)
-                       || (request_status == aborting)
-                       || (request_status == terminating)
-                       || (request_status == completed)
-                       || (request_status == dead)
-                       )
-                   ) {
-
-                       /* The completion routine won't free a request in
-                       * the aborted/aborting/etc. states, so we do
-                       * it here.
-                       */
-                       needs_cleanup_handling = true;
-               }
-               spin_unlock_irqrestore(&isci_request->state_lock, flags);
+               if (termination_completed) {
+
+                       isci_request->io_request_completion = NULL;
 
+                       /* Peek at the status of the request.  This will tell
+                        * us if there was special handling on the request such that it
+                        * needs to be detached and freed here.
+                        */
+                       spin_lock_irqsave(&isci_request->state_lock, flags);
+                       request_status = isci_request_get_state(isci_request);
+
+                       if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
+                           && ((request_status == aborted)
+                               || (request_status == aborting)
+                               || (request_status == terminating)
+                               || (request_status == completed)
+                               || (request_status == dead)
+                               )
+                           ) {
+
+                               /* The completion routine won't free a request in
+                                * the aborted/aborting/etc. states, so we do
+                                * it here.
+                                */
+                               needs_cleanup_handling = true;
+                       }
+                       spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+               }
                if (needs_cleanup_handling)
                        isci_request_cleanup_completed_loiterer(
-                               isci_host, isci_device, isci_request
-                               );
+                               isci_host, isci_device, isci_request, task);
        }
 }
 
-static void isci_terminate_request(
-       struct isci_host *isci_host,
-       struct isci_remote_device *isci_device,
-       struct isci_request *isci_request,
-       enum isci_request_status new_request_state)
+/**
+ * isci_terminate_pending_requests() - This function will change the all of the
+ *    requests on the given device's state to "aborting", will terminate the
+ *    requests, and wait for them to complete.  This function must only be
+ *    called from a thread that can wait.  Note that the requests are all
+ *    terminated and completed (back to the host, if started there).
+ * @isci_host: This parameter specifies SCU.
+ * @isci_device: This parameter specifies the target.
+ *
+ */
+void isci_terminate_pending_requests(struct isci_host *ihost,
+                                    struct isci_remote_device *idev)
 {
+       struct completion request_completion;
        enum isci_request_status old_state;
-       DECLARE_COMPLETION_ONSTACK(request_completion);
+       unsigned long flags;
+       LIST_HEAD(list);
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       list_splice_init(&idev->reqs_in_process, &list);
+
+       /* assumes that isci_terminate_request_core deletes from the list */
+       while (!list_empty(&list)) {
+               struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
+
+               /* Change state to "terminating" if it is currently
+                * "started".
+                */
+               old_state = isci_request_change_started_to_newstate(ireq,
+                                                                   &request_completion,
+                                                                   terminating);
+               switch (old_state) {
+               case started:
+               case completed:
+               case aborting:
+                       break;
+               default:
+                       /* termination in progress, or otherwise dispositioned.
+                        * We know the request was on 'list' so should be safe
+                        * to move it back to reqs_in_process
+                        */
+                       list_move(&ireq->dev_node, &idev->reqs_in_process);
+                       ireq = NULL;
+                       break;
+               }
+
+               if (!ireq)
+                       continue;
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-       /* Change state to "new_request_state" if it is currently "started" */
-       old_state = isci_request_change_started_to_newstate(
-               isci_request,
-               &request_completion,
-               new_request_state
-               );
+               init_completion(&request_completion);
 
-       if ((old_state == started) ||
-           (old_state == completed) ||
-           (old_state == aborting)) {
+               dev_dbg(&ihost->pdev->dev,
+                        "%s: idev=%p request=%p; task=%p old_state=%d\n",
+                        __func__, idev, ireq,
+                       ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+                       old_state);
 
                /* If the old_state is started:
                 * This request was not already being aborted. If it had been,
@@ -927,65 +772,10 @@ static void isci_terminate_request(
                 * This request has already gone through a TMF timeout, but may
                 * not have been terminated; needs cleaning up at least.
                 */
-               isci_terminate_request_core(isci_host, isci_device,
-                                           isci_request);
-       }
-}
-
-/**
- * isci_terminate_pending_requests() - This function will change the all of the
- *    requests on the given device's state to "aborting", will terminate the
- *    requests, and wait for them to complete.  This function must only be
- *    called from a thread that can wait.  Note that the requests are all
- *    terminated and completed (back to the host, if started there).
- * @isci_host: This parameter specifies SCU.
- * @isci_device: This parameter specifies the target.
- *
- *
- */
-void isci_terminate_pending_requests(
-       struct isci_host *isci_host,
-       struct isci_remote_device *isci_device,
-       enum isci_request_status new_request_state)
-{
-       struct isci_request *request;
-       struct isci_request *next_request;
-       unsigned long       flags;
-       struct list_head    aborted_request_list;
-
-       INIT_LIST_HEAD(&aborted_request_list);
-
-       dev_dbg(&isci_host->pdev->dev,
-               "%s: isci_device = %p (new request state = %d)\n",
-               __func__, isci_device, new_request_state);
-
-       spin_lock_irqsave(&isci_host->scic_lock, flags);
-
-       /* Move all of the pending requests off of the device list. */
-       list_splice_init(&isci_device->reqs_in_process,
-                        &aborted_request_list);
-
-       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
-       /* Iterate through the now-local list. */
-       list_for_each_entry_safe(request, next_request,
-                                &aborted_request_list, dev_node) {
-
-               dev_warn(&isci_host->pdev->dev,
-                       "%s: isci_device=%p request=%p; task=%p\n",
-                       __func__,
-                       isci_device, request,
-                       ((request->ttype == io_task)
-                               ? isci_request_access_task(request)
-                               : NULL));
-
-               /* Mark all still pending I/O with the selected next
-               * state, terminate and free it.
-               */
-               isci_terminate_request(isci_host, isci_device,
-                                      request, new_request_state
-                                      );
+               isci_terminate_request_core(ihost, idev, ireq);
+               spin_lock_irqsave(&ihost->scic_lock, flags);
        }
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
 }
 
 /**
@@ -1011,11 +801,10 @@ static int isci_task_send_lu_reset_sas(
         * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
         * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
         */
-       isci_task_build_tmf(&tmf, isci_device, isci_tmf_ssp_lun_reset, NULL,
-                           NULL);
+       isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
 
        #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
-       ret = isci_task_execute_tmf(isci_host, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+       ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
 
        if (ret == TMF_RESP_FUNC_COMPLETE)
                dev_dbg(&isci_host->pdev->dev,
@@ -1038,54 +827,41 @@ static int isci_task_send_lu_reset_sas(
  *
  * status, zero indicates success.
  */
-int isci_task_lu_reset(
-       struct domain_device *domain_device,
-       u8 *lun)
+int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
 {
-       struct isci_host *isci_host = NULL;
-       struct isci_remote_device *isci_device = NULL;
+       struct isci_host *isci_host = dev_to_ihost(domain_device);
+       struct isci_remote_device *isci_device;
+       unsigned long flags;
        int ret;
-       bool device_stopping = false;
 
-       if (domain_device == NULL) {
-               pr_warn("%s: domain_device == NULL\n", __func__);
-               return TMF_RESP_FUNC_FAILED;
-       }
-
-       isci_device = isci_dev_from_domain_dev(domain_device);
-
-       if (domain_device->port != NULL)
-               isci_host = isci_host_from_sas_ha(domain_device->port->ha);
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+       isci_device = isci_lookup_device(domain_device);
+       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
-       pr_debug("%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
+       dev_dbg(&isci_host->pdev->dev,
+               "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
                 __func__, domain_device, isci_host, isci_device);
 
-       if (isci_device != NULL)
-               device_stopping = (isci_device->status == isci_stopping)
-                                 || (isci_device->status == isci_stopped);
+       if (isci_device)
+               set_bit(IDEV_EH, &isci_device->flags);
 
        /* If there is a device reset pending on any request in the
         * device's list, fail this LUN reset request in order to
         * escalate to the device reset.
         */
-       if ((isci_device == NULL) ||
-           (isci_host == NULL) ||
-           ((isci_host != NULL) &&
-            (isci_device != NULL) &&
-            (device_stopping ||
-             (isci_device_is_reset_pending(isci_host, isci_device))))) {
+       if (!isci_device ||
+           isci_device_is_reset_pending(isci_host, isci_device)) {
                dev_warn(&isci_host->pdev->dev,
-                        "%s: No dev (%p), no host (%p), or "
+                        "%s: No dev (%p), or "
                         "RESET PENDING: domain_device=%p\n",
-                        __func__, isci_device, isci_host, domain_device);
-               return TMF_RESP_FUNC_FAILED;
+                        __func__, isci_device, domain_device);
+               ret = TMF_RESP_FUNC_FAILED;
+               goto out;
        }
 
        /* Send the task management part of the reset. */
        if (sas_protocol_ata(domain_device->tproto)) {
-               ret = isci_task_send_lu_reset_sata(
-                       isci_host, isci_device, lun
-                       );
+               ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
        } else
                ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
 
@@ -1093,9 +869,10 @@ int isci_task_lu_reset(
        if (ret == TMF_RESP_FUNC_COMPLETE)
                /* Terminate all I/O now. */
                isci_terminate_pending_requests(isci_host,
-                                               isci_device,
-                                               terminating);
+                                               isci_device);
 
+ out:
+       isci_put_device(isci_device);
        return ret;
 }
 
@@ -1113,12 +890,6 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
        return TMF_RESP_FUNC_FAILED;
 }
 
-int isci_task_I_T_nexus_reset(struct domain_device *dev)
-{
-       return TMF_RESP_FUNC_FAILED;
-}
-
-
 /* Task Management Functions. Must be called from process context.      */
 
 /**
@@ -1151,8 +922,11 @@ static void isci_abort_task_process_cb(
                 * request state was already set to "aborted" by the abort
                 * task function.
                 */
-               BUG_ON((old_request->status != aborted)
-                       && (old_request->status != completed));
+               if ((old_request->status != aborted)
+                       && (old_request->status != completed))
+                       dev_err(&old_request->isci_host->pdev->dev,
+                               "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
+                               __func__, old_request->status, tmf, old_request);
                break;
 
        case isci_tmf_timed_out:
@@ -1184,64 +958,63 @@ static void isci_abort_task_process_cb(
  */
 int isci_task_abort_task(struct sas_task *task)
 {
+       struct isci_host *isci_host = dev_to_ihost(task->dev);
        DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
        struct isci_request       *old_request = NULL;
        enum isci_request_status  old_state;
        struct isci_remote_device *isci_device = NULL;
-       struct isci_host          *isci_host = NULL;
        struct isci_tmf           tmf;
        int                       ret = TMF_RESP_FUNC_FAILED;
        unsigned long             flags;
        bool                      any_dev_reset = false;
-       bool                      device_stopping;
 
        /* Get the isci_request reference from the task.  Note that
         * this check does not depend on the pending request list
         * in the device, because tasks driving resets may land here
         * after completion in the core.
         */
-       old_request = isci_task_get_request_from_task(task, &isci_host,
-                                                     &isci_device);
+       spin_lock_irqsave(&isci_host->scic_lock, flags);
+       spin_lock(&task->task_state_lock);
+
+       old_request = task->lldd_task;
+
+       /* If task is already done, the request isn't valid */
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+           (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+           old_request)
+               isci_device = isci_lookup_device(task->dev);
+
+       spin_unlock(&task->task_state_lock);
+       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
        dev_dbg(&isci_host->pdev->dev,
                "%s: task = %p\n", __func__, task);
 
-       /* Check if the device has been / is currently being removed.
-        * If so, no task management will be done, and the I/O will
-        * be terminated.
-        */
-       device_stopping = (isci_device->status == isci_stopping)
-                         || (isci_device->status == isci_stopped);
+       if (!isci_device || !old_request)
+               goto out;
+
+       set_bit(IDEV_EH, &isci_device->flags);
 
        /* This version of the driver will fail abort requests for
         * SATA/STP.  Failing the abort request this way will cause the
         * SCSI error handler thread to escalate to LUN reset
         */
-       if (sas_protocol_ata(task->task_proto) && !device_stopping) {
+       if (sas_protocol_ata(task->task_proto)) {
                dev_warn(&isci_host->pdev->dev,
                            " task %p is for a STP/SATA device;"
                            " returning TMF_RESP_FUNC_FAILED\n"
                            " to cause a LUN reset...\n", task);
-               return TMF_RESP_FUNC_FAILED;
+               goto out;
        }
 
        dev_dbg(&isci_host->pdev->dev,
                "%s: old_request == %p\n", __func__, old_request);
 
-       if (!device_stopping)
-               any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device);
+       any_dev_reset = isci_device_is_reset_pending(isci_host,isci_device);
 
        spin_lock_irqsave(&task->task_state_lock, flags);
 
-       /* Don't do resets to stopping devices. */
-       if (device_stopping) {
-
-               task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
-               any_dev_reset = false;
-
-       } else  /* See if there is a pending device reset for this device. */
-               any_dev_reset = any_dev_reset
-                       || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+       any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
 
        /* If the extraction of the request reference from the task
         * failed, then the request has been completed (or if there is a
@@ -1295,8 +1068,7 @@ int isci_task_abort_task(struct sas_task *task)
                                "%s: abort task not needed for %p\n",
                                __func__, task);
                }
-
-               return ret;
+               goto out;
        }
        else
                spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1323,22 +1095,19 @@ int isci_task_abort_task(struct sas_task *task)
                        "%s:  device = %p; old_request %p already being aborted\n",
                        __func__,
                        isci_device, old_request);
-
-               return TMF_RESP_FUNC_COMPLETE;
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
        }
-       if ((task->task_proto == SAS_PROTOCOL_SMP)
-           || device_stopping
-           || old_request->complete_in_target
-           ) {
+       if (task->task_proto == SAS_PROTOCOL_SMP ||
+           test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
 
                spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
                dev_dbg(&isci_host->pdev->dev,
                        "%s: SMP request (%d)"
-                       " or device is stopping (%d)"
                        " or complete_in_target (%d), thus no TMF\n",
                        __func__, (task->task_proto == SAS_PROTOCOL_SMP),
-                       device_stopping, old_request->complete_in_target);
+                       test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
 
                /* Set the state on the task. */
                isci_task_all_done(task);
@@ -1350,15 +1119,14 @@ int isci_task_abort_task(struct sas_task *task)
                 */
        } else {
                /* Fill in the tmf structure */
-               isci_task_build_abort_task_tmf(&tmf, isci_device,
-                                              isci_tmf_ssp_task_abort,
+               isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
                                               isci_abort_task_process_cb,
                                               old_request);
 
                spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
                #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
-               ret = isci_task_execute_tmf(isci_host, &tmf,
+               ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
                                            ISCI_ABORT_TASK_TIMEOUT_MS);
 
                if (ret != TMF_RESP_FUNC_COMPLETE)
@@ -1367,16 +1135,18 @@ int isci_task_abort_task(struct sas_task *task)
                                __func__);
        }
        if (ret == TMF_RESP_FUNC_COMPLETE) {
-               old_request->complete_in_target = true;
+               set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
 
-               /* Clean up the request on our side, and wait for the aborted I/O to
-               * complete.
-               */
+               /* Clean up the request on our side, and wait for the aborted
+                * I/O to complete.
+                */
                isci_terminate_request_core(isci_host, isci_device, old_request);
        }
 
        /* Make sure we do not leave a reference to aborted_io_completion */
        old_request->io_request_completion = NULL;
+ out:
+       isci_put_device(isci_device);
        return ret;
 }
 
@@ -1455,253 +1225,412 @@ int isci_task_query_task(
                return TMF_RESP_FUNC_SUCC;
 }
 
-/**
+/*
  * isci_task_request_complete() - This function is called by the sci core when
  *    a task request completes.
- * @isci_host: This parameter specifies the ISCI host object
- * @request: This parameter is the completed isci_request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @ireq: This parameter is the completed isci_request object.
  * @completion_status: This parameter specifies the completion status from the
  *    sci core.
  *
  * Return: none.
  */
-void isci_task_request_complete(
-       struct isci_host *isci_host,
-       struct isci_request *request,
-       enum sci_task_status completion_status)
+void
+isci_task_request_complete(struct isci_host *ihost,
+                          struct isci_request *ireq,
+                          enum sci_task_status completion_status)
 {
-       struct isci_remote_device *isci_device = request->isci_device;
-       enum isci_request_status old_state;
-       struct isci_tmf *tmf = isci_request_access_tmf(request);
+       struct isci_tmf *tmf = isci_request_access_tmf(ireq);
        struct completion *tmf_complete;
+       struct scic_sds_request *sci_req = &ireq->sci;
 
-       dev_dbg(&isci_host->pdev->dev,
+       dev_dbg(&ihost->pdev->dev,
                "%s: request = %p, status=%d\n",
-               __func__, request, completion_status);
+               __func__, ireq, completion_status);
 
-       old_state = isci_request_change_state(request, completed);
+       isci_request_change_state(ireq, completed);
 
        tmf->status = completion_status;
-       request->complete_in_target = true;
-
-       if (SAS_PROTOCOL_SSP == tmf->proto) {
+       set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
 
+       if (tmf->proto == SAS_PROTOCOL_SSP) {
                memcpy(&tmf->resp.resp_iu,
-                      scic_io_request_get_response_iu_address(
-                              request->sci_request_handle
-                              ),
-                      sizeof(struct sci_ssp_response_iu));
-
-       } else if (SAS_PROTOCOL_SATA == tmf->proto) {
-
+                      &sci_req->ssp.rsp,
+                      SSP_RESP_IU_MAX_SIZE);
+       } else if (tmf->proto == SAS_PROTOCOL_SATA) {
                memcpy(&tmf->resp.d2h_fis,
-                      scic_stp_io_request_get_d2h_reg_address(
-                              request->sci_request_handle
-                              ),
-                      sizeof(struct sata_fis_reg_d2h)
-                      );
-       }
-
-       /* Manage the timer if it is still running. */
-       if (tmf->timeout_timer) {
-               isci_del_timer(isci_host, tmf->timeout_timer);
-               tmf->timeout_timer = NULL;
+                      &sci_req->stp.rsp,
+                      sizeof(struct dev_to_host_fis));
        }
 
        /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
        tmf_complete = tmf->complete;
 
-       scic_controller_complete_task(
-               isci_host->core_controller,
-               to_sci_dev(isci_device),
-               request->sci_request_handle
-               );
-       /* NULL the request handle to make sure it cannot be terminated
+       scic_controller_complete_io(&ihost->sci, ireq->sci.target_device,
+                                   &ireq->sci);
+       /* Set the 'terminated' flag so the request cannot be terminated
         *  or completed again.
         */
-       request->sci_request_handle = NULL;
+       set_bit(IREQ_TERMINATED, &ireq->flags);
 
-       isci_request_change_state(request, unallocated);
-       list_del_init(&request->dev_node);
+       isci_request_change_state(ireq, unallocated);
+       list_del_init(&ireq->dev_node);
 
        /* The task management part completes last. */
        complete(tmf_complete);
 }
 
-
-/**
- * isci_task_ssp_request_get_lun() - This function is called by the sci core to
- *    retrieve the lun for a given task request.
- * @request: This parameter is the isci_request object.
- *
- * lun for specified task request.
- */
-
-/**
- * isci_task_ssp_request_get_function() - This function is called by the sci
- *    core to retrieve the function for a given task request.
- * @request: This parameter is the isci_request object.
- *
- * function code for specified task request.
- */
-u8 isci_task_ssp_request_get_function(struct isci_request *request)
+static void isci_smp_task_timedout(unsigned long _task)
 {
-       struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
+       struct sas_task *task = (void *) _task;
+       unsigned long flags;
 
-       dev_dbg(&request->isci_host->pdev->dev,
-               "%s: func = %d\n", __func__, isci_tmf->tmf_code);
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+               task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-       return isci_tmf->tmf_code;
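+       /* Wake isci_smp_execute_task(), which waits on task->completion. */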
+       complete(&task->completion);
 }
 
-/**
- * isci_task_ssp_request_get_io_tag_to_manage() - This function is called by
- *    the sci core to retrieve the io tag for a given task request.
- * @request: This parameter is the isci_request object.
- *
- * io tag for specified task request.
- */
-u16 isci_task_ssp_request_get_io_tag_to_manage(struct isci_request *request)
+static void isci_smp_task_done(struct sas_task *task)
 {
-       u16 io_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+       if (!del_timer(&task->timer))
+               return;
+       complete(&task->completion);
+}
 
-       if (tmf_task == request->ttype) {
-               struct isci_tmf *tmf = isci_request_access_tmf(request);
-               io_tag = tmf->io_tag;
+static struct sas_task *isci_alloc_task(void)
+{
+       struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
+
+       if (task) {
+               INIT_LIST_HEAD(&task->list);
+               spin_lock_init(&task->task_state_lock);
+               task->task_state_flags = SAS_TASK_STATE_PENDING;
+               init_timer(&task->timer);
+               init_completion(&task->completion);
        }
 
-       dev_dbg(&request->isci_host->pdev->dev,
-               "%s: request = %p, io_tag = %d\n",
-               __func__, request, io_tag);
+       return task;
+}
 
-       return io_tag;
+static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
+{
+       if (task) {
+               BUG_ON(!list_empty(&task->list));
+               kfree(task);
+       }
 }
 
-/**
- * isci_task_ssp_request_get_response_data_address() - This function is called
- *    by the sci core to retrieve the response data address for a given task
- *    request.
- * @request: This parameter is the isci_request object.
- *
- * response data address for specified task request.
- */
-void *isci_task_ssp_request_get_response_data_address(
-       struct isci_request *request)
+static int isci_smp_execute_task(struct isci_host *ihost,
+                                struct domain_device *dev, void *req,
+                                int req_size, void *resp, int resp_size)
 {
-       struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
+       int res, retry;
+       struct sas_task *task = NULL;
+
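+       /* Retry the SMP request up to three times, building a fresh
+        * sas_task for each attempt.
+        */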
+       for (retry = 0; retry < 3; retry++) {
+               task = isci_alloc_task();
+               if (!task)
+                       return -ENOMEM;
+
+               task->dev = dev;
+               task->task_proto = dev->tproto;
+               sg_init_one(&task->smp_task.smp_req, req, req_size);
+               sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+
+               task->task_done = isci_smp_task_done;
+
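+               /* Arm a 10 second timeout; isci_smp_task_timedout() marks
+                * the task aborted and completes it if the timer fires.
+                */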
+               task->timer.data = (unsigned long) task;
+               task->timer.function = isci_smp_task_timedout;
+               task->timer.expires = jiffies + 10*HZ;
+               add_timer(&task->timer);
+
+               res = isci_task_execute_task(task, 1, GFP_KERNEL);
+
+               if (res) {
+                       del_timer(&task->timer);
+                       dev_err(&ihost->pdev->dev,
+                               "%s: executing SMP task failed:%d\n",
+                               __func__, res);
+                       goto ex_err;
+               }
 
-       return &isci_tmf->resp.resp_iu;
+               wait_for_completion(&task->completion);
+               res = -ECOMM;
+               if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: smp task timed out or aborted\n",
+                               __func__);
+                       isci_task_abort_task(task);
+                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+                               dev_err(&ihost->pdev->dev,
+                                       "%s: SMP task aborted and not done\n",
+                                       __func__);
+                               goto ex_err;
+                       }
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                   task->task_status.stat == SAM_STAT_GOOD) {
+                       res = 0;
+                       break;
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_UNDERRUN) {
+                       /* No error, but return the number of bytes of
+                        * underrun */
+                       res = task->task_status.residual;
+                       break;
+               }
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_OVERRUN) {
+                       res = -EMSGSIZE;
+                       break;
+               } else {
+                       dev_err(&ihost->pdev->dev,
+                               "%s: task to dev %016llx response: 0x%x "
+                               "status 0x%x\n", __func__,
+                               SAS_ADDR(dev->sas_addr),
+                               task->task_status.resp,
+                               task->task_status.stat);
+                       isci_free_task(ihost, task);
+                       task = NULL;
+               }
+       }
+ex_err:
+       BUG_ON(retry == 3 && task != NULL);
+       isci_free_task(ihost, task);
+       return res;
 }
 
-/**
- * isci_task_ssp_request_get_response_data_length() - This function is called
- *    by the sci core to retrieve the response data length for a given task
- *    request.
- * @request: This parameter is the isci_request object.
- *
- * response data length for specified task request.
- */
-u32 isci_task_ssp_request_get_response_data_length(
-       struct isci_request *request)
+#define DISCOVER_REQ_SIZE  16
+#define DISCOVER_RESP_SIZE 56
+
+int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
+                                      struct domain_device *dev,
+                                      int phy_id, int *adt)
 {
-       struct isci_tmf *isci_tmf = isci_request_access_tmf(request);
+       struct smp_resp *disc_resp;
+       u8 *disc_req;
+       int res;
+
+       disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
+       if (!disc_resp)
+               return -ENOMEM;
+
+       disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
+       if (disc_req) {
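+               /* Build a minimal SMP DISCOVER request for the given phy. */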
+               disc_req[0] = SMP_REQUEST;
+               disc_req[1] = SMP_DISCOVER;
+               disc_req[9] = phy_id;
+       } else {
+               kfree(disc_resp);
+               return -ENOMEM;
+       }
+       res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
+                                   disc_resp, DISCOVER_RESP_SIZE);
+       if (!res) {
+               if (disc_resp->result != SMP_RESP_FUNC_ACC)
+                       res = disc_resp->result;
+               else
+                       *adt = disc_resp->disc.attached_dev_type;
+       }
+       kfree(disc_req);
+       kfree(disc_resp);
 
-       return sizeof(isci_tmf->resp.resp_iu);
+       return res;
 }
 
-/**
- * isci_bus_reset_handler() - This function performs a target reset of the
- *    device referenced by "cmd'.  This function is exported through the
- *    "struct scsi_host_template" structure such that it is called when an I/O
- *    recovery process has escalated to a target reset. Note that this function
- *    is called from the scsi error handler event thread, so may block on calls.
- * @scsi_cmd: This parameter specifies the target to be reset.
- *
- * SUCCESS if the reset process was successful, else FAILED.
- */
-int isci_bus_reset_handler(struct scsi_cmnd *cmd)
+static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
 {
-       unsigned long flags = 0;
-       struct isci_host *isci_host = NULL;
-       enum sci_status status;
-       int base_status;
-       struct isci_remote_device *isci_dev
-               = isci_dev_from_domain_dev(
-               sdev_to_domain_dev(cmd->device));
+       struct domain_device *dev = idev->domain_dev;
+       struct isci_port *iport = idev->isci_port;
+       struct isci_host *ihost = iport->isci_host;
+       int res, iteration = 0, attached_device_type;
+       #define STP_WAIT_MSECS 25000
+       unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
+       unsigned long deadline = jiffies + tmo;
+       enum {
+               SMP_PHYWAIT_PHYDOWN,
+               SMP_PHYWAIT_PHYUP,
+               SMP_PHYWAIT_DONE
+       } phy_state = SMP_PHYWAIT_PHYDOWN;
+
+       /* While there is time, wait for the phy to go away and come back */
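+       /* The wait polls the expander with SMP DISCOVER for the attached
+        * device type on this phy.
+        */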
+       while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
+               int event = atomic_read(&iport->event);
+
+               ++iteration;
+
+               tmo = wait_event_timeout(ihost->eventq,
+                                        event != atomic_read(&iport->event) ||
+                                        !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
+                                        tmo);
+               /* link down, stop polling */
+               if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
+                       break;
 
-       dev_dbg(&cmd->device->sdev_gendev,
-               "%s: cmd %p, isci_dev %p\n",
-               __func__, cmd, isci_dev);
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: iport %p, iteration %d,"
+                       " phase %d: time_remaining %lu, bcns = %d\n",
+                       __func__, iport, iteration, phy_state,
+                       tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
 
-       if (!isci_dev) {
-               dev_warn(&cmd->device->sdev_gendev,
-                        "%s: isci_dev is GONE!\n",
-                        __func__);
+               res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
+                                                        &attached_device_type);
+               tmo = deadline - jiffies;
+
+               if (res) {
+                       dev_warn(&ihost->pdev->dev,
+                                "%s: iteration %d, phase %d:"
+                                " SMP error=%d, time_remaining=%lu\n",
+                                __func__, iteration, phy_state, res, tmo);
+                       break;
+               }
+               dev_dbg(&ihost->pdev->dev,
+                       "%s: iport %p, iteration %d,"
+                       " phase %d: time_remaining %lu, bcns = %d, "
+                       "attdevtype = %x\n",
+                       __func__, iport, iteration, phy_state,
+                       tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
+                       attached_device_type);
+
+               switch (phy_state) {
+               case SMP_PHYWAIT_PHYDOWN:
+                       /* Has the device gone away? */
+                       if (!attached_device_type)
+                               phy_state = SMP_PHYWAIT_PHYUP;
+
+                       break;
+
+               case SMP_PHYWAIT_PHYUP:
+                       /* Has the device come back? */
+                       if (attached_device_type)
+                               phy_state = SMP_PHYWAIT_DONE;
+                       break;
+
+               case SMP_PHYWAIT_DONE:
+                       break;
+               }
 
-               return TMF_RESP_FUNC_COMPLETE; /* Nothing to reset. */
        }
+       dev_dbg(&ihost->pdev->dev, "%s: done\n",  __func__);
+}
 
-       if (isci_dev->isci_port != NULL)
-               isci_host = isci_dev->isci_port->isci_host;
+static int isci_reset_device(struct isci_host *ihost,
+                            struct isci_remote_device *idev, int hard_reset)
+{
+       struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
+       struct isci_port *iport = idev->isci_port;
+       enum sci_status status;
+       unsigned long flags;
+       int rc;
 
-       if (isci_host != NULL)
-               spin_lock_irqsave(&isci_host->scic_lock, flags);
+       dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
 
-       status = scic_remote_device_reset(to_sci_dev(isci_dev));
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       status = scic_remote_device_reset(&idev->sci);
        if (status != SCI_SUCCESS) {
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-               if (isci_host != NULL)
-                       spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
-               scmd_printk(KERN_WARNING, cmd,
-                           "%s: scic_remote_device_reset(%p) returned %d!\n",
-                           __func__, isci_dev, status);
+               dev_warn(&ihost->pdev->dev,
+                        "%s: scic_remote_device_reset(%p) returned %d!\n",
+                        __func__, idev, status);
 
                return TMF_RESP_FUNC_FAILED;
        }
-       if (isci_host != NULL)
-               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
        /* Make sure all pending requests are able to be fully terminated. */
-       isci_device_clear_reset_pending(isci_dev);
-
-       /* Terminate in-progress I/O now. */
-       isci_remote_device_nuke_requests(isci_dev);
+       isci_device_clear_reset_pending(ihost, idev);
 
-       /* Call into the libsas default handler (which calls sas_phy_reset). */
-       base_status = sas_eh_bus_reset_handler(cmd);
+       /* If this is a device on an expander, disable BCN processing. */
+       if (!scsi_is_sas_phy_local(phy))
+               set_bit(IPORT_BCN_BLOCKED, &iport->flags);
 
-       if (base_status != SUCCESS) {
+       rc = sas_phy_reset(phy, hard_reset);
 
-               /* There can be cases where the resets to individual devices
-                * behind an expander will fail because of an unplug of the
-                * expander itself.
+       /* Terminate in-progress I/O now. */
+       isci_remote_device_nuke_requests(ihost, idev);
+
+       /* Since all pending TCs have been cleaned, resume the RNC. */
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       status = scic_remote_device_reset_complete(&idev->sci);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       /* If this is a device on an expander, bring the phy back up. */
+       if (!scsi_is_sas_phy_local(phy)) {
+               /* A phy reset will cause the device to go away then reappear.
+                * Since libsas will take action on incoming BCNs (eg. remove
+                * a device going through an SMP phy-control driven reset),
+                * we need to wait until the phy comes back up before letting
+                * discovery proceed in libsas.
                 */
-               scmd_printk(KERN_WARNING, cmd,
-                           "%s: sas_eh_bus_reset_handler(%p) returned %d!\n",
-                           __func__, cmd, base_status);
+               isci_wait_for_smp_phy_reset(idev, phy->number);
+
+               spin_lock_irqsave(&ihost->scic_lock, flags);
+               isci_port_bcn_enable(ihost, idev->isci_port);
+               spin_unlock_irqrestore(&ihost->scic_lock, flags);
        }
 
-       /* WHAT TO DO HERE IF sas_phy_reset FAILS? */
+       if (status != SCI_SUCCESS) {
+               dev_warn(&ihost->pdev->dev,
+                        "%s: scic_remote_device_reset_complete(%p) "
+                        "returned %d!\n", __func__, idev, status);
+       }
 
-       if (isci_host != NULL)
-               spin_lock_irqsave(&isci_host->scic_lock, flags);
-       status = scic_remote_device_reset_complete(to_sci_dev(isci_dev));
+       dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
 
-       if (isci_host != NULL)
-               spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+       return rc;
+}
 
-       if (status != SCI_SUCCESS) {
-               scmd_printk(KERN_WARNING, cmd,
-                           "%s: scic_remote_device_reset_complete(%p) "
-                           "returned %d!\n",
-                           __func__, isci_dev, status);
+int isci_task_I_T_nexus_reset(struct domain_device *dev)
+{
+       struct isci_host *ihost = dev_to_ihost(dev);
+       struct isci_remote_device *idev;
+       int ret, hard_reset = 1;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       idev = isci_lookup_device(dev);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
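+       /* If the device is gone or was never marked for error handling,
+        * there is nothing to reset.
+        */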
+       if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
        }
-       /* WHAT TO DO HERE IF scic_remote_device_reset_complete FAILS? */
 
-       dev_dbg(&cmd->device->sdev_gendev,
-               "%s: cmd %p, isci_dev %p complete.\n",
-               __func__, cmd, isci_dev);
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+               hard_reset = 0;
+
+       ret = isci_reset_device(ihost, idev, hard_reset);
+ out:
+       isci_put_device(idev);
+       return ret;
+}
 
-       return TMF_RESP_FUNC_COMPLETE;
+int isci_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+       struct domain_device *dev = sdev_to_domain_dev(cmd->device);
+       struct isci_host *ihost = dev_to_ihost(dev);
+       struct isci_remote_device *idev;
+       int ret, hard_reset = 1;
+       unsigned long flags;
+
+       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+               hard_reset = 0;
+
+       spin_lock_irqsave(&ihost->scic_lock, flags);
+       idev = isci_lookup_device(dev);
+       spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+       if (!idev) {
+               ret = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+
+       ret = isci_reset_device(ihost, idev, hard_reset);
+ out:
+       isci_put_device(idev);
+       return ret;
 }