scsi_proc_hostdir_rm(shost->hostt);
+ /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ rcu_barrier();
+
if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
kthread_stop(shost->ehandler);
if (shost->work_q)
destroy_workqueue(shost->work_q);
+ destroy_rcu_head(&shost->rcu);
+
if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
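The rcu_barrier() added above ensures that any callback still queued through call_rcu(&shost->rcu, ...) has finished before the work queues and the rcu_head itself are torn down. A standalone sketch of that release-path ordering, not part of the patch and using hypothetical my_obj_* names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct rcu_head rcu;
	int pending;
};

/*
 * Deferred work; touching *obj here would be a use-after-free if the
 * object could be freed before this callback ran.
 */
static void my_obj_cb(struct rcu_head *head)
{
	struct my_obj *obj = container_of(head, struct my_obj, rcu);

	obj->pending++;
}

static void my_obj_mark(struct my_obj *obj)
{
	call_rcu(&obj->rcu, my_obj_cb);
}

static void my_obj_release(struct my_obj *obj)
{
	/* Wait for any my_obj_cb() instance queued by my_obj_mark(). */
	rcu_barrier();
	kfree(obj);
}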
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_rcu_head(&shost->rcu);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)
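init_rcu_head() here pairs with the destroy_rcu_head() call added to scsi_host_dev_release(). Both are no-ops unless CONFIG_DEBUG_OBJECTS_RCU_HEAD is enabled, in which case debugobjects can report a call_rcu() on an rcu_head that was never initialized or has already been released. A minimal sketch of the pairing, with hypothetical my_host_* names, not part of the patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_host {
	struct rcu_head rcu;
};

static struct my_host *my_host_alloc(void)
{
	struct my_host *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (h)
		init_rcu_head(&h->rcu);	/* register with debugobjects */
	return h;
}

static void my_host_release(struct my_host *h)
{
	/*
	 * Any callback queued on &h->rcu must have completed by now,
	 * e.g. after an rcu_barrier() as in the release hunk above.
	 */
	destroy_rcu_head(&h->rcu);	/* unregister before freeing */
	kfree(h);
}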
}
}
+static void scsi_eh_inc_host_failed(struct rcu_head *head)
+{
+ struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->host_failed++;
+ scsi_eh_wakeup(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
scsi_eh_reset(scmd);
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
- shost->host_failed++;
- scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * Ensure that all tasks observe the host state change before the
+ * host_failed change.
+ */
+ call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
}
/**
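The comment before call_rcu() relies on a core RCU guarantee: the callback runs only after every RCU read-side critical section that was already in progress has ended, and a reader that does not finish before the callback runs is guaranteed to observe the stores made before call_rcu() was invoked. A standalone sketch of this "state change first, deferred increment and wakeup second" pattern, with hypothetical demo_* names rather than the SCSI code:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/printk.h>

static DEFINE_SPINLOCK(demo_lock);
static bool demo_in_recovery;		/* stands in for the SHOST_RECOVERY state */
static unsigned int demo_failed;	/* stands in for shost->host_failed */
static struct rcu_head demo_rcu;	/* stands in for shost->rcu; one-shot here */

/* Runs only after all pre-existing RCU read-side critical sections ended. */
static void demo_inc_failed(struct rcu_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_failed++;
	pr_info("wake the handler if busy == failed\n");
	spin_unlock_irqrestore(&demo_lock, flags);
}

static void demo_schedule_recovery(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	WRITE_ONCE(demo_in_recovery, true);	/* state change first ... */
	spin_unlock_irqrestore(&demo_lock, flags);
	/* ... then defer the increment and the wakeup past a grace period. */
	call_rcu(&demo_rcu, demo_inc_failed);
}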
cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+/*
+ * Decrement the host_busy counter and wake up the error handler if necessary.
+ * To avoid that the error handler misses a wakeup when shost->host_busy ==
+ * shost->host_failed, use call_rcu() in scsi_eh_scmd_add() in combination
+ * with an RCU read lock in this function: either this function finishes in
+ * its entirety before scsi_eh_scmd_add() increments the host_failed counter,
+ * or it notices the shost state change made by scsi_eh_scmd_add().
+ */
+static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
- struct Scsi_Host *shost = sdev->host;
- struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
+ rcu_read_lock();
atomic_dec(&shost->host_busy);
- if (starget->can_queue > 0)
- atomic_dec(&starget->target_busy);
-
- if (unlikely(scsi_host_in_recovery(shost) &&
- (shost->host_failed || shost->host_eh_scheduled))) {
+ if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
- scsi_eh_wakeup(shost);
+ if (shost->host_failed || shost->host_eh_scheduled)
+ scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
}
+ rcu_read_unlock();
+}
+
+void scsi_device_unbusy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
+
+ scsi_dec_host_busy(shost);
+
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
atomic_dec(&sdev->device_busy);
}
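scsi_dec_host_busy() keeps the host_busy decrement and the wakeup check inside one RCU read-side section: either the whole section finishes before scsi_eh_inc_host_failed() runs, so the callback's scsi_eh_wakeup() sees the decremented host_busy value, or the section overlaps the grace period and therefore sees the recovery state set before call_rcu(), re-checking host_failed under the host lock. A standalone sketch of this consumer side, again with hypothetical demo_* names and not part of the patch:

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/printk.h>

static DEFINE_SPINLOCK(demo_lock);
static atomic_t demo_busy = ATOMIC_INIT(0);	/* stands in for shost->host_busy */
static unsigned int demo_failed;		/* stands in for shost->host_failed */
static bool demo_in_recovery;			/* stands in for SHOST_RECOVERY */

/* Stand-in for scsi_eh_wakeup(): only acts once busy == failed. */
static void demo_wakeup(void)
{
	if (atomic_read(&demo_busy) == demo_failed)
		pr_info("wake the error handler\n");
}

static void demo_dec_busy(void)
{
	unsigned long flags;

	rcu_read_lock();
	atomic_dec(&demo_busy);
	/*
	 * Either this section completes before the call_rcu() callback
	 * increments demo_failed (and that callback then observes the new
	 * demo_busy value), or the recovery state set before call_rcu() is
	 * visible here and the wakeup is re-evaluated under the lock.
	 */
	if (unlikely(READ_ONCE(demo_in_recovery))) {
		spin_lock_irqsave(&demo_lock, flags);
		if (demo_failed)
			demo_wakeup();
		spin_unlock_irqrestore(&demo_lock, flags);
	}
	rcu_read_unlock();
}

The same reasoning is why the out_dec and out_dec_host_busy error paths below are switched from a bare atomic_dec(&shost->host_busy) to scsi_dec_host_busy(): any path that performs the final host_busy decrement must also be able to wake the error handler.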
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
return 0;
}
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);