target: close target_put_sess_cmd() vs. core_tmr_abort_task() race
author     Joern Engel <joern@logfs.org>
Mon, 13 May 2013 20:30:06 +0000 (16:30 -0400)
committer  Nicholas Bellinger <nab@linux-iscsi.org>
Wed, 15 May 2013 08:47:35 +0000 (01:47 -0700)
It is possible for one thread to take se_sess->sess_cmd_lock in
core_tmr_abort_task() before taking a reference count on
se_cmd->cmd_kref, while another thread in target_put_sess_cmd() drops
se_cmd->cmd_kref before taking se_sess->sess_cmd_lock.

This introduces kref_put_spinlock_irqsave() and uses it in
target_put_sess_cmd() to close the race window.
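
Schematically, the race window looks like this (a simplified interleaving based
on the description above, not the literal kernel code):

  Thread A: core_tmr_abort_task()        Thread B: target_put_sess_cmd() (old)
  -------------------------------        -------------------------------------
  spin_lock_irqsave(sess_cmd_lock)
    walks sess_cmd_list, finds se_cmd
                                         kref_put(&se_cmd->cmd_kref, ...)
                                           refcount hits zero;
                                           target_release_cmd_kref() now
                                           blocks on sess_cmd_lock
    kref_get(&se_cmd->cmd_kref)            <- reference taken on a command
                                              that is already being released
  spin_unlock_irqrestore(sess_cmd_lock)
                                           release runs and frees se_cmd while
                                           thread A still holds a pointer to it

Making the final put take sess_cmd_lock atomically with the last decrement
means thread A can no longer find a command whose refcount has already dropped
to zero while it holds the lock.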

Signed-off-by: Joern Engel <joern@logfs.org>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
drivers/target/target_core_transport.c
include/linux/kref.h

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c3477fa..4a79336 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2211,21 +2211,19 @@ static void target_release_cmd_kref(struct kref *kref)
 {
        struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
        struct se_session *se_sess = se_cmd->se_sess;
-       unsigned long flags;
 
-       spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (list_empty(&se_cmd->se_cmd_list)) {
-               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               spin_unlock(&se_sess->sess_cmd_lock);
                se_cmd->se_tfo->release_cmd(se_cmd);
                return;
        }
        if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               spin_unlock(&se_sess->sess_cmd_lock);
                complete(&se_cmd->cmd_wait_comp);
                return;
        }
        list_del(&se_cmd->se_cmd_list);
-       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+       spin_unlock(&se_sess->sess_cmd_lock);
 
        se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2236,7 +2234,8 @@ static void target_release_cmd_kref(struct kref *kref)
  */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
-       return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+       return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+                       &se_sess->sess_cmd_lock);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 4972e6e..7419c02 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 struct kref {
        atomic_t refcount;
@@ -95,6 +96,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
        return kref_sub(kref, 1, release);
 }
 
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *          last reference to the object is released.
+ *          This pointer is required, and it is not acceptable to pass kfree
+ *          in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identically to kref_put() with one exception.  If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count.  The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+               void (*release)(struct kref *kref),
+               spinlock_t *lock)
+{
+       unsigned long flags;
+
+       WARN_ON(release == NULL);
+       if (atomic_add_unless(&kref->refcount, -1, 1))
+               return 0;
+       spin_lock_irqsave(lock, flags);
+       if (atomic_dec_and_test(&kref->refcount)) {
+               release(kref);
+               local_irq_restore(flags);
+               return 1;
+       }
+       spin_unlock_irqrestore(lock, flags);
+       return 0;
+}
+
 static inline int kref_put_mutex(struct kref *kref,
                                 void (*release)(struct kref *kref),
                                 struct mutex *lock)
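
As a usage sketch (hypothetical `foo' object and `foo_lock', not part of this
patch): the release callback runs with the lock held and local interrupts
disabled, and must drop the lock with a plain spin_unlock(), because
kref_put_spinlock_irqsave() restores the saved IRQ flags itself afterwards.

  #include <linux/kref.h>
  #include <linux/list.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct foo {
          struct kref kref;
          struct list_head node;
  };

  static LIST_HEAD(foo_list);
  static DEFINE_SPINLOCK(foo_lock);       /* protects foo_list */

  static void foo_release(struct kref *kref)
  {
          struct foo *f = container_of(kref, struct foo, kref);

          /* Entered with foo_lock held and interrupts disabled. */
          list_del(&f->node);
          spin_unlock(&foo_lock);         /* plain unlock: the helper restores flags */
          kfree(f);
  }

  static void foo_put(struct foo *f)
  {
          kref_put_spinlock_irqsave(&f->kref, foo_release, &foo_lock);
  }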