scsi: klist: Make it safe to use klists in atomic context
author Bart Van Assche <bart.vanassche@wdc.com>
Fri, 22 Jun 2018 21:54:49 +0000 (14:54 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 4 Oct 2018 00:00:48 +0000 (17:00 -0700)
[ Upstream commit 624fa7790f80575a4ec28fbdb2034097dc18d051 ]

In the scsi_transport_srp implementation, iterating over a klist from
atomic context cannot be avoided when the legacy block layer is used
instead of blk-mq. Hence this patch, which makes it safe to use klists
in atomic context by disabling interrupts while the klist spinlock is
held. This patch prevents lockdep from reporting the following:

WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected
 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&(&k->k_lock)->rlock);
                               local_irq_disable();
                               lock(&(&q->__queue_lock)->rlock);
                               lock(&(&k->k_lock)->rlock);
  <Interrupt>
    lock(&(&q->__queue_lock)->rlock);

stack backtrace:
Workqueue: kblockd blk_timeout_work
Call Trace:
 dump_stack+0xa4/0xf5
 check_usage+0x6e6/0x700
 __lock_acquire+0x185d/0x1b50
 lock_acquire+0xd2/0x260
 _raw_spin_lock+0x32/0x50
 klist_next+0x47/0x190
 device_for_each_child+0x8e/0x100
 srp_timed_out+0xaf/0x1d0 [scsi_transport_srp]
 scsi_times_out+0xd4/0x410 [scsi_mod]
 blk_rq_timed_out+0x36/0x70
 blk_timeout_work+0x1b5/0x220
 process_one_work+0x4fe/0xad0
 worker_thread+0x63/0x5a0
 kthread+0x1c1/0x1e0
 ret_from_fork+0x24/0x30

See also commit c9ddf73476ff ("scsi: scsi_transport_srp: Fix shost to
rport translation").

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: James Bottomley <jejb@linux.vnet.ibm.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
lib/klist.c

index 0507fa5d84c534917d0842a453bdbcaba386422a..f6b547812fe3de34a408e3ce64fce23084dedf34 100644
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
        void (*put)(struct klist_node *) = i->i_klist->put;
        struct klist_node *last = i->i_cur;
        struct klist_node *prev;
+       unsigned long flags;
 
-       spin_lock(&i->i_klist->k_lock);
+       spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
        if (last) {
                prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
                prev = to_klist_node(prev->n_node.prev);
        }
 
-       spin_unlock(&i->i_klist->k_lock);
+       spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
        if (put && last)
                put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
        void (*put)(struct klist_node *) = i->i_klist->put;
        struct klist_node *last = i->i_cur;
        struct klist_node *next;
+       unsigned long flags;
 
-       spin_lock(&i->i_klist->k_lock);
+       spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
        if (last) {
                next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
                next = to_klist_node(next->n_node.next);
        }
 
-       spin_unlock(&i->i_klist->k_lock);
+       spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
        if (put && last)
                put(last);
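
As a usage note, the caller path from the backtrace above has roughly
the following shape. This is a hedged sketch in which everything
except device_for_each_child() is hypothetical:

    #include <linux/device.h>

    /* Hypothetical callback; return nonzero to stop the iteration. */
    static int match_child(struct device *dev, void *data)
    {
            return 0;
    }

    /* Called from the block timeout path shown in the backtrace.
     * device_for_each_child() iterates the child klist via
     * klist_next(); after this patch that iteration is IRQ-safe.
     */
    static void example_timed_out(struct device *parent)
    {
            device_for_each_child(parent, NULL, match_child);
    }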