block: Fix a race between blk_cleanup_queue() and timeout handling
authorBart Van Assche <bart.vanassche@wdc.com>
Thu, 19 Oct 2017 17:00:48 +0000 (10:00 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 30 Nov 2017 08:39:06 +0000 (08:39 +0000)
commit 4e9b6f20828ac880dbc1fa2fdbafae779473d1af upstream.

Make sure that if the timeout timer fires after a queue has been
marked "dying", the affected requests are finished.

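For context, the race in short: blk_cleanup_queue() marks the queue
"dying" and later calls blk_sync_queue().  del_timer_sync() stops the
timeout timer, but the timer may already have queued q->timeout_work,
and that work used to return early via blk_queue_enter() once the queue
was dying, so requests that had just timed out were never finished.  The
sketch below is abridged and simplified from the post-patch
blk_sync_queue() in block/blk-core.c; it is illustrative only, not the
literal stable-tree code:

	void blk_sync_queue(struct request_queue *q)
	{
		/* Stop the timeout timer ... */
		del_timer_sync(&q->timeout);
		/* ... and flush any timeout work the timer already queued. */
		cancel_work_sync(&q->timeout_work);

		/* (blk-mq specific synchronization elided in this sketch) */
	}

With the blk_queue_enter()/blk_queue_exit() pair removed from
blk_timeout_work(), the timeout work no longer bails out on a dying
queue, so those requests are completed before the queue is torn down.
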
Reported-by: chenxiang (M) <chenxiang66@hisilicon.com>
Fixes: 287922eb0b18 ("block: defer timeouts to a workqueue")
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Tested-by: chenxiang (M) <chenxiang66@hisilicon.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
block/blk-core.c
block/blk-timeout.c

diff --git a/block/blk-core.c b/block/blk-core.c
index 95379fc..b1c76aa 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -282,6 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->timeout);
+       cancel_work_sync(&q->timeout_work);
 
        if (q->mq_ops) {
                struct blk_mq_hw_ctx *hctx;
@@ -720,6 +721,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+       INIT_WORK(&q->timeout_work, NULL);
        INIT_LIST_HEAD(&q->queue_head);
        INIT_LIST_HEAD(&q->timeout_list);
        INIT_LIST_HEAD(&q->icq_list);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a30441a..220661a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -135,8 +135,6 @@ void blk_timeout_work(struct work_struct *work)
        struct request *rq, *tmp;
        int next_set = 0;
 
-       if (blk_queue_enter(q, true))
-               return;
        spin_lock_irqsave(q->queue_lock, flags);
 
        list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -146,7 +144,6 @@ void blk_timeout_work(struct work_struct *work)
                mod_timer(&q->timeout, round_jiffies_up(next));
 
        spin_unlock_irqrestore(q->queue_lock, flags);
-       blk_queue_exit(q);
 }
 
 /**