blk-mq: introduce blk_mq_delay_kick_requeue_list()
author: Mike Snitzer <snitzer@redhat.com>
Wed, 14 Sep 2016 17:28:30 +0000 (13:28 -0400)
committer: Jens Axboe <axboe@fb.com>
Wed, 14 Sep 2016 17:48:34 +0000 (11:48 -0600)
blk_mq_delay_kick_requeue_list() provides the ability to kick the
q->requeue_list after a specified time.  To do this the request_queue's
'requeue_work' member was changed to a delayed_work.

blk_mq_delay_kick_requeue_list() allows DM to defer processing requeued
requests while it doesn't make sense to immediately requeue them
(e.g. when all paths in a DM multipath have failed).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-mq.c
include/linux/blk-mq.h
include/linux/blkdev.h

index eea0d23..7ddc796 100644 (file)
@@ -502,7 +502,7 @@ EXPORT_SYMBOL(blk_mq_requeue_request);
 static void blk_mq_requeue_work(struct work_struct *work)
 {
        struct request_queue *q =
-               container_of(work, struct request_queue, requeue_work);
+               container_of(work, struct request_queue, requeue_work.work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;
@@ -557,16 +557,24 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
 void blk_mq_cancel_requeue_work(struct request_queue *q)
 {
-       cancel_work_sync(&q->requeue_work);
+       cancel_delayed_work_sync(&q->requeue_work);
 }
 EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
-       kblockd_schedule_work(&q->requeue_work);
+       kblockd_schedule_delayed_work(&q->requeue_work, 0);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+                                   unsigned long msecs)
+{
+       kblockd_schedule_delayed_work(&q->requeue_work,
+                                     msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+
 void blk_mq_abort_requeue_list(struct request_queue *q)
 {
        unsigned long flags;
@@ -2084,7 +2092,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
        q->sg_reserved_size = INT_MAX;
 
-       INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+       INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
        INIT_LIST_HEAD(&q->requeue_list);
        spin_lock_init(&q->requeue_lock);
 
index ff14f68..60ef14c 100644 (file)
@@ -233,6 +233,7 @@ void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq, int error);
 
index 69aae72..c47c358 100644 (file)
@@ -449,7 +449,7 @@ struct request_queue {
 
        struct list_head        requeue_list;
        spinlock_t              requeue_lock;
-       struct work_struct      requeue_work;
+       struct delayed_work     requeue_work;
 
        struct mutex            sysfs_lock;