dm table: add dm_table_run_md_queue_async
author Mike Snitzer <snitzer@redhat.com>
Fri, 28 Feb 2014 14:33:43 +0000 (15:33 +0100)
committer Mike Snitzer <snitzer@redhat.com>
Thu, 27 Mar 2014 20:56:24 +0000 (16:56 -0400)
Introduce dm_table_run_md_queue_async() to run the request_queue of the
mapped_device associated with a request-based DM table.

Also add dm_get_md_queue() wrapper to extract the request_queue from a
mapped_device.
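
For illustration, a request-based target that has just become able to
service I/O again could kick its mapped device roughly as follows.  This
is a minimal sketch, not part of this patch: "example_target" and its
ready_work are hypothetical, and the target is assumed to keep its
dm_target pointer so it can reach the live table via ti->table.

  struct example_target {
          struct dm_target *ti;           /* saved in the target's .ctr */
          struct work_struct ready_work;
  };

  static void example_ready_work(struct work_struct *work)
  {
          struct example_target *et =
                  container_of(work, struct example_target, ready_work);

          /*
           * Re-run the owning mapped_device's request_queue so requests
           * held back while the target was not ready get dispatched.
           * This returns immediately for bio-based tables.
           */
          dm_table_run_md_queue_async(et->ti->table);
  }

Because the helper uses blk_run_queue_async(), the queue is actually run
from kblockd context, which keeps the call usable from places where
recursing directly into the dispatch path would be undesirable.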

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/dm.h
include/linux/device-mapper.h

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2ae35b2..50601ec 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1618,6 +1618,25 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
 }
 EXPORT_SYMBOL(dm_table_get_md);
 
+void dm_table_run_md_queue_async(struct dm_table *t)
+{
+       struct mapped_device *md;
+       struct request_queue *queue;
+       unsigned long flags;
+
+       if (!dm_table_request_based(t))
+               return;
+
+       md = dm_table_get_md(t);
+       queue = dm_get_md_queue(md);
+       if (queue) {
+               spin_lock_irqsave(queue->queue_lock, flags);
+               blk_run_queue_async(queue);
+               spin_unlock_irqrestore(queue->queue_lock, flags);
+       }
+}
+EXPORT_SYMBOL(dm_table_run_md_queue_async);
+
 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
 {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6382213..455e649 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -468,6 +468,11 @@ sector_t dm_get_size(struct mapped_device *md)
        return get_capacity(md->disk);
 }
 
+struct request_queue *dm_get_md_queue(struct mapped_device *md)
+{
+       return md->queue;
+}
+
 struct dm_stats *dm_get_stats(struct mapped_device *md)
 {
        return &md->stats;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 88cc58c..ed76126 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -188,6 +188,7 @@ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only
 int dm_cancel_deferred_remove(struct mapped_device *md);
 int dm_request_based(struct mapped_device *md);
 sector_t dm_get_size(struct mapped_device *md);
+struct request_queue *dm_get_md_queue(struct mapped_device *md);
 struct dm_stats *dm_get_stats(struct mapped_device *md);
 
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 5eeeab4..63da56e 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -463,6 +463,11 @@ struct mapped_device *dm_table_get_md(struct dm_table *t);
 void dm_table_event(struct dm_table *t);
 
 /*
+ * Run the queue for request-based targets.
+ */
+void dm_table_run_md_queue_async(struct dm_table *t);
+
+/*
  * The device must be suspended before calling this method.
  * Returns the previous table, which the caller must destroy.
  */