}
EXPORT_SYMBOL(dm_table_get_md);
+void dm_table_run_md_queue_async(struct dm_table *t)
+{
+	struct mapped_device *md;
+	struct request_queue *queue;
+	unsigned long flags;
+
+	if (!dm_table_request_based(t))
+		return;
+
+	md = dm_table_get_md(t);
+	queue = dm_get_md_queue(md);
+	if (queue) {
+		spin_lock_irqsave(queue->queue_lock, flags);
+		blk_run_queue_async(queue);
+		spin_unlock_irqrestore(queue->queue_lock, flags);
+	}
+}
+EXPORT_SYMBOL(dm_table_run_md_queue_async);
+
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{

--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

+struct request_queue *dm_get_md_queue(struct mapped_device *md)
+{
+	return md->queue;
+}
+
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
+struct request_queue *dm_get_md_queue(struct mapped_device *md);
struct dm_stats *dm_get_stats(struct mapped_device *md);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,

--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
void dm_table_event(struct dm_table *t);

/*
+ * Run the queue for request-based targets.
+ */
+void dm_table_run_md_queue_async(struct dm_table *t);
+
+/*
* The device must be suspended before calling this method.
* Returns the previous table, which the caller must destroy.
*/
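
For context, a minimal sketch of how a request-based target could call the new export once this patch is applied. The function and variable names below are illustrative and are not part of this patch; the only interfaces assumed are dm_table_run_md_queue_async() itself and the target's ti->table pointer.

/*
 * Illustrative sketch (not part of this patch): a request-based target
 * that temporarily held back I/O can ask the core to re-run the mapped
 * device's request queue once it is ready to dispatch again.
 */
static void example_kick_queue(struct dm_target *ti)
{
	/*
	 * ti->table is the dm_table this target instance belongs to;
	 * the queue run is deferred to the block layer rather than
	 * performed synchronously here.
	 */
	dm_table_run_md_queue_async(ti->table);
}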