return &t->targets[(KEYS_PER_NODE * n) + k];
}
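+/*
+ * iterate_devices callback: reports an underlying device whose request
+ * queue does not advertise QUEUE_FLAG_POLL.
+ */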
+static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
+}
+
/*
* type->iterate_devices() should be called when the sanity check needs to
* iterate and check all underlying data devices. iterate_devices() will
return 0;
}
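+/*
+ * A table supports bio polling only when every underlying data device
+ * advertises QUEUE_FLAG_POLL.
+ */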
+static int dm_table_supports_poll(struct dm_table *t)
+{
+ return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
+}
+
/*
* Check whether a table has no data devices attached using each
* target's iterate_devices method.
dm_update_crypto_profile(q, t);
disk_update_readahead(t->md->disk);
+ /*
+ * The check for a request-based device is left to
+ * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
+ *
+ * For a bio-based device, only set QUEUE_FLAG_POLL when all
+ * underlying devices support polling.
+ */
+ if (__table_type_bio_based(t->type)) {
+ if (dm_table_supports_poll(t))
+ blk_queue_flag_set(QUEUE_FLAG_POLL, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+ }
+
return 0;
}
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
+/*
+ * For a REQ_POLLED fs bio, this flag is set when we link the mapped
+ * underlying dm_io instances into one list and reuse bio->bi_private as
+ * the list head. Before ending this fs bio, we will restore its
+ * ->bi_private.
+ */
+#define REQ_DM_POLL_LIST REQ_DRV
+
static const char *_name = DM_NAME;
static unsigned int major = 0;
struct dm_io *io;
sector_t sector;
unsigned sector_count;
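+ /* original bio is REQ_POLLED; its dm_io is queued for dm_poll_bio */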
+ bool submit_as_polled;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
if (!clone)
return NULL;
+ /* REQ_DM_POLL_LIST shouldn't be inherited */
+ clone->bi_opf &= ~REQ_DM_POLL_LIST;
+
tio = clone_to_tio(clone);
tio->inside_dm_io = false;
}
if (unlikely(wq_has_sleeper(&md->wait)))
wake_up(&md->wait);
- if (io_error == BLK_STS_DM_REQUEUE)
+ if (io_error == BLK_STS_DM_REQUEUE) {
+ /*
+ * The upper layer won't help us poll a split bio; io->orig_bio
+ * may only reflect a subset of the pre-split original, so
+ * clear REQ_POLLED in case of requeue.
+ */
+ bio->bi_opf &= ~REQ_POLLED;
return;
+ }
if (bio_is_flush_with_data(bio)) {
/*
return true;
}
+/*
+ * Reuse ->bi_private as hlist head for storing all dm_io instances
+ * associated with this bio, and this bio's bi_private needs to be
+ * stored in dm_io->data before the reuse.
+ *
+ * bio->bi_private is owned by the fs or upper layer, so the block layer
+ * won't touch it after splitting. Meanwhile it won't be changed by anyone
+ * after the bio is submitted. So this reuse is safe.
+ */
+static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
+{
+ return (struct hlist_head *)&bio->bi_private;
+}
+
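+/*
+ * Add a just-mapped dm_io to the poll list stored in bio->bi_private,
+ * saving the original ->bi_private into dm_io->data on first use.
+ */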
+static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
+{
+ struct hlist_head *head = dm_get_bio_hlist_head(bio);
+
+ if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
+ bio->bi_opf |= REQ_DM_POLL_LIST;
+ /*
+ * Save .bi_private into dm_io, so that we can reuse
+ * .bi_private as hlist head for storing dm_io list
+ */
+ io->data = bio->bi_private;
+
+ INIT_HLIST_HEAD(head);
+
+ /* tell block layer to poll for completion */
+ bio->bi_cookie = ~BLK_QC_T_NONE;
+ } else {
+ /*
+ * bio recursed due to split, reuse original poll list,
+ * and save bio->bi_private too.
+ */
+ io->data = hlist_entry(head->first, struct dm_io, node)->data;
+ }
+
+ hlist_add_head(&io->node, head);
+}
+
/*
* Select the correct strategy for processing a non-flush bio.
*/
if (__process_abnormal_io(ci, ti, &r))
return r;
+ /*
+ * Only support bio polling for normal IO, and the target io is
+ * exactly inside the dm_io instance (verified in dm_poll_dm_io)
+ */
+ ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
+
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
__map_bio(clone);
ci->map = map;
ci->io = alloc_io(md, bio);
ci->bio = bio;
+ ci->submit_as_polled = false;
ci->sector = bio->bi_iter.bi_sector;
ci->sector_count = bio_sectors(bio);
if (ci.io->start_io_acct)
dm_start_io_acct(ci.io, NULL);
- /* drop the extra reference count */
- dm_io_dec_pending(ci.io, errno_to_blk_status(error));
+ /*
+ * Drop the extra reference count for non-POLLED bio, and hold one
+ * reference for POLLED bio, which will be released in dm_poll_bio
+ *
+ * Add every dm_io instance into the hlist_head which is stored in
+ * bio->bi_private, so that dm_poll_bio can poll them all.
+ */
+ if (error || !ci.submit_as_polled)
+ dm_io_dec_pending(ci.io, errno_to_blk_status(error));
+ else
+ dm_queue_poll_io(bio, ci.io);
}
static void dm_submit_bio(struct bio *bio)
dm_put_live_table(md, srcu_idx);
}
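+/*
+ * Poll the cloned bio of one dm_io; return true once the clone has
+ * completed and only the final reference on the dm_io remains.
+ */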
+static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
+ unsigned int flags)
+{
+ WARN_ON_ONCE(!io->tio.inside_dm_io);
+
+ /* don't poll if the mapped io is done */
+ if (atomic_read(&io->io_count) > 1)
+ bio_poll(&io->tio.clone, iob, flags);
+
+ /* bio_poll holds the last reference */
+ return atomic_read(&io->io_count) == 1;
+}
+
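+/*
+ * ->poll_bio handler: walk the dm_io list hung off bio->bi_private, poll
+ * each outstanding clone, and complete any dm_io whose clone has finished.
+ * Return 1 only when every dm_io on the list has completed.
+ */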
+static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
+ unsigned int flags)
+{
+ struct hlist_head *head = dm_get_bio_hlist_head(bio);
+ struct hlist_head tmp = HLIST_HEAD_INIT;
+ struct hlist_node *next;
+ struct dm_io *io;
+
+ /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
+ if (!(bio->bi_opf & REQ_DM_POLL_LIST))
+ return 0;
+
+ WARN_ON_ONCE(hlist_empty(head));
+
+ hlist_move_list(head, &tmp);
+
+ /*
+ * Restore .bi_private before possibly completing dm_io.
+ *
+ * bio_poll() is only possible once @bio has been completely
+ * submitted via submit_bio_noacct()'s depth-first submission.
+ * So there is no dm_queue_poll_io() race associated with
+ * clearing REQ_DM_POLL_LIST here.
+ */
+ bio->bi_opf &= ~REQ_DM_POLL_LIST;
+ bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+
+ hlist_for_each_entry_safe(io, next, &tmp, node) {
+ if (dm_poll_dm_io(io, iob, flags)) {
+ hlist_del_init(&io->node);
+ /*
+ * clone_endio() has already occurred, so passing
+ * error as 0 here doesn't override io->status
+ */
+ dm_io_dec_pending(io, 0);
+ }
+ }
+
+ /* Not done? */
+ if (!hlist_empty(&tmp)) {
+ bio->bi_opf |= REQ_DM_POLL_LIST;
+ /* Reset bio->bi_private to dm_io list head */
+ hlist_move_list(&tmp, head);
+ return 0;
+ }
+ return 1;
+}
+
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
static const struct block_device_operations dm_blk_dops = {
.submit_bio = dm_submit_bio,
+ .poll_bio = dm_poll_bio,
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,