}
/*
- * One of these is allocated per bio.
+ * One of these is allocated per original bio.
*/
struct dm_io {
struct mapped_device *md;
blk_status_t status;
atomic_t io_count;
- struct bio *bio;
+ struct bio *orig_bio;
unsigned long start_time;
spinlock_t endio_lock;
struct dm_stats_aux stats_aux;
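One dm_io tracks exactly one original (as-submitted) bio, while any number of per-target clones point back at it; the comment and field rename above make that ownership explicit. A rough userspace model of the relation, not kernel code, with invented mini_* names:

#include <stdatomic.h>

struct mini_bio { unsigned long sector; unsigned int nr_sectors; };

struct mini_io {
	struct mini_bio *orig_bio;   /* the one original request */
	atomic_int io_count;         /* submitter ref + live clones */
};

struct mini_clone {
	struct mini_bio bio;         /* per-target copy that actually runs */
	struct mini_io *io;          /* back-pointer to the shared state */
};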
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
+ struct bio *bio = io->orig_bio;
int cpu;
int rw = bio_data_dir(bio);
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
+ struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
int pending;
int rw = bio_data_dir(bio);
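Both accounting helpers now read io->orig_bio, which makes explicit that duration and direction are always charged to the request the caller submitted, never to a clone. A compilable sketch of the same start/end pairing, using time() as a stand-in for jiffies and invented names:

#include <stdio.h>
#include <time.h>

struct acct_io { time_t start_time; int is_write; size_t bytes; };

static void acct_start(struct acct_io *io)
{
	io->start_time = time(NULL);          /* start_io_acct() analogue */
}

static void acct_end(const struct acct_io *io)
{
	double duration = difftime(time(NULL), io->start_time);

	printf("%s of %zu bytes took %.0f s\n",
	       io->is_write ? "write" : "read", io->bytes, duration);
}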
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags);
- if (!(io->status == BLK_STS_DM_REQUEUE &&
- __noflush_suspending(md)))
+ if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
io->status = error;
spin_unlock_irqrestore(&io->endio_lock, flags);
}
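The rule encoded above: a later error may overwrite io->status, except when the status is already BLK_STS_DM_REQUEUE during a noflush suspend, in which case the push-back wins. The same predicate as a self-contained C function (the constants and the noflush flag are stand-ins, not the kernel's blk_status_t values):

enum { STS_OK = 0, STS_IOERR = 10, STS_DM_REQUEUE = 11 };

static int merge_status(int cur, int err, int noflush_suspending)
{
	/* push-back supersedes any later error */
	if (err && !(cur == STS_DM_REQUEUE && noflush_suspending))
		return err;
	return cur;
}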
if (io->status == BLK_STS_DM_REQUEUE) {
/*
* Target requested pushing back the I/O.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md))
- bio_list_add_head(&md->deferred, io->bio);
+ /* NOTE early return due to BLK_STS_DM_REQUEUE below */
+ bio_list_add_head(&md->deferred, io->orig_bio);
else
/* noflush suspend was interrupted. */
io->status = BLK_STS_IOERR;
}
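bio_list_add_head() puts the pushed-back original at the front of md->deferred, so that on resume it is reissued ahead of bios deferred after it. Head insertion on a singly linked list, as a minimal sketch with invented types:

struct node { struct node *next; };
struct list { struct node *head; };

static void list_add_head(struct list *l, struct node *n)
{
	n->next = l->head;   /* old head becomes second */
	l->head = n;         /* requeued entry goes first */
}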
io_error = io->status;
- bio = io->bio;
+ bio = io->orig_bio;
end_io_acct(io);
free_io(md, io);
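Note the ordering here: free_io() releases io, so the status and the original bio are copied into locals first and only those locals are used afterwards. The same copy-before-free pattern in plain C, with invented names:

#include <stdlib.h>

struct job { int status; void *payload; };

static void finish_job(struct job *j)
{
	int status = j->status;       /* copy out ... */
	void *payload = j->payload;   /* ... before the free */

	free(j);
	/* from here on, touch only the local copies */
	(void)status; (void)payload;
}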
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct bio *report_bio = tio->io->bio;
+ struct bio *report_bio = tio->io->orig_bio;
struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
unsigned int nr_rep = 0;
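Navigating from the clone back to the original bio relies on container_of(): the clone is embedded in a dm_target_io, which carries the dm_io pointer. A self-contained userspace version of that idiom with invented struct names (the kernel's macro adds type checking on top):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int dummy; };
struct outer { int io; struct inner clone; };

static struct outer *outer_of(struct inner *c)
{
	/* recover the enclosing object from the embedded member */
	return container_of(c, struct outer, clone);
}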
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
- bio_dev(tio->io->bio), sector);
+ bio_dev(tio->io->orig_bio), sector);
generic_make_request(clone);
break;
case DM_MAPIO_KILL:
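On DM_MAPIO_REMAPPED the clone already carries the target's device and sector, and the tracepoint records the original bio's device as the source of the remap. A toy before/after remap in plain C, with invented types, just to show which side each value comes from:

#include <stdio.h>

struct tbio { int dev; unsigned long sector; };

static void remap_and_trace(struct tbio *clone, const struct tbio *orig,
			    int target_dev, unsigned long target_sector)
{
	clone->dev = target_dev;
	clone->sector = target_sector;
	printf("remap: dev %d sector %lu -> dev %d sector %lu\n",
	       orig->dev, orig->sector, clone->dev, clone->sector);
}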
ci.io = alloc_io(md);
ci.io->status = 0;
atomic_set(&ci.io->io_count, 1);
- ci.io->bio = bio;
+ ci.io->orig_bio = bio;
ci.io->md = md;
spin_lock_init(&ci.io->endio_lock);
ci.sector = bio->bi_iter.bi_sector;
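io_count starts at 1: the submitter holds the initial reference, each clone issued for a target takes another, and completion runs when the count drops to zero. A compilable C11 sketch of that reference pattern (get_io/put_io are stand-ins, not the kernel helpers):

#include <stdatomic.h>

struct ref_io { atomic_int io_count; };

static void complete_orig(struct ref_io *io)
{
	(void)io;   /* end the original bio here */
}

static void get_io(struct ref_io *io)
{
	atomic_fetch_add(&io->io_count, 1);   /* one more clone in flight */
}

static void put_io(struct ref_io *io)
{
	if (atomic_fetch_sub(&io->io_count, 1) == 1)
		complete_orig(io);   /* last reference: finish the original */
}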
/*
* Remainder must be passed to generic_make_request()
* so that it gets handled *after* bios already submitted
* have been completely processed.
* We take a clone of the original to store in
- * ci.io->bio to be used by end_io_acct() and
+ * ci.io->orig_bio to be used by end_io_acct() and
* for dec_pending to use for completion handling.
* As this path is not used for REQ_OP_ZONE_REPORT,
- * the usage of io->bio in dm_remap_zone_report()
+ * the usage of io->orig_bio in dm_remap_zone_report()
* won't be affected by this reassignment.
*/
struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
md->queue->bio_split);
- ci.io->bio = b;
+ ci.io->orig_bio = b;
bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
bio_chain(b, bio);
generic_make_request(bio);
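The bio_advance() call converts already-mapped sectors into a byte offset: bio_sectors(bio) - ci.sector_count is what has been dispatched so far, and << 9 multiplies by the 512-byte sector size. In plain C:

static unsigned long split_offset_bytes(unsigned int total_sectors,
					unsigned int remaining_sectors)
{
	return (unsigned long)(total_sectors - remaining_sectors) << 9;
}

The clone b then stands in as ci.io->orig_bio, so accounting and completion still see the full original, while the advanced remainder is resubmitted with bio_chain() tying the two completions together.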