static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
struct dm_mpath_io *mpio = get_mpio(map_context);
struct pgpath *pgpath = mpio->pgpath;
+ int r = DM_ENDIO_DONE;
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queued them inside the multipath target,
	 * we would need to make bio clones, which requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later in dispatch functions.
	 * (See dm-rq.c:dm_softirq_done() about why the clone requests
	 *  are remapped and resubmitted from the softirq context.)
	 */
if (error && !noretry_error(error)) {
struct multipath *m = ti->private;
- error = DM_ENDIO_REQUEUE;
+ r = DM_ENDIO_REQUEUE;
if (pgpath)
fail_path(pgpath);
if (atomic_read(&m->nr_valid_paths) == 0 &&
- !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
- error = dm_report_EIO(m);
+ !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ if (error == -EIO)
+ error = dm_report_EIO(m);
+ /* complete with the original error */
+ r = DM_ENDIO_DONE;
+ }
}
	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
- return error;
+ return r;
}
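To make the new convention concrete, here is a minimal userspace sketch of what this hunk establishes: the DM_ENDIO_* disposition travels in r (the return value), while error keeps carrying a plain Linux error code. struct mpath_state and model_end_io() are invented stand-ins for the kernel's struct multipath and multipath_end_io(); the DM_ENDIO_* values mirror the definitions in include/linux/device-mapper.h.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Disposition codes; values mirror include/linux/device-mapper.h. */
enum dm_endio {
	DM_ENDIO_DONE = 0,	/* complete the original request with 'error' */
	DM_ENDIO_INCOMPLETE = 1,	/* the target keeps ownership of the I/O */
	DM_ENDIO_REQUEUE = 2,	/* dm core should requeue the original request */
};

/* Mocked-down multipath state: only the two fields end_io consults. */
struct mpath_state {
	int nr_valid_paths;
	bool queue_if_no_path;
};

/* Sketch of the convention above: the disposition goes in the return
 * value, the Linux error code is left untouched. */
static enum dm_endio model_end_io(struct mpath_state *m, int error)
{
	enum dm_endio r = DM_ENDIO_DONE;

	if (error) {
		/* A retryable failure normally asks dm core to requeue... */
		r = DM_ENDIO_REQUEUE;

		/* ...unless no path is left and queueing is disabled; then
		 * the I/O completes with the original error (the kernel
		 * additionally routes an -EIO through dm_report_EIO() here). */
		if (m->nr_valid_paths == 0 && !m->queue_if_no_path)
			r = DM_ENDIO_DONE;
	}
	return r;
}

int main(void)
{
	struct mpath_state live = { .nr_valid_paths = 2, .queue_if_no_path = false };
	struct mpath_state dead = { .nr_valid_paths = 0, .queue_if_no_path = false };

	printf("paths left: r=%d\n", model_end_io(&live, -EIO));	/* 2: requeue */
	printf("no path:    r=%d\n", model_end_io(&dead, -EIO));	/* 0: done */
	return 0;
}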
static void dm_done(struct request *clone, int error, bool mapped)
{
- int r = error;
+ int r = DM_ENDIO_DONE;
struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}
- if (unlikely(r == -EREMOTEIO)) {
+ if (unlikely(error == -EREMOTEIO)) {
if (req_op(clone) == REQ_OP_WRITE_SAME &&
!clone->q->limits.max_write_same_sectors)
disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
}
- if (r <= 0)
+ switch (r) {
+ case DM_ENDIO_DONE:
/* The target wants to complete the I/O */
- dm_end_request(clone, r);
- else if (r == DM_ENDIO_INCOMPLETE)
+ dm_end_request(clone, error);
+ break;
+ case DM_ENDIO_INCOMPLETE:
/* The target will handle the I/O */
return;
- else if (r == DM_ENDIO_REQUEUE)
+ case DM_ENDIO_REQUEUE:
/* The target wants to requeue the I/O */
dm_requeue_original_request(tio, false);
- else {
+ break;
+ default:
DMWARN("unimplemented target endio return value: %d", r);
BUG();
	}
}
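The old 'if (r <= 0)' test only worked because negative error codes and the positive DM_ENDIO_* constants shared one integer. With the disposition split from the error, dm_done() can switch on r alone and hand the unmodified error to dm_end_request(). A minimal sketch of that dispatch, with end_request() and requeue_request() as hypothetical stand-ins for dm_end_request() and dm_requeue_original_request():

#include <errno.h>
#include <stdio.h>

enum dm_endio { DM_ENDIO_DONE = 0, DM_ENDIO_INCOMPLETE = 1, DM_ENDIO_REQUEUE = 2 };

/* Hypothetical stand-ins for dm_end_request()/dm_requeue_original_request(). */
static void end_request(int error)	{ printf("complete, error=%d\n", error); }
static void requeue_request(void)	{ printf("requeue original request\n"); }

/* Model of dm_done()'s dispatch: the switch consumes only the
 * disposition; the Linux error code travels separately. */
static void model_done(enum dm_endio r, int error)
{
	switch (r) {
	case DM_ENDIO_DONE:		/* the target wants the I/O completed */
		end_request(error);
		break;
	case DM_ENDIO_INCOMPLETE:	/* the target keeps handling the I/O */
		return;
	case DM_ENDIO_REQUEUE:		/* the target wants the I/O requeued */
		requeue_request();
		break;
	default:			/* the kernel DMWARNs and BUG()s here */
		fprintf(stderr, "unimplemented endio value: %d\n", r);
	}
}

int main(void)
{
	model_done(DM_ENDIO_REQUEUE, -EIO);	/* path failed, paths remain */
	model_done(DM_ENDIO_DONE, -EIO);	/* no path left: surface -EIO */
	model_done(DM_ENDIO_DONE, 0);		/* success */
	return 0;
}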