/*
* inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
*/
- if (bio_rw_ahead(bio) || bio_failfast(bio))
- req->cmd_flags |= REQ_FAILFAST;
+ if (bio_rw_ahead(bio))
+ req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER);
+ if (bio_failfast_dev(bio))
+ req->cmd_flags |= REQ_FAILFAST_DEV;
+ if (bio_failfast_transport(bio))
+ req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ if (bio_failfast_driver(bio))
+ req->cmd_flags |= REQ_FAILFAST_DRIVER;
/*
* REQ_BARRIER implies no merging, but let's make it explicit
dm_bio_record(&mpio->details, bio);
map_context->ptr = mpio;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+ bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
r = map_io(m, bio, mpio, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
mempool_free(mpio, m->mpio_pool);
mp_bh->bio = *bio;
mp_bh->bio.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
+ mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
*bio = *(mp_bh->master_bio);
bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+ bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);
}
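Both multipath layers deliberately set only the transport class: a path driver wants transport failures (cable pulls, fabric events) reported immediately so it can fail over to another path, while genuine device errors should still get normal retry handling rather than burning every path on an error the device would report identically everywhere. The call pattern the dm-mpath and md hunks share, sketched in isolation (submit_on_path() is a hypothetical helper; the field names are the ones used in this patch):

/* sketch: remap a bio onto the chosen path and mark it transport-failfast */
static void submit_on_path(struct bio *bio, struct block_device *path_bdev,
			   sector_t data_offset)
{
	bio->bi_sector += data_offset;
	bio->bi_bdev = path_bdev;
	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
	bio->bi_end_io = multipath_end_request; /* failover decision lives here */
	generic_make_request(bio);
}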
cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock();
- if (req->cmd_flags & REQ_FAILFAST)
+ if (blk_noretry_request(req))
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
recid++;
}
}
- if (req->cmd_flags & REQ_FAILFAST)
+ if (blk_noretry_request(req))
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = startdev;
cqr->memdev = startdev;
recid++;
}
}
- if (req->cmd_flags & REQ_FAILFAST)
+ if (blk_noretry_request(req))
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
}
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER | REQ_NOMERGE;
rq->retries = ALUA_FAILOVER_RETRIES;
rq->timeout = ALUA_FAILOVER_TIMEOUT;
rq->cmd[4] = len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
rq->timeout = CLARIION_TIMEOUT;
rq->retries = CLARIION_RETRIES;
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
- req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
req->cmd[0] = TEST_UNIT_READY;
req->timeout = HP_SW_TIMEOUT;
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
- req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(START_STOP);
req->cmd[0] = START_STOP;
req->cmd[4] = 1; /* Start spin cycle */
}
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER | REQ_NOMERGE;
rq->retries = RDAC_RETRIES;
rq->timeout = RDAC_TIMEOUT;
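All four device handler hunks spell out the same three-flag OR by hand. Nothing in this patch factors that out; a follow-up could collapse the repetition behind a mask, something like the hypothetical macro below (REQ_FAILFAST_MASK is not defined anywhere in this patch):

/* hypothetical convenience mask, not part of this patch */
#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

	/* the rdac hunk above would then read: */
	rq->cmd_flags |= REQ_FAILFAST_MASK | REQ_NOMERGE;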
for(i = 0; i < DV_RETRIES; i++) {
result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
sense, DV_TIMEOUT, /* retries */ 1,
- REQ_FAILFAST);
+ REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER);
if (result & DRIVER_SENSE) {
struct scsi_sense_hdr sshdr_tmp;
if (!sshdr)
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
 * submitted IO to be completed before this one is issued.
- * bit 3 -- fail fast, don't want low level driver retries
- * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
+ * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging.
- * bit 5 -- metadata request
+ * bit 4 -- metadata request
* Used for tracing to differentiate metadata and data IO. May also
* get some preferential treatment in the IO scheduler
- * bit 6 -- discard sectors
+ * bit 5 -- discard sectors
* Informs the lower level device that this range of sectors is no longer
* used by the file system and may thus be freed by the device. Used
* for flash based storage.
+ * bit 6 -- fail fast device errors
+ * bit 7 -- fail fast transport errors
+ * bit 8 -- fail fast driver errors
+ *	Don't want driver retries for any fail fast error, whatever the reason.
*/
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
-#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
+#define BIO_RW_AHEAD 1 /* read-ahead; mapped to all three FAILFAST req flags */
#define BIO_RW_BARRIER 2
-#define BIO_RW_FAILFAST 3
-#define BIO_RW_SYNC 4
-#define BIO_RW_META 5
-#define BIO_RW_DISCARD 6
+#define BIO_RW_SYNC 3
+#define BIO_RW_META 4
+#define BIO_RW_DISCARD 5
+#define BIO_RW_FAILFAST_DEV 6
+#define BIO_RW_FAILFAST_TRANSPORT 7
+#define BIO_RW_FAILFAST_DRIVER 8
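Note the renumbering: bit 3 (the old BIO_RW_FAILFAST) is reused for BIO_RW_SYNC, META and DISCARD each shift down by one, and the three new failfast bits are appended at 6-8. Out-of-tree code that open-coded (1 << 3) for failfast would now silently request the sync unplug hint instead. A compile-time guard in the style of the kernel's BUILD_BUG_ON can pin the layout (a sketch; it would live wherever the flags are consumed):

	/* sketch: catch accidental renumbering of the bio flag layout */
	BUILD_BUG_ON(BIO_RW_SYNC != 3);
	BUILD_BUG_ON(BIO_RW_FAILFAST_DEV != 6);
	BUILD_BUG_ON(BIO_RW_FAILFAST_TRANSPORT != 7);
	BUILD_BUG_ON(BIO_RW_FAILFAST_DRIVER != 8);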
/*
* upper 16 bits of bi_rw define the io priority of this bio
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
-#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
+#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
+#define bio_failfast_transport(bio) \
+ ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
+#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
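The three predicates replace the single bio_failfast() test. Like the other bio_* tests here, they return the masked bit rather than a strict boolean, which is fine inside an if () but worth remembering before assigning the result to a narrow type (bit 8 truncates to zero in a u8). A caller that only cares whether any class is set can OR them, mirroring the request-side blk_noretry_request() further down (bio_noretry() is a hypothetical helper, not part of this patch):

/* hypothetical bio-level mirror of blk_noretry_request() */
static inline int bio_noretry(struct bio *bio)
{
	return bio_failfast_dev(bio) || bio_failfast_transport(bio) ||
	       bio_failfast_driver(bio);
}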
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
- __REQ_FAILFAST, /* no low level driver retries */
+ __REQ_FAILFAST_DEV, /* no driver retries of device errors */
+ __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
};
#define REQ_RW (1 << __REQ_RW)
+#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD (1 << __REQ_DISCARD)
-#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE)
-#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV)
+#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
+#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
+#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \
+ blk_failfast_transport(rq) || \
+ blk_failfast_driver(rq))
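blk_noretry_request() keeps the old all-or-nothing callers working: the DASD hunks above don't care which class was requested, they skip retries if any of the three bits is set. A typical completion-path use, sketched with placeholder helpers (my_requeue() and my_end_request() are not kernel functions):

	/* sketch of a driver completion path honouring failfast */
	if (status && !blk_noretry_request(req) && req->retries-- > 0)
		my_requeue(req);		/* retries still allowed */
	else
		my_end_request(req, status);	/* failfast set or retries exhausted */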
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))