	return false;
}
+static blk_qc_t do_make_request(struct bio *bio)
+{
+	struct request_queue *q = bio->bi_disk->queue;
+	blk_qc_t ret = BLK_QC_T_NONE;
+
+	if (blk_crypto_bio_prep(&bio)) {
+		if (!q->make_request_fn)
+			return blk_mq_make_request(q, bio);
+		ret = q->make_request_fn(q, bio);
+	}
+	blk_queue_exit(q);
+	return ret;
+}
+
/**
 * generic_make_request - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);
-			if (blk_crypto_bio_prep(&bio)) {
-				if (q->make_request_fn)
-					ret = q->make_request_fn(q, bio);
-				else
-					ret = blk_mq_make_request(q, bio);
-			}
-
-			blk_queue_exit(q);
+			ret = do_make_request(bio);
			/* sort new bios into those for a lower level
			 * and those for the same level
blk_qc_t direct_make_request(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
-	blk_qc_t ret = BLK_QC_T_NONE;
	if (WARN_ON_ONCE(q->make_request_fn)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	if (unlikely(bio_queue_enter(bio)))
		return BLK_QC_T_NONE;
-	if (blk_crypto_bio_prep(&bio))
-		ret = blk_mq_make_request(q, bio);
-	blk_queue_exit(q);
-	return ret;
+	if (!blk_crypto_bio_prep(&bio)) {
+		blk_queue_exit(q);
+		return BLK_QC_T_NONE;
+	}
+	return blk_mq_make_request(q, bio);
}
EXPORT_SYMBOL_GPL(direct_make_request);
	__blk_queue_split(q, &bio, &nr_segs);
	if (!bio_integrity_prep(bio))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
	rq_qos_throttle(q, bio);
	data.cmd_flags = bio->bi_opf;
-	blk_queue_enter_live(q);
	rq = blk_mq_get_request(q, bio, &data);
	if (unlikely(!rq)) {
-		blk_queue_exit(q);
		rq_qos_cleanup(q, bio);
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
-		return BLK_QC_T_NONE;
+		goto queue_exit;
	}
	trace_block_getrq(q, bio, bio->bi_opf);
	}
	return cookie;
+queue_exit:
+	blk_queue_exit(q);
+	return BLK_QC_T_NONE;
}
EXPORT_SYMBOL_GPL(blk_mq_make_request); /* only for request based dm */
void blk_freeze_queue(struct request_queue *q);
-static inline void blk_queue_enter_live(struct request_queue *q)
-{
-	/*
-	 * Given that running in generic_make_request() context
-	 * guarantees that a live reference against q_usage_counter has
-	 * been established, further references under that same context
-	 * need not check that the queue has been frozen (marked dead).
-	 */
-	percpu_ref_get(&q->q_usage_counter);
-}
-
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	int srcu_idx;
	struct dm_table *map;
-	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED)
+	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+		/*
+		 * We are called with a live reference on q_usage_counter, but
+		 * that one will be released as soon as we return. Grab an
+		 * extra one as blk_mq_make_request expects to be able to
+		 * consume a reference (which lives until the request is freed
+		 * in case a request is allocated).
+		 */
+		percpu_ref_get(&q->q_usage_counter);
		return blk_mq_make_request(q, bio);
+	}
	map = dm_get_live_table(md, &srcu_idx);
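
The comment added in the dm.c hunk above captures the calling convention this patch establishes: whoever calls blk_mq_make_request() must hold a reference on q->q_usage_counter, and blk_mq_make_request() consumes that reference on every path, either by handing it over to the allocated request (dropped when the request is freed) or by releasing it through blk_queue_exit() on the early-exit paths now funneled through the queue_exit label. The sketch below is only a userspace analogue of that ownership handoff, not kernel code; queue_ref, enter_queue, exit_queue, consume_and_submit and free_request are made-up names used purely for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for q->q_usage_counter: a plain counter we can grab and drop. */
struct queue_ref {
	int usage;
};

static void enter_queue(struct queue_ref *q) { q->usage++; } /* cf. bio_queue_enter()/percpu_ref_get() */
static void exit_queue(struct queue_ref *q)  { q->usage--; } /* cf. blk_queue_exit() */

/*
 * Analogue of the post-patch blk_mq_make_request() contract: the caller
 * passes in one reference, and this function consumes it on every path,
 * either by transferring it to the "request" (held until the request is
 * freed) or by dropping it when it bails out early.
 */
static bool consume_and_submit(struct queue_ref *q, bool can_allocate)
{
	if (!can_allocate) {
		/* early exit: drop the caller's reference, like the queue_exit label */
		exit_queue(q);
		return false;
	}
	/* success: the reference now belongs to the allocated request */
	return true;
}

static void free_request(struct queue_ref *q)
{
	/* freeing the request releases the reference it owned */
	exit_queue(q);
}

int main(void)
{
	struct queue_ref q = { .usage = 0 };

	enter_queue(&q);			/* caller takes a reference ...         */
	if (consume_and_submit(&q, true))	/* ... and the callee now owns it       */
		free_request(&q);		/* released only when the request dies  */

	enter_queue(&q);
	consume_and_submit(&q, false);		/* early-exit path drops it immediately */

	printf("usage = %d\n", q.usage);	/* balanced: prints 0                   */
	return 0;
}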