static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
struct sbitmap_queue *bt)
{
- if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
- !hctx_may_queue(data->hctx, bt))
+ if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
return BLK_MQ_NO_TAG;
+
if (data->shallow_depth)
return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
else
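
This first hunk, in __blk_mq_get_tag() in block/blk-mq-tag.c, replaces the
flag test with a direct check of the queue: whether an allocation uses
scheduler ("internal") tags is fully determined by whether the queue has an
elevator attached. The hctx_may_queue() fair-share limit only matters for
driver tags on shared tag sets, which is why the elevator case skips it. As
a rough userspace model of that limit (the ceiling division and the floor of
4 match the blk-mq code of this era; names are illustrative):

#include <stdbool.h>

/*
 * Sketch of the rule hctx_may_queue() enforces on a shared tag set:
 * each of `users` active queues may hold at most about
 * ceil(pool_depth / users) tags, but never fewer than 4.
 */
static bool may_queue(unsigned int pool_depth, unsigned int users,
		      unsigned int queue_active)
{
	unsigned int share;

	if (users <= 1)		/* pool not contended: no limit */
		return true;

	share = (pool_depth + users - 1) / users;	/* ceiling */
	if (share < 4)
		share = 4;

	return queue_active < share;
}

The next hunk moves to blk_mq_rq_ctx_init() in block/blk-mq.c.
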
struct request *rq = tags->static_rqs[tag];
req_flags_t rq_flags = 0;
- if (data->flags & BLK_MQ_REQ_INTERNAL) {
+ if (data->q->elevator) {
rq->tag = BLK_MQ_NO_TAG;
rq->internal_tag = tag;
} else {
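
blk_mq_rq_ctx_init() makes the same substitution when initializing the
request's two tag fields. Including the else branch the hunk cuts off, the
resulting logic is roughly this (a sketch, not a verbatim quote):

	if (data->q->elevator) {
		/* the sbitmap index is a scheduler tag; the driver
		 * tag is assigned later, at dispatch time */
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	} else {
		/* no scheduler: the sbitmap index is the driver tag */
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	}

The following hunk is in __blk_mq_alloc_request(), also in block/blk-mq.c.
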
data->flags |= BLK_MQ_REQ_NOWAIT;
if (e) {
- data->flags |= BLK_MQ_REQ_INTERNAL;
-
/*
* Flush requests are special and go directly to the
* dispatch list. Don't include reserved tags in the
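
Here __blk_mq_alloc_request() stops setting the flag when an elevator is
present, since every consumer now derives the same answer from the queue
itself. A hypothetical helper (not part of the patch) states the invariant
the whole change relies on:

	/* hypothetical, for illustration only */
	static inline bool blk_mq_alloc_is_internal(struct blk_mq_alloc_data *data)
	{
		/* scheduler tags are used iff the queue has an elevator */
		return data->q->elevator != NULL;
	}

The next hunk stays in __blk_mq_alloc_request(), at its retry label.
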
retry:
data->ctx = blk_mq_get_ctx(q);
data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
- if (!(data->flags & BLK_MQ_REQ_INTERNAL))
+ if (!e)
blk_mq_tag_busy(data->hctx);
/*
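
The new `!e` test is equivalent to the removed flag check because `e` is the
queue's elevator, cached near the top of the function (context the hunk does
not show, roughly):

	struct request_queue *q = data->q;
	struct elevator_queue *e = q->elevator;

So blk_mq_tag_busy(), which bumps the shared-tag active-queue accounting,
now runs only when the allocation takes a driver tag directly; with a
scheduler attached, that accounting happens when the driver tag is actually
acquired at dispatch. Next, blk_mq_alloc_request_hctx().
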
cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
data.ctx = __blk_mq_get_ctx(q, cpu);
- if (q->elevator)
- data.flags |= BLK_MQ_REQ_INTERNAL;
- else
+ if (!q->elevator)
blk_mq_tag_busy(data.hctx);
ret = -EWOULDBLOCK;
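
blk_mq_alloc_request_hctx() collapses its if/else for the same reason: the
flag assignment is gone and only the busy accounting for the no-elevator
case remains. This is the interface drivers use to allocate a request on a
specific hardware queue; a hedged usage sketch (loosely modeled on
NVMe-over-Fabrics connect commands, error handling elided):

	struct request *rq;

	/* reserved, non-blocking allocation on hardware queue 0; after
	 * this patch the tag pool is chosen by q->elevator, not a flag */
	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
				       BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

The next hunk is blk_mq_tags_from_data() in block/blk-mq.h.
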
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
- if (data->flags & BLK_MQ_REQ_INTERNAL)
+ if (data->q->elevator)
return data->hctx->sched_tags;
return data->hctx->tags;
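
blk_mq_tags_from_data() is the main reader of the removed flag: it picks
between the scheduler and driver tag pools, and now keys off the queue. Its
typical use is the pattern already visible in blk_mq_rq_ctx_init() above
(sketch):

	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];	/* preallocated request */

The final hunk removes the flag's definition from include/linux/blk-mq.h.
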
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
/* allocate from reserved pool */
BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
- /* allocate internal/sched tag */
- BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2),
/* set RQF_PREEMPT */
BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3),
};
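
Note that BLK_MQ_REQ_PREEMPT keeps bit 3 rather than being renumbered, so
existing flag values stay stable and bit 2 is simply left unused. A
hypothetical compile-time guard (not in the patch) could document the gap:

	/* hypothetical, for illustration only */
	static_assert(BLK_MQ_REQ_PREEMPT == (__force blk_mq_req_flags_t)(1 << 3),
		      "bit 2 deliberately left unused");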