 }
 
 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-                                    struct request *rq)
+                                    struct request *rq, bool at_head)
 {
         struct blk_mq_ctx *ctx = rq->mq_ctx;
 
         trace_block_rq_insert(hctx->queue, rq);
 
-        list_add_tail(&rq->queuelist, &ctx->rq_list);
+        if (at_head)
+                list_add(&rq->queuelist, &ctx->rq_list);
+        else
+                list_add_tail(&rq->queuelist, &ctx->rq_list);
         blk_mq_hctx_mark_pending(hctx, ctx);
 
         /*
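The new at_head flag maps directly onto the two kernel list primitives: list_add() places the request at the front of the per-context rq_list, so it is dispatched ahead of everything already queued, while list_add_tail() preserves the old FIFO behavior. The standalone sketch below (a userspace mock with simplified stand-ins for the <linux/list.h> helpers and a hypothetical two-field struct request; not part of the patch) demonstrates the ordering effect:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
        head->next = head->prev = head;
}

static void __list_add(struct list_head *new, struct list_head *prev,
                       struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

/* Head insertion: the entry is seen first when walking the list. */
static void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/* Tail insertion: the entry is seen last (FIFO order). */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}

/* Hypothetical mock of the two fields this patch cares about. */
struct request {
        int tag;
        struct list_head queuelist;
};

int main(void)
{
        struct list_head rq_list;
        struct request a = { .tag = 1 }, b = { .tag = 2 }, c = { .tag = 3 };
        struct list_head *pos;

        INIT_LIST_HEAD(&rq_list);
        list_add_tail(&a.queuelist, &rq_list);  /* at_head == false */
        list_add_tail(&b.queuelist, &rq_list);  /* at_head == false */
        list_add(&c.queuelist, &rq_list);       /* at_head == true */

        /* Walks front to back and prints "3 1 2": c jumped the queue. */
        for (pos = rq_list.next; pos != &rq_list; pos = pos->next) {
                struct request *rq = (struct request *)
                        ((char *)pos - offsetof(struct request, queuelist));
                printf("%d ", rq->tag);
        }
        printf("\n");
        return 0;
}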
 }
 
 void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-                bool run_queue)
+                bool at_head, bool run_queue)
 {
         struct blk_mq_hw_ctx *hctx;
         struct blk_mq_ctx *ctx, *current_ctx;
                         rq->mq_ctx = ctx;
                 }
                 spin_lock(&ctx->lock);
-                __blk_mq_insert_request(hctx, rq);
+                __blk_mq_insert_request(hctx, rq, at_head);
                 spin_unlock(&ctx->lock);
 
                 blk_mq_put_ctx(current_ctx);
         /* ctx->cpu might be offline */
         spin_lock(&ctx->lock);
-        __blk_mq_insert_request(hctx, rq);
+        __blk_mq_insert_request(hctx, rq, false);
         spin_unlock(&ctx->lock);
 
         blk_mq_put_ctx(current_ctx);
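Only the exported entry point honors at_head. The internal paths — the ctx->cpu offline requeue above, plus the plug-list drain and bio-merge hunks that follow — hard-code false, since those requests are being (re)queued onto a software context in ordinary FIFO order. Pieced together from the fragments above, the updated function plausibly has the following shape; this is a sketch only, not the verbatim source: the cpu_online() fallback, the omitted flush special-casing, and the blk_mq_run_hw_queue() call are assumptions about surrounding code this excerpt does not show:

/*
 * Sketch only, reconstructed from the hunks above; not the verbatim
 * function. Flush handling is omitted, and the helpers used here are
 * assumptions about the surrounding blk-mq code.
 */
void blk_mq_insert_request(struct request_queue *q, struct request *rq,
                bool at_head, bool run_queue)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx, *current_ctx;

        current_ctx = blk_mq_get_ctx(q);
        ctx = rq->mq_ctx;

        if (!cpu_online(ctx->cpu)) {
                /* Original submission CPU went away; use the current one. */
                ctx = current_ctx;
                rq->mq_ctx = ctx;
        }
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        spin_lock(&ctx->lock);
        __blk_mq_insert_request(hctx, rq, at_head);
        spin_unlock(&ctx->lock);

        blk_mq_put_ctx(current_ctx);

        if (run_queue)
                blk_mq_run_hw_queue(hctx, true);
}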
                 rq = list_first_entry(list, struct request, queuelist);
                 list_del_init(&rq->queuelist);
                 rq->mq_ctx = ctx;
-                __blk_mq_insert_request(hctx, rq);
+                __blk_mq_insert_request(hctx, rq, false);
         }
         spin_unlock(&ctx->lock);
                 __blk_mq_free_request(hctx, ctx, rq);
         else {
                 blk_mq_bio_to_request(rq, bio);
-                __blk_mq_insert_request(hctx, rq);
+                __blk_mq_insert_request(hctx, rq, false);
         }
 
         spin_unlock(&ctx->lock);
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_insert_request(struct request_queue *, struct request *,
+                bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
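With the widened prototype, a caller that needs front-of-queue treatment (a passthrough request issued via blk_execute_rq_nowait() looks like the sort of user this change is aimed at) can now forward its own at_head decision. Hypothetical call sites, not part of this patch:

/* Urgent request: insert at the head and kick the hardware queue. */
blk_mq_insert_request(q, rq, true, true);

/* Default behavior, equivalent to the old three-argument call. */
blk_mq_insert_request(q, rq, false, true);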