--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ ... @@ static void blk_flush_complete_seq(struct request *rq,
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
-		blk_mq_add_to_requeue_list(rq, true, true);
+		blk_mq_add_to_requeue_list(rq, true);
+		blk_mq_kick_requeue_list(q);
 		break;
 
 	case REQ_FSEQ_DONE:
@@ ... @@ static void blk_kick_flush(struct request_queue *q,
 	smp_wmb();
 	req_ref_set(flush_rq, 1);
 
-	blk_mq_add_to_requeue_list(flush_rq, false, true);
+	blk_mq_add_to_requeue_list(flush_rq, false);
+	blk_mq_kick_requeue_list(q);
 }
 
 static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
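The flush-path hunks above swap the implicit kick for an explicit one. A minimal before/after sketch of the new calling convention (annotation, not part of the patch):

	/* before: the third argument asked the helper to kick the list */
	blk_mq_add_to_requeue_list(rq, true, true);

	/* after: add first, then schedule the requeue work explicitly */
	blk_mq_add_to_requeue_list(rq, true);
	blk_mq_kick_requeue_list(q);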
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ ... @@
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
+	struct request_queue *q = rq->q;
+
 	__blk_mq_requeue_request(rq);
 
 	/* this request will be re-inserted to io scheduler queue */
 	blk_mq_sched_requeue_request(rq);
 
-	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
+	blk_mq_add_to_requeue_list(rq, true);
+
+	if (kick_requeue_list)
+		blk_mq_kick_requeue_list(q);
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
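Note the new local: q is read from rq->q before the request goes back on the requeue list. Once it is on that list the requeue work can requeue and complete it at any time, so rq must not be dereferenced afterwards; the cached pointer keeps the conditional kick safe. Annotated sketch (comments mine):

	struct request_queue *q = rq->q;	/* take the queue while rq is still ours */
	...
	blk_mq_add_to_requeue_list(rq, true);	/* rq may now be requeued/completed */

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);	/* uses the cached pointer, not rq->q */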
@@ ... @@ static void blk_mq_requeue_work(struct work_struct *work)
 	blk_mq_run_hw_queues(q, false);
 }
 
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-				bool kick_requeue_list)
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
@@ ... @@
 		list_add_tail(&rq->queuelist, &q->requeue_list);
 	}
 	spin_unlock_irqrestore(&q->requeue_lock, flags);
-
-	if (kick_requeue_list)
-		blk_mq_kick_requeue_list(q);
 }
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
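For context, a sketch of roughly what the helper reduces to after the patch, assuming the usual lock-protected list add visible in the hunk context and omitting any flag handling on the head-insertion path:

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/* only queue the request; running the work is now the caller's job */
	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head)
		list_add(&rq->queuelist, &q->requeue_list);
	else
		list_add_tail(&rq->queuelist, &q->requeue_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}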
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ ... @@
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
 			     unsigned int);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-				bool kick_requeue_list);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 		struct blk_mq_ctx *start);
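With the kick decoupled from the add, a caller can batch several requeues under a single deferred-work kick. A hypothetical caller (requeue_many is illustrative, not from the tree):

static void requeue_many(struct request_queue *q, struct request **rqs, int nr)
{
	int i;

	/* queue everything first ... */
	for (i = 0; i < nr; i++)
		blk_mq_add_to_requeue_list(rqs[i], false);

	/* ... then schedule the requeue work once for the whole batch */
	blk_mq_kick_requeue_list(q);
}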