 void mmc_blk_mq_complete(struct request *req)
 {
 	struct mmc_queue *mq = req->q->queuedata;
+	struct mmc_host *host = mq->card->host;
 
-	if (mq->use_cqe)
+	if (host->cqe_enabled)
 		mmc_blk_cqe_complete_rq(mq, req);
 	else if (likely(!blk_should_fake_timeout(req->q)))
 		mmc_blk_mq_complete_rq(mq, req);
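
The hunk above is the heart of the change: mq->use_cqe was only ever a cached copy of host->cqe_enabled (see the removed assignment in mmc_init_queue further down), so the completion path can read the host flag directly. As a minimal userspace sketch of that dispatch pattern, with simplified stand-in structs (the toy_* names are hypothetical, not kernel symbols):

#include <stdbool.h>
#include <stdio.h>

struct toy_host  { bool cqe_enabled; };
struct toy_card  { struct toy_host *host; };
struct toy_queue { struct toy_card *card; };

static void toy_cqe_complete(struct toy_queue *q)    { (void)q; puts("CQE completion path"); }
static void toy_normal_complete(struct toy_queue *q) { (void)q; puts("non-CQE completion path"); }

static void toy_complete(struct toy_queue *q)
{
	/* Single source of truth: read the flag from the host,
	 * reached through queue->card->host, not a cached copy. */
	if (q->card->host->cqe_enabled)
		toy_cqe_complete(q);
	else
		toy_normal_complete(q);
}

int main(void)
{
	struct toy_host host = { .cqe_enabled = true };
	struct toy_card card = { .host = &host };
	struct toy_queue q = { .card = &card };

	toy_complete(&q);          /* prints "CQE completion path" */
	host.cqe_enabled = false;
	toy_complete(&q);          /* prints "non-CQE completion path" */
	return 0;
}
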
 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
 {
-	if (mq->use_cqe)
+	if (host->cqe_enabled)
 		return host->cqe_ops->cqe_wait_for_idle(host);
 
 	return mmc_blk_rw_wait(mq, NULL);
 		break;
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
-		if (mq->use_cqe)
+		if (host->cqe_enabled)
 			ret = mmc_blk_cqe_issue_rw_rq(mq, req);
 		else
 			ret = mmc_blk_mq_issue_rw_rq(mq, req);
 {
 	struct mmc_host *host = mq->card->host;
 
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		return mmc_cqe_issue_type(host, req);
 
 	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
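
The two hunks above make the same routing decision: with a CQE-capable host (and the host software queue, HSQ, not in use) the CQE code classifies the request, otherwise reads and writes take the asynchronous path and everything else is issued synchronously. A self-contained sketch of that classification (toy_* names are stand-ins, not kernel symbols):

#include <stdio.h>

enum toy_issue_type { TOY_ISSUE_SYNC, TOY_ISSUE_ASYNC, TOY_ISSUE_CQE };
enum toy_op { TOY_OP_READ, TOY_OP_WRITE, TOY_OP_FLUSH };

static enum toy_issue_type toy_issue_type(int cqe_enabled, int hsq_enabled,
					  enum toy_op op)
{
	/* CQE owns classification unless the host software queue is used. */
	if (cqe_enabled && !hsq_enabled)
		return TOY_ISSUE_CQE;

	/* Reads and writes go down the asynchronous path... */
	if (op == TOY_OP_READ || op == TOY_OP_WRITE)
		return TOY_ISSUE_ASYNC;

	/* ...everything else (e.g. flush) is issued synchronously. */
	return TOY_ISSUE_SYNC;
}

int main(void)
{
	printf("%d\n", toy_issue_type(1, 0, TOY_OP_READ));  /* TOY_ISSUE_CQE (2) */
	printf("%d\n", toy_issue_type(0, 0, TOY_OP_READ));  /* TOY_ISSUE_ASYNC (1) */
	printf("%d\n", toy_issue_type(0, 0, TOY_OP_FLUSH)); /* TOY_ISSUE_SYNC (0) */
	return 0;
}
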
 	bool ignore_tout;
 
 	spin_lock_irqsave(&mq->lock, flags);
-	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
+	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
 	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
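
The timeout handler samples the flags under mq->lock so its verdict stays consistent with an in-flight recovery; anything other than a genuine CQE timeout just resets the timer. A userspace sketch of the same decision, with a pthread mutex standing in for the spinlock (toy_* names are hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum toy_eh { TOY_EH_RESET_TIMER, TOY_EH_CQE_TIMED_OUT };

struct toy_mq {
	pthread_mutex_t lock;
	bool recovery_needed;
	bool cqe_enabled;
	bool hsq_enabled;
};

static enum toy_eh toy_timed_out(struct toy_mq *mq)
{
	bool ignore_tout;

	/* Sample the flags under the lock so the decision cannot race
	 * with a concurrent recovery setting recovery_needed. */
	pthread_mutex_lock(&mq->lock);
	ignore_tout = mq->recovery_needed || !mq->cqe_enabled ||
		      mq->hsq_enabled;
	pthread_mutex_unlock(&mq->lock);

	return ignore_tout ? TOY_EH_RESET_TIMER : TOY_EH_CQE_TIMED_OUT;
}

int main(void)
{
	struct toy_mq mq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cqe_enabled = true,
	};

	printf("%d\n", toy_timed_out(&mq));	/* TOY_EH_CQE_TIMED_OUT (1) */
	mq.recovery_needed = true;
	printf("%d\n", toy_timed_out(&mq));	/* TOY_EH_RESET_TIMER (0) */
	return 0;
}
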
 	mq->in_recovery = true;
 
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		mmc_blk_cqe_recovery(mq);
 	else
 		mmc_blk_mq_recovery(mq);
 	if (get_card)
 		mmc_get_card(card, &mq->ctx);
 
-	if (mq->use_cqe) {
+	if (host->cqe_enabled) {
 		host->retune_now = host->need_retune && cqe_retune_ok &&
 				   !host->hold_retune;
 	}
 	int ret;
 
 	mq->card = card;
-	mq->use_cqe = host->cqe_enabled;
 
 	spin_lock_init(&mq->lock);
 	/*
 	 * The queue depth for CQE must match the hardware because the request
 	 * tag is used to index the hardware queue.
 	 */
-	if (mq->use_cqe && !host->hsq_enabled)
+	if (host->cqe_enabled && !host->hsq_enabled)
 		mq->tag_set.queue_depth =
 			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
 	else
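
The comment in the last hunk carries the key constraint: with CQE the blk-mq tag doubles as the hardware task index, so the software queue depth must not exceed what both the card (ext_csd.cmdq_depth) and the controller (host->cqe_qdepth) can index; min_t() picks the smaller. A small sketch of that clamp with illustrative numbers (not taken from real hardware):

#include <stdio.h>

#define toy_min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int cmdq_depth = 32;	/* card's command-queue depth */
	int cqe_qdepth = 16;	/* controller's CQE queue depth */

	/* Tags index the hardware queue, so clamp to the smaller limit. */
	int queue_depth = toy_min(cmdq_depth, cqe_qdepth);

	printf("queue_depth = %d\n", queue_depth);	/* prints 16 */
	return 0;
}
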