// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

/* Retry a request in process context after request_atomic() returned -EBUSY. */
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}
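
/*
 * Dispatch the request at the head of the software queue to the host
 * controller. Note that this can be reached from the host's completion
 * path (often interrupt context) via mmc_hsq_finalize_request(), which is
 * why ->request_atomic() is preferred when the host driver implements it;
 * a busy card is instead retried from the workqueue above, to avoid
 * time-consuming work in atomic context.
 */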
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq || hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns -EBUSY, the card may be busy now, so
	 * switch to non-atomic context to try again for this unusual case,
	 * to avoid time-consuming operations in atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		hsq->tail_tag = HSQ_INVALID_TAG;
		return;
	}

	tag = hsq->tag_slot[hsq->next_tag];
	hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
	hsq->next_tag = tag;
}

static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new request in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible, after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the slot of the current completed request to make room for a
	 * new request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
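
/*
 * A minimal sketch (hypothetical driver and function names, not from this
 * file) of how a host driver's completion path is expected to hook into
 * mmc_hsq_finalize_request(): let HSQ finalize its own requests first and
 * fall back to the normal completion path otherwise.
 *
 *	static void foo_request_done(struct mmc_host *mmc,
 *				     struct mmc_request *mrq)
 *	{
 *		if (mmc_hsq_finalize_request(mmc, mrq))
 *			return;
 *
 *		mmc_request_done(mmc, mrq);
 *	}
 */
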
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag as the current request tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG) {
		hsq->next_tag = tag;
		hsq->tail_tag = tag;
		hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
	} else {
		hsq->tag_slot[hsq->tail_tag] = tag;
		hsq->tail_tag = tag;
	}

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}
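
/*
 * Worked example of the tag bookkeeping above, assuming an initially empty
 * queue that receives requests with tags 2, 0 and 1:
 *
 *	queue tag 2: next_tag = 2, tail_tag = 2, tag_slot[2] = INVALID
 *	queue tag 0: tag_slot[2] = 0, tail_tag = 0
 *	queue tag 1: tag_slot[0] = 1, tail_tag = 1
 *
 * The tag_slot[] array thus forms a singly linked list from next_tag (the
 * head) to tail_tag (the tail), which mmc_hsq_update_next_tag() follows as
 * requests complete, so dispatch happens in FIFO order: 2, 0, 1.
 */
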
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	int i;

	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;
	hsq->tail_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	for (i = 0; i < HSQ_NUM_SLOTS; i++)
		hsq->tag_slot[i] = HSQ_INVALID_TAG;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
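
/*
 * A minimal sketch of how a host driver wires up HSQ at probe time; the
 * device pointers below are hypothetical, but the call sequence is what
 * mmc_hsq_init() expects:
 *
 *	struct mmc_hsq *hsq;
 *	int ret;
 *
 *	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *	if (!hsq)
 *		return -ENOMEM;
 *
 *	ret = mmc_hsq_init(hsq, host->mmc);
 *	if (ret)
 *		return ret;
 *
 * The driver's system suspend/resume callbacks would then call
 * mmc_hsq_suspend() and mmc_hsq_resume() respectively.
 */
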
MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");