drivers/mmc/host/mmc_hsq.c
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

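/*
 * Retry the current request from process context after ->request_atomic()
 * returned -EBUSY in mmc_hsq_pump_requests().
 */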
static void mmc_hsq_retry_handler(struct work_struct *work)
{
        struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
        struct mmc_host *mmc = hsq->mmc;

        mmc->ops->request(mmc, hsq->mrq);
}

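/*
 * Dispatch the next queued request to the host controller, preferring the
 * host's ->request_atomic() callback when it is implemented.
 */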
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
        struct mmc_host *mmc = hsq->mmc;
        struct hsq_slot *slot;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&hsq->lock, flags);

        /* Make sure we are not already running a request now */
        if (hsq->mrq || hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        /* Make sure there are remaining requests that need to be pumped */
        if (!hsq->qcnt || !hsq->enabled) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        slot = &hsq->slot[hsq->next_tag];
        hsq->mrq = slot->mrq;
        hsq->qcnt--;

        spin_unlock_irqrestore(&hsq->lock, flags);

        if (mmc->ops->request_atomic)
                ret = mmc->ops->request_atomic(mmc, hsq->mrq);
        else
                mmc->ops->request(mmc, hsq->mrq);

        /*
         * If request_atomic() returns BUSY, the card may be busy now, so
         * retry from non-atomic context for this unusual case, to avoid
         * time-consuming operations in the atomic context.
         *
         * Note: we just give a warning for other error cases, since the host
         * driver will handle them.
         */
        if (ret == -EBUSY)
                schedule_work(&hsq->retry_work);
        else
                WARN_ON_ONCE(ret);
}

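/*
 * Advance hsq->next_tag to the next pending tag by following the tag_slot[]
 * chain, or mark both next_tag and tail_tag invalid when the queue is empty.
 */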
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
        int tag;

        /*
         * If there are no remaining requests in the software queue, then set
         * an invalid tag.
         */
        if (!remains) {
                hsq->next_tag = HSQ_INVALID_TAG;
                hsq->tail_tag = HSQ_INVALID_TAG;
                return;
        }

        tag = hsq->tag_slot[hsq->next_tag];
        hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
        hsq->next_tag = tag;
}

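/*
 * Finish bookkeeping after a completed request: clear the in-flight request,
 * pick the next tag, wake up any idle waiter and, unless recovery is in
 * progress, pump the next queued request.
 */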
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
        unsigned long flags;
        int remains;

        spin_lock_irqsave(&hsq->lock, flags);

        remains = hsq->qcnt;
        hsq->mrq = NULL;

        /* Update the next available tag to be queued. */
        mmc_hsq_update_next_tag(hsq, remains);

        if (hsq->waiting_for_idle && !remains) {
                hsq->waiting_for_idle = false;
                wake_up(&hsq->wait_queue);
        }

        /* Do not pump new requests in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        spin_unlock_irqrestore(&hsq->lock, flags);

        /*
         * Try to pump a new request to the host controller as fast as
         * possible after completing the previous request.
         */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return false;
        }

        /*
         * Clear the completed slot's request to make room for a new request.
         */
        hsq->slot[hsq->next_tag].mrq = NULL;

        spin_unlock_irqrestore(&hsq->lock, flags);

        mmc_cqe_request_done(mmc, hsq->mrq);

        mmc_hsq_post_request(hsq);

        return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

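/*
 * Enter recovery mode: halt the queue so no new requests are pumped until
 * mmc_hsq_recovery_finish() is called.
 */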
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        hsq->recovery_halt = true;

        spin_unlock_irqrestore(&hsq->lock, flags);
}

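/*
 * Leave recovery mode and resume pumping if requests are still pending in
 * the software queue.
 */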
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int remains;

        spin_lock_irq(&hsq->lock);

        hsq->recovery_halt = false;
        remains = hsq->qcnt;

        spin_unlock_irq(&hsq->lock);

        /*
         * Try to pump new requests if there are requests pending in the
         * software queue after finishing recovery.
         */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}

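/*
 * Queue a new request into the software queue, link its tag into the pending
 * tag chain and try to pump it to the host controller.
 */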
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int tag = mrq->tag;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -ESHUTDOWN;
        }

        /* Do not queue any new requests in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->slot[tag].mrq = mrq;

        /*
         * Set the next tag to the current request's tag if there is no next
         * tag available yet.
         */
        if (hsq->next_tag == HSQ_INVALID_TAG) {
                hsq->next_tag = tag;
                hsq->tail_tag = tag;
                hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
        } else {
                hsq->tag_slot[hsq->tail_tag] = tag;
                hsq->tail_tag = tag;
        }

        hsq->qcnt++;

        spin_unlock_irq(&hsq->lock);

        mmc_hsq_pump_requests(hsq);

        return 0;
}

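/*
 * Let the host driver post-process (unprepare) the request's data if it
 * implements ->post_req().
 */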
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
        if (mmc->ops->post_req)
                mmc->ops->post_req(mmc, mrq, 0);
}

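/*
 * Check whether the software queue is idle (no in-flight request and nothing
 * queued) or halted for recovery, and note whether a caller is waiting for
 * the queue to become idle.
 */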
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
        bool is_idle;

        spin_lock_irq(&hsq->lock);

        is_idle = (!hsq->mrq && !hsq->qcnt) ||
                hsq->recovery_halt;

        *ret = hsq->recovery_halt ? -EBUSY : 0;
        hsq->waiting_for_idle = !is_idle;

        spin_unlock_irq(&hsq->lock);

        return is_idle;
}

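/*
 * Block until the software queue becomes idle; returns -EBUSY if the queue
 * was halted for recovery.
 */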
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int ret;

        wait_event(hsq->wait_queue,
                   mmc_hsq_queue_is_idle(hsq, &ret));

        return ret;
}

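/*
 * Disable the software queue: wait up to 500ms for it to become idle, then
 * mark it disabled so no new requests are accepted.
 */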
static void mmc_hsq_disable(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        u32 timeout = 500;
        int ret;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return;
        }

        spin_unlock_irq(&hsq->lock);

        ret = wait_event_timeout(hsq->wait_queue,
                                 mmc_hsq_queue_is_idle(hsq, &ret),
                                 msecs_to_jiffies(timeout));
        if (ret == 0) {
                pr_warn("could not stop mmc software queue\n");
                return;
        }

        spin_lock_irq(&hsq->lock);

        hsq->enabled = false;

        spin_unlock_irq(&hsq->lock);
}

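/*
 * Enable the software queue; returns -EBUSY if it is already enabled.
 */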
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct mmc_hsq *hsq = mmc->cqe_private;

        spin_lock_irq(&hsq->lock);

        if (hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->enabled = true;

        spin_unlock_irq(&hsq->lock);

        return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
        .cqe_enable = mmc_hsq_enable,
        .cqe_disable = mmc_hsq_disable,
        .cqe_request = mmc_hsq_request,
        .cqe_post_req = mmc_hsq_post_req,
        .cqe_wait_for_idle = mmc_hsq_wait_for_idle,
        .cqe_recovery_start = mmc_hsq_recovery_start,
        .cqe_recovery_finish = mmc_hsq_recovery_finish,
};

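/*
 * Initialize the software queue for the given host: allocate the request
 * slots, reset the tag bookkeeping and install mmc_hsq_ops as the host's
 * cqe_ops.
 */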
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
        int i;

        hsq->num_slots = HSQ_NUM_SLOTS;
        hsq->next_tag = HSQ_INVALID_TAG;
        hsq->tail_tag = HSQ_INVALID_TAG;

        hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
                                 sizeof(struct hsq_slot), GFP_KERNEL);
        if (!hsq->slot)
                return -ENOMEM;

        hsq->mmc = mmc;
        hsq->mmc->cqe_private = hsq;
        mmc->cqe_ops = &mmc_hsq_ops;

        for (i = 0; i < HSQ_NUM_SLOTS; i++)
                hsq->tag_slot[i] = HSQ_INVALID_TAG;

        INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
        spin_lock_init(&hsq->lock);
        init_waitqueue_head(&hsq->wait_queue);

        return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
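
/*
 * Usage sketch (illustrative only): a host driver would typically allocate
 * a struct mmc_hsq in its probe path and hand it to mmc_hsq_init(). The
 * "pdev" and "host" names below are hypothetical and not part of this file.
 *
 *        struct mmc_hsq *hsq;
 *        int ret;
 *
 *        hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *        if (!hsq)
 *                return -ENOMEM;
 *
 *        ret = mmc_hsq_init(hsq, host->mmc);
 *        if (ret)
 *                return ret;
 */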

void mmc_hsq_suspend(struct mmc_host *mmc)
{
        mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
        return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");