/*==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
 * $Revision: #10 $
 * $Date: 2011/10/20 $
 * $Change: 1869464 $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

/** @file
 * This file contains the Descriptor DMA support implementation for host mode.
 */

#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"

extern bool microframe_schedule;

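/*
 * The index helpers below rely on the FrameList and descriptor list sizes
 * being powers of two, so wrap-around is a simple mask. For example, with
 * this driver's usual MAX_FRLIST_EN_NUM of 64, frame number 0x1234 maps to
 * FrameList entry 0x1234 & 0x3f == 0x34.
 */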
static inline uint8_t frame_list_idx(uint16_t frame)
{
        return (frame & (MAX_FRLIST_EN_NUM - 1));
}

static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
{
        return (idx + inc) &
            (((speed ==
               DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
              MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
{
        return (idx - inc) &
            (((speed ==
               DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
              MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t max_desc_num(dwc_otg_qh_t * qh)
{
        return (((qh->ep_type == UE_ISOCHRONOUS)
                 && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
                ? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}

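/*
 * For high-speed endpoints qh->interval is expressed in microframes, so the
 * FrameList increment is interval/8 rounded up; e.g. an interval of 16
 * uframes advances the FrameList index by 2 entries per service.
 */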
static inline uint16_t frame_incr_val(dwc_otg_qh_t * qh)
{
        return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
                ? ((qh->interval + 8 - 1) / 8)
                : qh->interval);
}

static int desc_list_alloc(struct device *dev, dwc_otg_qh_t * qh)
{
        qh->desc_list = (dwc_otg_host_dma_desc_t *)
            DWC_DMA_ALLOC(dev, sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
                          &qh->desc_list_dma);

        if (!qh->desc_list) {
                DWC_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
                return -DWC_E_NO_MEMORY;
        }

        dwc_memset(qh->desc_list, 0x00,
                   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));

        qh->n_bytes =
            (uint32_t *) DWC_ALLOC(sizeof(uint32_t) * max_desc_num(qh));

        if (!qh->n_bytes) {
                DWC_ERROR
                    ("%s: Failed to allocate array for descriptors' actual size values\n",
                     __func__);
                DWC_DMA_FREE(dev, sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
                             qh->desc_list, qh->desc_list_dma);
                qh->desc_list = NULL;
                return -DWC_E_NO_MEMORY;
        }

        return 0;
}

static void desc_list_free(struct device *dev, dwc_otg_qh_t * qh)
{
        if (qh->desc_list) {
                DWC_DMA_FREE(dev,
                             sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
                             qh->desc_list, qh->desc_list_dma);
                qh->desc_list = NULL;
        }

        if (qh->n_bytes) {
                DWC_FREE(qh->n_bytes);
                qh->n_bytes = NULL;
        }
}

static int frame_list_alloc(dwc_otg_hcd_t * hcd)
{
        struct device *dev = dwc_otg_hcd_to_dev(hcd);

        if (hcd->frame_list)
                return 0;

        hcd->frame_list = DWC_DMA_ALLOC(dev, 4 * MAX_FRLIST_EN_NUM,
                                        &hcd->frame_list_dma);
        if (!hcd->frame_list) {
                DWC_ERROR("%s: Frame List allocation failed\n", __func__);
                return -DWC_E_NO_MEMORY;
        }

        dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);

        return 0;
}

static void frame_list_free(dwc_otg_hcd_t * hcd)
{
        struct device *dev = dwc_otg_hcd_to_dev(hcd);

        if (!hcd->frame_list)
                return;

        DWC_DMA_FREE(dev, 4 * MAX_FRLIST_EN_NUM, hcd->frame_list, hcd->frame_list_dma);
        hcd->frame_list = NULL;
}

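/*
 * HCFG.FrListEn encodes the FrameList length as 8 entries times a power of
 * two: 0 -> 8, 1 -> 16, 2 -> 32, 3 -> 64, which is what the switch below
 * programs from fr_list_en.
 */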
static void per_sched_enable(dwc_otg_hcd_t * hcd, uint16_t fr_list_en)
{
        hcfg_data_t hcfg;

        hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

        if (hcfg.b.perschedena) {
                /* already enabled */
                return;
        }

        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
                        hcd->frame_list_dma);

        switch (fr_list_en) {
        case 64:
                hcfg.b.frlisten = 3;
                break;
        case 32:
                hcfg.b.frlisten = 2;
                break;
        case 16:
                hcfg.b.frlisten = 1;
                break;
        case 8:
                hcfg.b.frlisten = 0;
                break;
        default:
                break;
        }

        hcfg.b.perschedena = 1;

        DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}

static void per_sched_disable(dwc_otg_hcd_t * hcd)
{
        hcfg_data_t hcfg;

        hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

        if (!hcfg.b.perschedena) {
                /* already disabled */
                return;
        }
        hcfg.b.perschedena = 0;

        DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}

/*
 * Activates/Deactivates FrameList entries for the channel
 * based on the endpoint's servicing period.
 */
void update_frame_list(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, uint8_t enable)
{
        uint16_t i, j, inc;
        dwc_hc_t *hc = NULL;

        if (!qh->channel) {
                DWC_ERROR("qh->channel = %p", qh->channel);
                return;
        }

        if (!hcd) {
                DWC_ERROR("hcd = %p", hcd);
                return;
        }

        if (!hcd->frame_list) {
                DWC_ERROR("hcd->frame_list = %p", hcd->frame_list);
                return;
        }

        hc = qh->channel;
        inc = frame_incr_val(qh);
        if (qh->ep_type == UE_ISOCHRONOUS)
                i = frame_list_idx(qh->sched_frame);
        else
                i = 0;

        j = i;
        do {
                if (enable)
                        hcd->frame_list[j] |= (1 << hc->hc_num);
                else
                        hcd->frame_list[j] &= ~(1 << hc->hc_num);
                j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
        }
        while (j != i);
        if (!enable)
                return;
        hc->schinfo = 0;
        if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
                j = 1;
                /* TODO - check this */
                inc = (8 + qh->interval - 1) / qh->interval;
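                /*
                 * Worked example: with qh->interval == 2 uframes this gives
                 * inc == 4 and the loop below sets bits 0, 2, 4 and 6, i.e.
                 * schinfo == 0x55, servicing every other microframe.
                 */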
                for (i = 0; i < inc; i++) {
                        hc->schinfo |= j;
                        j = j << qh->interval;
                }
        } else {
                hc->schinfo = 0xff;
        }
}

#if 1
void dump_frame_list(dwc_otg_hcd_t * hcd)
{
        int i = 0;
        DWC_PRINTF("--FRAME LIST (hex) --\n");
        for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
                DWC_PRINTF("%x\t", hcd->frame_list[i]);
                if (!(i % 8) && i)
                        DWC_PRINTF("\n");
        }
        DWC_PRINTF("\n----\n");
}
#endif

static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        dwc_hc_t *hc = qh->channel;
        if (dwc_qh_is_non_per(qh)) {
                if (!microframe_schedule)
                        hcd->non_periodic_channels--;
                else
                        hcd->available_host_channels++;
        } else
                update_frame_list(hcd, qh, 0);

        /*
         * The condition is added to prevent a double cleanup attempt in case
         * of device disconnect. See channel cleanup in
         * dwc_otg_hcd_disconnect_cb().
         */
        if (hc->qh) {
                dwc_otg_hc_cleanup(hcd->core_if, hc);
                DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
                hc->qh = NULL;
        }

        qh->channel = NULL;
        qh->ntd = 0;

        if (qh->desc_list) {
                dwc_memset(qh->desc_list, 0x00,
                           sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
        }
}

/**
 * Initializes a QH structure's Descriptor DMA related members.
 * Allocates memory for the descriptor list.
 * On the first periodic QH, allocates memory for the FrameList
 * and enables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        struct device *dev = dwc_otg_hcd_to_dev(hcd);
        int retval = 0;

        if (qh->do_split) {
                DWC_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
                return -1;
        }

        retval = desc_list_alloc(dev, qh);

        if ((retval == 0)
            && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
                if (!hcd->frame_list) {
                        retval = frame_list_alloc(hcd);
                        /* Enable periodic schedule on first periodic QH */
                        if (retval == 0)
                                per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
                }
        }

        qh->ntd = 0;

        return retval;
}

/**
 * Frees descriptor list memory associated with the QH.
 * If the QH is periodic and the last one, frees FrameList memory
 * and disables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to free.
 */
void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        struct device *dev = dwc_otg_hcd_to_dev(hcd);

        desc_list_free(dev, qh);

        /*
         * The channel may still be assigned here. This has been seen on Isoc
         * URB dequeue: the channel was halted but no subsequent ChHalted
         * interrupt arrived to release it, so when this point is reached
         * from the endpoint disable routine the channel remains assigned.
         */
        if (qh->channel)
                release_channel_ddma(hcd, qh);

        if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
            && (microframe_schedule || !hcd->periodic_channels) && hcd->frame_list) {

                per_sched_disable(hcd);
                frame_list_free(hcd);
        }
}

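/*
 * Worked example, assuming MAX_DMA_DESC_NUM_HS_ISOC == 256 (its usual value
 * in this driver's headers): the HS ISOC list holds 32 sets of 8
 * descriptors, so frame_idx 33 selects set 1 and returns descriptor index 8.
 * For FS the frame index maps directly, modulo the generic list size.
 */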
static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
{
        if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
                /*
                 * Descriptor set (8 descriptors) index,
                 * which is 8-aligned.
                 */
                return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
        } else {
                return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
        }
}

/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static uint16_t calc_starting_frame(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
                                    uint8_t * skip_frames)
{
        uint16_t frame = 0;
        hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);

        /* sched_frame is always a frame number (not a uFrame), both in FS and HS! */

        /*
         * skip_frames is used to limit the number of activated descriptors,
         * to avoid the situation where the HC services the last activated
         * descriptor first.
         * Example for FS:
         * The current frame is 1, the scheduled frame is 3. Since the HC
         * always fetches the descriptor corresponding to curr_frame+1, the
         * descriptor corresponding to frame 2 will be fetched. If the number
         * of descriptors is max=64 (or greater) the list will be fully
         * programmed with Active descriptors and it is a possible (rare)
         * case that the latest descriptor (considering rollback)
         * corresponding to frame 2 will be serviced first. The HS case is
         * more probable because, in fact, up to 11 uframes (16 in the code)
         * may be skipped.
         */
        if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
                /*
                 * Consider the uframe counter as well, to start the transfer
                 * ASAP. If half of the frame has elapsed skip 2 frames,
                 * otherwise just 1 frame.
                 * The starting descriptor index must be 8-aligned, so
                 * if the current frame is near completion the next one
                 * is skipped as well.
                 */
                if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
                        *skip_frames = 2 * 8;
                        frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
                } else {
                        *skip_frames = 1 * 8;
                        frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
                }

                frame = dwc_full_frame_num(frame);
        } else {
                /*
                 * Two frames are skipped for FS - the current and the next.
                 * But for descriptor programming, 1 frame (descriptor) is
                 * enough, see the example above.
                 */
                *skip_frames = 1;
                frame = dwc_frame_num_inc(hcd->frame_number, 2);
        }

        return frame;
}

/*
 * Calculate the initial descriptor index for an isochronous transfer
 * based on the scheduled frame.
 */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        uint16_t frame = 0, fr_idx, fr_idx_tmp;
        uint8_t skip_frames = 0;
        /*
         * With the current ISOC processing algorithm the channel is released
         * when there are no more QTDs in the list (qh->ntd == 0).
         * Thus this function is called only when qh->ntd == 0 and
         * qh->channel == NULL.
         *
         * So the qh->channel != NULL branch is not used; it has deliberately
         * not been removed from the source file. It is required for another
         * possible approach: do not disable and release the channel when an
         * ISOC session completes, just move the QH to the inactive schedule
         * until a new QTD arrives. On a new QTD, the QH is moved back to the
         * 'ready' schedule, and the starting frame and therefore the starting
         * desc_index are recalculated. In this case the channel is released
         * only on ep_disable.
         */

        /* Calculate the starting descriptor index. For INTERRUPT endpoints it is always 0. */
        if (qh->channel) {
                frame = calc_starting_frame(hcd, qh, &skip_frames);
                /*
                 * Calculate the initial descriptor index based on the
                 * FrameList current bitmap and the servicing period.
                 */
                fr_idx_tmp = frame_list_idx(frame);
                fr_idx =
                    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
                     fr_idx_tmp)
                    % frame_incr_val(qh);
                fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
        } else {
                qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
                fr_idx = frame_list_idx(qh->sched_frame);
        }

        qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

        return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

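/*
 * 1023 is the FS isochronous maximum packet size; 3072 corresponds to a
 * high-bandwidth HS isochronous endpoint (3 transactions of 1024 bytes
 * per microframe).
 */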
#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4

static void init_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
                               uint8_t skip_frames)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

        idx = qh->td_last;
        inc = qh->interval;
        n_desc = 0;

        ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
        if (skip_frames && !qh->channel)
                ntd_max = ntd_max - skip_frames / qh->interval;
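        /*
         * Example, assuming a 256-descriptor HS ISOC list: an endpoint with
         * an interval of 8 uframes gives ntd_max == 32 transfer descriptors
         * per traversal of the list.
         */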

        max_xfer_size =
            (qh->dev_speed ==
             DWC_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
            MAX_ISOC_XFER_SIZE_FS;

        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
                while ((qh->ntd < ntd_max)
                       && (qtd->isoc_frame_index_last <
                           qtd->urb->packet_count)) {

                        dma_desc = &qh->desc_list[idx];
                        dwc_memset(dma_desc, 0x00, sizeof(dwc_otg_host_dma_desc_t));

                        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

                        if (frame_desc->length > max_xfer_size)
                                qh->n_bytes[idx] = max_xfer_size;
                        else
                                qh->n_bytes[idx] = frame_desc->length;
                        dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
                        dma_desc->status.b_isoc.a = 1;
                        dma_desc->status.b_isoc.sts = 0;

                        dma_desc->buf = qtd->urb->dma + frame_desc->offset;

                        qh->ntd++;

                        qtd->isoc_frame_index_last++;

#ifdef  ISOC_URB_GIVEBACK_ASAP
                        /*
                         * Set IOC for each descriptor corresponding to the
                         * last frame of the URB.
                         */
                        if (qtd->isoc_frame_index_last ==
                            qtd->urb->packet_count)
                                dma_desc->status.b_isoc.ioc = 1;
#endif
                        idx = desclist_idx_inc(idx, inc, qh->dev_speed);
                        n_desc++;
                }
                qtd->in_process = 1;
        }

        qh->td_last = idx;

#ifdef  ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for the last descriptor if the descriptor list is full */
        if (qh->ntd == ntd_max) {
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
                qh->desc_list[idx].status.b_isoc.ioc = 1;
        }
#else
        /*
         * Set the IOC bit for only one descriptor.
         * Always try to stay ahead of HW processing,
         * i.e. on IOC generation the driver activates the next descriptors,
         * while the core continues to process the descriptors following the
         * one with IOC set.
         */
        if (n_desc > DESCNUM_THRESHOLD) {
                /*
                 * Move IOC "up". Required even if there is only one QTD
                 * in the list, because QTDs might continue to be queued,
                 * but only one was queued during activation.
                 * Actually more than one QTD might be in the list if this
                 * function is called from XferCompletion - QTDs were queued
                 * during HW processing of the previous descriptor chunk.
                 */
                idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
        } else {
                /*
                 * Set the IOC for the latest descriptor
                 * if either the number of descriptors is not greater than
                 * the threshold or no more new descriptors were activated.
                 */
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
        }

        qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}

static void init_non_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        dwc_hc_t *hc;
        dwc_otg_host_dma_desc_t *dma_desc;
        dwc_otg_qtd_t *qtd;
        int num_packets, len, n_desc = 0;

        hc = qh->channel;

        /*
         * Start with hc->xfer_buff as initialized in assign_and_init_hc().
         * If an SG transfer consists of multiple URBs, this pointer is
         * re-assigned to the buffer of the currently processed QTD.
         * For a non-SG request there is always one QTD active.
         */
        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {

                if (n_desc) {
                        /* SG request - more than 1 QTD */
                        hc->xfer_buff = (uint8_t *)(uintptr_t)qtd->urb->dma +
                                        qtd->urb->actual_length;
                        hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
                }

                qtd->n_desc = 0;

                do {
                        dma_desc = &qh->desc_list[n_desc];
                        len = hc->xfer_len;
                        if (len > MAX_DMA_DESC_SIZE)
                                len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;

                        if (hc->ep_is_in) {
                                if (len > 0) {
                                        num_packets = (len + hc->max_packet - 1) / hc->max_packet;
                                } else {
                                        /* Need 1 packet for transfer length of 0. */
                                        num_packets = 1;
                                }
                                /* Always program an integral # of max packets for IN transfers. */
                                len = num_packets * hc->max_packet;
                        }

                        dma_desc->status.b.n_bytes = len;

                        qh->n_bytes[n_desc] = len;

                        if ((qh->ep_type == UE_CONTROL)
                            && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
                                dma_desc->status.b.sup = 1;     /* Setup Packet */

                        dma_desc->status.b.a = 1;       /* Active descriptor */
                        dma_desc->status.b.sts = 0;

                        dma_desc->buf =
                            ((unsigned long)hc->xfer_buff & 0xffffffff);

                        /*
                         * Last descriptor (or single) of an IN transfer
                         * with actual size less than MaxPacket.
                         */
                        if (len > hc->xfer_len) {
                                hc->xfer_len = 0;
                        } else {
                                hc->xfer_buff += len;
                                hc->xfer_len -= len;
                        }

                        qtd->n_desc++;
                        n_desc++;
                }
                while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));

                qtd->in_process = 1;

                if (qh->ep_type == UE_CONTROL)
                        break;

                if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
                        break;
        }

        if (n_desc) {
                /* Request Transfer Complete interrupt for the last descriptor */
                qh->desc_list[n_desc - 1].status.b.ioc = 1;
                /* End of List indicator */
                qh->desc_list[n_desc - 1].status.b.eol = 1;

                hc->ntd = n_desc;
        }
}

/**
 * For Control and Bulk endpoints, initializes the descriptor list
 * and starts the transfer.
 *
 * For Interrupt and Isochronous endpoints, initializes the descriptor list
 * then updates the FrameList, marking the appropriate entries as active.
 * For Isochronous, the starting descriptor index is calculated based
 * on the scheduled frame, but only for the first transfer descriptor within
 * a session. The transfer is then started by enabling the channel.
 * For Isochronous endpoints the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 */
void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        /* Channel is already assigned */
        dwc_hc_t *hc = qh->channel;
        uint8_t skip_frames = 0;

        switch (hc->ep_type) {
        case DWC_OTG_EP_TYPE_CONTROL:
        case DWC_OTG_EP_TYPE_BULK:
                init_non_isoc_dma_desc(hcd, qh);

                dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                break;
        case DWC_OTG_EP_TYPE_INTR:
                init_non_isoc_dma_desc(hcd, qh);

                update_frame_list(hcd, qh, 1);

                dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                break;
        case DWC_OTG_EP_TYPE_ISOC:
                if (!qh->ntd)
                        skip_frames = recalc_initial_desc_idx(hcd, qh);

                init_isoc_dma_desc(hcd, qh, skip_frames);

                if (!hc->xfer_started) {
                        update_frame_list(hcd, qh, 1);

                        /*
                         * Always set ntd to the maximum instead of the actual
                         * size, otherwise ntd would change while the channel
                         * is enabled, which is not recommended.
                         */
                        hc->ntd = max_desc_num(qh);
                        /* Enable the channel only once for ISOC */
                        dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                }

                break;
        default:
                break;
        }
}

static void complete_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
                                    dwc_hc_t * hc,
                                    dwc_otg_hc_regs_t * hc_regs,
                                    dwc_otg_halt_status_e halt_status)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd, *qtd_tmp;
        dwc_otg_qh_t *qh;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, remain;
        uint8_t urb_compl;

        qh = hc->qh;
        idx = qh->td_first;

        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
                    qtd->in_process = 0;
                return;
        } else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
                   (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
                /*
                 * The channel is halted in these error cases, which are
                 * considered serious issues.
                 * Complete all URBs marking all frames as failed,
                 * irrespective of whether some of the descriptors (frames)
                 * succeeded or not.
                 * Pass the error code to the completion routine as well, to
                 * update urb->status; some class drivers might use it to
                 * stop queuing transfer requests.
                 */
                int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
                    ? (-DWC_E_IO)
                    : (-DWC_E_OVERFLOW);

                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
                        for (idx = 0; idx < qtd->urb->packet_count; idx++) {
                                frame_desc = &qtd->urb->iso_descs[idx];
                                frame_desc->status = err;
                        }
                        hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
                        dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
                }
                return;
        }

        DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {

                if (!qtd->in_process)
                        break;

                urb_compl = 0;

                do {
                        dma_desc = &qh->desc_list[idx];

                        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
                        remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;

                        if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
                                /*
                                 * XactError or inability to complete all the
                                 * transactions in the scheduled
                                 * micro-frame/frame; both are indicated by
                                 * DMA_DESC_STS_PKTERR.
                                 */
                                qtd->urb->error_count++;
                                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                                frame_desc->status = -DWC_E_PROTOCOL;
                        } else {
                                /* Success */
                                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                                frame_desc->status = 0;
                        }

                        if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
                                /*
                                 * urb->status is not used for isoc transfers here.
                                 * The individual frame_desc statuses are used instead.
                                 */
                                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
                                dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

                                /*
                                 * This check is necessary because urb_dequeue
                                 * can be called from the urb complete callback
                                 * (a sound driver, for example). All pending
                                 * URBs are dequeued there, so no need for
                                 * further processing.
                                 */
                                if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                                        return;
                                }

                                urb_compl = 1;
                        }

                        qh->ntd--;

                        /* Stop if IOC requested descriptor reached */
                        if (dma_desc->status.b_isoc.ioc) {
                                idx = desclist_idx_inc(idx, qh->interval, hc->speed);
                                goto stop_scan;
                        }

                        idx = desclist_idx_inc(idx, qh->interval, hc->speed);

                        if (urb_compl)
                                break;
                }
                while (idx != qh->td_first);
        }
stop_scan:
        qh->td_first = idx;
}

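/*
 * For IN transfers the core decrements the descriptor's n_bytes field as
 * data is received, so (programmed n_bytes - remain) below is the number of
 * bytes actually transferred; a non-zero remainder means a short packet,
 * which completes the URB just as reaching urb->length does.
 */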
uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t * hcd,
                                       dwc_hc_t * hc,
                                       dwc_otg_qtd_t * qtd,
                                       dwc_otg_host_dma_desc_t * dma_desc,
                                       dwc_otg_halt_status_e halt_status,
                                       uint32_t n_bytes, uint8_t * xfer_done)
{
        uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
        dwc_otg_hcd_urb_t *urb = qtd->urb;

        if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
                urb->status = -DWC_E_IO;
                return 1;
        }
        if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
                switch (halt_status) {
                case DWC_OTG_HC_XFER_STALL:
                        urb->status = -DWC_E_PIPE;
                        break;
                case DWC_OTG_HC_XFER_BABBLE_ERR:
                        urb->status = -DWC_E_OVERFLOW;
                        break;
                case DWC_OTG_HC_XFER_XACT_ERR:
                        urb->status = -DWC_E_PROTOCOL;
                        break;
                default:
                        DWC_ERROR("%s: Unhandled descriptor error status (%d)\n", __func__,
                                  halt_status);
                        break;
                }
                return 1;
        }

        if (dma_desc->status.b.a == 1) {
                DWC_DEBUGPL(DBG_HCDV,
                            "Active descriptor encountered on channel %d\n",
                            hc->hc_num);
                return 0;
        }

        if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
                if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
                        urb->actual_length += n_bytes - remain;
                        if (remain || urb->actual_length == urb->length) {
                                /*
                                 * For the Control Data stage do not set
                                 * urb->status = 0, to prevent the URB
                                 * callback. Set it when the Status phase is
                                 * done. See below.
                                 */
                                *xfer_done = 1;
                        }
                } else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
                /* No handling for SETUP stage */
        } else {
                /* BULK and INTR */
                urb->actual_length += n_bytes - remain;
                if (remain || urb->actual_length == urb->length) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
        }

        return 0;
}

static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
                                        dwc_hc_t * hc,
                                        dwc_otg_hc_regs_t * hc_regs,
                                        dwc_otg_halt_status_e halt_status)
{
        dwc_otg_hcd_urb_t *urb = NULL;
        dwc_otg_qtd_t *qtd, *qtd_tmp;
        dwc_otg_qh_t *qh;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint32_t n_bytes, n_desc, i;
        uint8_t failed = 0, xfer_done;

        n_desc = 0;

        qh = hc->qh;

        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
                        qtd->in_process = 0;
                }
                return;
        }

        DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {

                urb = qtd->urb;

                n_bytes = 0;
                xfer_done = 0;

                for (i = 0; i < qtd->n_desc; i++) {
                        dma_desc = &qh->desc_list[n_desc];

                        n_bytes = qh->n_bytes[n_desc];

                        failed =
                            update_non_isoc_urb_state_ddma(hcd, hc, qtd,
                                                           dma_desc,
                                                           halt_status, n_bytes,
                                                           &xfer_done);

                        if (failed
                            || (xfer_done
                                && (urb->status != -DWC_E_IN_PROGRESS))) {

                                hcd->fops->complete(hcd, urb->priv, urb,
                                                    urb->status);
                                dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

                                if (failed)
                                        goto stop_scan;
                        } else if (qh->ep_type == UE_CONTROL) {
                                if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
                                        if (urb->length > 0) {
                                                qtd->control_phase = DWC_OTG_CONTROL_DATA;
                                        } else {
                                                qtd->control_phase = DWC_OTG_CONTROL_STATUS;
                                        }
                                        DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
                                } else if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
                                        if (xfer_done) {
                                                qtd->control_phase = DWC_OTG_CONTROL_STATUS;
                                                DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
                                        } else if (i + 1 == qtd->n_desc) {
                                                /*
                                                 * Last descriptor for Control data stage which is
                                                 * not completed yet.
                                                 */
                                                dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
                                        }
                                }
                        }

                        n_desc++;
                }
        }

stop_scan:

        if (qh->ep_type != UE_CONTROL) {
                /*
                 * Reset the data toggle for bulk and interrupt endpoints
                 * in case of stall. See handle_hc_stall_intr().
                 */
                if (halt_status == DWC_OTG_HC_XFER_STALL)
                        qh->data_toggle = DWC_OTG_HC_PID_DATA0;
                else
                        dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
        }

        if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
                hcint_data_t hcint;
                hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
                if (hcint.b.nyet) {
                        /*
                         * Got a NYET on the last transaction of the transfer.
                         * It means that the endpoint should be in the PING
                         * state at the beginning of the next transfer.
                         */
                        qh->ping_state = 1;
                        clear_hc_int(hc_regs, nyet);
                }
        }
}

/**
 * This function is called from interrupt handlers.
 * Scans the descriptor list, updates the URB's status and
 * calls the completion routine for the URB if it's done.
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until
 * the end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 *
 * Calls transaction selection routines to activate pending transfers.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param hc Host channel the transfer is completed on.
 * @param hc_regs Host channel registers.
 * @param halt_status Reason the channel is being halted,
 *                    or just XferComplete for isochronous transfers.
 */
void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t * hcd,
                                    dwc_hc_t * hc,
                                    dwc_otg_hc_regs_t * hc_regs,
                                    dwc_otg_halt_status_e halt_status)
{
        uint8_t continue_isoc_xfer = 0;
        dwc_otg_transaction_type_e tr_type;
        dwc_otg_qh_t *qh = hc->qh;

        if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {

                complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

                /* Release the channel if halted or session completed */
                if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
                    DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {

                        /* Halt the channel if session completed */
                        if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
                                dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
                        }

                        release_channel_ddma(hcd, qh);
                        dwc_otg_hcd_qh_remove(hcd, qh);
                } else {
                        /* Keep in the assigned schedule to continue the transfer */
                        DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
                                           &qh->qh_list_entry);
                        continue_isoc_xfer = 1;
                }
                /** @todo Consider the case when the period exceeds the FrameList size.
                 *  The Frame Rollover interrupt should be used.
                 */
        } else {
                /* Scan the descriptor list to complete the URB(s), then release the channel */
                complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

                release_channel_ddma(hcd, qh);
                dwc_otg_hcd_qh_remove(hcd, qh);

                if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
                        /* Add back to the inactive non-periodic schedule on normal completion */
                        dwc_otg_hcd_qh_add(hcd, qh);
                }
        }
        tr_type = dwc_otg_hcd_select_transactions(hcd);
        if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
                if (continue_isoc_xfer) {
                        if (tr_type == DWC_OTG_TRANSACTION_NONE) {
                                tr_type = DWC_OTG_TRANSACTION_PERIODIC;
                        } else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC) {
                                tr_type = DWC_OTG_TRANSACTION_ALL;
                        }
                }
                dwc_otg_hcd_queue_transactions(hcd, tr_type);
        }
}

#endif /* DWC_DEVICE_ONLY */