usb: dwc_otg: Fix invalid pointer casting
[platform/kernel/linux-rpi.git] drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c
/*==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
 * $Revision: #10 $
 * $Date: 2011/10/20 $
 * $Change: 1869464 $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

/** @file
 * This file contains the Descriptor DMA support implementation for host mode.
 */

#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"

extern bool microframe_schedule;

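/* Map a frame number to its FrameList entry index (power-of-2 list size). */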
static inline uint8_t frame_list_idx(uint16_t frame)
{
	return (frame & (MAX_FRLIST_EN_NUM - 1));
}

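/*
 * Increment/decrement a descriptor list index with wrap-around. The list
 * length depends on the endpoint: MAX_DMA_DESC_NUM_HS_ISOC entries for
 * HS isochronous, MAX_DMA_DESC_NUM_GENERIC otherwise (both powers of 2).
 */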
static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
{
	return (idx + inc) &
	    (((speed ==
	       DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
	      MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
{
	return (idx - inc) &
	    (((speed ==
	       DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
	      MAX_DMA_DESC_NUM_GENERIC) - 1);
}

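/* Descriptor list length for the QH; HS isochronous endpoints get the longer list. */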
static inline uint16_t max_desc_num(dwc_otg_qh_t * qh)
{
	return (((qh->ep_type == UE_ISOCHRONOUS)
		 && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
		? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}

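/*
 * FrameList increment between successive entries serviced by this QH:
 * the endpoint interval, converted from uframes to frames for HS.
 */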
static inline uint16_t frame_incr_val(dwc_otg_qh_t * qh)
{
	return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
		? ((qh->interval + 8 - 1) / 8)
		: qh->interval);
}

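/*
 * Allocate the descriptor list and the n_bytes bookkeeping array for a QH.
 * Returns 0 on success, -DWC_E_NO_MEMORY on allocation failure.
 */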
static int desc_list_alloc(struct device *dev, dwc_otg_qh_t * qh)
{
	int retval = 0;

	qh->desc_list = (dwc_otg_host_dma_desc_t *)
	    DWC_DMA_ALLOC(dev, sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
			  &qh->desc_list_dma);

	if (!qh->desc_list) {
		DWC_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
		return -DWC_E_NO_MEMORY;
	}

	dwc_memset(qh->desc_list, 0x00,
		   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));

	qh->n_bytes =
	    (uint32_t *) DWC_ALLOC(sizeof(uint32_t) * max_desc_num(qh));

	if (!qh->n_bytes) {
		retval = -DWC_E_NO_MEMORY;
		DWC_ERROR("%s: Failed to allocate array for descriptors' actual sizes\n",
			  __func__);
	}

	return retval;
}

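/* Free the descriptor list and the n_bytes array, if allocated. */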
static void desc_list_free(struct device *dev, dwc_otg_qh_t * qh)
{
	if (qh->desc_list) {
		DWC_DMA_FREE(dev, max_desc_num(qh), qh->desc_list,
			     qh->desc_list_dma);
		qh->desc_list = NULL;
	}

	if (qh->n_bytes) {
		DWC_FREE(qh->n_bytes);
		qh->n_bytes = NULL;
	}
}

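/*
 * Allocate and zero the FrameList (one 32-bit channel bitmap per entry)
 * used for periodic scheduling. A no-op if the list already exists.
 */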
static int frame_list_alloc(dwc_otg_hcd_t * hcd)
{
	struct device *dev = dwc_otg_hcd_to_dev(hcd);

	if (hcd->frame_list)
		return 0;

	hcd->frame_list = DWC_DMA_ALLOC(dev, 4 * MAX_FRLIST_EN_NUM,
					&hcd->frame_list_dma);
	if (!hcd->frame_list) {
		DWC_ERROR("%s: Frame List allocation failed\n", __func__);
		return -DWC_E_NO_MEMORY;
	}

	dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);

	return 0;
}

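/* Free the FrameList, if allocated. */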
static void frame_list_free(dwc_otg_hcd_t * hcd)
{
	struct device *dev = dwc_otg_hcd_to_dev(hcd);

	if (!hcd->frame_list)
		return;

	DWC_DMA_FREE(dev, 4 * MAX_FRLIST_EN_NUM, hcd->frame_list, hcd->frame_list_dma);
	hcd->frame_list = NULL;
}

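/*
 * Program the FrameList base address into HFLBADDR and its length into
 * HCFG, then set HCFG.PerSchedEna to start periodic scheduling.
 * fr_list_en must be 8, 16, 32 or 64 entries.
 */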
static void per_sched_enable(dwc_otg_hcd_t * hcd, uint16_t fr_list_en)
{
	hcfg_data_t hcfg;

	hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

	if (hcfg.b.perschedena) {
		/* already enabled */
		return;
	}

	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
			hcd->frame_list_dma);

	switch (fr_list_en) {
	case 64:
		hcfg.b.frlisten = 3;
		break;
	case 32:
		hcfg.b.frlisten = 2;
		break;
	case 16:
		hcfg.b.frlisten = 1;
		break;
	case 8:
		hcfg.b.frlisten = 0;
		break;
	default:
		/* Unsupported FrameList length - leave frlisten unchanged */
		break;
	}

	hcfg.b.perschedena = 1;

	DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}

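/* Clear HCFG.PerSchedEna to stop periodic scheduling. */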
static void per_sched_disable(dwc_otg_hcd_t * hcd)
{
	hcfg_data_t hcfg;

	hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

	if (!hcfg.b.perschedena) {
		/* already disabled */
		return;
	}
	hcfg.b.perschedena = 0;

	DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}

/*
 * Activates/Deactivates FrameList entries for the channel
 * based on the endpoint servicing period.
 */
void update_frame_list(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, uint8_t enable)
{
	uint16_t i, j, inc;
	dwc_hc_t *hc = NULL;

	if (!qh->channel) {
		DWC_ERROR("qh->channel = %p", qh->channel);
		return;
	}

	if (!hcd) {
		DWC_ERROR("hcd = %p", hcd);
		return;
	}

	if (!hcd->frame_list) {
		DWC_ERROR("hcd->frame_list = %p", hcd->frame_list);
		return;
	}

	hc = qh->channel;
	inc = frame_incr_val(qh);
	if (qh->ep_type == UE_ISOCHRONOUS)
		i = frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hcd->frame_list[j] |= (1 << hc->hc_num);
		else
			hcd->frame_list[j] &= ~(1 << hc->hc_num);
		j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
	} while (j != i);

	if (!enable)
		return;

	hc->schinfo = 0;
	if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			hc->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		hc->schinfo = 0xff;
	}
}

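/* Debug helper: print the FrameList channel bitmaps. */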
#if 1
void dump_frame_list(dwc_otg_hcd_t * hcd)
{
	int i = 0;
	DWC_PRINTF("--FRAME LIST (hex) --\n");
	for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
		DWC_PRINTF("%x\t", hcd->frame_list[i]);
		if (!(i % 8) && i)
			DWC_PRINTF("\n");
	}
	DWC_PRINTF("\n----\n");
}
#endif

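/*
 * Release the host channel assigned to the QH: update the channel
 * bookkeeping, clear the QH's FrameList entries for periodic endpoints,
 * return the channel to the free list and wipe the descriptor list.
 */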
static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	dwc_hc_t *hc = qh->channel;

	if (dwc_qh_is_non_per(qh)) {
		if (!microframe_schedule)
			hcd->non_periodic_channels--;
		else
			hcd->available_host_channels++;
	} else
		update_frame_list(hcd, qh, 0);

	/*
	 * The condition prevents a double cleanup attempt in case of device
	 * disconnect. See channel cleanup in dwc_otg_hcd_disconnect_cb().
	 */
	if (hc->qh) {
		dwc_otg_hc_cleanup(hcd->core_if, hc);
		DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
		hc->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list) {
		dwc_memset(qh->desc_list, 0x00,
			   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
	}
}

/**
 * Initializes a QH structure's Descriptor DMA related members.
 * Allocates memory for the descriptor list.
 * On the first periodic QH, allocates memory for the FrameList
 * and enables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	struct device *dev = dwc_otg_hcd_to_dev(hcd);
	int retval = 0;

	if (qh->do_split) {
		DWC_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
		return -1;
	}

	retval = desc_list_alloc(dev, qh);

	if ((retval == 0)
	    && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
		if (!hcd->frame_list) {
			retval = frame_list_alloc(hcd);
			/* Enable periodic schedule on first periodic QH */
			if (retval == 0)
				per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
		}
	}

	qh->ntd = 0;

	return retval;
}

/**
 * Frees descriptor list memory associated with the QH.
 * If the QH is periodic and the last one, frees FrameList memory
 * and disables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to free.
 */
void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	struct device *dev = dwc_otg_hcd_to_dev(hcd);

	desc_list_free(dev, qh);

	/*
	 * The channel may still be assigned. Seen on Isoc URB dequeue:
	 * the channel is halted but there is no subsequent ChHalted
	 * interrupt to release it, so when we get here from the endpoint
	 * disable routine the channel remains assigned.
	 */
	if (qh->channel)
		release_channel_ddma(hcd, qh);

	if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
	    && (microframe_schedule || !hcd->periodic_channels) && hcd->frame_list) {
		per_sched_disable(hcd);
		frame_list_free(hcd);
	}
}

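/*
 * Convert a FrameList index to a descriptor list index. For HS
 * isochronous endpoints each frame owns a set of 8 descriptors (one per
 * uframe), so the result is 8-aligned.
 */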
static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
{
	if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
		/*
		 * Descriptor set (8 descriptors) index,
		 * which is 8-aligned.
		 */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	} else {
		return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
	}
}

/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static uint16_t calc_starting_frame(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
				    uint8_t * skip_frames)
{
	uint16_t frame = 0;
	hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);

	/* sched_frame is always a frame number (not a uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * The current frame is 1, the scheduled frame is 3. Since the HC
	 * always fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater) the list will be fully
	 * programmed with Active descriptors and it is a possible (rare) case
	 * that the latest descriptor (considering rollover) corresponding to
	 * frame 2 will be serviced first. The HS case is more probable
	 * because up to 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
		/*
		 * Consider the uframe counter as well, to start the transfer
		 * ASAP. If half of the frame has elapsed, skip 2 frames,
		 * otherwise just 1 frame.
		 * The starting descriptor index must be 8-aligned, so
		 * if the current frame is nearly complete, the next one
		 * is skipped as well.
		 */
		if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
		}

		frame = dwc_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see the example above.
		 */
		*skip_frames = 1;
		frame = dwc_frame_num_inc(hcd->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate the initial descriptor index for an isochronous transfer,
 * based on the scheduled frame.
 */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	uint16_t frame = 0, fr_idx, fr_idx_tmp;
	uint8_t skip_frames = 0;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when there are no more QTDs in the list (qh->ntd == 0).
	 * Thus this function is called only when qh->ntd == 0 and
	 * qh->channel == NULL.
	 *
	 * So the qh->channel != NULL branch is not used; it is kept in the
	 * source file for another possible approach: do not disable and
	 * release the channel when the ISOC session completes, just move
	 * the QH to the inactive schedule until a new QTD arrives.
	 * On a new QTD, the QH is moved back to the 'ready' schedule, and
	 * the starting frame and therefore the starting desc_index are
	 * recalculated. In that case the channel is released only on
	 * ep_disable.
	 */

	/* Calculate starting descriptor index. For an INTERRUPT endpoint it is always 0. */
	if (qh->channel) {
		frame = calc_starting_frame(hcd, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on the FrameList
		 * current bitmap and the servicing period.
		 */
		fr_idx_tmp = frame_list_idx(frame);
		fr_idx =
		    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
		     fr_idx_tmp)
		    % frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
	} else {
		qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
		fr_idx = frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4

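/*
 * Fill the descriptor list with active isochronous descriptors for every
 * queued QTD, starting at qh->td_last and limited to ntd_max entries.
 * IOC placement follows the ISOC_URB_GIVEBACK_ASAP policy below.
 */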
static void init_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
			       uint8_t skip_frames)
{
	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
	dwc_otg_qtd_t *qtd;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
	if (skip_frames && !qh->channel)
		ntd_max = ntd_max - skip_frames / qh->interval;

	max_xfer_size =
	    (qh->dev_speed ==
	     DWC_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
	    MAX_ISOC_XFER_SIZE_FS;

	DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		while ((qh->ntd < ntd_max)
		       && (qtd->isoc_frame_index_last <
			   qtd->urb->packet_count)) {

			dma_desc = &qh->desc_list[idx];
			dwc_memset(dma_desc, 0x00, sizeof(dwc_otg_host_dma_desc_t));

			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

			if (frame_desc->length > max_xfer_size)
				qh->n_bytes[idx] = max_xfer_size;
			else
				qh->n_bytes[idx] = frame_desc->length;
			dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
			dma_desc->status.b_isoc.a = 1;
			dma_desc->status.b_isoc.sts = 0;

			dma_desc->buf = qtd->urb->dma + frame_desc->offset;

			qh->ntd++;

			qtd->isoc_frame_index_last++;

#ifdef	ISOC_URB_GIVEBACK_ASAP
			/*
			 * Set IOC for each descriptor corresponding to the
			 * last frame of the URB.
			 */
			if (qtd->isoc_frame_index_last ==
			    qtd->urb->packet_count)
				dma_desc->status.b_isoc.ioc = 1;
#endif
			idx = desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef	ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for the last descriptor if the descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status.b_isoc.ioc = 1;
	}
#else
	/*
	 * Set the IOC bit for only one descriptor.
	 * Always try to stay ahead of HW processing, i.e. on IOC generation
	 * the driver activates the next descriptors, while the core continues
	 * to process the descriptors following the one with IOC set.
	 */
	if (n_desc > DESCNUM_THRESHOLD) {
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * while during activation only one was queued.
		 * Actually more than one QTD might be in the list if this
		 * function is called from XferComplete - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
	} else {
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no new
		 * descriptors were activated.
		 */
		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
	}

	qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}

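/*
 * Build the descriptor chain for a Control/Bulk/Interrupt transfer,
 * splitting each QTD's buffer into MAX_DMA_DESC_SIZE-bounded chunks.
 * The last descriptor gets the IOC and EOL bits set.
 */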
static void init_non_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	dwc_hc_t *hc;
	dwc_otg_host_dma_desc_t *dma_desc;
	dwc_otg_qtd_t *qtd;
	int num_packets, len, n_desc = 0;

	hc = qh->channel;

	/*
	 * Start with hc->xfer_buff as initialized in assign_and_init_hc().
	 * If an SG transfer consists of multiple URBs, this pointer is
	 * re-assigned to the buffer of the currently processed QTD.
	 * For a non-SG request there is always one QTD active.
	 */
	DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {

		if (n_desc) {
			/* SG request - more than 1 QTD */
			hc->xfer_buff = (uint8_t *)((uintptr_t)qtd->urb->dma + qtd->urb->actual_length);
			hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
		}

		qtd->n_desc = 0;

		do {
			dma_desc = &qh->desc_list[n_desc];
			len = hc->xfer_len;

			if (len > MAX_DMA_DESC_SIZE)
				len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;

			if (hc->ep_is_in) {
				if (len > 0) {
					num_packets = (len + hc->max_packet - 1) / hc->max_packet;
				} else {
					/* Need 1 packet for transfer length of 0. */
					num_packets = 1;
				}
				/* Always program an integral # of max packets for IN transfers. */
				len = num_packets * hc->max_packet;
			}

			dma_desc->status.b.n_bytes = len;

			qh->n_bytes[n_desc] = len;

			if ((qh->ep_type == UE_CONTROL)
			    && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
				dma_desc->status.b.sup = 1;	/* Setup Packet */

			dma_desc->status.b.a = 1;	/* Active descriptor */
			dma_desc->status.b.sts = 0;

			dma_desc->buf =
			    ((unsigned long)hc->xfer_buff & 0xffffffff);

			/*
			 * Last (or only) descriptor of an IN transfer
			 * with actual size less than MaxPacket.
			 */
			if (len > hc->xfer_len) {
				hc->xfer_len = 0;
			} else {
				hc->xfer_buff += len;
				hc->xfer_len -= len;
			}

			qtd->n_desc++;
			n_desc++;
		} while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));

		qtd->in_process = 1;

		if (qh->ep_type == UE_CONTROL)
			break;

		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		/* Request Transfer Complete interrupt for the last descriptor */
		qh->desc_list[n_desc - 1].status.b.ioc = 1;
		/* End of List indicator */
		qh->desc_list[n_desc - 1].status.b.eol = 1;

		hc->ntd = n_desc;
	}
}

/**
 * For Control and Bulk endpoints, initializes the descriptor list
 * and starts the transfer.
 *
 * For Interrupt and Isochronous endpoints, initializes the descriptor list
 * then updates the FrameList, marking the appropriate entries as active.
 * For Isochronous, the starting descriptor index is calculated based
 * on the scheduled frame, but only for the first transfer descriptor
 * within a session. Then the transfer is started via enabling the channel.
 * For an Isochronous endpoint the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 */
void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	/* Channel is already assigned */
	dwc_hc_t *hc = qh->channel;
	uint8_t skip_frames = 0;

	switch (hc->ep_type) {
	case DWC_OTG_EP_TYPE_CONTROL:
	case DWC_OTG_EP_TYPE_BULK:
		init_non_isoc_dma_desc(hcd, qh);

		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		break;
	case DWC_OTG_EP_TYPE_INTR:
		init_non_isoc_dma_desc(hcd, qh);

		update_frame_list(hcd, qh, 1);

		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		break;
	case DWC_OTG_EP_TYPE_ISOC:
		if (!qh->ntd)
			skip_frames = recalc_initial_desc_idx(hcd, qh);

		init_isoc_dma_desc(hcd, qh, skip_frames);

		if (!hc->xfer_started) {
			update_frame_list(hcd, qh, 1);

			/*
			 * Always set to max, instead of the actual size.
			 * Otherwise ntd would be changed while the channel
			 * is enabled, which is not recommended.
			 */
			hc->ntd = max_desc_num(qh);
			/* Enable channel only once for ISOC */
			dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		}
		break;
	default:
		break;
	}
}

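/*
 * Scan completed isochronous descriptors, update each frame's actual
 * length and status, and give back URBs whose frames are all serviced.
 * On AHB/Babble errors all queued URBs are completed with an error code.
 */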
static void complete_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
				    dwc_hc_t * hc,
				    dwc_otg_hc_regs_t * hc_regs,
				    dwc_otg_halt_status_e halt_status)
{
	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint16_t idx, remain;
	uint8_t urb_compl;

	qh = hc->qh;
	idx = qh->td_first;

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
		    qtd->in_process = 0;
		return;
	} else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
		   (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs, marking all frames as failed,
		 * irrespective of whether some of the descriptors (frames)
		 * succeeded or not.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to
		 * stop queueing transfer requests.
		 */
		int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
		    ? (-DWC_E_IO)
		    : (-DWC_E_OVERFLOW);

		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
			for (idx = 0; idx < qtd->urb->packet_count; idx++) {
				frame_desc = &qtd->urb->iso_descs[idx];
				frame_desc->status = err;
			}
			hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
			dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {

		if (!qtd->in_process)
			break;

		urb_compl = 0;

		do {
			dma_desc = &qh->desc_list[idx];

			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
			remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;

			if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
				/*
				 * XactError or unable to complete all the
				 * transactions in the scheduled
				 * micro-frame/frame, both indicated by
				 * DMA_DESC_STS_PKTERR.
				 */
				qtd->urb->error_count++;
				frame_desc->actual_length = qh->n_bytes[idx] - remain;
				frame_desc->status = -DWC_E_PROTOCOL;
			} else {
				/* Success */
				frame_desc->actual_length = qh->n_bytes[idx] - remain;
				frame_desc->status = 0;
			}

			if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
				/*
				 * urb->status is not used for isoc transfers here.
				 * The individual frame_desc statuses are used instead.
				 */
				hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				/*
				 * This check is necessary because urb_dequeue
				 * can be called from the urb complete callback
				 * (e.g. the sound driver). All pending URBs are
				 * dequeued there, so no need for further
				 * processing.
				 */
				if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
					return;
				}

				urb_compl = 1;
			}

			qh->ntd--;

			/* Stop if the IOC requested descriptor is reached */
			if (dma_desc->status.b_isoc.ioc) {
				idx = desclist_idx_inc(idx, qh->interval, hc->speed);
				goto stop_scan;
			}

			idx = desclist_idx_inc(idx, qh->interval, hc->speed);

			if (urb_compl)
				break;
		} while (idx != qh->td_first);
	}
stop_scan:
	qh->td_first = idx;
}

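/*
 * Update URB state from one completed non-isochronous descriptor.
 * Returns 1 on a fatal error (the URB must be completed with urb->status),
 * 0 otherwise; *xfer_done is set once the whole transfer is done.
 */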
uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t * hcd,
				       dwc_hc_t * hc,
				       dwc_otg_qtd_t * qtd,
				       dwc_otg_host_dma_desc_t * dma_desc,
				       dwc_otg_halt_status_e halt_status,
				       uint32_t n_bytes, uint8_t * xfer_done)
{
	uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
	dwc_otg_hcd_urb_t *urb = qtd->urb;

	if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
		urb->status = -DWC_E_IO;
		return 1;
	}
	if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
		switch (halt_status) {
		case DWC_OTG_HC_XFER_STALL:
			urb->status = -DWC_E_PIPE;
			break;
		case DWC_OTG_HC_XFER_BABBLE_ERR:
			urb->status = -DWC_E_OVERFLOW;
			break;
		case DWC_OTG_HC_XFER_XACT_ERR:
			urb->status = -DWC_E_PROTOCOL;
			break;
		default:
			DWC_ERROR("%s: Unhandled descriptor error status (%d)\n", __func__,
				  halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status.b.a == 1) {
		DWC_DEBUGPL(DBG_HCDV,
			    "Active descriptor encountered on channel %d\n",
			    hc->hc_num);
		return 0;
	}

	if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
		if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length == urb->length) {
				/*
				 * For the Control Data stage do not set
				 * urb->status = 0 to prevent the URB callback.
				 * Set it when the Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for the SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		if (remain || urb->actual_length == urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

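/*
 * Walk the descriptor list of a halted non-isochronous channel,
 * completing finished URBs, advancing the Control transfer phase, and
 * saving the data toggle / PING state for the next transfer.
 */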
static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
					dwc_hc_t * hc,
					dwc_otg_hc_regs_t * hc_regs,
					dwc_otg_halt_status_e halt_status)
{
	dwc_otg_hcd_urb_t *urb = NULL;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint32_t n_bytes, n_desc, i;
	uint8_t failed = 0, xfer_done;

	n_desc = 0;

	qh = hc->qh;

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
			qtd->in_process = 0;
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {

		urb = qtd->urb;

		n_bytes = 0;
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			dma_desc = &qh->desc_list[n_desc];

			n_bytes = qh->n_bytes[n_desc];

			failed =
			    update_non_isoc_urb_state_ddma(hcd, hc, qtd,
							   dma_desc,
							   halt_status, n_bytes,
							   &xfer_done);

			if (failed
			    || (xfer_done
				&& (urb->status != -DWC_E_IN_PROGRESS))) {

				hcd->fops->complete(hcd, urb->priv, urb,
						    urb->status);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				if (failed)
					goto stop_scan;
			} else if (qh->ep_type == UE_CONTROL) {
				if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
					if (urb->length > 0) {
						qtd->control_phase = DWC_OTG_CONTROL_DATA;
					} else {
						qtd->control_phase = DWC_OTG_CONTROL_STATUS;
					}
					DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
				} else if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
					if (xfer_done) {
						qtd->control_phase = DWC_OTG_CONTROL_STATUS;
						DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
					} else if (i + 1 == qtd->n_desc) {
						/*
						 * Last descriptor for the Control
						 * data stage which is not completed
						 * yet.
						 */
						dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
					}
				}
			}

			n_desc++;
		}
	}

stop_scan:

	if (qh->ep_type != UE_CONTROL) {
		/*
		 * Reset the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC_OTG_HC_XFER_STALL)
			qh->data_toggle = DWC_OTG_HC_PID_DATA0;
		else
			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
	}

	if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
		hcint_data_t hcint;
		hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
		if (hcint.b.nyet) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
			clear_hc_int(hc_regs, nyet);
		}
	}
}

/**
 * This function is called from interrupt handlers.
 * Scans the descriptor list, updates the URB's status and
 * calls the completion routine for the URB if it's done.
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until
 * the end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 *
 * Calls transaction selection routines to activate pending transfers.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param hc Host channel the transfer is completed on.
 * @param hc_regs Host channel registers.
 * @param halt_status Reason the channel is being halted,
 *                    or just XferComplete for isochronous transfers.
 */
void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t * hcd,
				    dwc_hc_t * hc,
				    dwc_otg_hc_regs_t * hc_regs,
				    dwc_otg_halt_status_e halt_status)
{
	uint8_t continue_isoc_xfer = 0;
	dwc_otg_transaction_type_e tr_type;
	dwc_otg_qh_t *qh = hc->qh;

	if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {

		complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
		    DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {

			/* Halt the channel if session completed */
			if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
				dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
			}

			release_channel_ddma(hcd, qh);
			dwc_otg_hcd_qh_remove(hcd, qh);
		} else {
			/* Keep in the assigned schedule to continue the transfer */
			DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
					   &qh->qh_list_entry);
			continue_isoc_xfer = 1;
		}
		/** @todo Consider the case when the period exceeds the FrameList size.
		 *  The Frame Rollover interrupt should be used.
		 */
	} else {
		/* Scan the descriptor list to complete the URB(s), then release the channel */
		complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

		release_channel_ddma(hcd, qh);
		dwc_otg_hcd_qh_remove(hcd, qh);

		if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
			/* Add back to the inactive non-periodic schedule on normal completion */
			dwc_otg_hcd_qh_add(hcd, qh);
		}
	}

	tr_type = dwc_otg_hcd_select_transactions(hcd);
	if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC_OTG_TRANSACTION_NONE) {
				tr_type = DWC_OTG_TRANSACTION_PERIODIC;
			} else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC) {
				tr_type = DWC_OTG_TRANSACTION_ALL;
			}
		}
		dwc_otg_hcd_queue_transactions(hcd, tr_type);
	}
}

#endif /* DWC_DEVICE_ONLY */