/*==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
 * $Revision: #10 $
 * $Date: 2011/10/20 $
 * $Change: 1869464 $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

/** @file
 * This file contains the Descriptor DMA support implementation for host mode.
 */

#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"
#include "dwc_otg_driver.h"

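/*
 * The frame list and descriptor list sizes used below are powers of two,
 * so "& (size - 1)" implements a cheap modulo for ring-style index
 * wrap-around. For example, with a 64-entry frame list,
 * frame_list_idx(66) == (66 & 63) == 2.
 */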
static inline uint8_t frame_list_idx(uint16_t frame)
{
        return (frame & (MAX_FRLIST_EN_NUM - 1));
}

static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
{
        return (idx + inc) &
            (((speed ==
               DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
              MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
{
        return (idx - inc) &
            (((speed ==
               DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
              MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t max_desc_num(dwc_otg_qh_t * qh)
{
        return (((qh->ep_type == UE_ISOCHRONOUS)
                 && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
                ? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}
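
/*
 * Returns how far to advance through the frame list per service interval.
 * For high-speed devices qh->interval is in micro-frames, so it is converted
 * to whole frames here (e.g. an interval of 16 uframes maps to 2 frame list
 * entries); for FS/LS devices the interval is already in frames.
 */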
static inline uint16_t frame_incr_val(dwc_otg_qh_t * qh)
{
        return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
                ? ((qh->interval + 8 - 1) / 8)
                : qh->interval);
}

static int desc_list_alloc(dwc_otg_qh_t * qh)
{
        int retval = 0;

        qh->desc_list = (dwc_otg_host_dma_desc_t *)
            DWC_DMA_ALLOC(get_hcd_device(), sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
                          &qh->desc_list_dma);

        if (!qh->desc_list) {
                retval = -DWC_E_NO_MEMORY;
                DWC_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
                return retval;
        }

        dwc_memset(qh->desc_list, 0x00,
                   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));

        qh->n_bytes =
            (uint32_t *) DWC_ALLOC(sizeof(uint32_t) * max_desc_num(qh));

        if (!qh->n_bytes) {
                retval = -DWC_E_NO_MEMORY;
                DWC_ERROR("%s: Failed to allocate array for descriptors' actual sizes\n",
                          __func__);
                /* Do not leak the descriptor list on partial failure. */
                DWC_DMA_FREE(get_hcd_device(), max_desc_num(qh), qh->desc_list,
                             qh->desc_list_dma);
                qh->desc_list = NULL;
        }
        return retval;
}

static void desc_list_free(dwc_otg_qh_t * qh)
{
        if (qh->desc_list) {
                DWC_DMA_FREE(get_hcd_device(), max_desc_num(qh), qh->desc_list,
                             qh->desc_list_dma);
                qh->desc_list = NULL;
        }

        if (qh->n_bytes) {
                DWC_FREE(qh->n_bytes);
                qh->n_bytes = NULL;
        }
}

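/*
 * The frame list is an array of MAX_FRLIST_EN_NUM 32-bit entries, one per
 * (micro)frame slot, which is why 4 * MAX_FRLIST_EN_NUM bytes are allocated.
 * Bit N of an entry tells the core to service host channel N in that slot
 * (see update_frame_list() below).
 */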
static int frame_list_alloc(dwc_otg_hcd_t * hcd)
{
        int retval = 0;

        if (hcd->frame_list)
                return 0;

        hcd->frame_list = DWC_DMA_ALLOC(get_hcd_device(), 4 * MAX_FRLIST_EN_NUM,
                                        &hcd->frame_list_dma);
        if (!hcd->frame_list) {
                retval = -DWC_E_NO_MEMORY;
                DWC_ERROR("%s: Frame List allocation failed\n", __func__);
                return retval;
        }

        dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);

        return retval;
}

static void frame_list_free(dwc_otg_hcd_t * hcd)
{
        if (!hcd->frame_list)
                return;

        DWC_DMA_FREE(get_hcd_device(), 4 * MAX_FRLIST_EN_NUM, hcd->frame_list, hcd->frame_list_dma);
        hcd->frame_list = NULL;
}

static void per_sched_enable(dwc_otg_hcd_t * hcd, uint16_t fr_list_en)
{
        hcfg_data_t hcfg;

        hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

        if (hcfg.b.perschedena) {
                /* already enabled */
                return;
        }

        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
                        hcd->frame_list_dma);

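        /*
         * HCFG.FrListEn encodes the frame list length:
         * 0 -> 8, 1 -> 16, 2 -> 32, 3 -> 64 entries.
         */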
        switch (fr_list_en) {
        case 64:
                hcfg.b.frlisten = 3;
                break;
        case 32:
                hcfg.b.frlisten = 2;
                break;
        case 16:
                hcfg.b.frlisten = 1;
                break;
        case 8:
                hcfg.b.frlisten = 0;
                break;
        default:
                break;
        }

        hcfg.b.perschedena = 1;

        DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}

static void per_sched_disable(dwc_otg_hcd_t * hcd)
{
        hcfg_data_t hcfg;

        hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

        if (!hcfg.b.perschedena) {
                /* already disabled */
                return;
        }

        hcfg.b.perschedena = 0;

        DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}

/*
 * Activates/Deactivates FrameList entries for the channel
 * based on the endpoint servicing period.
 */
void update_frame_list(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, uint8_t enable)
{
        uint16_t i, j, inc;
        dwc_hc_t *hc = NULL;

        if (!qh->channel) {
                DWC_ERROR("qh->channel = %p\n", qh->channel);
                return;
        }

        if (!hcd) {
                DWC_ERROR("hcd = %p\n", hcd);
                return;
        }

        if (!hcd->frame_list) {
                DWC_ERROR("hcd->frame_list = %p\n", hcd->frame_list);
                return;
        }

        hc = qh->channel;
        inc = frame_incr_val(qh);
        if (qh->ep_type == UE_ISOCHRONOUS)
                i = frame_list_idx(qh->sched_frame);
        else
                i = 0;

        j = i;
        do {
                if (enable)
                        hcd->frame_list[j] |= (1 << hc->hc_num);
                else
                        hcd->frame_list[j] &= ~(1 << hc->hc_num);
                j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
        } while (j != i);

        if (!enable)
                return;

        hc->schinfo = 0;
        if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
                j = 1;
                /* TODO - check this */
                inc = (8 + qh->interval - 1) / qh->interval;
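                /*
                 * Mark each micro-frame within the frame that this channel is
                 * serviced in. E.g. qh->interval == 2 uframes gives inc == 4
                 * iterations and schinfo == 0x55 (uframes 0, 2, 4 and 6 set).
                 */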
                for (i = 0; i < inc; i++) {
                        hc->schinfo |= j;
                        j = j << qh->interval;
                }
        } else {
                hc->schinfo = 0xff;
        }
}

#if 1
void dump_frame_list(dwc_otg_hcd_t * hcd)
{
        int i = 0;

        DWC_PRINTF("--FRAME LIST (hex) --\n");
        for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
                DWC_PRINTF("%x\t", hcd->frame_list[i]);
                if (!(i % 8) && i)
                        DWC_PRINTF("\n");
        }
        DWC_PRINTF("\n----\n");
}
#endif

static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        dwc_hc_t *hc = qh->channel;

        if (dwc_qh_is_non_per(qh))
                hcd->non_periodic_channels--;
        else
                update_frame_list(hcd, qh, 0);

        /*
         * This condition prevents a double cleanup attempt in case of a device
         * disconnect. See channel cleanup in dwc_otg_hcd_disconnect_cb().
         */
        if (hc->qh) {
                dwc_otg_hc_cleanup(hcd->core_if, hc);
                DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
                hc->qh = NULL;
        }

        qh->channel = NULL;
        qh->ntd = 0;

        if (qh->desc_list) {
                dwc_memset(qh->desc_list, 0x00,
                           sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
        }
}

/**
 * Initializes a QH structure's Descriptor DMA related members.
 * Allocates memory for the descriptor list.
 * On the first periodic QH, allocates memory for the FrameList
 * and enables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        int retval = 0;

        if (qh->do_split) {
                DWC_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
                return -1;
        }

        retval = desc_list_alloc(qh);

        if ((retval == 0)
            && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
                if (!hcd->frame_list) {
                        retval = frame_list_alloc(hcd);
                        /* Enable periodic schedule on first periodic QH */
                        if (retval == 0)
                                per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
                }
        }

        qh->ntd = 0;

        return retval;
}

/**
 * Frees descriptor list memory associated with the QH.
 * If the QH is the last periodic one, frees the FrameList memory
 * and disables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to free.
 */
void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        desc_list_free(qh);

        /*
         * The channel may still be assigned for some reason.
         * Seen on Isoc URB dequeue: the channel was halted but no subsequent
         * ChHalted interrupt arrived to release it, so when this is reached
         * from the endpoint disable routine the channel remains assigned.
         */
        if (qh->channel)
                release_channel_ddma(hcd, qh);

        if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
            && !hcd->periodic_channels && hcd->frame_list) {
                per_sched_disable(hcd);
                frame_list_free(hcd);
        }
}

static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
{
        if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
                /*
                 * Descriptor set (8 descriptors) index,
                 * which is 8-aligned.
                 */
                return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
        } else {
                return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
        }
}

/*
 * Determines the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static uint16_t calc_starting_frame(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
                                    uint8_t * skip_frames)
{
        uint16_t frame = 0;
        hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);

        /* sched_frame is always a frame number (not a uFrame) both in FS and HS! */

        /*
         * skip_frames is used to limit the number of activated descriptors to
         * avoid the situation where the HC services the last activated
         * descriptor first.
         * Example for FS:
         * The current frame is 1, the scheduled frame is 3. Since the HC always
         * fetches the descriptor corresponding to curr_frame+1, the descriptor
         * corresponding to frame 2 will be fetched. If the number of descriptors
         * is max=64 (or greater), the list will be fully programmed with Active
         * descriptors and it is possible (though rare) that the latest descriptor
         * (considering rollback) corresponding to frame 2 will be serviced first.
         * The HS case is more probable because, in fact, up to 11 uframes
         * (16 in the code) may be skipped.
         */
        if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
                /*
                 * Consider the uframe counter as well, to start the transfer ASAP.
                 * If half of the frame has elapsed, skip 2 frames, otherwise
                 * just 1 frame.
                 * The starting descriptor index must be 8-aligned, so
                 * if the current frame is near completion the next one
                 * is skipped as well.
                 */
                if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
                        *skip_frames = 2 * 8;
                        frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
                } else {
                        *skip_frames = 1 * 8;
                        frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
                }

                frame = dwc_full_frame_num(frame);
        } else {
                /*
                 * Two frames are skipped for FS - the current and the next.
                 * But for descriptor programming, 1 frame (descriptor) is enough,
                 * see the example above.
                 */
                *skip_frames = 1;
                frame = dwc_frame_num_inc(hcd->frame_number, 2);
        }

        return frame;
}

/*
 * Calculates the initial descriptor index for an isochronous transfer
 * based on the scheduled frame.
 */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        uint16_t frame = 0, fr_idx, fr_idx_tmp;
        uint8_t skip_frames = 0;

        /*
         * With the current ISOC processing algorithm the channel is released
         * when there are no more QTDs in the list (qh->ntd == 0).
         * Thus this function is called only when qh->ntd == 0 and
         * qh->channel == NULL.
         *
         * So the qh->channel != NULL branch is not used; it is just not removed
         * from the source file. It is required for another possible approach,
         * which is: do not disable and release the channel when the ISOC session
         * is completed, just move the QH to the inactive schedule until a new
         * QTD arrives. On a new QTD, the QH is moved back to the 'ready'
         * schedule, and the starting frame and therefore the starting desc_index
         * are recalculated. In this case the channel is released only on
         * ep_disable.
         */

        /* Calculate the starting descriptor index. For an INTERRUPT endpoint it is always 0. */
        if (qh->channel) {
                frame = calc_starting_frame(hcd, qh, &skip_frames);
                /*
                 * Calculate the initial descriptor index based on the FrameList
                 * current bitmap and the servicing period.
                 */
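                /*
                 * fr_idx becomes the first frame list index at or after the
                 * newly calculated starting frame that keeps the QH's servicing
                 * phase. E.g. with a 64-entry frame list, a period of 4 frames,
                 * frame_list_idx(qh->sched_frame) == 10 and a new start index
                 * fr_idx_tmp == 17: the phase offset is (64 + 10 - 17) % 4 == 1,
                 * so fr_idx == (1 + 17) % 64 == 18, and 18 % 4 == 10 % 4.
                 */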
                fr_idx_tmp = frame_list_idx(frame);
                fr_idx =
                    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
                     fr_idx_tmp)
                    % frame_incr_val(qh);
                fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
        } else {
                qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
                fr_idx = frame_list_idx(qh->sched_frame);
        }

        qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

        return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

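/*
 * Maximum isochronous payload the core can move per descriptor:
 * 1023 bytes per frame for full speed, and 3 * 1024 bytes per micro-frame
 * for high-speed (high-bandwidth) endpoints, matching the USB 2.0 limits.
 */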
#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4

static void init_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
                               uint8_t skip_frames)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

        idx = qh->td_last;
        inc = qh->interval;
        n_desc = 0;

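        /*
         * ntd_max is the maximum number of descriptors that can be active at
         * once: descriptors are spaced qh->interval slots apart in the ring,
         * so only every interval-th slot is usable. The frames skipped when a
         * session starts (no channel assigned yet) reduce that budget.
         */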
        ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
        if (skip_frames && !qh->channel)
                ntd_max = ntd_max - skip_frames / qh->interval;

        max_xfer_size =
            (qh->dev_speed ==
             DWC_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
            MAX_ISOC_XFER_SIZE_FS;

        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
                while ((qh->ntd < ntd_max)
                       && (qtd->isoc_frame_index_last <
                           qtd->urb->packet_count)) {

                        dma_desc = &qh->desc_list[idx];
                        dwc_memset(dma_desc, 0x00, sizeof(dwc_otg_host_dma_desc_t));

                        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

                        if (frame_desc->length > max_xfer_size)
                                qh->n_bytes[idx] = max_xfer_size;
                        else
                                qh->n_bytes[idx] = frame_desc->length;

                        dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
                        dma_desc->status.b_isoc.a = 1;
                        dma_desc->status.b_isoc.sts = 0;

                        dma_desc->buf = qtd->urb->dma + frame_desc->offset;

                        qh->ntd++;

                        qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
                        /*
                         * Set IOC for each descriptor corresponding to the
                         * last frame of the URB.
                         */
                        if (qtd->isoc_frame_index_last ==
                            qtd->urb->packet_count)
                                dma_desc->status.b_isoc.ioc = 1;
#endif
                        idx = desclist_idx_inc(idx, inc, qh->dev_speed);
                        n_desc++;
                }
                qtd->in_process = 1;
        }

        qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for the last descriptor if the descriptor list is full */
        if (qh->ntd == ntd_max) {
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
                qh->desc_list[idx].status.b_isoc.ioc = 1;
        }
#else
        /*
         * Set the IOC bit for only one descriptor.
         * Always try to stay ahead of HW processing, i.e. on IOC generation
         * the driver activates the next descriptors while the core continues
         * to process the descriptors following the one with IOC set.
         */
        if (n_desc > DESCNUM_THRESHOLD) {
                /*
                 * Move IOC "up". Required even if there is only one QTD
                 * in the list, because QTDs might continue to be queued,
                 * while during activation only one was queued.
                 * Actually more than one QTD might be in the list if this
                 * function is called from XferCompletion - QTDs were queued
                 * during HW processing of the previous descriptor chunk.
                 */
                idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
        } else {
                /*
                 * Set the IOC for the latest descriptor if either the number
                 * of descriptors is not greater than the threshold or no more
                 * new descriptors were activated.
                 */
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
        }

        qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}

static void init_non_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        dwc_hc_t *hc;
        dwc_otg_host_dma_desc_t *dma_desc;
        dwc_otg_qtd_t *qtd;
        int num_packets, len, n_desc = 0;

        hc = qh->channel;

        /*
         * Start with hc->xfer_buff as initialized in assign_and_init_hc().
         * If an SG transfer consists of multiple URBs, this pointer is
         * re-assigned to the buffer of the currently processed QTD.
         * For a non-SG request there is always one QTD active.
         */
        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {

                if (n_desc) {
                        /* SG request - more than 1 QTD */
                        hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
                        hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
                }

                qtd->n_desc = 0;

                do {
                        dma_desc = &qh->desc_list[n_desc];
                        len = hc->xfer_len;

                        if (len > MAX_DMA_DESC_SIZE)
                                len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;
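                        /*
                         * The clamp above leaves headroom of one packet minus
                         * one byte, so the IN-transfer rounding below can never
                         * push the descriptor size past MAX_DMA_DESC_SIZE.
                         */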

                        if (hc->ep_is_in) {
                                if (len > 0) {
                                        num_packets = (len + hc->max_packet - 1) / hc->max_packet;
                                } else {
                                        /* Need 1 packet for transfer length of 0. */
                                        num_packets = 1;
                                }
                                /* Always program an integral # of max packets for IN transfers. */
                                len = num_packets * hc->max_packet;
                        }
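                        /*
                         * E.g. an IN transfer of 1000 bytes with max_packet ==
                         * 512 is programmed as num_packets == 2, len == 1024;
                         * the short-read case is reconciled below via the
                         * len > hc->xfer_len check.
                         */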

                        dma_desc->status.b.n_bytes = len;

                        qh->n_bytes[n_desc] = len;

                        if ((qh->ep_type == UE_CONTROL)
                            && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
                                dma_desc->status.b.sup = 1;     /* Setup Packet */

                        dma_desc->status.b.a = 1;       /* Active descriptor */
                        dma_desc->status.b.sts = 0;

                        dma_desc->buf =
                            ((unsigned long)hc->xfer_buff & 0xffffffff);

                        /*
                         * Last (or only) descriptor of an IN transfer
                         * with an actual size less than MaxPacket.
                         */
                        if (len > hc->xfer_len) {
                                hc->xfer_len = 0;
                        } else {
                                hc->xfer_buff += len;
                                hc->xfer_len -= len;
                        }

                        qtd->n_desc++;
                        n_desc++;
                } while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));

                qtd->in_process = 1;

                if (qh->ep_type == UE_CONTROL)
                        break;

                if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
                        break;
        }

        if (n_desc) {
                /* Request a Transfer Complete interrupt for the last descriptor */
                qh->desc_list[n_desc - 1].status.b.ioc = 1;
                /* End of List indicator */
                qh->desc_list[n_desc - 1].status.b.eol = 1;

                hc->ntd = n_desc;
        }
}

/**
 * For Control and Bulk endpoints, initializes the descriptor list
 * and starts the transfer.
 *
 * For Interrupt and Isochronous endpoints, initializes the descriptor list
 * then updates the FrameList, marking the appropriate entries as active.
 * For Isochronous endpoints, the starting descriptor index is calculated based
 * on the scheduled frame, but only for the first transfer descriptor within a
 * session. The transfer is then started by enabling the channel.
 * For Isochronous endpoints the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 */
void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        /* Channel is already assigned */
        dwc_hc_t *hc = qh->channel;
        uint8_t skip_frames = 0;

        switch (hc->ep_type) {
        case DWC_OTG_EP_TYPE_CONTROL:
        case DWC_OTG_EP_TYPE_BULK:
                init_non_isoc_dma_desc(hcd, qh);

                dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                break;
        case DWC_OTG_EP_TYPE_INTR:
                init_non_isoc_dma_desc(hcd, qh);

                update_frame_list(hcd, qh, 1);

                dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                break;
        case DWC_OTG_EP_TYPE_ISOC:
                if (!qh->ntd)
                        skip_frames = recalc_initial_desc_idx(hcd, qh);

                init_isoc_dma_desc(hcd, qh, skip_frames);

                if (!hc->xfer_started) {
                        update_frame_list(hcd, qh, 1);

                        /*
                         * Always set ntd to the maximum, instead of the actual
                         * size, otherwise ntd would have to be changed while
                         * the channel is enabled, which is not recommended.
                         */
                        hc->ntd = max_desc_num(qh);
                        /* Enable the channel only once for ISOC */
                        dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                }
                break;
        default:
                break;
        }
}

static void complete_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
                                    dwc_hc_t * hc,
                                    dwc_otg_hc_regs_t * hc_regs,
                                    dwc_otg_halt_status_e halt_status)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd, *qtd_tmp;
        dwc_otg_qh_t *qh;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, remain;
        uint8_t urb_compl;

        qh = hc->qh;
        idx = qh->td_first;

        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
                    qtd->in_process = 0;
                return;
        } else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
                   (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
                /*
                 * The channel is halted in these error cases, which are
                 * considered serious issues.
                 * Complete all URBs marking all frames as failed, irrespective
                 * of whether some of the descriptors (frames) succeeded or not.
                 * Pass the error code to the completion routine as well, to
                 * update urb->status; some class drivers might use it to stop
                 * queuing transfer requests.
                 */
                int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
                    ? (-DWC_E_IO)
                    : (-DWC_E_OVERFLOW);

                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
                        for (idx = 0; idx < qtd->urb->packet_count; idx++) {
                                frame_desc = &qtd->urb->iso_descs[idx];
                                frame_desc->status = err;
                        }
                        hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
                        dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
                }
                return;
        }

        DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {

                if (!qtd->in_process)
                        break;

                urb_compl = 0;

                do {
                        dma_desc = &qh->desc_list[idx];

                        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
                        remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;
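                        /*
                         * For IN transfers the core writes the count of bytes
                         * it did not fill back into the descriptor's n_bytes
                         * field, so the actual length below is the scheduled
                         * size (qh->n_bytes[idx]) minus that remainder.
                         */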

                        if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
                                /*
                                 * XactError or failure to complete all the
                                 * transactions in the scheduled micro-frame/frame;
                                 * both are indicated by DMA_DESC_STS_PKTERR.
                                 */
                                qtd->urb->error_count++;
                                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                                frame_desc->status = -DWC_E_PROTOCOL;
                        } else {
                                /* Success */
                                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                                frame_desc->status = 0;
                        }

                        if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
                                /*
                                 * urb->status is not used for isoc transfers here.
                                 * The individual frame_desc status values are used instead.
                                 */
                                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
                                dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

                                /*
                                 * This check is necessary because urb_dequeue can be
                                 * called from the urb complete callback (the sound
                                 * driver is an example). All pending URBs are dequeued
                                 * there, so no further processing is needed.
                                 */
                                if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                                        return;
                                }

                                urb_compl = 1;
                        }

                        qh->ntd--;

                        /* Stop if the IOC-requesting descriptor is reached */
                        if (dma_desc->status.b_isoc.ioc) {
                                idx = desclist_idx_inc(idx, qh->interval, hc->speed);
                                goto stop_scan;
                        }

                        idx = desclist_idx_inc(idx, qh->interval, hc->speed);

                        if (urb_compl)
                                break;
                } while (idx != qh->td_first);
        }

stop_scan:
        qh->td_first = idx;
}

uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t * hcd,
                                       dwc_hc_t * hc,
                                       dwc_otg_qtd_t * qtd,
                                       dwc_otg_host_dma_desc_t * dma_desc,
                                       dwc_otg_halt_status_e halt_status,
                                       uint32_t n_bytes, uint8_t * xfer_done)
{
        uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
        dwc_otg_hcd_urb_t *urb = qtd->urb;

        if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
                urb->status = -DWC_E_IO;
                return 1;
        }

        if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
                switch (halt_status) {
                case DWC_OTG_HC_XFER_STALL:
                        urb->status = -DWC_E_PIPE;
                        break;
                case DWC_OTG_HC_XFER_BABBLE_ERR:
                        urb->status = -DWC_E_OVERFLOW;
                        break;
                case DWC_OTG_HC_XFER_XACT_ERR:
                        urb->status = -DWC_E_PROTOCOL;
                        break;
                default:
                        DWC_ERROR("%s: Unhandled descriptor error status (%d)\n", __func__,
                                  halt_status);
                        break;
                }
                return 1;
        }

        if (dma_desc->status.b.a == 1) {
                DWC_DEBUGPL(DBG_HCDV,
                            "Active descriptor encountered on channel %d\n",
                            hc->hc_num);
                return 0;
        }

        if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
                if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
                        urb->actual_length += n_bytes - remain;
                        if (remain || urb->actual_length == urb->length) {
                                /*
                                 * For the Control Data stage do not set urb->status = 0
                                 * to prevent the URB callback. Set it when the Status
                                 * phase is done. See below.
                                 */
                                *xfer_done = 1;
                        }
                } else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
                /* No handling for the SETUP stage */
        } else {
                /* BULK and INTR */
                urb->actual_length += n_bytes - remain;
                if (remain || urb->actual_length == urb->length) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
        }

        return 0;
}

static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
                                        dwc_hc_t * hc,
                                        dwc_otg_hc_regs_t * hc_regs,
                                        dwc_otg_halt_status_e halt_status)
{
        dwc_otg_hcd_urb_t *urb = NULL;
        dwc_otg_qtd_t *qtd, *qtd_tmp;
        dwc_otg_qh_t *qh;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint32_t n_bytes, n_desc, i;
        uint8_t failed = 0, xfer_done;

        n_desc = 0;

        qh = hc->qh;

        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
                        qtd->in_process = 0;
                }
                return;
        }

        DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {

                urb = qtd->urb;

                n_bytes = 0;
                xfer_done = 0;

                for (i = 0; i < qtd->n_desc; i++) {
                        dma_desc = &qh->desc_list[n_desc];

                        n_bytes = qh->n_bytes[n_desc];

                        failed =
                            update_non_isoc_urb_state_ddma(hcd, hc, qtd,
                                                           dma_desc,
                                                           halt_status, n_bytes,
                                                           &xfer_done);

                        if (failed
                            || (xfer_done
                                && (urb->status != -DWC_E_IN_PROGRESS))) {

                                hcd->fops->complete(hcd, urb->priv, urb,
                                                    urb->status);
                                dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

                                if (failed)
                                        goto stop_scan;
                        } else if (qh->ep_type == UE_CONTROL) {
                                if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
                                        if (urb->length > 0) {
                                                qtd->control_phase = DWC_OTG_CONTROL_DATA;
                                        } else {
                                                qtd->control_phase = DWC_OTG_CONTROL_STATUS;
                                        }
                                        DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
                                } else if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
                                        if (xfer_done) {
                                                qtd->control_phase = DWC_OTG_CONTROL_STATUS;
                                                DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
                                        } else if (i + 1 == qtd->n_desc) {
                                                /*
                                                 * Last descriptor for the Control data stage,
                                                 * which is not completed yet.
                                                 */
                                                dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
                                        }
                                }
                        }

                        n_desc++;
                }
        }

stop_scan:

        if (qh->ep_type != UE_CONTROL) {
                /*
                 * Reset the data toggle for bulk and interrupt endpoints
                 * in case of a stall. See handle_hc_stall_intr().
                 */
                if (halt_status == DWC_OTG_HC_XFER_STALL)
                        qh->data_toggle = DWC_OTG_HC_PID_DATA0;
                else
                        dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
        }

        if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
                hcint_data_t hcint;
                hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
                if (hcint.b.nyet) {
                        /*
                         * Got a NYET on the last transaction of the transfer. It
                         * means that the endpoint should be in the PING state at the
                         * beginning of the next transfer.
                         */
                        qh->ping_state = 1;
                        clear_hc_int(hc_regs, nyet);
                }
        }
}

/**
 * This function is called from interrupt handlers.
 * It scans the descriptor list, updates the URB's status and
 * calls the completion routine for the URB if it is done.
 * Releases the channel to be used by other transfers.
 * For an Isochronous endpoint the channel is not halted until
 * the end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 *
 * Calls transaction selection routines to activate pending transfers.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param hc Host channel the transfer is completed on.
 * @param hc_regs Host channel registers.
 * @param halt_status Reason the channel is being halted,
 *                    or just XferComplete for isochronous transfers.
 */
void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t * hcd,
                                    dwc_hc_t * hc,
                                    dwc_otg_hc_regs_t * hc_regs,
                                    dwc_otg_halt_status_e halt_status)
{
        uint8_t continue_isoc_xfer = 0;
        dwc_otg_transaction_type_e tr_type;
        dwc_otg_qh_t *qh = hc->qh;

        if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {

                complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

                /* Release the channel if halted or the session completed */
                if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
                    DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {

                        /* Halt the channel if the session completed */
                        if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
                                dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
                        }

                        release_channel_ddma(hcd, qh);
                        dwc_otg_hcd_qh_remove(hcd, qh);
                } else {
                        /* Keep in the assigned schedule to continue the transfer */
                        DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
                                           &qh->qh_list_entry);
                        continue_isoc_xfer = 1;
                }
                /** @todo Consider the case when the period exceeds the FrameList size.
                 *  The Frame Rollover interrupt should be used.
                 */
        } else {
                /* Scan the descriptor list to complete the URB(s), then release the channel */
                complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

                release_channel_ddma(hcd, qh);
                dwc_otg_hcd_qh_remove(hcd, qh);

                if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
                        /* Add back to the inactive non-periodic schedule on normal completion */
                        dwc_otg_hcd_qh_add(hcd, qh);
                }
        }

        tr_type = dwc_otg_hcd_select_transactions(hcd);
        if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
                if (continue_isoc_xfer) {
                        if (tr_type == DWC_OTG_TRANSACTION_NONE) {
                                tr_type = DWC_OTG_TRANSACTION_PERIODIC;
                        } else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC) {
                                tr_type = DWC_OTG_TRANSACTION_ALL;
                        }
                }
                dwc_otg_hcd_queue_transactions(hcd, tr_type);
        }
}

#endif /* DWC_DEVICE_ONLY */