/*==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

/** @file
 * This file contains the Descriptor DMA support implementation for host mode.
 */

#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"
#include "dwc_otg_driver.h"
static inline uint8_t frame_list_idx(uint16_t frame)
{
	return (frame & (MAX_FRLIST_EN_NUM - 1));
}
static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
{
	return (idx + inc) &
	    (((speed == DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
	      MAX_DMA_DESC_NUM_GENERIC) - 1);
}
static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
{
	return (idx - inc) &
	    (((speed == DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
	      MAX_DMA_DESC_NUM_GENERIC) - 1);
}
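
/*
 * Example: with a 64-entry generic descriptor list,
 * desclist_idx_inc(62, 4, speed) yields (62 + 4) & 63 == 2, i.e. the
 * index wraps around the circular list. The masking above relies on
 * both list sizes being powers of two.
 */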
static inline uint16_t max_desc_num(dwc_otg_qh_t * qh)
{
	return (((qh->ep_type == UE_ISOCHRONOUS)
		 && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
		? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}
static inline uint16_t frame_incr_val(dwc_otg_qh_t * qh)
{
	return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
		? ((qh->interval + 8 - 1) / 8)
		: qh->interval);
}
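
/*
 * Example: a high-speed endpoint with qh->interval == 16 uframes
 * advances (16 + 7) / 8 == 2 FrameList entries per service period; at
 * full speed the interval is already in frames and is used as-is.
 */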
static int desc_list_alloc(dwc_otg_qh_t * qh)
{
	int retval = 0;

	qh->desc_list = (dwc_otg_host_dma_desc_t *)
	    DWC_DMA_ALLOC(get_hcd_device(), sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
			  &qh->desc_list_dma);

	if (!qh->desc_list) {
		retval = -DWC_E_NO_MEMORY;
		DWC_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
		return retval;
	}

	dwc_memset(qh->desc_list, 0x00,
		   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));

	qh->n_bytes =
	    (uint32_t *) DWC_ALLOC(sizeof(uint32_t) * max_desc_num(qh));

	if (!qh->n_bytes) {
		retval = -DWC_E_NO_MEMORY;
		DWC_ERROR
		    ("%s: Failed to allocate array for descriptors' actual sizes\n",
		     __func__);
	}

	return retval;
}
static void desc_list_free(dwc_otg_qh_t * qh)
{
	if (qh->desc_list) {
		DWC_DMA_FREE(get_hcd_device(), max_desc_num(qh), qh->desc_list,
			     qh->desc_list_dma);
		qh->desc_list = NULL;
	}

	if (qh->n_bytes) {
		DWC_FREE(qh->n_bytes);
		qh->n_bytes = NULL;
	}
}
static int frame_list_alloc(dwc_otg_hcd_t * hcd)
{
	int retval = 0;

	if (hcd->frame_list)
		return 0;

	hcd->frame_list = DWC_DMA_ALLOC(get_hcd_device(), 4 * MAX_FRLIST_EN_NUM,
					&hcd->frame_list_dma);
	if (!hcd->frame_list) {
		retval = -DWC_E_NO_MEMORY;
		DWC_ERROR("%s: Frame List allocation failed\n", __func__);
		return retval;
	}

	dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);

	return retval;
}
static void frame_list_free(dwc_otg_hcd_t * hcd)
{
	if (!hcd->frame_list)
		return;

	DWC_DMA_FREE(get_hcd_device(), 4 * MAX_FRLIST_EN_NUM, hcd->frame_list,
		     hcd->frame_list_dma);
	hcd->frame_list = NULL;
}
static void per_sched_enable(dwc_otg_hcd_t * hcd, uint16_t fr_list_en)
{
	hcfg_data_t hcfg;

	hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

	if (hcfg.b.perschedena) {
		/* already enabled */
		return;
	}

	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
			hcd->frame_list_dma);

	/* Program the FrameList size field (FrListEn) from the entry count */
	switch (fr_list_en) {
	case 64: hcfg.b.frlisten = 3; break;
	case 32: hcfg.b.frlisten = 2; break;
	case 16: hcfg.b.frlisten = 1; break;
	case 8:  hcfg.b.frlisten = 0; break;
	default: break;
	}

	hcfg.b.perschedena = 1;

	DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}
static void per_sched_disable(dwc_otg_hcd_t * hcd)
{
	hcfg_data_t hcfg;

	hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

	if (!hcfg.b.perschedena) {
		/* already disabled */
		return;
	}

	hcfg.b.perschedena = 0;

	DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}
/*
 * Activates/Deactivates FrameList entries for the channel
 * based on the endpoint servicing period.
 */
void update_frame_list(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, uint8_t enable)
{
	uint16_t i, j, inc;
	dwc_hc_t *hc = NULL;

	if (!qh->channel) {
		DWC_ERROR("qh->channel = %p", qh->channel);
		return;
	}

	if (!hcd) {
		DWC_ERROR("------hcd = %p", hcd);
		return;
	}

	if (!hcd->frame_list) {
		DWC_ERROR("-------hcd->frame_list = %p", hcd->frame_list);
		return;
	}

	hc = qh->channel;
	inc = frame_incr_val(qh);
	if (qh->ep_type == UE_ISOCHRONOUS)
		i = frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hcd->frame_list[j] |= (1 << hc->hc_num);
		else
			hcd->frame_list[j] &= ~(1 << hc->hc_num);
		j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
	} while (j != i);

	if (!enable)
		return;

	hc->schinfo = 0;
	if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			hc->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		hc->schinfo = 0xff;
	}
}
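
/*
 * Example for the HS schinfo calculation above: with qh->interval == 2
 * uframes, inc == (8 + 1) / 2 == 4 and the loop sets bits 0, 2, 4 and 6,
 * i.e. hc->schinfo == 0x55 - the channel is scheduled in every second
 * uframe of the frame.
 */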
void dump_frame_list(dwc_otg_hcd_t * hcd)
{
	int i = 0;

	DWC_PRINTF("--FRAME LIST (hex) --\n");
	for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
		DWC_PRINTF("%x\t", hcd->frame_list[i]);
		if (!(i % 8) && i)
			DWC_PRINTF("\n");
	}
	DWC_PRINTF("\n----\n");
}
static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	dwc_hc_t *hc = qh->channel;

	if (dwc_qh_is_non_per(qh))
		hcd->non_periodic_channels--;
	else
		update_frame_list(hcd, qh, 0);

	/*
	 * The condition is added to prevent a double cleanup attempt on device
	 * disconnect. See the channel cleanup in dwc_otg_hcd_disconnect_cb().
	 */
	if (hc->qh) {
		dwc_otg_hc_cleanup(hcd->core_if, hc);
		DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
		hc->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		dwc_memset(qh->desc_list, 0x00,
			   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
}
/**
 * Initializes a QH structure's Descriptor DMA related members.
 * Allocates memory for the descriptor list.
 * On the first periodic QH, allocates memory for the FrameList
 * and enables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	int retval = 0;

	if (qh->do_split) {
		DWC_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
		return -1;
	}

	retval = desc_list_alloc(qh);

	if ((retval == 0)
	    && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
		if (!hcd->frame_list) {
			retval = frame_list_alloc(hcd);
			/* Enable periodic schedule on first periodic QH */
			if (retval == 0)
				per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
		}
	}

	qh->ntd = 0;

	return retval;
}
/**
 * Frees descriptor list memory associated with the QH.
 * If the QH is periodic and the last one, frees FrameList memory
 * and disables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to free.
 */
void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	if (qh->channel) {
		/*
		 * The channel is still assigned for some reason.
		 * Seen on Isoc URB dequeue: the channel was halted but no
		 * subsequent ChHalted interrupt arrived to release it, so
		 * when this function is reached from the endpoint disable
		 * routine the channel remains assigned.
		 */
		release_channel_ddma(hcd, qh);
	}

	desc_list_free(qh);

	if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
	    && !hcd->periodic_channels && hcd->frame_list) {

		per_sched_disable(hcd);
		frame_list_free(hcd);
	}
}
static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
{
	if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
		/*
		 * Descriptor set (8 descriptors) index
		 * which is 8-aligned.
		 */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	} else {
		return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
	}
}
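
/*
 * Example for frame_to_desc_idx(): at high speed each frame owns an
 * 8-aligned set of 8 descriptors (one per uframe), so frame_idx 5 maps
 * to descriptor index 5 * 8 == 40; at full speed the mapping is simply
 * frame_idx modulo the descriptor list size.
 */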
/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static uint16_t calc_starting_frame(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
				    uint8_t * skip_frames)
{
	uint16_t frame = 0;

	hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);

	/* sched_frame is always a frame number (not a uFrame) for both FS and HS !! */

	/*
	 * skip_frames is used to limit the number of activated descriptors, to
	 * avoid the situation when the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always fetches the descriptor
	 * corresponding to curr_frame+1, the descriptor corresponding to frame 2
	 * will be fetched. If the number of descriptors is max=64 (or greater) the
	 * list will be fully programmed with Active descriptors and it is a possible
	 * (rare) case that the latest descriptor (considering rollback) corresponding
	 * to frame 2 will be serviced first. The HS case is more probable because, in fact,
	 * up to 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
		/*
		 * Consider the uframe counter as well, to start the transfer asap.
		 * If half of the frame has elapsed, skip 2 frames, otherwise
		 * just 1 frame.
		 * The starting descriptor index must be 8-aligned, so
		 * if the current frame is near completion the next one
		 * is skipped as well.
		 */
		if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
		}

		frame = dwc_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is enough,
		 * see the example above.
		 */
		*skip_frames = 1;
		frame = dwc_frame_num_inc(hcd->frame_number, 2);
	}

	return frame;
}
/*
 * Calculate the initial descriptor index for an isochronous transfer
 * based on the scheduled frame.
 */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	uint16_t frame = 0, fr_idx, fr_idx_tmp;
	uint8_t skip_frames = 0;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when no more QTDs are in the list (qh->ntd == 0).
	 * Thus this function is called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So the qh->channel != NULL branch is not used; it is kept in the
	 * source file for another possible approach: do not disable and
	 * release the channel when the ISOC session is completed, just move
	 * the QH to the inactive schedule until a new QTD arrives.
	 * On a new QTD, the QH is moved back to the 'ready' schedule, and the
	 * starting frame, and therefore the starting desc_index, are
	 * recalculated. In this case the channel is released only on ep_disable.
	 */

	/* Calculate the starting descriptor index. For INTERRUPT endpoints it is always 0. */
	if (qh->channel) {
		frame = calc_starting_frame(hcd, qh, &skip_frames);
		/*
		 * Calculate the initial descriptor index based on the FrameList
		 * current bitmap and the servicing period.
		 */
		fr_idx_tmp = frame_list_idx(frame);
		fr_idx =
		    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
		     fr_idx_tmp)
		    % frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
	} else {
		qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
		fr_idx = frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}
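
/*
 * Example for the qh->channel != NULL branch above, assuming
 * MAX_FRLIST_EN_NUM == 64: if sched_frame maps to FrameList entry 10,
 * the recalculated frame maps to entry 13 and the service period is 4
 * entries, then fr_idx = (64 + 10 - 13) % 4 == 1 and
 * fr_idx = (1 + 13) % 64 == 14 - the first entry at or after the new
 * frame that still falls on the endpoint's original service grid
 * (10, 14, 18, ...).
 */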
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4
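
/*
 * 1023 bytes is the maximum isochronous data payload per frame at full
 * speed; 3072 bytes corresponds to a high-bandwidth high-speed endpoint
 * (3 transactions of 1024 bytes per microframe).
 */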
static void init_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
			       uint8_t skip_frames)
{
	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
	dwc_otg_qtd_t *qtd;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
	if (skip_frames && !qh->channel)
		ntd_max = ntd_max - skip_frames / qh->interval;
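
	/*
	 * Example: a full-speed isochronous endpoint with qh->interval == 4
	 * and a 64-entry descriptor list can have at most (64 + 3) / 4 == 16
	 * descriptors in flight; frames skipped at session start reduce this
	 * limit accordingly.
	 */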
	max_xfer_size =
	    (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
	    MAX_ISOC_XFER_SIZE_FS;

	DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		while ((qh->ntd < ntd_max)
		       && (qtd->isoc_frame_index_last <
			   qtd->urb->packet_count)) {

			dma_desc = &qh->desc_list[idx];
			dwc_memset(dma_desc, 0x00, sizeof(dwc_otg_host_dma_desc_t));

			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

			if (frame_desc->length > max_xfer_size)
				qh->n_bytes[idx] = max_xfer_size;
			else
				qh->n_bytes[idx] = frame_desc->length;

			dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
			dma_desc->status.b_isoc.a = 1;
			dma_desc->status.b_isoc.sts = 0;

			dma_desc->buf = qtd->urb->dma + frame_desc->offset;

			qh->ntd++;
			qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
			/*
			 * Set IOC for each descriptor corresponding to the
			 * last frame of the URB.
			 */
			if (qtd->isoc_frame_index_last ==
			    qtd->urb->packet_count)
				dma_desc->status.b_isoc.ioc = 1;
#endif

			idx = desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;
#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for the last descriptor if the descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status.b_isoc.ioc = 1;
	}
#else
	/*
	 * Set the IOC bit for only one descriptor.
	 * Always try to stay ahead of HW processing,
	 * i.e. on IOC generation the driver activates the next descriptors, while
	 * the core continues to process the descriptors following the one with IOC set.
	 */
	if (n_desc > DESCNUM_THRESHOLD) {
		/*
		 * Move the IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * even though only one was queued at activation time.
		 * Actually, more than one QTD might be in the list if this function is
		 * called from XferCompletion - the QTDs were queued during HW processing
		 * of the previous descriptor chunk.
		 */
		idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
	} else {
		/*
		 * Set the IOC for the latest descriptor
		 * if either the number of descriptors is not greater than the threshold
		 * or no more new descriptors were activated.
		 */
		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
	}

	qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}
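
/*
 * Example for the non-ASAP IOC policy above: with qh->ntd == 10 active
 * descriptors, the IOC bit is moved up by inc * ((10 + 1) / 2) positions,
 * i.e. roughly half of the programmed set, so the driver gets interrupted
 * early enough to refill the list while the core keeps processing the
 * remaining descriptors.
 */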
static void init_non_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	dwc_hc_t *hc;
	dwc_otg_host_dma_desc_t *dma_desc;
	dwc_otg_qtd_t *qtd;
	int num_packets, len, n_desc = 0;

	hc = qh->channel;

	/*
	 * Start with hc->xfer_buff as initialized in
	 * assign_and_init_hc(); then, if an SG transfer consists of multiple URBs,
	 * this pointer is re-assigned to the buffer of the currently processed QTD.
	 * For a non-SG request there is always one QTD active.
	 */
	DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {

		if (n_desc) {
			/* SG request - more than 1 QTD */
			hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
			hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
		}

		qtd->n_desc = 0;

		do {
			dma_desc = &qh->desc_list[n_desc];
			len = hc->xfer_len;

			if (len > MAX_DMA_DESC_SIZE)
				len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;
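
			/*
			 * Example: assuming the usual MAX_DMA_DESC_SIZE of
			 * 131071 (a 17-bit n_bytes field) and hc->max_packet
			 * == 512, the clamped length is 131071 - 512 + 1 ==
			 * 130560, i.e. exactly 255 max-size packets, so a
			 * descriptor never ends in the middle of a packet.
			 */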
			if (hc->ep_is_in) {
				num_packets = (len + hc->max_packet - 1) / hc->max_packet;
				if (num_packets == 0) {
					/* Need 1 packet for transfer length of 0. */
					num_packets = 1;
				}
				/* Always program an integral # of max packets for IN transfers. */
				len = num_packets * hc->max_packet;
			}

			dma_desc->status.b.n_bytes = len;
			qh->n_bytes[n_desc] = len;

			if ((qh->ep_type == UE_CONTROL)
			    && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
				dma_desc->status.b.sup = 1;	/* Setup Packet */

			dma_desc->status.b.a = 1;	/* Active descriptor */
			dma_desc->status.b.sts = 0;

			dma_desc->buf =
			    ((unsigned long)hc->xfer_buff & 0xffffffff);

			/*
			 * Last (or only) descriptor of an IN transfer
			 * with an actual size less than MaxPacket.
			 */
			if (len > hc->xfer_len) {
				hc->xfer_len = 0;
			} else {
				hc->xfer_buff += len;
				hc->xfer_len -= len;
			}

			qtd->n_desc++;
			n_desc++;
		} while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));

		qtd->in_process = 1;

		if (qh->ep_type == UE_CONTROL)
			break;

		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		/* Request a Transfer Complete interrupt for the last descriptor */
		qh->desc_list[n_desc - 1].status.b.ioc = 1;
		/* End of List indicator */
		qh->desc_list[n_desc - 1].status.b.eol = 1;
	}
}
/**
 * For Control and Bulk endpoints, initializes the descriptor list
 * and starts the transfer.
 *
 * For Interrupt and Isochronous endpoints, initializes the descriptor list
 * then updates the FrameList, marking the appropriate entries as active.
 * For Isochronous endpoints, the starting descriptor index is calculated based
 * on the scheduled frame, but only for the first transfer descriptor within a session.
 * Then the transfer is started via enabling the channel.
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session is done.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to start the transfer on.
 */
void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	/* Channel is already assigned */
	dwc_hc_t *hc = qh->channel;
	uint8_t skip_frames = 0;

	switch (hc->ep_type) {
	case DWC_OTG_EP_TYPE_CONTROL:
	case DWC_OTG_EP_TYPE_BULK:
		init_non_isoc_dma_desc(hcd, qh);
		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		break;
	case DWC_OTG_EP_TYPE_INTR:
		init_non_isoc_dma_desc(hcd, qh);
		update_frame_list(hcd, qh, 1);
		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		break;
	case DWC_OTG_EP_TYPE_ISOC:
		if (!qh->ntd)
			skip_frames = recalc_initial_desc_idx(hcd, qh);

		init_isoc_dma_desc(hcd, qh, skip_frames);

		if (!hc->xfer_started) {
			update_frame_list(hcd, qh, 1);
			/*
			 * Always set to max, instead of the actual size.
			 * Otherwise ntd would change as the channel is
			 * enabled. Not recommended.
			 */
			hc->ntd = max_desc_num(qh);
			/* Enable the channel only once for ISOC */
			dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		}
		break;
	default:
		break;
	}
}
static void complete_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
				    dwc_hc_t * hc,
				    dwc_otg_hc_regs_t * hc_regs,
				    dwc_otg_halt_status_e halt_status)
{
	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint16_t idx, remain;
	uint8_t urb_compl;

	qh = hc->qh;
	idx = qh->td_first;

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
		    qtd->in_process = 0;
		return;
	} else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
		   (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed,
		 * irrespective of whether some of the descriptors (frames) succeeded.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to stop
		 * queueing transfer requests.
		 */
		int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
		    ? (-DWC_E_IO)
		    : (-DWC_E_OVERFLOW);

		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
			for (idx = 0; idx < qtd->urb->packet_count; idx++) {
				frame_desc = &qtd->urb->iso_descs[idx];
				frame_desc->status = err;
			}
			hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
			dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {

		if (!qtd->in_process)
			break;

		urb_compl = 0;

		do {
			dma_desc = &qh->desc_list[idx];

			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
			remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;

			if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
				/*
				 * XactError or, unable to complete all the transactions
				 * in the scheduled micro-frame/frame,
				 * both indicated by DMA_DESC_STS_PKTERR.
				 */
				qtd->urb->error_count++;
				frame_desc->actual_length = qh->n_bytes[idx] - remain;
				frame_desc->status = -DWC_E_PROTOCOL;
			} else {
				/* Success */
				frame_desc->actual_length = qh->n_bytes[idx] - remain;
				frame_desc->status = 0;
			}

			if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
				/*
				 * urb->status is not used for isoc transfers here.
				 * The individual frame_desc statuses are used instead.
				 */
				hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				/*
				 * This check is necessary because urb_dequeue can be called
				 * from the urb complete callback (sound driver, for example).
				 * All pending URBs are dequeued there, so no need for
				 * further processing.
				 */
				if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
					return;
				}

				urb_compl = 1;
			}

			qh->ntd--;

			/* Stop if the IOC requested descriptor is reached */
			if (dma_desc->status.b_isoc.ioc) {
				idx = desclist_idx_inc(idx, qh->interval, hc->speed);
				goto stop_scan;
			}

			idx = desclist_idx_inc(idx, qh->interval, hc->speed);

			if (urb_compl)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}
uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t * hcd,
				       dwc_hc_t * hc,
				       dwc_otg_qtd_t * qtd,
				       dwc_otg_host_dma_desc_t * dma_desc,
				       dwc_otg_halt_status_e halt_status,
				       uint32_t n_bytes, uint8_t * xfer_done)
{
	uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
	dwc_otg_hcd_urb_t *urb = qtd->urb;

	if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
		urb->status = -DWC_E_IO;
		return 1;
	}

	if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
		switch (halt_status) {
		case DWC_OTG_HC_XFER_STALL:
			urb->status = -DWC_E_PIPE;
			break;
		case DWC_OTG_HC_XFER_BABBLE_ERR:
			urb->status = -DWC_E_OVERFLOW;
			break;
		case DWC_OTG_HC_XFER_XACT_ERR:
			urb->status = -DWC_E_PROTOCOL;
			break;
		default:
			DWC_ERROR("%s: Unhandled descriptor error status (%d)\n",
				  __func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status.b.a == 1) {
		DWC_DEBUGPL(DBG_HCDV,
			    "Active descriptor encountered on channel %d\n",
			    hc->hc_num);
		return 0;
	}

	if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
		if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length == urb->length) {
				/*
				 * For the Control Data stage do not set urb->status = 0
				 * to prevent the URB callback. Set it when the Status
				 * phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		if (remain || urb->actual_length == urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}
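
/*
 * Example for update_non_isoc_urb_state_ddma(): an IN descriptor programmed
 * for n_bytes == 512 that completes with status.b.n_bytes == 112 remaining
 * has transferred 400 bytes; the non-zero remainder indicates a short
 * packet, which completes the transfer, so *xfer_done is set.
 */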
static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
					dwc_hc_t * hc,
					dwc_otg_hc_regs_t * hc_regs,
					dwc_otg_halt_status_e halt_status)
{
	dwc_otg_hcd_urb_t *urb = NULL;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint32_t n_bytes, n_desc, i;
	uint8_t failed = 0, xfer_done;

	n_desc = 0;
	qh = hc->qh;

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
			qtd->in_process = 0;
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {

		urb = qtd->urb;
		n_bytes = 0;
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			dma_desc = &qh->desc_list[n_desc];
			n_bytes = qh->n_bytes[n_desc];

			failed =
			    update_non_isoc_urb_state_ddma(hcd, hc, qtd,
							   dma_desc,
							   halt_status, n_bytes,
							   &xfer_done);

			if (failed
			    || (xfer_done
				&& (urb->status != -DWC_E_IN_PROGRESS))) {

				hcd->fops->complete(hcd, urb->priv, urb,
						    urb->status);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				if (failed)
					goto stop_scan;
			} else if (qh->ep_type == UE_CONTROL) {
				if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
					if (urb->length > 0)
						qtd->control_phase = DWC_OTG_CONTROL_DATA;
					else
						qtd->control_phase = DWC_OTG_CONTROL_STATUS;
					DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
				} else if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
					if (xfer_done) {
						qtd->control_phase = DWC_OTG_CONTROL_STATUS;
						DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
					} else if (i + 1 == qtd->n_desc) {
						/*
						 * Last descriptor for the Control data stage
						 * which is not completed yet.
						 */
						dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
					}
				}
			}

			n_desc++;
		}
	}

stop_scan:

	if (qh->ep_type != UE_CONTROL) {
		/*
		 * Reset the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC_OTG_HC_XFER_STALL)
			qh->data_toggle = DWC_OTG_HC_PID_DATA0;
		else
			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
	}

	if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
		hcint_data_t hcint;

		hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
		if (hcint.b.nyet) {
			/*
			 * Got a NYET on the last transaction of the transfer. It
			 * means that the endpoint should be in the PING state at the
			 * beginning of the next transfer.
			 */
			qh->ping_state = 1;
			clear_hc_int(hc_regs, nyet);
		}
	}
}
/**
 * This function is called from interrupt handlers.
 * Scans the descriptor list, updates the URB's status and
 * calls the completion routine for the URB if it's done.
 * Releases the channel to be used by other transfers.
 * In case of Isochronous endpoints the channel is not halted until
 * the end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 *
 * Calls transaction selection routines to activate pending transfers.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param hc Host channel the transfer is completed on.
 * @param hc_regs Host channel registers.
 * @param halt_status Reason the channel is being halted,
 *		      or just XferComplete for isochronous transfers.
 */
void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t * hcd,
				    dwc_hc_t * hc,
				    dwc_otg_hc_regs_t * hc_regs,
				    dwc_otg_halt_status_e halt_status)
{
	uint8_t continue_isoc_xfer = 0;
	dwc_otg_transaction_type_e tr_type;
	dwc_otg_qh_t *qh = hc->qh;

	if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {

		complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
		    DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {

			/* Halt the channel if session completed */
			if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
				dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
			}

			release_channel_ddma(hcd, qh);
			dwc_otg_hcd_qh_remove(hcd, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
					   &qh->qh_list_entry);
			continue_isoc_xfer = 1;
		}
		/** @todo Consider the case when the period exceeds the FrameList size.
		 *  The Frame Rollover interrupt should be used.
		 */
	} else {
		/* Scan descriptor list to complete the URB(s), then release the channel */
		complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

		release_channel_ddma(hcd, qh);
		dwc_otg_hcd_qh_remove(hcd, qh);

		if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
			/* Add back to inactive non-periodic schedule on normal completion */
			dwc_otg_hcd_qh_add(hcd, qh);
		}
	}

	tr_type = dwc_otg_hcd_select_transactions(hcd);
	if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC_OTG_TRANSACTION_NONE) {
				tr_type = DWC_OTG_TRANSACTION_PERIODIC;
			} else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC) {
				tr_type = DWC_OTG_TRANSACTION_ALL;
			}
		}
		dwc_otg_hcd_queue_transactions(hcd, tr_type);
	}
}

#endif /* DWC_DEVICE_ONLY */