/*==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

/** @file
 * This file contains the Descriptor DMA support implementation for host mode.
 */

#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"

extern bool microframe_schedule;
static inline uint8_t frame_list_idx(uint16_t frame)
{
	return (frame & (MAX_FRLIST_EN_NUM - 1));
}

static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
{
	return (idx + inc) &
	    (((speed ==
	       DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
	      MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
{
	return (idx - inc) &
	    (((speed ==
	       DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
	      MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t max_desc_num(dwc_otg_qh_t * qh)
{
	return (((qh->ep_type == UE_ISOCHRONOUS)
		 && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
		? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}

static inline uint16_t frame_incr_val(dwc_otg_qh_t * qh)
{
	return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
		? ((qh->interval + 8 - 1) / 8)
		: qh->interval);
}
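
/*
 * Illustrative sketch (compiled out, not part of the driver): the list sizes
 * above are powers of two, so the "& (size - 1)" masks implement cheap
 * modulo wrap-around. The values below assume the usual 64-entry sizes for
 * MAX_FRLIST_EN_NUM and MAX_DMA_DESC_NUM_GENERIC.
 */
#if 0
static void desclist_idx_example(void)
{
	/* 0x147 & 0x3f == 7: frame 0x147 maps to frame list entry 7 */
	uint8_t fl = frame_list_idx(0x147);
	/* (62 + 4) & 63 == 2: a FS descriptor index wraps past the list end */
	uint16_t di = desclist_idx_inc(62, 4, DWC_OTG_EP_SPEED_FULL);
	(void)fl;
	(void)di;
}
#endif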
static int desc_list_alloc(struct device *dev, dwc_otg_qh_t * qh)
{
	qh->desc_list = (dwc_otg_host_dma_desc_t *)
	    DWC_DMA_ALLOC(dev, sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
			  &qh->desc_list_dma);
	if (!qh->desc_list) {
		DWC_ERROR("%s: DMA descriptor list allocation failed\n", __func__);
		return -DWC_E_NO_MEMORY;
	}

	dwc_memset(qh->desc_list, 0x00,
		   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));

	qh->n_bytes =
	    (uint32_t *) DWC_ALLOC(sizeof(uint32_t) * max_desc_num(qh));
	if (!qh->n_bytes) {
		DWC_ERROR
		    ("%s: Failed to allocate array for descriptors' size actual values\n",
		     __func__);
		return -DWC_E_NO_MEMORY;
	}

	return 0;
}
static void desc_list_free(struct device *dev, dwc_otg_qh_t * qh)
{
	if (qh->desc_list) {
		DWC_DMA_FREE(dev, max_desc_num(qh), qh->desc_list,
			     qh->desc_list_dma);
		qh->desc_list = NULL;
	}

	if (qh->n_bytes) {
		DWC_FREE(qh->n_bytes);
		qh->n_bytes = NULL;
	}
}
static int frame_list_alloc(dwc_otg_hcd_t * hcd)
{
	struct device *dev = dwc_otg_hcd_to_dev(hcd);

	if (hcd->frame_list)
		return 0;

	hcd->frame_list = DWC_DMA_ALLOC(dev, 4 * MAX_FRLIST_EN_NUM,
					&hcd->frame_list_dma);
	if (!hcd->frame_list) {
		DWC_ERROR("%s: Frame List allocation failed\n", __func__);
		return -DWC_E_NO_MEMORY;
	}

	dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);

	return 0;
}
static void frame_list_free(dwc_otg_hcd_t * hcd)
{
	struct device *dev = dwc_otg_hcd_to_dev(hcd);

	if (!hcd->frame_list)
		return;

	DWC_DMA_FREE(dev, 4 * MAX_FRLIST_EN_NUM, hcd->frame_list, hcd->frame_list_dma);
	hcd->frame_list = NULL;
}
static void per_sched_enable(dwc_otg_hcd_t * hcd, uint16_t fr_list_en)
{
	hcfg_data_t hcfg;

	hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

	if (hcfg.b.perschedena) {
		/* already enabled */
		return;
	}

	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
			hcd->frame_list_dma);

	/* Map the frame list size (8/16/32/64 entries) to the HCFG.FrListEn encoding */
	switch (fr_list_en) {
	case 64: hcfg.b.frlisten = 3; break;
	case 32: hcfg.b.frlisten = 2; break;
	case 16: hcfg.b.frlisten = 1; break;
	case 8:  hcfg.b.frlisten = 0; break;
	default: break;
	}

	hcfg.b.perschedena = 1;

	DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}
static void per_sched_disable(dwc_otg_hcd_t * hcd)
{
	hcfg_data_t hcfg;

	hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);

	if (!hcfg.b.perschedena) {
		/* already disabled */
		return;
	}
	hcfg.b.perschedena = 0;

	DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
	DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}
/*
 * Activates/Deactivates FrameList entries for the channel
 * based on the endpoint servicing period.
 */
void update_frame_list(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, uint8_t enable)
{
	uint16_t i, j, inc;
	dwc_hc_t *hc = NULL;

	if (!qh->channel) {
		DWC_ERROR("qh->channel = %p", qh->channel);
		return;
	}

	if (!hcd) {
		DWC_ERROR("------hcd = %p", hcd);
		return;
	}

	if (!hcd->frame_list) {
		DWC_ERROR("-------hcd->frame_list = %p", hcd->frame_list);
		return;
	}

	hc = qh->channel;
	inc = frame_incr_val(qh);
	if (qh->ep_type == UE_ISOCHRONOUS)
		i = frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hcd->frame_list[j] |= (1 << hc->hc_num);
		else
			hcd->frame_list[j] &= ~(1 << hc->hc_num);
		j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
	} while (j != i);

	if (!enable)
		return;

	hc->schinfo = 0;
	if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			hc->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		hc->schinfo = 0xff;
	}
}
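
/*
 * Illustrative example of the FrameList bitmap: for channel 3 serving a FS
 * isochronous endpoint with an 8-frame period whose sched_frame maps to
 * frame list entry 2, the loop above sets bit 3 (1 << hc->hc_num) in
 * entries 2, 10, 18, ..., 58 and leaves the other entries untouched, so
 * the core activates the channel on every 8th frame.
 */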
void dump_frame_list(dwc_otg_hcd_t * hcd)
{
	int i = 0;

	DWC_PRINTF("--FRAME LIST (hex) --\n");
	for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
		DWC_PRINTF("%x\t", hcd->frame_list[i]);
		if (!(i % 8) && i)
			DWC_PRINTF("\n");
	}
	DWC_PRINTF("\n----\n");
}
static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	dwc_hc_t *hc = qh->channel;

	if (dwc_qh_is_non_per(qh)) {
		if (!microframe_schedule)
			hcd->non_periodic_channels--;
		else
			hcd->available_host_channels++;
	} else
		update_frame_list(hcd, qh, 0);

	/*
	 * The condition is added to prevent a double cleanup attempt in case of
	 * device disconnect. See channel cleanup in dwc_otg_hcd_disconnect_cb().
	 */
	if (hc->qh) {
		dwc_otg_hc_cleanup(hcd->core_if, hc);
		DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
		hc->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		dwc_memset(qh->desc_list, 0x00,
			   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
}
/**
 * Initializes a QH structure's Descriptor DMA related members.
 * Allocates memory for the descriptor list.
 * On the first periodic QH, allocates memory for the FrameList
 * and enables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	struct device *dev = dwc_otg_hcd_to_dev(hcd);
	int retval = 0;

	if (qh->do_split) {
		DWC_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
		return -1;
	}

	retval = desc_list_alloc(dev, qh);

	if ((retval == 0)
	    && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
		if (!hcd->frame_list) {
			retval = frame_list_alloc(hcd);
			/* Enable periodic schedule on first periodic QH */
			if (retval == 0)
				per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
		}
	}

	qh->ntd = 0;

	return retval;
}
/**
 * Frees descriptor list memory associated with the QH.
 * If the QH is periodic and the last one, frees FrameList memory
 * and disables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to free.
 */
void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	struct device *dev = dwc_otg_hcd_to_dev(hcd);

	desc_list_free(dev, qh);

	/*
	 * The channel may still be assigned here.
	 * Seen on Isoc URB dequeue: the channel was halted but no subsequent
	 * ChHalted interrupt arrived to release it, so when this function is
	 * reached from the endpoint disable routine the channel remains
	 * assigned.
	 */
	if (qh->channel)
		release_channel_ddma(hcd, qh);

	if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
	    && (microframe_schedule || !hcd->periodic_channels) && hcd->frame_list) {
		per_sched_disable(hcd);
		frame_list_free(hcd);
	}
}
static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
{
	if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
		/*
		 * Descriptor set (8 descriptors) index,
		 * which is 8-aligned.
		 */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	} else {
		return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
	}
}
/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static uint16_t calc_starting_frame(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
				    uint8_t * skip_frames)
{
	uint16_t frame = 0;

	hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);

	/* sched_frame is always a frame number (not a uFrame), both in FS and HS !! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always fetches
	 * the descriptor corresponding to curr_frame+1, the descriptor
	 * corresponding to frame 2 will be fetched. If the number of descriptors
	 * is max=64 (or greater) the list will be fully programmed with Active
	 * descriptors and it is a possible (rare) case that the latest descriptor
	 * (considering rollback) corresponding to frame 2 will be serviced first.
	 * The HS case is more probable because, in fact, up to 11 uframes
	 * (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
		/*
		 * Consider the uframe counter as well, to start the transfer ASAP.
		 * If half of the frame has elapsed skip 2 frames, otherwise
		 * just 1 frame.
		 * The starting descriptor index must be 8-aligned, so
		 * if the current frame is nearly complete the next one
		 * is skipped as well.
		 */
		if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
		}

		frame = dwc_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is enough,
		 * see the example above.
		 */
		*skip_frames = 1;
		frame = dwc_frame_num_inc(hcd->frame_number, 2);
	}

	return frame;
}
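
/*
 * Illustrative trace of the HS branch above: in HS mode the frame counter
 * advances per micro-frame. If it reads micro-frame 5 of frame N, 16
 * micro-frames (2 frames) are skipped and the transfer is scheduled for
 * full frame N + 2; for micro-frames 0..4 only 8 micro-frames (1 frame)
 * are skipped. Both choices keep the starting descriptor index 8-aligned,
 * one descriptor per micro-frame.
 */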
/*
 * Calculate the initial descriptor index for an isochronous transfer,
 * based on the scheduled frame.
 */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	uint16_t frame = 0, fr_idx, fr_idx_tmp;
	uint8_t skip_frames = 0;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when there are no more QTDs in the list (qh->ntd == 0).
	 * Thus this function is called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So the qh->channel != NULL branch is not used; it is just not removed
	 * from the source file. It is required for another possible approach,
	 * which is: do not disable and release the channel when an ISOC session
	 * completes, just move the QH to the inactive schedule until a new QTD
	 * arrives. On a new QTD, the QH is moved back to the 'ready' schedule,
	 * and the starting frame and therefore the starting desc_index are
	 * recalculated. In this case the channel is released only on ep_disable.
	 */

	/* Calculate the starting descriptor index. For INTERRUPT endpoints it is always 0. */
	if (qh->channel) {
		frame = calc_starting_frame(hcd, qh, &skip_frames);
		/*
		 * Calculate the initial descriptor index based on the FrameList
		 * current bitmap and the servicing period.
		 */
		fr_idx_tmp = frame_list_idx(frame);
		fr_idx =
		    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
		     fr_idx_tmp)
		    % frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
	} else {
		qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
		fr_idx = frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}
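
/*
 * Worked example for the qh->channel branch above: with a 64-entry frame
 * list, sched_frame mapping to entry 10, a servicing period of 4, and a
 * newly calculated starting frame mapping to entry 13:
 *   fr_idx = (64 + 10 - 13) % 4 = 1;  fr_idx = (1 + 13) % 64 = 14.
 * Entry 14 is the first entry at or after 13 that keeps the original phase
 * (14 % 4 == 10 % 4), so servicing stays aligned with the FrameList bitmap
 * programmed by update_frame_list().
 */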
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4
static void init_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
			       uint8_t skip_frames)
{
	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
	dwc_otg_qtd_t *qtd;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
	if (skip_frames && !qh->channel)
		ntd_max = ntd_max - skip_frames / qh->interval;

	max_xfer_size =
	    (qh->dev_speed ==
	     DWC_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
	    MAX_ISOC_XFER_SIZE_FS;

	DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
		while ((qh->ntd < ntd_max)
		       && (qtd->isoc_frame_index_last <
			   qtd->urb->packet_count)) {

			dma_desc = &qh->desc_list[idx];
			dwc_memset(dma_desc, 0x00, sizeof(dwc_otg_host_dma_desc_t));

			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

			if (frame_desc->length > max_xfer_size)
				qh->n_bytes[idx] = max_xfer_size;
			else
				qh->n_bytes[idx] = frame_desc->length;
			dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
			dma_desc->status.b_isoc.a = 1;
			dma_desc->status.b_isoc.sts = 0;

			dma_desc->buf = qtd->urb->dma + frame_desc->offset;

			qh->ntd++;

			qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
			/*
			 * Set IOC for each descriptor corresponding to the
			 * last frame of the URB.
			 */
			if (qtd->isoc_frame_index_last ==
			    qtd->urb->packet_count)
				dma_desc->status.b_isoc.ioc = 1;
#endif
			idx = desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for the last descriptor if the descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status.b_isoc.ioc = 1;
	}
#else
	/*
	 * Set the IOC bit on only one descriptor.
	 * Always try to stay ahead of HW processing,
	 * i.e. on IOC generation the driver activates the next descriptors while
	 * the core continues to process the descriptors following the one with
	 * IOC set.
	 */
	if (n_desc > DESCNUM_THRESHOLD) {
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * although only one was queued at activation time.
		 * Actually more than one QTD might be in the list if this
		 * function is called from XferCompletion - QTDs were queued
		 * during HW processing of the previous descriptor chunk.
		 */
		idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
	} else {
		/*
		 * Set the IOC on the latest descriptor if either the number of
		 * descriptors is not greater than the threshold or no more new
		 * descriptors were activated.
		 */
		idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
	}

	qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}
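
/*
 * Illustrative trace of the #else (non-ASAP) IOC placement above: with
 * inc == 1 and qh->ntd == 10, the IOC bit is moved (10 + 1) / 2 == 5
 * descriptors back from the last activated one, so the completion
 * interrupt fires while roughly half of the freshly activated descriptors
 * are still ahead of the core.
 */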
static void init_non_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	dwc_hc_t *hc;
	dwc_otg_host_dma_desc_t *dma_desc;
	dwc_otg_qtd_t *qtd;
	int num_packets, len, n_desc = 0;

	hc = qh->channel;

	/*
	 * Start with hc->xfer_buff as initialized in
	 * assign_and_init_hc(); then, if an SG transfer consists of multiple URBs,
	 * this pointer is re-assigned to the buffer of the currently processed QTD.
	 * For a non-SG request there is always one QTD active.
	 */

	DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {

		if (n_desc) {
			/* SG request - more than 1 QTD */
			hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
			hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
		}

		qtd->n_desc = 0;

		do {
			dma_desc = &qh->desc_list[n_desc];
			len = hc->xfer_len;

			if (len > MAX_DMA_DESC_SIZE)
				len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;

			if (hc->ep_is_in) {
				if (len > 0) {
					num_packets = (len + hc->max_packet - 1) / hc->max_packet;
				} else {
					/* Need 1 packet for transfer length of 0. */
					num_packets = 1;
				}
				/* Always program an integral # of max packets for IN transfers. */
				len = num_packets * hc->max_packet;
			}

			dma_desc->status.b.n_bytes = len;

			qh->n_bytes[n_desc] = len;

			if ((qh->ep_type == UE_CONTROL)
			    && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
				dma_desc->status.b.sup = 1;	/* Setup Packet */

			dma_desc->status.b.a = 1;	/* Active descriptor */
			dma_desc->status.b.sts = 0;

			dma_desc->buf =
			    ((unsigned long)hc->xfer_buff & 0xffffffff);

			/*
			 * Last (or only) descriptor of an IN transfer
			 * with an actual size less than MaxPacket.
			 */
			if (len > hc->xfer_len) {
				hc->xfer_len = 0;
			} else {
				hc->xfer_buff += len;
				hc->xfer_len -= len;
			}

			qtd->n_desc++;
			n_desc++;
		} while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));

		qtd->in_process = 1;

		if (qh->ep_type == UE_CONTROL)
			break;

		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		/* Request Transfer Complete interrupt for the last descriptor */
		qh->desc_list[n_desc - 1].status.b.ioc = 1;
		/* End of List indicator */
		qh->desc_list[n_desc - 1].status.b.eol = 1;

		hc->ntd = n_desc;
	}
}
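
/*
 * Illustrative example of the descriptor split above (assuming the usual
 * 131071-byte MAX_DMA_DESC_SIZE): a 200000-byte bulk IN with a 512-byte
 * max packet is split into a first descriptor of 130560 bytes (255 whole
 * packets, the largest multiple of 512 not exceeding the limit) and a
 * second descriptor programmed for 69632 bytes (136 packets), since IN
 * descriptors are always rounded up to an integral number of max packets.
 */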
/**
 * For Control and Bulk endpoints, initializes the descriptor list
 * and starts the transfer.
 *
 * For Interrupt and Isochronous endpoints, initializes the descriptor list
 * then updates the FrameList, marking the appropriate entries as active.
 * In case of Isochronous, the starting descriptor index is calculated based
 * on the scheduled frame, but only for the first transfer descriptor within
 * a session. The transfer is then started by enabling the channel.
 * For Isochronous endpoints the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to start the transfer on.
 */
void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
	/* Channel is already assigned */
	dwc_hc_t *hc = qh->channel;
	uint8_t skip_frames = 0;

	switch (hc->ep_type) {
	case DWC_OTG_EP_TYPE_CONTROL:
	case DWC_OTG_EP_TYPE_BULK:
		init_non_isoc_dma_desc(hcd, qh);

		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		break;
	case DWC_OTG_EP_TYPE_INTR:
		init_non_isoc_dma_desc(hcd, qh);

		update_frame_list(hcd, qh, 1);

		dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		break;
	case DWC_OTG_EP_TYPE_ISOC:
		if (!qh->ntd)
			skip_frames = recalc_initial_desc_idx(hcd, qh);

		init_isoc_dma_desc(hcd, qh, skip_frames);

		if (!hc->xfer_started) {
			update_frame_list(hcd, qh, 1);

			/*
			 * Always set to max, instead of the actual size.
			 * Otherwise ntd would be changed while the channel
			 * is being enabled. Not recommended.
			 */
			hc->ntd = max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
		}
		break;
	default:
		break;
	}
}
static void complete_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
				    dwc_hc_t * hc,
				    dwc_otg_hc_regs_t * hc_regs,
				    dwc_otg_halt_status_e halt_status)
{
	struct dwc_otg_hcd_iso_packet_desc *frame_desc;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint16_t idx, remain;
	uint8_t urb_compl;

	qh = hc->qh;
	idx = qh->td_first;

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
		    qtd->in_process = 0;
		return;
	} else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
		   (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to stop
		 * queueing transfer requests.
		 */
		int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
		    ? (-DWC_E_IO)
		    : (-DWC_E_OVERFLOW);

		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
			for (idx = 0; idx < qtd->urb->packet_count; idx++) {
				frame_desc = &qtd->urb->iso_descs[idx];
				frame_desc->status = err;
			}
			hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
			dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {

		if (!qtd->in_process)
			break;

		urb_compl = 0;

		do {
			dma_desc = &qh->desc_list[idx];

			frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
			remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;

			if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
				/*
				 * XactError or inability to complete all the
				 * transactions in the scheduled micro-frame/frame,
				 * both indicated by DMA_DESC_STS_PKTERR.
				 */
				qtd->urb->error_count++;
				frame_desc->actual_length = qh->n_bytes[idx] - remain;
				frame_desc->status = -DWC_E_PROTOCOL;
			} else {
				/* Success */
				frame_desc->actual_length = qh->n_bytes[idx] - remain;
				frame_desc->status = 0;
			}

			if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
				/*
				 * urb->status is not used for isoc transfers here.
				 * The individual frame_desc statuses are used instead.
				 */
				hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				/*
				 * This check is necessary because urb_dequeue can be
				 * called from the urb complete callback (a sound driver,
				 * for example). All pending URBs are dequeued there, so
				 * no need for further processing.
				 */
				if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
					return;
				}

				urb_compl = 1;
			}

			qh->ntd--;

			/* Stop if the IOC-requested descriptor was reached */
			if (dma_desc->status.b_isoc.ioc) {
				idx = desclist_idx_inc(idx, qh->interval, hc->speed);
				goto stop_scan;
			}

			idx = desclist_idx_inc(idx, qh->interval, hc->speed);

			if (urb_compl)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}
uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t * hcd,
				       dwc_hc_t * hc,
				       dwc_otg_qtd_t * qtd,
				       dwc_otg_host_dma_desc_t * dma_desc,
				       dwc_otg_halt_status_e halt_status,
				       uint32_t n_bytes, uint8_t * xfer_done)
{
	uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
	dwc_otg_hcd_urb_t *urb = qtd->urb;

	if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
		urb->status = -DWC_E_IO;
		return 1;
	}
	if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
		switch (halt_status) {
		case DWC_OTG_HC_XFER_STALL:
			urb->status = -DWC_E_PIPE;
			break;
		case DWC_OTG_HC_XFER_BABBLE_ERR:
			urb->status = -DWC_E_OVERFLOW;
			break;
		case DWC_OTG_HC_XFER_XACT_ERR:
			urb->status = -DWC_E_PROTOCOL;
			break;
		default:
			DWC_ERROR("%s: Unhandled descriptor error status (%d)\n", __func__,
				  halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status.b.a == 1) {
		DWC_DEBUGPL(DBG_HCDV,
			    "Active descriptor encountered on channel %d\n",
			    hc->hc_num);
		return 0;
	}

	if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
		if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length == urb->length) {
				/*
				 * For the Control Data stage do not set urb->status = 0
				 * to prevent an URB callback. Set it when the Status
				 * phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		if (remain || urb->actual_length == urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}
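
/*
 * Example of the residue arithmetic above: for an IN descriptor programmed
 * with n_bytes == 512 that completes with status.b.n_bytes == 112, the
 * device sent 512 - 112 == 400 bytes; the short packet (remain != 0) also
 * marks the whole transfer as done.
 */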
static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
					dwc_hc_t * hc,
					dwc_otg_hc_regs_t * hc_regs,
					dwc_otg_halt_status_e halt_status)
{
	dwc_otg_hcd_urb_t *urb = NULL;
	dwc_otg_qtd_t *qtd, *qtd_tmp;
	dwc_otg_qh_t *qh;
	dwc_otg_host_dma_desc_t *dma_desc;
	uint32_t n_bytes, n_desc, i;
	uint8_t failed = 0, xfer_done;

	n_desc = 0;

	qh = hc->qh;

	if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
		DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
			qtd->in_process = 0;
		}
		return;
	}

	DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {

		urb = qtd->urb;

		n_bytes = 0;
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			dma_desc = &qh->desc_list[n_desc];

			n_bytes = qh->n_bytes[n_desc];

			failed =
			    update_non_isoc_urb_state_ddma(hcd, hc, qtd,
							   dma_desc,
							   halt_status, n_bytes,
							   &xfer_done);

			if (failed
			    || (xfer_done
				&& (urb->status != -DWC_E_IN_PROGRESS))) {

				hcd->fops->complete(hcd, urb->priv, urb,
						    urb->status);
				dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

				if (failed)
					goto stop_scan;
			} else if (qh->ep_type == UE_CONTROL) {
				if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
					if (urb->length > 0) {
						qtd->control_phase = DWC_OTG_CONTROL_DATA;
					} else {
						qtd->control_phase = DWC_OTG_CONTROL_STATUS;
					}
					DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
				} else if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
					if (xfer_done) {
						qtd->control_phase = DWC_OTG_CONTROL_STATUS;
						DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
					} else if (i + 1 == qtd->n_desc) {
						/*
						 * Last descriptor for the Control data
						 * stage which is not completed yet.
						 */
						dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
					}
				}
			}

			n_desc++;
		}
	}

stop_scan:

	if (qh->ep_type != UE_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC_OTG_HC_XFER_STALL)
			qh->data_toggle = DWC_OTG_HC_PID_DATA0;
		else
			dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
	}

	if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
		hcint_data_t hcint;
		hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
		if (hcint.b.nyet) {
			/*
			 * Got a NYET on the last transaction of the transfer. It
			 * means that the endpoint should be in the PING state at the
			 * beginning of the next transfer.
			 */
			qh->ping_state = 1;
			clear_hc_int(hc_regs, nyet);
		}
	}
}
/**
 * This function is called from interrupt handlers.
 * Scans the descriptor list, updates the URB's status and
 * calls the completion routine for the URB if it's done.
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until
 * the end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 *
 * Calls transaction selection routines to activate pending transfers.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param hc Host channel the transfer is completed on.
 * @param hc_regs Host channel registers.
 * @param halt_status Reason the channel is being halted,
 *		      or just XferComplete for isochronous transfers.
 */
void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t * hcd,
				    dwc_hc_t * hc,
				    dwc_otg_hc_regs_t * hc_regs,
				    dwc_otg_halt_status_e halt_status)
{
	uint8_t continue_isoc_xfer = 0;
	dwc_otg_transaction_type_e tr_type;
	dwc_otg_qh_t *qh = hc->qh;

	if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {

		complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
		    DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {

			/* Halt the channel if session completed */
			if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
				dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
			}

			release_channel_ddma(hcd, qh);
			dwc_otg_hcd_qh_remove(hcd, qh);
		} else {
			/* Keep in the assigned schedule to continue the transfer */
			DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
					   &qh->qh_list_entry);
			continue_isoc_xfer = 1;
		}
		/** @todo Consider the case when the period exceeds the FrameList size.
		 *  The Frame Rollover interrupt should be used.
		 */
	} else {
		/* Scan the descriptor list to complete the URB(s), then release the channel */
		complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

		release_channel_ddma(hcd, qh);
		dwc_otg_hcd_qh_remove(hcd, qh);

		if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
			/* Add back to the inactive non-periodic schedule on normal completion */
			dwc_otg_hcd_qh_add(hcd, qh);
		}
	}

	tr_type = dwc_otg_hcd_select_transactions(hcd);
	if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC_OTG_TRANSACTION_NONE) {
				tr_type = DWC_OTG_TRANSACTION_PERIODIC;
			} else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC) {
				tr_type = DWC_OTG_TRANSACTION_ALL;
			}
		}
		dwc_otg_hcd_queue_transactions(hcd, tr_type);
	}
}
#endif /* DWC_DEVICE_ONLY */