1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
33 #ifndef DWC_DEVICE_ONLY
35 #include "dwc_otg_hcd.h"
36 #include "dwc_otg_regs.h"
38 #include <linux/jiffies.h>
43 extern bool microframe_schedule;
46 * This file contains the implementation of the HCD Interrupt handlers.
49 int fiq_done, int_done;
/* Formats a timestamped debug record (frame number prefix + caller message)
 * into the FIQ trace ring buffer when dbg_lvl matches the request mask or is
 * an error. NOTE(review): interleaved lines (args setup, braces, ring-buffer
 * declarations) are elided in this view — confirm against the full source. */
54 void notrace _fiq_print(FIQDBG_T dbg_lvl, char *fmt, ...)
56 FIQDBG_T dbg_lvl_req = FIQDBG_PORTHUB;
/* 0x408 is the HFNUM register offset; read directly since we may be in FIQ
 * context — presumably cannot use DWC_READ_REG32 here. TODO confirm. */
59 hfnum_data_t hfnum = { .d32 = FIQ_READ(dwc_regs_base + 0x408) };
61 if(dbg_lvl & dbg_lvl_req || dbg_lvl == FIQDBG_ERR)
/* Prefix: frame number (frnum/8), microframe (frnum%8), and elapsed
 * microframe eighths derived from frrem (937 = one eighth of a microframe
 * in 30/60MHz PHY clocks — TODO confirm divisor against PHY config). */
64 snprintf(text, 9, "%4d%d:%d ", hfnum.b.frnum/8, hfnum.b.frnum%8, 8 - hfnum.b.frrem/937);
66 vsnprintf(text+8, 9, fmt, args);
/* Fixed 16-byte records; wptr wraps so the buffer keeps the newest entries. */
69 memcpy(buffer + wptr, text, 16);
70 wptr = (wptr + 16) % sizeof(buffer);
76 /** This function handles interrupts for the HCD. */
77 int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t * dwc_otg_hcd)
81 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
82 gintsts_data_t gintsts;
83 gintmsk_data_t gintmsk;
85 haintmsk_data_t haintmsk;
88 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
/* Snapshot the global interrupt status and mask registers. */
92 gintsts.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintsts);
93 gintmsk.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintmsk);
95 /* Exit from ISR if core is hibernated */
96 if (core_if->hibernation_suspend == 1) {
97 goto exit_handler_routine;
99 DWC_SPINLOCK(dwc_otg_hcd->lock);
100 /* Check if HOST Mode */
101 if (dwc_otg_is_host_mode(core_if)) {
/* Take the FIQ lock while reconciling with the FIQ handler's saved masks;
 * the FIQ may have masked interrupts it wants the IRQ handler to service. */
104 fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
105 /* Pull in from the FIQ's disabled mask */
106 gintmsk.d32 = gintmsk.d32 | ~(dwc_otg_hcd->fiq_state->gintmsk_saved.d32);
/* Reset the saved mask (all-ones == nothing deferred) now that we own it. */
107 dwc_otg_hcd->fiq_state->gintmsk_saved.d32 = ~0;
/* Any cleared bit in the low 16 bits of haintmsk_saved.chint means the FIQ
 * deferred a host-channel interrupt to us: force hcintr processing. */
110 if (fiq_fsm_enable && ( 0x0000FFFF & ~(dwc_otg_hcd->fiq_state->haintmsk_saved.b2.chint))) {
111 gintsts.b.hcintr = 1;
114 /* Danger will robinson: fake a SOF if necessary */
115 if (fiq_fsm_enable && (dwc_otg_hcd->fiq_state->gintmsk_saved.b.sofintr == 1)) {
116 gintsts.b.sofintr = 1;
/* Only service interrupts that are both pending and (effectively) unmasked. */
118 gintsts.d32 &= gintmsk.d32;
121 fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
126 goto exit_handler_routine;
130 // We should be OK doing this because the common interrupts should already have been serviced
131 /* Don't print debug message in the interrupt handler on SOF */
133 if (gintsts.d32 != DWC_SOF_INTR_MASK)
135 DWC_DEBUGPL(DBG_HCDI, "\n");
140 if (gintsts.d32 != DWC_SOF_INTR_MASK)
142 DWC_DEBUGPL(DBG_HCDI,
143 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x core_if=%p\n",
144 gintsts.d32, core_if);
146 hfnum.d32 = DWC_READ_REG32(&dwc_otg_hcd->core_if->host_if->host_global_regs->hfnum);
/* Dispatch each pending cause to its dedicated handler, OR-ing results into
 * retval (retval's declaration is elided from this view — TODO confirm). */
147 if (gintsts.b.sofintr) {
148 retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd);
151 if (gintsts.b.rxstsqlvl) {
153 dwc_otg_hcd_handle_rx_status_q_level_intr
156 if (gintsts.b.nptxfempty) {
158 dwc_otg_hcd_handle_np_tx_fifo_empty_intr
161 if (gintsts.b.i2cintr) {
162 /** @todo Implement i2cintr handler. */
164 if (gintsts.b.portintr) {
/* Local gintmsk shadows the outer one on purpose: only the portintr bit is
 * re-enabled after the port handler runs. */
166 gintmsk_data_t gintmsk = { .b.portintr = 1};
167 retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd);
170 fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
171 DWC_MODIFY_REG32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
172 fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
175 DWC_MODIFY_REG32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
178 if (gintsts.b.hcintr) {
179 retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd);
181 if (gintsts.b.ptxfempty) {
183 dwc_otg_hcd_handle_perio_tx_fifo_empty_intr
188 if (gintsts.d32 != DWC_SOF_INTR_MASK)
191 DWC_DEBUGPL(DBG_HCDI,
192 "DWC OTG HCD Finished Servicing Interrupts\n");
193 DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
194 DWC_READ_REG32(&global_regs->gintsts));
195 DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
196 DWC_READ_REG32(&global_regs->gintmsk));
202 if (gintsts.d32 != DWC_SOF_INTR_MASK)
204 DWC_DEBUGPL(DBG_HCDI, "\n");
/* Common exit: re-check whether the FIQ raised new work while we ran, and if
 * not, clear/acknowledge the MPHI software interrupt so it can fire again. */
209 exit_handler_routine:
211 gintmsk_data_t gintmsk_new;
212 haintmsk_data_t haintmsk_new;
214 fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
/* Volatile re-reads: the FIQ can modify these saved masks concurrently. */
215 gintmsk_new.d32 = *(volatile uint32_t *)&dwc_otg_hcd->fiq_state->gintmsk_saved.d32;
217 haintmsk_new.d32 = *(volatile uint32_t *)&dwc_otg_hcd->fiq_state->haintmsk_saved.d32;
219 haintmsk_new.d32 = 0x0000FFFF;
221 /* The FIQ could have sneaked another interrupt in. If so, don't clear MPHI */
222 if ((gintmsk_new.d32 == ~0) && (haintmsk_new.d32 == 0x0000FFFF)) {
/* Two MPHI variants: newer parts have a dedicated swirq_clr register,
 * older ones acknowledge via bit 16 of intstat. */
223 if (dwc_otg_hcd->fiq_state->mphi_regs.swirq_clr) {
224 DWC_WRITE_REG32(dwc_otg_hcd->fiq_state->mphi_regs.swirq_clr, 1);
226 DWC_WRITE_REG32(dwc_otg_hcd->fiq_state->mphi_regs.intstat, (1<<16));
/* Periodically (every 50 MPHI interrupts) reset the MPHI block: disable
 * (bit31+bit16), spin until it reports idle (bit 17), then re-enable. */
228 if (dwc_otg_hcd->fiq_state->mphi_int_count >= 50) {
229 fiq_print(FIQDBG_INT, dwc_otg_hcd->fiq_state, "MPHI CLR");
230 DWC_WRITE_REG32(dwc_otg_hcd->fiq_state->mphi_regs.ctrl, ((1<<31) + (1<<16)));
231 while (!(DWC_READ_REG32(dwc_otg_hcd->fiq_state->mphi_regs.ctrl) & (1 << 17)))
233 DWC_WRITE_REG32(dwc_otg_hcd->fiq_state->mphi_regs.ctrl, (1<<31));
234 dwc_otg_hcd->fiq_state->mphi_int_count = 0;
238 haintmsk.d32 = DWC_READ_REG32(&core_if->host_if->host_global_regs->haintmsk);
239 /* Re-enable interrupts that the FIQ masked (first time round) */
240 FIQ_WRITE(dwc_otg_hcd->fiq_state->dwc_regs_base + GINTMSK, gintmsk.d32);
241 fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
244 if ((jiffies / HZ) > last_time) {
246 //dwc_list_link_t *cur;
247 /* Once a second output the fiq and irq numbers, useful for debug */
248 last_time = jiffies / HZ;
249 // DWC_WARN("np_kick=%d AHC=%d sched_frame=%d cur_frame=%d int_done=%d fiq_done=%d",
250 // dwc_otg_hcd->fiq_state->kick_np_queues, dwc_otg_hcd->available_host_channels,
251 // dwc_otg_hcd->fiq_state->next_sched_frame, hfnum.b.frnum, int_done, dwc_otg_hcd->fiq_state->fiq_done);
252 //printk(KERN_WARNING "Periodic queues:\n");
256 DWC_SPINUNLOCK(dwc_otg_hcd->lock);
260 #ifdef DWC_TRACK_MISSED_SOFS
262 #warning Compiling code to track missed SOFs
263 #define FRAME_NUM_ARRAY_SIZE 1000
265 * This function is for debug only.
/* Debug aid: records (current, previous) frame-number pairs whenever the
 * frame counter does not advance by exactly one, i.e. a SOF was missed.
 * When the capture array fills, the whole table is printed exactly once. */
267 static inline void track_missed_sofs(uint16_t curr_frame_number)
269 static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
270 static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
271 static int frame_num_idx = 0;
272 static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
273 static int dumped_frame_num_array = 0;
275 if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
/* Frame numbers wrap at DWC_HFNUM_MAX_FRNUM, hence the masked increment. */
276 if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) !=
278 frame_num_array[frame_num_idx] = curr_frame_number;
279 last_frame_num_array[frame_num_idx++] = last_frame_num;
281 } else if (!dumped_frame_num_array) {
283 DWC_PRINTF("Frame Last Frame\n");
284 DWC_PRINTF("----- ----------\n");
285 for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
286 DWC_PRINTF("0x%04x 0x%04x\n",
287 frame_num_array[i], last_frame_num_array[i]);
/* One-shot dump: never print the table again after it has been emitted. */
289 dumped_frame_num_array = 1;
291 last_frame_num = curr_frame_number;
296 * Handles the start-of-frame interrupt in host mode. Non-periodic
297 * transactions may be queued to the DWC_otg controller for the current
298 * (micro)frame. Periodic transactions may be queued to the controller for the
301 int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t * hcd)
304 gintsts_data_t gintsts = { .d32 = 0 };
305 dwc_list_link_t *qh_entry;
307 dwc_otg_transaction_type_e tr_type;
308 int did_something = 0;
/* -1 sentinel: "no periodic QH awaiting a future frame" until proven otherwise. */
309 int32_t next_sched_frame = -1;
/* Latch the current (micro)frame number from HFNUM. */
312 DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hfnum);
315 DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
317 hcd->frame_number = hfnum.b.frnum;
/* Accumulate frame-remaining samples — presumably for average frame-timing
 * statistics; consumer not visible here, TODO confirm. */
320 hcd->frrem_accum += hfnum.b.frrem;
321 hcd->frrem_samples++;
324 #ifdef DWC_TRACK_MISSED_SOFS
325 track_missed_sofs(hcd->frame_number);
327 /* Determine whether any periodic QHs should be executed. */
328 qh_entry = DWC_LIST_FIRST(&hcd->periodic_sched_inactive);
329 while (qh_entry != &hcd->periodic_sched_inactive) {
/* Advance before a possible list move so iteration stays valid. */
330 qh = DWC_LIST_ENTRY(qh_entry, dwc_otg_qh_t, qh_list_entry);
331 qh_entry = qh_entry->next;
332 if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) {
335 * Move QH to the ready list to be executed next
338 DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_ready,
/* Track the earliest future sched_frame so the FIQ knows when to wake us. */
345 if(next_sched_frame < 0 || dwc_frame_num_le(qh->sched_frame, next_sched_frame))
347 next_sched_frame = qh->sched_frame;
352 hcd->fiq_state->next_sched_frame = next_sched_frame;
354 tr_type = dwc_otg_hcd_select_transactions(hcd);
355 if (tr_type != DWC_OTG_TRANSACTION_NONE) {
356 dwc_otg_hcd_queue_transactions(hcd, tr_type);
360 /* Clear interrupt - but do not trample on the FIQ sof */
361 if (!fiq_fsm_enable) {
362 gintsts.b.sofintr = 1;
363 DWC_WRITE_REG32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32);
368 /** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
369 * least one packet in the Rx FIFO. The packets are moved from the FIFO to
370 * memory if the DWC_otg controller is operating in Slave mode. */
371 int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t * dwc_otg_hcd)
373 host_grxsts_data_t grxsts;
376 DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
/* Pop the top status entry from GRXSTSP (read-and-pop register). */
379 DWC_READ_REG32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp);
/* Map the status entry's channel number to our host-channel descriptor. */
381 hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
383 DWC_ERROR("Unable to get corresponding channel\n");
388 DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum);
389 DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt);
390 DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid,
392 DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts);
394 switch (grxsts.b.pktsts) {
395 case DWC_GRXSTS_PKTSTS_IN:
396 /* Read the data into the host buffer. */
397 if (grxsts.b.bcnt > 0) {
/* Slave-mode only: drain bcnt bytes from the Rx FIFO into the HC buffer. */
398 dwc_otg_read_packet(dwc_otg_hcd->core_if,
399 hc->xfer_buff, grxsts.b.bcnt);
401 /* Update the HC fields for the next packet received. */
402 hc->xfer_count += grxsts.b.bcnt;
403 hc->xfer_buff += grxsts.b.bcnt;
406 case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
407 case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
408 case DWC_GRXSTS_PKTSTS_CH_HALTED:
409 /* Handled in interrupt, just ignore data */
412 DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n",
420 /** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
421 * data packets may be written to the FIFO for OUT transfers. More requests
422 * may be written to the non-periodic request queue for IN transfers. This
423 * interrupt is enabled only in Slave mode. */
/* Queue more non-periodic transactions now that FIFO/queue space is free. */
424 int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t * dwc_otg_hcd)
426 DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
427 dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
428 DWC_OTG_TRANSACTION_NON_PERIODIC);
432 /** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
433 * packets may be written to the FIFO for OUT transfers. More requests may be
434 * written to the periodic request queue for IN transfers. This interrupt is
435 * enabled only in Slave mode. */
/* Queue more periodic transactions now that FIFO/queue space is free. */
436 int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t * dwc_otg_hcd)
438 DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
439 dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
440 DWC_OTG_TRANSACTION_PERIODIC);
444 /** There are multiple conditions that can cause a port interrupt. This function
445 * determines which interrupt conditions have occurred and handles them
447 int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t * dwc_otg_hcd)
451 hprt0_data_t hprt0_modify;
/* Two reads of HPRT0: one snapshot to test, one working copy to write back. */
453 hprt0.d32 = DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
454 hprt0_modify.d32 = DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
456 /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
/* HPRT0's change bits are write-1-to-clear, and writing 0 to prtena would
 * disable the port — so zero all of them in the working copy first and set
 * only the bits to acknowledge. */
459 hprt0_modify.b.prtena = 0;
460 hprt0_modify.b.prtconndet = 0;
461 hprt0_modify.b.prtenchng = 0;
462 hprt0_modify.b.prtovrcurrchng = 0;
464 /* Port Connect Detected
465 * Set flag and clear if detected */
466 if (dwc_otg_hcd->core_if->hibernation_suspend == 1) {
467 // Dont modify port status if we are in hibernation state
/* Just acknowledge the change bits and return without touching HCD state. */
468 hprt0_modify.b.prtconndet = 1;
469 hprt0_modify.b.prtenchng = 1;
470 DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
471 hprt0.d32 = DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
475 if (hprt0.b.prtconndet) {
476 /** @todo - check if steps performed in 'else' block should be perfromed regardles adp */
477 if (dwc_otg_hcd->core_if->adp_enable &&
478 dwc_otg_hcd->core_if->adp.vbuson_timer_started == 1) {
479 DWC_PRINTF("PORT CONNECT DETECTED ----------------\n");
480 DWC_TIMER_CANCEL(dwc_otg_hcd->core_if->adp.vbuson_timer);
481 dwc_otg_hcd->core_if->adp.vbuson_timer_started = 0;
482 /* TODO - check if this is required, as
483 * host initialization was already performed
484 * after initial ADP probing
486 /*dwc_otg_hcd->core_if->adp.vbuson_timer_started = 0;
487 dwc_otg_core_init(dwc_otg_hcd->core_if);
488 dwc_otg_enable_global_interrupts(dwc_otg_hcd->core_if);
489 cil_hcd_start(dwc_otg_hcd->core_if);*/
492 DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
493 "Port Connect Detected--\n", hprt0.d32);
/* Flags consumed by the hub driver's port-status path. */
494 dwc_otg_hcd->flags.b.port_connect_status_change = 1;
495 dwc_otg_hcd->flags.b.port_connect_status = 1;
496 hprt0_modify.b.prtconndet = 1;
498 /* B-Device has connected, Delete the connection timer. */
499 DWC_TIMER_CANCEL(dwc_otg_hcd->conn_timer);
501 /* The Hub driver asserts a reset when it sees port connect
502 * status change flag */
506 /* Port Enable Changed
507 * Clear if detected - Set internal flag if disabled */
508 if (hprt0.b.prtenchng) {
509 DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
510 "Port Enable Changed--\n", hprt0.d32);
511 hprt0_modify.b.prtenchng = 1;
512 if (hprt0.b.prtena == 1) {
/* Port just became enabled: record the negotiated speed, recompute the
 * frame interval, and adjust the PHY clock for FS/LS low-power if asked. */
515 dwc_otg_core_params_t *params =
516 dwc_otg_hcd->core_if->core_params;
517 dwc_otg_core_global_regs_t *global_regs =
518 dwc_otg_hcd->core_if->core_global_regs;
519 dwc_otg_host_if_t *host_if =
520 dwc_otg_hcd->core_if->host_if;
522 dwc_otg_hcd->flags.b.port_speed = hprt0.b.prtspd;
523 if (microframe_schedule)
524 init_hcd_usecs(dwc_otg_hcd);
526 /* Every time when port enables calculate
529 hfir.d32 = DWC_READ_REG32(&host_if->host_global_regs->hfir);
530 hfir.b.frint = calc_frame_interval(dwc_otg_hcd->core_if);
531 DWC_WRITE_REG32(&host_if->host_global_regs->hfir, hfir.d32);
533 /* Check if we need to adjust the PHY clock speed for
534 * low power and adjust it */
535 if (params->host_support_fs_ls_low_power) {
536 gusbcfg_data_t usbcfg;
539 DWC_READ_REG32(&global_regs->gusbcfg);
541 if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED
543 DWC_HPRT0_PRTSPD_FULL_SPEED) {
548 if (usbcfg.b.phylpwrclksel == 0) {
549 /* Set PHY low power clock select for FS/LS devices */
550 usbcfg.b.phylpwrclksel = 1;
552 (&global_regs->gusbcfg,
559 (&host_if->host_global_regs->hcfg);
/* LS device on an LS PHY clock param => run HCFG at 6 MHz; otherwise 48 MHz.
 * A change of fslspclksel requires the port reset scheduled below —
 * presumably via reset_tasklet; TODO confirm elided branches. */
561 if (hprt0.b.prtspd ==
562 DWC_HPRT0_PRTSPD_LOW_SPEED
563 && params->host_ls_low_power_phy_clk
565 DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)
569 "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
570 if (hcfg.b.fslspclksel !=
575 (&host_if->host_global_regs->hcfg,
582 "FS_PHY programming HCFG to 48 MHz ()\n");
583 if (hcfg.b.fslspclksel !=
588 (&host_if->host_global_regs->hcfg,
/* Not FS/LS: make sure the PHY low-power clock select is off again. */
597 if (usbcfg.b.phylpwrclksel == 1) {
598 usbcfg.b.phylpwrclksel = 0;
600 (&global_regs->gusbcfg,
/* Defer the port reset out of interrupt context. */
607 DWC_TASK_SCHEDULE(dwc_otg_hcd->reset_tasklet);
612 /* Port has been enabled set the reset change flag */
613 dwc_otg_hcd->flags.b.port_reset_change = 1;
616 dwc_otg_hcd->flags.b.port_enable_change = 1;
621 /** Overcurrent Change Interrupt */
622 if (hprt0.b.prtovrcurrchng) {
623 DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
624 "Port Overcurrent Changed--\n", hprt0.d32);
625 dwc_otg_hcd->flags.b.port_over_current_change = 1;
626 hprt0_modify.b.prtovrcurrchng = 1;
630 /* Clear Port Interrupts */
/* Write back the working copy: acknowledges exactly the handled change bits. */
631 DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
636 /** This interrupt indicates that one or more host channels has a pending
637 * interrupt. There are multiple conditions that can cause each host channel
638 * interrupt. This function determines which conditions have occurred for each
639 * host channel interrupt and handles them appropriately. */
640 int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t * dwc_otg_hcd)
644 haint_data_t haint = { .d32 = 0 } ;
646 /* Clear appropriate bits in HCINTn to clear the interrupt bit in
650 haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if);
652 // Overwrite with saved interrupts from fiq handler
655 /* check the mask? */
/* Merge in channels the FIQ masked for deferred servicing (cleared bits in
 * haintmsk_saved mean "pending for IRQ"), then reset the saved mask under
 * the FIQ lock so FIQ and IRQ never race on it. */
657 fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
658 haint.b2.chint |= ~(dwc_otg_hcd->fiq_state->haintmsk_saved.b2.chint);
659 dwc_otg_hcd->fiq_state->haintmsk_saved.b2.chint = ~0;
660 fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
/* Service every channel whose bit is set, one handler call per channel. */
664 for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) {
665 if (haint.b2.chint & (1 << i)) {
666 retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i);
674 * Gets the actual length of a transfer after the transfer halts. _halt_status
675 * holds the reason for the halt.
677 * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE,
678 * *short_read is set to 1 upon return if less than the requested
679 * number of bytes were transferred. Otherwise, *short_read is set to 0 upon
680 * return. short_read may also be NULL on entry, in which case it remains
683 static uint32_t get_actual_xfer_length(dwc_hc_t * hc,
684 dwc_otg_hc_regs_t * hc_regs,
686 dwc_otg_halt_status_e halt_status,
689 hctsiz_data_t hctsiz;
/* Presumably initializes *short_read to 0 here (body elided) — TODO confirm. */
692 if (short_read != NULL) {
695 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
697 if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
/* IN complete: bytes actually moved = programmed length minus residue. */
699 length = hc->xfer_len - hctsiz.b.xfersize;
700 if (short_read != NULL) {
/* Any residue on a completed IN transfer means the device sent less
 * than requested — report it as a short read. */
701 *short_read = (hctsiz.b.xfersize != 0);
703 } else if (hc->qh->do_split) {
704 //length = split_out_xfersize[hc->hc_num];
/* OUT split: use the byte count recorded at SSPLIT time, since hctsiz
 * does not reflect split progress. */
705 length = qtd->ssplit_out_xfer_count;
707 length = hc->xfer_len;
711 * Must use the hctsiz.pktcnt field to determine how much data
712 * has been transferred. This field reflects the number of
713 * packets that have been transferred via the USB. This is
714 * always an integral number of packets if the transfer was
715 * halted before its normal completion. (Can't use the
716 * hctsiz.xfersize field because that reflects the number of
717 * bytes transferred via the AHB, not the USB).
720 (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet;
727 * Updates the state of the URB after a Transfer Complete interrupt on the
728 * host channel. Updates the actual_length field of the URB based on the
729 * number of bytes transferred via the host channel. Sets the URB status
730 * if the data transfer is finished.
732 * @return 1 if the data transfer specified by the URB is completely finished,
735 static int update_urb_state_xfer_comp(dwc_hc_t * hc,
736 dwc_otg_hc_regs_t * hc_regs,
737 dwc_otg_hcd_urb_t * urb,
745 xfer_length = get_actual_xfer_length(hc, hc_regs, qtd,
746 DWC_OTG_HC_XFER_COMPLETE,
/* Defensive clamp: never let actual_length exceed the URB buffer length
 * (printk_once so a misbehaving device can't flood the log). */
749 if (urb->actual_length + xfer_length > urb->length) {
750 printk_once(KERN_DEBUG "dwc_otg: DEVICE:%03d : %s:%d:trimming xfer length\n",
751 hc->dev_addr, __func__, __LINE__);
752 xfer_length = urb->length - urb->actual_length;
755 /* non DWORD-aligned buffer case handling. */
/* IN data landed in the bounce buffer; copy it back to the caller's buffer. */
756 if (hc->align_buff && xfer_length && hc->ep_is_in) {
757 dwc_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
761 urb->actual_length += xfer_length;
/* Bulk OUT that filled the buffer with an exact multiple of max_packet and
 * has URB_SEND_ZERO_PACKET set still owes a ZLP — not done yet. */
763 if (xfer_length && (hc->ep_type == DWC_OTG_EP_TYPE_BULK) &&
764 (urb->flags & URB_SEND_ZERO_PACKET)
765 && (urb->actual_length == urb->length)
766 && !(urb->length % hc->max_packet)) {
768 } else if (short_read || urb->actual_length >= urb->length) {
775 hctsiz_data_t hctsiz;
776 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
777 DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
778 __func__, (hc->ep_is_in ? "IN" : "OUT"),
780 DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", hc->xfer_len);
781 DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n",
783 DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
785 DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n",
787 DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n",
788 short_read, xfer_done);
796 * Save the starting data toggle for the next transfer. The data toggle is
797 * saved in the QH for non-control transfers and it's saved in the QTD for
800 void dwc_otg_hcd_save_data_toggle(dwc_hc_t * hc,
801 dwc_otg_hc_regs_t * hc_regs, dwc_otg_qtd_t * qtd)
803 hctsiz_data_t hctsiz;
/* Read back the PID the core will use for the next packet from HCTSIZ. */
804 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
806 if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
/* Non-control: toggle lives on the QH, shared across that endpoint's QTDs. */
807 dwc_otg_qh_t *qh = hc->qh;
808 if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
809 qh->data_toggle = DWC_OTG_HC_PID_DATA0;
811 qh->data_toggle = DWC_OTG_HC_PID_DATA1;
/* Control: toggle is per-QTD since each control transfer manages its own
 * DATA0/DATA1 sequence. */
814 if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
815 qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
817 qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
823 * Updates the state of an Isochronous URB when the transfer is stopped for
824 * any reason. The fields of the current entry in the frame descriptor array
825 * are set based on the transfer state and the input _halt_status. Completes
826 * the Isochronous URB if all the URB frames have been completed.
828 * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
829 * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
831 static dwc_otg_halt_status_e
832 update_isoc_urb_state(dwc_otg_hcd_t * hcd,
834 dwc_otg_hc_regs_t * hc_regs,
835 dwc_otg_qtd_t * qtd, dwc_otg_halt_status_e halt_status)
837 dwc_otg_hcd_urb_t *urb = qtd->urb;
838 dwc_otg_halt_status_e ret_val = halt_status;
839 struct dwc_otg_hcd_iso_packet_desc *frame_desc;
/* Work on the descriptor for the frame this channel just serviced. */
841 frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
842 switch (halt_status) {
843 case DWC_OTG_HC_XFER_COMPLETE:
844 frame_desc->status = 0;
845 frame_desc->actual_length =
846 get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
848 /* non DWORD-aligned buffer case handling. */
/* IN data arrived in the bounce buffer; copy into the URB at this frame's
 * offset plus any split progress. */
849 if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
850 dwc_memcpy(urb->buf + frame_desc->offset + qtd->isoc_split_offset,
851 hc->qh->dw_align_buf, frame_desc->actual_length);
855 case DWC_OTG_HC_XFER_FRAME_OVERRUN:
/* Elided branch selects between the two statuses — presumably on ep
 * direction; TODO confirm against full source. */
858 frame_desc->status = -DWC_E_NO_STREAM_RES;
860 frame_desc->status = -DWC_E_COMMUNICATION;
862 frame_desc->actual_length = 0;
864 case DWC_OTG_HC_XFER_BABBLE_ERR:
866 frame_desc->status = -DWC_E_OVERFLOW;
867 /* Don't need to update actual_length in this case. */
869 case DWC_OTG_HC_XFER_XACT_ERR:
871 frame_desc->status = -DWC_E_PROTOCOL;
872 frame_desc->actual_length =
873 get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
875 /* non DWORD-aligned buffer case handling. */
876 if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
877 dwc_memcpy(urb->buf + frame_desc->offset + qtd->isoc_split_offset,
878 hc->qh->dw_align_buf, frame_desc->actual_length);
880 /* Skip whole frame */
/* A transaction error on an IN isoc split abandons the rest of this frame:
 * reset the split state so the next frame starts clean. */
881 if (hc->qh->do_split && (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) &&
882 hc->ep_is_in && hcd->core_if->dma_enable) {
883 qtd->complete_split = 0;
884 qtd->isoc_split_offset = 0;
889 DWC_ASSERT(1, "Unhandled _halt_status (%d)\n", halt_status);
/* Advance the frame index; completing the last frame completes the URB. */
892 if (++qtd->isoc_frame_index == urb->packet_count) {
894 * urb->status is not used for isoc transfers.
895 * The individual frame_desc statuses are used instead.
897 hcd->fops->complete(hcd, urb->priv, urb, 0);
898 ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
900 ret_val = DWC_OTG_HC_XFER_COMPLETE;
906 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
907 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
908 * still linked to the QH, the QH is added to the end of the inactive
909 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
910 * schedule if no more QTDs are linked to the QH.
912 static void deactivate_qh(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, int free_qtd)
914 int continue_split = 0;
917 DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);
919 qtd = DWC_CIRCLEQ_FIRST(&qh->qtd_list);
/* A split transaction in progress (awaiting CSPLIT, or mid/end isoc split
 * position) must keep its schedule slot — flag continue_split for the QH
 * deactivation below. Elided lines presumably set continue_split = 1 in
 * these branches; TODO confirm. */
921 if (qtd->complete_split) {
923 } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID ||
924 qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) {
929 dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh)
;
934 dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split);
938 * Releases a host channel for use by other transfers. Attempts to select and
939 * queue more transactions since at least one host channel is available.
941 * @param hcd The HCD state structure.
942 * @param hc The host channel to release.
943 * @param qtd The QTD associated with the host channel. This QTD may be freed
944 * if the transfer is complete or an error has occurred.
945 * @param halt_status Reason the channel is being released. This status
946 * determines the actions taken by this function.
948 static void release_channel(dwc_otg_hcd_t * hcd,
951 dwc_otg_halt_status_e halt_status)
953 dwc_otg_transaction_type_e tr_type;
958 DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d, xfer_len %d\n",
959 __func__, hc->hc_num, halt_status, hc->xfer_len);
/* fiq_fsm OUT isoc splits mid-frame: elided code presumably adjusts release
 * handling so the remaining split packets are not lost — TODO confirm. */
961 if(fiq_fsm_enable && hc->do_split) {
962 if(!hc->ep_is_in && hc->ep_type == UE_ISOCHRONOUS) {
963 if(hc->xact_pos == DWC_HCSPLIT_XACTPOS_MID ||
964 hc->xact_pos == DWC_HCSPLIT_XACTPOS_BEGIN) {
/* Decide whether the QTD should be freed (free_qtd — declaration elided)
 * based on why the channel halted. */
970 switch (halt_status) {
971 case DWC_OTG_HC_XFER_URB_COMPLETE:
974 case DWC_OTG_HC_XFER_AHB_ERR:
975 case DWC_OTG_HC_XFER_STALL:
976 case DWC_OTG_HC_XFER_BABBLE_ERR:
979 case DWC_OTG_HC_XFER_XACT_ERR:
/* Third consecutive transaction error: give up and fail the URB. */
980 if (qtd->error_count >= 3) {
981 DWC_DEBUGPL(DBG_HCDV,
982 " Complete URB with transaction error\n");
984 qtd->urb->status = -DWC_E_PROTOCOL;
985 hcd->fops->complete(hcd, qtd->urb->priv,
986 qtd->urb, -DWC_E_PROTOCOL);
991 case DWC_OTG_HC_XFER_URB_DEQUEUE:
993 * The QTD has already been removed and the QH has been
994 * deactivated. Don't want to do anything except release the
995 * host channel and try to queue more transfers.
998 case DWC_OTG_HC_XFER_NO_HALT_STATUS:
1001 case DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE:
1002 DWC_DEBUGPL(DBG_HCDV,
1003 " Complete URB with I/O error\n");
1005 qtd->urb->status = -DWC_E_IO;
1006 hcd->fops->complete(hcd, qtd->urb->priv,
1007 qtd->urb, -DWC_E_IO);
1014 deactivate_qh(hcd, hc->qh, free_qtd);
1018 * Release the host channel for use by other transfers. The cleanup
1019 * function clears the channel interrupt enables and conditions, so
1020 * there's no need to clear the Channel Halted interrupt separately.
/* Reset the per-channel FIQ state machine unless it was in passthrough. */
1022 if (fiq_fsm_enable && hcd->fiq_state->channel[hc->hc_num].fsm != FIQ_PASSTHROUGH)
1023 dwc_otg_cleanup_fiq_channel(hcd, hc->hc_num);
1024 dwc_otg_hc_cleanup(hcd->core_if, hc);
1025 DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
/* Legacy (non-microframe) accounting: only non-periodic channel counts are
 * returned here; periodic reservations are released at deschedule time. */
1027 if (!microframe_schedule) {
1028 switch (hc->ep_type) {
1029 case DWC_OTG_EP_TYPE_CONTROL:
1030 case DWC_OTG_EP_TYPE_BULK:
1031 hcd->non_periodic_channels--;
1036 * Don't release reservations for periodic channels here.
1037 * That's done when a periodic transfer is descheduled (i.e.
1038 * when the QH is removed from the periodic schedule).
1043 hcd->available_host_channels++;
1044 fiq_print(FIQDBG_INT, hcd->fiq_state, "AHC = %d ", hcd->available_host_channels);
1047 /* Try to queue more transfers now that there's a free channel. */
1048 tr_type = dwc_otg_hcd_select_transactions(hcd);
1049 if (tr_type != DWC_OTG_TRANSACTION_NONE) {
1050 dwc_otg_hcd_queue_transactions(hcd, tr_type);
1055 * Halts a host channel. If the channel cannot be halted immediately because
1056 * the request queue is full, this function ensures that the FIFO empty
1057 * interrupt for the appropriate queue is enabled so that the halt request can
1058 * be queued when there is space in the request queue.
1060 * This function may also be called in DMA mode. In that case, the channel is
1061 * simply released since the core always halts the channel automatically in
1064 static void halt_channel(dwc_otg_hcd_t * hcd,
1066 dwc_otg_qtd_t * qtd, dwc_otg_halt_status_e halt_status)
/* DMA mode: the core halts the channel itself, so just release it. */
1068 if (hcd->core_if->dma_enable) {
1069 release_channel(hcd, hc, qtd, halt_status);
1073 /* Slave mode processing... */
1074 dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
/* Halt request could not go out because the request queue was full: enable
 * the matching Tx-FIFO-empty interrupt so the halt gets queued later. */
1076 if (hc->halt_on_queue) {
1077 gintmsk_data_t gintmsk = {.d32 = 0 };
1078 dwc_otg_core_global_regs_t *global_regs;
1079 global_regs = hcd->core_if->core_global_regs;
1081 if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
1082 hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
1084 * Make sure the Non-periodic Tx FIFO empty interrupt
1085 * is enabled so that the non-periodic schedule will
1088 gintmsk.b.nptxfempty = 1;
/* GINTMSK is shared with the FIQ; update it with FIQs disabled and the
 * FIQ spinlock held. */
1090 local_fiq_disable();
1091 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
1092 DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
1093 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
1096 DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
1100 * Move the QH from the periodic queued schedule to
1101 * the periodic assigned schedule. This allows the
1102 * halt to be queued when the periodic schedule is
1105 DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
1106 &hc->qh->qh_list_entry);
1109 * Make sure the Periodic Tx FIFO Empty interrupt is
1110 * enabled so that the periodic schedule will be
1113 gintmsk.b.ptxfempty = 1;
1115 local_fiq_disable();
1116 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
1117 DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
1118 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
1121 DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
1128 * Performs common cleanup for non-periodic transfers after a Transfer
1129 * Complete interrupt. This function should be called after any endpoint type
1130 * specific handling is finished to release the host channel.
1132 static void complete_non_periodic_xfer(dwc_otg_hcd_t * hcd,
1134 dwc_otg_hc_regs_t * hc_regs,
1135 dwc_otg_qtd_t * qtd,
1136 dwc_otg_halt_status_e halt_status)
/* A clean completion resets the consecutive-error counter for this QTD. */
1140 qtd->error_count = 0;
1142 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
1145 * Got a NYET on the last transaction of the transfer. This
1146 * means that the endpoint should be in the PING state at the
1147 * beginning of the next transfer.
1149 hc->qh->ping_state = 1;
1150 clear_hc_int(hc_regs, nyet);
1154 * Always halt and release the host channel to make it available for
1155 * more transfers. There may still be more phases for a control
1156 * transfer or more data packets for a bulk transfer at this point,
1157 * but the host channel is still halted. A channel will be reassigned
1158 * to the transfer when the non-periodic schedule is processed after
1159 * the channel is released. This allows transactions to be queued
1160 * properly via dwc_otg_hcd_queue_transactions, which also enables the
1161 * Tx FIFO Empty interrupt if necessary.
1165 * IN transfers in Slave mode require an explicit disable to
1166 * halt the channel. (In DMA mode, this call simply releases
/* IN path (condition elided in this view — presumably if (hc->ep_is_in)). */
1169 halt_channel(hcd, hc, qtd, halt_status);
1172 * The channel is automatically disabled by the core for OUT
1173 * transfers in Slave mode.
1175 release_channel(hcd, hc, qtd, halt_status);
1180 * Performs common cleanup for periodic transfers after a Transfer Complete
1181 * interrupt. This function should be called after any endpoint type specific
1182 * handling is finished to release the host channel.
/* Resets the QTD error count. An OUT transfer, or an IN transfer with no
 * packets remaining (hctsiz.pktcnt == 0), is halted by the core itself and
 * only needs the channel released; otherwise an explicit halt is requested
 * to flush any outstanding requests from the periodic Tx queue. */
1184 static void complete_periodic_xfer(dwc_otg_hcd_t * hcd,
1186 dwc_otg_hc_regs_t * hc_regs,
1187 dwc_otg_qtd_t * qtd,
1188 dwc_otg_halt_status_e halt_status)
1190 hctsiz_data_t hctsiz;
1191 qtd->error_count = 0;
1193 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
1194 if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) {
1195 /* Core halts channel in these cases. */
1196 release_channel(hcd, hc, qtd, halt_status);
1198 /* Flush any outstanding requests from the Tx queue. */
1199 halt_channel(hcd, hc, qtd, halt_status);
/*
 * Handles Transfer Complete for an isochronous IN split (CSPLIT):
 * accumulates the received length into the current iso frame descriptor,
 * advances to the next frame once the descriptor is full, and completes
 * the URB when all packet_count frames are done.
 * Always returns 1, indicating the channel was released.
 */
1203 static int32_t handle_xfercomp_isoc_split_in(dwc_otg_hcd_t * hcd,
1205 dwc_otg_hc_regs_t * hc_regs,
1206 dwc_otg_qtd_t * qtd)
1209 struct dwc_otg_hcd_iso_packet_desc *frame_desc;
1210 frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
1212 len = get_actual_xfer_length(hc, hc_regs, qtd,
1213 DWC_OTG_HC_XFER_COMPLETE, NULL);
1216 qtd->complete_split = 0;
1217 qtd->isoc_split_offset = 0;
1220 frame_desc->actual_length += len;
/* Non-DWORD-aligned buffer: data landed in the bounce buffer and must be
 * copied back into the caller's buffer at the current split offset. */
1222 if (hc->align_buff && len)
1223 dwc_memcpy(qtd->urb->buf + frame_desc->offset +
1224 qtd->isoc_split_offset, hc->qh->dw_align_buf, len);
1225 qtd->isoc_split_offset += len;
/* Frame descriptor filled: mark it successful and move to the next one. */
1227 if (frame_desc->length == frame_desc->actual_length) {
1228 frame_desc->status = 0;
1229 qtd->isoc_frame_index++;
1230 qtd->complete_split = 0;
1231 qtd->isoc_split_offset = 0;
/* All frames done -> complete the URB; otherwise keep the QTD pending. */
1234 if (qtd->isoc_frame_index == qtd->urb->packet_count) {
1235 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
1236 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
1238 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
1241 return 1; /* Indicates that channel released */
1245 * Handles a host channel Transfer Complete interrupt. This handler may be
1246 * called in either DMA mode or Slave mode.
/* Dispatches on pipe type (control/bulk/interrupt/isochronous), updates QTD
 * and URB state, completes the URB where appropriate, then hands off to
 * complete_{non_,}periodic_xfer(). Descriptor-DMA mode short-circuits to its
 * own completion path. */
1248 static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t * hcd,
1250 dwc_otg_hc_regs_t * hc_regs,
1251 dwc_otg_qtd_t * qtd)
1254 dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE;
1255 dwc_otg_hcd_urb_t *urb = qtd->urb;
1256 int pipe_type = dwc_otg_hcd_get_pipe_type(&urb->pipe_info);
1258 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1259 "Transfer Complete--\n", hc->hc_num);
/* Descriptor DMA handles completion itself; ISOC keeps xfercomp enabled. */
1261 if (hcd->core_if->dma_desc_enable) {
1262 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, halt_status);
1263 if (pipe_type == UE_ISOCHRONOUS) {
1264 /* Do not disable the interrupt, just clear it */
1265 clear_hc_int(hc_regs, xfercomp);
1268 goto handle_xfercomp_done;
1272 * Handle xfer complete on CSPLIT.
1275 if (hc->qh->do_split) {
1276 if ((hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && hc->ep_is_in
1277 && hcd->core_if->dma_enable) {
1278 if (qtd->complete_split
1279 && handle_xfercomp_isoc_split_in(hcd, hc, hc_regs,
1281 goto handle_xfercomp_done;
1283 qtd->complete_split = 0;
1287 /* Update the QTD and URB states. */
1288 switch (pipe_type) {
1290 switch (qtd->control_phase) {
1291 case DWC_OTG_CONTROL_SETUP:
/* SETUP done: go to DATA phase if the URB carries data, else
 * straight to STATUS. */
1292 if (urb->length > 0) {
1293 qtd->control_phase = DWC_OTG_CONTROL_DATA;
1295 qtd->control_phase = DWC_OTG_CONTROL_STATUS;
1297 DWC_DEBUGPL(DBG_HCDV,
1298 " Control setup transaction done\n");
1299 halt_status = DWC_OTG_HC_XFER_COMPLETE;
1301 case DWC_OTG_CONTROL_DATA:{
1303 update_urb_state_xfer_comp(hc, hc_regs, urb,
1305 if (urb_xfer_done) {
1306 qtd->control_phase =
1307 DWC_OTG_CONTROL_STATUS;
1308 DWC_DEBUGPL(DBG_HCDV,
1309 " Control data transfer done\n");
1311 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1313 halt_status = DWC_OTG_HC_XFER_COMPLETE;
1316 case DWC_OTG_CONTROL_STATUS:
1317 DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n");
1318 if (urb->status == -DWC_E_IN_PROGRESS) {
1321 hcd->fops->complete(hcd, urb->priv, urb, urb->status);
1322 halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1326 complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1329 DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n");
1331 update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
1332 if (urb_xfer_done) {
1333 hcd->fops->complete(hcd, urb->priv, urb, urb->status);
1334 halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1336 halt_status = DWC_OTG_HC_XFER_COMPLETE;
1339 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1340 complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1343 DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n");
1345 update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
1348 * Interrupt URB is done on the first transfer complete
1351 if (urb_xfer_done) {
1352 hcd->fops->complete(hcd, urb->priv, urb, urb->status);
1353 halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1355 halt_status = DWC_OTG_HC_XFER_COMPLETE;
1358 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1359 complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1361 case UE_ISOCHRONOUS:
1362 DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n");
1363 if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) {
1365 update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1366 DWC_OTG_HC_XFER_COMPLETE);
1368 complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1372 handle_xfercomp_done:
1373 disable_hc_int(hc_regs, xfercompl);
1379 * Handles a host channel STALL interrupt. This handler may be called in
1380 * either DMA mode or Slave mode.
/* Completes the URB with -DWC_E_PIPE for control, bulk, and interrupt pipes
 * (ISOC does not take this path in the visible code), resets the data toggle
 * for bulk/interrupt in anticipation of CLEAR_FEATURE(ENDPOINT_HALT), and
 * halts the channel with DWC_OTG_HC_XFER_STALL. */
1382 static int32_t handle_hc_stall_intr(dwc_otg_hcd_t * hcd,
1384 dwc_otg_hc_regs_t * hc_regs,
1385 dwc_otg_qtd_t * qtd)
1387 dwc_otg_hcd_urb_t *urb = qtd->urb;
1388 int pipe_type = dwc_otg_hcd_get_pipe_type(&urb->pipe_info);
1390 DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1391 "STALL Received--\n", hc->hc_num);
1393 if (hcd->core_if->dma_desc_enable) {
1394 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, DWC_OTG_HC_XFER_STALL);
1395 goto handle_stall_done;
1398 if (pipe_type == UE_CONTROL) {
1399 hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_PIPE);
1402 if (pipe_type == UE_BULK || pipe_type == UE_INTERRUPT) {
1403 hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_PIPE);
1405 * USB protocol requires resetting the data toggle for bulk
1406 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1407 * setup command is issued to the endpoint. Anticipate the
1408 * CLEAR_FEATURE command since a STALL has occurred and reset
1409 * the data toggle now.
1411 hc->qh->data_toggle = 0;
1414 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL);
1417 disable_hc_int(hc_regs, stall);
1423 * Updates the state of the URB when a transfer has been stopped due to an
1424 * abnormal condition before the transfer completes. Modifies the
1425 * actual_length field of the URB to reflect the number of bytes that have
1426 * actually been transferred via the host channel.
1428 static void update_urb_state_xfer_intr(dwc_hc_t * hc,
1429 dwc_otg_hc_regs_t * hc_regs,
1430 dwc_otg_hcd_urb_t * urb,
1431 dwc_otg_qtd_t * qtd,
1432 dwc_otg_halt_status_e halt_status)
1434 uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd,
/* Clamp so actual_length can never exceed the requested URB length;
 * printk_once keeps this diagnostic from flooding the log. */
1437 if (urb->actual_length + bytes_transferred > urb->length) {
1438 printk_once(KERN_DEBUG "dwc_otg: DEVICE:%03d : %s:%d:trimming xfer length\n",
1439 hc->dev_addr, __func__, __LINE__);
1440 bytes_transferred = urb->length - urb->actual_length;
1443 /* non DWORD-aligned buffer case handling. */
1444 if (hc->align_buff && bytes_transferred && hc->ep_is_in) {
1445 dwc_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
1449 urb->actual_length += bytes_transferred;
/* Verbose debug dump of the channel/transfer state (DBG_HCDV only). */
1453 hctsiz_data_t hctsiz;
1454 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
1455 DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
1456 __func__, (hc->ep_is_in ? "IN" : "OUT"),
1458 DWC_DEBUGPL(DBG_HCDV, " hc->start_pkt_count %d\n",
1459 hc->start_pkt_count);
1460 DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
1461 DWC_DEBUGPL(DBG_HCDV, " hc->max_packet %d\n", hc->max_packet);
1462 DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n",
1464 DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n",
1465 urb->actual_length);
1466 DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
1473 * Handles a host channel NAK interrupt. This handler may be called in either
1474 * DMA mode or Slave mode.
/* For split transactions a bulk NAK records a hold-off frame number so the
 * QH is not retried until the next frame. A NAK during CSPLIT rewinds to
 * the SSPLIT. For bulk/control IN in DMA mode, the NAK only resets the
 * error count; OUT transfers rewind state and may enter PING at high speed.
 * ISOC endpoints should never NAK. */
1476 static int32_t handle_hc_nak_intr(dwc_otg_hcd_t * hcd,
1478 dwc_otg_hc_regs_t * hc_regs,
1479 dwc_otg_qtd_t * qtd)
1481 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1482 "NAK Received--\n", hc->hc_num);
1485 * When we get bulk NAKs then remember this so we holdoff on this qh until
1486 * the beginning of the next frame
1488 switch(dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1491 if (nak_holdoff && qtd->qh->do_split)
1492 hc->qh->nak_frame = dwc_otg_hcd_get_frame_number(hcd);
1496 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1497 * interrupt. Re-start the SSPLIT transfer.
1500 if (hc->complete_split) {
1501 qtd->error_count = 0;
1503 qtd->complete_split = 0;
1504 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1505 goto handle_nak_done;
1508 switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1511 if (hcd->core_if->dma_enable && hc->ep_is_in) {
1513 * NAK interrupts are enabled on bulk/control IN
1514 * transfers in DMA mode for the sole purpose of
1515 * resetting the error count after a transaction error
1516 * occurs. The core will continue transferring data.
1517 * Disable other interrupts unmasked for the same
1520 disable_hc_int(hc_regs, datatglerr);
1521 disable_hc_int(hc_regs, ack);
1522 qtd->error_count = 0;
1523 goto handle_nak_done;
1527 * NAK interrupts normally occur during OUT transfers in DMA
1528 * or Slave mode. For IN transfers, more requests will be
1529 * queued as request queue space is available.
1531 qtd->error_count = 0;
1533 if (!hc->qh->ping_state) {
1534 update_urb_state_xfer_intr(hc, hc_regs,
1536 DWC_OTG_HC_XFER_NAK);
1537 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1539 if (hc->speed == DWC_OTG_EP_SPEED_HIGH)
1540 hc->qh->ping_state = 1;
1544 * Halt the channel so the transfer can be re-started from
1545 * the appropriate point or the PING protocol will
1548 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1551 qtd->error_count = 0;
1552 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1554 case UE_ISOCHRONOUS:
1555 /* Should never get called for isochronous transfers. */
1556 DWC_ASSERT(1, "NACK interrupt for ISOC transfer\n");
1561 disable_hc_int(hc_regs, nak);
1567 * Handles a host channel ACK interrupt. This interrupt is enabled when
1568 * performing the PING protocol in Slave mode, when errors occur during
1569 * either Slave mode or DMA mode, and during Start Split transactions.
/* For split transactions, an ACK on the SSPLIT schedules the CSPLIT and,
 * for ISOC OUT, advances the XACTPOS state machine in 188-byte microframe
 * chunks. Otherwise the ACK resets the error count and, if in PING state,
 * halts the channel so the transfer restarts. */
1571 static int32_t handle_hc_ack_intr(dwc_otg_hcd_t * hcd,
1573 dwc_otg_hc_regs_t * hc_regs,
1574 dwc_otg_qtd_t * qtd)
1576 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1577 "ACK Received--\n", hc->hc_num);
1581 * Handle ACK on SSPLIT.
1582 * ACK should not occur in CSPLIT.
1584 if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) {
1585 qtd->ssplit_out_xfer_count = hc->xfer_len;
1587 if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) {
1588 /* Don't need complete for isochronous out transfers. */
1589 qtd->complete_split = 1;
/* ISOC OUT split: advance the transaction-position state. */
1593 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
1594 switch (hc->xact_pos) {
1595 case DWC_HCSPLIT_XACTPOS_ALL:
1597 case DWC_HCSPLIT_XACTPOS_END:
1598 qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
1599 qtd->isoc_split_offset = 0;
1601 case DWC_HCSPLIT_XACTPOS_BEGIN:
1602 case DWC_HCSPLIT_XACTPOS_MID:
1604 * For BEGIN or MID, calculate the length for
1605 * the next microframe to determine the correct
1606 * SSPLIT token, either MID or END.
1609 struct dwc_otg_hcd_iso_packet_desc
1614 iso_descs[qtd->isoc_frame_index];
1615 qtd->isoc_split_offset += 188;
1617 if ((frame_desc->length -
1618 qtd->isoc_split_offset) <= 188) {
1619 qtd->isoc_split_pos =
1620 DWC_HCSPLIT_XACTPOS_END;
1622 qtd->isoc_split_pos =
1623 DWC_HCSPLIT_XACTPOS_MID;
1630 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
1634 * An unmasked ACK on a non-split DMA transaction is
1635 * for the sole purpose of resetting error counts. Disable other
1636 * interrupts unmasked for the same reason.
1638 if(hcd->core_if->dma_enable) {
1639 disable_hc_int(hc_regs, datatglerr);
1640 disable_hc_int(hc_regs, nak);
1642 qtd->error_count = 0;
1644 if (hc->qh->ping_state) {
1645 hc->qh->ping_state = 0;
1647 * Halt the channel so the transfer can be re-started
1648 * from the appropriate point. This only happens in
1649 * Slave mode. In DMA mode, the ping_state is cleared
1650 * when the transfer is started because the core
1651 * automatically executes the PING, then the transfer.
1653 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
1658 * If the ACK occurred when _not_ in the PING state, let the channel
1659 * continue transferring data after clearing the error count.
1662 disable_hc_int(hc_regs, ack);
1668 * Handles a host channel NYET interrupt. This interrupt should only occur on
1669 * Bulk and Control OUT endpoints and for complete split transactions. If a
1670 * NYET occurs at the same time as a Transfer Complete interrupt, it is
1671 * handled in the xfercomp interrupt handler, not here. This handler may be
1672 * called in either DMA mode or Slave mode.
1674 static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t * hcd,
1676 dwc_otg_hc_regs_t * hc_regs,
1677 dwc_otg_qtd_t * qtd)
1679 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1680 "NYET Received--\n", hc->hc_num);
1684 * re-do the CSPLIT immediately on non-periodic
1686 if (hc->do_split && hc->complete_split) {
/* ISOC IN CSPLIT that got NYET in DMA mode: give up on this frame and
 * move on; complete the URB if that was the last frame. */
1687 if (hc->ep_is_in && (hc->ep_type == DWC_OTG_EP_TYPE_ISOC)
1688 && hcd->core_if->dma_enable) {
1689 qtd->complete_split = 0;
1690 qtd->isoc_split_offset = 0;
1691 if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
1692 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
1693 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
1696 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
1697 goto handle_nyet_done;
1700 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1701 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1702 int frnum = dwc_otg_hcd_get_frame_number(hcd);
1704 // With the FIQ running we only ever see the failed NYET
1705 if (dwc_full_frame_num(frnum) !=
1706 dwc_full_frame_num(hc->qh->sched_frame) ||
1709 * No longer in the same full speed frame.
1710 * Treat this as a transaction error.
1713 /** @todo Fix system performance so this can
1714 * be treated as an error. Right now complete
1715 * splits cannot be scheduled precisely enough
1716 * due to other system activity, so this error
1717 * occurs regularly in Slave mode.
1721 qtd->complete_split = 0;
1722 halt_channel(hcd, hc, qtd,
1723 DWC_OTG_HC_XFER_XACT_ERR);
1724 /** @todo add support for isoc release */
1725 goto handle_nyet_done;
1729 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);
1730 goto handle_nyet_done;
/* Non-split NYET: enter PING state, rewind URB state, and restart via
 * halt so the PING protocol runs. */
1733 hc->qh->ping_state = 1;
1734 qtd->error_count = 0;
1736 update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd,
1737 DWC_OTG_HC_XFER_NYET);
1738 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1741 * Halt the channel and re-start the transfer so the PING
1742 * protocol will start.
1744 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);
1747 disable_hc_int(hc_regs, nyet);
1752 * Handles a host channel babble interrupt. This handler may be called in
1753 * either DMA mode or Slave mode.
/* Non-ISOC: the URB is completed with -DWC_E_OVERFLOW and the channel is
 * halted with BABBLE_ERR. ISOC: the per-frame URB state decides the halt
 * status. */
1755 static int32_t handle_hc_babble_intr(dwc_otg_hcd_t * hcd,
1757 dwc_otg_hc_regs_t * hc_regs,
1758 dwc_otg_qtd_t * qtd)
1760 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1761 "Babble Error--\n", hc->hc_num);
1763 if (hcd->core_if->dma_desc_enable) {
1764 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
1765 DWC_OTG_HC_XFER_BABBLE_ERR);
1766 goto handle_babble_done;
1769 if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
1770 hcd->fops->complete(hcd, qtd->urb->priv,
1771 qtd->urb, -DWC_E_OVERFLOW);
1772 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR);
1774 dwc_otg_halt_status_e halt_status;
1775 halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1776 DWC_OTG_HC_XFER_BABBLE_ERR);
1777 halt_channel(hcd, hc, qtd, halt_status);
1781 disable_hc_int(hc_regs, bblerr);
1786 * Handles a host channel AHB error interrupt. This handler is only called in
/* Dumps the channel registers and URB details to the error log, completes
 * the URB with -DWC_E_IO, and forces a channel halt directly through
 * dwc_otg_hc_halt() (halt_channel() would not write HCCHARn in DMA mode). */
1789 static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t * hcd,
1791 dwc_otg_hc_regs_t * hc_regs,
1792 dwc_otg_qtd_t * qtd)
1794 hcchar_data_t hcchar;
1795 hcsplt_data_t hcsplt;
1796 hctsiz_data_t hctsiz;
1798 char *pipetype, *speed;
1800 dwc_otg_hcd_urb_t *urb = qtd->urb;
1802 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1803 "AHB Error--\n", hc->hc_num);
1805 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
1806 hcsplt.d32 = DWC_READ_REG32(&hc_regs->hcsplt);
1807 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
1808 hcdma = DWC_READ_REG32(&hc_regs->hcdma);
1810 DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num);
1811 DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
1812 DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
1813 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
1814 DWC_ERROR(" Device address: %d\n",
1815 dwc_otg_hcd_get_dev_addr(&urb->pipe_info));
1816 DWC_ERROR(" Endpoint: %d, %s\n",
1817 dwc_otg_hcd_get_ep_num(&urb->pipe_info),
1818 (dwc_otg_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"));
1820 switch (dwc_otg_hcd_get_pipe_type(&urb->pipe_info)) {
1822 pipetype = "CONTROL";
1828 pipetype = "INTERRUPT";
1830 case UE_ISOCHRONOUS:
1831 pipetype = "ISOCHRONOUS";
1834 pipetype = "UNKNOWN";
1838 DWC_ERROR(" Endpoint type: %s\n", pipetype);
1840 switch (hc->speed) {
1841 case DWC_OTG_EP_SPEED_HIGH:
1844 case DWC_OTG_EP_SPEED_FULL:
1847 case DWC_OTG_EP_SPEED_LOW:
1855 DWC_ERROR(" Speed: %s\n", speed);
1857 DWC_ERROR(" Max packet size: %d\n",
1858 dwc_otg_hcd_get_mps(&urb->pipe_info));
1859 DWC_ERROR(" Data buffer length: %d\n", urb->length);
1860 DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n",
1861 urb->buf, (void *)urb->dma);
1862 DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n",
1863 urb->setup_packet, (void *)urb->setup_dma);
1864 DWC_ERROR(" Interval: %d\n", urb->interval);
1866 /* Core halts the channel for Descriptor DMA mode */
1867 if (hcd->core_if->dma_desc_enable) {
1868 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
1869 DWC_OTG_HC_XFER_AHB_ERR);
1870 goto handle_ahberr_done;
1873 hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_IO);
1876 * Force a channel halt. Don't call halt_channel because that won't
1877 * write to the HCCHARn register in DMA mode to force the halt.
1879 dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR);
1881 disable_hc_int(hc_regs, ahberr);
1886 * Handles a host channel transaction error interrupt. This handler may be
1887 * called in either DMA mode or Slave mode.
/* Bulk/control: rewind URB state, save the data toggle, and for high-speed
 * OUT enter PING state before halting with XACT_ERR (3-strikes retry is
 * driven by the halt path). Interrupt: a failed CSPLIT falls back to the
 * SSPLIT. ISOC: per-frame URB state decides the halt status. */
1889 static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t * hcd,
1891 dwc_otg_hc_regs_t * hc_regs,
1892 dwc_otg_qtd_t * qtd)
1894 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1895 "Transaction Error--\n", hc->hc_num);
1897 if (hcd->core_if->dma_desc_enable) {
1898 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
1899 DWC_OTG_HC_XFER_XACT_ERR);
1900 goto handle_xacterr_done;
1903 switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1907 if (!hc->qh->ping_state) {
1909 update_urb_state_xfer_intr(hc, hc_regs,
1911 DWC_OTG_HC_XFER_XACT_ERR);
1912 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1913 if (!hc->ep_is_in && hc->speed == DWC_OTG_EP_SPEED_HIGH) {
1914 hc->qh->ping_state = 1;
1919 * Halt the channel so the transfer can be re-started from
1920 * the appropriate point or the PING protocol will start.
1922 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1926 if (hc->do_split && hc->complete_split) {
1927 qtd->complete_split = 0;
1929 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1931 case UE_ISOCHRONOUS:
1933 dwc_otg_halt_status_e halt_status;
1935 update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1936 DWC_OTG_HC_XFER_XACT_ERR);
1938 halt_channel(hcd, hc, qtd, halt_status);
1942 handle_xacterr_done:
1943 disable_hc_int(hc_regs, xacterr);
1949 * Handles a host channel frame overrun interrupt. This handler may be called
1950 * in either DMA mode or Slave mode.
/* Interrupt pipes simply halt with FRAME_OVERRUN; ISOC pipes update the
 * per-frame URB state first and halt with the resulting status. */
1952 static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t * hcd,
1954 dwc_otg_hc_regs_t * hc_regs,
1955 dwc_otg_qtd_t * qtd)
1957 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1958 "Frame Overrun--\n", hc->hc_num);
1960 switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1965 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN);
1967 case UE_ISOCHRONOUS:
1969 dwc_otg_halt_status_e halt_status;
1971 update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1972 DWC_OTG_HC_XFER_FRAME_OVERRUN);
1974 halt_channel(hcd, hc, qtd, halt_status);
1979 disable_hc_int(hc_regs, frmovrun);
1985 * Handles a host channel data toggle error interrupt. This handler may be
1986 * called in either DMA mode or Slave mode.
/* Split transactions: save the toggle, rewind URB state, and halt with
 * XACT_ERR so the transfer restarts. Non-split IN in DMA mode: the unmasked
 * toggle error only resets the error count (like NAK/ACK above). */
1988 static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t * hcd,
1990 dwc_otg_hc_regs_t * hc_regs,
1991 dwc_otg_qtd_t * qtd)
1993 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1994 "Data Toggle Error on %s transfer--\n",
1995 hc->hc_num, (hc->ep_is_in ? "IN" : "OUT"));
1997 /* Data toggles on split transactions cause the hc to halt.
1998 * restart transfer */
1999 if(hc->qh->do_split)
2002 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
2003 update_urb_state_xfer_intr(hc, hc_regs,
2004 qtd->urb, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2005 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2006 } else if (hc->ep_is_in) {
2007 /* An unmasked data toggle error on a non-split DMA transaction is
2008 * for the sole purpose of resetting error counts. Disable other
2009 * interrupts unmasked for the same reason.
2011 if(hcd->core_if->dma_enable) {
2012 disable_hc_int(hc_regs, ack);
2013 disable_hc_int(hc_regs, nak);
2015 qtd->error_count = 0;
2018 disable_hc_int(hc_regs, datatglerr);
2025 * This function is for debug only. It checks that a valid halt status is set
2026 * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
2027 * taken and a warning is issued.
2028 * @return 1 if halt status is ok, 0 otherwise.
2030 static inline int halt_status_ok(dwc_otg_hcd_t * hcd,
2032 dwc_otg_hc_regs_t * hc_regs,
2033 dwc_otg_qtd_t * qtd)
2035 hcchar_data_t hcchar;
2036 hctsiz_data_t hctsiz;
2038 hcintmsk_data_t hcintmsk;
2039 hcsplt_data_t hcsplt;
/* Sanity check 1: a halt with no recorded reason should never happen;
 * dump state, clear the interrupt, and ignore it. */
2041 if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
2043 * This code is here only as a check. This condition should
2044 * never happen. Ignore the halt if it does occur.
2046 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2047 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
2048 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2049 hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);
2050 hcsplt.d32 = DWC_READ_REG32(&hc_regs->hcsplt);
2052 ("%s: hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
2053 "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
2054 "hcint 0x%08x, hcintmsk 0x%08x, "
2055 "hcsplt 0x%08x, qtd->complete_split %d\n", __func__,
2056 hc->hc_num, hcchar.d32, hctsiz.d32, hcint.d32,
2057 hcintmsk.d32, hcsplt.d32, qtd->complete_split);
2059 DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
2060 __func__, hc->hc_num);
2062 clear_hc_int(hc_regs, chhltd);
/* Sanity check 2: hcchar.chdis set at halt time is unexpected; re-issue
 * the halt with the previously recorded status. */
2067 * This code is here only as a check. hcchar.chdis should
2068 * never be set when the halt interrupt occurs. Halt the
2069 * channel again if it does occur.
2071 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2072 if (hcchar.b.chdis) {
2073 DWC_WARN("%s: hcchar.chdis set unexpectedly, "
2074 "hcchar 0x%08x, trying to halt again\n",
2075 __func__, hcchar.d32);
2076 clear_hc_int(hc_regs, chhltd);
2077 hc->halt_pending = 0;
2078 halt_channel(hcd, hc, qtd, hc->halt_status);
2087 * Handles a host Channel Halted interrupt in DMA mode. This handler
2088 * determines the reason the channel halted and proceeds accordingly.
/* Reads HCINT/HCINTMSK to decode why the core halted the channel and
 * dispatches to the matching handler. Interrupt precedence (xacterr before
 * nak/ack, nyet before nak/ack) matters for PING-initiated BULK/CONTROL
 * OUT transfers. Cores >= 2.71a with OUT NAK enhancement treat high-speed
 * CONTROL/BULK OUT slightly differently (out_nak_enh). */
2090 static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t * hcd,
2092 dwc_otg_hc_regs_t * hc_regs,
2093 dwc_otg_qtd_t * qtd)
2095 int out_nak_enh = 0;
2097 hcintmsk_data_t hcintmsk;
2098 /* For core with OUT NAK enhancement, the flow for high-
2099 * speed CONTROL/BULK OUT is handled a little differently.
2101 if (hcd->core_if->snpsid >= OTG_CORE_REV_2_71a) {
2102 if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in &&
2103 (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
2104 hc->ep_type == DWC_OTG_EP_TYPE_BULK)) {
2109 if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
2110 (hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR
2111 && !hcd->core_if->dma_desc_enable)) {
2113 * Just release the channel. A dequeue can happen on a
2114 * transfer timeout. In the case of an AHB Error, the channel
2115 * was forced to halt because there's no way to gracefully
2118 if (hcd->core_if->dma_desc_enable)
2119 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
2122 release_channel(hcd, hc, qtd, hc->halt_status);
2126 /* Read the HCINTn register to determine the cause for the halt. */
2128 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2129 hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);
2131 if (hcint.b.xfercomp) {
2132 /** @todo This is here because of a possible hardware bug. Spec
2133 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
2134 * interrupt w/ACK bit set should occur, but I only see the
2135 * XFERCOMP bit, even with it masked out. This is a workaround
2136 * for that behavior. Should fix this when hardware is fixed.
2138 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
2139 handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
2141 handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
2142 } else if (hcint.b.stall) {
2143 handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
2144 } else if (hcint.b.xacterr && !hcd->core_if->dma_desc_enable) {
2146 if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) {
2147 DWC_DEBUGPL(DBG_HCD, "XactErr with NYET/NAK/ACK\n");
2148 qtd->error_count = 0;
2150 DWC_DEBUGPL(DBG_HCD, "XactErr without NYET/NAK/ACK\n");
2155 * Must handle xacterr before nak or ack. Could get a xacterr
2156 * at the same time as either of these on a BULK/CONTROL OUT
2157 * that started with a PING. The xacterr takes precedence.
2159 handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
2160 } else if (hcint.b.xcs_xact && hcd->core_if->dma_desc_enable) {
2161 handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
2162 } else if (hcint.b.ahberr && hcd->core_if->dma_desc_enable) {
2163 handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
2164 } else if (hcint.b.bblerr) {
2165 handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
2166 } else if (hcint.b.frmovrun) {
2167 handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd);
2168 } else if (hcint.b.datatglerr) {
2169 handle_hc_datatglerr_intr(hcd, hc, hc_regs, qtd);
2170 } else if (!out_nak_enh) {
2173 * Must handle nyet before nak or ack. Could get a nyet at the
2174 * same time as either of those on a BULK/CONTROL OUT that
2175 * started with a PING. The nyet takes precedence.
2177 handle_hc_nyet_intr(hcd, hc, hc_regs, qtd);
2178 } else if (hcint.b.nak && !hcintmsk.b.nak) {
2180 * If nak is not masked, it's because a non-split IN transfer
2181 * is in an error state. In that case, the nak is handled by
2182 * the nak interrupt handler, not here. Handle nak here for
2183 * BULK/CONTROL OUT transfers, which halt on a NAK to allow
2184 * rewinding the buffer pointer.
2186 handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
2187 } else if (hcint.b.ack && !hcintmsk.b.ack) {
2189 * If ack is not masked, it's because a non-split IN transfer
2190 * is in an error state. In that case, the ack is handled by
2191 * the ack interrupt handler, not here. Handle ack here for
2192 * split transfers. Start splits halt on ACK.
2194 handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
2196 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
2197 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
2199 * A periodic transfer halted with no other channel
2200 * interrupts set. Assume it was halted by the core
2201 * because it could not be completed in its scheduled
2206 ("%s: Halt channel %d (assume incomplete periodic transfer)\n",
2207 __func__, hc->hc_num);
2209 halt_channel(hcd, hc, qtd,
2210 DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE);
2213 ("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
2214 "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n",
2215 __func__, hc->hc_num, hcint.d32,
2216 DWC_READ_REG32(&hcd->
2217 core_if->core_global_regs->
2219 /* Fallthrough: use 3-strikes rule */
2221 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
2222 update_urb_state_xfer_intr(hc, hc_regs,
2223 qtd->urb, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2224 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2229 DWC_PRINTF("NYET/NAK/ACK/other in non-error case, 0x%08x\n",
2231 /* Fallthrough: use 3-strikes rule */
2233 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
2234 update_urb_state_xfer_intr(hc, hc_regs,
2235 qtd->urb, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2236 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2241 * Handles a host channel Channel Halted interrupt.
2243 * In slave mode, this handler is called only when the driver specifically
2244 * requests a halt. This occurs during handling other host channel interrupts
2245 * (e.g. nak, xacterr, stall, nyet, etc.).
2247 * In DMA mode, this is the interrupt that occurs when the core has finished
2248 * processing a transfer on a channel. Other host channel interrupts (except
2249 * ahberr) are disabled in DMA mode.
2251 static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t * hcd,
2253 dwc_otg_hc_regs_t * hc_regs,
2254 dwc_otg_qtd_t * qtd)
2256 DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
2257 "Channel Halted--\n", hc->hc_num);
/* DMA mode: decode the halt cause; Slave mode: validate the recorded halt
 * status (debug check) and release the channel with it. */
2259 if (hcd->core_if->dma_enable) {
2260 handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd);
2263 if (!halt_status_ok(hcd, hc, hc_regs, qtd)) {
2267 release_channel(hcd, hc, qtd, hc->halt_status);
2275 * dwc_otg_fiq_unmangle_isoc() - Update the iso_frame_desc structure on
2276 * FIQ transfer completion
2277 * @hcd: Pointer to dwc_otg_hcd struct
2278 * @num: Host channel number
2280 * 1. Un-mangle the status as recorded in each iso_frame_desc status
2281 * 2. Copy it from the dwc_otg_urb into the real URB
/* The FIQ stores the raw HCINT value of each frame's final transaction in
 * iso_descs[i].status; this converts each one into a DWC error code (0,
 * -DWC_E_NO_STREAM_RES, -DWC_E_COMMUNICATION, -DWC_E_PROTOCOL,
 * -DWC_E_OVERFLOW, or -1) and accumulates actual_length/error_count. */
2283 void dwc_otg_fiq_unmangle_isoc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, dwc_otg_qtd_t *qtd, uint32_t num)
2285 struct dwc_otg_hcd_urb *dwc_urb = qtd->urb;
2286 int nr_frames = dwc_urb->packet_count;
2288 hcint_data_t frame_hcint;
2290 for (i = 0; i < nr_frames; i++) {
2291 frame_hcint.d32 = dwc_urb->iso_descs[i].status;
2292 if (frame_hcint.b.xfercomp) {
2293 dwc_urb->iso_descs[i].status = 0;
2294 dwc_urb->actual_length += dwc_urb->iso_descs[i].actual_length;
2295 } else if (frame_hcint.b.frmovrun) {
2297 dwc_urb->iso_descs[i].status = -DWC_E_NO_STREAM_RES;
2299 dwc_urb->iso_descs[i].status = -DWC_E_COMMUNICATION;
2300 dwc_urb->error_count++;
2301 dwc_urb->iso_descs[i].actual_length = 0;
2302 } else if (frame_hcint.b.xacterr) {
2303 dwc_urb->iso_descs[i].status = -DWC_E_PROTOCOL;
2304 dwc_urb->error_count++;
2305 dwc_urb->iso_descs[i].actual_length = 0;
2306 } else if (frame_hcint.b.bblerr) {
2307 dwc_urb->iso_descs[i].status = -DWC_E_OVERFLOW;
2308 dwc_urb->error_count++;
2309 dwc_urb->iso_descs[i].actual_length = 0;
2311 /* Something went wrong */
2312 dwc_urb->iso_descs[i].status = -1;
2313 dwc_urb->iso_descs[i].actual_length = 0;
2314 dwc_urb->error_count++;
/* Advance the QH schedule past the frames this URB consumed. */
2317 qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval * (nr_frames - 1));
2319 //printk_ratelimited(KERN_INFO "%s: HS isochronous of %d/%d frames with %d errors complete\n",
2320 // __FUNCTION__, i, dwc_urb->packet_count, dwc_urb->error_count);
2324 * dwc_otg_fiq_unsetup_per_dma() - Remove data from bounce buffers for split transactions
2325 * @hcd: Pointer to dwc_otg_hcd struct
2326 * @num: Host channel number
2328 * Copies data from the FIQ bounce buffers into the URB's transfer buffer. Does not modify URB state.
2329 * Returns total length of data or -1 if the buffers were not used.
2332 int dwc_otg_fiq_unsetup_per_dma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, dwc_otg_qtd_t *qtd, uint32_t num)
2334 dwc_hc_t *hc = qh->channel;
/* Per-channel DMA bounce-buffer blob and FIQ state for this channel. */
2335 struct fiq_dma_blob *blob = hcd->fiq_dmab;
2336 struct fiq_channel_state *st = &hcd->fiq_state->channel[num];
2337 uint8_t *ptr = NULL;
2338 int index = 0, len = 0;
2341 /* Copy data out of the DMA bounce buffers to the URB's buffer.
2342 * The align_buf is ignored as this is ignored on FSM enqueue. */
2343 ptr = qtd->urb->buf;
2344 if (qh->ep_type == UE_ISOCHRONOUS) {
2345 /* Isoc IN transactions - grab the offset of the iso_frame_desc into the URB transfer buffer */
2346 index = qtd->isoc_frame_index;
2347 ptr += qtd->urb->iso_descs[index].offset;
2349 /* Need to increment by actual_length for interrupt IN */
2350 ptr += qtd->urb->actual_length;
/* Each CSPLIT that returned data filled one bounce-buffer slot; walk the
 * slots in order, gluing them back together into the URB buffer. */
2353 for (i = 0; i < st->dma_info.index; i++) {
2354 len += st->dma_info.slot_len[i];
2355 dwc_memcpy(ptr, &blob->channel[num].index[i].buf[0], st->dma_info.slot_len[i]);
2356 ptr += st->dma_info.slot_len[i];
2360 /* OUT endpoints - nothing to do. */
2366 * dwc_otg_hcd_handle_hc_fsm() - handle an unmasked channel interrupt
2367 * from a channel handled in the FIQ
2368 * @hcd: Pointer to dwc_otg_hcd struct
2369 * @num: Host channel number
2371 * If a host channel interrupt was received by the IRQ and this was a channel
2372 * used by the FIQ, the execution flow for transfer completion is substantially
2373 * different from the normal (messy) path. This function and its friends handles
2374 * channel cleanup and transaction completion from a FIQ transaction.
2376 void dwc_otg_hcd_handle_hc_fsm(dwc_otg_hcd_t *hcd, uint32_t num)
2378 struct fiq_channel_state *st = &hcd->fiq_state->channel[num];
2379 dwc_hc_t *hc = hcd->hc_ptr_array[num];
2381 dwc_otg_hc_regs_t *hc_regs = hcd->core_if->host_if->hc_regs[num];
/* The FIQ snapshotted HCINT/HCTSIZ at completion time; use the copies, not
 * the live registers, since the FIQ may have already cleared them. */
2382 hcint_data_t hcint = hcd->fiq_state->channel[num].hcint_copy;
2383 hctsiz_data_t hctsiz = hcd->fiq_state->channel[num].hctsiz_copy;
2384 int hostchannels = 0;
2385 fiq_print(FIQDBG_INT, hcd->fiq_state, "OUT %01d %01d ", num , st->fsm);
2387 hostchannels = hcd->available_host_channels;
2388 if (hc->halt_pending) {
2389 /* Dequeue: The FIQ was allowed to complete the transfer but state has been cleared. */
2390 if (hc->qh && st->fsm == FIQ_NP_SPLIT_DONE &&
2391 hcint.b.xfercomp && hc->qh->ep_type == UE_BULK) {
/* Transfer completed anyway; recover the data toggle from the PID the
 * hardware left in HCTSIZ so the QH stays in sync for the next transfer. */
2392 if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
2393 hc->qh->data_toggle = DWC_OTG_HC_PID_DATA1;
2395 hc->qh->data_toggle = DWC_OTG_HC_PID_DATA0;
/* qtd is gone after a dequeue - release with NULL. */
2398 release_channel(hcd, hc, NULL, hc->halt_status);
2402 qtd = DWC_CIRCLEQ_FIRST(&hc->qh->qtd_list);
/* Dispatch on the terminal state the FIQ's state machine recorded.
 * NOTE(review): gaps in the embedded line numbers show break statements,
 * braces and some else branches were dropped from this extract. */
2407 case FIQ_DEQUEUE_ISSUED:
2408 /* Handled above, but keep for posterity */
2409 release_channel(hcd, hc, NULL, hc->halt_status);
2412 case FIQ_NP_SPLIT_DONE:
2413 /* Nonperiodic transaction complete. */
2414 if (!hc->ep_is_in) {
2415 qtd->ssplit_out_xfer_count = hc->xfer_len;
2417 if (hcint.b.xfercomp) {
2418 handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
2419 } else if (hcint.b.nak) {
2420 handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
2422 DWC_WARN("Unexpected IRQ state on FSM transaction:"
2423 "dev_addr=%d ep=%d fsm=%d, hcint=0x%08x\n",
2424 hc->dev_addr, hc->ep_num, st->fsm, hcint.d32);
2425 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
2429 case FIQ_NP_SPLIT_HS_ABORTED:
2430 /* A HS abort is a 3-strikes on the HS bus at any point in the transaction.
2431 * Normally a CLEAR_TT_BUFFER hub command would be required: we can't do that
2432 * because there's no guarantee which order a non-periodic split happened in.
2433 * We could end up clearing a perfectly good transaction out of the buffer.
2435 if (hcint.b.xacterr) {
/* Fold the error count the FIQ accumulated into the qtd's count so the
 * normal retry/giveback logic in the xacterr handler sees the full tally. */
2436 qtd->error_count += st->nr_errors;
2437 handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
2438 } else if (hcint.b.ahberr) {
2439 handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
2441 DWC_WARN("Unexpected IRQ state on FSM transaction:"
2442 "dev_addr=%d ep=%d fsm=%d, hcint=0x%08x\n",
2443 hc->dev_addr, hc->ep_num, st->fsm, hcint.d32);
2444 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
2448 case FIQ_NP_SPLIT_LS_ABORTED:
2449 /* A few cases can cause this - either an unknown state on a SSPLIT or
2450 * STALL/data toggle error response on a CSPLIT */
2451 if (hcint.b.stall) {
2452 handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
2453 } else if (hcint.b.datatglerr) {
2454 handle_hc_datatglerr_intr(hcd, hc, hc_regs, qtd);
2455 } else if (hcint.b.bblerr) {
2456 handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
2457 } else if (hcint.b.ahberr) {
2458 handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
2460 DWC_WARN("Unexpected IRQ state on FSM transaction:"
2461 "dev_addr=%d ep=%d fsm=%d, hcint=0x%08x\n",
2462 hc->dev_addr, hc->ep_num, st->fsm, hcint.d32);
2463 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
2467 case FIQ_PER_SPLIT_DONE:
2468 /* Isoc IN or Interrupt IN/OUT */
2470 /* Flow control here is different from the normal execution by the driver.
2471 * We need to completely ignore most of the driver's method of handling
2472 * split transactions and do it ourselves.
2474 if (hc->ep_type == UE_INTERRUPT) {
2476 handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
2477 } else if (hc->ep_is_in) {
/* Interrupt IN: pull the received bytes out of the FIQ bounce buffers. */
2479 len = dwc_otg_fiq_unsetup_per_dma(hcd, hc->qh, qtd, num);
2480 //printk(KERN_NOTICE "FIQ Transaction: hc=%d len=%d urb_len = %d\n", num, len, qtd->urb->length);
2481 qtd->urb->actual_length += len;
2482 if (qtd->urb->actual_length >= qtd->urb->length) {
2483 qtd->urb->status = 0;
2484 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, qtd->urb->status);
2485 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
2487 /* Interrupt transfer not complete yet - is it a short read? */
2488 if (len < hc->max_packet) {
2489 /* Interrupt transaction complete */
2490 qtd->urb->status = 0;
2491 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, qtd->urb->status);
2492 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
2494 /* Further transactions required */
2495 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
2499 /* Interrupt OUT complete. */
2500 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
2501 qtd->urb->actual_length += hc->xfer_len;
2502 if (qtd->urb->actual_length >= qtd->urb->length) {
2503 qtd->urb->status = 0;
2504 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, qtd->urb->status);
2505 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
2507 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
2511 /* ISOC IN complete. */
2512 struct dwc_otg_hcd_iso_packet_desc *frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
2514 /* Record errors, update qtd. */
2515 if (st->nr_errors) {
2516 frame_desc->actual_length = 0;
2517 frame_desc->status = -DWC_E_PROTOCOL;
2519 frame_desc->status = 0;
2521 len = dwc_otg_fiq_unsetup_per_dma(hcd, hc->qh, qtd, num);
2522 frame_desc->actual_length = len;
/* URB is done only when every frame descriptor has been consumed. */
2524 qtd->isoc_frame_index++;
2525 if (qtd->isoc_frame_index == qtd->urb->packet_count) {
2526 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
2527 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
2529 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
2534 case FIQ_PER_ISO_OUT_DONE: {
2535 struct dwc_otg_hcd_iso_packet_desc *frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
2536 /* Record errors, update qtd. */
2537 if (st->nr_errors) {
2538 frame_desc->actual_length = 0;
2539 frame_desc->status = -DWC_E_PROTOCOL;
2541 frame_desc->status = 0;
/* OUT frames have no bounce data to recover: report the full frame sent. */
2542 frame_desc->actual_length = frame_desc->length;
2544 qtd->isoc_frame_index++;
2545 qtd->isoc_split_offset = 0;
2546 if (qtd->isoc_frame_index == qtd->urb->packet_count) {
2547 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
2548 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
2550 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
2555 case FIQ_PER_SPLIT_NYET_ABORTED:
2556 /* Doh. lost the data. */
2557 printk_ratelimited(KERN_INFO "Transfer to device %d endpoint 0x%x frame %d failed "
2558 "- FIQ reported NYET. Data may have been lost.\n",
2559 hc->dev_addr, hc->ep_num, dwc_otg_hcd_get_frame_number(hcd) >> 3);
2560 if (hc->ep_type == UE_ISOCHRONOUS) {
2561 struct dwc_otg_hcd_iso_packet_desc *frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
2562 /* Record errors, update qtd. */
2563 frame_desc->actual_length = 0;
2564 frame_desc->status = -DWC_E_PROTOCOL;
2565 qtd->isoc_frame_index++;
2566 qtd->isoc_split_offset = 0;
2567 if (qtd->isoc_frame_index == qtd->urb->packet_count) {
2568 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
2569 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
2571 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
2574 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
2578 case FIQ_HS_ISOC_DONE:
2579 /* The FIQ has performed a whole pile of isochronous transactions.
2580 * The status is recorded as the interrupt state should the transaction
2583 dwc_otg_fiq_unmangle_isoc(hcd, hc->qh, qtd, num);
2584 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
2585 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
2588 case FIQ_PER_SPLIT_LS_ABORTED:
2589 if (hcint.b.xacterr) {
2590 /* Hub has responded with an ERR packet. Device
2591 * has been unplugged or the port has been disabled.
2592 * TODO: need to issue a reset to the hub port. */
2593 qtd->error_count += 3;
2594 handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
2595 } else if (hcint.b.stall) {
2596 handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
2597 } else if (hcint.b.bblerr) {
2598 handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
/* NOTE(review): BUG - argument order does not match the format string.
 * "device %d endpoint 0x%x ... FSM=%d" expects (dev_addr, ep_num, fsm),
 * but (st->fsm, hc->dev_addr, hc->ep_num) is passed, so all three logged
 * values are shifted by one position. Should be
 * hc->dev_addr, hc->ep_num, st->fsm. */
2600 printk_ratelimited(KERN_INFO "Transfer to device %d endpoint 0x%x failed "
2601 "- FIQ reported FSM=%d. Data may have been lost.\n",
2602 st->fsm, hc->dev_addr, hc->ep_num);
2603 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
2607 case FIQ_PER_SPLIT_HS_ABORTED:
2608 /* Either the SSPLIT phase suffered transaction errors or something
2609 * unexpected happened.
/* Force three strikes so the xacterr handler gives the URB back instead of
 * retrying. */
2611 qtd->error_count += 3;
2612 handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
2613 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
2616 case FIQ_PER_SPLIT_TIMEOUT:
2617 /* Couldn't complete in the nominated frame */
2618 printk(KERN_INFO "Transfer to device %d endpoint 0x%x frame %d failed "
2619 "- FIQ timed out. Data may have been lost.\n",
2620 hc->dev_addr, hc->ep_num, dwc_otg_hcd_get_frame_number(hcd) >> 3);
2621 if (hc->ep_type == UE_ISOCHRONOUS) {
2622 struct dwc_otg_hcd_iso_packet_desc *frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
2623 /* Record errors, update qtd. */
2624 frame_desc->actual_length = 0;
/* NOTE(review): line numbers jump 2624->2626->2628 - the condition
 * selecting -DWC_E_NO_STREAM_RES vs -DWC_E_COMMUNICATION is missing from
 * this extract; confirm against the full source. */
2626 frame_desc->status = -DWC_E_NO_STREAM_RES;
2628 frame_desc->status = -DWC_E_COMMUNICATION;
2630 qtd->isoc_frame_index++;
2631 if (qtd->isoc_frame_index == qtd->urb->packet_count) {
2632 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
2633 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
2635 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
2638 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
2643 DWC_WARN("Unexpected state received on hc=%d fsm=%d on transfer to device %d ep 0x%x",
2644 hc->hc_num, st->fsm, hc->dev_addr, hc->ep_num);
2646 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
2651 /** Handles interrupt for a specific Host Channel */
2652 int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t * dwc_otg_hcd, uint32_t num)
2656 hcintmsk_data_t hcintmsk;
2658 dwc_otg_hc_regs_t *hc_regs;
2661 DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);
2663 hc = dwc_otg_hcd->hc_ptr_array[num];
2664 hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num];
2665 if(hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
2666 /* A dequeue was issued for this transfer. Our QTD has gone away
2667 * but in the case of a FIQ transfer, the transfer would have run
/* FIQ-owned channel: let the FSM completion path clean up; otherwise
 * release directly (the qtd no longer exists, hence NULL). */
2670 if (fiq_fsm_enable && dwc_otg_hcd->fiq_state->channel[num].fsm != FIQ_PASSTHROUGH) {
2671 dwc_otg_hcd_handle_hc_fsm(dwc_otg_hcd, num);
2673 release_channel(dwc_otg_hcd, hc, NULL, hc->halt_status);
2677 qtd = DWC_CIRCLEQ_FIRST(&hc->qh->qtd_list);
2680 * FSM mode: Check to see if this is a HC interrupt from a channel handled by the FIQ.
2681 * Execution path is fundamentally different for the channels after a FIQ has completed
2682 * a split transaction.
2684 if (fiq_fsm_enable) {
2685 switch (dwc_otg_hcd->fiq_state->channel[num].fsm) {
2686 case FIQ_PASSTHROUGH:
/* Not FIQ-managed: fall through to the legacy handling below. */
2688 case FIQ_PASSTHROUGH_ERRORSTATE:
2689 /* Hook into the error count */
2690 fiq_print(FIQDBG_ERR, dwc_otg_hcd->fiq_state, "HCDERR%02d", num);
2691 if (!dwc_otg_hcd->fiq_state->channel[num].nr_errors) {
/* FIQ saw a clean retry: reset the qtd's error tally. */
2692 qtd->error_count = 0;
2693 fiq_print(FIQDBG_ERR, dwc_otg_hcd->fiq_state, "RESET ");
/* Any other FSM state: a FIQ-driven transaction completed; hand off. */
2697 dwc_otg_hcd_handle_hc_fsm(dwc_otg_hcd, num);
/* Legacy path: read HCINT, mask off disabled sources, and dispatch each
 * pending interrupt bit to its handler. */
2702 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2703 hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);
2704 hcint.d32 = hcint.d32 & hcintmsk.d32;
2705 if (!dwc_otg_hcd->core_if->dma_enable) {
/* Slave mode: 0x2 is chhltd alone; a value != 0x2 with chhltd set means
 * other status bits accompany the halt. */
2706 if (hcint.b.chhltd && hcint.d32 != 0x2) {
2711 if (hcint.b.xfercomp) {
2713 handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2715 * If NYET occurred at same time as Xfer Complete, the NYET is
2716 * handled by the Xfer Complete interrupt handler. Don't want
2717 * to call the NYET interrupt handler in this case.
2721 if (hcint.b.chhltd) {
2722 retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2724 if (hcint.b.ahberr) {
2725 retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2727 if (hcint.b.stall) {
2728 retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2731 retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2735 retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2738 retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2740 if (hcint.b.xacterr) {
2741 retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2743 if (hcint.b.bblerr) {
2744 retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2746 if (hcint.b.frmovrun) {
2748 handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2750 if (hcint.b.datatglerr) {
2752 handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
2757 #endif /* DWC_DEVICE_ONLY */