1 /* ==========================================================================
2  * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $
3  * $Revision: #89 $
4  * $Date: 2011/10/20 $
5  * $Change: 1869487 $
6  *
7  * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8  * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9  * otherwise expressly agreed to in writing between Synopsys and you.
10  *
11  * The Software IS NOT an item of Licensed Software or Licensed Product under
12  * any End User Software License Agreement or Agreement for Licensed Product
13  * with Synopsys or any supplement thereto. You are permitted to use and
14  * redistribute this Software in source and binary forms, with or without
15  * modification, provided that redistributions of source code must retain this
16  * notice. You may not view, use, disclose, copy or distribute this file or
17  * any information contained herein except pursuant to this license grant from
18  * Synopsys. If you do not agree with this notice, including the disclaimer
19  * below, then you are not authorized to use the Software.
20  *
21  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  * ========================================================================== */
33 #ifndef DWC_DEVICE_ONLY
34
35 #include "dwc_otg_hcd.h"
36 #include "dwc_otg_regs.h"
37
38 #include <linux/jiffies.h>
39 #ifdef CONFIG_ARM
40 #include <asm/fiq.h>
41 #endif
42
43 extern bool microframe_schedule;
44
45 /** @file
46  * This file contains the implementation of the HCD Interrupt handlers.
47  */
48
49 int fiq_done, int_done;
50
51 #ifdef FIQ_DEBUG
52 char buffer[1000*16];
53 int wptr;
54 void notrace _fiq_print(FIQDBG_T dbg_lvl, char *fmt, ...)
55 {
56         FIQDBG_T dbg_lvl_req = FIQDBG_PORTHUB;
57         va_list args;
58         char text[17];
59         hfnum_data_t hfnum = { .d32 = FIQ_READ(dwc_regs_base + 0x408) };
60
61         if(dbg_lvl & dbg_lvl_req || dbg_lvl == FIQDBG_ERR)
62         {
63                 local_fiq_disable();
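                /* Prefix each entry with frame:microframe and the eighth of the
                 * microframe already elapsed. The 937 divisor assumes frrem counts
                 * 60 MHz PHY clocks (7500 per 125 us microframe, ~937 per eighth). */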
64                 snprintf(text, 9, "%4d%d:%d ", hfnum.b.frnum/8, hfnum.b.frnum%8, 8 - hfnum.b.frrem/937);
65                 va_start(args, fmt);
66                 vsnprintf(text+8, 9, fmt, args);
67                 va_end(args);
68
69                 memcpy(buffer + wptr, text, 16);
70                 wptr = (wptr + 16) % sizeof(buffer);
71                 local_fiq_enable();
72         }
73 }
74 #endif
75
76 /** This function handles interrupts for the HCD. */
77 int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t * dwc_otg_hcd)
78 {
79         int retval = 0;
80         static int last_time;
81         dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
82         gintsts_data_t gintsts;
83         gintmsk_data_t gintmsk;
84         hfnum_data_t hfnum;
85         haintmsk_data_t haintmsk;
86
87 #ifdef DEBUG
88         dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
89
90 #endif
91
92         gintsts.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintsts);
93         gintmsk.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintmsk);
94
95         /* Exit from ISR if core is hibernated */
96         if (core_if->hibernation_suspend == 1) {
97                 goto exit_handler_routine;
98         }
99         DWC_SPINLOCK(dwc_otg_hcd->lock);
100         /* Check if HOST Mode */
101         if (dwc_otg_is_host_mode(core_if)) {
102                 if (fiq_enable) {
103                         local_fiq_disable();
104                         fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
105                         /* Merge in the interrupt sources that the FIQ has masked and deferred to this handler */
106                         gintmsk.d32 = gintmsk.d32 | ~(dwc_otg_hcd->fiq_state->gintmsk_saved.d32);
107                         dwc_otg_hcd->fiq_state->gintmsk_saved.d32 = ~0;
108                 }
109
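                /* The FIQ FSM clears bits in haintmsk_saved for channels it has
                 * finished with; if any bits are cleared, force hcintr so the host
                 * channel handler below services them. */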
110                 if (fiq_fsm_enable && ( 0x0000FFFF & ~(dwc_otg_hcd->fiq_state->haintmsk_saved.b2.chint))) {
111                         gintsts.b.hcintr = 1;
112                 }
113
114                 /* Danger, Will Robinson: fake a SOF if necessary */
115                 if (fiq_fsm_enable && (dwc_otg_hcd->fiq_state->gintmsk_saved.b.sofintr == 1)) {
116                         gintsts.b.sofintr = 1;
117                 }
118                 gintsts.d32 &= gintmsk.d32;
119
120                 if (fiq_enable) {
121                         fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
122                         local_fiq_enable();
123                 }
124
125                 if (!gintsts.d32) {
126                         goto exit_handler_routine;
127                 }
128
129 #ifdef DEBUG
130                 // We should be OK doing this because the common interrupts should already have been serviced
131                 /* Don't print debug message in the interrupt handler on SOF */
132 #ifndef DEBUG_SOF
133                 if (gintsts.d32 != DWC_SOF_INTR_MASK)
134 #endif
135                         DWC_DEBUGPL(DBG_HCDI, "\n");
136 #endif
137
138 #ifdef DEBUG
139 #ifndef DEBUG_SOF
140                 if (gintsts.d32 != DWC_SOF_INTR_MASK)
141 #endif
142                         DWC_DEBUGPL(DBG_HCDI,
143                                     "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x core_if=%p\n",
144                                     gintsts.d32, core_if);
145 #endif
146                 hfnum.d32 = DWC_READ_REG32(&dwc_otg_hcd->core_if->host_if->host_global_regs->hfnum);
147                 if (gintsts.b.sofintr) {
148                         retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd);
149                 }
150
151                 if (gintsts.b.rxstsqlvl) {
152                         retval |=
153                             dwc_otg_hcd_handle_rx_status_q_level_intr
154                             (dwc_otg_hcd);
155                 }
156                 if (gintsts.b.nptxfempty) {
157                         retval |=
158                             dwc_otg_hcd_handle_np_tx_fifo_empty_intr
159                             (dwc_otg_hcd);
160                 }
161                 if (gintsts.b.i2cintr) {
162                         /** @todo Implement i2cintr handler. */
163                 }
164                 if (gintsts.b.portintr) {
165
166                         gintmsk_data_t gintmsk = { .b.portintr = 1};
167                         retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd);
168                         if (fiq_enable) {
169                                 local_fiq_disable();
170                                 fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
171                                 DWC_MODIFY_REG32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
172                                 fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
173                                 local_fiq_enable();
174                         } else {
175                                 DWC_MODIFY_REG32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
176                         }
177                 }
178                 if (gintsts.b.hcintr) {
179                         retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd);
180                 }
181                 if (gintsts.b.ptxfempty) {
182                         retval |=
183                             dwc_otg_hcd_handle_perio_tx_fifo_empty_intr
184                             (dwc_otg_hcd);
185                 }
186 #ifdef DEBUG
187 #ifndef DEBUG_SOF
188                 if (gintsts.d32 != DWC_SOF_INTR_MASK)
189 #endif
190                 {
191                         DWC_DEBUGPL(DBG_HCDI,
192                                     "DWC OTG HCD Finished Servicing Interrupts\n");
193                         DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
194                                     DWC_READ_REG32(&global_regs->gintsts));
195                         DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
196                                     DWC_READ_REG32(&global_regs->gintmsk));
197                 }
198 #endif
199
200 #ifdef DEBUG
201 #ifndef DEBUG_SOF
202                 if (gintsts.d32 != DWC_SOF_INTR_MASK)
203 #endif
204                         DWC_DEBUGPL(DBG_HCDI, "\n");
205 #endif
206
207         }
208
209 exit_handler_routine:
210         if (fiq_enable) {
211                 gintmsk_data_t gintmsk_new;
212                 haintmsk_data_t haintmsk_new;
213                 local_fiq_disable();
214                 fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
215                 gintmsk_new.d32 = *(volatile uint32_t *)&dwc_otg_hcd->fiq_state->gintmsk_saved.d32;
216                 if(fiq_fsm_enable)
217                         haintmsk_new.d32 = *(volatile uint32_t *)&dwc_otg_hcd->fiq_state->haintmsk_saved.d32;
218                 else
219                         haintmsk_new.d32 = 0x0000FFFF;
220
221                 /* The FIQ could have sneaked another interrupt in. If so, don't clear MPHI */
222                 if ((gintmsk_new.d32 == ~0) && (haintmsk_new.d32 == 0x0000FFFF)) {
223                         if (dwc_otg_hcd->fiq_state->mphi_regs.swirq_clr) {
224                                 DWC_WRITE_REG32(dwc_otg_hcd->fiq_state->mphi_regs.swirq_clr, 1);
225                         } else {
226                                 DWC_WRITE_REG32(dwc_otg_hcd->fiq_state->mphi_regs.intstat, (1<<16));
227                         }
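                        /* After ~50 MPHI interrupts, reset the MPHI block as a
                         * workaround: bit 16 appears to be a reset request (held
                         * with bit 31), bit 17 the reset-done flag - inferred from
                         * the register accesses below, not from documentation. */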
228                         if (dwc_otg_hcd->fiq_state->mphi_int_count >= 50) {
229                                 fiq_print(FIQDBG_INT, dwc_otg_hcd->fiq_state, "MPHI CLR");
230                                         DWC_WRITE_REG32(dwc_otg_hcd->fiq_state->mphi_regs.ctrl, ((1<<31) + (1<<16)));
231                                         while (!(DWC_READ_REG32(dwc_otg_hcd->fiq_state->mphi_regs.ctrl) & (1 << 17)))
232                                                 ;
233                                         DWC_WRITE_REG32(dwc_otg_hcd->fiq_state->mphi_regs.ctrl, (1<<31));
234                                         dwc_otg_hcd->fiq_state->mphi_int_count = 0;
235                         }
236                         int_done++;
237                 }
238                 haintmsk.d32 = DWC_READ_REG32(&core_if->host_if->host_global_regs->haintmsk);
239                 /* Re-enable interrupts that the FIQ masked (first time round) */
240                 FIQ_WRITE(dwc_otg_hcd->fiq_state->dwc_regs_base + GINTMSK, gintmsk.d32);
241                 fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
242                 local_fiq_enable();
243
244                 if ((jiffies / HZ) > last_time) {
245                         //dwc_otg_qh_t *qh;
246                         //dwc_list_link_t *cur;
247                         /* Once a second, output the FIQ and IRQ counts (useful for debugging) */
248                         last_time = jiffies / HZ;
249                 //       DWC_WARN("np_kick=%d AHC=%d sched_frame=%d cur_frame=%d int_done=%d fiq_done=%d",
250                 //      dwc_otg_hcd->fiq_state->kick_np_queues, dwc_otg_hcd->available_host_channels,
251                 //      dwc_otg_hcd->fiq_state->next_sched_frame, hfnum.b.frnum, int_done, dwc_otg_hcd->fiq_state->fiq_done);
252                          //printk(KERN_WARNING "Periodic queues:\n");
253                 }
254         }
255
256         DWC_SPINUNLOCK(dwc_otg_hcd->lock);
257         return retval;
258 }
259
260 #ifdef DWC_TRACK_MISSED_SOFS
261
262 #warning Compiling code to track missed SOFs
263 #define FRAME_NUM_ARRAY_SIZE 1000
264 /**
265  * This function is for debug only.
266  */
267 static inline void track_missed_sofs(uint16_t curr_frame_number)
268 {
269         static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
270         static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
271         static int frame_num_idx = 0;
272         static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
273         static int dumped_frame_num_array = 0;
274
275         if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
276                 if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) !=
277                     curr_frame_number) {
278                         frame_num_array[frame_num_idx] = curr_frame_number;
279                         last_frame_num_array[frame_num_idx++] = last_frame_num;
280                 }
281         } else if (!dumped_frame_num_array) {
282                 int i;
283                 DWC_PRINTF("Frame     Last Frame\n");
284                 DWC_PRINTF("-----     ----------\n");
285                 for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
286                         DWC_PRINTF("0x%04x    0x%04x\n",
287                                    frame_num_array[i], last_frame_num_array[i]);
288                 }
289                 dumped_frame_num_array = 1;
290         }
291         last_frame_num = curr_frame_number;
292 }
293 #endif
294
295 /**
296  * Handles the start-of-frame interrupt in host mode. Non-periodic
297  * transactions may be queued to the DWC_otg controller for the current
298  * (micro)frame. Periodic transactions may be queued to the controller for the
299  * next (micro)frame.
300  */
301 int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t * hcd)
302 {
303         hfnum_data_t hfnum;
304         gintsts_data_t gintsts = { .d32 = 0 };
305         dwc_list_link_t *qh_entry;
306         dwc_otg_qh_t *qh;
307         dwc_otg_transaction_type_e tr_type;
308         int did_something = 0;
309         int32_t next_sched_frame = -1;
310
311         hfnum.d32 =
312             DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hfnum);
313
314 #ifdef DEBUG_SOF
315         DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
316 #endif
317         hcd->frame_number = hfnum.b.frnum;
318
319 #ifdef DEBUG
320         hcd->frrem_accum += hfnum.b.frrem;
321         hcd->frrem_samples++;
322 #endif
323
324 #ifdef DWC_TRACK_MISSED_SOFS
325         track_missed_sofs(hcd->frame_number);
326 #endif
327         /* Determine whether any periodic QHs should be executed. */
328         qh_entry = DWC_LIST_FIRST(&hcd->periodic_sched_inactive);
329         while (qh_entry != &hcd->periodic_sched_inactive) {
330                 qh = DWC_LIST_ENTRY(qh_entry, dwc_otg_qh_t, qh_list_entry);
331                 qh_entry = qh_entry->next;
332                 if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) {
333
334                         /*
335                          * Move QH to the ready list to be executed next
336                          * (micro)frame.
337                          */
338                         DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_ready,
339                                            &qh->qh_list_entry);
340
341                         did_something = 1;
342                 }
343                 else
344                 {
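                        /* Remember the earliest future sched_frame so the FIQ
                         * knows when the next periodic transfer becomes due. */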
345                         if(next_sched_frame < 0 || dwc_frame_num_le(qh->sched_frame, next_sched_frame))
346                         {
347                                 next_sched_frame = qh->sched_frame;
348                         }
349                 }
350         }
351         if (fiq_enable)
352                 hcd->fiq_state->next_sched_frame = next_sched_frame;
353
354         tr_type = dwc_otg_hcd_select_transactions(hcd);
355         if (tr_type != DWC_OTG_TRANSACTION_NONE) {
356                 dwc_otg_hcd_queue_transactions(hcd, tr_type);
357                 did_something = 1;
358         }
359
360         /* Clear the interrupt - but do not trample on the FIQ's SOF handling */
361         if (!fiq_fsm_enable) {
362                 gintsts.b.sofintr = 1;
363                 DWC_WRITE_REG32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32);
364         }
365         return 1;
366 }
367
368 /** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
369  * least one packet in the Rx FIFO.  The packets are moved from the FIFO to
370  * memory if the DWC_otg controller is operating in Slave mode. */
371 int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t * dwc_otg_hcd)
372 {
373         host_grxsts_data_t grxsts;
374         dwc_hc_t *hc = NULL;
375
376         DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
377
378         grxsts.d32 =
379             DWC_READ_REG32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp);
380
381         hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
382         if (!hc) {
383                 DWC_ERROR("Unable to get corresponding channel\n");
384                 return 0;
385         }
386
387         /* Packet Status */
388         DWC_DEBUGPL(DBG_HCDV, "    Ch num = %d\n", grxsts.b.chnum);
389         DWC_DEBUGPL(DBG_HCDV, "    Count = %d\n", grxsts.b.bcnt);
390         DWC_DEBUGPL(DBG_HCDV, "    DPID = %d, hc.dpid = %d\n", grxsts.b.dpid,
391                     hc->data_pid_start);
392         DWC_DEBUGPL(DBG_HCDV, "    PStatus = %d\n", grxsts.b.pktsts);
393
394         switch (grxsts.b.pktsts) {
395         case DWC_GRXSTS_PKTSTS_IN:
396                 /* Read the data into the host buffer. */
397                 if (grxsts.b.bcnt > 0) {
398                         dwc_otg_read_packet(dwc_otg_hcd->core_if,
399                                             hc->xfer_buff, grxsts.b.bcnt);
400
401                         /* Update the HC fields for the next packet received. */
402                         hc->xfer_count += grxsts.b.bcnt;
403                         hc->xfer_buff += grxsts.b.bcnt;
404                 }
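                /* Fall through: the remaining packet statuses require no data
                 * handling here. */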
405
406         case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
407         case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
408         case DWC_GRXSTS_PKTSTS_CH_HALTED:
409                 /* Handled in interrupt, just ignore data */
410                 break;
411         default:
412                 DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n",
413                           grxsts.b.pktsts);
414                 break;
415         }
416
417         return 1;
418 }
419
420 /** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
421  * data packets may be written to the FIFO for OUT transfers. More requests
422  * may be written to the non-periodic request queue for IN transfers. This
423  * interrupt is enabled only in Slave mode. */
424 int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t * dwc_otg_hcd)
425 {
426         DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
427         dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
428                                        DWC_OTG_TRANSACTION_NON_PERIODIC);
429         return 1;
430 }
431
432 /** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
433  * packets may be written to the FIFO for OUT transfers. More requests may be
434  * written to the periodic request queue for IN transfers. This interrupt is
435  * enabled only in Slave mode. */
436 int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t * dwc_otg_hcd)
437 {
438         DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
439         dwc_otg_hcd_queue_transactions(dwc_otg_hcd,
440                                        DWC_OTG_TRANSACTION_PERIODIC);
441         return 1;
442 }
443
444 /** There are multiple conditions that can cause a port interrupt. This function
445  * determines which interrupt conditions have occurred and handles them
446  * appropriately. */
447 int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t * dwc_otg_hcd)
448 {
449         int retval = 0;
450         hprt0_data_t hprt0;
451         hprt0_data_t hprt0_modify;
452
453         hprt0.d32 = DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
454         hprt0_modify.d32 = DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
455
456         /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
457          * GINTSTS */
458
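        /* prtena is write-1-to-clear: writing it back as 1 would disable the
         * port, so force it to 0. The detect/change bits are zeroed here and set
         * individually below only when acknowledging that condition. */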
459         hprt0_modify.b.prtena = 0;
460         hprt0_modify.b.prtconndet = 0;
461         hprt0_modify.b.prtenchng = 0;
462         hprt0_modify.b.prtovrcurrchng = 0;
463
464         /* Port Connect Detected
465          * Set flag and clear if detected */
466         if (dwc_otg_hcd->core_if->hibernation_suspend == 1) {
467                 // Don't modify port status if we are in hibernation state
468                 hprt0_modify.b.prtconndet = 1;
469                 hprt0_modify.b.prtenchng = 1;
470                 DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
471                 hprt0.d32 = DWC_READ_REG32(dwc_otg_hcd->core_if->host_if->hprt0);
472                 return retval;
473         }
474
475         if (hprt0.b.prtconndet) {
476                 /** @todo - check if steps performed in 'else' block should be performed regardless of ADP */
477                 if (dwc_otg_hcd->core_if->adp_enable &&
478                                 dwc_otg_hcd->core_if->adp.vbuson_timer_started == 1) {
479                         DWC_PRINTF("PORT CONNECT DETECTED ----------------\n");
480                         DWC_TIMER_CANCEL(dwc_otg_hcd->core_if->adp.vbuson_timer);
481                         dwc_otg_hcd->core_if->adp.vbuson_timer_started = 0;
482                         /* TODO - check if this is required, as
483                          * host initialization was already performed
484                          * after initial ADP probing
485                          */
486                         /*dwc_otg_hcd->core_if->adp.vbuson_timer_started = 0;
487                         dwc_otg_core_init(dwc_otg_hcd->core_if);
488                         dwc_otg_enable_global_interrupts(dwc_otg_hcd->core_if);
489                         cil_hcd_start(dwc_otg_hcd->core_if);*/
490                 } else {
491
492                         DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
493                                     "Port Connect Detected--\n", hprt0.d32);
494                         dwc_otg_hcd->flags.b.port_connect_status_change = 1;
495                         dwc_otg_hcd->flags.b.port_connect_status = 1;
496                         hprt0_modify.b.prtconndet = 1;
497
498                         /* B-Device has connected; delete the connection timer. */
499                         DWC_TIMER_CANCEL(dwc_otg_hcd->conn_timer);
500                 }
501                 /* The Hub driver asserts a reset when it sees the port
502                  * connect status change flag */
503                 retval |= 1;
504         }
505
506         /* Port Enable Changed
507          * Clear the interrupt if detected; set an internal flag if the port was disabled */
508         if (hprt0.b.prtenchng) {
509                 DWC_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
510                             "Port Enable Changed--\n", hprt0.d32);
511                 hprt0_modify.b.prtenchng = 1;
512                 if (hprt0.b.prtena == 1) {
513                         hfir_data_t hfir;
514                         int do_reset = 0;
515                         dwc_otg_core_params_t *params =
516                             dwc_otg_hcd->core_if->core_params;
517                         dwc_otg_core_global_regs_t *global_regs =
518                             dwc_otg_hcd->core_if->core_global_regs;
519                         dwc_otg_host_if_t *host_if =
520                             dwc_otg_hcd->core_if->host_if;
521
522                         dwc_otg_hcd->flags.b.port_speed = hprt0.b.prtspd;
523                         if (microframe_schedule)
524                                 init_hcd_usecs(dwc_otg_hcd);
525
526                         /* Recalculate HFIR.FrInterval every time the
527                          * port is enabled
528                          */
529                         hfir.d32 = DWC_READ_REG32(&host_if->host_global_regs->hfir);
530                         hfir.b.frint = calc_frame_interval(dwc_otg_hcd->core_if);
531                         DWC_WRITE_REG32(&host_if->host_global_regs->hfir, hfir.d32);
532
533                         /* Check whether the PHY clock speed needs to be adjusted
534                          * for low-power operation, and adjust it if so */
535                         if (params->host_support_fs_ls_low_power) {
536                                 gusbcfg_data_t usbcfg;
537
538                                 usbcfg.d32 =
539                                     DWC_READ_REG32(&global_regs->gusbcfg);
540
541                                 if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED
542                                     || hprt0.b.prtspd ==
543                                     DWC_HPRT0_PRTSPD_FULL_SPEED) {
544                                         /*
545                                          * Low power
546                                          */
547                                         hcfg_data_t hcfg;
548                                         if (usbcfg.b.phylpwrclksel == 0) {
549                                                 /* Set PHY low power clock select for FS/LS devices */
550                                                 usbcfg.b.phylpwrclksel = 1;
551                                                 DWC_WRITE_REG32
552                                                     (&global_regs->gusbcfg,
553                                                      usbcfg.d32);
554                                                 do_reset = 1;
555                                         }
556
557                                         hcfg.d32 =
558                                             DWC_READ_REG32
559                                             (&host_if->host_global_regs->hcfg);
560
561                                         if (hprt0.b.prtspd ==
562                                             DWC_HPRT0_PRTSPD_LOW_SPEED
563                                             && params->host_ls_low_power_phy_clk
564                                             ==
565                                             DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)
566                                         {
567                                                 /* 6 MHZ */
568                                                 DWC_DEBUGPL(DBG_CIL,
569                                                             "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
570                                                 if (hcfg.b.fslspclksel !=
571                                                     DWC_HCFG_6_MHZ) {
572                                                         hcfg.b.fslspclksel =
573                                                             DWC_HCFG_6_MHZ;
574                                                         DWC_WRITE_REG32
575                                                             (&host_if->host_global_regs->hcfg,
576                                                              hcfg.d32);
577                                                         do_reset = 1;
578                                                 }
579                                         } else {
580                                                 /* 48 MHZ */
581                                                 DWC_DEBUGPL(DBG_CIL,
582                                                             "FS_PHY programming HCFG to 48 MHz ()\n");
583                                                 if (hcfg.b.fslspclksel !=
584                                                     DWC_HCFG_48_MHZ) {
585                                                         hcfg.b.fslspclksel =
586                                                             DWC_HCFG_48_MHZ;
587                                                         DWC_WRITE_REG32
588                                                             (&host_if->host_global_regs->hcfg,
589                                                              hcfg.d32);
590                                                         do_reset = 1;
591                                                 }
592                                         }
593                                 } else {
594                                         /*
595                                          * Not low power
596                                          */
597                                         if (usbcfg.b.phylpwrclksel == 1) {
598                                                 usbcfg.b.phylpwrclksel = 0;
599                                                 DWC_WRITE_REG32
600                                                     (&global_regs->gusbcfg,
601                                                      usbcfg.d32);
602                                                 do_reset = 1;
603                                         }
604                                 }
605
606                                 if (do_reset) {
607                                         DWC_TASK_SCHEDULE(dwc_otg_hcd->reset_tasklet);
608                                 }
609                         }
610
611                         if (!do_reset) {
612                                 /* Port has been enabled; set the reset change flag */
613                                 dwc_otg_hcd->flags.b.port_reset_change = 1;
614                         }
615                 } else {
616                         dwc_otg_hcd->flags.b.port_enable_change = 1;
617                 }
618                 retval |= 1;
619         }
620
621         /** Overcurrent Change Interrupt */
622         if (hprt0.b.prtovrcurrchng) {
623                 DWC_DEBUGPL(DBG_HCD, "  --Port Interrupt HPRT0=0x%08x "
624                             "Port Overcurrent Changed--\n", hprt0.d32);
625                 dwc_otg_hcd->flags.b.port_over_current_change = 1;
626                 hprt0_modify.b.prtovrcurrchng = 1;
627                 retval |= 1;
628         }
629
630         /* Clear Port Interrupts */
631         DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
632
633         return retval;
634 }
635
636 /** This interrupt indicates that one or more host channels have a pending
637  * interrupt. There are multiple conditions that can cause each host channel
638  * interrupt. This function determines which conditions have occurred for each
639  * host channel interrupt and handles them appropriately. */
640 int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t * dwc_otg_hcd)
641 {
642         int i;
643         int retval = 0;
644         haint_data_t haint = { .d32 = 0 } ;
645
646         /* Clear appropriate bits in HCINTn to clear the interrupt bit in
647          * GINTSTS */
648
649         if (!fiq_fsm_enable)
650                 haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if);
651
652         // Overwrite with saved interrupts from fiq handler
653         if(fiq_fsm_enable)
654         {
655                 /* check the mask? */
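                /* The FIQ clears bits in haintmsk_saved for channels it has handed
                 * back to the IRQ path; invert to recover the set of channels that
                 * still need servicing here, then reset the saved mask. */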
656                 local_fiq_disable();
657                 fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
658                 haint.b2.chint |= ~(dwc_otg_hcd->fiq_state->haintmsk_saved.b2.chint);
659                 dwc_otg_hcd->fiq_state->haintmsk_saved.b2.chint = ~0;
660                 fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
661                 local_fiq_enable();
662         }
663
664         for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) {
665                 if (haint.b2.chint & (1 << i)) {
666                         retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i);
667                 }
668         }
669
670         return retval;
671 }
672
673 /**
674  * Gets the actual length of a transfer after the transfer halts. _halt_status
675  * holds the reason for the halt.
676  *
677  * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE,
678  * *short_read is set to 1 upon return if less than the requested
679  * number of bytes were transferred. Otherwise, *short_read is set to 0 upon
680  * return. short_read may also be NULL on entry, in which case it remains
681  * unchanged.
682  */
683 static uint32_t get_actual_xfer_length(dwc_hc_t * hc,
684                                        dwc_otg_hc_regs_t * hc_regs,
685                                        dwc_otg_qtd_t * qtd,
686                                        dwc_otg_halt_status_e halt_status,
687                                        int *short_read)
688 {
689         hctsiz_data_t hctsiz;
690         uint32_t length;
691
692         if (short_read != NULL) {
693                 *short_read = 0;
694         }
695         hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
696
697         if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
698                 if (hc->ep_is_in) {
699                         length = hc->xfer_len - hctsiz.b.xfersize;
700                         if (short_read != NULL) {
701                                 *short_read = (hctsiz.b.xfersize != 0);
702                         }
703                 } else if (hc->qh->do_split) {
704                                 //length = split_out_xfersize[hc->hc_num];
705                                 length = qtd->ssplit_out_xfer_count;
706                 } else {
707                         length = hc->xfer_len;
708                 }
709         } else {
710                 /*
711                  * Must use the hctsiz.pktcnt field to determine how much data
712                  * has been transferred. This field reflects the number of
713                  * packets that have been transferred via the USB. This is
714                  * always an integral number of packets if the transfer was
715                  * halted before its normal completion. (Can't use the
716                  * hctsiz.xfersize field because that reflects the number of
717                  * bytes transferred via the AHB, not the USB).
718                  */
719                 length =
720                     (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet;
721         }
722
723         return length;
724 }
725
726 /**
727  * Updates the state of the URB after a Transfer Complete interrupt on the
728  * host channel. Updates the actual_length field of the URB based on the
729  * number of bytes transferred via the host channel. Sets the URB status
730  * if the data transfer is finished.
731  *
732  * @return 1 if the data transfer specified by the URB is completely finished,
733  * 0 otherwise.
734  */
735 static int update_urb_state_xfer_comp(dwc_hc_t * hc,
736                                       dwc_otg_hc_regs_t * hc_regs,
737                                       dwc_otg_hcd_urb_t * urb,
738                                       dwc_otg_qtd_t * qtd)
739 {
740         int xfer_done = 0;
741         int short_read = 0;
742
743         int xfer_length;
744
745         xfer_length = get_actual_xfer_length(hc, hc_regs, qtd,
746                                              DWC_OTG_HC_XFER_COMPLETE,
747                                              &short_read);
748
749         if (urb->actual_length + xfer_length > urb->length) {
750                 printk_once(KERN_DEBUG "dwc_otg: DEVICE:%03d : %s:%d:trimming xfer length\n",
751                         hc->dev_addr, __func__, __LINE__);
752                 xfer_length = urb->length - urb->actual_length;
753         }
754
755         /* non DWORD-aligned buffer case handling. */
756         if (hc->align_buff && xfer_length && hc->ep_is_in) {
757                 dwc_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
758                            xfer_length);
759         }
760
761         urb->actual_length += xfer_length;
762
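        /* A bulk OUT URB flagged URB_SEND_ZERO_PACKET that ends exactly on a
         * max-packet boundary still needs a zero-length packet, so it is not
         * marked done yet. */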
763         if (xfer_length && (hc->ep_type == DWC_OTG_EP_TYPE_BULK) &&
764             (urb->flags & URB_SEND_ZERO_PACKET)
765             && (urb->actual_length == urb->length)
766             && !(urb->length % hc->max_packet)) {
767                 xfer_done = 0;
768         } else if (short_read || urb->actual_length >= urb->length) {
769                 xfer_done = 1;
770                 urb->status = 0;
771         }
772
773 #ifdef DEBUG
774         {
775                 hctsiz_data_t hctsiz;
776                 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
777                 DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
778                             __func__, (hc->ep_is_in ? "IN" : "OUT"),
779                             hc->hc_num);
780                 DWC_DEBUGPL(DBG_HCDV, "  hc->xfer_len %d\n", hc->xfer_len);
781                 DWC_DEBUGPL(DBG_HCDV, "  hctsiz.xfersize %d\n",
782                             hctsiz.b.xfersize);
783                 DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
784                             urb->length);
785                 DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n",
786                             urb->actual_length);
787                 DWC_DEBUGPL(DBG_HCDV, "  short_read %d, xfer_done %d\n",
788                             short_read, xfer_done);
789         }
790 #endif
791
792         return xfer_done;
793 }
794
795 /*
796  * Save the starting data toggle for the next transfer. The data toggle is
797  * saved in the QH for non-control transfers and it's saved in the QTD for
798  * control transfers.
799  */
800 void dwc_otg_hcd_save_data_toggle(dwc_hc_t * hc,
801                              dwc_otg_hc_regs_t * hc_regs, dwc_otg_qtd_t * qtd)
802 {
803         hctsiz_data_t hctsiz;
804         hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
805
806         if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
807                 dwc_otg_qh_t *qh = hc->qh;
808                 if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
809                         qh->data_toggle = DWC_OTG_HC_PID_DATA0;
810                 } else {
811                         qh->data_toggle = DWC_OTG_HC_PID_DATA1;
812                 }
813         } else {
814                 if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
815                         qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
816                 } else {
817                         qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
818                 }
819         }
820 }
821
822 /**
823  * Updates the state of an Isochronous URB when the transfer is stopped for
824  * any reason. The fields of the current entry in the frame descriptor array
825  * are set based on the transfer state and the input _halt_status. Completes
826  * the Isochronous URB if all the URB frames have been completed.
827  *
828  * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
829  * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
830  */
831 static dwc_otg_halt_status_e
832 update_isoc_urb_state(dwc_otg_hcd_t * hcd,
833                       dwc_hc_t * hc,
834                       dwc_otg_hc_regs_t * hc_regs,
835                       dwc_otg_qtd_t * qtd, dwc_otg_halt_status_e halt_status)
836 {
837         dwc_otg_hcd_urb_t *urb = qtd->urb;
838         dwc_otg_halt_status_e ret_val = halt_status;
839         struct dwc_otg_hcd_iso_packet_desc *frame_desc;
840
841         frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
842         switch (halt_status) {
843         case DWC_OTG_HC_XFER_COMPLETE:
844                 frame_desc->status = 0;
845                 frame_desc->actual_length =
846                     get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
847
848                 /* non DWORD-aligned buffer case handling. */
849                 if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
850                         dwc_memcpy(urb->buf + frame_desc->offset + qtd->isoc_split_offset,
851                                    hc->qh->dw_align_buf, frame_desc->actual_length);
852                 }
853
854                 break;
855         case DWC_OTG_HC_XFER_FRAME_OVERRUN:
856                 urb->error_count++;
857                 if (hc->ep_is_in) {
858                         frame_desc->status = -DWC_E_NO_STREAM_RES;
859                 } else {
860                         frame_desc->status = -DWC_E_COMMUNICATION;
861                 }
862                 frame_desc->actual_length = 0;
863                 break;
864         case DWC_OTG_HC_XFER_BABBLE_ERR:
865                 urb->error_count++;
866                 frame_desc->status = -DWC_E_OVERFLOW;
867                 /* Don't need to update actual_length in this case. */
868                 break;
869         case DWC_OTG_HC_XFER_XACT_ERR:
870                 urb->error_count++;
871                 frame_desc->status = -DWC_E_PROTOCOL;
872                 frame_desc->actual_length =
873                     get_actual_xfer_length(hc, hc_regs, qtd, halt_status, NULL);
874
875                 /* non DWORD-aligned buffer case handling. */
876                 if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in) {
877                         dwc_memcpy(urb->buf + frame_desc->offset + qtd->isoc_split_offset,
878                                    hc->qh->dw_align_buf, frame_desc->actual_length);
879                 }
880                 /* Skip whole frame */
881                 if (hc->qh->do_split && (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) &&
882                     hc->ep_is_in && hcd->core_if->dma_enable) {
883                         qtd->complete_split = 0;
884                         qtd->isoc_split_offset = 0;
885                 }
886
887                 break;
888         default:
889                 DWC_ASSERT(1, "Unhandled _halt_status (%d)\n", halt_status);
890                 break;
891         }
892         if (++qtd->isoc_frame_index == urb->packet_count) {
893                 /*
894                  * urb->status is not used for isoc transfers.
895                  * The individual frame_desc statuses are used instead.
896                  */
897                 hcd->fops->complete(hcd, urb->priv, urb, 0);
898                 ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
899         } else {
900                 ret_val = DWC_OTG_HC_XFER_COMPLETE;
901         }
902         return ret_val;
903 }
904
905 /**
906  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
907  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
908  * still linked to the QH, the QH is added to the end of the inactive
909  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
910  * schedule if no more QTDs are linked to the QH.
911  */
912 static void deactivate_qh(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, int free_qtd)
913 {
914         int continue_split = 0;
915         dwc_otg_qtd_t *qtd;
916
917         DWC_DEBUGPL(DBG_HCDV, "  %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);
918
919         qtd = DWC_CIRCLEQ_FIRST(&qh->qtd_list);
920
921         if (qtd->complete_split) {
922                 continue_split = 1;
923         } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID ||
924                    qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) {
925                 continue_split = 1;
926         }
927
928         if (free_qtd) {
929                 dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
930                 continue_split = 0;
931         }
932
933         qh->channel = NULL;
934         dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split);
935 }
936
937 /**
938  * Releases a host channel for use by other transfers. Attempts to select and
939  * queue more transactions since at least one host channel is available.
940  *
941  * @param hcd The HCD state structure.
942  * @param hc The host channel to release.
943  * @param qtd The QTD associated with the host channel. This QTD may be freed
944  * if the transfer is complete or an error has occurred.
945  * @param halt_status Reason the channel is being released. This status
946  * determines the actions taken by this function.
947  */
948 static void release_channel(dwc_otg_hcd_t * hcd,
949                             dwc_hc_t * hc,
950                             dwc_otg_qtd_t * qtd,
951                             dwc_otg_halt_status_e halt_status)
952 {
953         dwc_otg_transaction_type_e tr_type;
954         int free_qtd;
955
956         int hog_port = 0;
957
958         DWC_DEBUGPL(DBG_HCDV, "  %s: channel %d, halt_status %d, xfer_len %d\n",
959                     __func__, hc->hc_num, halt_status, hc->xfer_len);
960
961         if(fiq_fsm_enable && hc->do_split) {
962                 if(!hc->ep_is_in && hc->ep_type == UE_ISOCHRONOUS) {
963                         if(hc->xact_pos == DWC_HCSPLIT_XACTPOS_MID ||
964                                         hc->xact_pos == DWC_HCSPLIT_XACTPOS_BEGIN) {
965                                 hog_port = 0;
966                         }
967                 }
968         }
969
970         switch (halt_status) {
971         case DWC_OTG_HC_XFER_URB_COMPLETE:
972                 free_qtd = 1;
973                 break;
974         case DWC_OTG_HC_XFER_AHB_ERR:
975         case DWC_OTG_HC_XFER_STALL:
976         case DWC_OTG_HC_XFER_BABBLE_ERR:
977                 free_qtd = 1;
978                 break;
979         case DWC_OTG_HC_XFER_XACT_ERR:
980                 if (qtd->error_count >= 3) {
981                         DWC_DEBUGPL(DBG_HCDV,
982                                     "  Complete URB with transaction error\n");
983                         free_qtd = 1;
984                         qtd->urb->status = -DWC_E_PROTOCOL;
985                         hcd->fops->complete(hcd, qtd->urb->priv,
986                                             qtd->urb, -DWC_E_PROTOCOL);
987                 } else {
988                         free_qtd = 0;
989                 }
990                 break;
991         case DWC_OTG_HC_XFER_URB_DEQUEUE:
992                 /*
993                  * The QTD has already been removed and the QH has been
994                  * deactivated. Don't want to do anything except release the
995                  * host channel and try to queue more transfers.
996                  */
997                 goto cleanup;
998         case DWC_OTG_HC_XFER_NO_HALT_STATUS:
999                 free_qtd = 0;
1000                 break;
1001         case DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE:
1002                 DWC_DEBUGPL(DBG_HCDV,
1003                         "  Complete URB with I/O error\n");
1004                 free_qtd = 1;
1005                 qtd->urb->status = -DWC_E_IO;
1006                 hcd->fops->complete(hcd, qtd->urb->priv,
1007                         qtd->urb, -DWC_E_IO);
1008                 break;
1009         default:
1010                 free_qtd = 0;
1011                 break;
1012         }
1013
1014         deactivate_qh(hcd, hc->qh, free_qtd);
1015
1016 cleanup:
1017         /*
1018          * Release the host channel for use by other transfers. The cleanup
1019          * function clears the channel interrupt enables and conditions, so
1020          * there's no need to clear the Channel Halted interrupt separately.
1021          */
1022         if (fiq_fsm_enable && hcd->fiq_state->channel[hc->hc_num].fsm != FIQ_PASSTHROUGH)
1023                 dwc_otg_cleanup_fiq_channel(hcd, hc->hc_num);
1024         dwc_otg_hc_cleanup(hcd->core_if, hc);
1025         DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
1026
1027         if (!microframe_schedule) {
1028                 switch (hc->ep_type) {
1029                 case DWC_OTG_EP_TYPE_CONTROL:
1030                 case DWC_OTG_EP_TYPE_BULK:
1031                         hcd->non_periodic_channels--;
1032                         break;
1033
1034                 default:
1035                         /*
1036                          * Don't release reservations for periodic channels here.
1037                          * That's done when a periodic transfer is descheduled (i.e.
1038                          * when the QH is removed from the periodic schedule).
1039                          */
1040                         break;
1041                 }
1042         } else {
1043                 hcd->available_host_channels++;
1044                 fiq_print(FIQDBG_INT, hcd->fiq_state, "AHC = %d ", hcd->available_host_channels);
1045         }
1046
1047         /* Try to queue more transfers now that there's a free channel. */
1048         tr_type = dwc_otg_hcd_select_transactions(hcd);
1049         if (tr_type != DWC_OTG_TRANSACTION_NONE) {
1050                 dwc_otg_hcd_queue_transactions(hcd, tr_type);
1051         }
1052 }
1053
1054 /**
1055  * Halts a host channel. If the channel cannot be halted immediately because
1056  * the request queue is full, this function ensures that the FIFO empty
1057  * interrupt for the appropriate queue is enabled so that the halt request can
1058  * be queued when there is space in the request queue.
1059  *
1060  * This function may also be called in DMA mode. In that case, the channel is
1061  * simply released since the core always halts the channel automatically in
1062  * DMA mode.
1063  */
1064 static void halt_channel(dwc_otg_hcd_t * hcd,
1065                          dwc_hc_t * hc,
1066                          dwc_otg_qtd_t * qtd, dwc_otg_halt_status_e halt_status)
1067 {
1068         if (hcd->core_if->dma_enable) {
1069                 release_channel(hcd, hc, qtd, halt_status);
1070                 return;
1071         }
1072
1073         /* Slave mode processing... */
1074         dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
1075
1076         if (hc->halt_on_queue) {
1077                 gintmsk_data_t gintmsk = {.d32 = 0 };
1078                 dwc_otg_core_global_regs_t *global_regs;
1079                 global_regs = hcd->core_if->core_global_regs;
1080
1081                 if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
1082                     hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
1083                         /*
1084                          * Make sure the Non-periodic Tx FIFO empty interrupt
1085                          * is enabled so that the non-periodic schedule will
1086                          * be processed.
1087                          */
1088                         gintmsk.b.nptxfempty = 1;
1089                         if (fiq_enable) {
1090                                 local_fiq_disable();
1091                                 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
1092                                 DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
1093                                 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
1094                                 local_fiq_enable();
1095                         } else {
1096                                 DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
1097                         }
1098                 } else {
1099                         /*
1100                          * Move the QH from the periodic queued schedule to
1101                          * the periodic assigned schedule. This allows the
1102                          * halt to be queued when the periodic schedule is
1103                          * processed.
1104                          */
1105                         DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
1106                                            &hc->qh->qh_list_entry);
1107
1108                         /*
1109                          * Make sure the Periodic Tx FIFO Empty interrupt is
1110                          * enabled so that the periodic schedule will be
1111                          * processed.
1112                          */
1113                         gintmsk.b.ptxfempty = 1;
1114                         if (fiq_enable) {
1115                                 local_fiq_disable();
1116                                 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
1117                                 DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
1118                                 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
1119                                 local_fiq_enable();
1120                         } else {
1121                                 DWC_MODIFY_REG32(&global_regs->gintmsk, 0, gintmsk.d32);
1122                         }
1123                 }
1124         }
1125 }
1126
1127 /**
1128  * Performs common cleanup for non-periodic transfers after a Transfer
1129  * Complete interrupt. This function should be called after any endpoint type
1130  * specific handling is finished to release the host channel.
1131  */
1132 static void complete_non_periodic_xfer(dwc_otg_hcd_t * hcd,
1133                                        dwc_hc_t * hc,
1134                                        dwc_otg_hc_regs_t * hc_regs,
1135                                        dwc_otg_qtd_t * qtd,
1136                                        dwc_otg_halt_status_e halt_status)
1137 {
1138         hcint_data_t hcint;
1139
1140         qtd->error_count = 0;
1141
1142         hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
1143         if (hcint.b.nyet) {
1144                 /*
1145                  * Got a NYET on the last transaction of the transfer. This
1146                  * means that the endpoint should be in the PING state at the
1147                  * beginning of the next transfer.
1148                  */
1149                 hc->qh->ping_state = 1;
1150                 clear_hc_int(hc_regs, nyet);
1151         }
1152
1153         /*
1154          * Always halt and release the host channel to make it available for
1155          * more transfers. There may still be more phases for a control
1156          * transfer or more data packets for a bulk transfer at this point,
1157          * but the host channel is still halted. A channel will be reassigned
1158          * to the transfer when the non-periodic schedule is processed after
1159          * the channel is released. This allows transactions to be queued
1160          * properly via dwc_otg_hcd_queue_transactions, which also enables the
1161          * Tx FIFO Empty interrupt if necessary.
1162          */
1163         if (hc->ep_is_in) {
1164                 /*
1165                  * IN transfers in Slave mode require an explicit disable to
1166                  * halt the channel. (In DMA mode, this call simply releases
1167                  * the channel.)
1168                  */
1169                 halt_channel(hcd, hc, qtd, halt_status);
1170         } else {
1171                 /*
1172                  * The channel is automatically disabled by the core for OUT
1173                  * transfers in Slave mode.
1174                  */
1175                 release_channel(hcd, hc, qtd, halt_status);
1176         }
1177 }
1178
1179 /**
1180  * Performs common cleanup for periodic transfers after a Transfer Complete
1181  * interrupt. This function should be called after any endpoint type specific
1182  * handling is finished to release the host channel.
1183  */
1184 static void complete_periodic_xfer(dwc_otg_hcd_t * hcd,
1185                                    dwc_hc_t * hc,
1186                                    dwc_otg_hc_regs_t * hc_regs,
1187                                    dwc_otg_qtd_t * qtd,
1188                                    dwc_otg_halt_status_e halt_status)
1189 {
1190         hctsiz_data_t hctsiz;
1191         qtd->error_count = 0;
1192
1193         hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
1194         if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) {
1195                 /* Core halts channel in these cases. */
1196                 release_channel(hcd, hc, qtd, halt_status);
1197         } else {
1198                 /* Flush any outstanding requests from the Tx queue. */
1199                 halt_channel(hcd, hc, qtd, halt_status);
1200         }
1201 }
1202
1203 static int32_t handle_xfercomp_isoc_split_in(dwc_otg_hcd_t * hcd,
1204                                              dwc_hc_t * hc,
1205                                              dwc_otg_hc_regs_t * hc_regs,
1206                                              dwc_otg_qtd_t * qtd)
1207 {
1208         uint32_t len;
1209         struct dwc_otg_hcd_iso_packet_desc *frame_desc;
1210         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
1211
1212         len = get_actual_xfer_length(hc, hc_regs, qtd,
1213                                      DWC_OTG_HC_XFER_COMPLETE, NULL);
1214
1215         if (!len) {
1216                 qtd->complete_split = 0;
1217                 qtd->isoc_split_offset = 0;
1218                 return 0;
1219         }
1220         frame_desc->actual_length += len;
1221
1222         if (hc->align_buff && len)
1223                 dwc_memcpy(qtd->urb->buf + frame_desc->offset +
1224                            qtd->isoc_split_offset, hc->qh->dw_align_buf, len);
1225         qtd->isoc_split_offset += len;
1226
1227         if (frame_desc->length == frame_desc->actual_length) {
1228                 frame_desc->status = 0;
1229                 qtd->isoc_frame_index++;
1230                 qtd->complete_split = 0;
1231                 qtd->isoc_split_offset = 0;
1232         }
1233
1234         if (qtd->isoc_frame_index == qtd->urb->packet_count) {
1235                 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
1236                 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
1237         } else {
1238                 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
1239         }
1240
1241         return 1;               /* Indicates that channel released */
1242 }
1243
1244 /**
1245  * Handles a host channel Transfer Complete interrupt. This handler may be
1246  * called in either DMA mode or Slave mode.
1247  */
1248 static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t * hcd,
1249                                        dwc_hc_t * hc,
1250                                        dwc_otg_hc_regs_t * hc_regs,
1251                                        dwc_otg_qtd_t * qtd)
1252 {
1253         int urb_xfer_done;
1254         dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE;
1255         dwc_otg_hcd_urb_t *urb = qtd->urb;
1256         int pipe_type = dwc_otg_hcd_get_pipe_type(&urb->pipe_info);
1257
1258         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1259                     "Transfer Complete--\n", hc->hc_num);
1260
1261         if (hcd->core_if->dma_desc_enable) {
1262                 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, halt_status);
1263                 if (pipe_type == UE_ISOCHRONOUS) {
1264                         /* Do not disable the interrupt, just clear it */
1265                         clear_hc_int(hc_regs, xfercomp);
1266                         return 1;
1267                 }
1268                 goto handle_xfercomp_done;
1269         }
1270
1271         /*
1272          * Handle xfer complete on CSPLIT.
1273          */
1274
1275         if (hc->qh->do_split) {
1276                 if ((hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && hc->ep_is_in
1277                     && hcd->core_if->dma_enable) {
1278                         if (qtd->complete_split
1279                             && handle_xfercomp_isoc_split_in(hcd, hc, hc_regs,
1280                                                              qtd))
1281                                 goto handle_xfercomp_done;
1282                 } else {
1283                         qtd->complete_split = 0;
1284                 }
1285         }
1286
1287         /* Update the QTD and URB states. */
1288         switch (pipe_type) {
1289         case UE_CONTROL:
1290                 switch (qtd->control_phase) {
1291                 case DWC_OTG_CONTROL_SETUP:
1292                         if (urb->length > 0) {
1293                                 qtd->control_phase = DWC_OTG_CONTROL_DATA;
1294                         } else {
1295                                 qtd->control_phase = DWC_OTG_CONTROL_STATUS;
1296                         }
1297                         DWC_DEBUGPL(DBG_HCDV,
1298                                     "  Control setup transaction done\n");
1299                         halt_status = DWC_OTG_HC_XFER_COMPLETE;
1300                         break;
1301                 case DWC_OTG_CONTROL_DATA:{
1302                                 urb_xfer_done =
1303                                     update_urb_state_xfer_comp(hc, hc_regs, urb,
1304                                                                qtd);
1305                                 if (urb_xfer_done) {
1306                                         qtd->control_phase =
1307                                             DWC_OTG_CONTROL_STATUS;
1308                                         DWC_DEBUGPL(DBG_HCDV,
1309                                                     "  Control data transfer done\n");
1310                                 } else {
1311                                         dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1312                                 }
1313                                 halt_status = DWC_OTG_HC_XFER_COMPLETE;
1314                                 break;
1315                         }
1316                 case DWC_OTG_CONTROL_STATUS:
1317                         DWC_DEBUGPL(DBG_HCDV, "  Control transfer complete\n");
1318                         if (urb->status == -DWC_E_IN_PROGRESS) {
1319                                 urb->status = 0;
1320                         }
1321                         hcd->fops->complete(hcd, urb->priv, urb, urb->status);
1322                         halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1323                         break;
1324                 }
1325
1326                 complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1327                 break;
1328         case UE_BULK:
1329                 DWC_DEBUGPL(DBG_HCDV, "  Bulk transfer complete\n");
1330                 urb_xfer_done =
1331                     update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
1332                 if (urb_xfer_done) {
1333                         hcd->fops->complete(hcd, urb->priv, urb, urb->status);
1334                         halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
1335                 } else {
1336                         halt_status = DWC_OTG_HC_XFER_COMPLETE;
1337                 }
1338
1339                 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1340                 complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1341                 break;
1342         case UE_INTERRUPT:
1343                 DWC_DEBUGPL(DBG_HCDV, "  Interrupt transfer complete\n");
1344                 urb_xfer_done =
1345                         update_urb_state_xfer_comp(hc, hc_regs, urb, qtd);
1346
1347                 /*
1348                  * Interrupt URB is done on the first transfer complete
1349                  * interrupt.
1350                  */
                if (urb_xfer_done) {
                        hcd->fops->complete(hcd, urb->priv, urb, urb->status);
                        halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
                } else {
                        halt_status = DWC_OTG_HC_XFER_COMPLETE;
                }
1357
1358                 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1359                 complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1360                 break;
1361         case UE_ISOCHRONOUS:
1362                 DWC_DEBUGPL(DBG_HCDV, "  Isochronous transfer complete\n");
1363                 if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) {
1364                         halt_status =
1365                             update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1366                                                   DWC_OTG_HC_XFER_COMPLETE);
1367                 }
1368                 complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status);
1369                 break;
1370         }
1371
1372 handle_xfercomp_done:
1373         disable_hc_int(hc_regs, xfercompl);
1374
1375         return 1;
1376 }
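
/*
 * The UE_CONTROL arm above steps a SETUP -> DATA -> STATUS state machine
 * kept in qtd->control_phase. The disabled sketch below restates that
 * progression in isolation using the same enum constants; it is purely
 * illustrative and is not called anywhere in the driver.
 */
#if 0
static int example_next_control_phase(int cur_phase, uint32_t data_len,
                                      int data_phase_done)
{
        switch (cur_phase) {
        case DWC_OTG_CONTROL_SETUP:
                /* A zero-length data stage goes straight to STATUS. */
                return data_len ? DWC_OTG_CONTROL_DATA : DWC_OTG_CONTROL_STATUS;
        case DWC_OTG_CONTROL_DATA:
                return data_phase_done ? DWC_OTG_CONTROL_STATUS
                                       : DWC_OTG_CONTROL_DATA;
        default:
                /* STATUS: the URB completes and the next URB starts at SETUP. */
                return DWC_OTG_CONTROL_SETUP;
        }
}
#endif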
1377
1378 /**
1379  * Handles a host channel STALL interrupt. This handler may be called in
1380  * either DMA mode or Slave mode.
1381  */
1382 static int32_t handle_hc_stall_intr(dwc_otg_hcd_t * hcd,
1383                                     dwc_hc_t * hc,
1384                                     dwc_otg_hc_regs_t * hc_regs,
1385                                     dwc_otg_qtd_t * qtd)
1386 {
1387         dwc_otg_hcd_urb_t *urb = qtd->urb;
1388         int pipe_type = dwc_otg_hcd_get_pipe_type(&urb->pipe_info);
1389
1390         DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
1391                     "STALL Received--\n", hc->hc_num);
1392
1393         if (hcd->core_if->dma_desc_enable) {
1394                 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs, DWC_OTG_HC_XFER_STALL);
1395                 goto handle_stall_done;
1396         }
1397
1398         if (pipe_type == UE_CONTROL) {
1399                 hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_PIPE);
1400         }
1401
1402         if (pipe_type == UE_BULK || pipe_type == UE_INTERRUPT) {
1403                 hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_PIPE);
1404                 /*
1405                  * USB protocol requires resetting the data toggle for bulk
1406                  * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1407                  * setup command is issued to the endpoint. Anticipate the
1408                  * CLEAR_FEATURE command since a STALL has occurred and reset
1409                  * the data toggle now.
1410                  */
1411                 hc->qh->data_toggle = 0;
1412         }
1413
1414         halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL);
1415
1416 handle_stall_done:
1417         disable_hc_int(hc_regs, stall);
1418
1419         return 1;
1420 }
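
/*
 * For reference, the CLEAR_FEATURE(ENDPOINT_HALT) request anticipated above
 * is issued by usbcore / the class driver, not by this HCD. The disabled
 * block below shows a representative setup packet (illustrative values
 * only; wIndex carries the endpoint address, here EP1 IN as an example).
 */
#if 0
static const struct {
        uint8_t  bmRequestType; /* 0x02: host-to-device, standard, endpoint */
        uint8_t  bRequest;      /* 0x01: CLEAR_FEATURE */
        uint16_t wValue;        /* 0x0000: ENDPOINT_HALT feature selector */
        uint16_t wIndex;        /* endpoint address, e.g. 0x0081 for EP1 IN */
        uint16_t wLength;       /* 0: no data stage */
} example_clear_halt_setup = { 0x02, 0x01, 0x0000, 0x0081, 0x0000 };
#endif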
1421
1422 /*
1423  * Updates the state of the URB when a transfer has been stopped due to an
1424  * abnormal condition before the transfer completes. Modifies the
1425  * actual_length field of the URB to reflect the number of bytes that have
1426  * actually been transferred via the host channel.
1427  */
1428 static void update_urb_state_xfer_intr(dwc_hc_t * hc,
1429                                        dwc_otg_hc_regs_t * hc_regs,
1430                                        dwc_otg_hcd_urb_t * urb,
1431                                        dwc_otg_qtd_t * qtd,
1432                                        dwc_otg_halt_status_e halt_status)
1433 {
1434         uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd,
1435                                                             halt_status, NULL);
1436
1437         if (urb->actual_length + bytes_transferred > urb->length) {
1438                 printk_once(KERN_DEBUG "dwc_otg: DEVICE:%03d : %s:%d:trimming xfer length\n",
1439                         hc->dev_addr, __func__, __LINE__);
1440                 bytes_transferred = urb->length - urb->actual_length;
1441         }
1442
1443         /* non DWORD-aligned buffer case handling. */
1444         if (hc->align_buff && bytes_transferred && hc->ep_is_in) {
1445                 dwc_memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
1446                            bytes_transferred);
1447         }
1448
1449         urb->actual_length += bytes_transferred;
1450
1451 #ifdef DEBUG
1452         {
1453                 hctsiz_data_t hctsiz;
1454                 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
1455                 DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
1456                             __func__, (hc->ep_is_in ? "IN" : "OUT"),
1457                             hc->hc_num);
1458                 DWC_DEBUGPL(DBG_HCDV, "  hc->start_pkt_count %d\n",
1459                             hc->start_pkt_count);
1460                 DWC_DEBUGPL(DBG_HCDV, "  hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
1461                 DWC_DEBUGPL(DBG_HCDV, "  hc->max_packet %d\n", hc->max_packet);
1462                 DWC_DEBUGPL(DBG_HCDV, "  bytes_transferred %d\n",
1463                             bytes_transferred);
1464                 DWC_DEBUGPL(DBG_HCDV, "  urb->actual_length %d\n",
1465                             urb->actual_length);
1466                 DWC_DEBUGPL(DBG_HCDV, "  urb->transfer_buffer_length %d\n",
1467                             urb->length);
1468         }
1469 #endif
1470 }
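
/*
 * Worked example of the clamp above (illustrative numbers): with
 * urb->length = 512 and urb->actual_length = 448, a reported
 * bytes_transferred of 128 would overrun the buffer (448 + 128 = 576),
 * so it is trimmed to 512 - 448 = 64 before being accumulated.
 */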
1471
1472 /**
1473  * Handles a host channel NAK interrupt. This handler may be called in either
1474  * DMA mode or Slave mode.
1475  */
1476 static int32_t handle_hc_nak_intr(dwc_otg_hcd_t * hcd,
1477                                   dwc_hc_t * hc,
1478                                   dwc_otg_hc_regs_t * hc_regs,
1479                                   dwc_otg_qtd_t * qtd)
1480 {
1481         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1482                     "NAK Received--\n", hc->hc_num);
1483
1484         /*
         * When we get bulk NAKs, remember this so we hold off on this QH until
         * the beginning of the next frame.
1487          */
        switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
        case UE_BULK:
        case UE_CONTROL:
                if (nak_holdoff && qtd->qh->do_split)
                        hc->qh->nak_frame = dwc_otg_hcd_get_frame_number(hcd);
        }
1494
1495         /*
         * Handle NAK for bulk, control, and interrupt IN/OUT SSPLIT/CSPLIT
         * transfers. Restart the SSPLIT transfer.
1498          */
1499         if (hc->do_split) {
1500                 if (hc->complete_split) {
1501                         qtd->error_count = 0;
1502                 }
1503                 qtd->complete_split = 0;
1504                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1505                 goto handle_nak_done;
1506         }
1507
1508         switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1509         case UE_CONTROL:
1510         case UE_BULK:
1511                 if (hcd->core_if->dma_enable && hc->ep_is_in) {
1512                         /*
1513                          * NAK interrupts are enabled on bulk/control IN
1514                          * transfers in DMA mode for the sole purpose of
1515                          * resetting the error count after a transaction error
1516                          * occurs. The core will continue transferring data.
1517                          * Disable other interrupts unmasked for the same
1518                          * reason.
1519                          */
1520                         disable_hc_int(hc_regs, datatglerr);
1521                         disable_hc_int(hc_regs, ack);
1522                         qtd->error_count = 0;
1523                         goto handle_nak_done;
1524                 }
1525
1526                 /*
1527                  * NAK interrupts normally occur during OUT transfers in DMA
1528                  * or Slave mode. For IN transfers, more requests will be
1529                  * queued as request queue space is available.
1530                  */
1531                 qtd->error_count = 0;
1532
1533                 if (!hc->qh->ping_state) {
1534                         update_urb_state_xfer_intr(hc, hc_regs,
1535                                                    qtd->urb, qtd,
1536                                                    DWC_OTG_HC_XFER_NAK);
1537                         dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1538
1539                         if (hc->speed == DWC_OTG_EP_SPEED_HIGH)
1540                                 hc->qh->ping_state = 1;
1541                 }
1542
1543                 /*
1544                  * Halt the channel so the transfer can be re-started from
1545                  * the appropriate point or the PING protocol will
1546                  * start/continue.
1547                  */
1548                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1549                 break;
1550         case UE_INTERRUPT:
1551                 qtd->error_count = 0;
1552                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK);
1553                 break;
1554         case UE_ISOCHRONOUS:
1555                 /* Should never get called for isochronous transfers. */
                DWC_ASSERT(1, "NAK interrupt for ISOC transfer\n");
1557                 break;
1558         }
1559
1560 handle_nak_done:
1561         disable_hc_int(hc_regs, nak);
1562
1563         return 1;
1564 }
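
/*
 * Background on the ping_state handling above: for high-speed bulk and
 * control OUT endpoints, USB 2.0 lets the host issue a PING token instead
 * of resending the whole data packet after a NAK; the device answers ACK
 * when it has buffer space and NYET when it does not. Setting
 * hc->qh->ping_state here simply makes the next (re)start of this transfer
 * begin with that PING handshake rather than with data.
 */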
1565
1566 /**
1567  * Handles a host channel ACK interrupt. This interrupt is enabled when
1568  * performing the PING protocol in Slave mode, when errors occur during
1569  * either Slave mode or DMA mode, and during Start Split transactions.
1570  */
1571 static int32_t handle_hc_ack_intr(dwc_otg_hcd_t * hcd,
1572                                   dwc_hc_t * hc,
1573                                   dwc_otg_hc_regs_t * hc_regs,
1574                                   dwc_otg_qtd_t * qtd)
1575 {
1576         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1577                     "ACK Received--\n", hc->hc_num);
1578
1579         if (hc->do_split) {
1580                 /*
1581                  * Handle ACK on SSPLIT.
1582                  * ACK should not occur in CSPLIT.
1583                  */
1584                 if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) {
1585                         qtd->ssplit_out_xfer_count = hc->xfer_len;
1586                 }
1587                 if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) {
1588                         /* Don't need complete for isochronous out transfers. */
1589                         qtd->complete_split = 1;
1590                 }
1591
1592                 /* ISOC OUT */
1593                 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
1594                         switch (hc->xact_pos) {
1595                         case DWC_HCSPLIT_XACTPOS_ALL:
1596                                 break;
1597                         case DWC_HCSPLIT_XACTPOS_END:
1598                                 qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
1599                                 qtd->isoc_split_offset = 0;
1600                                 break;
1601                         case DWC_HCSPLIT_XACTPOS_BEGIN:
1602                         case DWC_HCSPLIT_XACTPOS_MID:
1603                                 /*
1604                                  * For BEGIN or MID, calculate the length for
1605                                  * the next microframe to determine the correct
1606                                  * SSPLIT token, either MID or END.
1607                                  */
1608                                 {
1609                                         struct dwc_otg_hcd_iso_packet_desc
1610                                         *frame_desc;
1611
1612                                         frame_desc =
1613                                             &qtd->urb->
1614                                             iso_descs[qtd->isoc_frame_index];
1615                                         qtd->isoc_split_offset += 188;
1616
1617                                         if ((frame_desc->length -
1618                                              qtd->isoc_split_offset) <= 188) {
1619                                                 qtd->isoc_split_pos =
1620                                                     DWC_HCSPLIT_XACTPOS_END;
1621                                         } else {
1622                                                 qtd->isoc_split_pos =
1623                                                     DWC_HCSPLIT_XACTPOS_MID;
1624                                         }
1625
1626                                 }
1627                                 break;
1628                         }
1629                 } else {
1630                         halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
1631                 }
1632         } else {
                /*
                 * The ACK interrupt is unmasked on a non-split DMA transaction
                 * for the sole purpose of resetting the error count. Disable the
                 * other interrupts that were unmasked for the same reason.
                 */
1638                 if(hcd->core_if->dma_enable) {
1639                         disable_hc_int(hc_regs, datatglerr);
1640                         disable_hc_int(hc_regs, nak);
1641                 }
1642                 qtd->error_count = 0;
1643
1644                 if (hc->qh->ping_state) {
1645                         hc->qh->ping_state = 0;
1646                         /*
1647                          * Halt the channel so the transfer can be re-started
1648                          * from the appropriate point. This only happens in
1649                          * Slave mode. In DMA mode, the ping_state is cleared
1650                          * when the transfer is started because the core
1651                          * automatically executes the PING, then the transfer.
1652                          */
1653                         halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK);
1654                 }
1655         }
1656
1657         /*
1658          * If the ACK occurred when _not_ in the PING state, let the channel
1659          * continue transferring data after clearing the error count.
1660          */
1661
1662         disable_hc_int(hc_regs, ack);
1663
1664         return 1;
1665 }
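
/*
 * The 188-byte steps above come from the split-transaction budget of one
 * 188-byte chunk of full-speed isochronous data per microframe towards the
 * transaction translator. As a worked example, a 500-byte isochronous OUT
 * frame goes out as BEGIN(188), MID(188), END(124), with isoc_split_offset
 * advancing by 188 per ACK. The disabled helper below (hypothetical, not
 * used by the driver) mirrors the BEGIN/MID branch in handle_hc_ack_intr().
 */
#if 0
static int example_next_isoc_out_xactpos(uint32_t frame_len,
                                         uint32_t split_offset)
{
        if (frame_len - split_offset <= 188)
                return DWC_HCSPLIT_XACTPOS_END;
        return DWC_HCSPLIT_XACTPOS_MID;
}
#endif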
1666
1667 /**
1668  * Handles a host channel NYET interrupt. This interrupt should only occur on
1669  * Bulk and Control OUT endpoints and for complete split transactions. If a
1670  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1671  * handled in the xfercomp interrupt handler, not here. This handler may be
1672  * called in either DMA mode or Slave mode.
1673  */
1674 static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t * hcd,
1675                                    dwc_hc_t * hc,
1676                                    dwc_otg_hc_regs_t * hc_regs,
1677                                    dwc_otg_qtd_t * qtd)
1678 {
1679         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1680                     "NYET Received--\n", hc->hc_num);
1681
1682         /*
1683          * NYET on CSPLIT
1684          * re-do the CSPLIT immediately on non-periodic
1685          */
1686         if (hc->do_split && hc->complete_split) {
1687                 if (hc->ep_is_in && (hc->ep_type == DWC_OTG_EP_TYPE_ISOC)
1688                     && hcd->core_if->dma_enable) {
1689                         qtd->complete_split = 0;
1690                         qtd->isoc_split_offset = 0;
1691                         if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
1692                                 hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
1693                                 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
1694                         }
1695                         else
1696                                 release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
1697                         goto handle_nyet_done;
1698                 }
1699
1700                 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1701                     hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1702                         int frnum = dwc_otg_hcd_get_frame_number(hcd);
1703
                        /* With the FIQ running we only ever see the failed NYET. */
1705                         if (dwc_full_frame_num(frnum) !=
1706                             dwc_full_frame_num(hc->qh->sched_frame) ||
1707                             fiq_fsm_enable) {
1708                                 /*
1709                                  * No longer in the same full speed frame.
1710                                  * Treat this as a transaction error.
1711                                  */
1712 #if 0
1713                                 /** @todo Fix system performance so this can
1714                                  * be treated as an error. Right now complete
1715                                  * splits cannot be scheduled precisely enough
1716                                  * due to other system activity, so this error
1717                                  * occurs regularly in Slave mode.
1718                                  */
1719                                 qtd->error_count++;
1720 #endif
1721                                 qtd->complete_split = 0;
1722                                 halt_channel(hcd, hc, qtd,
1723                                              DWC_OTG_HC_XFER_XACT_ERR);
1724                                 /** @todo add support for isoc release */
1725                                 goto handle_nyet_done;
1726                         }
1727                 }
1728
1729                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);
1730                 goto handle_nyet_done;
1731         }
1732
1733         hc->qh->ping_state = 1;
1734         qtd->error_count = 0;
1735
1736         update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd,
1737                                    DWC_OTG_HC_XFER_NYET);
1738         dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1739
1740         /*
1741          * Halt the channel and re-start the transfer so the PING
1742          * protocol will start.
1743          */
1744         halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET);
1745
1746 handle_nyet_done:
1747         disable_hc_int(hc_regs, nyet);
1748         return 1;
1749 }
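
/*
 * The frame comparison above relies on dwc_full_frame_num() reducing the
 * microframe counter to a full (1 ms) frame number. A plausible sketch of
 * that reduction is shown below; this is an assumption about the helper
 * (defined elsewhere in the driver), kept disabled and illustrative only.
 */
#if 0
static inline uint16_t example_full_frame_num(uint16_t frame)
{
        /* Mask the frame counter and drop the 3 microframe bits. */
        return (frame & 0x3FFF) >> 3;
}
#endif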
1750
1751 /**
1752  * Handles a host channel babble interrupt. This handler may be called in
1753  * either DMA mode or Slave mode.
1754  */
1755 static int32_t handle_hc_babble_intr(dwc_otg_hcd_t * hcd,
1756                                      dwc_hc_t * hc,
1757                                      dwc_otg_hc_regs_t * hc_regs,
1758                                      dwc_otg_qtd_t * qtd)
1759 {
1760         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1761                     "Babble Error--\n", hc->hc_num);
1762
1763         if (hcd->core_if->dma_desc_enable) {
1764                 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
1765                                                DWC_OTG_HC_XFER_BABBLE_ERR);
1766                 goto handle_babble_done;
1767         }
1768
1769         if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
1770                 hcd->fops->complete(hcd, qtd->urb->priv,
1771                                     qtd->urb, -DWC_E_OVERFLOW);
1772                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR);
1773         } else {
1774                 dwc_otg_halt_status_e halt_status;
1775                 halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1776                                                     DWC_OTG_HC_XFER_BABBLE_ERR);
1777                 halt_channel(hcd, hc, qtd, halt_status);
1778         }
1779
1780 handle_babble_done:
1781         disable_hc_int(hc_regs, bblerr);
1782         return 1;
1783 }
1784
1785 /**
1786  * Handles a host channel AHB error interrupt. This handler is only called in
1787  * DMA mode.
1788  */
1789 static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t * hcd,
1790                                      dwc_hc_t * hc,
1791                                      dwc_otg_hc_regs_t * hc_regs,
1792                                      dwc_otg_qtd_t * qtd)
1793 {
1794         hcchar_data_t hcchar;
1795         hcsplt_data_t hcsplt;
1796         hctsiz_data_t hctsiz;
1797         uint32_t hcdma;
1798         char *pipetype, *speed;
1799
1800         dwc_otg_hcd_urb_t *urb = qtd->urb;
1801
1802         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1803                     "AHB Error--\n", hc->hc_num);
1804
1805         hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
1806         hcsplt.d32 = DWC_READ_REG32(&hc_regs->hcsplt);
1807         hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
1808         hcdma = DWC_READ_REG32(&hc_regs->hcdma);
1809
1810         DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num);
1811         DWC_ERROR("  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
1812         DWC_ERROR("  hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
1813         DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
1814         DWC_ERROR("  Device address: %d\n",
1815                   dwc_otg_hcd_get_dev_addr(&urb->pipe_info));
1816         DWC_ERROR("  Endpoint: %d, %s\n",
1817                   dwc_otg_hcd_get_ep_num(&urb->pipe_info),
1818                   (dwc_otg_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT"));
1819
1820         switch (dwc_otg_hcd_get_pipe_type(&urb->pipe_info)) {
1821         case UE_CONTROL:
1822                 pipetype = "CONTROL";
1823                 break;
1824         case UE_BULK:
1825                 pipetype = "BULK";
1826                 break;
1827         case UE_INTERRUPT:
1828                 pipetype = "INTERRUPT";
1829                 break;
1830         case UE_ISOCHRONOUS:
1831                 pipetype = "ISOCHRONOUS";
1832                 break;
1833         default:
1834                 pipetype = "UNKNOWN";
1835                 break;
1836         }
1837
1838         DWC_ERROR("  Endpoint type: %s\n", pipetype);
1839
1840         switch (hc->speed) {
1841         case DWC_OTG_EP_SPEED_HIGH:
1842                 speed = "HIGH";
1843                 break;
1844         case DWC_OTG_EP_SPEED_FULL:
1845                 speed = "FULL";
1846                 break;
1847         case DWC_OTG_EP_SPEED_LOW:
1848                 speed = "LOW";
1849                 break;
1850         default:
1851                 speed = "UNKNOWN";
1852                 break;
        }
1854
1855         DWC_ERROR("  Speed: %s\n", speed);
1856
1857         DWC_ERROR("  Max packet size: %d\n",
1858                   dwc_otg_hcd_get_mps(&urb->pipe_info));
1859         DWC_ERROR("  Data buffer length: %d\n", urb->length);
1860         DWC_ERROR("  Transfer buffer: %p, Transfer DMA: %p\n",
1861                   urb->buf, (void *)urb->dma);
1862         DWC_ERROR("  Setup buffer: %p, Setup DMA: %p\n",
1863                   urb->setup_packet, (void *)urb->setup_dma);
1864         DWC_ERROR("  Interval: %d\n", urb->interval);
1865
        /* Core halts the channel for Descriptor DMA mode */
1867         if (hcd->core_if->dma_desc_enable) {
1868                 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
1869                                                DWC_OTG_HC_XFER_AHB_ERR);
1870                 goto handle_ahberr_done;
1871         }
1872
1873         hcd->fops->complete(hcd, urb->priv, urb, -DWC_E_IO);
1874
1875         /*
1876          * Force a channel halt. Don't call halt_channel because that won't
1877          * write to the HCCHARn register in DMA mode to force the halt.
1878          */
1879         dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR);
1880 handle_ahberr_done:
1881         disable_hc_int(hc_regs, ahberr);
1882         return 1;
1883 }
1884
1885 /**
1886  * Handles a host channel transaction error interrupt. This handler may be
1887  * called in either DMA mode or Slave mode.
1888  */
1889 static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t * hcd,
1890                                       dwc_hc_t * hc,
1891                                       dwc_otg_hc_regs_t * hc_regs,
1892                                       dwc_otg_qtd_t * qtd)
1893 {
1894         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1895                     "Transaction Error--\n", hc->hc_num);
1896
1897         if (hcd->core_if->dma_desc_enable) {
1898                 dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
1899                                                DWC_OTG_HC_XFER_XACT_ERR);
1900                 goto handle_xacterr_done;
1901         }
1902
1903         switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1904         case UE_CONTROL:
1905         case UE_BULK:
1906                 qtd->error_count++;
1907                 if (!hc->qh->ping_state) {
1908
1909                         update_urb_state_xfer_intr(hc, hc_regs,
1910                                                    qtd->urb, qtd,
1911                                                    DWC_OTG_HC_XFER_XACT_ERR);
1912                         dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
1913                         if (!hc->ep_is_in && hc->speed == DWC_OTG_EP_SPEED_HIGH) {
1914                                 hc->qh->ping_state = 1;
1915                         }
1916                 }
1917
1918                 /*
1919                  * Halt the channel so the transfer can be re-started from
1920                  * the appropriate point or the PING protocol will start.
1921                  */
1922                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1923                 break;
1924         case UE_INTERRUPT:
1925                 qtd->error_count++;
1926                 if (hc->do_split && hc->complete_split) {
1927                         qtd->complete_split = 0;
1928                 }
1929                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
1930                 break;
1931         case UE_ISOCHRONOUS:
1932                 {
1933                         dwc_otg_halt_status_e halt_status;
1934                         halt_status =
1935                             update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1936                                                   DWC_OTG_HC_XFER_XACT_ERR);
1937
1938                         halt_channel(hcd, hc, qtd, halt_status);
1939                 }
1940                 break;
1941         }
1942 handle_xacterr_done:
1943         disable_hc_int(hc_regs, xacterr);
1944
1945         return 1;
1946 }
1947
1948 /**
1949  * Handles a host channel frame overrun interrupt. This handler may be called
1950  * in either DMA mode or Slave mode.
1951  */
1952 static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t * hcd,
1953                                        dwc_hc_t * hc,
1954                                        dwc_otg_hc_regs_t * hc_regs,
1955                                        dwc_otg_qtd_t * qtd)
1956 {
1957         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1958                     "Frame Overrun--\n", hc->hc_num);
1959
1960         switch (dwc_otg_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1961         case UE_CONTROL:
1962         case UE_BULK:
1963                 break;
1964         case UE_INTERRUPT:
1965                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN);
1966                 break;
1967         case UE_ISOCHRONOUS:
1968                 {
1969                         dwc_otg_halt_status_e halt_status;
1970                         halt_status =
1971                             update_isoc_urb_state(hcd, hc, hc_regs, qtd,
1972                                                   DWC_OTG_HC_XFER_FRAME_OVERRUN);
1973
1974                         halt_channel(hcd, hc, qtd, halt_status);
1975                 }
1976                 break;
1977         }
1978
1979         disable_hc_int(hc_regs, frmovrun);
1980
1981         return 1;
1982 }
1983
1984 /**
1985  * Handles a host channel data toggle error interrupt. This handler may be
1986  * called in either DMA mode or Slave mode.
1987  */
1988 static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t * hcd,
1989                                          dwc_hc_t * hc,
1990                                          dwc_otg_hc_regs_t * hc_regs,
1991                                          dwc_otg_qtd_t * qtd)
1992 {
1993         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
1994                 "Data Toggle Error on %s transfer--\n",
1995                 hc->hc_num, (hc->ep_is_in ? "IN" : "OUT"));
1996
        /* Data toggle errors on split transactions cause the channel to halt.
         * Restart the transfer. */
1999         if(hc->qh->do_split)
2000         {
2001                 qtd->error_count++;
2002                 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
2003                 update_urb_state_xfer_intr(hc, hc_regs,
2004                         qtd->urb, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2005                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2006         } else if (hc->ep_is_in) {
                /* The data toggle error interrupt is unmasked on a non-split DMA
                 * transaction for the sole purpose of resetting the error count.
                 * Disable the other interrupts that were unmasked for the same
                 * reason.
                 */
2011                 if(hcd->core_if->dma_enable) {
2012                         disable_hc_int(hc_regs, ack);
2013                         disable_hc_int(hc_regs, nak);
2014                 }
2015                 qtd->error_count = 0;
2016         }
2017
2018         disable_hc_int(hc_regs, datatglerr);
2019
2020         return 1;
2021 }
2022
2023 #ifdef DEBUG
2024 /**
2025  * This function is for debug only. It checks that a valid halt status is set
2026  * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
2027  * taken and a warning is issued.
2028  * @return 1 if halt status is ok, 0 otherwise.
2029  */
2030 static inline int halt_status_ok(dwc_otg_hcd_t * hcd,
2031                                  dwc_hc_t * hc,
2032                                  dwc_otg_hc_regs_t * hc_regs,
2033                                  dwc_otg_qtd_t * qtd)
2034 {
2035         hcchar_data_t hcchar;
2036         hctsiz_data_t hctsiz;
2037         hcint_data_t hcint;
2038         hcintmsk_data_t hcintmsk;
2039         hcsplt_data_t hcsplt;
2040
2041         if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
2042                 /*
2043                  * This code is here only as a check. This condition should
2044                  * never happen. Ignore the halt if it does occur.
2045                  */
2046                 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2047                 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
2048                 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2049                 hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);
2050                 hcsplt.d32 = DWC_READ_REG32(&hc_regs->hcsplt);
2051                 DWC_WARN
2052                     ("%s: hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
2053                      "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
2054                      "hcint 0x%08x, hcintmsk 0x%08x, "
2055                      "hcsplt 0x%08x, qtd->complete_split %d\n", __func__,
2056                      hc->hc_num, hcchar.d32, hctsiz.d32, hcint.d32,
2057                      hcintmsk.d32, hcsplt.d32, qtd->complete_split);
2058
2059                 DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
2060                          __func__, hc->hc_num);
2061                 DWC_WARN("\n");
2062                 clear_hc_int(hc_regs, chhltd);
2063                 return 0;
2064         }
2065
2066         /*
2067          * This code is here only as a check. hcchar.chdis should
2068          * never be set when the halt interrupt occurs. Halt the
2069          * channel again if it does occur.
2070          */
2071         hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2072         if (hcchar.b.chdis) {
2073                 DWC_WARN("%s: hcchar.chdis set unexpectedly, "
2074                          "hcchar 0x%08x, trying to halt again\n",
2075                          __func__, hcchar.d32);
2076                 clear_hc_int(hc_regs, chhltd);
2077                 hc->halt_pending = 0;
2078                 halt_channel(hcd, hc, qtd, hc->halt_status);
2079                 return 0;
2080         }
2081
2082         return 1;
2083 }
2084 #endif
2085
2086 /**
2087  * Handles a host Channel Halted interrupt in DMA mode. This handler
2088  * determines the reason the channel halted and proceeds accordingly.
2089  */
2090 static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t * hcd,
2091                                       dwc_hc_t * hc,
2092                                       dwc_otg_hc_regs_t * hc_regs,
2093                                       dwc_otg_qtd_t * qtd)
2094 {
2095         int out_nak_enh = 0;
2096         hcint_data_t hcint;
2097         hcintmsk_data_t hcintmsk;
        /* For cores with the OUT NAK enhancement, the flow for high-
2099          * speed CONTROL/BULK OUT is handled a little differently.
2100          */
2101         if (hcd->core_if->snpsid >= OTG_CORE_REV_2_71a) {
2102                 if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in &&
2103                     (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
2104                      hc->ep_type == DWC_OTG_EP_TYPE_BULK)) {
2105                         out_nak_enh = 1;
2106                 }
2107         }
2108
2109         if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
2110             (hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR
2111              && !hcd->core_if->dma_desc_enable)) {
2112                 /*
2113                  * Just release the channel. A dequeue can happen on a
2114                  * transfer timeout. In the case of an AHB Error, the channel
2115                  * was forced to halt because there's no way to gracefully
2116                  * recover.
2117                  */
2118                 if (hcd->core_if->dma_desc_enable)
2119                         dwc_otg_hcd_complete_xfer_ddma(hcd, hc, hc_regs,
2120                                                        hc->halt_status);
2121                 else
2122                         release_channel(hcd, hc, qtd, hc->halt_status);
2123                 return;
2124         }
2125
2126         /* Read the HCINTn register to determine the cause for the halt. */
2127
2128         hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2129         hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);
2130
2131         if (hcint.b.xfercomp) {
                /** @todo This is here because of a possible hardware bug.  The spec
                 * says that on SPLIT-ISOC OUT transfers in DMA mode a HALT
                 * interrupt with the ACK bit set should occur, but I only see the
2135                  * XFERCOMP bit, even with it masked out.  This is a workaround
2136                  * for that behavior.  Should fix this when hardware is fixed.
2137                  */
2138                 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) {
2139                         handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
2140                 }
2141                 handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
2142         } else if (hcint.b.stall) {
2143                 handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
2144         } else if (hcint.b.xacterr && !hcd->core_if->dma_desc_enable) {
2145                 if (out_nak_enh) {
2146                         if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) {
2147                                 DWC_DEBUGPL(DBG_HCD, "XactErr with NYET/NAK/ACK\n");
2148                                 qtd->error_count = 0;
2149                         } else {
2150                                 DWC_DEBUGPL(DBG_HCD, "XactErr without NYET/NAK/ACK\n");
2151                         }
2152                 }
2153
2154                 /*
2155                  * Must handle xacterr before nak or ack. Could get a xacterr
2156                  * at the same time as either of these on a BULK/CONTROL OUT
2157                  * that started with a PING. The xacterr takes precedence.
2158                  */
2159                 handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
2160         } else if (hcint.b.xcs_xact && hcd->core_if->dma_desc_enable) {
2161                 handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
2162         } else if (hcint.b.ahberr && hcd->core_if->dma_desc_enable) {
2163                 handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
2164         } else if (hcint.b.bblerr) {
2165                 handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
2166         } else if (hcint.b.frmovrun) {
2167                 handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd);
2168         } else if (hcint.b.datatglerr) {
2169                 handle_hc_datatglerr_intr(hcd, hc, hc_regs, qtd);
2170         } else if (!out_nak_enh) {
2171                 if (hcint.b.nyet) {
2172                         /*
2173                          * Must handle nyet before nak or ack. Could get a nyet at the
2174                          * same time as either of those on a BULK/CONTROL OUT that
2175                          * started with a PING. The nyet takes precedence.
2176                          */
2177                         handle_hc_nyet_intr(hcd, hc, hc_regs, qtd);
2178                 } else if (hcint.b.nak && !hcintmsk.b.nak) {
2179                         /*
2180                          * If nak is not masked, it's because a non-split IN transfer
2181                          * is in an error state. In that case, the nak is handled by
2182                          * the nak interrupt handler, not here. Handle nak here for
2183                          * BULK/CONTROL OUT transfers, which halt on a NAK to allow
2184                          * rewinding the buffer pointer.
2185                          */
2186                         handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
2187                 } else if (hcint.b.ack && !hcintmsk.b.ack) {
2188                         /*
2189                          * If ack is not masked, it's because a non-split IN transfer
2190                          * is in an error state. In that case, the ack is handled by
2191                          * the ack interrupt handler, not here. Handle ack here for
2192                          * split transfers. Start splits halt on ACK.
2193                          */
2194                         handle_hc_ack_intr(hcd, hc, hc_regs, qtd);
2195                 } else {
2196                         if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
2197                             hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
2198                                 /*
2199                                  * A periodic transfer halted with no other channel
2200                                  * interrupts set. Assume it was halted by the core
2201                                  * because it could not be completed in its scheduled
2202                                  * (micro)frame.
2203                                  */
2204 #ifdef DEBUG
2205                                 DWC_PRINTF
2206                                     ("%s: Halt channel %d (assume incomplete periodic transfer)\n",
2207                                      __func__, hc->hc_num);
2208 #endif
2209                                 halt_channel(hcd, hc, qtd,
2210                                              DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE);
2211                         } else {
2212                                 DWC_ERROR
2213                                     ("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
2214                                      "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n",
2215                                      __func__, hc->hc_num, hcint.d32,
2216                                      DWC_READ_REG32(&hcd->
2217                                                     core_if->core_global_regs->
2218                                                     gintsts));
                                /* Fallthrough: use the 3-strikes rule */
2220                                 qtd->error_count++;
2221                                 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
2222                                 update_urb_state_xfer_intr(hc, hc_regs,
2223                                            qtd->urb, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2224                                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2225                         }
2226
2227                 }
2228         } else {
2229                 DWC_PRINTF("NYET/NAK/ACK/other in non-error case, 0x%08x\n",
2230                            hcint.d32);
                /* Fallthrough: use the 3-strikes rule */
2232                 qtd->error_count++;
2233                 dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
2234                 update_urb_state_xfer_intr(hc, hc_regs,
2235                            qtd->urb, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2236                 halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR);
2237         }
2238 }
2239
2240 /**
2241  * Handles a host channel Channel Halted interrupt.
2242  *
2243  * In slave mode, this handler is called only when the driver specifically
2244  * requests a halt. This occurs during handling other host channel interrupts
2245  * (e.g. nak, xacterr, stall, nyet, etc.).
2246  *
2247  * In DMA mode, this is the interrupt that occurs when the core has finished
2248  * processing a transfer on a channel. Other host channel interrupts (except
2249  * ahberr) are disabled in DMA mode.
2250  */
2251 static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t * hcd,
2252                                      dwc_hc_t * hc,
2253                                      dwc_otg_hc_regs_t * hc_regs,
2254                                      dwc_otg_qtd_t * qtd)
2255 {
2256         DWC_DEBUGPL(DBG_HCDI, "--Host Channel %d Interrupt: "
2257                     "Channel Halted--\n", hc->hc_num);
2258
2259         if (hcd->core_if->dma_enable) {
2260                 handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd);
2261         } else {
2262 #ifdef DEBUG
2263                 if (!halt_status_ok(hcd, hc, hc_regs, qtd)) {
2264                         return 1;
2265                 }
2266 #endif
2267                 release_channel(hcd, hc, qtd, hc->halt_status);
2268         }
2269
2270         return 1;
2271 }
2272
2273
2274 /**
2275  * dwc_otg_fiq_unmangle_isoc() - Update the iso_frame_desc structure on
2276  * FIQ transfer completion
2277  * @hcd:        Pointer to dwc_otg_hcd struct
2278  * @num:        Host channel number
2279  *
2280  * 1. Un-mangle the status as recorded in each iso_frame_desc status
 * 1. Un-mangle the status recorded in each iso_frame_desc
2282  */
2283 void dwc_otg_fiq_unmangle_isoc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, dwc_otg_qtd_t *qtd, uint32_t num)
2284 {
2285         struct dwc_otg_hcd_urb *dwc_urb = qtd->urb;
2286         int nr_frames = dwc_urb->packet_count;
2287         int i;
2288         hcint_data_t frame_hcint;
2289
2290         for (i = 0; i < nr_frames; i++) {
2291                 frame_hcint.d32 = dwc_urb->iso_descs[i].status;
2292                 if (frame_hcint.b.xfercomp) {
2293                         dwc_urb->iso_descs[i].status = 0;
2294                         dwc_urb->actual_length += dwc_urb->iso_descs[i].actual_length;
2295                 } else if (frame_hcint.b.frmovrun) {
2296                         if (qh->ep_is_in)
2297                                 dwc_urb->iso_descs[i].status = -DWC_E_NO_STREAM_RES;
2298                         else
2299                                 dwc_urb->iso_descs[i].status = -DWC_E_COMMUNICATION;
2300                         dwc_urb->error_count++;
2301                         dwc_urb->iso_descs[i].actual_length = 0;
2302                 } else if (frame_hcint.b.xacterr) {
2303                         dwc_urb->iso_descs[i].status = -DWC_E_PROTOCOL;
2304                         dwc_urb->error_count++;
2305                         dwc_urb->iso_descs[i].actual_length = 0;
2306                 } else if (frame_hcint.b.bblerr) {
2307                         dwc_urb->iso_descs[i].status = -DWC_E_OVERFLOW;
2308                         dwc_urb->error_count++;
2309                         dwc_urb->iso_descs[i].actual_length = 0;
2310                 } else {
2311                         /* Something went wrong */
2312                         dwc_urb->iso_descs[i].status = -1;
2313                         dwc_urb->iso_descs[i].actual_length = 0;
2314                         dwc_urb->error_count++;
2315                 }
2316         }
2317         qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval * (nr_frames - 1));
2318
2319         //printk_ratelimited(KERN_INFO "%s: HS isochronous of %d/%d frames with %d errors complete\n",
2320         //                      __FUNCTION__, i, dwc_urb->packet_count, dwc_urb->error_count);
2321 }
2322
2323 /**
2324  * dwc_otg_fiq_unsetup_per_dma() - Remove data from bounce buffers for split transactions
2325  * @hcd:        Pointer to dwc_otg_hcd struct
2326  * @num:        Host channel number
2327  *
2328  * Copies data from the FIQ bounce buffers into the URB's transfer buffer. Does not modify URB state.
2329  * Returns total length of data or -1 if the buffers were not used.
2330  *
2331  */
2332 int dwc_otg_fiq_unsetup_per_dma(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, dwc_otg_qtd_t *qtd, uint32_t num)
2333 {
2334         dwc_hc_t *hc = qh->channel;
2335         struct fiq_dma_blob *blob = hcd->fiq_dmab;
2336         struct fiq_channel_state *st = &hcd->fiq_state->channel[num];
2337         uint8_t *ptr = NULL;
2338         int index = 0, len = 0;
2339         int i = 0;
2340         if (hc->ep_is_in) {
2341                 /* Copy data out of the DMA bounce buffers to the URB's buffer.
2342                  * The align_buf is ignored as this is ignored on FSM enqueue. */
                 * The align_buf is not used here, as it is ignored on FSM enqueue. */
2344                 if (qh->ep_type == UE_ISOCHRONOUS) {
2345                         /* Isoc IN transactions - grab the offset of the iso_frame_desc into the URB transfer buffer */
2346                         index = qtd->isoc_frame_index;
2347                         ptr += qtd->urb->iso_descs[index].offset;
2348                 } else {
2349                         /* Need to increment by actual_length for interrupt IN */
2350                         ptr += qtd->urb->actual_length;
2351                 }
2352
2353                 for (i = 0; i < st->dma_info.index; i++) {
2354                         len += st->dma_info.slot_len[i];
2355                         dwc_memcpy(ptr, &blob->channel[num].index[i].buf[0], st->dma_info.slot_len[i]);
2356                         ptr += st->dma_info.slot_len[i];
2357                 }
2358                 return len;
2359         } else {
2360                 /* OUT endpoints - nothing to do. */
2361                 return -1;
2362         }
}

/**
2366  * dwc_otg_hcd_handle_hc_fsm() - handle an unmasked channel interrupt
2367  *                               from a channel handled in the FIQ
2368  * @hcd:        Pointer to dwc_otg_hcd struct
2369  * @num:        Host channel number
2370  *
2371  * If a host channel interrupt was received by the IRQ and this was a channel
2372  * used by the FIQ, the execution flow for transfer completion is substantially
 * different from the normal (messy) path. This function and its friends handle
2374  * channel cleanup and transaction completion from a FIQ transaction.
2375  */
2376 void dwc_otg_hcd_handle_hc_fsm(dwc_otg_hcd_t *hcd, uint32_t num)
2377 {
2378         struct fiq_channel_state *st = &hcd->fiq_state->channel[num];
2379         dwc_hc_t *hc = hcd->hc_ptr_array[num];
2380         dwc_otg_qtd_t *qtd;
2381         dwc_otg_hc_regs_t *hc_regs = hcd->core_if->host_if->hc_regs[num];
2382         hcint_data_t hcint = hcd->fiq_state->channel[num].hcint_copy;
2383         hctsiz_data_t hctsiz = hcd->fiq_state->channel[num].hctsiz_copy;
2384         int hostchannels  = 0;
2385         fiq_print(FIQDBG_INT, hcd->fiq_state, "OUT %01d %01d ", num , st->fsm);
2386
2387         hostchannels = hcd->available_host_channels;
2388         if (hc->halt_pending) {
2389                 /* Dequeue: The FIQ was allowed to complete the transfer but state has been cleared. */
2390                 if (hc->qh && st->fsm == FIQ_NP_SPLIT_DONE &&
2391                                 hcint.b.xfercomp && hc->qh->ep_type == UE_BULK) {
2392                         if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
2393                                 hc->qh->data_toggle = DWC_OTG_HC_PID_DATA1;
2394                         } else {
2395                                 hc->qh->data_toggle = DWC_OTG_HC_PID_DATA0;
2396                         }
2397                 }
2398                 release_channel(hcd, hc, NULL, hc->halt_status);
2399                 return;
2400         }
2401
2402         qtd = DWC_CIRCLEQ_FIRST(&hc->qh->qtd_list);
2403         switch (st->fsm) {
2404         case FIQ_TEST:
2405                 break;
2406
2407         case FIQ_DEQUEUE_ISSUED:
2408                 /* Handled above, but keep for posterity */
2409                 release_channel(hcd, hc, NULL, hc->halt_status);
2410                 break;
2411
2412         case FIQ_NP_SPLIT_DONE:
2413                 /* Nonperiodic transaction complete. */
                if (!hc->ep_is_in) {
                        qtd->ssplit_out_xfer_count = hc->xfer_len;
                }
                if (hcint.b.xfercomp) {
                        handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd);
                } else if (hcint.b.nak) {
                        handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
                } else {
                        DWC_WARN("Unexpected IRQ state on FSM transaction: "
                                        "dev_addr=%d ep=%d fsm=%d, hcint=0x%08x\n",
                                hc->dev_addr, hc->ep_num, st->fsm, hcint.d32);
                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
                }
                break;

        case FIQ_NP_SPLIT_HS_ABORTED:
                /* A HS abort means three consecutive transaction errors occurred
                 * on the HS bus at some point in the transaction.
                 * Normally a CLEAR_TT_BUFFER hub command would be required: we can't do that
                 * because there's no guarantee of the order in which non-periodic splits were issued.
                 * We could end up clearing a perfectly good transaction out of the buffer.
                 */
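                /* The FIQ already retried on our behalf; fold its error count
                 * into the qtd before handing off to the standard error handler. */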
                if (hcint.b.xacterr) {
                        qtd->error_count += st->nr_errors;
                        handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
                } else if (hcint.b.ahberr) {
                        handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
                } else {
                        DWC_WARN("Unexpected IRQ state on FSM transaction: "
                                        "dev_addr=%d ep=%d fsm=%d, hcint=0x%08x\n",
                                hc->dev_addr, hc->ep_num, st->fsm, hcint.d32);
                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
                }
                break;

        case FIQ_NP_SPLIT_LS_ABORTED:
                /* A few cases can cause this - either an unknown state on a SSPLIT or
                 * a STALL/data toggle error response on a CSPLIT */
                if (hcint.b.stall) {
                        handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
                } else if (hcint.b.datatglerr) {
                        handle_hc_datatglerr_intr(hcd, hc, hc_regs, qtd);
                } else if (hcint.b.bblerr) {
                        handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
                } else if (hcint.b.ahberr) {
                        handle_hc_ahberr_intr(hcd, hc, hc_regs, qtd);
                } else {
                        DWC_WARN("Unexpected IRQ state on FSM transaction: "
                                        "dev_addr=%d ep=%d fsm=%d, hcint=0x%08x\n",
                                hc->dev_addr, hc->ep_num, st->fsm, hcint.d32);
                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
                }
                break;

        case FIQ_PER_SPLIT_DONE:
                /* Isoc IN or Interrupt IN/OUT */

                /* Flow control here is different from the normal execution by the driver.
                 * We need to completely ignore most of the driver's method of handling
                 * split transactions and do it ourselves.
                 */
                if (hc->ep_type == UE_INTERRUPT) {
                        if (hcint.b.nak) {
                                handle_hc_nak_intr(hcd, hc, hc_regs, qtd);
                        } else if (hc->ep_is_in) {
                                int len;
                                len = dwc_otg_fiq_unsetup_per_dma(hcd, hc->qh, qtd, num);
                                //printk(KERN_NOTICE "FIQ Transaction: hc=%d len=%d urb_len = %d\n", num, len, qtd->urb->length);
                                qtd->urb->actual_length += len;
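                                /* The URB is finished once the whole buffer has
                                 * been filled or the device returned a short packet. */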
                                if (qtd->urb->actual_length >= qtd->urb->length) {
                                        qtd->urb->status = 0;
                                        hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, qtd->urb->status);
                                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
                                } else {
                                        /* Interrupt transfer not complete yet - is it a short read? */
                                        if (len < hc->max_packet) {
                                                /* Interrupt transaction complete */
                                                qtd->urb->status = 0;
                                                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, qtd->urb->status);
                                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
                                        } else {
                                                /* Further transactions required */
                                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
                                        }
                                }
                        } else {
                                /* Interrupt OUT complete. */
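                                /* OUT data was sent straight from the URB buffer, so
                                 * only the data toggle and running length need updating. */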
                                dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
                                qtd->urb->actual_length += hc->xfer_len;
                                if (qtd->urb->actual_length >= qtd->urb->length) {
                                        qtd->urb->status = 0;
                                        hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, qtd->urb->status);
                                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
                                } else {
                                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
                                }
                        }
                } else {
                        /* ISOC IN complete. */
                        struct dwc_otg_hcd_iso_packet_desc *frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
                        int len = 0;
                        /* Record errors, update qtd. */
                        if (st->nr_errors) {
                                frame_desc->actual_length = 0;
                                frame_desc->status = -DWC_E_PROTOCOL;
                        } else {
                                frame_desc->status = 0;
                                /* Unswizzle dma */
                                len = dwc_otg_fiq_unsetup_per_dma(hcd, hc->qh, qtd, num);
                                frame_desc->actual_length = len;
                        }
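                        /* Move on to the next isochronous frame descriptor; the URB
                         * is only given back once every packet has been serviced. */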
                        qtd->isoc_frame_index++;
                        if (qtd->isoc_frame_index == qtd->urb->packet_count) {
                                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
                        } else {
                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
                        }
                }
                break;

        case FIQ_PER_ISO_OUT_DONE: {
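                        /* The FIQ drove the whole OUT split itself, so a clean
                         * completion means the full descriptor length was sent. */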
                        struct dwc_otg_hcd_iso_packet_desc *frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
                        /* Record errors, update qtd. */
                        if (st->nr_errors) {
                                frame_desc->actual_length = 0;
                                frame_desc->status = -DWC_E_PROTOCOL;
                        } else {
                                frame_desc->status = 0;
                                frame_desc->actual_length = frame_desc->length;
                        }
                        qtd->isoc_frame_index++;
                        qtd->isoc_split_offset = 0;
                        if (qtd->isoc_frame_index == qtd->urb->packet_count) {
                                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
                        } else {
                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
                        }
                }
                break;

        case FIQ_PER_SPLIT_NYET_ABORTED:
                /* Doh. lost the data. */
                printk_ratelimited(KERN_INFO "Transfer to device %d endpoint 0x%x frame %d failed "
                                "- FIQ reported NYET. Data may have been lost.\n",
                                hc->dev_addr, hc->ep_num, dwc_otg_hcd_get_frame_number(hcd) >> 3);
                if (hc->ep_type == UE_ISOCHRONOUS) {
                        struct dwc_otg_hcd_iso_packet_desc *frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
                        /* Record errors, update qtd. */
                        frame_desc->actual_length = 0;
                        frame_desc->status = -DWC_E_PROTOCOL;
                        qtd->isoc_frame_index++;
                        qtd->isoc_split_offset = 0;
                        if (qtd->isoc_frame_index == qtd->urb->packet_count) {
                                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
                        } else {
                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
                        }
                } else {
                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
                }
                break;

        case FIQ_HS_ISOC_DONE:
                /* The FIQ has performed a whole pile of isochronous transactions.
                 * The status is recorded as the interrupt state should the transaction
                 * fail.
                 */
                dwc_otg_fiq_unmangle_isoc(hcd, hc->qh, qtd, num);
                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
                break;

        case FIQ_PER_SPLIT_LS_ABORTED:
                if (hcint.b.xacterr) {
                        /* Hub has responded with an ERR packet. Device
                         * has been unplugged or the port has been disabled.
                         * TODO: need to issue a reset to the hub port. */
                        qtd->error_count += 3;
                        handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
                } else if (hcint.b.stall) {
                        handle_hc_stall_intr(hcd, hc, hc_regs, qtd);
                } else if (hcint.b.bblerr) {
                        handle_hc_babble_intr(hcd, hc, hc_regs, qtd);
                } else {
                        printk_ratelimited(KERN_INFO "Transfer to device %d endpoint 0x%x failed "
                                "- FIQ reported FSM=%d. Data may have been lost.\n",
                                hc->dev_addr, hc->ep_num, st->fsm);
                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
                }
                break;

        case FIQ_PER_SPLIT_HS_ABORTED:
                /* Either the SSPLIT phase suffered transaction errors or something
                 * unexpected happened.
                 */
                qtd->error_count += 3;
                handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd);
                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
                break;

        case FIQ_PER_SPLIT_TIMEOUT:
                /* Couldn't complete in the nominated frame */
                printk(KERN_INFO "Transfer to device %d endpoint 0x%x frame %d failed "
                                "- FIQ timed out. Data may have been lost.\n",
                                hc->dev_addr, hc->ep_num, dwc_otg_hcd_get_frame_number(hcd) >> 3);
                if (hc->ep_type == UE_ISOCHRONOUS) {
                        struct dwc_otg_hcd_iso_packet_desc *frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
                        /* Record errors, update qtd. */
                        frame_desc->actual_length = 0;
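                        /* Flag the lost frame with a direction-specific error code. */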
                        if (hc->ep_is_in) {
                                frame_desc->status = -DWC_E_NO_STREAM_RES;
                        } else {
                                frame_desc->status = -DWC_E_COMMUNICATION;
                        }
                        qtd->isoc_frame_index++;
                        if (qtd->isoc_frame_index == qtd->urb->packet_count) {
                                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_URB_COMPLETE);
                        } else {
                                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_COMPLETE);
                        }
                } else {
                        release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
                }
                break;

        default:
                DWC_WARN("Unexpected state received on hc=%d fsm=%d on transfer to device %d ep 0x%x\n",
                                        hc->hc_num, st->fsm, hc->dev_addr, hc->ep_num);
                qtd->error_count++;
                release_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NO_HALT_STATUS);
        }
        return;
}

/** Handles interrupt for a specific Host Channel */
int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t * dwc_otg_hcd, uint32_t num)
{
        int retval = 0;
        hcint_data_t hcint;
        hcintmsk_data_t hcintmsk;
        dwc_hc_t *hc;
        dwc_otg_hc_regs_t *hc_regs;
        dwc_otg_qtd_t *qtd;

        DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num);

        hc = dwc_otg_hcd->hc_ptr_array[num];
        hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num];
        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                /* A dequeue was issued for this transfer. Our QTD has gone away
                 * but in the case of a FIQ transfer, the transfer would have run
                 * to completion.
                 */
                if (fiq_fsm_enable && dwc_otg_hcd->fiq_state->channel[num].fsm != FIQ_PASSTHROUGH) {
                        dwc_otg_hcd_handle_hc_fsm(dwc_otg_hcd, num);
                } else {
                        release_channel(dwc_otg_hcd, hc, NULL, hc->halt_status);
                }
                return 1;
        }
        qtd = DWC_CIRCLEQ_FIRST(&hc->qh->qtd_list);

        /*
         * FSM mode: Check to see if this is a HC interrupt from a channel handled by the FIQ.
         * Execution path is fundamentally different for the channels after a FIQ has completed
         * a split transaction.
         */
        if (fiq_fsm_enable) {
                switch (dwc_otg_hcd->fiq_state->channel[num].fsm) {
                case FIQ_PASSTHROUGH:
                        break;
                case FIQ_PASSTHROUGH_ERRORSTATE:
                        /* Hook into the error count */
                        fiq_print(FIQDBG_ERR, dwc_otg_hcd->fiq_state, "HCDERR%02d", num);
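                        /* The FIQ handled the error retries itself; if it saw a
                         * clean completion, reset the driver-side error count. */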
                        if (!dwc_otg_hcd->fiq_state->channel[num].nr_errors) {
                                qtd->error_count = 0;
                                fiq_print(FIQDBG_ERR, dwc_otg_hcd->fiq_state, "RESET   ");
                        }
                        break;
                default:
                        dwc_otg_hcd_handle_hc_fsm(dwc_otg_hcd, num);
                        return 1;
                }
        }

        hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
        hcintmsk.d32 = DWC_READ_REG32(&hc_regs->hcintmsk);
        hcint.d32 = hcint.d32 & hcintmsk.d32;
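        /* When DMA is disabled, ignore the channel-halted bit unless it is the
         * only unmasked status set (hcint == 0x2). */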
        if (!dwc_otg_hcd->core_if->dma_enable) {
                if (hcint.b.chhltd && hcint.d32 != 0x2) {
                        hcint.b.chhltd = 0;
                }
        }

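        /* Hand each asserted, unmasked interrupt cause to its dedicated handler. */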
        if (hcint.b.xfercomp) {
                retval |=
                    handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd);
                /*
                 * If NYET occurred at same time as Xfer Complete, the NYET is
                 * handled by the Xfer Complete interrupt handler. Don't want
                 * to call the NYET interrupt handler in this case.
                 */
                hcint.b.nyet = 0;
        }
        if (hcint.b.chhltd) {
                retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.ahberr) {
                retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.stall) {
                retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.nak) {
                retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.ack) {
                if (!hcint.b.chhltd)
                        retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.nyet) {
                retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.xacterr) {
                retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.bblerr) {
                retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.frmovrun) {
                retval |=
                    handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }
        if (hcint.b.datatglerr) {
                retval |=
                    handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd);
        }

        return retval;
}
#endif /* DWC_DEVICE_ONLY */