2 * dwc_otg_fiq_fsm.c - The finite state machine FIQ
4 * Copyright (c) 2013 Raspberry Pi Foundation
6 * Author: Jonathan Bell <jonathan@raspberrypi.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * * Neither the name of Raspberry Pi nor the
17 * names of its contributors may be used to endorse or promote products
18 * derived from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
22 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
24 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * This FIQ implements functionality that performs split transactions on
32 * the dwc_otg hardware without any outside intervention. A split transaction
33 * is "queued" by nominating a specific host channel to perform the entirety
34 * of a split transaction. This FIQ will then perform the microframe-precise
35 * scheduling required in each phase of the transaction until completion.
37 * The FIQ functionality is glued into the Synopsys driver via the entry point
38 * in the FSM enqueue function, and at the exit point in handling a HC interrupt
39 * for a FSM-enabled channel.
41 * NB: Large parts of this implementation have architecture-specific code.
42 * For porting this functionality to other ARM machines, the minimum is required:
43 * - An interrupt controller allowing the top-level dwc USB interrupt to be routed
45 * - A method of forcing a software generated interrupt from FIQ mode that then
46 * triggers an IRQ entry (with the dwc USB handler called by this IRQ number)
47 * - Guaranteed interrupt routing such that both the FIQ and SGI occur on the same
48 * processor core - there is no locking between the FIQ and IRQ (aside from
53 #include "dwc_otg_fiq_fsm.h"
/*
 * _fiq_print() - FIQ-safe debug logger.
 *
 * Formats a message into a fixed 16-byte record prefixed with the current
 * frame:microframe (HFNUM read directly at register offset 0x408) and
 * copies it into a wrapping debug buffer. Messages are only recorded when
 * the requested level matches @dbg_lvl, or unconditionally for FIQDBG_ERR.
 * Marked notrace: must not be instrumented, as it runs in FIQ context.
 */
58 void notrace _fiq_print(enum fiq_debug_level dbg_lvl, volatile struct fiq_state *state, char *fmt, ...)
60 enum fiq_debug_level dbg_lvl_req = FIQDBG_ERR;
/* frnum/8 gives the frame number, frnum & 7 the microframe within it */
63 hfnum_data_t hfnum = { .d32 = FIQ_READ(state->dwc_regs_base + 0x408) };
65 if((dbg_lvl & dbg_lvl_req) || dbg_lvl == FIQDBG_ERR)
67 snprintf(text, 9, " %4d:%1u ", hfnum.b.frnum/8, hfnum.b.frnum & 7);
/* Message body occupies the second half of the 16-byte record */
69 vsnprintf(text+8, 9, fmt, args);
/* Fixed-size records; write pointer wraps at the buffer size */
72 memcpy(buffer + wptr, text, 16);
73 wptr = (wptr + 16) % sizeof(buffer);
80 inline void fiq_fsm_spin_lock(fiq_lock_t *lock)
82 spin_lock((spinlock_t *)lock);
85 inline void fiq_fsm_spin_unlock(fiq_lock_t *lock)
87 spin_unlock((spinlock_t *)lock);
93 * fiq_fsm_spin_lock() - ARMv6+ bare bones spinlock
94 * Must be called with local interrupts and FIQ disabled.
96 #if defined(CONFIG_ARCH_BCM2835) && defined(CONFIG_SMP)
97 inline void fiq_fsm_spin_lock(fiq_lock_t *lock)
102 /* Nested locking, yay. If we are on the same CPU as the fiq, then the disable
103 * will be sufficient. If we are on a different CPU, then the lock protects us. */
/* Warm the cache line for the exclusive store below */
104 prefetchw(&lock->slock);
/* Ticket lock: ldrex/strex atomically claim a ticket by bumping the
 * "next" halfword (hence the 1 << 16 increment operand) */
106 "1: ldrex %0, [%3]\n"
108 " strex %2, %1, [%3]\n"
111 : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
112 : "r" (&lock->slock), "I" (1 << 16)
/* Spin until the owner field catches up with our claimed ticket */
115 while (lockval.tickets.next != lockval.tickets.owner) {
117 lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
/* Non-SMP / non-BCM2835 build: no cross-CPU contention, lock is a no-op */
122 inline void fiq_fsm_spin_lock(fiq_lock_t *lock) { }
126 * fiq_fsm_spin_unlock() - ARMv6+ bare bones spinunlock
128 #if defined(CONFIG_ARCH_BCM2835) && defined(CONFIG_SMP)
129 inline void fiq_fsm_spin_unlock(fiq_lock_t *lock)
/* Hand the lock to the next ticket holder by advancing "owner" */
132 lock->tickets.owner++;
/* Non-SMP / non-BCM2835 build: unlock is a no-op to match the lock stub */
136 inline void fiq_fsm_spin_unlock(fiq_lock_t *lock) { }
142 * fiq_fsm_restart_channel() - Poke channel enable bit for a split transaction
143 * @channel: channel to re-enable
145 static void fiq_fsm_restart_channel(struct fiq_state *st, int n, int force)
147 hcchar_data_t hcchar = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR) };
/* Periodic endpoints (eptype bit 0 set) must target a specific microframe */
150 if (st->channel[n].hcchar_copy.b.eptype & 0x1) {
151 hfnum_data_t hfnum = { .d32 = FIQ_READ(st->dwc_regs_base + HFNUM) };
152 /* Hardware bug workaround: update the ssplit index */
153 if (st->channel[n].hcsplt_copy.b.spltena)
154 st->channel[n].expected_uframe = (hfnum.b.frnum + 1) & 0x3FFF;
/* Channel goes out in the *next* microframe, so invert the current parity */
156 hcchar.b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
159 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR, hcchar.d32);
/* Read back before the second write - register access ordering */
160 hcchar.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
163 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR, hcchar.d32);
164 fiq_print(FIQDBG_INT, st, "HCGO %01d %01d", n, force);
168 * fiq_fsm_setup_csplit() - Prepare a host channel for a CSplit transaction stage
169 * @st: Pointer to the channel's state
170 * @n : channel number
172 * Change host channel registers to perform a complete-split transaction. Being mindful of the
173 * endpoint direction, set control regs up correctly.
175 static void notrace fiq_fsm_setup_csplit(struct fiq_state *st, int n)
177 hcsplt_data_t hcsplt = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT) };
178 hctsiz_data_t hctsiz = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ) };
/* Flip the channel from start-split to complete-split mode */
180 hcsplt.b.compsplt = 1;
181 if (st->channel[n].hcchar_copy.b.epdir == 1) {
182 // If IN, the CSPLIT result contains the data or a hub handshake. hctsiz = maxpacket.
/* Restore the originally-programmed transfer size from the saved copy */
183 hctsiz.b.xfersize = st->channel[n].hctsiz_copy.b.xfersize;
185 // If OUT, the CSPLIT result contains handshake only.
186 hctsiz.b.xfersize = 0;
188 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT, hcsplt.d32);
189 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, hctsiz.d32);
194 * fiq_fsm_restart_np_pending() - Restart a single non-periodic contended transfer
195 * @st: Pointer to the channel's state
196 * @num_channels: Total number of host channels
197 * @orig_channel: Channel index of completed transfer
199 * In the case where an IN and OUT transfer are simultaneously scheduled to the
200 * same device/EP, inadequate hub implementations will misbehave. Once the first
201 * transfer is complete, a pending non-periodic split can then be issued.
203 static void notrace fiq_fsm_restart_np_pending(struct fiq_state *st, int num_channels, int orig_channel)
/* Identify the device/endpoint the just-finished channel was addressing */
206 int dev_addr = st->channel[orig_channel].hcchar_copy.b.devaddr;
207 int ep_num = st->channel[orig_channel].hcchar_copy.b.epnum;
208 for (i = 0; i < num_channels; i++) {
/* Promote a held-off NP SSPLIT aimed at the same device/EP and kick it */
209 if (st->channel[i].fsm == FIQ_NP_SSPLIT_PENDING &&
210 st->channel[i].hcchar_copy.b.devaddr == dev_addr &&
211 st->channel[i].hcchar_copy.b.epnum == ep_num) {
212 st->channel[i].fsm = FIQ_NP_SSPLIT_STARTED;
213 fiq_fsm_restart_channel(st, i, 0);
219 static inline int notrace fiq_get_xfer_len(struct fiq_state *st, int n)
221 /* The xfersize register is a bit wonky. For IN transfers, it decrements by the packet size. */
222 hctsiz_data_t hctsiz = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ) };
224 if (st->channel[n].hcchar_copy.b.epdir == 0) {
225 return st->channel[n].hctsiz_copy.b.xfersize;
227 return st->channel[n].hctsiz_copy.b.xfersize - hctsiz.b.xfersize;
234 * fiq_increment_dma_buf() - update DMA address for bounce buffers after a CSPLIT
236 * Of use only for IN periodic transfers.
238 static int notrace fiq_increment_dma_buf(struct fiq_state *st, int num_channels, int n)
241 int i = st->channel[n].dma_info.index;
243 struct fiq_dma_blob *blob =
244 (struct fiq_dma_blob *)(uintptr_t)st->dma_base;
/* Record how many bytes the just-completed CSPLIT delivered into slot i */
246 len = fiq_get_xfer_len(st, n);
247 fiq_print(FIQDBG_INT, st, "LEN: %03d", len);
248 st->channel[n].dma_info.slot_len[i] = len;
/* Point HCDMA at the next per-channel bounce buffer slot */
253 hcdma.d32 = (u32)(uintptr_t)&blob->channel[n].index[i].buf[0];
254 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
255 st->channel[n].dma_info.index = i;
260 * fiq_reload_hctsiz() - for IN transactions, reset HCTSIZ
262 static void notrace fiq_fsm_reload_hctsiz(struct fiq_state *st, int n)
264 hctsiz_data_t hctsiz = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ) };
/* Rewind xfersize to the originally-programmed value from the saved copy */
265 hctsiz.b.xfersize = st->channel[n].hctsiz_copy.b.xfersize;
267 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, hctsiz.d32);
271 * fiq_fsm_reload_hcdma() - for OUT transactions, rewind DMA pointer
273 static void notrace fiq_fsm_reload_hcdma(struct fiq_state *st, int n)
275 hcdma_data_t hcdma = st->channel[n].hcdma_copy;
276 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
280 * fiq_iso_out_advance() - update DMA address and split position bits
281 * for isochronous OUT transactions.
283 * Returns 1 if this is the last packet queued, 0 otherwise. Split-ALL and
284 * Split-BEGIN states are not handled - this is done when the transaction was queued.
286 * This function must only be called from the FIQ_ISO_OUT_ACTIVE state.
288 static int notrace fiq_iso_out_advance(struct fiq_state *st, int num_channels, int n)
290 hcsplt_data_t hcsplt;
291 hctsiz_data_t hctsiz;
293 struct fiq_dma_blob *blob =
294 (struct fiq_dma_blob *)(uintptr_t)st->dma_base;
296 int i = st->channel[n].dma_info.index;
298 fiq_print(FIQDBG_INT, st, "ADV %01d %01d ", n, i);
/* A slot length of 255 is the sentinel marking the end of queued packets */
302 if (st->channel[n].dma_info.slot_len[i+1] == 255)
305 /* New DMA address - address of bounce buffer referred to in index */
306 hcdma.d32 = (u32)(uintptr_t)blob->channel[n].index[i].buf;
307 //hcdma.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA);
308 //hcdma.d32 += st->channel[n].dma_info.slot_len[i];
309 fiq_print(FIQDBG_INT, st, "LAST: %01d ", last);
310 fiq_print(FIQDBG_INT, st, "LEN: %03d", st->channel[n].dma_info.slot_len[i]);
311 hcsplt.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT);
312 hctsiz.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ);
/* Mark the packet's position within the split sequence (END vs MID) */
313 hcsplt.b.xactpos = (last) ? ISOC_XACTPOS_END : ISOC_XACTPOS_MID;
314 /* Set up new packet length */
316 hctsiz.b.xfersize = st->channel[n].dma_info.slot_len[i];
317 fiq_print(FIQDBG_INT, st, "%08x", hctsiz.d32);
/* Consume this slot; write all three registers back for the next packet */
319 st->channel[n].dma_info.index++;
320 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT, hcsplt.d32);
321 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, hctsiz.d32);
322 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
327 * fiq_fsm_tt_next_isoc() - queue next pending isochronous out start-split on a TT
329 * Despite the limitations of the DWC core, we can force a microframe pipeline of
330 * isochronous OUT start-split transactions while waiting for a corresponding other-type
331 * of endpoint to finish its CSPLITs. TTs have big periodic buffers therefore it
332 * is very unlikely that filling the start-split FIFO will cause data loss.
333 * This allows much better interleaving of transactions in an order-independent way-
334 * there is no requirement to prioritise isochronous, just a state-space search has
335 * to be performed on each periodic start-split complete interrupt.
337 static int notrace fiq_fsm_tt_next_isoc(struct fiq_state *st, int num_channels, int n)
/* Channels sharing hub address + port share the same transaction translator */
339 int hub_addr = st->channel[n].hub_addr;
340 int port_addr = st->channel[n].port_addr;
342 for (i = 0; i < num_channels; i++) {
/* Skip ourselves and channels the FIQ is not managing */
343 if (i == n || st->channel[i].fsm == FIQ_PASSTHROUGH)
345 if (st->channel[i].hub_addr == hub_addr &&
346 st->channel[i].port_addr == port_addr) {
347 switch (st->channel[i].fsm) {
348 case FIQ_PER_ISO_OUT_PENDING:
/* Single-packet transfers go straight to the LAST state */
349 if (st->channel[i].nrpackets == 1) {
350 st->channel[i].fsm = FIQ_PER_ISO_OUT_LAST;
352 st->channel[i].fsm = FIQ_PER_ISO_OUT_ACTIVE;
354 fiq_fsm_restart_channel(st, i, 0);
369 * fiq_fsm_tt_in_use() - search for host channels using this TT
370 * @n: Channel to use as reference
373 int notrace noinline fiq_fsm_tt_in_use(struct fiq_state *st, int num_channels, int n)
375 int hub_addr = st->channel[n].hub_addr;
376 int port_addr = st->channel[n].port_addr;
378 for (i = 0; i < num_channels; i++) {
/* Skip ourselves and channels not under FSM control */
379 if (i == n || st->channel[i].fsm == FIQ_PASSTHROUGH)
381 switch (st->channel[i].fsm) {
382 /* TT is reserved for channels that are in the middle of a periodic
/* These states mean a split transaction is mid-flight on the TT */
385 case FIQ_PER_SSPLIT_STARTED:
386 case FIQ_PER_CSPLIT_WAIT:
387 case FIQ_PER_CSPLIT_NYET1:
388 //case FIQ_PER_CSPLIT_POLL:
389 case FIQ_PER_ISO_OUT_ACTIVE:
390 case FIQ_PER_ISO_OUT_LAST:
/* Only a conflict if the other channel targets the same hub+port (same TT) */
391 if (st->channel[i].hub_addr == hub_addr &&
392 st->channel[i].port_addr == port_addr) {
406 * fiq_fsm_more_csplits() - determine whether additional CSPLITs need
407 * to be issued for this IN transaction.
409 * We cannot tell the inbound PID of a data packet due to hardware limitations.
410 * we need to make an educated guess as to whether we need to queue another CSPLIT
411 * or not. A no-brainer is when we have received enough data to fill the endpoint
412 * size, but for endpoints that give variable-length data then we have to resort
415 * We also return whether this is the last CSPLIT to be queued, again based on
416 * heuristics. This is to allow a 1-uframe overlap of periodic split transactions.
417 * Note: requires at least 1 CSPLIT to have been performed prior to being called.
421 * We need some way of guaranteeing if a returned periodic packet of size X
423 * The heuristic value of 144 bytes assumes that the received data has maximal
424 * bit-stuffing and the clock frequency of the transmitting device is at the lowest
425 * permissible limit. If the transfer length results in a final packet size
426 * 144 < p <= 188, then an erroneous CSPLIT will be issued.
427 * Also used to ensure that an endpoint will nominally only return a single
428 * complete-split worth of data.
430 #define DATA0_PID_HEURISTIC 144
432 static int notrace noinline fiq_fsm_more_csplits(struct fiq_state *state, int n, int *probably_last)
438 struct fiq_channel_state *st = &state->channel[n];
/* Sum the bytes delivered by every CSPLIT completed so far */
440 for (i = 0; i < st->dma_info.index; i++) {
441 total_len += st->dma_info.slot_len[i];
/* eptype 0x3 = interrupt endpoint */
446 if (st->hcchar_copy.b.eptype == 0x3) {
448 * An interrupt endpoint will take max 2 CSPLITs. if we are receiving data
449 * then this is definitely the last CSPLIT.
453 /* Isoc IN. This is a bit risky if we are the first transaction:
454 * we may have been held off slightly. */
/* A short last packet implies the device has finished sending */
455 if (i > 1 && st->dma_info.slot_len[st->dma_info.index-1] <= DATA0_PID_HEURISTIC) {
458 /* If in the next uframe we will receive enough data to fill the endpoint,
459 * then only issue 1 more csplit.
461 if (st->hctsiz_copy.b.xfersize - total_len <= DATA0_PID_HEURISTIC)
/* Stop when full, after 6 CSPLITs, or when the device returned nothing */
465 if (total_len >= st->hctsiz_copy.b.xfersize ||
466 i == 6 || total_len == 0)
467 /* Note: due to bit stuffing it is possible to have > 6 CSPLITs for
468 * a single endpoint. Accepting more would completely break our scheduling mechanism though
469 * - in these extreme cases we will pass through a truncated packet.
477 * fiq_fsm_too_late() - Test transaction for lateness
479 * If a SSPLIT for a large IN transaction is issued too late in a frame,
480 * the hub will disable the port to the device and respond with ERR handshakes.
481 * The hub status endpoint will not reflect this change.
482 * Returns 1 if we will issue a SSPLIT that will result in a device babble.
484 int notrace fiq_fsm_too_late(struct fiq_state *st, int n)
487 hfnum_data_t hfnum = { .d32 = FIQ_READ(st->dwc_regs_base + HFNUM) };
488 uframe = hfnum.b.frnum & 0x7;
489 if ((uframe < 6) && (st->channel[n].nrpackets + 1 + uframe > 7)) {
498 * fiq_fsm_start_next_periodic() - A half-arsed attempt at a microframe pipeline
500 * Search pending transactions in the start-split pending state and queue them.
501 * Don't queue packets in uframe .5 (comes out in .6) (USB2.0 11.18.4).
502 * Note: we specifically don't do isochronous OUT transactions first because better
503 * use of the TT's start-split fifo can be achieved by pipelining an IN before an OUT.
505 static void notrace noinline fiq_fsm_start_next_periodic(struct fiq_state *st, int num_channels)
508 hfnum_data_t hfnum = { .d32 = FIQ_READ(st->dwc_regs_base + HFNUM) };
/* Queued in uframe 5 would go out in 6 - disallowed per USB2.0 11.18.4 */
509 if ((hfnum.b.frnum & 0x7) == 5)
/* First pass: queued periodic SSPLITs (IN before OUT - see note above) */
511 for (n = 0; n < num_channels; n++) {
512 if (st->channel[n].fsm == FIQ_PER_SSPLIT_QUEUED) {
513 /* Check to see if any other transactions are using this TT */
514 if(!fiq_fsm_tt_in_use(st, num_channels, n)) {
515 if (!fiq_fsm_too_late(st, n)) {
516 st->channel[n].fsm = FIQ_PER_SSPLIT_STARTED;
517 fiq_print(FIQDBG_INT, st, "NEXTPER ");
518 fiq_fsm_restart_channel(st, n, 0);
/* Too late in the frame - abandon rather than risk device babble */
520 st->channel[n].fsm = FIQ_PER_SPLIT_TIMEOUT;
/* Second pass: pending isochronous OUT transactions */
526 for (n = 0; n < num_channels; n++) {
527 if (st->channel[n].fsm == FIQ_PER_ISO_OUT_PENDING) {
528 if (!fiq_fsm_tt_in_use(st, num_channels, n)) {
529 fiq_print(FIQDBG_INT, st, "NEXTISO ");
530 if (st->channel[n].nrpackets == 1)
531 st->channel[n].fsm = FIQ_PER_ISO_OUT_LAST;
533 st->channel[n].fsm = FIQ_PER_ISO_OUT_ACTIVE;
534 fiq_fsm_restart_channel(st, n, 0);
542 * fiq_fsm_update_hs_isoc() - update isochronous frame and transfer data
543 * @state: Pointer to fiq_state
544 * @n: Channel transaction is active on
545 * @hcint: Copy of host channel interrupt register
547 * Returns 0 if there are no more transactions for this HC to do, 1
550 static int notrace noinline fiq_fsm_update_hs_isoc(struct fiq_state *state, int n, hcint_data_t hcint)
552 struct fiq_channel_state *st = &state->channel[n];
553 int xfer_len = 0, nrpackets = 0;
555 fiq_print(FIQDBG_INT, state, "HSISO %02d", n);
/* Record the completed descriptor's actual length and raw interrupt status */
557 xfer_len = fiq_get_xfer_len(state, n);
558 st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].actual_length = xfer_len;
560 st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].status = hcint.d32;
/* Advance to the next descriptor; done when all frames are consumed */
562 st->hs_isoc_info.index++;
563 if (st->hs_isoc_info.index == st->hs_isoc_info.nrframes) {
567 /* grab the next DMA address offset from the array */
568 hcdma.d32 = st->hcdma_copy.d32 + st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].offset;
569 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
571 /* We need to set multi_count. This is a bit tricky - has to be set per-transaction as
572 * the core needs to be told to send the correct number. Caution: for IN transfers,
573 * this is always set to the maximum size of the endpoint. */
574 xfer_len = st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].length;
575 /* Integer divide in a FIQ: fun. FIXME: make this not suck */
/* Ceiling division: packets needed = ceil(xfer_len / mps) */
576 nrpackets = (xfer_len + st->hcchar_copy.b.mps - 1) / st->hcchar_copy.b.mps;
579 st->hcchar_copy.b.multicnt = nrpackets;
580 st->hctsiz_copy.b.pktcnt = nrpackets;
582 /* Initial PID also needs to be set */
/* epdir == 0: OUT transfer - PID depends on packets per microframe */
583 if (st->hcchar_copy.b.epdir == 0) {
584 st->hctsiz_copy.b.xfersize = xfer_len;
585 switch (st->hcchar_copy.b.multicnt) {
587 st->hctsiz_copy.b.pid = DWC_PID_DATA0;
591 st->hctsiz_copy.b.pid = DWC_PID_MDATA;
/* IN transfer - xfersize is always a full multiple of the endpoint size */
596 st->hctsiz_copy.b.xfersize = nrpackets * st->hcchar_copy.b.mps;
597 switch (st->hcchar_copy.b.multicnt) {
599 st->hctsiz_copy.b.pid = DWC_PID_DATA0;
602 st->hctsiz_copy.b.pid = DWC_PID_DATA1;
605 st->hctsiz_copy.b.pid = DWC_PID_DATA2;
609 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, st->hctsiz_copy.d32);
610 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR, st->hcchar_copy.d32);
611 /* Channel is enabled on hcint handler exit */
612 fiq_print(FIQDBG_INT, state, "HSISOOUT");
618 * fiq_fsm_do_sof() - FSM start-of-frame interrupt handler
619 * @state: Pointer to the state struct passed from banked FIQ mode registers.
620 * @num_channels: set according to the DWC hardware configuration
622 * The SOF handler in FSM mode has two functions
623 * 1. Hold off SOF from causing schedule advancement in IRQ context if there's
625 * 2. Advance certain FSM states that require either a microframe delay, or a microframe
628 * The second part is architecture-specific to mach-bcm2835 -
629 * a sane interrupt controller would have a mask register for ARM interrupt sources
630 * to be promoted to the nFIQ line, but it doesn't. Instead a single interrupt
631 * number (USB) can be enabled. This means that certain parts of the USB specification
632 * that require "wait a little while, then issue another packet" cannot be fulfilled with
633 * the timing granularity required to achieve optimal throughout. The workaround is to use
634 * the SOF "timer" (125uS) to perform this task.
636 static int notrace noinline fiq_fsm_do_sof(struct fiq_state *state, int num_channels)
638 hfnum_data_t hfnum = { .d32 = FIQ_READ(state->dwc_regs_base + HFNUM) };
/* Uframe .1 of each frame: scan for stale periodic CSPLITs */
642 if ((hfnum.b.frnum & 0x7) == 1) {
643 /* We cannot issue csplits for transactions in the last frame past (n+1).1
644 * Check to see if there are any transactions that are stale.
647 for (n = 0; n < num_channels; n++) {
648 switch (state->channel[n].fsm) {
649 case FIQ_PER_CSPLIT_WAIT:
650 case FIQ_PER_CSPLIT_NYET1:
651 case FIQ_PER_CSPLIT_POLL:
652 case FIQ_PER_CSPLIT_LAST:
653 /* Check if we are no longer in the same full-speed frame. */
/* Masking off the low 3 bits compares frame numbers, not microframes */
654 if (((state->channel[n].expected_uframe & 0x3FFF) & ~0x7) <
655 (hfnum.b.frnum & ~0x7))
656 state->channel[n].fsm = FIQ_PER_SPLIT_TIMEOUT;
/* Per-channel state advancement on every SOF (125us tick) */
664 for (n = 0; n < num_channels; n++) {
665 switch (state->channel[n].fsm) {
/* Non-periodic retries are paced to uframe boundaries - kick them now */
667 case FIQ_NP_SSPLIT_RETRY:
668 case FIQ_NP_IN_CSPLIT_RETRY:
669 case FIQ_NP_OUT_CSPLIT_RETRY:
670 fiq_fsm_restart_channel(state, n, 0);
673 case FIQ_HS_ISOC_SLEEPING:
674 /* Is it time to wake this channel yet? */
675 if (--state->channel[n].uframe_sleeps == 0) {
676 state->channel[n].fsm = FIQ_HS_ISOC_TURBO;
677 fiq_fsm_restart_channel(state, n, 0);
681 case FIQ_PER_SSPLIT_QUEUED:
/* Don't queue in uframe 5 - would go out in 6 */
682 if ((hfnum.b.frnum & 0x7) == 5)
684 if(!fiq_fsm_tt_in_use(state, num_channels, n)) {
685 if (!fiq_fsm_too_late(state, n)) {
686 fiq_print(FIQDBG_INT, state, "SOF GO %01d", n);
687 fiq_fsm_restart_channel(state, n, 0);
688 state->channel[n].fsm = FIQ_PER_SSPLIT_STARTED;
690 /* Transaction cannot be started without risking a device babble error */
691 state->channel[n].fsm = FIQ_PER_SPLIT_TIMEOUT;
/* Unmask path: clearing HCINTMSK + saved haintmsk forces IRQ handoff */
692 state->haintmsk_saved.b2.chint &= ~(1 << n);
693 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINTMSK, 0);
699 case FIQ_PER_ISO_OUT_PENDING:
700 /* Ordinarily, this should be poked after the SSPLIT
701 * complete interrupt for a competing transfer on the same
702 * TT. Doesn't happen for aborted transactions though.
704 if ((hfnum.b.frnum & 0x7) >= 5)
706 if (!fiq_fsm_tt_in_use(state, num_channels, n)) {
707 /* Hardware bug. SOF can sometimes occur after the channel halt interrupt
710 fiq_fsm_restart_channel(state, n, 0);
711 fiq_print(FIQDBG_INT, state, "SOF ISOC");
712 if (state->channel[n].nrpackets == 1) {
713 state->channel[n].fsm = FIQ_PER_ISO_OUT_LAST;
715 state->channel[n].fsm = FIQ_PER_ISO_OUT_ACTIVE;
720 case FIQ_PER_CSPLIT_WAIT:
721 /* we are guaranteed to be in this state if and only if the SSPLIT interrupt
722 * occurred when the bus transaction occurred. The SOF interrupt reversal bug
723 * will utterly bugger this up though.
725 if (hfnum.b.frnum != state->channel[n].expected_uframe) {
726 fiq_print(FIQDBG_INT, state, "SOFCS %d ", n);
727 state->channel[n].fsm = FIQ_PER_CSPLIT_POLL;
728 fiq_fsm_restart_channel(state, n, 0);
729 fiq_fsm_start_next_periodic(state, num_channels);
734 case FIQ_PER_SPLIT_TIMEOUT:
735 case FIQ_DEQUEUE_ISSUED:
736 /* Ugly: we have to force a HCD interrupt.
737 * Poke the mask for the channel in question.
738 * We will take a fake SOF because of this, but
741 state->haintmsk_saved.b2.chint &= ~(1 << n);
742 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINTMSK, 0);
/* Decide whether the IRQ-side scheduler needs to run this SOF */
751 if (state->kick_np_queues ||
752 dwc_frame_num_le(state->next_sched_frame, hfnum.b.frnum))
760 * fiq_fsm_do_hcintr() - FSM host channel interrupt handler
761 * @state: Pointer to the FIQ state struct
762 * @num_channels: Number of channels as per hardware config
763 * @n: channel for which HAINT(i) was raised
765 * An important property is that only the CHHLT interrupt is unmasked. Unfortunately, AHBerr is as well.
767 static int notrace noinline fiq_fsm_do_hcintr(struct fiq_state *state, int num_channels, int n)
770 hcintmsk_data_t hcintmsk;
771 hcint_data_t hcint_probe;
772 hcchar_data_t hcchar;
776 int start_next_periodic = 0;
777 struct fiq_channel_state *st = &state->channel[n];
780 hcint.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINT);
781 hcintmsk.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINTMSK);
782 hcint_probe.d32 = hcint.d32 & hcintmsk.d32;
784 if (st->fsm != FIQ_PASSTHROUGH) {
785 fiq_print(FIQDBG_INT, state, "HC%01d ST%02d", n, st->fsm);
786 fiq_print(FIQDBG_INT, state, "%08x", hcint.d32);
791 case FIQ_PASSTHROUGH:
792 case FIQ_DEQUEUE_ISSUED:
793 /* doesn't belong to us, kick it upstairs */
796 case FIQ_PASSTHROUGH_ERRORSTATE:
797 /* We are here to emulate the error recovery mechanism of the dwc HCD.
798 * Several interrupts are unmasked if a previous transaction failed - it's
799 * death for the FIQ to attempt to handle them as the channel isn't halted.
800 * Emulate what the HCD does in this situation: mask and continue.
801 * The FSM has no other state setup so this has to be handled out-of-band.
803 fiq_print(FIQDBG_ERR, state, "ERRST %02d", n);
804 if (hcint_probe.b.nak || hcint_probe.b.ack || hcint_probe.b.datatglerr) {
805 fiq_print(FIQDBG_ERR, state, "RESET %02d", n);
806 /* In some random cases we can get a NAK interrupt coincident with a Xacterr
807 * interrupt, after the device has disappeared.
809 if (!hcint.b.xacterr)
813 hcintmsk.b.datatglerr = 0;
814 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINTMSK, hcintmsk.d32);
817 if (hcint_probe.b.chhltd) {
818 fiq_print(FIQDBG_ERR, state, "CHHLT %02d", n);
819 fiq_print(FIQDBG_ERR, state, "%08x", hcint.d32);
824 /* Non-periodic state groups */
825 case FIQ_NP_SSPLIT_STARTED:
826 case FIQ_NP_SSPLIT_RETRY:
827 /* Got a HCINT for a NP SSPLIT. Expected ACK / NAK / fail */
829 /* SSPLIT complete. For OUT, the data has been sent. For IN, the LS transaction
830 * will start shortly. SOF needs to kick the transaction to prevent a NYET flood.
832 if(st->hcchar_copy.b.epdir == 1)
833 st->fsm = FIQ_NP_IN_CSPLIT_RETRY;
835 st->fsm = FIQ_NP_OUT_CSPLIT_RETRY;
838 fiq_fsm_setup_csplit(state, n);
839 } else if (hcint.b.nak) {
840 // No buffer space in TT. Retry on a uframe boundary.
841 fiq_fsm_reload_hcdma(state, n);
842 st->fsm = FIQ_NP_SSPLIT_RETRY;
844 } else if (hcint.b.xacterr) {
845 // The only other one we care about is xacterr. This implies HS bus error - retry.
847 if(st->hcchar_copy.b.epdir == 0)
848 fiq_fsm_reload_hcdma(state, n);
849 st->fsm = FIQ_NP_SSPLIT_RETRY;
850 if (st->nr_errors >= 3) {
851 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
857 st->fsm = FIQ_NP_SPLIT_LS_ABORTED;
863 case FIQ_NP_IN_CSPLIT_RETRY:
864 /* Received a CSPLIT done interrupt.
865 * Expected Data/NAK/STALL/NYET for IN.
867 if (hcint.b.xfercomp) {
868 /* For IN, data is present. */
869 st->fsm = FIQ_NP_SPLIT_DONE;
870 } else if (hcint.b.nak) {
871 /* no endpoint data. Punt it upstairs */
872 st->fsm = FIQ_NP_SPLIT_DONE;
873 } else if (hcint.b.nyet) {
874 /* CSPLIT NYET - retry on a uframe boundary. */
877 } else if (hcint.b.datatglerr) {
878 /* data toggle errors do not set the xfercomp bit. */
879 st->fsm = FIQ_NP_SPLIT_LS_ABORTED;
880 } else if (hcint.b.xacterr) {
881 /* HS error. Retry immediate */
882 st->fsm = FIQ_NP_IN_CSPLIT_RETRY;
884 if (st->nr_errors >= 3) {
885 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
890 } else if (hcint.b.stall || hcint.b.bblerr) {
891 /* A STALL implies either a LS bus error or a genuine STALL. */
892 st->fsm = FIQ_NP_SPLIT_LS_ABORTED;
894 /* Hardware bug. It's possible in some cases to
895 * get a channel halt with nothing else set when
896 * the response was a NYET. Treat as local 3-strikes retry.
898 hcint_data_t hcint_test = hcint;
899 hcint_test.b.chhltd = 0;
900 if (!hcint_test.d32) {
902 if (st->nr_errors >= 3) {
903 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
908 /* Bail out if something unexpected happened */
909 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
912 if (st->fsm != FIQ_NP_IN_CSPLIT_RETRY) {
913 fiq_fsm_restart_np_pending(state, num_channels, n);
917 case FIQ_NP_OUT_CSPLIT_RETRY:
918 /* Received a CSPLIT done interrupt.
919 * Expected ACK/NAK/STALL/NYET/XFERCOMP for OUT.*/
920 if (hcint.b.xfercomp) {
921 st->fsm = FIQ_NP_SPLIT_DONE;
922 } else if (hcint.b.nak) {
923 // The HCD will implement the holdoff on frame boundaries.
924 st->fsm = FIQ_NP_SPLIT_DONE;
925 } else if (hcint.b.nyet) {
926 // Hub still processing.
927 st->fsm = FIQ_NP_OUT_CSPLIT_RETRY;
931 } else if (hcint.b.xacterr) {
932 /* HS error. retry immediate */
933 st->fsm = FIQ_NP_OUT_CSPLIT_RETRY;
935 if (st->nr_errors >= 3) {
936 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
941 } else if (hcint.b.stall) {
942 /* LS bus error or genuine stall */
943 st->fsm = FIQ_NP_SPLIT_LS_ABORTED;
946 * Hardware bug. It's possible in some cases to get a
947 * channel halt with nothing else set when the response was a NYET.
948 * Treat as local 3-strikes retry.
950 hcint_data_t hcint_test = hcint;
951 hcint_test.b.chhltd = 0;
952 if (!hcint_test.d32) {
954 if (st->nr_errors >= 3) {
955 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
960 // Something unexpected happened. AHBerror or babble perhaps. Let the IRQ deal with it.
961 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
964 if (st->fsm != FIQ_NP_OUT_CSPLIT_RETRY) {
965 fiq_fsm_restart_np_pending(state, num_channels, n);
969 /* Periodic split states (except isoc out) */
970 case FIQ_PER_SSPLIT_STARTED:
971 /* Expect an ACK or failure for SSPLIT */
974 * SSPLIT transfer complete interrupt - the generation of this interrupt is fraught with bugs.
975 * For a packet queued in microframe n-3 to appear in n-2, if the channel is enabled near the EOF1
976 * point for microframe n-3, the packet will not appear on the bus until microframe n.
977 * Additionally, the generation of the actual interrupt is dodgy. For a packet appearing on the bus
978 * in microframe n, sometimes the interrupt is generated immediately. Sometimes, it appears in n+1
979 * coincident with SOF for n+1.
980 * SOF is also buggy. It can sometimes be raised AFTER the first bus transaction has taken place.
981 * These appear to be caused by timing/clock crossing bugs within the core itself.
982 * State machine workaround.
984 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
985 hcchar.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
986 fiq_fsm_setup_csplit(state, n);
987 /* Poke the oddfrm bit. If we are equivalent, we received the interrupt at the correct
988 * time. If not, then we're in the next SOF.
990 if ((hfnum.b.frnum & 0x1) == hcchar.b.oddfrm) {
991 fiq_print(FIQDBG_INT, state, "CSWAIT %01d", n);
992 st->expected_uframe = hfnum.b.frnum;
993 st->fsm = FIQ_PER_CSPLIT_WAIT;
995 fiq_print(FIQDBG_INT, state, "CSPOL %01d", n);
996 /* For isochronous IN endpoints,
997 * we need to hold off if we are expecting a lot of data */
998 if (st->hcchar_copy.b.mps < DATA0_PID_HEURISTIC) {
999 start_next_periodic = 1;
1001 /* Danger will robinson: we are in a broken state. If our first interrupt after
1002 * this is a NYET, it will be delayed by 1 uframe and result in an unrecoverable
1003 * lag. Unmask the NYET interrupt.
1005 st->expected_uframe = (hfnum.b.frnum + 1) & 0x3FFF;
1006 st->fsm = FIQ_PER_CSPLIT_BROKEN_NYET1;
1010 } else if (hcint.b.xacterr) {
1011 /* 3-strikes retry is enabled, we have hit our max nr_errors */
1012 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1013 start_next_periodic = 1;
1015 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1016 start_next_periodic = 1;
1018 /* We can now queue the next isochronous OUT transaction, if one is pending. */
1019 if(fiq_fsm_tt_next_isoc(state, num_channels, n)) {
1020 fiq_print(FIQDBG_INT, state, "NEXTISO ");
1024 case FIQ_PER_CSPLIT_NYET1:
1025 /* First CSPLIT attempt was a NYET. If we get a subsequent NYET,
1026 * we are too late and the TT has dropped its CSPLIT fifo.
1028 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
1029 hcchar.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
1030 start_next_periodic = 1;
1032 st->fsm = FIQ_PER_SPLIT_DONE;
1033 } else if (hcint.b.xfercomp) {
1034 fiq_increment_dma_buf(state, num_channels, n);
1035 st->fsm = FIQ_PER_CSPLIT_POLL;
1037 if (fiq_fsm_more_csplits(state, n, &last_csplit)) {
1041 start_next_periodic = 0;
1043 st->fsm = FIQ_PER_SPLIT_DONE;
1045 } else if (hcint.b.nyet) {
1046 /* Doh. Data lost. */
1047 st->fsm = FIQ_PER_SPLIT_NYET_ABORTED;
1048 } else if (hcint.b.xacterr || hcint.b.stall || hcint.b.bblerr) {
1049 st->fsm = FIQ_PER_SPLIT_LS_ABORTED;
1051 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1055 case FIQ_PER_CSPLIT_BROKEN_NYET1:
1057 * we got here because our host channel is in the delayed-interrupt
1058 * state and we cannot take a NYET interrupt any later than when it
1059 * occurred. Disable then re-enable the channel if this happens to force
1060 * CSPLITs to occur at the right time.
1062 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
1063 hcchar.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
1064 fiq_print(FIQDBG_INT, state, "BROK: %01d ", n);
1066 st->fsm = FIQ_PER_SPLIT_DONE;
1067 start_next_periodic = 1;
1068 } else if (hcint.b.xfercomp) {
1069 fiq_increment_dma_buf(state, num_channels, n);
1070 if (fiq_fsm_more_csplits(state, n, &last_csplit)) {
1071 st->fsm = FIQ_PER_CSPLIT_POLL;
1074 start_next_periodic = 1;
1075 /* Reload HCTSIZ for the next transfer */
1076 fiq_fsm_reload_hctsiz(state, n);
1078 start_next_periodic = 0;
1080 st->fsm = FIQ_PER_SPLIT_DONE;
1082 } else if (hcint.b.nyet) {
1083 st->fsm = FIQ_PER_SPLIT_NYET_ABORTED;
1084 start_next_periodic = 1;
1085 } else if (hcint.b.xacterr || hcint.b.stall || hcint.b.bblerr) {
1086 /* Local 3-strikes retry is handled by the core. This is an ERR response.*/
1087 st->fsm = FIQ_PER_SPLIT_LS_ABORTED;
1089 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1093 case FIQ_PER_CSPLIT_POLL:
1094 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
1095 hcchar.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
1096 start_next_periodic = 1;
1098 st->fsm = FIQ_PER_SPLIT_DONE;
1099 } else if (hcint.b.xfercomp) {
1100 fiq_increment_dma_buf(state, num_channels, n);
1101 if (fiq_fsm_more_csplits(state, n, &last_csplit)) {
1104 /* Reload HCTSIZ for the next transfer */
1105 fiq_fsm_reload_hctsiz(state, n);
1107 start_next_periodic = 0;
1109 st->fsm = FIQ_PER_SPLIT_DONE;
1111 } else if (hcint.b.nyet) {
1112 /* Are we a NYET after the first data packet? */
1113 if (st->nrpackets == 0) {
1114 st->fsm = FIQ_PER_CSPLIT_NYET1;
1118 /* We got a NYET when polling CSPLITs. Can happen
1119 * if our heuristic fails, or if someone disables us
1120 * for any significant length of time.
1122 if (st->nr_errors >= 3) {
1123 st->fsm = FIQ_PER_SPLIT_NYET_ABORTED;
1125 st->fsm = FIQ_PER_SPLIT_DONE;
1128 } else if (hcint.b.xacterr || hcint.b.stall || hcint.b.bblerr) {
1129 /* For xacterr, local 3-strikes retry is handled by the core. This is an ERR response.*/
1130 st->fsm = FIQ_PER_SPLIT_LS_ABORTED;
1132 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1136 case FIQ_HS_ISOC_TURBO:
1137 if (fiq_fsm_update_hs_isoc(state, n, hcint)) {
1138 /* more transactions to come */
1140 fiq_print(FIQDBG_INT, state, "HSISO M ");
1141 /* For strided transfers, put ourselves to sleep */
1142 if (st->hs_isoc_info.stride > 1) {
1143 st->uframe_sleeps = st->hs_isoc_info.stride - 1;
1144 st->fsm = FIQ_HS_ISOC_SLEEPING;
1149 st->fsm = FIQ_HS_ISOC_DONE;
1150 fiq_print(FIQDBG_INT, state, "HSISO F ");
1154 case FIQ_HS_ISOC_ABORTED:
1155 /* This abort is called by the driver rewriting the state mid-transaction
1156 * which allows the dequeue mechanism to work more effectively.
1160 case FIQ_PER_ISO_OUT_ACTIVE:
1162 if(fiq_iso_out_advance(state, num_channels, n)) {
1163 /* last OUT transfer */
1164 st->fsm = FIQ_PER_ISO_OUT_LAST;
1166 * Assuming the periodic FIFO in the dwc core
1167 * actually does its job properly, we can queue
1168 * the next ssplit now and in theory, the wire
1169 * transactions will be in-order.
1171 // No it doesn't. It appears to process requests in host channel order.
1172 //start_next_periodic = 1;
1178 * Isochronous transactions carry on regardless. Log the error
1183 if(fiq_iso_out_advance(state, num_channels, n)) {
1184 st->fsm = FIQ_PER_ISO_OUT_LAST;
1185 //start_next_periodic = 1;
1192 case FIQ_PER_ISO_OUT_LAST:
1195 st->fsm = FIQ_PER_ISO_OUT_DONE;
1197 st->fsm = FIQ_PER_ISO_OUT_DONE;
1200 start_next_periodic = 1;
1203 case FIQ_PER_SPLIT_TIMEOUT:
1204 /* SOF kicked us because we overran. */
1205 start_next_periodic = 1;
1213 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINT, hcint.d32);
1215 /* Copy the regs into the state so the IRQ knows what to do */
1216 st->hcint_copy.d32 = hcint.d32;
1220 /* Restart always implies handled. */
1222 /* For complete-split INs, the show must go on.
1223 * Force a channel restart */
1224 fiq_fsm_restart_channel(state, n, 1);
1226 fiq_fsm_restart_channel(state, n, 0);
1229 if (start_next_periodic) {
1230 fiq_fsm_start_next_periodic(state, num_channels);
1232 if (st->fsm != FIQ_PASSTHROUGH)
1233 fiq_print(FIQDBG_INT, state, "FSMOUT%02d", st->fsm);
1240 * dwc_otg_fiq_fsm() - Flying State Machine (monster) FIQ
1241 * @state: pointer to state struct passed from the banked FIQ mode registers.
1242 * @num_channels: set according to the DWC hardware configuration
1243 * @dma: pointer to DMA bounce buffers for split transaction slots
1245 * The FSM FIQ performs the low-level tasks that normally would be performed by the microcode
1246 * inside an EHCI or similar host controller regarding split transactions. The DWC core
1247 * interrupts each and every time a split transaction packet is received or sent successfully.
1248 * This results either in an interrupt storm when everything is working "properly", or
1249 * in the system's general interrupt latency breaking time-sensitive periodic split
1250 * transactions. Pushing the low-level, but relatively easy state machine work into the FIQ
1251 * solves these problems.
1255 void notrace dwc_otg_fiq_fsm(struct fiq_state *state, int num_channels)
/* Scratch copies of the core's global and per-host-channel interrupt
 * status/mask registers for this FIQ pass. */
1257 gintsts_data_t gintsts, gintsts_handled;
1258 gintmsk_data_t gintmsk;
1259 //hfnum_data_t hfnum;
1260 haint_data_t haint, haint_handled;
1261 haintmsk_data_t haintmsk;
1264 /* Ensure peripheral reads issued prior to FIQ entry are complete */
/* Nothing has been claimed as handled yet on this pass. */
1267 gintsts_handled.d32 = 0;
1268 haint_handled.d32 = 0;
/* Serialise register access against the IRQ-mode handler. */
1270 fiq_fsm_spin_lock(&state->lock);
1271 gintsts.d32 = FIQ_READ(state->dwc_regs_base + GINTSTS);
1272 gintmsk.d32 = FIQ_READ(state->dwc_regs_base + GINTMSK);
/* Consider only interrupt sources that are currently unmasked. */
1273 gintsts.d32 &= gintmsk.d32;
1275 if (gintsts.b.sofintr) {
1276 /* For FSM mode, SOF is required to keep the state machine advance for
1277 * certain stages of the periodic pipeline. It's death to mask this
1278 * interrupt in that case.
/* fiq_fsm_do_sof() returns nonzero when the SOF work was fully consumed
 * in the FIQ; otherwise the IRQ must be kicked once. */
1281 if (!fiq_fsm_do_sof(state, num_channels)) {
1282 /* Kick IRQ once. Queue advancement means that all pending transactions
1283 * will get serviced when the IRQ finally executes.
1285 if (state->gintmsk_saved.b.sofintr == 1)
/* NOTE(review): one or more source lines are elided here -- the statement
 * guarded by the 'if' above (presumably the IRQ kick) is not visible. */
1287 state->gintmsk_saved.b.sofintr = 0;
/* SOF is acknowledged below via the edge-sensitive GINTSTS write. */
1289 gintsts_handled.b.sofintr = 1;
1292 if (gintsts.b.hcintr) {
/* Host channel interrupt: determine which channel(s) fired. */
1294 haint.d32 = FIQ_READ(state->dwc_regs_base + HAINT);
1295 haintmsk.d32 = FIQ_READ(state->dwc_regs_base + HAINTMSK);
1296 haint.d32 &= haintmsk.d32;
1297 haint_handled.d32 = 0;
/* Feed each pending channel interrupt into the per-channel FSM. */
1298 for (i=0; i<num_channels; i++) {
1299 if (haint.b2.chint & (1 << i)) {
1300 if(!fiq_fsm_do_hcintr(state, num_channels, i)) {
1301 /* HCINT was not handled in FIQ
1302 * HAINT is level-sensitive, leading to level-sensitive ginststs.b.hcint bit.
1303 * Mask HAINT(i) but keep top-level hcint unmasked.
1305 state->haintmsk_saved.b2.chint &= ~(1 << i);
1307 /* do_hcintr cleaned up after itself, but clear haint */
1308 haint_handled.b2.chint |= (1 << i);
/* Acknowledge only the channel interrupts consumed in the FIQ. */
1313 if (haint_handled.b2.chint) {
1314 FIQ_WRITE(state->dwc_regs_base + HAINT, haint_handled.d32);
/* If any channel was newly left unhandled, narrow HAINTMSK so it stops
 * retriggering while the IRQ catches up. */
1317 if (haintmsk.d32 != (haintmsk.d32 & state->haintmsk_saved.d32)) {
1319 * This is necessary to avoid multiple retriggers of the MPHI in the case
1320 * where interrupts are held off and HCINTs start to pile up.
1321 * Only wake up the IRQ if a new interrupt came in, was not handled and was
1324 haintmsk.d32 &= state->haintmsk_saved.d32;
1325 FIQ_WRITE(state->dwc_regs_base + HAINTMSK, haintmsk.d32);
1328 /* Top-Level interrupt - always handled because it's level-sensitive */
1329 gintsts_handled.b.hcintr = 1;
1333 /* Clear the bits in the saved register that were not handled but were triggered. */
1334 state->gintmsk_saved.d32 &= ~(gintsts.d32 & ~gintsts_handled.d32);
1336 /* FIQ didn't handle something - mask has changed - write new mask */
1337 if (gintmsk.d32 != (gintmsk.d32 & state->gintmsk_saved.d32)) {
1338 gintmsk.d32 &= state->gintmsk_saved.d32;
/* SOF must never be masked while the FSM is active (see comment above). */
1339 gintmsk.b.sofintr = 1;
1340 FIQ_WRITE(state->dwc_regs_base + GINTMSK, gintmsk.d32);
1341 // fiq_print(FIQDBG_INT, state, "KICKGINT");
1342 // fiq_print(FIQDBG_INT, state, "%08x", gintmsk.d32);
1343 // fiq_print(FIQDBG_INT, state, "%08x", state->gintmsk_saved.d32);
1347 if (gintsts_handled.d32) {
1348 /* Only applies to edge-sensitive bits in GINTSTS */
1349 FIQ_WRITE(state->dwc_regs_base + GINTSTS, gintsts_handled.d32);
1352 /* We got an interrupt, didn't handle it. */
/* Ring the doorbell for the ARM IRQ handler: use the MPHI software IRQ
 * register when present, otherwise fall back to a dummy MPHI DMA send. */
1354 state->mphi_int_count++;
1355 if (state->mphi_regs.swirq_set) {
1356 FIQ_WRITE(state->mphi_regs.swirq_set, 1);
1358 FIQ_WRITE(state->mphi_regs.outdda, state->dummy_send_dma);
1359 FIQ_WRITE(state->mphi_regs.outddb, (1<<29));
1365 fiq_fsm_spin_unlock(&state->lock);
1370 * dwc_otg_fiq_nop() - FIQ "lite"
1371 * @state: pointer to state struct passed from the banked FIQ mode registers.
1373 * The "nop" handler does not intervene on any interrupts other than SOF.
1374 * It is limited in scope to deciding at each SOF if the IRQ SOF handler (which deals
1375 * with non-periodic/periodic queues) needs to be kicked.
1377 * This is done to hold off the SOF interrupt, which occurs at a rate of 8000 per second.
1381 void notrace dwc_otg_fiq_nop(struct fiq_state *state)
/* Scratch copies of the core's global interrupt status/mask registers. */
1383 gintsts_data_t gintsts, gintsts_handled;
1384 gintmsk_data_t gintmsk;
1387 /* Ensure peripheral reads issued prior to FIQ entry are complete */
/* NOTE(review): the declaration of 'hfnum' (used below) is not visible in
 * this view -- presumably elided source lines declare it here. */
1390 fiq_fsm_spin_lock(&state->lock);
1391 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
1392 gintsts.d32 = FIQ_READ(state->dwc_regs_base + GINTSTS);
1393 gintmsk.d32 = FIQ_READ(state->dwc_regs_base + GINTMSK);
/* Consider only unmasked interrupt sources. */
1394 gintsts.d32 &= gintmsk.d32;
1395 gintsts_handled.d32 = 0;
1397 if (gintsts.b.sofintr) {
/* Swallow the SOF entirely when the non-periodic queues don't need a kick
 * and the next scheduled frame is still in the future. */
1398 if (!state->kick_np_queues &&
1399 dwc_frame_num_gt(state->next_sched_frame, hfnum.b.frnum)) {
1400 /* SOF handled, no work to do, just ACK interrupt */
1401 gintsts_handled.b.sofintr = 1;
/* Otherwise leave SOF for the IRQ handler: record it as unhandled. */
1404 state->gintmsk_saved.b.sofintr = 0;
1408 /* Reset handled interrupts */
1409 if(gintsts_handled.d32) {
1410 FIQ_WRITE(state->dwc_regs_base + GINTSTS, gintsts_handled.d32);
1413 /* Clear the bits in the saved register that were not handled but were triggered. */
1414 state->gintmsk_saved.d32 &= ~(gintsts.d32 & ~gintsts_handled.d32);
1416 /* We got an interrupt, didn't handle it and want to mask it */
1417 if (~(state->gintmsk_saved.d32)) {
1418 state->mphi_int_count++;
/* Narrow the mask so the unhandled source stops retriggering, then wake
 * the ARM IRQ via the MPHI (software IRQ if available, else dummy send). */
1419 gintmsk.d32 &= state->gintmsk_saved.d32;
1420 FIQ_WRITE(state->dwc_regs_base + GINTMSK, gintmsk.d32);
1421 if (state->mphi_regs.swirq_set) {
1422 FIQ_WRITE(state->mphi_regs.swirq_set, 1);
1424 /* Force a clear before another dummy send */
1425 FIQ_WRITE(state->mphi_regs.intstat, (1<<29));
1426 FIQ_WRITE(state->mphi_regs.outdda, state->dummy_send_dma);
1427 FIQ_WRITE(state->mphi_regs.outddb, (1<<29));
1432 fiq_fsm_spin_unlock(&state->lock);