2 * dwc_otg_fiq_fsm.c - The finite state machine FIQ
4 * Copyright (c) 2013 Raspberry Pi Foundation
6 * Author: Jonathan Bell <jonathan@raspberrypi.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * * Neither the name of Raspberry Pi nor the
17 * names of its contributors may be used to endorse or promote products
18 * derived from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
22 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
24 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * This FIQ implements functionality that performs split transactions on
32 * the dwc_otg hardware without any outside intervention. A split transaction
33 * is "queued" by nominating a specific host channel to perform the entirety
34 * of a split transaction. This FIQ will then perform the microframe-precise
35 * scheduling required in each phase of the transaction until completion.
37 * The FIQ functionality is glued into the Synopsys driver via the entry point
38 * in the FSM enqueue function, and at the exit point in handling a HC interrupt
39 * for a FSM-enabled channel.
41 * NB: Large parts of this implementation have architecture-specific code.
42 * For porting this functionality to other ARM machines, the following minimum is required:
43 * - An interrupt controller allowing the top-level dwc USB interrupt to be routed to FIQ
45 * - A method of forcing a software generated interrupt from FIQ mode that then
46 * triggers an IRQ entry (with the dwc USB handler called by this IRQ number)
47 * - Guaranteed interrupt routing such that both the FIQ and SGI occur on the same
48 * processor core - there is no locking between the FIQ and IRQ (aside from
53 #include "dwc_otg_fiq_fsm.h"
/*
 * _fiq_print() - FIQ-safe trace print into a fixed-size circular buffer.
 * Each record is 16 bytes: an 8-char " frame:uframe " stamp read from the
 * HFNUM register (offset 0x408) followed by up to 8 chars of formatted text.
 * Only messages whose level matches dbg_lvl_req, or FIQDBG_ERR messages,
 * are recorded.
 * NOTE(review): this listing is missing lines (text[]/args declarations,
 * va_start/va_end, braces) - compare against the full source before editing.
 */
58 void notrace _fiq_print(enum fiq_debug_level dbg_lvl, volatile struct fiq_state *state, char *fmt, ...)
60 enum fiq_debug_level dbg_lvl_req = FIQDBG_ERR;
63 hfnum_data_t hfnum = { .d32 = FIQ_READ(state->dwc_regs_base + 0x408) };
65 if((dbg_lvl & dbg_lvl_req) || dbg_lvl == FIQDBG_ERR)
/* " frame:uframe " stamp: frnum/8 is the full-speed frame, low 3 bits the microframe */
67 snprintf(text, 9, " %4d:%1u ", hfnum.b.frnum/8, hfnum.b.frnum & 7);
69 vsnprintf(text+8, 9, fmt, args);
/* commit the 16-byte record and advance the ring-buffer write pointer, wrapping */
72 memcpy(buffer + wptr, text, 16);
73 wptr = (wptr + 16) % sizeof(buffer);
/*
 * fiq_fsm_spin_lock() - variant that defers to the kernel spinlock.
 * fiq_lock_t is cast straight to spinlock_t here.
 * NOTE(review): presumably selected by a preprocessor conditional not visible
 * in this listing (contrast the BCM2835 ticket-lock variant further down).
 */
80 inline void fiq_fsm_spin_lock(fiq_lock_t *lock)
82 spin_lock((spinlock_t *)lock);
/*
 * fiq_fsm_spin_unlock() - release counterpart of the kernel-spinlock variant
 * of fiq_fsm_spin_lock() above.
 */
85 inline void fiq_fsm_spin_unlock(fiq_lock_t *lock)
87 spin_unlock((spinlock_t *)lock);
93 * fiq_fsm_spin_lock() - ARMv6+ bare bones spinlock
94 * Must be called with local interrupts and FIQ disabled.
96 #if defined(CONFIG_ARCH_BCM2835) && defined(CONFIG_SMP)
97 inline void fiq_fsm_spin_lock(fiq_lock_t *lock)
102 /* Nested locking, yay. If we are on the same CPU as the fiq, then the disable
103 * will be sufficient. If we are on a different CPU, then the lock protects us. */
104 prefetchw(&lock->slock);
/* ldrex/strex loop: atomically take a ticket. The "I" (1 << 16) immediate
 * presumably bumps the "next" half of the ticket word - confirm against the
 * full asm body, several lines of which are missing from this listing. */
106 "1: ldrex %0, [%3]\n"
108 " strex %2, %1, [%3]\n"
111 : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
112 : "r" (&lock->slock), "I" (1 << 16)
/* spin until the owner field catches up with our ticket */
115 while (lockval.tickets.next != lockval.tickets.owner) {
117 lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
/* non-SMP / non-BCM2835 build: locking is a no-op */
122 inline void fiq_fsm_spin_lock(fiq_lock_t *lock) { }
126 * fiq_fsm_spin_unlock() - ARMv6+ bare bones spinunlock
128 #if defined(CONFIG_ARCH_BCM2835) && defined(CONFIG_SMP)
129 inline void fiq_fsm_spin_unlock(fiq_lock_t *lock)
/* release: hand the lock to the next ticket holder */
132 lock->tickets.owner++;
/* non-SMP / non-BCM2835 build: unlocking is a no-op */
136 inline void fiq_fsm_spin_unlock(fiq_lock_t *lock) { }
142 * fiq_fsm_restart_channel() - Poke channel enable bit for a split transaction
143 * @channel: channel to re-enable
145 static void fiq_fsm_restart_channel(struct fiq_state *st, int n, int force)
147 hcchar_data_t hcchar = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR) };
/* Periodic endpoints (eptype low bit set: isoc/interrupt) need the odd-frame
 * bit recomputed so the transaction goes out in the *next* microframe. */
150 if (st->channel[n].hcchar_copy.b.eptype & 0x1) {
151 hfnum_data_t hfnum = { .d32 = FIQ_READ(st->dwc_regs_base + HFNUM) };
152 /* Hardware bug workaround: update the ssplit index */
153 if (st->channel[n].hcsplt_copy.b.spltena)
154 st->channel[n].expected_uframe = (hfnum.b.frnum + 1) & 0x3FFF;
/* next uframe has opposite parity to the current frame number */
156 hcchar.b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
159 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR, hcchar.d32);
/* NOTE(review): read-back then second write - the lines between presumably
 * set the channel-enable bit; not visible in this listing. */
160 hcchar.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
163 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR, hcchar.d32);
164 fiq_print(FIQDBG_INT, st, "HCGO %01d %01d", n, force);
168 * fiq_fsm_setup_csplit() - Prepare a host channel for a CSplit transaction stage
169 * @st: Pointer to the channel's state
170 * @n : channel number
172 * Change host channel registers to perform a complete-split transaction. Being mindful of the
173 * endpoint direction, set control regs up correctly.
175 static void notrace fiq_fsm_setup_csplit(struct fiq_state *st, int n)
177 hcsplt_data_t hcsplt = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT) };
178 hctsiz_data_t hctsiz = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ) };
/* flag the next transaction issued on this channel as the complete-split phase */
180 hcsplt.b.compsplt = 1;
181 if (st->channel[n].hcchar_copy.b.epdir == 1) {
182 // If IN, the CSPLIT result contains the data or a hub handshake. hctsiz = maxpacket.
183 hctsiz.b.xfersize = st->channel[n].hctsiz_copy.b.xfersize;
185 // If OUT, the CSPLIT result contains handshake only.
186 hctsiz.b.xfersize = 0;
188 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT, hcsplt.d32);
189 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, hctsiz.d32);
194 * fiq_fsm_restart_np_pending() - Restart a single non-periodic contended transfer
195 * @st: Pointer to the channel's state
196 * @num_channels: Total number of host channels
197 * @orig_channel: Channel index of completed transfer
199 * In the case where an IN and OUT transfer are simultaneously scheduled to the
200 * same device/EP, inadequate hub implementations will misbehave. Once the first
201 * transfer is complete, a pending non-periodic split can then be issued.
203 static void notrace fiq_fsm_restart_np_pending(struct fiq_state *st, int num_channels, int orig_channel)
206 int dev_addr = st->channel[orig_channel].hcchar_copy.b.devaddr;
207 int ep_num = st->channel[orig_channel].hcchar_copy.b.epnum;
/* Scan for a held-off SSPLIT targeting the same device address and endpoint
 * number as the transfer that just finished, and kick it off. NOTE(review):
 * no break is visible here, but the doc comment says "a single" transfer -
 * confirm loop exit against the full source. */
208 for (i = 0; i < num_channels; i++) {
209 if (st->channel[i].fsm == FIQ_NP_SSPLIT_PENDING &&
210 st->channel[i].hcchar_copy.b.devaddr == dev_addr &&
211 st->channel[i].hcchar_copy.b.epnum == ep_num) {
212 st->channel[i].fsm = FIQ_NP_SSPLIT_STARTED;
213 fiq_fsm_restart_channel(st, i, 0);
/*
 * fiq_get_xfer_len() - number of bytes transferred on channel n.
 * OUT endpoints report the originally-programmed length; IN endpoints report
 * programmed length minus the bytes the core says are still outstanding.
 */
219 static inline int notrace fiq_get_xfer_len(struct fiq_state *st, int n)
221 /* The xfersize register is a bit wonky. For IN transfers, it decrements by the packet size. */
222 hctsiz_data_t hctsiz = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ) };
224 if (st->channel[n].hcchar_copy.b.epdir == 0) {
/* OUT: the whole programmed transfer was sent */
225 return st->channel[n].hctsiz_copy.b.xfersize;
/* IN: programmed size minus remaining count in the live register */
227 return st->channel[n].hctsiz_copy.b.xfersize - hctsiz.b.xfersize;
234 * fiq_increment_dma_buf() - update DMA address for bounce buffers after a CSPLIT
236 * Of use only for IN periodic transfers.
238 static int notrace fiq_increment_dma_buf(struct fiq_state *st, int num_channels, int n)
241 int i = st->channel[n].dma_info.index;
243 struct fiq_dma_blob *blob = (struct fiq_dma_blob *) st->dma_base;
/* record how many bytes the just-completed CSPLIT deposited into slot i */
245 len = fiq_get_xfer_len(st, n);
246 fiq_print(FIQDBG_INT, st, "LEN: %03d", len);
247 st->channel[n].dma_info.slot_len[i] = len;
/* NOTE(review): i is presumably advanced on a line not shown here before the
 * next bounce-buffer slot is programmed - confirm against the full source. */
252 hcdma.d32 = (dma_addr_t) &blob->channel[n].index[i].buf[0];
253 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
254 st->channel[n].dma_info.index = i;
259 * fiq_reload_hctsiz() - for IN transactions, reset HCTSIZ
261 static void notrace fiq_fsm_reload_hctsiz(struct fiq_state *st, int n)
263 hctsiz_data_t hctsiz = { .d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ) };
/* rewind xfersize to the cached originally-programmed value for the next transaction */
264 hctsiz.b.xfersize = st->channel[n].hctsiz_copy.b.xfersize;
266 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, hctsiz.d32);
270 * fiq_fsm_reload_hcdma() - for OUT transactions, rewind DMA pointer
272 static void notrace fiq_fsm_reload_hcdma(struct fiq_state *st, int n)
/* restore the cached original DMA address so the OUT payload is re-sent from the start */
274 hcdma_data_t hcdma = st->channel[n].hcdma_copy;
275 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
279 * fiq_iso_out_advance() - update DMA address and split position bits
280 * for isochronous OUT transactions.
282 * Returns 1 if this is the last packet queued, 0 otherwise. Split-ALL and
283 * Split-BEGIN states are not handled - this is done when the transaction was queued.
285 * This function must only be called from the FIQ_ISO_OUT_ACTIVE state.
287 static int notrace fiq_iso_out_advance(struct fiq_state *st, int num_channels, int n)
289 hcsplt_data_t hcsplt;
290 hctsiz_data_t hctsiz;
292 struct fiq_dma_blob *blob = (struct fiq_dma_blob *) st->dma_base;
294 int i = st->channel[n].dma_info.index;
296 fiq_print(FIQDBG_INT, st, "ADV %01d %01d ", n, i);
/* slot_len value 255 is the end-of-slots sentinel: the *next* slot being the
 * sentinel makes the current packet the last one (sets "last" - assignment
 * not visible in this listing). */
300 if (st->channel[n].dma_info.slot_len[i+1] == 255)
303 /* New DMA address - address of bounce buffer referred to in index */
304 hcdma.d32 = (dma_addr_t) blob->channel[n].index[i].buf;
305 //hcdma.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA);
306 //hcdma.d32 += st->channel[n].dma_info.slot_len[i];
307 fiq_print(FIQDBG_INT, st, "LAST: %01d ", last);
308 fiq_print(FIQDBG_INT, st, "LEN: %03d", st->channel[n].dma_info.slot_len[i]);
309 hcsplt.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT);
310 hctsiz.d32 = FIQ_READ(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ);
/* final packet uses END transaction position, intermediate packets use MID */
311 hcsplt.b.xactpos = (last) ? ISOC_XACTPOS_END : ISOC_XACTPOS_MID;
312 /* Set up new packet length */
314 hctsiz.b.xfersize = st->channel[n].dma_info.slot_len[i];
315 fiq_print(FIQDBG_INT, st, "%08x", hctsiz.d32);
317 st->channel[n].dma_info.index++;
318 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCSPLT, hcsplt.d32);
319 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, hctsiz.d32);
320 FIQ_WRITE(st->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
325 * fiq_fsm_tt_next_isoc() - queue next pending isochronous out start-split on a TT
327 * Despite the limitations of the DWC core, we can force a microframe pipeline of
328 * isochronous OUT start-split transactions while waiting for a corresponding other-type
329 * of endpoint to finish its CSPLITs. TTs have big periodic buffers therefore it
330 * is very unlikely that filling the start-split FIFO will cause data loss.
331 * This allows much better interleaving of transactions in an order-independent way-
332 * there is no requirement to prioritise isochronous, just a state-space search has
333 * to be performed on each periodic start-split complete interrupt.
335 static int notrace fiq_fsm_tt_next_isoc(struct fiq_state *st, int num_channels, int n)
337 int hub_addr = st->channel[n].hub_addr;
338 int port_addr = st->channel[n].port_addr;
/* scan other FSM-managed channels for one behind the same hub/port (same TT) */
340 for (i = 0; i < num_channels; i++) {
341 if (i == n || st->channel[i].fsm == FIQ_PASSTHROUGH)
343 if (st->channel[i].hub_addr == hub_addr &&
344 st->channel[i].port_addr == port_addr) {
345 switch (st->channel[i].fsm) {
346 case FIQ_PER_ISO_OUT_PENDING:
/* single-packet transfers go straight to the LAST state, otherwise ACTIVE */
347 if (st->channel[i].nrpackets == 1) {
348 st->channel[i].fsm = FIQ_PER_ISO_OUT_LAST;
350 st->channel[i].fsm = FIQ_PER_ISO_OUT_ACTIVE;
352 fiq_fsm_restart_channel(st, i, 0);
367 * fiq_fsm_tt_in_use() - search for host channels using this TT
368 * @n: Channel to use as reference
371 int notrace noinline fiq_fsm_tt_in_use(struct fiq_state *st, int num_channels, int n)
373 int hub_addr = st->channel[n].hub_addr;
374 int port_addr = st->channel[n].port_addr;
/* Look for any other FSM-managed channel mid-way through a periodic split on
 * the same hub/port pair. NOTE(review): the return statements are not visible
 * in this listing; presumably nonzero when a match is found. */
376 for (i = 0; i < num_channels; i++) {
377 if (i == n || st->channel[i].fsm == FIQ_PASSTHROUGH)
379 switch (st->channel[i].fsm) {
380 /* TT is reserved for channels that are in the middle of a periodic
383 case FIQ_PER_SSPLIT_STARTED:
384 case FIQ_PER_CSPLIT_WAIT:
385 case FIQ_PER_CSPLIT_NYET1:
386 //case FIQ_PER_CSPLIT_POLL:
387 case FIQ_PER_ISO_OUT_ACTIVE:
388 case FIQ_PER_ISO_OUT_LAST:
389 if (st->channel[i].hub_addr == hub_addr &&
390 st->channel[i].port_addr == port_addr) {
404 * fiq_fsm_more_csplits() - determine whether additional CSPLITs need
405 * to be issued for this IN transaction.
407 * We cannot tell the inbound PID of a data packet due to hardware limitations.
408 * we need to make an educated guess as to whether we need to queue another CSPLIT
409 * or not. A no-brainer is when we have received enough data to fill the endpoint
410 * size, but for endpoints that give variable-length data then we have to resort
413 * We also return whether this is the last CSPLIT to be queued, again based on
414 * heuristics. This is to allow a 1-uframe overlap of periodic split transactions.
415 * Note: requires at least 1 CSPLIT to have been performed prior to being called.
419 * We need some way of guaranteeing if a returned periodic packet of size X
421 * The heuristic value of 144 bytes assumes that the received data has maximal
422 * bit-stuffing and the clock frequency of the transmitting device is at the lowest
423 * permissible limit. If the transfer length results in a final packet size
424 * 144 < p <= 188, then an erroneous CSPLIT will be issued.
425 * Also used to ensure that an endpoint will nominally only return a single
426 * complete-split worth of data.
428 #define DATA0_PID_HEURISTIC 144
430 static int notrace noinline fiq_fsm_more_csplits(struct fiq_state *state, int n, int *probably_last)
436 struct fiq_channel_state *st = &state->channel[n];
/* sum up everything received by the CSPLITs completed so far */
438 for (i = 0; i < st->dma_info.index; i++) {
439 total_len += st->dma_info.slot_len[i];
444 if (st->hcchar_copy.b.eptype == 0x3) {
446 * An interrupt endpoint will take max 2 CSPLITs. if we are receiving data
447 * then this is definitely the last CSPLIT.
451 /* Isoc IN. This is a bit risky if we are the first transaction:
452 * we may have been held off slightly. */
453 if (i > 1 && st->dma_info.slot_len[st->dma_info.index-1] <= DATA0_PID_HEURISTIC) {
456 /* If in the next uframe we will receive enough data to fill the endpoint,
457 * then only issue 1 more csplit.
459 if (st->hctsiz_copy.b.xfersize - total_len <= DATA0_PID_HEURISTIC)
/* stop when the transfer is satisfied, we hit the CSPLIT budget, or nothing arrived */
463 if (total_len >= st->hctsiz_copy.b.xfersize ||
464 i == 6 || total_len == 0)
465 /* Note: due to bit stuffing it is possible to have > 6 CSPLITs for
466 * a single endpoint. Accepting more would completely break our scheduling mechanism though
467 * - in these extreme cases we will pass through a truncated packet.
475 * fiq_fsm_too_late() - Test transaction for lateness
477 * If a SSPLIT for a large IN transaction is issued too late in a frame,
478 * the hub will disable the port to the device and respond with ERR handshakes.
479 * The hub status endpoint will not reflect this change.
480 * Returns 1 if we will issue a SSPLIT that will result in a device babble.
482 int notrace fiq_fsm_too_late(struct fiq_state *st, int n)
485 hfnum_data_t hfnum = { .d32 = FIQ_READ(st->dwc_regs_base + HFNUM) };
/* current microframe within the full-speed frame (0..7) */
486 uframe = hfnum.b.frnum & 0x7;
/* too late if the transaction's nrpackets+1 microframes would spill past uframe 7 */
487 if ((uframe < 6) && (st->channel[n].nrpackets + 1 + uframe > 7)) {
496 * fiq_fsm_start_next_periodic() - A half-arsed attempt at a microframe pipeline
498 * Search pending transactions in the start-split pending state and queue them.
499 * Don't queue packets in uframe .5 (comes out in .6) (USB2.0 11.18.4).
500 * Note: we specifically don't do isochronous OUT transactions first because better
501 * use of the TT's start-split fifo can be achieved by pipelining an IN before an OUT.
503 static void notrace noinline fiq_fsm_start_next_periodic(struct fiq_state *st, int num_channels)
506 hfnum_data_t hfnum = { .d32 = FIQ_READ(st->dwc_regs_base + HFNUM) };
/* per USB2.0 11.18.4, never issue a periodic SSPLIT in microframe 5 */
507 if ((hfnum.b.frnum & 0x7) == 5)
/* first pass: queued periodic (non-isoc-OUT) start-splits */
509 for (n = 0; n < num_channels; n++) {
510 if (st->channel[n].fsm == FIQ_PER_SSPLIT_QUEUED) {
511 /* Check to see if any other transactions are using this TT */
512 if(!fiq_fsm_tt_in_use(st, num_channels, n)) {
513 if (!fiq_fsm_too_late(st, n)) {
514 st->channel[n].fsm = FIQ_PER_SSPLIT_STARTED;
515 fiq_print(FIQDBG_INT, st, "NEXTPER ");
516 fiq_fsm_restart_channel(st, n, 0);
/* too late in the frame to start safely - fail the transfer */
518 st->channel[n].fsm = FIQ_PER_SPLIT_TIMEOUT;
/* second pass: pending isochronous OUT start-splits */
524 for (n = 0; n < num_channels; n++) {
525 if (st->channel[n].fsm == FIQ_PER_ISO_OUT_PENDING) {
526 if (!fiq_fsm_tt_in_use(st, num_channels, n)) {
527 fiq_print(FIQDBG_INT, st, "NEXTISO ");
528 if (st->channel[n].nrpackets == 1)
529 st->channel[n].fsm = FIQ_PER_ISO_OUT_LAST;
531 st->channel[n].fsm = FIQ_PER_ISO_OUT_ACTIVE;
532 fiq_fsm_restart_channel(st, n, 0);
540 * fiq_fsm_update_hs_isoc() - update isochronous frame and transfer data
541 * @state: Pointer to fiq_state
542 * @n: Channel transaction is active on
543 * @hcint: Copy of host channel interrupt register
545 * Returns 0 if there are no more transactions for this HC to do, 1
548 static int notrace noinline fiq_fsm_update_hs_isoc(struct fiq_state *state, int n, hcint_data_t hcint)
550 struct fiq_channel_state *st = &state->channel[n];
551 int xfer_len = 0, nrpackets = 0;
553 fiq_print(FIQDBG_INT, state, "HSISO %02d", n);
/* record the completed descriptor's actual length and raw interrupt status */
555 xfer_len = fiq_get_xfer_len(state, n);
556 st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].actual_length = xfer_len;
558 st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].status = hcint.d32;
/* advance to the next descriptor; all done when index reaches nrframes */
560 st->hs_isoc_info.index++;
561 if (st->hs_isoc_info.index == st->hs_isoc_info.nrframes) {
565 /* grab the next DMA address offset from the array */
566 hcdma.d32 = st->hcdma_copy.d32 + st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].offset;
567 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HC_DMA, hcdma.d32);
569 /* We need to set multi_count. This is a bit tricky - has to be set per-transaction as
570 * the core needs to be told to send the correct number. Caution: for IN transfers,
571 * this is always set to the maximum size of the endpoint. */
572 xfer_len = st->hs_isoc_info.iso_desc[st->hs_isoc_info.index].length;
573 /* Integer divide in a FIQ: fun. FIXME: make this not suck */
574 nrpackets = (xfer_len + st->hcchar_copy.b.mps - 1) / st->hcchar_copy.b.mps;
577 st->hcchar_copy.b.multicnt = nrpackets;
578 st->hctsiz_copy.b.pktcnt = nrpackets;
580 /* Initial PID also needs to be set */
581 if (st->hcchar_copy.b.epdir == 0) {
/* OUT: exact transfer length; multi-packet transactions start with MDATA */
582 st->hctsiz_copy.b.xfersize = xfer_len;
583 switch (st->hcchar_copy.b.multicnt) {
585 st->hctsiz_copy.b.pid = DWC_PID_DATA0;
589 st->hctsiz_copy.b.pid = DWC_PID_MDATA;
/* IN: request whole max-packet multiples; PID picked by expected packet count */
594 st->hctsiz_copy.b.xfersize = nrpackets * st->hcchar_copy.b.mps;
595 switch (st->hcchar_copy.b.multicnt) {
597 st->hctsiz_copy.b.pid = DWC_PID_DATA0;
600 st->hctsiz_copy.b.pid = DWC_PID_DATA1;
603 st->hctsiz_copy.b.pid = DWC_PID_DATA2;
607 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCTSIZ, st->hctsiz_copy.d32);
608 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR, st->hcchar_copy.d32);
609 /* Channel is enabled on hcint handler exit */
610 fiq_print(FIQDBG_INT, state, "HSISOOUT");
616 * fiq_fsm_do_sof() - FSM start-of-frame interrupt handler
617 * @state: Pointer to the state struct passed from banked FIQ mode registers.
618 * @num_channels: set according to the DWC hardware configuration
620 * The SOF handler in FSM mode has two functions
621 * 1. Hold off SOF from causing schedule advancement in IRQ context if there's
623 * 2. Advance certain FSM states that require either a microframe delay, or a microframe
626 * The second part is architecture-specific to mach-bcm2835 -
627 * a sane interrupt controller would have a mask register for ARM interrupt sources
628 * to be promoted to the nFIQ line, but it doesn't. Instead a single interrupt
629 * number (USB) can be enabled. This means that certain parts of the USB specification
630 * that require "wait a little while, then issue another packet" cannot be fulfilled with
631 * the timing granularity required to achieve optimal throughout. The workaround is to use
632 * the SOF "timer" (125uS) to perform this task.
634 static int notrace noinline fiq_fsm_do_sof(struct fiq_state *state, int num_channels)
636 hfnum_data_t hfnum = { .d32 = FIQ_READ(state->dwc_regs_base + HFNUM) };
640 if ((hfnum.b.frnum & 0x7) == 1) {
641 /* We cannot issue csplits for transactions in the last frame past (n+1).1
642 * Check to see if there are any transactions that are stale.
645 for (n = 0; n < num_channels; n++) {
646 switch (state->channel[n].fsm) {
647 case FIQ_PER_CSPLIT_WAIT:
648 case FIQ_PER_CSPLIT_NYET1:
649 case FIQ_PER_CSPLIT_POLL:
650 case FIQ_PER_CSPLIT_LAST:
651 /* Check if we are no longer in the same full-speed frame. */
/* NOTE(review): plain < comparison does not account for frnum wraparound
 * (frnum is a 14-bit counter) - confirm intended against full source. */
652 if (((state->channel[n].expected_uframe & 0x3FFF) & ~0x7) <
653 (hfnum.b.frnum & ~0x7))
654 state->channel[n].fsm = FIQ_PER_SPLIT_TIMEOUT;
/* per-channel microframe housekeeping on every SOF */
662 for (n = 0; n < num_channels; n++) {
663 switch (state->channel[n].fsm) {
/* held-off non-periodic retries get kicked on the uframe boundary */
665 case FIQ_NP_SSPLIT_RETRY:
666 case FIQ_NP_IN_CSPLIT_RETRY:
667 case FIQ_NP_OUT_CSPLIT_RETRY:
668 fiq_fsm_restart_channel(state, n, 0);
671 case FIQ_HS_ISOC_SLEEPING:
672 /* Is it time to wake this channel yet? */
673 if (--state->channel[n].uframe_sleeps == 0) {
674 state->channel[n].fsm = FIQ_HS_ISOC_TURBO;
675 fiq_fsm_restart_channel(state, n, 0);
679 case FIQ_PER_SSPLIT_QUEUED:
/* per USB2.0 11.18.4, don't start a periodic SSPLIT in microframe 5 */
680 if ((hfnum.b.frnum & 0x7) == 5)
682 if(!fiq_fsm_tt_in_use(state, num_channels, n)) {
683 if (!fiq_fsm_too_late(state, n)) {
684 fiq_print(FIQDBG_INT, state, "SOF GO %01d", n);
685 fiq_fsm_restart_channel(state, n, 0);
686 state->channel[n].fsm = FIQ_PER_SSPLIT_STARTED;
688 /* Transaction cannot be started without risking a device babble error */
689 state->channel[n].fsm = FIQ_PER_SPLIT_TIMEOUT;
/* mask the channel and force the IRQ layer to pick up the failure */
690 state->haintmsk_saved.b2.chint &= ~(1 << n);
691 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINTMSK, 0);
697 case FIQ_PER_ISO_OUT_PENDING:
698 /* Ordinarily, this should be poked after the SSPLIT
699 * complete interrupt for a competing transfer on the same
700 * TT. Doesn't happen for aborted transactions though.
702 if ((hfnum.b.frnum & 0x7) >= 5)
704 if (!fiq_fsm_tt_in_use(state, num_channels, n)) {
705 /* Hardware bug. SOF can sometimes occur after the channel halt interrupt
708 fiq_fsm_restart_channel(state, n, 0);
709 fiq_print(FIQDBG_INT, state, "SOF ISOC");
710 if (state->channel[n].nrpackets == 1) {
711 state->channel[n].fsm = FIQ_PER_ISO_OUT_LAST;
713 state->channel[n].fsm = FIQ_PER_ISO_OUT_ACTIVE;
718 case FIQ_PER_CSPLIT_WAIT:
719 /* we are guaranteed to be in this state if and only if the SSPLIT interrupt
720 * occurred when the bus transaction occurred. The SOF interrupt reversal bug
721 * will utterly bugger this up though.
723 if (hfnum.b.frnum != state->channel[n].expected_uframe) {
724 fiq_print(FIQDBG_INT, state, "SOFCS %d ", n);
725 state->channel[n].fsm = FIQ_PER_CSPLIT_POLL;
726 fiq_fsm_restart_channel(state, n, 0);
727 fiq_fsm_start_next_periodic(state, num_channels);
732 case FIQ_PER_SPLIT_TIMEOUT:
733 case FIQ_DEQUEUE_ISSUED:
734 /* Ugly: we have to force a HCD interrupt.
735 * Poke the mask for the channel in question.
736 * We will take a fake SOF because of this, but
739 state->haintmsk_saved.b2.chint &= ~(1 << n);
740 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINTMSK, 0);
/* pass SOF through to IRQ context if the non-periodic queues need kicking
 * or the HCD's schedule is due (return value lines not visible here) */
749 if (state->kick_np_queues ||
750 dwc_frame_num_le(state->next_sched_frame, hfnum.b.frnum))
758 * fiq_fsm_do_hcintr() - FSM host channel interrupt handler
759 * @state: Pointer to the FIQ state struct
760 * @num_channels: Number of channels as per hardware config
761 * @n: channel for which HAINT(i) was raised
763 * An important property is that only the CHHLT interrupt is unmasked. Unfortunately, AHBerr is as well.
765 static int notrace noinline fiq_fsm_do_hcintr(struct fiq_state *state, int num_channels, int n)
768 hcintmsk_data_t hcintmsk;
769 hcint_data_t hcint_probe;
770 hcchar_data_t hcchar;
774 int start_next_periodic = 0;
775 struct fiq_channel_state *st = &state->channel[n];
/* snapshot the channel's interrupt state; hcint_probe = only unmasked bits */
778 hcint.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINT);
779 hcintmsk.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINTMSK);
780 hcint_probe.d32 = hcint.d32 & hcintmsk.d32;
782 if (st->fsm != FIQ_PASSTHROUGH) {
783 fiq_print(FIQDBG_INT, state, "HC%01d ST%02d", n, st->fsm);
784 fiq_print(FIQDBG_INT, state, "%08x", hcint.d32);
/* NOTE(review): this listing is truncated - the switch below continues past
 * the end of the visible text; remaining states are not shown here. */
789 case FIQ_PASSTHROUGH:
790 case FIQ_DEQUEUE_ISSUED:
791 /* doesn't belong to us, kick it upstairs */
794 case FIQ_PASSTHROUGH_ERRORSTATE:
795 /* We are here to emulate the error recovery mechanism of the dwc HCD.
796 * Several interrupts are unmasked if a previous transaction failed - it's
797 * death for the FIQ to attempt to handle them as the channel isn't halted.
798 * Emulate what the HCD does in this situation: mask and continue.
799 * The FSM has no other state setup so this has to be handled out-of-band.
801 fiq_print(FIQDBG_ERR, state, "ERRST %02d", n);
802 if (hcint_probe.b.nak || hcint_probe.b.ack || hcint_probe.b.datatglerr) {
803 fiq_print(FIQDBG_ERR, state, "RESET %02d", n);
804 /* In some random cases we can get a NAK interrupt coincident with a Xacterr
805 * interrupt, after the device has disappeared.
807 if (!hcint.b.xacterr)
811 hcintmsk.b.datatglerr = 0;
812 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINTMSK, hcintmsk.d32);
815 if (hcint_probe.b.chhltd) {
816 fiq_print(FIQDBG_ERR, state, "CHHLT %02d", n);
817 fiq_print(FIQDBG_ERR, state, "%08x", hcint.d32);
822 /* Non-periodic state groups */
823 case FIQ_NP_SSPLIT_STARTED:
824 case FIQ_NP_SSPLIT_RETRY:
825 /* Got a HCINT for a NP SSPLIT. Expected ACK / NAK / fail */
827 /* SSPLIT complete. For OUT, the data has been sent. For IN, the LS transaction
828 * will start shortly. SOF needs to kick the transaction to prevent a NYET flood.
830 if(st->hcchar_copy.b.epdir == 1)
831 st->fsm = FIQ_NP_IN_CSPLIT_RETRY;
833 st->fsm = FIQ_NP_OUT_CSPLIT_RETRY;
836 fiq_fsm_setup_csplit(state, n);
837 } else if (hcint.b.nak) {
838 // No buffer space in TT. Retry on a uframe boundary.
839 fiq_fsm_reload_hcdma(state, n);
840 st->fsm = FIQ_NP_SSPLIT_RETRY;
842 } else if (hcint.b.xacterr) {
843 // The only other one we care about is xacterr. This implies HS bus error - retry.
845 if(st->hcchar_copy.b.epdir == 0)
846 fiq_fsm_reload_hcdma(state, n);
847 st->fsm = FIQ_NP_SSPLIT_RETRY;
/* 3-strikes retry: give up after repeated HS bus errors */
848 if (st->nr_errors >= 3) {
849 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
855 st->fsm = FIQ_NP_SPLIT_LS_ABORTED;
861 case FIQ_NP_IN_CSPLIT_RETRY:
862 /* Received a CSPLIT done interrupt.
863 * Expected Data/NAK/STALL/NYET for IN.
865 if (hcint.b.xfercomp) {
866 /* For IN, data is present. */
867 st->fsm = FIQ_NP_SPLIT_DONE;
868 } else if (hcint.b.nak) {
869 /* no endpoint data. Punt it upstairs */
870 st->fsm = FIQ_NP_SPLIT_DONE;
871 } else if (hcint.b.nyet) {
872 /* CSPLIT NYET - retry on a uframe boundary. */
875 } else if (hcint.b.datatglerr) {
876 /* data toggle errors do not set the xfercomp bit. */
877 st->fsm = FIQ_NP_SPLIT_LS_ABORTED;
878 } else if (hcint.b.xacterr) {
879 /* HS error. Retry immediate */
880 st->fsm = FIQ_NP_IN_CSPLIT_RETRY;
882 if (st->nr_errors >= 3) {
883 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
888 } else if (hcint.b.stall || hcint.b.bblerr) {
889 /* A STALL implies either a LS bus error or a genuine STALL. */
890 st->fsm = FIQ_NP_SPLIT_LS_ABORTED;
892 /* Hardware bug. It's possible in some cases to
893 * get a channel halt with nothing else set when
894 * the response was a NYET. Treat as local 3-strikes retry.
896 hcint_data_t hcint_test = hcint;
897 hcint_test.b.chhltd = 0;
898 if (!hcint_test.d32) {
900 if (st->nr_errors >= 3) {
901 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
906 /* Bail out if something unexpected happened */
907 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
/* this transfer's TT slot is free - kick any held-off sibling transfer */
910 if (st->fsm != FIQ_NP_IN_CSPLIT_RETRY) {
911 fiq_fsm_restart_np_pending(state, num_channels, n);
915 case FIQ_NP_OUT_CSPLIT_RETRY:
916 /* Received a CSPLIT done interrupt.
917 * Expected ACK/NAK/STALL/NYET/XFERCOMP for OUT.*/
918 if (hcint.b.xfercomp) {
919 st->fsm = FIQ_NP_SPLIT_DONE;
920 } else if (hcint.b.nak) {
921 // The HCD will implement the holdoff on frame boundaries.
922 st->fsm = FIQ_NP_SPLIT_DONE;
923 } else if (hcint.b.nyet) {
924 // Hub still processing.
925 st->fsm = FIQ_NP_OUT_CSPLIT_RETRY;
929 } else if (hcint.b.xacterr) {
930 /* HS error. retry immediate */
931 st->fsm = FIQ_NP_OUT_CSPLIT_RETRY;
933 if (st->nr_errors >= 3) {
934 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
939 } else if (hcint.b.stall) {
940 /* LS bus error or genuine stall */
941 st->fsm = FIQ_NP_SPLIT_LS_ABORTED;
944 * Hardware bug. It's possible in some cases to get a
945 * channel halt with nothing else set when the response was a NYET.
946 * Treat as local 3-strikes retry.
948 hcint_data_t hcint_test = hcint;
949 hcint_test.b.chhltd = 0;
950 if (!hcint_test.d32) {
952 if (st->nr_errors >= 3) {
953 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
958 // Something unexpected happened. AHBerror or babble perhaps. Let the IRQ deal with it.
959 st->fsm = FIQ_NP_SPLIT_HS_ABORTED;
962 if (st->fsm != FIQ_NP_OUT_CSPLIT_RETRY) {
963 fiq_fsm_restart_np_pending(state, num_channels, n);
967 /* Periodic split states (except isoc out) */
968 case FIQ_PER_SSPLIT_STARTED:
969 /* Expect an ACK or failure for SSPLIT */
972 * SSPLIT transfer complete interrupt - the generation of this interrupt is fraught with bugs.
973 * For a packet queued in microframe n-3 to appear in n-2, if the channel is enabled near the EOF1
974 * point for microframe n-3, the packet will not appear on the bus until microframe n.
975 * Additionally, the generation of the actual interrupt is dodgy. For a packet appearing on the bus
976 * in microframe n, sometimes the interrupt is generated immediately. Sometimes, it appears in n+1
977 * coincident with SOF for n+1.
978 * SOF is also buggy. It can sometimes be raised AFTER the first bus transaction has taken place.
979 * These appear to be caused by timing/clock crossing bugs within the core itself.
980 * State machine workaround.
982 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
983 hcchar.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
984 fiq_fsm_setup_csplit(state, n);
985 /* Poke the oddfrm bit. If we are equivalent, we received the interrupt at the correct
986 * time. If not, then we're in the next SOF.
988 if ((hfnum.b.frnum & 0x1) == hcchar.b.oddfrm) {
989 fiq_print(FIQDBG_INT, state, "CSWAIT %01d", n);
990 st->expected_uframe = hfnum.b.frnum;
991 st->fsm = FIQ_PER_CSPLIT_WAIT;
993 fiq_print(FIQDBG_INT, state, "CSPOL %01d", n);
994 /* For isochronous IN endpoints,
995 * we need to hold off if we are expecting a lot of data */
996 if (st->hcchar_copy.b.mps < DATA0_PID_HEURISTIC) {
997 start_next_periodic = 1;
999 /* Danger will robinson: we are in a broken state. If our first interrupt after
1000 * this is a NYET, it will be delayed by 1 uframe and result in an unrecoverable
1001 * lag. Unmask the NYET interrupt.
1003 st->expected_uframe = (hfnum.b.frnum + 1) & 0x3FFF;
1004 st->fsm = FIQ_PER_CSPLIT_BROKEN_NYET1;
1008 } else if (hcint.b.xacterr) {
1009 /* 3-strikes retry is enabled, we have hit our max nr_errors */
1010 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1011 start_next_periodic = 1;
1013 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1014 start_next_periodic = 1;
1016 /* We can now queue the next isochronous OUT transaction, if one is pending. */
1017 if(fiq_fsm_tt_next_isoc(state, num_channels, n)) {
1018 fiq_print(FIQDBG_INT, state, "NEXTISO ");
1022 case FIQ_PER_CSPLIT_NYET1:
1023 /* First CSPLIT attempt was a NYET. If we get a subsequent NYET,
1024 * we are too late and the TT has dropped its CSPLIT fifo.
1026 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
1027 hcchar.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
1028 start_next_periodic = 1;
1030 st->fsm = FIQ_PER_SPLIT_DONE;
1031 } else if (hcint.b.xfercomp) {
1032 fiq_increment_dma_buf(state, num_channels, n);
1033 st->fsm = FIQ_PER_CSPLIT_POLL;
1035 if (fiq_fsm_more_csplits(state, n, &last_csplit)) {
/* more CSPLITs to do - hold the TT, don't start another periodic yet */
1039 start_next_periodic = 0;
1041 st->fsm = FIQ_PER_SPLIT_DONE;
1043 } else if (hcint.b.nyet) {
1044 /* Doh. Data lost. */
1045 st->fsm = FIQ_PER_SPLIT_NYET_ABORTED;
1046 } else if (hcint.b.xacterr || hcint.b.stall || hcint.b.bblerr) {
1047 st->fsm = FIQ_PER_SPLIT_LS_ABORTED;
1049 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1053 case FIQ_PER_CSPLIT_BROKEN_NYET1:
1055 * we got here because our host channel is in the delayed-interrupt
1056 * state and we cannot take a NYET interrupt any later than when it
1057 * occurred. Disable then re-enable the channel if this happens to force
1058 * CSPLITs to occur at the right time.
1060 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
1061 hcchar.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
1062 fiq_print(FIQDBG_INT, state, "BROK: %01d ", n);
1064 st->fsm = FIQ_PER_SPLIT_DONE;
1065 start_next_periodic = 1;
1066 } else if (hcint.b.xfercomp) {
1067 fiq_increment_dma_buf(state, num_channels, n);
1068 if (fiq_fsm_more_csplits(state, n, &last_csplit)) {
1069 st->fsm = FIQ_PER_CSPLIT_POLL;
1072 start_next_periodic = 1;
1073 /* Reload HCTSIZ for the next transfer */
1074 fiq_fsm_reload_hctsiz(state, n);
1076 start_next_periodic = 0;
1078 st->fsm = FIQ_PER_SPLIT_DONE;
1080 } else if (hcint.b.nyet) {
1081 st->fsm = FIQ_PER_SPLIT_NYET_ABORTED;
1082 start_next_periodic = 1;
1083 } else if (hcint.b.xacterr || hcint.b.stall || hcint.b.bblerr) {
1084 /* Local 3-strikes retry is handled by the core. This is a ERR response.*/
1085 st->fsm = FIQ_PER_SPLIT_LS_ABORTED;
1087 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1091 case FIQ_PER_CSPLIT_POLL:
1092 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
1093 hcchar.d32 = FIQ_READ(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCCHAR);
1094 start_next_periodic = 1;
1096 st->fsm = FIQ_PER_SPLIT_DONE;
1097 } else if (hcint.b.xfercomp) {
1098 fiq_increment_dma_buf(state, num_channels, n);
1099 if (fiq_fsm_more_csplits(state, n, &last_csplit)) {
1102 /* Reload HCTSIZ for the next transfer */
1103 fiq_fsm_reload_hctsiz(state, n);
1105 start_next_periodic = 0;
1107 st->fsm = FIQ_PER_SPLIT_DONE;
1109 } else if (hcint.b.nyet) {
1110 /* Are we a NYET after the first data packet? */
1111 if (st->nrpackets == 0) {
1112 st->fsm = FIQ_PER_CSPLIT_NYET1;
1116 /* We got a NYET when polling CSPLITs. Can happen
1117 * if our heuristic fails, or if someone disables us
1118 * for any significant length of time.
1120 if (st->nr_errors >= 3) {
1121 st->fsm = FIQ_PER_SPLIT_NYET_ABORTED;
1123 st->fsm = FIQ_PER_SPLIT_DONE;
1126 } else if (hcint.b.xacterr || hcint.b.stall || hcint.b.bblerr) {
1127 /* For xacterr, Local 3-strikes retry is handled by the core. This is a ERR response.*/
1128 st->fsm = FIQ_PER_SPLIT_LS_ABORTED;
1130 st->fsm = FIQ_PER_SPLIT_HS_ABORTED;
1134 case FIQ_HS_ISOC_TURBO:
1135 if (fiq_fsm_update_hs_isoc(state, n, hcint)) {
1136 /* more transactions to come */
1138 fiq_print(FIQDBG_INT, state, "HSISO M ");
1139 /* For strided transfers, put ourselves to sleep */
1140 if (st->hs_isoc_info.stride > 1) {
1141 st->uframe_sleeps = st->hs_isoc_info.stride - 1;
1142 st->fsm = FIQ_HS_ISOC_SLEEPING;
1147 st->fsm = FIQ_HS_ISOC_DONE;
1148 fiq_print(FIQDBG_INT, state, "HSISO F ");
1152 case FIQ_HS_ISOC_ABORTED:
1153 /* This abort is called by the driver rewriting the state mid-transaction
1154 * which allows the dequeue mechanism to work more effectively.
1158 case FIQ_PER_ISO_OUT_ACTIVE:
1160 if(fiq_iso_out_advance(state, num_channels, n)) {
1161 /* last OUT transfer */
1162 st->fsm = FIQ_PER_ISO_OUT_LAST;
1164 * Assuming the periodic FIFO in the dwc core
1165 * actually does its job properly, we can queue
1166 * the next ssplit now and in theory, the wire
1167 * transactions will be in-order.
1169 // No it doesn't. It appears to process requests in host channel order.
1170 //start_next_periodic = 1;
1176 * Isochronous transactions carry on regardless. Log the error
1181 if(fiq_iso_out_advance(state, num_channels, n)) {
1182 st->fsm = FIQ_PER_ISO_OUT_LAST;
1183 //start_next_periodic = 1;
1190 case FIQ_PER_ISO_OUT_LAST:
1193 st->fsm = FIQ_PER_ISO_OUT_DONE;
1195 st->fsm = FIQ_PER_ISO_OUT_DONE;
1198 start_next_periodic = 1;
1201 case FIQ_PER_SPLIT_TIMEOUT:
1202 /* SOF kicked us because we overran. */
1203 start_next_periodic = 1;
1211 FIQ_WRITE(state->dwc_regs_base + HC_START + (HC_OFFSET * n) + HCINT, hcint.d32);
1213 /* Copy the regs into the state so the IRQ knows what to do */
1214 st->hcint_copy.d32 = hcint.d32;
1218 /* Restart always implies handled. */
1220 /* For complete-split INs, the show must go on.
1221 * Force a channel restart */
1222 fiq_fsm_restart_channel(state, n, 1);
1224 fiq_fsm_restart_channel(state, n, 0);
1227 if (start_next_periodic) {
1228 fiq_fsm_start_next_periodic(state, num_channels);
1230 if (st->fsm != FIQ_PASSTHROUGH)
1231 fiq_print(FIQDBG_INT, state, "FSMOUT%02d", st->fsm);
1238 * dwc_otg_fiq_fsm() - Flying State Machine (monster) FIQ
1239 * @state: pointer to state struct passed from the banked FIQ mode registers.
1240 * @num_channels: set according to the DWC hardware configuration
1241 * @dma: pointer to DMA bounce buffers for split transaction slots
1243 * The FSM FIQ performs the low-level tasks that normally would be performed by the microcode
1244 * inside an EHCI or similar host controller regarding split transactions. The DWC core
1245 * interrupts each and every time a split transaction packet is received or sent successfully.
1246 * This results in either an interrupt storm when everything is working "properly", or
1247 * the interrupt latency of the system in general breaks time-sensitive periodic split
1248 * transactions. Pushing the low-level, but relatively easy state machine work into the FIQ
1249 * solves these problems.
1253 void notrace dwc_otg_fiq_fsm(struct fiq_state *state, int num_channels)
1255 gintsts_data_t gintsts, gintsts_handled;
1256 gintmsk_data_t gintmsk;
1257 //hfnum_data_t hfnum;
1258 haint_data_t haint, haint_handled;
1259 haintmsk_data_t haintmsk;
1262 /* Ensure peripheral reads issued prior to FIQ entry are complete */
/* Accumulators for the interrupt sources this FIQ fully services; only
 * these are acked in GINTSTS/HAINT at the end. */
1265 gintsts_handled.d32 = 0;
1266 haint_handled.d32 = 0;
/* All register traffic below is serialised against the IRQ-side handlers
 * by the shared FIQ spinlock. */
1268 fiq_fsm_spin_lock(&state->lock);
1269 gintsts.d32 = FIQ_READ(state->dwc_regs_base + GINTSTS);
1270 gintmsk.d32 = FIQ_READ(state->dwc_regs_base + GINTMSK);
/* Only act on interrupt sources that are currently unmasked. */
1271 gintsts.d32 &= gintmsk.d32;
1273 if (gintsts.b.sofintr) {
1274 /* For FSM mode, SOF is required to keep the state machine advance for
1275 * certain stages of the periodic pipeline. It's death to mask this
1276 * interrupt in that case.
/* fiq_fsm_do_sof() returns zero when the IRQ-side SOF handler also needs
 * to run for this frame. */
1279 if (!fiq_fsm_do_sof(state, num_channels)) {
1280 /* Kick IRQ once. Queue advancement means that all pending transactions
1281 * will get serviced when the IRQ finally executes.
1283 if (state->gintmsk_saved.b.sofintr == 1)
/* Clearing the saved-mask bit is what flags "unhandled" to the
 * mask-update logic further down, which then wakes the IRQ. */
1285 state->gintmsk_saved.b.sofintr = 0;
/* SOF is edge-handled here in either case, so always ack it. */
1287 gintsts_handled.b.sofintr = 1;
1290 if (gintsts.b.hcintr) {
/* Host-channel interrupt: walk every asserted, unmasked channel bit and
 * let the per-channel FSM try to consume it. */
1292 haint.d32 = FIQ_READ(state->dwc_regs_base + HAINT);
1293 haintmsk.d32 = FIQ_READ(state->dwc_regs_base + HAINTMSK);
1294 haint.d32 &= haintmsk.d32;
1295 haint_handled.d32 = 0;
1296 for (i=0; i<num_channels; i++) {
1297 if (haint.b2.chint & (1 << i)) {
1298 if(!fiq_fsm_do_hcintr(state, num_channels, i)) {
1299 /* HCINT was not handled in FIQ
1300 * HAINT is level-sensitive, leading to level-sensitive ginststs.b.hcint bit.
1301 * Mask HAINT(i) but keep top-level hcint unmasked.
1303 state->haintmsk_saved.b2.chint &= ~(1 << i);
1305 /* do_hcintr cleaned up after itself, but clear haint */
1306 haint_handled.b2.chint |= (1 << i);
1311 if (haint_handled.b2.chint) {
/* HAINT is write-1-to-clear: ack only the channels the FSM consumed. */
1312 FIQ_WRITE(state->dwc_regs_base + HAINT, haint_handled.d32);
/* If any channel bit was newly cleared in haintmsk_saved, narrow the
 * hardware HAINTMSK so the unhandled channel stops retriggering the FIQ
 * until the IRQ has dealt with it. */
1315 if (haintmsk.d32 != (haintmsk.d32 & state->haintmsk_saved.d32)) {
1317 * This is necessary to avoid multiple retriggers of the MPHI in the case
1318 * where interrupts are held off and HCINTs start to pile up.
1319 * Only wake up the IRQ if a new interrupt came in, was not handled and was
1322 haintmsk.d32 &= state->haintmsk_saved.d32;
1323 FIQ_WRITE(state->dwc_regs_base + HAINTMSK, haintmsk.d32);
1326 /* Top-Level interrupt - always handled because it's level-sensitive */
1327 gintsts_handled.b.hcintr = 1;
1331 /* Clear the bits in the saved register that were not handled but were triggered. */
1332 state->gintmsk_saved.d32 &= ~(gintsts.d32 & ~gintsts_handled.d32);
1334 /* FIQ didn't handle something - mask has changed - write new mask */
1335 if (gintmsk.d32 != (gintmsk.d32 & state->gintmsk_saved.d32)) {
1336 gintmsk.d32 &= state->gintmsk_saved.d32;
/* SOF must stay unmasked in FSM mode (see comment above) even while
 * other unhandled sources are being masked off. */
1337 gintmsk.b.sofintr = 1;
1338 FIQ_WRITE(state->dwc_regs_base + GINTMSK, gintmsk.d32);
1339 // fiq_print(FIQDBG_INT, state, "KICKGINT");
1340 // fiq_print(FIQDBG_INT, state, "%08x", gintmsk.d32);
1341 // fiq_print(FIQDBG_INT, state, "%08x", state->gintmsk_saved.d32);
1345 if (gintsts_handled.d32) {
1346 /* Only applies to edge-sensitive bits in GINTSTS */
1347 FIQ_WRITE(state->dwc_regs_base + GINTSTS, gintsts_handled.d32);
1350 /* We got an interrupt, didn't handle it. */
/* Wake the CPU IRQ: either via the MPHI software-IRQ set register, or
 * (on hardware without it) via a dummy MPHI DMA send. */
1352 state->mphi_int_count++;
1353 if (state->mphi_regs.swirq_set) {
1354 FIQ_WRITE(state->mphi_regs.swirq_set, 1);
1356 FIQ_WRITE(state->mphi_regs.outdda, state->dummy_send_dma);
1357 FIQ_WRITE(state->mphi_regs.outddb, (1<<29));
1363 fiq_fsm_spin_unlock(&state->lock);
1368 * dwc_otg_fiq_nop() - FIQ "lite"
1369 * @state: pointer to state struct passed from the banked FIQ mode registers.
1371 * The "nop" handler does not intervene on any interrupts other than SOF.
1372 * It is limited in scope to deciding at each SOF if the IRQ SOF handler (which deals
1373 * with non-periodic/periodic queues) needs to be kicked.
1375 * This is done to hold off the SOF interrupt, which occurs at a rate of 8000 per second.
1379 void notrace dwc_otg_fiq_nop(struct fiq_state *state)
1381 gintsts_data_t gintsts, gintsts_handled;
1382 gintmsk_data_t gintmsk;
/* NOTE(review): hfnum is read below but its declaration (an original line
 * not shown in this extract) is presumably hfnum_data_t hfnum; - confirm. */
1385 /* Ensure peripheral reads issued prior to FIQ entry are complete */
1388 fiq_fsm_spin_lock(&state->lock);
1389 hfnum.d32 = FIQ_READ(state->dwc_regs_base + HFNUM);
1390 gintsts.d32 = FIQ_READ(state->dwc_regs_base + GINTSTS);
1391 gintmsk.d32 = FIQ_READ(state->dwc_regs_base + GINTMSK);
/* Only unmasked interrupt sources are considered. */
1392 gintsts.d32 &= gintmsk.d32;
1393 gintsts_handled.d32 = 0;
1395 if (gintsts.b.sofintr) {
/* SOF can be swallowed here only when no queue kick is pending AND the
 * next scheduled frame is still in the future; otherwise the IRQ-side
 * SOF handler must run. */
1396 if (!state->kick_np_queues &&
1397 dwc_frame_num_gt(state->next_sched_frame, hfnum.b.frnum)) {
1398 /* SOF handled, no work to do, just ACK interrupt */
1399 gintsts_handled.b.sofintr = 1;
/* Clearing the saved-mask bit marks SOF as unhandled, which causes the
 * mask-update path below to wake the IRQ (the else line itself is not
 * visible in this extract). */
1402 state->gintmsk_saved.b.sofintr = 0;
1406 /* Reset handled interrupts */
1407 if(gintsts_handled.d32) {
/* GINTSTS edge-sensitive bits are write-1-to-clear. */
1408 FIQ_WRITE(state->dwc_regs_base + GINTSTS, gintsts_handled.d32);
1411 /* Clear the bits in the saved register that were not handled but were triggered. */
1412 state->gintmsk_saved.d32 &= ~(gintsts.d32 & ~gintsts_handled.d32);
1414 /* We got an interrupt, didn't handle it and want to mask it */
1415 if (~(state->gintmsk_saved.d32)) {
1416 state->mphi_int_count++;
/* Mask the unhandled sources so they stop retriggering the FIQ; the IRQ
 * side restores the mask once it has serviced them. */
1417 gintmsk.d32 &= state->gintmsk_saved.d32;
1418 FIQ_WRITE(state->dwc_regs_base + GINTMSK, gintmsk.d32);
/* Wake the CPU IRQ via the MPHI software IRQ when available, otherwise
 * fall back to a dummy MPHI DMA send. */
1419 if (state->mphi_regs.swirq_set) {
1420 FIQ_WRITE(state->mphi_regs.swirq_set, 1);
1422 /* Force a clear before another dummy send */
1423 FIQ_WRITE(state->mphi_regs.intstat, (1<<29));
1424 FIQ_WRITE(state->mphi_regs.outdda, state->dummy_send_dma);
1425 FIQ_WRITE(state->mphi_regs.outddb, (1<<29));
1430 fiq_fsm_spin_unlock(&state->lock);