2 * Copyright (c) 2000-2004 by David Brownell
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/dmapool.h>
22 #include <linux/kernel.h>
23 #include <linux/delay.h>
24 #include <linux/ioport.h>
25 #include <linux/sched.h>
26 #include <linux/vmalloc.h>
27 #include <linux/errno.h>
28 #include <linux/init.h>
29 #include <linux/timer.h>
30 #include <linux/ktime.h>
31 #include <linux/list.h>
32 #include <linux/interrupt.h>
33 #include <linux/usb.h>
34 #include <linux/usb/hcd.h>
35 #include <linux/moduleparam.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/debugfs.h>
38 #include <linux/slab.h>
39 #include <linux/uaccess.h>
41 #include <asm/byteorder.h>
44 #include <asm/system.h>
45 #include <asm/unaligned.h>
47 /*-------------------------------------------------------------------------*/
50 * EHCI hc_driver implementation ... experimental, incomplete.
51 * Based on the final 1.0 register interface specification.
53 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
54 * First was PCMCIA, like ISA; then CardBus, which is PCI.
55 * Next comes "CardBay", using USB 2.0 signals.
57 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
58 * Special thanks to Intel and VIA for providing host controllers to
59 * test this driver on, and Cypress (including In-System Design) for
60 * providing early devices for those host controllers to talk to!
63 #define DRIVER_AUTHOR "David Brownell"
64 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
66 static const char hcd_name [] = "ehci_hcd";
76 /* magic numbers that can affect system performance */
77 #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
78 #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
79 #define EHCI_TUNE_RL_TT 0
80 #define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
81 #define EHCI_TUNE_MULT_TT 1
83 * Some drivers think it's safe to schedule isochronous transfers more than
84 * 256 ms into the future (partly as a result of an old bug in the scheduling
85 * code). In an attempt to avoid trouble, we will use a minimum scheduling
86 * length of 512 frames instead of 256.
88 #define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
90 #define EHCI_IAA_MSECS 10 /* arbitrary */
91 #define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
92 #define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
93 #define EHCI_SHRINK_FRAMES 5 /* async qh unlink delay */
95 /* Initial IRQ latency: faster than hw default */
96 static int log2_irq_thresh = 0; // 0 to 6
97 module_param (log2_irq_thresh, int, S_IRUGO);
98 MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
100 /* initial park setting: slower than hw default */
101 static unsigned park = 0;
102 module_param (park, uint, S_IRUGO);
103 MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
105 /* for flakey hardware, ignore overcurrent indicators */
106 static int ignore_oc = 0;
107 module_param (ignore_oc, bool, S_IRUGO);
108 MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
110 /* for link power management(LPM) feature */
111 static unsigned int hird;
112 module_param(hird, int, S_IRUGO);
113 MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
115 #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
117 /* for ASPM quirk of ISOC on AMD SB800 */
118 static struct pci_dev *amd_nb_dev;
120 /*-------------------------------------------------------------------------*/
123 #include "ehci-dbg.c"
125 /*-------------------------------------------------------------------------*/
/* NOTE(review): this is an elided numbered listing — braces, the switch header,
 * break statements and the function's opening are missing; code kept verbatim.
 * timer_action(): appears to arm the shared ehci->watchdog timer for the
 * requested ehci_timer_action, unless a shorter/higher-priority timeout
 * (SHRINK/OFF) is already pending — TODO confirm against the full source.
 */
128 timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
130 	/* Don't override timeouts which shrink or (later) disable
131 	 * the async ring; just the I/O watchdog. Note that if a
132 	 * SHRINK were pending, OFF would never be requested.
134 	if (timer_pending(&ehci->watchdog)
135 			&& ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
139 	if (!test_and_set_bit(action, &ehci->actions)) {
143 		case TIMER_IO_WATCHDOG:
144 			if (!ehci->need_io_watchdog)
148 		case TIMER_ASYNC_OFF:
149 			t = EHCI_ASYNC_JIFFIES;
151 		/* case TIMER_ASYNC_SHRINK: */
153 			/* add a jiffie since we synch against the
154 			 * 8 KHz uframe counter.
156 			t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
159 		mod_timer(&ehci->watchdog, t + jiffies);
163 /*-------------------------------------------------------------------------*/
166 * handshake - spin reading hc until handshake completes or fails
167 * @ptr: address of hc register to be read
168 * @mask: bits to look at in result of read
169 * @done: value of those bits when handshake succeeds
170 * @usec: timeout in microseconds
172 * Returns negative errno, or zero on success
174 * Success happens when the "mask" bits have the specified value (hardware
175 * handshake done). There are two failure modes: "usec" have passed (major
176 * hardware flakeout), or the register reads as all-ones (hardware removed).
178 * That last failure should only happen in cases like physical cardbus eject
179 * before driver shutdown. But it also seems to be caused by bugs in cardbus
180 * bridge shutdown: shutting down the bridge before the devices using it.
/* NOTE(review): elided listing — the polling loop and return paths are missing;
 * code kept verbatim.  Per the kernel-doc above: spins reading *ptr until
 * (value & mask) == done, times out after @usec, and treats an all-ones read
 * as hardware removal (e.g. cardbus eject).
 */
182 static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
183 		       u32 mask, u32 done, int usec)
188 		result = ehci_readl(ehci, ptr);
189 		if (result == ~(u32)0)		/* card removed */
/* NOTE(review): elided listing (opening brace, early return, and at least one
 * statement clearing run bits are missing); code kept verbatim.
 * ehci_halt(): masks interrupts, returns early if STS_HALT is already set,
 * otherwise rewrites USBCMD and waits up to 16*125us for the halted state.
 */
200 /* force HC to halt state from unknown (EHCI spec section 2.3) */
201 static int ehci_halt (struct ehci_hcd *ehci)
203 	u32	temp = ehci_readl(ehci, &ehci->regs->status);
205 	/* disable any irqs left enabled by previous code */
206 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
208 	if ((temp & STS_HALT) != 0)
211 	temp = ehci_readl(ehci, &ehci->regs->command);
213 	ehci_writel(ehci, temp, &ehci->regs->command);
214 	return handshake (ehci, &ehci->regs->status,
215 			  STS_HALT, STS_HALT, 16 * 125);
/* NOTE(review): elided listing; code kept verbatim.
 * Wrapper around handshake(): on failure it forces the hcd into
 * HC_STATE_HALT and logs the failing register/mask/value — presumably
 * also returning the handshake() error (the return is elided here).
 */
218 static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
219 				       u32 mask, u32 done, int usec)
223 	error = handshake(ehci, ptr, mask, done, usec);
226 		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
227 		ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
228 			ptr, mask, done, error);
/* NOTE(review): elided listing (big-endian branch body missing); code verbatim.
 * tdi_reset(): read-modify-write the USBMODE register to force TDI/ARC
 * dual-role silicon into host-controller mode (USBMODE_CM_HC), with an
 * endianness tweak when the MMIO space is big-endian.
 */
234 /* put TDI/ARC silicon into EHCI mode */
235 static void tdi_reset (struct ehci_hcd *ehci)
237 	u32 __iomem	*reg_ptr;
240 	reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
241 	tmp = ehci_readl(ehci, reg_ptr);
242 	tmp |= USBMODE_CM_HC;
243 	/* The default byte access to MMR space is LE after
244 	 * controller reset. Set the required endian mode
245 	 * for transfer buffers to match the host microprocessor
247 	if (ehci_big_endian_mmio(ehci))
249 	ehci_writel(ehci, tmp, reg_ptr);
/* NOTE(review): elided listing (retval declaration, error return, and several
 * closing braces missing); code kept verbatim.
 * ehci_reset(): sets CMD_RESET and handshakes (up to 250ms) for it to clear,
 * with special handling for an active debug-port controller, hostpc-capable
 * silicon (USBMODE_EX / TXFILLTUNING programming), and TDI parts.
 */
252 /* reset a non-running (STS_HALT == 1) controller */
253 static int ehci_reset (struct ehci_hcd *ehci)
256 	u32	command = ehci_readl(ehci, &ehci->regs->command);
258 	/* If the EHCI debug controller is active, special care must be
259 	 * taken before and after a host controller reset */
260 	if (ehci->debug && !dbgp_reset_prep())
263 	command |= CMD_RESET;
264 	dbg_cmd (ehci, "reset", command);
265 	ehci_writel(ehci, command, &ehci->regs->command);
266 	ehci_to_hcd(ehci)->state = HC_STATE_HALT;
267 	ehci->next_statechange = jiffies;
268 	retval = handshake (ehci, &ehci->regs->command,
269 			    CMD_RESET, 0, 250 * 1000);
271 	if (ehci->has_hostpc) {
272 		ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
273 			(u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX));
274 		ehci_writel(ehci, TXFIFO_DEFAULT,
275 			(u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING));
280 	if (ehci_is_TDI(ehci))
284 		dbgp_external_startup();
/* NOTE(review): elided listing; code kept verbatim.
 * ehci_quiesce(): waits for the async/periodic schedule status bits (STS_ASS/
 * STS_PSS) to match the commanded enables (USBCMD bits shifted by 10 to line
 * up with USBSTS), then clears CMD_ASE/CMD_IAAD/CMD_PSE and waits again for
 * both schedules to report off.
 */
289 /* idle the controller (from running) */
290 static void ehci_quiesce (struct ehci_hcd *ehci)
295 	if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
299 	/* wait for any schedule enables/disables to take effect */
300 	temp = ehci_readl(ehci, &ehci->regs->command) << 10;
301 	temp &= STS_ASS | STS_PSS;
302 	if (handshake_on_error_set_halt(ehci, &ehci->regs->status,
303 					STS_ASS | STS_PSS, temp, 16 * 125))
306 	/* then disable anything that's still active */
307 	temp = ehci_readl(ehci, &ehci->regs->command);
308 	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
309 	ehci_writel(ehci, temp, &ehci->regs->command);
311 	/* hardware can take 16 microframes to turn off ... */
312 	handshake_on_error_set_halt(ehci, &ehci->regs->status,
313 				    STS_ASS | STS_PSS, 0, 16 * 125);
316 /*-------------------------------------------------------------------------*/
318 static void end_unlink_async(struct ehci_hcd *ehci);
319 static void ehci_work(struct ehci_hcd *ehci);
321 #include "ehci-hub.c"
322 #include "ehci-lpm.c"
323 #include "ehci-mem.c"
325 #include "ehci-sched.c"
327 /*-------------------------------------------------------------------------*/
/* NOTE(review): elided listing (flags declaration, the guarding if-condition's
 * first clause, and closing braces missing); code kept verbatim.
 * Timer callback for a lost IAA (Interrupt-on-Async-Advance) interrupt:
 * under ehci->lock, clears CMD_IAAD, acknowledges a late/erratum STS_IAA,
 * and finishes the pending async unlink via end_unlink_async().
 */
329 static void ehci_iaa_watchdog(unsigned long param)
331 	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
334 	spin_lock_irqsave (&ehci->lock, flags);
336 	/* Lost IAA irqs wedge things badly; seen first with a vt8235.
337 	 * So we need this watchdog, but must protect it against both
338 	 * (a) SMP races against real IAA firing and retriggering, and
339 	 * (b) clean HC shutdown, when IAA watchdog was pending.
342 			&& !timer_pending(&ehci->iaa_watchdog)
343 			&& HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
346 		/* If we get here, IAA is *REALLY* late. It's barely
347 		 * conceivable that the system is so busy that CMD_IAAD
348 		 * is still legitimately set, so let's be sure it's
349 		 * clear before we read STS_IAA. (The HC should clear
350 		 * CMD_IAAD when it sets STS_IAA.)
352 		cmd = ehci_readl(ehci, &ehci->regs->command);
354 			ehci_writel(ehci, cmd & ~CMD_IAAD,
355 					&ehci->regs->command);
357 		/* If IAA is set here it either legitimately triggered
358 		 * before we cleared IAAD above (but _way_ late, so we'll
359 		 * still count it as lost) ... or a silicon erratum:
360 		 * - VIA seems to set IAA without triggering the IRQ;
361 		 * - IAAD potentially cleared without setting IAA.
363 		status = ehci_readl(ehci, &ehci->regs->status);
364 		if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
365 			COUNT (ehci->stats.lost_iaa);
366 			ehci_writel(ehci, STS_IAA, &ehci->regs->status);
369 		ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
371 		end_unlink_async(ehci);
374 	spin_unlock_irqrestore(&ehci->lock, flags);
/* NOTE(review): elided listing (flags declaration and the ehci_work() call
 * suggested by the trailing comment are missing); code kept verbatim.
 * I/O watchdog timer callback: under ehci->lock, starts the deferred async
 * ring shutdown if TIMER_ASYNC_OFF was requested.
 */
377 static void ehci_watchdog(unsigned long param)
379 	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
382 	spin_lock_irqsave(&ehci->lock, flags);
384 	/* stop async processing after it's idled a bit */
385 	if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
386 		start_unlink_async (ehci, ehci->async);
388 	/* ehci could run by timer, without IRQs ... */
391 	spin_unlock_irqrestore (&ehci->lock, flags);
/* NOTE(review): elided listing (the loop decrementing `port` is missing);
 * code kept verbatim.  Writes PORT_RWC_BITS to each PORTSC register,
 * clearing wakeup/change state on every root-hub port.
 */
394 /* On some systems, leaving remote wakeup enabled prevents system shutdown.
395 * The firmware seems to think that powering off is a wakeup event!
396 * This routine turns off remote wakeup and everything else, on all ports.
398 static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
400 	int	port = HCS_N_PORTS(ehci->hcs_params);
403 		ehci_writel(ehci, PORT_RWC_BITS,
404 				&ehci->regs->port_status[port]);
/* NOTE(review): elided listing (the halt call implied by the comment is
 * missing); code kept verbatim.  Turns off all ports, clears the CF
 * (configured) flag so companion controllers regain port ownership, and
 * flushes posted MMIO writes with a read-back.  Caller holds ehci->lock.
 */
408 * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
409 * Should be called with ehci->lock held.
411 static void ehci_silence_controller(struct ehci_hcd *ehci)
414 	ehci_turn_off_all_ports(ehci);
416 	/* make BIOS/etc use companion controller during reboot */
417 	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
419 	/* unblock posted writes */
420 	ehci_readl(ehci, &ehci->regs->configured_flag);
/* NOTE(review): elided listing; code kept verbatim.
 * hc_driver .shutdown hook: cancels both watchdog timers synchronously,
 * then silences the controller under the lock so the next kernel (kexec,
 * reboot) sees a quiescent device.
 */
423 /* ehci_shutdown kick in for silicon on any bus (not just pci, etc).
424 * This forcibly disables dma and IRQs, helping kexec and other cases
425 * where the next system software may expect clean state.
427 static void ehci_shutdown(struct usb_hcd *hcd)
429 	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
431 	del_timer_sync(&ehci->watchdog);
432 	del_timer_sync(&ehci->iaa_watchdog);
434 	spin_lock_irq(&ehci->lock);
435 	ehci_silence_controller(ehci);
436 	spin_unlock_irq(&ehci->lock);
/* NOTE(review): elided listing (port decrement and the hub-control argument
 * list tail are missing); code kept verbatim.
 * Sets or clears port power on every root-hub port via ehci_hub_control(),
 * but only when the controller reports per-port power control (HCS_PPC).
 */
439 static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
443 	if (!HCS_PPC (ehci->hcs_params))
446 	ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
447 	for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
448 		(void) ehci_hub_control(ehci_to_hcd(ehci),
449 				is_on ? SetPortFeature : ClearPortFeature,
452 	/* Flush those writes */
453 	ehci_readl(ehci, &ehci->regs->command);
457 /*-------------------------------------------------------------------------*/
/* NOTE(review): elided listing (the re-entrancy flag set/clear and the async
 * scan implied by the comments are missing); code kept verbatim.
 * Core completion pump: scans the periodic schedule when active, and re-arms
 * the I/O watchdog while the HC is running with work outstanding, so the
 * driver survives lost IRQs (seen on VT6202/VT8235).
 */
460 * ehci_work is called from some interrupts, timers, and so on.
461 * it calls driver completion functions, after dropping ehci->lock.
463 static void ehci_work (struct ehci_hcd *ehci)
465 	timer_action_done (ehci, TIMER_IO_WATCHDOG);
467 	/* another CPU may drop ehci->lock during a schedule scan while
468 	 * it reports urb completions. this flag guards against bogus
469 	 * attempts at re-entrant schedule scanning.
475 	if (ehci->next_uframe != -1)
476 		scan_periodic (ehci);
479 	/* the IO watchdog guards against hardware or driver bugs that
480 	 * misplace IRQs, and should let us run completely without IRQs.
481 	 * such lossage has been observed on both VT6202 and VT8235.
483 	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
484 			(ehci->async->qh_next.ptr != NULL ||
485 			 ehci->periodic_sched != 0))
486 		timer_action (ehci, TIMER_IO_WATCHDOG);
/* NOTE(review): elided listing (quiesce/halt calls, port-power-off, and the
 * amd_nb_dev NULL check implied by context are missing); code kept verbatim.
 * hc_driver .stop hook: kills timers, silences the controller, removes
 * debugfs/companion files, frees driver memory, releases the cached AMD
 * northbridge pci_dev, and dumps final statistics.
 */
490 * Called when the ehci_hcd module is removed.
492 static void ehci_stop (struct usb_hcd *hcd)
494 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
496 	ehci_dbg (ehci, "stop\n");
498 	/* no more interrupts ... */
499 	del_timer_sync (&ehci->watchdog);
500 	del_timer_sync(&ehci->iaa_watchdog);
502 	spin_lock_irq(&ehci->lock);
503 	if (HC_IS_RUNNING (hcd->state))
506 	ehci_silence_controller(ehci);
508 	spin_unlock_irq(&ehci->lock);
510 	remove_companion_file(ehci);
511 	remove_debug_files (ehci);
513 	/* root hub is shut down separately (first, when possible) */
514 	spin_lock_irq (&ehci->lock);
517 	spin_unlock_irq (&ehci->lock);
518 	ehci_mem_cleanup (ehci);
521 	pci_dev_put(amd_nb_dev);
526 	ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
527 		ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
528 		ehci->stats.lost_iaa);
529 	ehci_dbg (ehci, "complete %ld unlink %ld\n",
530 		ehci->stats.complete, ehci->stats.unlink);
533 	dbg_status (ehci, "ehci_stop completed",
534 		ehci_readl(ehci, &ehci->regs->status));
/* NOTE(review): elided listing (declarations of temp/retval/hcc_params, error
 * returns, several closing braces, the USBCMD park/LPM bit assembly, and the
 * final return are missing); code kept verbatim.
 * One-time software init: lock + timers, periodic schedule sizing from
 * hcc_params/EHCI_TUNE_FLS, memory pools (ehci_mem_init), isoc caching
 * threshold, the dedicated async head QH, IRQ-latency/park/frame-list-length
 * assembly into ehci->command, and unlimited scatter-gather when the HCD
 * does not use local memory.
 */
537 /* one-time init, only for memory state */
538 static int ehci_init(struct usb_hcd *hcd)
540 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
544 	struct ehci_qh_hw	*hw;
546 	spin_lock_init(&ehci->lock);
549 	 * keep io watchdog by default, those good HCDs could turn off it later
551 	ehci->need_io_watchdog = 1;
552 	init_timer(&ehci->watchdog);
553 	ehci->watchdog.function = ehci_watchdog;
554 	ehci->watchdog.data = (unsigned long) ehci;
556 	init_timer(&ehci->iaa_watchdog);
557 	ehci->iaa_watchdog.function = ehci_iaa_watchdog;
558 	ehci->iaa_watchdog.data = (unsigned long) ehci;
560 	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
563 	 * hw default: 1K periodic list heads, one per frame.
564 	 * periodic_size can shrink by USBCMD update if hcc_params allows.
566 	ehci->periodic_size = DEFAULT_I_TDPS;
567 	INIT_LIST_HEAD(&ehci->cached_itd_list);
568 	INIT_LIST_HEAD(&ehci->cached_sitd_list);
570 	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
571 		/* periodic schedule size can be smaller than default */
572 		switch (EHCI_TUNE_FLS) {
573 		case 0: ehci->periodic_size = 1024; break;
574 		case 1: ehci->periodic_size = 512; break;
575 		case 2: ehci->periodic_size = 256; break;
579 	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
582 	/* controllers may cache some of the periodic schedule ... */
583 	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
584 		ehci->i_thresh = 2 + 8;
585 	else					// N microframes cached
586 		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
588 	ehci->reclaim = NULL;
589 	ehci->next_uframe = -1;
590 	ehci->clock_frame = -1;
593 	 * dedicate a qh for the async ring head, since we couldn't unlink
594 	 * a 'real' qh without stopping the async schedule [4.8]. use it
595 	 * as the 'reclamation list head' too.
596 	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
597 	 * from automatically advancing to the next td after short reads.
599 	ehci->async->qh_next.qh = NULL;
600 	hw = ehci->async->hw;
601 	hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
602 	hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
603 	hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
604 	hw->hw_qtd_next = EHCI_LIST_END(ehci);
605 	ehci->async->qh_state = QH_STATE_LINKED;
606 	hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
608 	/* clear interrupt enables, set irq latency */
609 	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
611 	temp = 1 << (16 + log2_irq_thresh);
612 	if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
614 		ehci_dbg(ehci, "enable per-port change event\n");
617 	if (HCC_CANPARK(hcc_params)) {
618 		/* HW default park == 3, on hardware that supports it (like
619 		 * NVidia and ALI silicon), maximizes throughput on the async
620 		 * schedule by avoiding QH fetches between transfers.
622 		 * With fast usb storage devices and NForce2, "park" seems to
623 		 * make problems: throughput reduction (!), data errors...
626 			park = min(park, (unsigned) 3);
630 		ehci_dbg(ehci, "park %d\n", park);
632 	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
633 		/* periodic schedule size can be smaller than default */
635 		temp |= (EHCI_TUNE_FLS << 2);
637 	if (HCC_LPM(hcc_params)) {
638 		/* support link power management EHCI 1.1 addendum */
639 		ehci_dbg(ehci, "support lpm\n");
642 			ehci_dbg(ehci, "hird %d invalid, use default 0",
648 	ehci->command = temp;
650 	/* Accept arbitrarily long scatter-gather lists */
651 	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
652 		hcd->self.sg_tablesize = ~0;
/* NOTE(review): elided listing (declarations, error-path return, msleep after
 * setting CF, the ehci_info call header, and the final return are missing);
 * code kept verbatim.
 * hc_driver .start hook: resets the HC, programs the periodic frame list and
 * async head addresses, optionally enables 64-bit DMA segments, sets CMD_RUN,
 * claims ports from companions by setting the CF flag (write-locking
 * ehci_cf_port_reset_rwsem so no companion port reset is in flight), enables
 * interrupts, and creates debugfs/companion files.
 */
656 /* start HC running; it's halted, ehci_init() has been run (once) */
657 static int ehci_run (struct usb_hcd *hcd)
659 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
664 	hcd->uses_new_polling = 1;
666 	/* EHCI spec section 4.1 */
667 	if ((retval = ehci_reset(ehci)) != 0) {
668 		ehci_mem_cleanup(ehci);
671 	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
672 	ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
675 	 * hcc_params controls whether ehci->regs->segment must (!!!)
676 	 * be used; it constrains QH/ITD/SITD and QTD locations.
677 	 * pci_pool consistent memory always uses segment zero.
678 	 * streaming mappings for I/O buffers, like pci_map_single(),
679 	 * can return segments above 4GB, if the device allows.
681 	 * NOTE: the dma mask is visible through dma_supported(), so
682 	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
683 	 * Scsi_Host.highmem_io, and so forth. It's readonly to all
684 	 * host side drivers though.
686 	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
687 	if (HCC_64BIT_ADDR(hcc_params)) {
688 		ehci_writel(ehci, 0, &ehci->regs->segment);
690 		// this is deeply broken on almost all architectures
691 		if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
692 			ehci_info(ehci, "enabled 64bit DMA\n");
697 	// Philips, Intel, and maybe others need CMD_RUN before the
698 	// root hub will detect new devices (why?); NEC doesn't
699 	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
700 	ehci->command |= CMD_RUN;
701 	ehci_writel(ehci, ehci->command, &ehci->regs->command);
702 	dbg_cmd (ehci, "init", ehci->command);
705 	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
706 	 * are explicitly handed to companion controller(s), so no TT is
707 	 * involved with the root hub. (Except where one is integrated,
708 	 * and there's no companion controller unless maybe for USB OTG.)
710 	 * Turning on the CF flag will transfer ownership of all ports
711 	 * from the companions to the EHCI controller. If any of the
712 	 * companions are in the middle of a port reset at the time, it
713 	 * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
714 	 * guarantees that no resets are in progress. After we set CF,
715 	 * a short delay lets the hardware catch up; new resets shouldn't
716 	 * be started before the port switching actions could complete.
718 	down_write(&ehci_cf_port_reset_rwsem);
719 	hcd->state = HC_STATE_RUNNING;
720 	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
721 	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
723 	up_write(&ehci_cf_port_reset_rwsem);
724 	ehci->last_periodic_enable = ktime_get_real();
726 	temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
728 		"USB %x.%x started, EHCI %x.%02x%s\n",
729 		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
730 		temp >> 8, temp & 0xff,
731 		ignore_oc ? ", overcurrent ignored" : "");
733 	ehci_writel(ehci, INTR_MASK,
734 		    &ehci->regs->intr_enable); /* Turn On Interrupts */
736 	/* GRR this is run-once init(), being done every time the HC starts.
737 	 * So long as they're part of class devices, we can't do it init()
738 	 * since the class device isn't created that early.
740 	create_debug_files(ehci);
741 	create_companion_file(ehci);
746 /*-------------------------------------------------------------------------*/
/* NOTE(review): elided listing (several returns, the ppcd read, the port loop
 * header, the dead-controller handling, and the IRQ_HANDLED return are
 * missing); code kept verbatim.
 * Top-level interrupt handler: under ehci->lock, reads and acknowledges
 * USBSTS; dispatches completion work (STS_INT/STS_ERR), finishes async
 * unlinks (STS_IAA, with an IAAD-still-set erratum guard), handles port
 * change detect / remote wakeup (STS_PCD) with 25ms resume signaling,
 * and reports fatal host errors (STS_FATAL).
 */
748 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
750 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
751 	u32			status, masked_status, pcd_status = 0, cmd;
754 	spin_lock (&ehci->lock);
756 	status = ehci_readl(ehci, &ehci->regs->status);
758 	/* e.g. cardbus physical eject */
759 	if (status == ~(u32) 0) {
760 		ehci_dbg (ehci, "device removed\n");
764 	masked_status = status & INTR_MASK;
765 	if (!masked_status) {		/* irq sharing? */
766 		spin_unlock(&ehci->lock);
770 	/* clear (just) interrupts */
771 	ehci_writel(ehci, masked_status, &ehci->regs->status);
772 	cmd = ehci_readl(ehci, &ehci->regs->command);
776 	/* unrequested/ignored: Frame List Rollover */
777 	dbg_status (ehci, "irq", status);
780 	/* INT, ERR, and IAA interrupt rates can be throttled */
782 	/* normal [4.15.1.2] or error [4.15.1.1] completion */
783 	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
784 		if (likely ((status & STS_ERR) == 0))
785 			COUNT (ehci->stats.normal);
787 			COUNT (ehci->stats.error);
791 	/* complete the unlinking of some qh [4.15.2.3] */
792 	if (status & STS_IAA) {
793 		/* guard against (alleged) silicon errata */
794 		if (cmd & CMD_IAAD) {
795 			ehci_writel(ehci, cmd & ~CMD_IAAD,
796 					&ehci->regs->command);
797 			ehci_dbg(ehci, "IAA with IAAD still set?\n");
800 			COUNT(ehci->stats.reclaim);
801 			end_unlink_async(ehci);
803 			ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
806 	/* remote wakeup [4.3.1] */
807 	if (status & STS_PCD) {
808 		unsigned	i = HCS_N_PORTS (ehci->hcs_params);
811 		/* kick root hub later */
814 		/* resume root hub? */
815 		if (!(cmd & CMD_RUN))
816 			usb_hcd_resume_root_hub(hcd);
818 		/* get per-port change detect bits */
825 			/* leverage per-port change bits feature */
826 			if (ehci->has_ppcd && !(ppcd & (1 << i)))
828 			pstatus = ehci_readl(ehci,
829 					 &ehci->regs->port_status[i]);
831 			if (pstatus & PORT_OWNER)
833 			if (!(test_bit(i, &ehci->suspended_ports) &&
834 					((pstatus & PORT_RESUME) ||
835 						!(pstatus & PORT_SUSPEND)) &&
836 					(pstatus & PORT_PE) &&
837 					ehci->reset_done[i] == 0))
840 			/* start 20 msec resume signaling from this port,
841 			 * and make khubd collect PORT_STAT_C_SUSPEND to
842 			 * stop that signaling. Use 5 ms extra for safety,
843 			 * like usb_port_resume() does.
845 			ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
846 			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
847 			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
851 	/* PCI errors [4.15.2.4] */
852 	if (unlikely ((status & STS_FATAL) != 0)) {
853 		ehci_err(ehci, "fatal error\n");
854 		dbg_cmd(ehci, "fatal", cmd);
855 		dbg_status(ehci, "fatal", status);
859 		ehci_writel(ehci, 0, &ehci->regs->configured_flag);
860 		/* generic layer kills/unlinks all urbs, then
861 		 * uses ehci_stop to clean up the rest
868 	spin_unlock (&ehci->lock);
870 		usb_hcd_poll_rh_status(hcd);
874 /*-------------------------------------------------------------------------*/
/* NOTE(review): elided listing (parameter list, case labels for control/bulk/
 * interrupt, and -ENOMEM/size-limit returns are missing); code kept verbatim.
 * hc_driver .urb_enqueue hook: builds a qtd list and dispatches by pipe type —
 * control/bulk to submit_async(), interrupt to intr_submit(), high-speed iso
 * to itd_submit(), full/low-speed iso (via TT) to sitd_submit().  Control
 * transfers larger than 16KB are rejected per the comment.
 */
877 * non-error returns are a promise to giveback() the urb later
878 * we drop ownership so next owner (or urb unlink) can get it
880 * urb + dev is in hcd.self.controller.urb_list
881 * we're queueing TDs onto software and hardware lists
883 * hcd-specific init for hcpriv hasn't been done yet
885 * NOTE: control, bulk, and interrupt share the same code to append TDs
886 * to a (possibly active) QH, and the same QH scanning code.
888 static int ehci_urb_enqueue (
893 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
894 	struct list_head	qtd_list;
896 	INIT_LIST_HEAD (&qtd_list);
898 	switch (usb_pipetype (urb->pipe)) {
900 		/* qh_completions() code doesn't handle all the fault cases
901 		 * in multi-TD control transfers. Even 1KB is rare anyway.
903 		if (urb->transfer_buffer_length > (16 * 1024))
906 		/* case PIPE_BULK: */
908 		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
910 		return submit_async(ehci, urb, &qtd_list, mem_flags);
913 		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
915 		return intr_submit(ehci, urb, &qtd_list, mem_flags);
917 	case PIPE_ISOCHRONOUS:
918 		if (urb->dev->speed == USB_SPEED_HIGH)
919 			return itd_submit (ehci, urb, mem_flags);
921 			return sitd_submit (ehci, urb, mem_flags);
/* NOTE(review): elided listing (early return after the state check, the
 * busy-reclaim condition, and the reclaim-chain append are missing); code
 * kept verbatim.  Initiates (or defers) removal of a QH from the async
 * schedule: flags COMPLETING QHs for rescan, queues behind an in-progress
 * IAA reclaim cycle, otherwise starts a new one via start_unlink_async().
 */
925 static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
928 	if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
929 		end_unlink_async(ehci);
931 	/* If the QH isn't linked then there's nothing we can do
932 	 * unless we were called during a giveback, in which case
933 	 * qh_completions() has to deal with it.
935 	if (qh->qh_state != QH_STATE_LINKED) {
936 		if (qh->qh_state == QH_STATE_COMPLETING)
937 			qh->needs_rescan = 1;
941 	/* defer till later if busy */
943 		struct ehci_qh		*last;
945 		for (last = ehci->reclaim;
947 				last = last->reclaim)
949 		qh->qh_state = QH_STATE_UNLINK_WAIT;
952 	/* start IAA cycle */
954 		start_unlink_async (ehci, qh);
/* NOTE(review): elided listing (rc/qh/flags declarations, error-out path,
 * case labels, breaks, and the final return are missing); code kept verbatim.
 * hc_driver .urb_dequeue hook: after usb_hcd_check_unlink_urb() validation,
 * unlinks the URB's QH by pipe type — async (control/bulk) via unlink_async(),
 * interrupt via intr_deschedule() + qh_completions(); isochronous URBs are
 * apparently left for the next completion scan per the trailing comments.
 */
957 /* remove from hardware lists
958 * completions normally happen asynchronously
961 static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
963 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
968 	spin_lock_irqsave (&ehci->lock, flags);
969 	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
973 	switch (usb_pipetype (urb->pipe)) {
974 	// case PIPE_CONTROL:
977 		qh = (struct ehci_qh *) urb->hcpriv;
980 		switch (qh->qh_state) {
981 		case QH_STATE_LINKED:
982 		case QH_STATE_COMPLETING:
983 			unlink_async(ehci, qh);
985 		case QH_STATE_UNLINK:
986 		case QH_STATE_UNLINK_WAIT:
987 			/* already started */
990 			/* QH might be waiting for a Clear-TT-Buffer */
991 			qh_completions(ehci, qh);
997 		qh = (struct ehci_qh *) urb->hcpriv;
1000 		switch (qh->qh_state) {
1001 		case QH_STATE_LINKED:
1002 		case QH_STATE_COMPLETING:
1003 			intr_deschedule (ehci, qh);
1006 			qh_completions (ehci, qh);
1009 			ehci_dbg (ehci, "bogus qh %p state %d\n",
1015 	case PIPE_ISOCHRONOUS:
1018 		// wait till next completion, do it then.
1019 		// completion irqs can wait up to 1024 msec,
1023 	spin_unlock_irqrestore (&ehci->lock, flags);
1027 /*-------------------------------------------------------------------------*/
/* NOTE(review): elided listing (the rescan label/loop, qh lookup from
 * ep->hcpriv, breaks, goto targets, and qh_destroy path are missing); code
 * kept verbatim.  hc_driver .endpoint_disable hook: spins (dropping the lock
 * and sleeping a jiffy) until the endpoint's QH reaches IDLE with an empty
 * qtd_list, unlinking it as needed; iso streams (qh->hw == NULL) just wait.
 * On a non-empty qtd_list it logs and deliberately leaks per the comment.
 */
1029 // bulk qh holds the data toggle
1032 ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1034 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
1035 	unsigned long		flags;
1036 	struct ehci_qh		*qh, *tmp;
1038 	/* ASSERT: any requests/urbs are being unlinked */
1039 	/* ASSERT: nobody can be submitting urbs for this any more */
1042 	spin_lock_irqsave (&ehci->lock, flags);
1047 	/* endpoints can be iso streams. for now, we don't
1048 	 * accelerate iso completions ... so spin a while.
1050 	if (qh->hw == NULL) {
1051 		ehci_vdbg (ehci, "iso delay\n");
1055 	if (!HC_IS_RUNNING (hcd->state))
1056 		qh->qh_state = QH_STATE_IDLE;
1057 	switch (qh->qh_state) {
1058 	case QH_STATE_LINKED:
1059 	case QH_STATE_COMPLETING:
1060 		for (tmp = ehci->async->qh_next.qh;
1062 				tmp = tmp->qh_next.qh)
1064 		/* periodic qh self-unlinks on empty, and a COMPLETING qh
1065 		 * may already be unlinked.
1068 			unlink_async(ehci, qh);
1070 	case QH_STATE_UNLINK:		/* wait for hw to finish? */
1071 	case QH_STATE_UNLINK_WAIT:
1073 		spin_unlock_irqrestore (&ehci->lock, flags);
1074 		schedule_timeout_uninterruptible(1);
1076 	case QH_STATE_IDLE:		/* fully unlinked */
1077 		if (qh->clearing_tt)
1079 		if (list_empty (&qh->qtd_list)) {
1083 		/* else FALL THROUGH */
1085 		/* caller was supposed to have unlinked any requests;
1086 		 * that's not our job. just leak this memory.
1088 		ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
1089 			qh, ep->desc.bEndpointAddress, qh->qh_state,
1090 			list_empty (&qh->qtd_list) ? "" : "(has tds)");
1095 	spin_unlock_irqrestore (&ehci->lock, flags);
/* NOTE(review): elided listing (return type line, qh lookup from ep->hcpriv,
 * early return, and the `if (qh)` guard are missing); code kept verbatim.
 * hc_driver .endpoint_reset hook: for bulk/interrupt endpoints only, clears
 * the data-toggle bookkeeping and, if the QH is active, unlinks it so the
 * toggle bit in the hardware QH is refreshed on re-link.
 */
1100 ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1102 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
1104 	int			eptype = usb_endpoint_type(&ep->desc);
1105 	int			epnum = usb_endpoint_num(&ep->desc);
1106 	int			is_out = usb_endpoint_dir_out(&ep->desc);
1107 	unsigned long		flags;
1109 	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
1112 	spin_lock_irqsave(&ehci->lock, flags);
1115 	/* For Bulk and Interrupt endpoints we maintain the toggle state
1116 	 * in the hardware; the toggle bits in udev aren't used at all.
1117 	 * When an endpoint is reset by usb_clear_halt() we must reset
1118 	 * the toggle bit in the QH.
1121 		usb_settoggle(qh->dev, epnum, is_out, 0);
1122 		if (!list_empty(&qh->qtd_list)) {
1123 			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
1124 		} else if (qh->qh_state == QH_STATE_LINKED ||
1125 				qh->qh_state == QH_STATE_COMPLETING) {
1127 			/* The toggle value in the QH can't be updated
1128 			 * while the QH is active. Unlink it now;
1129 			 * re-linking will call qh_refresh().
1131 			if (eptype == USB_ENDPOINT_XFER_BULK)
1132 				unlink_async(ehci, qh);
1134 				intr_deschedule(ehci, qh);
1137 	spin_unlock_irqrestore(&ehci->lock, flags);
/* hc_driver .get_frame_number hook: FRINDEX counts microframes, so shift
 * right by 3 to get the 1ms frame number, modulo the schedule size.
 * (Elided listing; code kept verbatim.)
 */
1140 static int ehci_get_frame (struct usb_hcd *hcd)
1142 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
1143 	return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
1144 		ehci->periodic_size;
1147 /*-------------------------------------------------------------------------*/
1149 MODULE_DESCRIPTION(DRIVER_DESC);
1150 MODULE_AUTHOR (DRIVER_AUTHOR);
1151 MODULE_LICENSE ("GPL");
1154 #include "ehci-pci.c"
1155 #define PCI_DRIVER ehci_pci_driver
1158 #ifdef CONFIG_USB_EHCI_FSL
1159 #include "ehci-fsl.c"
1160 #define PLATFORM_DRIVER ehci_fsl_driver
1163 #ifdef CONFIG_USB_EHCI_MXC
1164 #include "ehci-mxc.c"
1165 #define PLATFORM_DRIVER ehci_mxc_driver
1168 #ifdef CONFIG_SOC_AU1200
1169 #include "ehci-au1xxx.c"
1170 #define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
1173 #ifdef CONFIG_ARCH_OMAP3
1174 #include "ehci-omap.c"
1175 #define PLATFORM_DRIVER ehci_hcd_omap_driver
1178 #ifdef CONFIG_PPC_PS3
1179 #include "ehci-ps3.c"
1180 #define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
1183 #ifdef CONFIG_USB_EHCI_HCD_PPC_OF
1184 #include "ehci-ppc-of.c"
1185 #define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
1188 #ifdef CONFIG_XPS_USB_HCD_XILINX
1189 #include "ehci-xilinx-of.c"
1190 #define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
1193 #ifdef CONFIG_PLAT_ORION
1194 #include "ehci-orion.c"
1195 #define PLATFORM_DRIVER ehci_orion_driver
1198 #ifdef CONFIG_ARCH_IXP4XX
1199 #include "ehci-ixp4xx.c"
1200 #define PLATFORM_DRIVER ixp4xx_ehci_driver
1203 #ifdef CONFIG_USB_W90X900_EHCI
1204 #include "ehci-w90x900.c"
1205 #define PLATFORM_DRIVER ehci_hcd_w90x900_driver
1208 #ifdef CONFIG_ARCH_AT91
1209 #include "ehci-atmel.c"
1210 #define PLATFORM_DRIVER ehci_atmel_driver
1213 #if defined(CONFIG_ARCH_S5PV210) || defined(CONFIG_ARCH_S5P6450) || \
1214 defined(CONFIG_ARCH_S5PV310)
1215 #include "ehci-s5pv210.c"
1216 #define PLATFORM_DRIVER ehci_hcd_s5pv210_driver
1218 #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
1219 !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
1220 !defined(XILINX_OF_PLATFORM_DRIVER)
1221 #error "missing bus glue for ehci-hcd"
/* NOTE(review): elided listing (retval declaration, usb_disabled() check,
 * error-label structure, `return retval`, and several #endif lines are
 * missing); code kept verbatim.
 * Module init: warns if ehci_hcd loads after uhci/ohci, creates the debugfs
 * root, then registers each configured bus-glue driver in order; the tail
 * fragment is the unwind path that unregisters in reverse on failure.
 */
1224 static int __init ehci_hcd_init(void)
1231 	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
1232 	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1233 	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
1234 			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
1235 		printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
1236 				" before uhci_hcd and ohci_hcd, not after\n");
1238 	pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
1240 		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
1241 		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
1244 	ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
1245 	if (!ehci_debug_root) {
1251 #ifdef PLATFORM_DRIVER
1252 	retval = platform_driver_register(&PLATFORM_DRIVER);
1258 	retval = pci_register_driver(&PCI_DRIVER);
1263 #ifdef PS3_SYSTEM_BUS_DRIVER
1264 	retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
1269 #ifdef OF_PLATFORM_DRIVER
1270 	retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
1275 #ifdef XILINX_OF_PLATFORM_DRIVER
1276 	retval = of_register_platform_driver(&XILINX_OF_PLATFORM_DRIVER);
1282 #ifdef XILINX_OF_PLATFORM_DRIVER
1283 	/* of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER); */
1286 #ifdef OF_PLATFORM_DRIVER
1287 	of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
1290 #ifdef PS3_SYSTEM_BUS_DRIVER
1291 	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1295 	pci_unregister_driver(&PCI_DRIVER);
1298 #ifdef PLATFORM_DRIVER
1299 	platform_driver_unregister(&PLATFORM_DRIVER);
1303 	debugfs_remove(ehci_debug_root);
1304 	ehci_debug_root = NULL;
1307 	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1310 module_init(ehci_hcd_init);
/* NOTE(review): elided listing (#endif lines and braces missing); code kept
 * verbatim.  Module exit: unregisters every configured bus-glue driver in
 * the reverse of registration order, removes the debugfs root, and clears
 * the USB_EHCI_LOADED flag.
 */
1312 static void __exit ehci_hcd_cleanup(void)
1314 #ifdef XILINX_OF_PLATFORM_DRIVER
1315 	of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER);
1317 #ifdef OF_PLATFORM_DRIVER
1318 	of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
1320 #ifdef PLATFORM_DRIVER
1321 	platform_driver_unregister(&PLATFORM_DRIVER);
1324 	pci_unregister_driver(&PCI_DRIVER);
1326 #ifdef PS3_SYSTEM_BUS_DRIVER
1327 	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1330 	debugfs_remove(ehci_debug_root);
1332 	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
1334 module_exit(ehci_hcd_cleanup);