/*
 * usb: gadget: Remove File-backed Storage Gadget (g_file_storage).
 * [platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / usb / gadget / net2280.c
 */
1 /*
2  * Driver for the PLX NET2280 USB device controller.
3  * Specs and errata are available from <http://www.plxtech.com>.
4  *
5  * PLX Technology Inc. (formerly NetChip Technology) supported the
6  * development of this driver.
7  *
8  *
9  * CODE STATUS HIGHLIGHTS
10  *
11  * This driver should work well with most "gadget" drivers, including
12  * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
13  * as well as Gadget Zero and Gadgetfs.
14  *
15  * DMA is enabled by default.  Drivers using transfer queues might use
16  * DMA chaining to remove IRQ latencies between transfers.  (Except when
17  * short OUT transfers happen.)  Drivers can use the req->no_interrupt
18  * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
19  * and DMA chaining is enabled.
20  *
21  * Note that almost all the errata workarounds here are only needed for
22  * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
23  */
24
25 /*
26  * Copyright (C) 2003 David Brownell
27  * Copyright (C) 2003-2005 PLX Technology, Inc.
28  *
29  * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
30  *      with 2282 chip
31  *
32  * This program is free software; you can redistribute it and/or modify
33  * it under the terms of the GNU General Public License as published by
34  * the Free Software Foundation; either version 2 of the License, or
35  * (at your option) any later version.
36  */
37
38 #undef  DEBUG           /* messages on error and most fault paths */
39 #undef  VERBOSE         /* extra debug messages (success too) */
40
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/kernel.h>
45 #include <linux/delay.h>
46 #include <linux/ioport.h>
47 #include <linux/slab.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/timer.h>
51 #include <linux/list.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/device.h>
55 #include <linux/usb/ch9.h>
56 #include <linux/usb/gadget.h>
57 #include <linux/prefetch.h>
58
59 #include <asm/byteorder.h>
60 #include <asm/io.h>
61 #include <asm/irq.h>
62 #include <asm/unaligned.h>
63
64
65 #define DRIVER_DESC             "PLX NET228x USB Peripheral Controller"
66 #define DRIVER_VERSION          "2005 Sept 27"
67
68 #define DMA_ADDR_INVALID        (~(dma_addr_t)0)
69 #define EP_DONTUSE              13      /* nonzero */
70
71 #define USE_RDK_LEDS            /* GPIO pins control three LEDs */
72
73
static const char driver_name [] = "net2280";
static const char driver_desc [] = DRIVER_DESC;

/* names for ep0 plus the six general-purpose endpoints */
static const char ep0name [] = "ep0";
static const char *const ep_name [] = {
        ep0name,
        "ep-a", "ep-b", "ep-c", "ep-d",
        "ep-e", "ep-f",
};

/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
 *
 * The net2280 DMA engines are not tightly integrated with their FIFOs;
 * not all cases are (yet) handled well in this driver or the silicon.
 * Some gadget drivers work better with the dma support here than others.
 * These two parameters let you use PIO or more aggressive DMA.
 */
static bool use_dma = 1;
static bool use_dma_chaining = 0;

/* "modprobe net2280 use_dma=n" etc */
/* read-only after load (S_IRUGO): changing them live is not supported */
module_param (use_dma, bool, S_IRUGO);
module_param (use_dma_chaining, bool, S_IRUGO);


/* mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode = 0;

/* "modprobe net2280 fifo_mode=1" etc */
module_param (fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices
 */
static bool enable_suspend = 0;

/* "modprobe net2280 enable_suspend=1" etc */
module_param (enable_suspend, bool, S_IRUGO);
118
119
120 #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
121
122 #if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
123 static char *type_string (u8 bmAttributes)
124 {
125         switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
126         case USB_ENDPOINT_XFER_BULK:    return "bulk";
127         case USB_ENDPOINT_XFER_ISOC:    return "iso";
128         case USB_ENDPOINT_XFER_INT:     return "intr";
129         };
130         return "control";
131 }
132 #endif
133
134 #include "net2280.h"
135
136 #define valid_bit       cpu_to_le32 (1 << VALID_BIT)
137 #define dma_done_ie     cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)
138
139 /*-------------------------------------------------------------------------*/
140
/*
 * net2280_enable - gadget ep_ops "enable" hook
 *
 * Validates @desc against chip limits and errata, then programs the
 * endpoint's max packet size, its config word (type/direction/address/
 * enable), the NAK policy, and the per-endpoint interrupt enables
 * (which differ for PIO vs DMA operation).  Returns 0 on success or a
 * negative errno; acquires dev->lock for the hardware programming.
 */
static int
net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
        struct net2280          *dev;
        struct net2280_ep       *ep;
        u32                     max, tmp;
        unsigned long           flags;

        /* container_of() is pointer arithmetic only, so computing ep
         * before the !_ep check dereferences nothing.
         */
        ep = container_of (_ep, struct net2280_ep, ep);
        if (!_ep || !desc || ep->desc || _ep->name == ep0name
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;
        dev = ep->dev;
        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        /* erratum 0119 workaround ties up an endpoint number */
        if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
                return -EDOM;

        /* sanity check ep-e/ep-f since their fifos are small */
        max = usb_endpoint_maxp (desc) & 0x1fff;
        if (ep->num > 4 && max > 64)
                return -ERANGE;

        spin_lock_irqsave (&dev->lock, flags);
        _ep->maxpacket = max & 0x7ff;
        ep->desc = desc;

        /* ep_reset() has already been called */
        ep->stopped = 0;
        ep->wedged = 0;
        ep->out_overflow = 0;

        /* set speed-dependent max packet; may kick in high bandwidth */
        set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);

        /* FIFO lines can't go to different packets.  PIO is ok, so
         * use it instead of troublesome (non-bulk) multi-packet DMA.
         */
        if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
                DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
                        ep->ep.name, ep->ep.maxpacket);
                ep->dma = NULL;
        }

        /* set type, direction, address; reset fifo counters */
        writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
        tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
        if (tmp == USB_ENDPOINT_XFER_INT) {
                /* erratum 0105 workaround prevents hs NYET */
                if (dev->chiprev == 0100
                                && dev->gadget.speed == USB_SPEED_HIGH
                                && !(desc->bEndpointAddress & USB_DIR_IN))
                        writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
                                &ep->regs->ep_rsp);
        } else if (tmp == USB_ENDPOINT_XFER_BULK) {
                /* catch some particularly blatant driver bugs */
                if ((dev->gadget.speed == USB_SPEED_HIGH
                                        && max != 512)
                                || (dev->gadget.speed == USB_SPEED_FULL
                                        && max > 64)) {
                        spin_unlock_irqrestore (&dev->lock, flags);
                        return -ERANGE;
                }
        }
        ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
        /* assemble the ep_cfg word: type, address bits (including the
         * direction bit tested below), default fifo line count, enable
         */
        tmp <<= ENDPOINT_TYPE;
        tmp |= desc->bEndpointAddress;
        tmp |= (4 << ENDPOINT_BYTE_COUNT);      /* default full fifo lines */
        tmp |= 1 << ENDPOINT_ENABLE;
        wmb ();

        /* for OUT transfers, block the rx fifo until a read is posted */
        ep->is_in = (tmp & USB_DIR_IN) != 0;
        if (!ep->is_in)
                writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
        else if (dev->pdev->device != 0x2280) {
                /* Added for 2282, Don't use nak packets on an in endpoint,
                 * this was ignored on 2280
                 */
                writel ((1 << CLEAR_NAK_OUT_PACKETS)
                        | (1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
        }

        writel (tmp, &ep->regs->ep_cfg);

        /* enable irqs */
        if (!ep->dma) {                         /* pio, per-packet */
                tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
                writel (tmp, &dev->regs->pciirqenb0);

                tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
                        | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
                if (dev->pdev->device == 0x2280)
                        tmp |= readl (&ep->regs->ep_irqenb);
                writel (tmp, &ep->regs->ep_irqenb);
        } else {                                /* dma, per-request */
                tmp = (1 << (8 + ep->num));     /* completion */
                tmp |= readl (&dev->regs->pciirqenb1);
                writel (tmp, &dev->regs->pciirqenb1);

                /* for short OUT transfers, dma completions can't
                 * advance the queue; do it pio-style, by hand.
                 * NOTE erratum 0112 workaround #2
                 */
                if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
                        tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
                        writel (tmp, &ep->regs->ep_irqenb);

                        tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
                        writel (tmp, &dev->regs->pciirqenb0);
                }
        }

        tmp = desc->bEndpointAddress;
        DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
                _ep->name, tmp & 0x0f, DIR_STRING (tmp),
                type_string (desc->bmAttributes),
                ep->dma ? "dma" : "pio", max);

        /* pci writes may still be posted */
        spin_unlock_irqrestore (&dev->lock, flags);
        return 0;
}
266
267 static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
268 {
269         u32     result;
270
271         do {
272                 result = readl (ptr);
273                 if (result == ~(u32)0)          /* "device unplugged" */
274                         return -ENODEV;
275                 result &= mask;
276                 if (result == done)
277                         return 0;
278                 udelay (1);
279                 usec--;
280         } while (usec > 0);
281         return -ETIMEDOUT;
282 }
283
284 static const struct usb_ep_ops net2280_ep_ops;
285
/*
 * ep_reset - restore one endpoint to driver/hardware defaults
 *
 * Forgets the cached descriptor and (re)initializes the request queue,
 * shuts down the endpoint's dma channel or pio irq enables, programs
 * the chip's default NAK/status-phase policy (2280 differs from the
 * later 228x parts for IN endpoints), and scrubs all latched status
 * bits plus the fifo.  Callers such as net2280_disable() invoke this
 * with dev->lock held.
 */
static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
{
        u32             tmp;

        ep->desc = NULL;
        INIT_LIST_HEAD (&ep->queue);

        ep->ep.maxpacket = ~0;
        ep->ep.ops = &net2280_ep_ops;

        /* disable the dma, irqs, endpoint... */
        if (ep->dma) {
                writel (0, &ep->dma->dmactl);
                writel (  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
                        | (1 << DMA_TRANSACTION_DONE_INTERRUPT)
                        | (1 << DMA_ABORT)
                        , &ep->dma->dmastat);

                tmp = readl (&regs->pciirqenb0);
                tmp &= ~(1 << ep->num);
                writel (tmp, &regs->pciirqenb0);
        } else {
                tmp = readl (&regs->pciirqenb1);
                tmp &= ~(1 << (8 + ep->num));   /* completion */
                writel (tmp, &regs->pciirqenb1);
        }
        writel (0, &ep->regs->ep_irqenb);

        /* init to our chosen defaults, notably so that we NAK OUT
         * packets until the driver queues a read (+note erratum 0112)
         */
        if (!ep->is_in || ep->dev->pdev->device == 0x2280) {
                tmp = (1 << SET_NAK_OUT_PACKETS_MODE)
                | (1 << SET_NAK_OUT_PACKETS)
                | (1 << CLEAR_EP_HIDE_STATUS_PHASE)
                | (1 << CLEAR_INTERRUPT_MODE);
        } else {
                /* added for 2282 */
                tmp = (1 << CLEAR_NAK_OUT_PACKETS_MODE)
                | (1 << CLEAR_NAK_OUT_PACKETS)
                | (1 << CLEAR_EP_HIDE_STATUS_PHASE)
                | (1 << CLEAR_INTERRUPT_MODE);
        }

        /* ep0's toggle/halt bits are managed by the chip itself */
        if (ep->num != 0) {
                tmp |= (1 << CLEAR_ENDPOINT_TOGGLE)
                        | (1 << CLEAR_ENDPOINT_HALT);
        }
        writel (tmp, &ep->regs->ep_rsp);

        /* scrub most status bits, and flush any fifo state */
        if (ep->dev->pdev->device == 0x2280)
                tmp = (1 << FIFO_OVERFLOW)
                        | (1 << FIFO_UNDERFLOW);
        else
                tmp = 0;

        writel (tmp | (1 << TIMEOUT)
                | (1 << USB_STALL_SENT)
                | (1 << USB_IN_NAK_SENT)
                | (1 << USB_IN_ACK_RCVD)
                | (1 << USB_OUT_PING_NAK_SENT)
                | (1 << USB_OUT_ACK_SENT)
                | (1 << FIFO_FLUSH)
                | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
                | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
                | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
                | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
                | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
                | (1 << DATA_IN_TOKEN_INTERRUPT)
                , &ep->regs->ep_stat);

        /* fifo size is handled separately */
}
360
361 static void nuke (struct net2280_ep *);
362
363 static int net2280_disable (struct usb_ep *_ep)
364 {
365         struct net2280_ep       *ep;
366         unsigned long           flags;
367
368         ep = container_of (_ep, struct net2280_ep, ep);
369         if (!_ep || !ep->desc || _ep->name == ep0name)
370                 return -EINVAL;
371
372         spin_lock_irqsave (&ep->dev->lock, flags);
373         nuke (ep);
374         ep_reset (ep->dev->regs, ep);
375
376         VDEBUG (ep->dev, "disabled %s %s\n",
377                         ep->dma ? "dma" : "pio", _ep->name);
378
379         /* synch memory views with the device */
380         (void) readl (&ep->regs->ep_cfg);
381
382         if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
383                 ep->dma = &ep->dev->dma [ep->num - 1];
384
385         spin_unlock_irqrestore (&ep->dev->lock, flags);
386         return 0;
387 }
388
389 /*-------------------------------------------------------------------------*/
390
391 static struct usb_request *
392 net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
393 {
394         struct net2280_ep       *ep;
395         struct net2280_request  *req;
396
397         if (!_ep)
398                 return NULL;
399         ep = container_of (_ep, struct net2280_ep, ep);
400
401         req = kzalloc(sizeof(*req), gfp_flags);
402         if (!req)
403                 return NULL;
404
405         req->req.dma = DMA_ADDR_INVALID;
406         INIT_LIST_HEAD (&req->queue);
407
408         /* this dma descriptor may be swapped with the previous dummy */
409         if (ep->dma) {
410                 struct net2280_dma      *td;
411
412                 td = pci_pool_alloc (ep->dev->requests, gfp_flags,
413                                 &req->td_dma);
414                 if (!td) {
415                         kfree (req);
416                         return NULL;
417                 }
418                 td->dmacount = 0;       /* not VALID */
419                 td->dmaaddr = cpu_to_le32 (DMA_ADDR_INVALID);
420                 td->dmadesc = td->dmaaddr;
421                 req->td = td;
422         }
423         return &req->req;
424 }
425
426 static void
427 net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
428 {
429         struct net2280_ep       *ep;
430         struct net2280_request  *req;
431
432         ep = container_of (_ep, struct net2280_ep, ep);
433         if (!_ep || !_req)
434                 return;
435
436         req = container_of (_req, struct net2280_request, req);
437         WARN_ON (!list_empty (&req->queue));
438         if (req->td)
439                 pci_pool_free (ep->dev->requests, req->td, req->td_dma);
440         kfree (req);
441 }
442
443 /*-------------------------------------------------------------------------*/
444
445 /* load a packet into the fifo we use for usb IN transfers.
446  * works for all endpoints.
447  *
448  * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
449  * at a time, but this code is simpler because it knows it only writes
450  * one packet.  ep-a..ep-d should use dma instead.
451  */
/* A NULL @req writes a zero-length packet: total becomes 0 and the
 * final "short entry" path below validates an empty fifo line.
 */
static void
write_fifo (struct net2280_ep *ep, struct usb_request *req)
{
        struct net2280_ep_regs  __iomem *regs = ep->regs;
        u8                      *buf;
        u32                     tmp;
        unsigned                count, total;

        /* INVARIANT:  fifo is currently empty. (testable) */

        if (req) {
                buf = req->buf + req->actual;
                prefetch (buf);
                total = req->length - req->actual;
        } else {
                total = 0;
                buf = NULL;
        }

        /* write just one packet at a time */
        count = ep->ep.maxpacket;
        if (count > total)      /* min() cannot be used on a bitfield */
                count = total;

        VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
                        ep->ep.name, count,
                        (count != ep->ep.maxpacket) ? " (short)" : "",
                        req);
        /* push full 32-bit fifo lines, handling unaligned buffers */
        while (count >= 4) {
                /* NOTE be careful if you try to align these. fifo lines
                 * should normally be full (4 bytes) and successive partial
                 * lines are ok only in certain cases.
                 */
                tmp = get_unaligned ((u32 *)buf);
                cpu_to_le32s (&tmp);
                writel (tmp, &regs->ep_data);
                buf += 4;
                count -= 4;
        }

        /* last fifo entry is "short" unless we wrote a full packet.
         * also explicitly validate last word in (periodic) transfers
         * when maxpacket is not a multiple of 4 bytes.
         */
        if (count || total < ep->ep.maxpacket) {
                tmp = count ? get_unaligned ((u32 *)buf) : count;
                cpu_to_le32s (&tmp);
                set_fifo_bytecount (ep, count & 0x03);
                writel (tmp, &regs->ep_data);
        }

        /* pci writes may still be posted */
}
505
506 /* work around erratum 0106: PCI and USB race over the OUT fifo.
507  * caller guarantees chiprev 0100, out endpoint is NAKing, and
508  * there's no real data in the fifo.
509  *
510  * NOTE:  also used in cases where that erratum doesn't apply:
511  * where the host wrote "too much" data to us.
512  */
/* Discard whatever is in the OUT fifo: ack the latched rx status
 * bits, flush the fifo, then (full speed only, where the fifo could
 * still be filling) wait for the chip to NAK the host so that a
 * subsequent CLEAR_NAK_OUT_PACKETS is safe.
 */
static void out_flush (struct net2280_ep *ep)
{
        u32     __iomem *statp;
        u32     tmp;

        ASSERT_OUT_NAKING (ep);

        statp = &ep->regs->ep_stat;
        writel (  (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
                | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
                , statp);
        writel ((1 << FIFO_FLUSH), statp);
        /* make sure the flush is issued before re-reading status */
        mb ();
        tmp = readl (statp);
        if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
                        /* high speed did bulk NYET; fifo isn't filling */
                        && ep->dev->gadget.speed == USB_SPEED_FULL) {
                unsigned        usec;

                usec = 50;              /* 64 byte bulk/interrupt */
                handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
                                (1 << USB_OUT_PING_NAK_SENT), usec);
                /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
        }
}
538
539 /* unload packet(s) from the fifo we use for usb OUT transfers.
540  * returns true iff the request completed, because of short packet
541  * or the request buffer having filled with full packets.
542  *
543  * for ep-a..ep-d this will read multiple packets out when they
544  * have been accepted.
545  */
/* Drain packets from the OUT fifo into @req's buffer, bounded by the
 * request length; on overflow the excess is flushed and the request
 * completes with -EOVERFLOW.  Return value is nonzero iff the request
 * is complete (short packet seen, or buffer full and !req.zero).
 */
static int
read_fifo (struct net2280_ep *ep, struct net2280_request *req)
{
        struct net2280_ep_regs  __iomem *regs = ep->regs;
        u8                      *buf = req->req.buf + req->req.actual;
        unsigned                count, tmp, is_short;
        unsigned                cleanup = 0, prevent = 0;

        /* erratum 0106 ... packets coming in during fifo reads might
         * be incompletely rejected.  not all cases have workarounds.
         */
        if (ep->dev->chiprev == 0x0100
                        && ep->dev->gadget.speed == USB_SPEED_FULL) {
                udelay (1);
                tmp = readl (&ep->regs->ep_stat);
                if ((tmp & (1 << NAK_OUT_PACKETS)))
                        cleanup = 1;
                else if ((tmp & (1 << FIFO_FULL))) {
                        start_out_naking (ep);
                        prevent = 1;
                }
                /* else: hope we don't see the problem */
        }

        /* never overflow the rx buffer. the fifo reads packets until
         * it sees a short one; we might not be ready for them all.
         */
        prefetchw (buf);
        count = readl (&regs->ep_avail);
        if (unlikely (count == 0)) {
                udelay (1);
                tmp = readl (&ep->regs->ep_stat);
                count = readl (&regs->ep_avail);
                /* handled that data already? */
                if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
                        return 0;
        }

        tmp = req->req.length - req->req.actual;
        if (count > tmp) {
                /* as with DMA, data overflow gets flushed */
                if ((tmp % ep->ep.maxpacket) != 0) {
                        ERROR (ep->dev,
                                "%s out fifo %d bytes, expected %d\n",
                                ep->ep.name, count, tmp);
                        req->req.status = -EOVERFLOW;
                        cleanup = 1;
                        /* NAK_OUT_PACKETS will be set, so flushing is safe;
                         * the next read will start with the next packet
                         */
                } /* else it's a ZLP, no worries */
                count = tmp;
        }
        req->req.actual += count;

        /* a short (or zero-length) packet terminates the transfer */
        is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

        VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
                        ep->ep.name, count, is_short ? " (short)" : "",
                        cleanup ? " flush" : "", prevent ? " nak" : "",
                        req, req->req.actual, req->req.length);

        /* drain full 32-bit fifo lines, handling unaligned buffers */
        while (count >= 4) {
                tmp = readl (&regs->ep_data);
                cpu_to_le32s (&tmp);
                put_unaligned (tmp, (u32 *)buf);
                buf += 4;
                count -= 4;
        }
        if (count) {
                tmp = readl (&regs->ep_data);
                /* LE conversion is implicit here: */
                do {
                        *buf++ = (u8) tmp;
                        tmp >>= 8;
                } while (--count);
        }
        if (cleanup)
                out_flush (ep);
        if (prevent) {
                /* undo the start_out_naking() from the erratum path */
                writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
                (void) readl (&ep->regs->ep_rsp);
        }

        return is_short || ((req->req.actual == req->req.length)
                                && !req->req.zero);
}
633
634 /* fill out dma descriptor to match a given request */
/* fill out dma descriptor to match a given request */
/* Writes direction, end-of-chain, VALID, and irq-enable flags plus the
 * byte count into req->td.  The dmacount word is stored last, after a
 * write barrier, because the chip may be polling VALID through the
 * descriptor chain while this runs.
 */
static void
fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
        struct net2280_dma      *td = req->td;
        u32                     dmacount = req->req.length;

        /* don't let DMA continue after a short OUT packet,
         * so overruns can't affect the next transfer.
         * in case of overruns on max-size packets, we can't
         * stop the fifo from filling but we can flush it.
         */
        if (ep->is_in)
                dmacount |= (1 << DMA_DIRECTION);
        if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0)
                        || ep->dev->pdev->device != 0x2280)
                dmacount |= (1 << END_OF_CHAIN);

        req->valid = valid;
        if (valid)
                dmacount |= (1 << VALID_BIT);
        if (likely(!req->req.no_interrupt || !use_dma_chaining))
                dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);

        /* td->dmadesc = previously set by caller */
        td->dmaaddr = cpu_to_le32 (req->req.dma);

        /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
        wmb ();
        td->dmacount = cpu_to_le32(dmacount);
}
665
/* default dmactl word: scatter/gather mode with VALID-bit polling
 * (erratum 0116 part 1) and DMA enabled but never auto-started
 * (erratum 0116 part 2); start_queue() kicks DMA_START explicitly.
 */
static const u32 dmactl_default =
                  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
                | (1 << DMA_CLEAR_COUNT_ENABLE)
                /* erratum 0116 workaround part 1 (use POLLING) */
                | (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
                | (1 << DMA_VALID_BIT_POLLING_ENABLE)
                | (1 << DMA_VALID_BIT_ENABLE)
                | (1 << DMA_SCATTER_GATHER_ENABLE)
                /* erratum 0116 workaround part 2 (no AUTOSTART) */
                | (1 << DMA_ENABLE);
676
/* busy-wait (up to 50 usec) for the channel's DMA_ENABLE to clear */
static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
{
        handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
}
681
682 static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
683 {
684         writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
685         spin_stop_dma (dma);
686 }
687
/* Arm the endpoint's dma channel on the descriptor chain starting at
 * @td_dma and kick the first transfer.  Register write order matters:
 * count, latched status, descriptor pointer, control, then DMA_START.
 */
static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
        struct net2280_dma_regs __iomem *dma = ep->dma;
        unsigned int tmp = (1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION);

        if (ep->dev->pdev->device != 0x2280)
                tmp |= (1 << END_OF_CHAIN);

        writel (tmp, &dma->dmacount);
        /* writing dmastat back to itself acks any latched status bits */
        writel (readl (&dma->dmastat), &dma->dmastat);

        writel (td_dma, &dma->dmadesc);
        writel (dmactl, &dma->dmactl);

        /* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
        (void) readl (&ep->dev->pci->pcimstctl);

        writel ((1 << DMA_START), &dma->dmastat);

        if (!ep->is_in)
                stop_out_naking (ep);
}
710
/* Start dma for @req on an idle channel.  For OUT endpoints that are
 * NAKing with data already in the fifo (a previous short packet), a
 * one-shot non-scatter/gather transfer drains the fifo instead of
 * starting the normal descriptor queue.
 */
static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
{
        u32                     tmp;
        struct net2280_dma_regs __iomem *dma = ep->dma;

        /* FIXME can't use DMA for ZLPs */

        /* on this path we "know" there's no dma active (yet) */
        WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
        writel (0, &ep->dma->dmactl);

        /* previous OUT packet might have been short */
        if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
                                & (1 << NAK_OUT_PACKETS)) != 0) {
                writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
                        &ep->regs->ep_stat);

                tmp = readl (&ep->regs->ep_avail);
                if (tmp) {
                        writel (readl (&dma->dmastat), &dma->dmastat);

                        /* transfer all/some fifo data */
                        writel (req->req.dma, &dma->dmaaddr);
                        tmp = min (tmp, req->req.length);

                        /* dma irq, faking scatterlist status */
                        req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
                        writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
                                | tmp, &dma->dmacount);
                        req->td->dmadesc = 0;
                        req->valid = 1;

                        writel ((1 << DMA_ENABLE), &dma->dmactl);
                        writel ((1 << DMA_START), &dma->dmastat);
                        return;
                }
        }

        tmp = dmactl_default;

        /* force packet boundaries between dma requests, but prevent the
         * controller from automagically writing a last "short" packet
         * (zero length) unless the driver explicitly said to do that.
         */
        if (ep->is_in) {
                if (likely ((req->req.length % ep->ep.maxpacket) != 0
                                || req->req.zero)) {
                        tmp |= (1 << DMA_FIFO_VALIDATE);
                        ep->in_fifo_validate = 1;
                } else
                        ep->in_fifo_validate = 0;
        }

        /* init req->td, pointing to the current dummy */
        req->td->dmadesc = cpu_to_le32 (ep->td_dma);
        fill_dma_desc (ep, req, 1);

        if (!use_dma_chaining)
                req->td->dmacount |= cpu_to_le32 (1 << END_OF_CHAIN);

        start_queue (ep, tmp, req->td_dma);
}
773
774 static inline void
775 queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
776 {
777         struct net2280_dma      *end;
778         dma_addr_t              tmp;
779
780         /* swap new dummy for old, link; fill and maybe activate */
781         end = ep->dummy;
782         ep->dummy = req->td;
783         req->td = end;
784
785         tmp = ep->td_dma;
786         ep->td_dma = req->td_dma;
787         req->td_dma = tmp;
788
789         end->dmadesc = cpu_to_le32 (ep->td_dma);
790
791         fill_dma_desc (ep, req, valid);
792 }
793
/* Retire @req: unlink it, settle its status, undo any dma mapping,
 * and invoke the gadget driver's completion callback.  dev->lock is
 * dropped around the callback (gadget completion convention), with
 * ep->stopped forced so the callback can't re-walk the queue.
 */
static void
done (struct net2280_ep *ep, struct net2280_request *req, int status)
{
        struct net2280          *dev;
        unsigned                stopped = ep->stopped;

        list_del_init (&req->queue);

        /* -EINPROGRESS means "normal" completion; anything else was
         * set earlier (e.g. -EOVERFLOW) and is preserved
         */
        if (req->req.status == -EINPROGRESS)
                req->req.status = status;
        else
                status = req->req.status;

        dev = ep->dev;
        if (ep->dma)
                usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

        if (status && status != -ESHUTDOWN)
                VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
                        ep->ep.name, &req->req, status,
                        req->req.actual, req->req.length);

        /* don't modify queue heads during completion callback */
        ep->stopped = 1;
        spin_unlock (&dev->lock);
        req->req.complete (&ep->ep, &req->req);
        spin_lock (&dev->lock);
        ep->stopped = stopped;
}
823
824 /*-------------------------------------------------------------------------*/
825
/*
 * net2280_queue - usb_ep_ops.queue: submit a request on an endpoint.
 * If the queue is empty and the endpoint is running, the transfer is
 * kicked off immediately (DMA or PIO); otherwise it is appended and
 * either chained onto the running DMA queue or left for the irq
 * handler to advance.  Returns 0 or a negative errno.
 */
static int
net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	/* note: container_of() before the !_req check is only pointer
	 * arithmetic; short-circuit evaluation keeps NULL from being
	 * dereferenced below
	 */
	req = container_of (_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty (&req->queue))
		return -EINVAL;
	/* the DMA byte counter bounds a single transfer's length */
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		int ret;

		ret = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (ret)
			return ret;
	}

#if 0
	VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave (&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty (&ep->queue) && !ep->stopped) {
		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma (ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status (ep);
				done (ep, req, 0);
				VDEBUG (dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it.  */
			if (ep->is_in)
				write_fifo (ep, _req);
			else if (list_empty (&ep->queue)) {
				u32	s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl (&ep->regs->ep_stat);
				if ((s & (1 << FIFO_EMPTY)) == 0) {
					/* note:  _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo (ep, req)) {
						/* request fully satisfied
						 * from buffered data
						 */
						done (ep, req, 0);
						if (ep->num == 0)
							allow_status (ep);
						/* don't queue it */
						req = NULL;
					} else
						s = readl (&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & (1 << NAK_OUT_PACKETS)))
					writel ((1 << CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int	valid = 1;

		if (ep->is_in) {
			int	expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely (req->req.zero
				|| (req->req.length % ep->ep.maxpacket) != 0);
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma (ep, req, valid);

	} /* else the irq handler advances the queue. */

	ep->responded = 1;
	if (req)
		list_add_tail (&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore (&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}
947
948 static inline void
949 dma_done (
950         struct net2280_ep *ep,
951         struct net2280_request *req,
952         u32 dmacount,
953         int status
954 )
955 {
956         req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
957         done (ep, req, status);
958 }
959
960 static void restart_dma (struct net2280_ep *ep);
961
/*
 * scan_dma_completions - retire, in queue order, each request at the
 * head of the endpoint queue whose DMA descriptor the hardware has
 * finished (VALID_BIT cleared); stop at the first still-pending one.
 * Called with dev->lock held.
 */
static void scan_dma_completions (struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty (&ep->queue)) {
		struct net2280_request	*req;
		u32			tmp;

		req = list_entry (ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		/* order the dmacount read after the valid check */
		rmb ();
		tmp = le32_to_cpup (&req->td->dmacount);
		if ((tmp & (1 << VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely (req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl (&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done (ep, req, tmp, 0);
			break;
		} else if (!ep->is_in
				&& (req->req.length % ep->ep.maxpacket) != 0) {
			/* OUT request that isn't a maxpacket multiple:
			 * check whether packet sync was kept and whether
			 * data is still sitting in the fifo
			 */
			tmp = readl (&ep->regs->ep_stat);

			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoids errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
				WARNING (ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
				/* fifo gets flushed later */
				ep->out_overflow = 1;
				DEBUG (ep->dev, "%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
				req->req.status = -EOVERFLOW;
			}
		}
		dma_done (ep, req, tmp, 0);
	}
}
1016
/*
 * restart_dma - (re)start the DMA engine on the request at the head of
 * the endpoint queue.  Without chaining this just starts a fresh
 * transfer.  With chaining it must also repair IN "hiccup" state:
 * the engine stopped because the head request's zlp mode disagrees
 * with the engine's in_fifo_validate setting, so descriptor validity
 * flags are rewritten to match the head's mode.  Called with
 * dev->lock held and a non-empty queue.
 */
static void restart_dma (struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			dmactl = dmactl_default;

	if (ep->stopped)
		return;
	req = list_entry (ep->queue.next, struct net2280_request, queue);

	if (!use_dma_chaining) {
		start_dma (ep, req);
		return;
	}

	/* the 2280 will be processing the queue unless queue hiccups after
	 * the previous transfer:
	 *  IN:   wanted automagic zlp, head doesn't (or vice versa)
	 *        DMA_FIFO_VALIDATE doesn't init from dma descriptors.
	 *  OUT:  was "usb-short", we must restart.
	 */
	if (ep->is_in && !req->valid) {
		struct net2280_request	*entry, *prev = NULL;
		int			reqmode, done = 0;

		DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
		/* does the head request want an automagic short packet? */
		ep->in_fifo_validate = likely (req->req.zero
			|| (req->req.length % ep->ep.maxpacket) != 0);
		if (ep->in_fifo_validate)
			dmactl |= (1 << DMA_FIFO_VALIDATE);
		/* re-validate queued entries sharing the head's zlp mode;
		 * invalidate the rest so unlinks behave
		 */
		list_for_each_entry (entry, &ep->queue, queue) {
			__le32		dmacount;

			if (entry == req)
				continue;
			dmacount = entry->td->dmacount;
			if (!done) {
				reqmode = likely (entry->req.zero
					|| (entry->req.length
						% ep->ep.maxpacket) != 0);
				if (reqmode == ep->in_fifo_validate) {
					entry->valid = 1;
					dmacount |= valid_bit;
					entry->td->dmacount = dmacount;
					prev = entry;
					continue;
				} else {
					/* force a hiccup */
					/* NOTE(review): if the first entry
					 * after the head already mismatches,
					 * prev is still NULL here — verify
					 * that case cannot occur
					 */
					prev->td->dmacount |= dma_done_ie;
					done = 1;
				}
			}

			/* walk the rest of the queue so unlinks behave */
			entry->valid = 0;
			dmacount &= ~valid_bit;
			entry->td->dmacount = dmacount;
			prev = entry;
		}
	}

	writel (0, &ep->dma->dmactl);
	start_queue (ep, dmactl, req->td_dma);
}
1080
1081 static void abort_dma (struct net2280_ep *ep)
1082 {
1083         /* abort the current transfer */
1084         if (likely (!list_empty (&ep->queue))) {
1085                 /* FIXME work around errata 0121, 0122, 0124 */
1086                 writel ((1 << DMA_ABORT), &ep->dma->dmastat);
1087                 spin_stop_dma (ep->dma);
1088         } else
1089                 stop_dma (ep->dma);
1090         scan_dma_completions (ep);
1091 }
1092
1093 /* dequeue ALL requests */
1094 static void nuke (struct net2280_ep *ep)
1095 {
1096         struct net2280_request  *req;
1097
1098         /* called with spinlock held */
1099         ep->stopped = 1;
1100         if (ep->dma)
1101                 abort_dma (ep);
1102         while (!list_empty (&ep->queue)) {
1103                 req = list_entry (ep->queue.next,
1104                                 struct net2280_request,
1105                                 queue);
1106                 done (ep, req, -ESHUTDOWN);
1107         }
1108 }
1109
/* dequeue JUST ONE request */
/*
 * net2280_dequeue - usb_ep_ops.dequeue: cancel one queued request,
 * completing it with -ECONNRESET.  Quiesces DMA while the queue is
 * patched; with DMA chaining, hardware descriptor links are rewritten
 * around the removed entry.  Returns 0 or -EINVAL if the request is
 * not queued on this endpoint.
 */
static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		/* save dmactl so it can be restored if we resume below */
		dmactl = readl (&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma (ep->dma);
		scan_dma_completions (ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/* standard idiom: req now either matches or points past the head */
	if (&req->req != _req) {
		spin_unlock_irqrestore (&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma (ep);
			/* abort_dma may already have retired it */
			if (likely (ep->queue.next == &req->queue)) {
				// NOTE: misreports single-transfer mode
				req->td->dmacount = 0;	/* invalidate */
				dma_done (ep, req,
					readl (&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
			done (ep, req, -ECONNRESET);
		}
		req = NULL;

	/* patch up hardware chaining data */
	} else if (ep->dma && use_dma_chaining) {
		if (req->queue.prev == ep->queue.next) {
			/* removing the entry right after the engine's
			 * current descriptor: repoint the engine itself
			 */
			writel (le32_to_cpu (req->td->dmadesc),
				&ep->dma->dmadesc);
			if (req->td->dmacount & dma_done_ie)
				writel (readl (&ep->dma->dmacount)
						| le32_to_cpu(dma_done_ie),
					&ep->dma->dmacount);
		} else {
			struct net2280_request	*prev;

			/* splice the chain around the removed descriptor */
			prev = list_entry (req->queue.prev,
				struct net2280_request, queue);
			prev->td->dmadesc = req->td->dmadesc;
			if (req->td->dmacount & dma_done_ie)
				prev->td->dmacount |= dma_done_ie;
		}
	}

	if (req)
		done (ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty (&ep->queue))
			stop_dma (ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel (dmactl, &ep->dma->dmactl);
			else
				start_dma (ep, list_entry (ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}
1206
1207 /*-------------------------------------------------------------------------*/
1208
1209 static int net2280_fifo_status (struct usb_ep *_ep);
1210
/*
 * net2280_set_halt_and_wedge - set or clear an endpoint's halt state;
 * when @wedged is set along with @value, the ep->wedged flag is also
 * set (cleared again when the halt is cleared).  Refuses ISO
 * endpoints, and -EAGAIN if requests are queued or an IN fifo still
 * holds data.  Returns 0 or a negative errno.
 */
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep	*ep;
	unsigned long		flags;
	int			retval = 0;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	if (!list_empty (&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
		retval = -EAGAIN;
	else {
		VDEBUG (ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			/* ep0 halts are handled via the protocol stall flag */
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt (ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt (ep);
			ep->wedged = 0;
		}
		/* read back to flush the posted PCI write */
		(void) readl (&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore (&ep->dev->lock, flags);

	return retval;
}
1254
/* usb_ep_ops.set_halt: halt (or clear halt) without wedging */
static int
net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}
1260
/* usb_ep_ops.set_wedge: halt the endpoint and mark it wedged;
 * ep0 (identified by its name pointer) is never wedged
 */
static int
net2280_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2280_set_halt_and_wedge(_ep, 1, 1);
}
1268
1269 static int
1270 net2280_fifo_status (struct usb_ep *_ep)
1271 {
1272         struct net2280_ep       *ep;
1273         u32                     avail;
1274
1275         ep = container_of (_ep, struct net2280_ep, ep);
1276         if (!_ep || (!ep->desc && ep->num != 0))
1277                 return -ENODEV;
1278         if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1279                 return -ESHUTDOWN;
1280
1281         avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
1282         if (avail > ep->fifo_size)
1283                 return -EOVERFLOW;
1284         if (ep->is_in)
1285                 avail = ep->fifo_size - avail;
1286         return avail;
1287 }
1288
1289 static void
1290 net2280_fifo_flush (struct usb_ep *_ep)
1291 {
1292         struct net2280_ep       *ep;
1293
1294         ep = container_of (_ep, struct net2280_ep, ep);
1295         if (!_ep || (!ep->desc && ep->num != 0))
1296                 return;
1297         if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1298                 return;
1299
1300         writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
1301         (void) readl (&ep->regs->ep_rsp);
1302 }
1303
/* per-endpoint operations exported to the gadget framework */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
1319
1320 /*-------------------------------------------------------------------------*/
1321
1322 static int net2280_get_frame (struct usb_gadget *_gadget)
1323 {
1324         struct net2280          *dev;
1325         unsigned long           flags;
1326         u16                     retval;
1327
1328         if (!_gadget)
1329                 return -ENODEV;
1330         dev = container_of (_gadget, struct net2280, gadget);
1331         spin_lock_irqsave (&dev->lock, flags);
1332         retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
1333         spin_unlock_irqrestore (&dev->lock, flags);
1334         return retval;
1335 }
1336
1337 static int net2280_wakeup (struct usb_gadget *_gadget)
1338 {
1339         struct net2280          *dev;
1340         u32                     tmp;
1341         unsigned long           flags;
1342
1343         if (!_gadget)
1344                 return 0;
1345         dev = container_of (_gadget, struct net2280, gadget);
1346
1347         spin_lock_irqsave (&dev->lock, flags);
1348         tmp = readl (&dev->usb->usbctl);
1349         if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
1350                 writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
1351         spin_unlock_irqrestore (&dev->lock, flags);
1352
1353         /* pci writes may still be posted */
1354         return 0;
1355 }
1356
1357 static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
1358 {
1359         struct net2280          *dev;
1360         u32                     tmp;
1361         unsigned long           flags;
1362
1363         if (!_gadget)
1364                 return 0;
1365         dev = container_of (_gadget, struct net2280, gadget);
1366
1367         spin_lock_irqsave (&dev->lock, flags);
1368         tmp = readl (&dev->usb->usbctl);
1369         if (value)
1370                 tmp |= (1 << SELF_POWERED_STATUS);
1371         else
1372                 tmp &= ~(1 << SELF_POWERED_STATUS);
1373         writel (tmp, &dev->usb->usbctl);
1374         spin_unlock_irqrestore (&dev->lock, flags);
1375
1376         return 0;
1377 }
1378
1379 static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1380 {
1381         struct net2280  *dev;
1382         u32             tmp;
1383         unsigned long   flags;
1384
1385         if (!_gadget)
1386                 return -ENODEV;
1387         dev = container_of (_gadget, struct net2280, gadget);
1388
1389         spin_lock_irqsave (&dev->lock, flags);
1390         tmp = readl (&dev->usb->usbctl);
1391         dev->softconnect = (is_on != 0);
1392         if (is_on)
1393                 tmp |= (1 << USB_DETECT_ENABLE);
1394         else
1395                 tmp &= ~(1 << USB_DETECT_ENABLE);
1396         writel (tmp, &dev->usb->usbctl);
1397         spin_unlock_irqrestore (&dev->lock, flags);
1398
1399         return 0;
1400 }
1401
1402 static int net2280_start(struct usb_gadget *_gadget,
1403                 struct usb_gadget_driver *driver);
1404 static int net2280_stop(struct usb_gadget *_gadget,
1405                 struct usb_gadget_driver *driver);
1406
/* gadget-level operations exported to the gadget framework */
static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};
1415
1416 /*-------------------------------------------------------------------------*/
1417
1418 #ifdef  CONFIG_USB_GADGET_DEBUG_FILES
1419
1420 /* FIXME move these into procfs, and use seq_file.
1421  * Sysfs _still_ doesn't behave for arbitrarily sized files,
1422  * and also doesn't help products using this with 2.4 kernels.
1423  */
1424
1425 /* "function" sysfs attribute */
1426 static ssize_t
1427 show_function (struct device *_dev, struct device_attribute *attr, char *buf)
1428 {
1429         struct net2280  *dev = dev_get_drvdata (_dev);
1430
1431         if (!dev->driver
1432                         || !dev->driver->function
1433                         || strlen (dev->driver->function) > PAGE_SIZE)
1434                 return 0;
1435         return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1436 }
1437 static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
1438
/*
 * net2280_show_registers - "registers" sysfs attribute: dump the main
 * control, USB control, per-endpoint, and DMA registers plus irq
 * statistics into one page.  The whole snapshot is taken under
 * dev->lock so the register reads are mutually consistent.
 */
static ssize_t net2280_show_registers(struct device *_dev,
				struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;	/* write cursor into buf */
	unsigned		size, t;	/* bytes left / bytes written */
	unsigned long		flags;
	int			i;
	u32			t1, t2;
	const char		*s;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf (next, size, "%s version " DRIVER_VERSION
			", chiprev %04x, dma %s\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			use_dma
				? (use_dma_chaining ? "chaining" : "enabled")
				: "disabled",
			readl (&dev->regs->devinit),
			readl (&dev->regs->fifoctl),
			s,
			readl (&dev->regs->pciirqenb0),
			readl (&dev->regs->pciirqenb1),
			readl (&dev->regs->irqstat0),
			readl (&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl (&dev->usb->usbctl);
	t2 = readl (&dev->usb->usbstat);
	if (t1 & (1 << VBUS_PIN)) {
		if (t2 & (1 << HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
			s = "not attached";
	t = scnprintf (next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl (&dev->usb->stdrsp), t1, t2,
			readl (&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < 7; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep [i];
		/* skip endpoints that aren't configured (ep0 always shows) */
		if (i && !ep->desc)
			continue;

		t1 = readl (&ep->regs->ep_cfg);
		t2 = readl (&ep->regs->ep_rsp) & 0xff;
		t = scnprintf (next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & (1 << CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & (1 << CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl (&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf (next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl (&ep->regs->ep_stat),
				readl (&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING (t1),
				type_string (t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf (next, size,
				"  dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl (&ep->dma->dmactl),
				readl (&ep->dma->dmastat),
				readl (&ep->dma->dmacount),
				readl (&ep->dma->dmaaddr),
				readl (&ep->dma->dmadesc));
		size -= t;
		next += t;

	}

	/* Indexed Registers */
		// none yet

	/* Statistics */
	t = scnprintf (next, size, "\nirqs:  ");
	size -= t;
	next += t;
	for (i = 0; i < 7; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep [i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;

	}
	t = scnprintf (next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore (&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR(registers, S_IRUGO, net2280_show_registers, NULL);
1593
/* sysfs "queues" attribute: dump per-endpoint request queues into *buf.
 * Returns the number of bytes written (at most PAGE_SIZE).  Holds
 * dev->lock for the whole walk so the queues can't change under us.
 */
static ssize_t
show_queues (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;	/* write cursor into buf */
	unsigned		size;	/* bytes remaining in buf */
	unsigned long		flags;
	int			i;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	/* ep0 plus the six named endpoints ep-a..ep-f */
	for (i = 0; i < 7; i++) {
		struct net2280_ep		*ep = &dev->ep [i];
		struct net2280_request		*req;
		int				t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			/* skip endpoints that aren't configured */
			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf (next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				/* GCC statement expression: map transfer
				 * type bits to a short label
				 */
				({ char *val;
				 switch (d->bmAttributes & 0x03) {
				 case USB_ENDPOINT_XFER_BULK:
					val = "bulk"; break;
				 case USB_ENDPOINT_XFER_INT:
					val = "intr"; break;
				 default:
					val = "iso"; break;
				 }; val; }),
				usb_endpoint_maxp (d) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf (next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		/* bail out once the page is full (or scnprintf failed) */
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty (&ep->queue)) {
			t = scnprintf (next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry (req, &ep->queue, queue) {
			/* for the request currently being DMAed, also show
			 * the live dmacount register
			 */
			if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
				t = scnprintf (next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl (&ep->dma->dmacount));
			else
				t = scnprintf (next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma	*td;

				/* in-memory transfer descriptor (little
				 * endian, hence the le32_to_cpu)
				 */
				td = req->td;
				t = scnprintf (next, size, "\t    td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu (td->dmacount),
					le32_to_cpu (td->dmaaddr),
					le32_to_cpu (td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore (&dev->lock, flags);
	return PAGE_SIZE - size;
}
1692 static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
1693
1694
1695 #else
1696
1697 #define device_create_file(a,b) (0)
1698 #define device_remove_file(a,b) do { } while (0)
1699
1700 #endif
1701
1702 /*-------------------------------------------------------------------------*/
1703
1704 /* another driver-specific mode might be a request type doing dma
1705  * to/from another device fifo instead of to/from memory.
1706  */
1707
1708 static void set_fifo_mode (struct net2280 *dev, int mode)
1709 {
1710         /* keeping high bits preserves BAR2 */
1711         writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1712
1713         /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1714         INIT_LIST_HEAD (&dev->gadget.ep_list);
1715         list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
1716         list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
1717         switch (mode) {
1718         case 0:
1719                 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1720                 list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
1721                 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
1722                 break;
1723         case 1:
1724                 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
1725                 break;
1726         case 2:
1727                 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1728                 dev->ep [1].fifo_size = 2048;
1729                 dev->ep [2].fifo_size = 1024;
1730                 break;
1731         }
1732         /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1733         list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
1734         list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
1735 }
1736
1737 /* keeping it simple:
1738  * - one bus driver, initted first;
1739  * - one function driver, initted second
1740  *
1741  * most of the work to support multiple net2280 controllers would
1742  * be to associate this gadget driver (yes?) with all of them, or
1743  * perhaps to bind specific drivers to specific devices.
1744  */
1745
1746 static void usb_reset (struct net2280 *dev)
1747 {
1748         u32     tmp;
1749
1750         dev->gadget.speed = USB_SPEED_UNKNOWN;
1751         (void) readl (&dev->usb->usbctl);
1752
1753         net2280_led_init (dev);
1754
1755         /* disable automatic responses, and irqs */
1756         writel (0, &dev->usb->stdrsp);
1757         writel (0, &dev->regs->pciirqenb0);
1758         writel (0, &dev->regs->pciirqenb1);
1759
1760         /* clear old dma and irq state */
1761         for (tmp = 0; tmp < 4; tmp++) {
1762                 struct net2280_ep       *ep = &dev->ep [tmp + 1];
1763
1764                 if (ep->dma)
1765                         abort_dma (ep);
1766         }
1767         writel (~0, &dev->regs->irqstat0),
1768         writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
1769
1770         /* reset, and enable pci */
1771         tmp = readl (&dev->regs->devinit)
1772                 | (1 << PCI_ENABLE)
1773                 | (1 << FIFO_SOFT_RESET)
1774                 | (1 << USB_SOFT_RESET)
1775                 | (1 << M8051_RESET);
1776         writel (tmp, &dev->regs->devinit);
1777
1778         /* standard fifo and endpoint allocations */
1779         set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
1780 }
1781
1782 static void usb_reinit (struct net2280 *dev)
1783 {
1784         u32     tmp;
1785         int     init_dma;
1786
1787         /* use_dma changes are ignored till next device re-init */
1788         init_dma = use_dma;
1789
1790         /* basic endpoint init */
1791         for (tmp = 0; tmp < 7; tmp++) {
1792                 struct net2280_ep       *ep = &dev->ep [tmp];
1793
1794                 ep->ep.name = ep_name [tmp];
1795                 ep->dev = dev;
1796                 ep->num = tmp;
1797
1798                 if (tmp > 0 && tmp <= 4) {
1799                         ep->fifo_size = 1024;
1800                         if (init_dma)
1801                                 ep->dma = &dev->dma [tmp - 1];
1802                 } else
1803                         ep->fifo_size = 64;
1804                 ep->regs = &dev->epregs [tmp];
1805                 ep_reset (dev->regs, ep);
1806         }
1807         dev->ep [0].ep.maxpacket = 64;
1808         dev->ep [5].ep.maxpacket = 64;
1809         dev->ep [6].ep.maxpacket = 64;
1810
1811         dev->gadget.ep0 = &dev->ep [0].ep;
1812         dev->ep [0].stopped = 0;
1813         INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1814
1815         /* we want to prevent lowlevel/insecure access from the USB host,
1816          * but erratum 0119 means this enable bit is ignored
1817          */
1818         for (tmp = 0; tmp < 5; tmp++)
1819                 writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
1820 }
1821
/* Arm ep0 and enable the irq sources needed for enumeration: clear any
 * stale ep0 response state, tell the chip which standard requests to
 * answer in hardware, and unmask setup/ep0/link-event interrupts.
 */
static void ep0_start (struct net2280 *dev)
{
	/* drop leftover NAK/handshake state from any earlier session */
	writel (  (1 << CLEAR_EP_HIDE_STATUS_PHASE)
		| (1 << CLEAR_NAK_OUT_PACKETS)
		| (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
		, &dev->epregs [0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel (  (1 << SET_TEST_MODE)
		| (1 << SET_ADDRESS)
		| (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
		| (1 << GET_DEVICE_STATUS)
		| (1 << GET_INTERFACE_STATUS)
		, &dev->usb->stdrsp);
	/* softconnect decides whether the host sees us at all */
	writel (  (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
		| (1 << SELF_POWERED_USB_DEVICE)
		| (1 << REMOTE_WAKEUP_SUPPORT)
		| (dev->softconnect << USB_DETECT_ENABLE)
		| (1 << SELF_POWERED_STATUS)
		, &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation  */
	writel (  (1 << SETUP_PACKET_INTERRUPT_ENABLE)
		| (1 << ENDPOINT_0_INTERRUPT_ENABLE)
		, &dev->regs->pciirqenb0);
	writel (  (1 << PCI_INTERRUPT_ENABLE)
		| (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
		| (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
		| (1 << VBUS_INTERRUPT_ENABLE)
		| (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
		| (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
		, &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl (&dev->usb->usbctl);
}
1864
1865 /* when a driver is successfully registered, it will receive
1866  * control requests including set_configuration(), which enables
1867  * non-control requests.  then usb traffic follows until a
1868  * disconnect is reported.  then a host may connect again, or
1869  * the driver might get unbound.
1870  */
/* udc_start hook: accept a gadget driver, publish the sysfs attributes,
 * then enable host detection and ep0.  Returns 0 or a negative errno.
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280		*dev;
	int			retval;
	unsigned		i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH
			|| !driver->setup)
		return -EINVAL;

	dev = container_of (_gadget, struct net2280, gadget);

	/* fresh irq statistics for this driver's session */
	for (i = 0; i < 7; i++)
		dev->ep [i].irqs = 0;

	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;

	retval = device_create_file (&dev->pdev->dev, &dev_attr_function);
	if (retval) goto err_unbind;
	retval = device_create_file (&dev->pdev->dev, &dev_attr_queues);
	if (retval) goto err_func;

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active (dev, 1);
	ep0_start (dev);

	DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
			driver->driver.name,
			readl (&dev->usb->usbctl),
			readl (&dev->usb->stdrsp));

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file (&dev->pdev->dev, &dev_attr_function);
err_unbind:
	/* NOTE(review): this calls driver->unbind even though bind was
	 * done by udc-core before udc_start; confirm the core does not
	 * also unbind on a udc_start failure (risk of double unbind).
	 */
	driver->unbind (&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;
	return retval;
}
1924
1925 static void
1926 stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
1927 {
1928         int                     i;
1929
1930         /* don't disconnect if it's not connected */
1931         if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1932                 driver = NULL;
1933
1934         /* stop hardware; prevent new request submissions;
1935          * and kill any outstanding requests.
1936          */
1937         usb_reset (dev);
1938         for (i = 0; i < 7; i++)
1939                 nuke (&dev->ep [i]);
1940
1941         usb_reinit (dev);
1942 }
1943
1944 static int net2280_stop(struct usb_gadget *_gadget,
1945                 struct usb_gadget_driver *driver)
1946 {
1947         struct net2280  *dev;
1948         unsigned long   flags;
1949
1950         dev = container_of (_gadget, struct net2280, gadget);
1951
1952         spin_lock_irqsave (&dev->lock, flags);
1953         stop_activity (dev, driver);
1954         spin_unlock_irqrestore (&dev->lock, flags);
1955
1956         dev->gadget.dev.driver = NULL;
1957         dev->driver = NULL;
1958
1959         net2280_led_active (dev, 0);
1960         device_remove_file (&dev->pdev->dev, &dev_attr_function);
1961         device_remove_file (&dev->pdev->dev, &dev_attr_queues);
1962
1963         DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
1964         return 0;
1965 }
1966
1967 /*-------------------------------------------------------------------------*/
1968
1969 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
1970  * also works for dma-capable endpoints, in pio mode or just
1971  * to manually advance the queue after short OUT transfers.
1972  */
static void handle_ep_small (struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			t;	/* snapshot of ep_stat */
	/* 0 error, 1 mid-data, 2 done */
	int			mode = 1;

	/* current request, if any, is always at the head of the queue */
	if (!list_empty (&ep->queue))
		req = list_entry (ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl (&ep->regs->ep_stat);
	ep->irqs++;
#if 0
	VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : 0);
#endif
	/* on 2280 (and for OUT eps generally) preserve NAK_OUT_PACKETS
	 * while acking; the 2282 IN path acks everything
	 */
	if (!ep->is_in || ep->dev->pdev->device == 0x2280)
		writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel (t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely (ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				if (!req)
					allow_status (ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo (ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
					&& req
					&& req->req.actual == req->req.length)
					|| (ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt (ep);
				ep->stopped = 1;
				if (req)
					done (ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	/* nothing queued: no data to move, we're finished */
	if (unlikely (!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely (ep->dma != 0)) {
		if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			u32	count;
			int	stopped = ep->stopped;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			/* poll until prior DMA drains or the fifo empties;
			 * re-read ep_stat on each pass
			 */
			for (count = 0; ; t = readl (&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				scan_dma_completions (ep);
				if (unlikely (list_empty (&ep->queue)
						|| ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely (t & (1 << FIFO_EMPTY))) {
					count = readl (&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl (&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}
				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel ((1 << DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma (ep->dma);

			if (likely (req)) {
				req->td->dmacount = 0;
				t = readl (&ep->regs->ep_avail);
				/* leftover fifo bytes mean overflow */
				dma_done (ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely (ep->out_overflow
					|| (ep->dev->chiprev == 0x0100
						&& ep->dev->gadget.speed
							== USB_SPEED_FULL))) {
				out_flush (ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty (&ep->queue))
				restart_dma (ep);
		} else
			DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo (ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned	len;

		/* credit the request with one (max) packet's worth */
		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		if (req->req.actual == req->req.length) {
			if (ep->num == 0) {
				/* send zlps until the status stage */
			} else if (!req->req.zero || len != ep->ep.maxpacket)
				mode = 2;
		}

	/* there was nothing to do ...  */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done (ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status (ep);
			req = NULL;
		} else {
			if (!list_empty (&ep->queue) && !ep->stopped)
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking (ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo (ep, &req->req);
	}
}
2186
2187 static struct net2280_ep *
2188 get_ep_by_addr (struct net2280 *dev, u16 wIndex)
2189 {
2190         struct net2280_ep       *ep;
2191
2192         if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2193                 return &dev->ep [0];
2194         list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
2195                 u8      bEndpointAddress;
2196
2197                 if (!ep->desc)
2198                         continue;
2199                 bEndpointAddress = ep->desc->bEndpointAddress;
2200                 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2201                         continue;
2202                 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2203                         return ep;
2204         }
2205         return NULL;
2206 }
2207
2208 static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
2209 {
2210         struct net2280_ep       *ep;
2211         u32                     num, scratch;
2212
2213         /* most of these don't need individual acks */
2214         stat &= ~(1 << INTA_ASSERTED);
2215         if (!stat)
2216                 return;
2217         // DEBUG (dev, "irqstat0 %04x\n", stat);
2218
2219         /* starting a control request? */
2220         if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
2221                 union {
2222                         u32                     raw [2];
2223                         struct usb_ctrlrequest  r;
2224                 } u;
2225                 int                             tmp;
2226                 struct net2280_request          *req;
2227
2228                 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
2229                         if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
2230                                 dev->gadget.speed = USB_SPEED_HIGH;
2231                         else
2232                                 dev->gadget.speed = USB_SPEED_FULL;
2233                         net2280_led_speed (dev, dev->gadget.speed);
2234                         DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed));
2235                 }
2236
2237                 ep = &dev->ep [0];
2238                 ep->irqs++;
2239
2240                 /* make sure any leftover request state is cleared */
2241                 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
2242                 while (!list_empty (&ep->queue)) {
2243                         req = list_entry (ep->queue.next,
2244                                         struct net2280_request, queue);
2245                         done (ep, req, (req->req.actual == req->req.length)
2246                                                 ? 0 : -EPROTO);
2247                 }
2248                 ep->stopped = 0;
2249                 dev->protocol_stall = 0;
2250
2251                 if (ep->dev->pdev->device == 0x2280)
2252                         tmp = (1 << FIFO_OVERFLOW)
2253                                 | (1 << FIFO_UNDERFLOW);
2254                 else
2255                         tmp = 0;
2256
2257                 writel (tmp | (1 << TIMEOUT)
2258                         | (1 << USB_STALL_SENT)
2259                         | (1 << USB_IN_NAK_SENT)
2260                         | (1 << USB_IN_ACK_RCVD)
2261                         | (1 << USB_OUT_PING_NAK_SENT)
2262                         | (1 << USB_OUT_ACK_SENT)
2263                         | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
2264                         | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
2265                         | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
2266                         | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
2267                         | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2268                         | (1 << DATA_IN_TOKEN_INTERRUPT)
2269                         , &ep->regs->ep_stat);
2270                 u.raw [0] = readl (&dev->usb->setup0123);
2271                 u.raw [1] = readl (&dev->usb->setup4567);
2272
2273                 cpu_to_le32s (&u.raw [0]);
2274                 cpu_to_le32s (&u.raw [1]);
2275
2276                 tmp = 0;
2277
2278 #define w_value         le16_to_cpu(u.r.wValue)
2279 #define w_index         le16_to_cpu(u.r.wIndex)
2280 #define w_length        le16_to_cpu(u.r.wLength)
2281
2282                 /* ack the irq */
2283                 writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
2284                 stat ^= (1 << SETUP_PACKET_INTERRUPT);
2285
2286                 /* watch control traffic at the token level, and force
2287                  * synchronization before letting the status stage happen.
2288                  * FIXME ignore tokens we'll NAK, until driver responds.
2289                  * that'll mean a lot less irqs for some drivers.
2290                  */
2291                 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
2292                 if (ep->is_in) {
2293                         scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
2294                                 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2295                                 | (1 << DATA_IN_TOKEN_INTERRUPT);
2296                         stop_out_naking (ep);
2297                 } else
2298                         scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
2299                                 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2300                                 | (1 << DATA_IN_TOKEN_INTERRUPT);
2301                 writel (scratch, &dev->epregs [0].ep_irqenb);
2302
2303                 /* we made the hardware handle most lowlevel requests;
2304                  * everything else goes uplevel to the gadget code.
2305                  */
2306                 ep->responded = 1;
2307                 switch (u.r.bRequest) {
2308                 case USB_REQ_GET_STATUS: {
2309                         struct net2280_ep       *e;
2310                         __le32                  status;
2311
2312                         /* hw handles device and interface status */
2313                         if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
2314                                 goto delegate;
2315                         if ((e = get_ep_by_addr (dev, w_index)) == 0
2316                                         || w_length > 2)
2317                                 goto do_stall;
2318
2319                         if (readl (&e->regs->ep_rsp)
2320                                         & (1 << SET_ENDPOINT_HALT))
2321                                 status = cpu_to_le32 (1);
2322                         else
2323                                 status = cpu_to_le32 (0);
2324
2325                         /* don't bother with a request object! */
2326                         writel (0, &dev->epregs [0].ep_irqenb);
2327                         set_fifo_bytecount (ep, w_length);
2328                         writel ((__force u32)status, &dev->epregs [0].ep_data);
2329                         allow_status (ep);
2330                         VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
2331                         goto next_endpoints;
2332                         }
2333                         break;
2334                 case USB_REQ_CLEAR_FEATURE: {
2335                         struct net2280_ep       *e;
2336
2337                         /* hw handles device features */
2338                         if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2339                                 goto delegate;
2340                         if (w_value != USB_ENDPOINT_HALT
2341                                         || w_length != 0)
2342                                 goto do_stall;
2343                         if ((e = get_ep_by_addr (dev, w_index)) == 0)
2344                                 goto do_stall;
2345                         if (e->wedged) {
2346                                 VDEBUG(dev, "%s wedged, halt not cleared\n",
2347                                                 ep->ep.name);
2348                         } else {
2349                                 VDEBUG(dev, "%s clear halt\n", ep->ep.name);
2350                                 clear_halt(e);
2351                         }
2352                         allow_status (ep);
2353                         goto next_endpoints;
2354                         }
2355                         break;
2356                 case USB_REQ_SET_FEATURE: {
2357                         struct net2280_ep       *e;
2358
2359                         /* hw handles device features */
2360                         if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2361                                 goto delegate;
2362                         if (w_value != USB_ENDPOINT_HALT
2363                                         || w_length != 0)
2364                                 goto do_stall;
2365                         if ((e = get_ep_by_addr (dev, w_index)) == 0)
2366                                 goto do_stall;
2367                         if (e->ep.name == ep0name)
2368                                 goto do_stall;
2369                         set_halt (e);
2370                         allow_status (ep);
2371                         VDEBUG (dev, "%s set halt\n", ep->ep.name);
2372                         goto next_endpoints;
2373                         }
2374                         break;
2375                 default:
2376 delegate:
2377                         VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x "
2378                                 "ep_cfg %08x\n",
2379                                 u.r.bRequestType, u.r.bRequest,
2380                                 w_value, w_index, w_length,
2381                                 readl (&ep->regs->ep_cfg));
2382                         ep->responded = 0;
2383                         spin_unlock (&dev->lock);
2384                         tmp = dev->driver->setup (&dev->gadget, &u.r);
2385                         spin_lock (&dev->lock);
2386                 }
2387
2388                 /* stall ep0 on error */
2389                 if (tmp < 0) {
2390 do_stall:
2391                         VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
2392                                         u.r.bRequestType, u.r.bRequest, tmp);
2393                         dev->protocol_stall = 1;
2394                 }
2395
2396                 /* some in/out token irq should follow; maybe stall then.
2397                  * driver must queue a request (even zlp) or halt ep0
2398                  * before the host times out.
2399                  */
2400         }
2401
2402 #undef  w_value
2403 #undef  w_index
2404 #undef  w_length
2405
2406 next_endpoints:
2407         /* endpoint data irq ? */
2408         scratch = stat & 0x7f;
2409         stat &= ~0x7f;
2410         for (num = 0; scratch; num++) {
2411                 u32             t;
2412
2413                 /* do this endpoint's FIFO and queue need tending? */
2414                 t = 1 << num;
2415                 if ((scratch & t) == 0)
2416                         continue;
2417                 scratch ^= t;
2418
2419                 ep = &dev->ep [num];
2420                 handle_ep_small (ep);
2421         }
2422
2423         if (stat)
2424                 DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
2425 }
2426
/* irqstat1 bits covering the four DMA channels (ep-a .. ep-d) */
#define DMA_INTERRUPTS ( \
		  (1 << DMA_D_INTERRUPT) \
		| (1 << DMA_C_INTERRUPT) \
		| (1 << DMA_B_INTERRUPT) \
		| (1 << DMA_A_INTERRUPT))
/* irqstat1 bits reporting fatal PCI bus-master aborts/retry failures */
#define PCI_ERROR_INTERRUPTS ( \
		  (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
		| (1 << PCI_RETRY_ABORT_INTERRUPT))
2436
/* Handle irqstat1 events:  VBUS disconnect / root port reset, suspend
 * and resume requests, per-channel DMA completions for ep-{a,b,c,d},
 * and fatal PCI bus errors.  Called with dev->lock held from the
 * top-level interrupt handler; each event is acked by writing its
 * bit back to irqstat1 before (or while) it is processed.
 */
static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
	mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		/* ack both bits before deciding if this is a disconnect */
		writel (tmp, &dev->regs->irqstat1);
		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT))
					&& ((readl (&dev->usb->usbstat) & mask)
							== 0))
				|| ((readl (&dev->usb->usbctl)
					& (1 << VBUS_PIN)) == 0)
		    ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
			DEBUG (dev, "disconnect %s\n",
					dev->driver->driver.name);
			/* flush queues, notify the gadget driver, and
			 * re-arm ep0 for the next enumeration
			 */
			stop_activity (dev, dev->driver);
			ep0_start (dev);
			return;
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		/* the CHANGE irq fires on both edges; the REQUEST bit
		 * tells us whether this is a suspend or a resume
		 */
		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend (&dev->gadget);
			if (!enable_suspend)
				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
		} else {
			if (dev->driver->resume)
				dev->driver->resume (&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel (stat, &dev->regs->irqstat1);

	/* some status we can just ignore */
	if (dev->pdev->device == 0x2280)
		stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			  | (1 << SUSPEND_REQUEST_INTERRUPT)
			  | (1 << RESUME_INTERRUPT)
			  | (1 << SOF_INTERRUPT));
	else
		stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			  | (1 << RESUME_INTERRUPT)
			  | (1 << SOF_DOWN_INTERRUPT)
			  | (1 << SOF_INTERRUPT));

	if (!stat)
		return;
	// DEBUG (dev, "irqstat1 %08x\n", stat);

	/* DMA status, for ep-{a,b,c,d} */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	/* shift so channel 0 of the mask corresponds to ep-a */
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = 1 << num;
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		/* dev->ep[0] is ep0 (no DMA); channels map to ep[1..4] */
		ep = &dev->ep [num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl (&dma->dmastat);
		writel (tmp, &dma->dmastat);

		/* chaining should stop on abort, short OUT from fifo,
		 * or (stat0 codepath) short OUT transfer.
		 */
		if (!use_dma_chaining) {
			if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
					== 0) {
				DEBUG (ep->dev, "%s no xact done? %08x\n",
					ep->ep.name, tmp);
				continue;
			}
			stop_dma (ep->dma);
		}

		/* OUT transfers terminate when the data from the
		 * host is in our memory.  Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions (ep);

		/* disable dma on inactive queues; else maybe restart */
		if (list_empty (&ep->queue)) {
			if (use_dma_chaining)
				stop_dma (ep->dma);
		} else {
			tmp = readl (&dma->dmactl);
			if (!use_dma_chaining
					|| (tmp & (1 << DMA_ENABLE)) == 0)
				restart_dma (ep);
			else if (ep->is_in && use_dma_chaining) {
				struct net2280_request	*req;
				__le32			dmacount;

				/* the descriptor at the head of the chain
				 * may still have VALID_BIT clear; that's
				 * used to trigger changing DMA_FIFO_VALIDATE
				 * (affects automagic zlp writes).
				 */
				req = list_entry (ep->queue.next,
						struct net2280_request, queue);
				dmacount = req->td->dmacount;
				dmacount &= cpu_to_le32 (
						(1 << VALID_BIT)
						| DMA_BYTE_COUNT_MASK);
				if (dmacount && (dmacount & valid_bit) == 0)
					restart_dma (ep);
			}
		}
		ep->irqs++;
	}

	/* NOTE:  there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ERROR (dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity (dev, dev->driver);
		ep0_start (dev);
		stat = 0;
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
}
2608
2609 static irqreturn_t net2280_irq (int irq, void *_dev)
2610 {
2611         struct net2280          *dev = _dev;
2612
2613         /* shared interrupt, not ours */
2614         if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED)))
2615                 return IRQ_NONE;
2616
2617         spin_lock (&dev->lock);
2618
2619         /* handle disconnect, dma, and more */
2620         handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
2621
2622         /* control requests and PIO */
2623         handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
2624
2625         spin_unlock (&dev->lock);
2626
2627         return IRQ_HANDLED;
2628 }
2629
2630 /*-------------------------------------------------------------------------*/
2631
/* release callback for the gadget device: frees the driver state
 * stashed in drvdata once the last reference is dropped
 */
static void gadget_release (struct device *_dev)
{
	struct net2280	*dev;

	dev = dev_get_drvdata (_dev);
	kfree (dev);
}
2638
2639 /* tear down the binding between this driver and the pci device */
2640
/* tear down the binding between this driver and the pci device;
 * also used as the unwind path for a partially-completed probe():
 * each resource is released only if its flag/pointer was set.
 */
static void net2280_remove (struct pci_dev *pdev)
{
	struct net2280		*dev = pci_get_drvdata (pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* any gadget driver must already have been unbound */
	BUG_ON(dev->driver);

	/* then clean up the resources we allocated during probe() */
	net2280_led_shutdown (dev);
	/* dummy DMA descriptors were allocated for ep-a..ep-d (1..4) */
	if (dev->requests) {
		int		i;
		for (i = 1; i < 5; i++) {
			if (!dev->ep [i].dummy)
				continue;
			pci_pool_free (dev->requests, dev->ep [i].dummy,
					dev->ep [i].td_dma);
		}
		pci_pool_destroy (dev->requests);
	}
	if (dev->got_irq)
		free_irq (pdev->irq, dev);
	if (dev->regs)
		iounmap (dev->regs);
	if (dev->region)
		release_mem_region (pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device (pdev);
	/* NOTE(review): gadget.dev.release is gadget_release(), which
	 * kfrees dev; if device_unregister() drops the final reference
	 * here, the INFO() below touches freed memory — verify the
	 * refcounting, or log before unregistering.
	 */
	device_unregister (&dev->gadget.dev);
	device_remove_file (&pdev->dev, &dev_attr_registers);
	pci_set_drvdata (pdev, NULL);

	INFO (dev, "unbind\n");
}
2676
2677 /* wrap this driver around the specified device, but
2678  * don't respond over USB until a gadget driver binds to us.
2679  */
2680
/* Bind to one NET2280/2282 PCI function: enable the device, map BAR 0,
 * reset the chip, claim the (shared) IRQ, build the DMA descriptor
 * pool, and register the UDC.  Every "goto done" unwinds through
 * net2280_remove(), which releases only what the dev->* flags record.
 */
static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2280		*dev;
	unsigned long		resource, len;
	void			__iomem *base = NULL;
	int			retval, i;

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (dev == NULL){
		retval = -ENOMEM;
		goto done;
	}

	pci_set_drvdata (pdev, dev);
	spin_lock_init (&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &net2280_ops;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	if (pci_enable_device (pdev) < 0) {
		retval = -ENODEV;
		goto done;
	}
	/* progress flag: tells net2280_remove() to pci_disable_device() */
	dev->enabled = 1;

	/* BAR 0 holds all the registers
	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
	 * BAR 2 is fifo memory; unused here
	 */
	resource = pci_resource_start (pdev, 0);
	len = pci_resource_len (pdev, 0);
	if (!request_mem_region (resource, len, driver_name)) {
		DEBUG (dev, "controller already in use\n");
		retval = -EBUSY;
		goto done;
	}
	dev->region = 1;

	/* FIXME provide firmware download interface to put
	 * 8051 code into the chip, e.g. to turn on PCI PM.
	 */

	base = ioremap_nocache (resource, len);
	if (base == NULL) {
		DEBUG (dev, "can't map memory\n");
		retval = -EFAULT;
		goto done;
	}
	/* register banks live at fixed offsets within BAR 0 */
	dev->regs = (struct net2280_regs __iomem *) base;
	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);

	/* put into initial config, link up all endpoints */
	writel (0, &dev->usb->usbctl);
	usb_reset (dev);
	usb_reinit (dev);

	/* irq setup after old hardware is cleaned up */
	if (!pdev->irq) {
		ERROR (dev, "No IRQ.  Check PCI setup!\n");
		retval = -ENODEV;
		goto done;
	}

	if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev)
			!= 0) {
		ERROR (dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;

	/* DMA setup */
	/* NOTE:  we know only the 32 LSBs of dma addresses may be nonzero */
	dev->requests = pci_pool_create ("requests", pdev,
		sizeof (struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
	if (!dev->requests) {
		DEBUG (dev, "can't get request pool\n");
		retval = -ENOMEM;
		goto done;
	}
	/* one "dummy" descriptor per DMA-capable endpoint (ep-a..ep-d) */
	for (i = 1; i < 5; i++) {
		struct net2280_dma	*td;

		td = pci_pool_alloc (dev->requests, GFP_KERNEL,
				&dev->ep [i].td_dma);
		if (!td) {
			DEBUG (dev, "can't get dummy %d\n", i);
			retval = -ENOMEM;
			goto done;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmaaddr = cpu_to_le32 (DMA_ADDR_INVALID);
		td->dmadesc = td->dmaaddr;
		dev->ep [i].dummy = td;
	}

	/* enable lower-overhead pci memory bursts during DMA */
	writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
			// 256 write retries may not be enough...
			// | (1 << PCI_RETRY_ABORT_ENABLE)
			| (1 << DMA_READ_MULTIPLE_ENABLE)
			| (1 << DMA_READ_LINE_ENABLE)
			, &dev->pci->pcimstctl);
	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
	pci_set_master (pdev);
	pci_try_set_mwi (pdev);

	/* ... also flushes any posted pci writes */
	dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;

	/* done */
	INFO (dev, "%s\n", driver_desc);
	INFO (dev, "irq %d, pci mem %p, chip rev %04x\n",
			pdev->irq, base, dev->chiprev);
	INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
			use_dma
				? (use_dma_chaining ? "chaining" : "enabled")
				: "disabled");
	retval = device_register (&dev->gadget.dev);
	if (retval) goto done;
	retval = device_create_file (&pdev->dev, &dev_attr_registers);
	if (retval) goto done;

	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (retval)
		goto done;
	return 0;

done:
	/* shared unwind: remove() checks each progress flag before freeing */
	if (dev)
		net2280_remove (pdev);
	return retval;
}
2829
2830 /* make sure the board is quiescent; otherwise it will continue
2831  * generating IRQs across the upcoming reboot.
2832  */
2833
2834 static void net2280_shutdown (struct pci_dev *pdev)
2835 {
2836         struct net2280          *dev = pci_get_drvdata (pdev);
2837
2838         /* disable IRQs */
2839         writel (0, &dev->regs->pciirqenb0);
2840         writel (0, &dev->regs->pciirqenb1);
2841
2842         /* disable the pullup so the host will think we're gone */
2843         writel (0, &dev->usb->usbctl);
2844 }
2845
2846
2847 /*-------------------------------------------------------------------------*/
2848
/* match the 2280 and 2282 device controllers from PLX (formerly
 * NetChip), vendor id 0x17cc; both advertise serial-USB class with
 * programming interface 0xfe
 */
static const struct pci_device_id pci_ids [] = { {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	0x17cc,
	.device =	0x2280,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
}, {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	0x17cc,
	.device =	0x2282,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
2867
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	net2280_probe,
	.remove =	net2280_remove,
	.shutdown =	net2280_shutdown,	/* quiesce IRQs + pullup on reboot */

	/* FIXME add power management support */
};
2879
/* module metadata reported by modinfo */
MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");
2883
2884 static int __init init (void)
2885 {
2886         if (!use_dma)
2887                 use_dma_chaining = 0;
2888         return pci_register_driver (&net2280_pci_driver);
2889 }
2890 module_init (init);
2891
/* module unload: unregistering the driver unbinds any devices */
static void __exit cleanup (void)
{
	pci_unregister_driver (&net2280_pci_driver);
}
module_exit (cleanup);