profile/mobile/platform/kernel/linux-3.10-sc7730.git: drivers/usb/gadget/u_ether.c
1 /*
2  * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
3  *
4  * Copyright (C) 2003-2005,2008 David Brownell
5  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6  * Copyright (C) 2008 Nokia Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13
14 /* #define VERBOSE_DEBUG */
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/gfp.h>
19 #include <linux/device.h>
20 #include <linux/ctype.h>
21 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
24
25 #include "u_ether.h"
26
27
28 /*
29  * This component encapsulates the Ethernet link glue needed to provide
30  * one (!) network link through the USB gadget stack, normally "usb0".
31  *
32  * The control and data models are handled by the function driver which
33  * connects to this code; such as CDC Ethernet (ECM or EEM),
34  * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
35  * management.
36  *
37  * Link level addressing is handled by this component using module
38  * parameters; if no such parameters are provided, random link level
39  * addresses are used.  Each end of the link uses one address.  The
40  * host end address is exported in various ways, and is often recorded
41  * in configuration databases.
42  *
43  * The driver which assembles each configuration using such a link is
44  * responsible for ensuring that each configuration includes at most one
45  * instance of this network link.  (The network layer provides ways for
46  * this single "physical" link to be used by multiple virtual links.)
47  */
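/*
 * A minimal sketch of how a function driver plugs into this layer.  It is
 * illustrative only: the real struct gether layout lives in u_ether.h, the
 * endpoints come from usb_ep_autoconfig() in the function's bind() code,
 * and "f_foo"/"foo_bind" are made-up names.  Only fields that this file
 * actually dereferences are shown.
 *
 *	struct f_foo {
 *		struct gether	port;
 *	};
 *
 *	static int foo_bind(struct f_foo *foo, struct eth_dev *ethdev)
 *	{
 *		foo->port.ioport = ethdev;	(from gether_setup_name())
 *		foo->port.in_ep = ...;		(bulk IN endpoint)
 *		foo->port.out_ep = ...;		(bulk OUT endpoint)
 *		foo->port.header_len = 0;	(e.g. RNDIS uses its header size)
 *		foo->port.wrap = NULL;		(or the function's framing helpers)
 *		foo->port.unwrap = NULL;
 *		return 0;
 *	}
 *
 * When the host selects the data interface the function driver calls
 * gether_connect(&foo->port); when it is deselected, gether_disconnect().
 */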
48
49 #define UETH__VERSION   "29-May-2008"
50
51 struct eth_dev {
52         /* lock is held while accessing port_usb
53          */
54         spinlock_t              lock;
55         struct gether           *port_usb;
56
57         struct net_device       *net;
58         struct usb_gadget       *gadget;
59
60         spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
61         struct list_head        tx_reqs, rx_reqs;
62         atomic_t                tx_qlen;
63
64         struct sk_buff_head     rx_frames;
65
66         unsigned                header_len;
67         struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
68         int                     (*unwrap)(struct gether *,
69                                                 struct sk_buff *skb,
70                                                 struct sk_buff_head *list);
71
72         struct work_struct      work;
73
74         unsigned long           todo;
75 #define WORK_RX_MEMORY          0
76
77         bool                    zlp;
78         u8                      host_mac[ETH_ALEN];
79 };
80
81 /*-------------------------------------------------------------------------*/
82
83 #define RX_EXTRA        20      /* bytes guarding against rx overflows */
84
85 #define DEFAULT_QLEN    2       /* double buffering by default */
86
87 #ifdef CONFIG_USB_SPRD_DWC
88 static unsigned qmult = 15;
89 #else
90 static unsigned qmult = 5;
91 #endif
92 module_param(qmult, uint, S_IRUGO|S_IWUSR);
93 MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
94
95 /* for dual-speed hardware, use deeper queues at high/super speed */
96 static inline int qlen(struct usb_gadget *gadget)
97 {
98         if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
99                                             gadget->speed == USB_SPEED_SUPER))
100                 return qmult * DEFAULT_QLEN;
101         else
102                 return DEFAULT_QLEN;
103 }
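/*
 * Worked example: with the defaults above, DEFAULT_QLEN = 2 gives simple
 * double buffering at full speed, while at high/super speed the queues are
 * preallocated with qmult * DEFAULT_QLEN requests, i.e. 15 * 2 = 30 on
 * CONFIG_USB_SPRD_DWC builds and 5 * 2 = 10 otherwise.
 */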
104
105 /*-------------------------------------------------------------------------*/
106
107 /* REVISIT there must be a better way than having two sets
108  * of debug calls ...
109  */
110
111 #undef DBG
112 #undef VDBG
113 #undef ERROR
114 #undef INFO
115
116 #define xprintk(d, level, fmt, args...) \
117         printk(level "%s: " fmt , (d)->net->name , ## args)
118
119 #ifdef DEBUG
120 #undef DEBUG
121 #define DBG(dev, fmt, args...) \
122         xprintk(dev , KERN_DEBUG , fmt , ## args)
123 #else
124 #define DBG(dev, fmt, args...) \
125         do { } while (0)
126 #endif /* DEBUG */
127
128 #ifdef VERBOSE_DEBUG
129 #define VDBG    DBG
130 #else
131 #define VDBG(dev, fmt, args...) \
132         do { } while (0)
133 #endif /* VERBOSE_DEBUG */
134
135 #define ERROR(dev, fmt, args...) \
136         xprintk(dev , KERN_ERR , fmt , ## args)
137 #define INFO(dev, fmt, args...) \
138         xprintk(dev , KERN_INFO , fmt , ## args)
139
140 /*-------------------------------------------------------------------------*/
141
142 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
143
144 static int ueth_change_mtu(struct net_device *net, int new_mtu)
145 {
146         struct eth_dev  *dev = netdev_priv(net);
147         unsigned long   flags;
148         int             status = 0;
149
150         /* don't change MTU on "live" link (peer won't know) */
151         spin_lock_irqsave(&dev->lock, flags);
152         if (dev->port_usb)
153                 status = -EBUSY;
154         else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
155                 status = -ERANGE;
156         else
157                 net->mtu = new_mtu;
158         spin_unlock_irqrestore(&dev->lock, flags);
159
160         return status;
161 }
162
163 static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
164 {
165         struct eth_dev *dev = netdev_priv(net);
166
167         strlcpy(p->driver, "g_ether", sizeof(p->driver));
168         strlcpy(p->version, UETH__VERSION, sizeof(p->version));
169         strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
170         strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
171 }
172
173 /* REVISIT can also support:
174  *   - WOL (by tracking suspends and issuing remote wakeup)
175  *   - msglevel (implies updated messaging)
176  *   - ... probably more ethtool ops
177  */
178
179 static const struct ethtool_ops ops = {
180         .get_drvinfo = eth_get_drvinfo,
181         .get_link = ethtool_op_get_link,
182 };
183
184 static void defer_kevent(struct eth_dev *dev, int flag)
185 {
186         if (test_and_set_bit(flag, &dev->todo))
187                 return;
188         if (!schedule_work(&dev->work))
189                 ERROR(dev, "kevent %d may have been dropped\n", flag);
190         else
191                 DBG(dev, "kevent %d scheduled\n", flag);
192 }
193
194 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
195 static void tx_complete(struct usb_ep *ep, struct usb_request *req);
196 static int prealloc(struct list_head *list,
197                 struct usb_ep *ep, unsigned n,
198                 bool sg_supported);
199
200 static int
201 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
202 {
203         struct sk_buff  *skb;
204         int             retval = -ENOMEM;
205         size_t          size = 0;
206         struct usb_ep   *out;
207         unsigned long   flags;
208
209         spin_lock_irqsave(&dev->lock, flags);
210         if (dev->port_usb)
211                 out = dev->port_usb->out_ep;
212         else
213                 out = NULL;
214         spin_unlock_irqrestore(&dev->lock, flags);
215
216         if (!out)
217                 return -ENOTCONN;
218
219
220         /* Padding up to RX_EXTRA handles minor disagreements with host.
221          * Normally we use the USB "terminate on short read" convention;
222          * so allow up to (N*maxpacket), since that memory is normally
223          * already allocated.  Some hardware doesn't deal well with short
224          * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
225          * byte off the end (to force hardware errors on overflow).
226          *
227          * RNDIS uses internal framing, and explicitly allows senders to
228          * pad to end-of-packet.  That's potentially nice for speed, but
229          * means receivers can't recover lost synch on their own (because
230          * new packets don't only start after a short RX).
231          */
232         size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
233         size += dev->port_usb->header_len;
234         size += out->maxpacket - 1;
235         size -= size % out->maxpacket;
236
237         if (dev->port_usb->is_fixed)
238                 size = max_t(size_t, size, dev->port_usb->fixed_out_len);
239         size = size * RNDIS_MSG_MAX_NUM;
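        /*
         * Worked example of the sizing above (assuming a 1500 byte MTU, a
         * 44 byte RNDIS header_len, which matches the per-packet allowance
         * used below for RNDIS_MSG_MAX_SIZE, and a 512 byte bulk maxpacket):
         *
         *	14 (ethhdr) + 1500 (MTU) + 20 (RX_EXTRA) + 44 = 1578 bytes,
         *	rounded up to a maxpacket multiple            = 2048 bytes,
         *	then multiplied by RNDIS_MSG_MAX_NUM for the aggregate rx buffer.
         */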
240
241         skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
242         if (skb == NULL) {
243                 DBG(dev, "no rx skb\n");
244                 goto enomem;
245         }
246
247         /* Some platforms perform better when IP packets are aligned,
248          * but on at least one, checksumming fails otherwise.  Note:
249          * RNDIS headers involve variable numbers of LE32 values.
250          */
251         /*
252          * RX: Do not move data by IP_ALIGN:
253          * if your DMA controller cannot handle it
254          */
255         if (!gadget_dma32(dev->gadget))
256                 skb_reserve(skb, NET_IP_ALIGN);
257
258         req->buf = skb->data;
259         req->length = size;
260         req->complete = rx_complete;
261         req->context = skb;
262
263         retval = usb_ep_queue(out, req, gfp_flags);
264         if (retval == -ENOMEM)
265 enomem:
266                 defer_kevent(dev, WORK_RX_MEMORY);
267         if (retval) {
268                 DBG(dev, "rx submit --> %d\n", retval);
269                 if (skb)
270                         dev_kfree_skb_any(skb);
271                 spin_lock_irqsave(&dev->req_lock, flags);
272                 list_add(&req->list, &dev->rx_reqs);
273                 spin_unlock_irqrestore(&dev->req_lock, flags);
274         }
275         return retval;
276 }
277
278 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
279 {
280         struct sk_buff  *skb = req->context, *skb2;
281         struct eth_dev  *dev = ep->driver_data;
282         int             status = req->status;
283
284         switch (status) {
285
286         /* normal completion */
287         case 0:
288                 skb_put(skb, req->actual);
289                 if (gadget_dma32(dev->gadget) && NET_IP_ALIGN) {
290                         u8 *data = skb->data;
291                         size_t len = skb_headlen(skb);
292                         skb_reserve(skb, NET_IP_ALIGN);
293                         memmove(skb->data, data, len);
294                 }
295                 if (dev->unwrap) {
296                         unsigned long   flags;
297
298                         spin_lock_irqsave(&dev->lock, flags);
299                         if (dev->port_usb) {
300                                 status = dev->unwrap(dev->port_usb,
301                                                         skb,
302                                                         &dev->rx_frames);
303                         } else {
304                                 dev_kfree_skb_any(skb);
305                                 status = -ENOTCONN;
306                         }
307                         spin_unlock_irqrestore(&dev->lock, flags);
308                 } else {
309                         skb_queue_tail(&dev->rx_frames, skb);
310                 }
311                 skb = NULL;
312
313                 skb2 = skb_dequeue(&dev->rx_frames);
314                 while (skb2) {
315                         if (status < 0
316                                         || ETH_HLEN > skb2->len
317                                         || skb2->len > VLAN_ETH_FRAME_LEN) {
318                                 dev->net->stats.rx_errors++;
319                                 dev->net->stats.rx_length_errors++;
320                                 DBG(dev, "rx length %d\n", skb2->len);
321                                 dev_kfree_skb_any(skb2);
322                                 goto next_frame;
323                         }
324                         skb2->protocol = eth_type_trans(skb2, dev->net);
325                         dev->net->stats.rx_packets++;
326                         dev->net->stats.rx_bytes += skb2->len;
327
328                         /* no buffer copies needed, unless hardware can't
329                          * use skb buffers.
330                          */
331                         status = netif_rx(skb2);
332 next_frame:
333                         skb2 = skb_dequeue(&dev->rx_frames);
334                 }
335                 break;
336
337         /* software-driven interface shutdown */
338         case -ECONNRESET:               /* unlink */
339         case -ESHUTDOWN:                /* disconnect etc */
340                 VDBG(dev, "rx shutdown, code %d\n", status);
341                 goto quiesce;
342
343         /* for hardware automagic (such as pxa) */
344         case -ECONNABORTED:             /* endpoint reset */
345                 DBG(dev, "rx %s reset\n", ep->name);
346                 defer_kevent(dev, WORK_RX_MEMORY);
347 quiesce:
348                 dev_kfree_skb_any(skb);
349                 goto clean;
350
351         /* data overrun */
352         case -EOVERFLOW:
353                 dev->net->stats.rx_over_errors++;
354                 /* FALLTHROUGH */
355
356         default:
357                 dev->net->stats.rx_errors++;
358                 DBG(dev, "rx status %d\n", status);
359                 break;
360         }
361
362         if (skb)
363                 dev_kfree_skb_any(skb);
364         if (!netif_running(dev->net)) {
365 clean:
366                 spin_lock(&dev->req_lock);
367                 list_add(&req->list, &dev->rx_reqs);
368                 spin_unlock(&dev->req_lock);
369                 req = NULL;
370         }
371         if (req)
372                 rx_submit(dev, req, GFP_ATOMIC);
373 }
374
375 static int prealloc(struct list_head *list,
376                 struct usb_ep *ep, unsigned n,
377                 bool sg_supported)
378 {
379         unsigned                i;
380         struct usb_request      *req;
381         bool                    usb_in;
382
383         if (!n)
384                 return -ENOMEM;
385
386         /* queue/recycle up to N requests */
387         i = n;
388         list_for_each_entry(req, list, list) {
389                 if (i-- == 0)
390                         goto extra;
391         }
392
393         if (ep->desc->bEndpointAddress & USB_DIR_IN)
394                 usb_in = true;
395         else
396                 usb_in = false;
397
398         while (i--) {
399                 req = usb_ep_alloc_request(ep, GFP_ATOMIC);
400                 if (!req)
401                         return list_empty(list) ? -ENOMEM : 0;
402                 /* update completion handler */
403                 if (usb_in) {
404                         req->complete = tx_complete;
405                         if (sg_supported) {
406                                 req->sg = kmalloc(RNDIS_MSG_MAX_NUM *
407                                                         sizeof(struct scatterlist),
408                                                         GFP_ATOMIC);
409                                 if (!req->sg)
410                                         goto extra;
411                         } else {
412                                 req->buf = kmalloc(RNDIS_MSG_MAX_NUM * (1516 + 44),
413                                                         GFP_ATOMIC);
414                                 if (!req->buf)
415                                         goto extra;
416                         }
417                 } else {
418                         req->complete = rx_complete;
419                 }
420                 list_add(&req->list, list);
421         }
422         return 0;
423
424 extra:
425         /* free extras */
426         for (;;) {
427                 struct list_head        *next;
428
429                 next = req->list.next;
430                 list_del(&req->list);
431
432                 if (sg_supported)
433                         kfree(req->sg);
434                 else
435                         kfree(req->buf);
436
437                 usb_ep_free_request(ep, req);
438
439                 if (next == list)
440                         break;
441
442                 req = container_of(next, struct usb_request, list);
443         }
444         return 0;
445 }
446
447
448 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
449 {
450         int     status;
451
452         spin_lock(&dev->req_lock);
453         status = prealloc(&dev->tx_reqs, link->in_ep, n,
454                         dev->gadget->sg_supported);
455         if (status < 0) {
456                 ERROR(dev, "failed to allocate in_ep requests\n");
457                 goto fail;
458         }
459         status = prealloc(&dev->rx_reqs, link->out_ep, n,
460                         dev->gadget->sg_supported);
461         if (status < 0) {
462                 ERROR(dev, "failed to allocate out_ep requests\n");
463                 goto fail;
464         }
465         goto done;
466 fail:
467         DBG(dev, "can't alloc requests\n");
468 done:
469         spin_unlock(&dev->req_lock);
470         return status;
471 }
472
473 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
474 {
475         struct usb_request      *req;
476         unsigned long           flags;
477
478         /* fill unused rxq slots with some skb */
479         spin_lock_irqsave(&dev->req_lock, flags);
480         while (!list_empty(&dev->rx_reqs)) {
481                 req = container_of(dev->rx_reqs.next,
482                                 struct usb_request, list);
483                 list_del_init(&req->list);
484                 spin_unlock_irqrestore(&dev->req_lock, flags);
485
486                 if (rx_submit(dev, req, gfp_flags) < 0) {
487                         defer_kevent(dev, WORK_RX_MEMORY);
488                         return;
489                 }
490
491                 spin_lock_irqsave(&dev->req_lock, flags);
492         }
493         spin_unlock_irqrestore(&dev->req_lock, flags);
494 }
495
496 static void eth_work(struct work_struct *work)
497 {
498         struct eth_dev  *dev = container_of(work, struct eth_dev, work);
499
500         if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
501                 if (netif_running(dev->net))
502                         rx_fill(dev, GFP_KERNEL);
503         }
504
505         if (dev->todo)
506                 DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
507 }
508 static inline int is_promisc(u16 cdc_filter)
509 {
510         return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
511 }
512
513 #ifndef CONFIG_USB_SPRD_DWC
514
515
516 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
517 {
518         struct sk_buff  *skb = req->context;
519         struct eth_dev  *dev = ep->driver_data;
520
521         switch (req->status) {
522         default:
523                 dev->net->stats.tx_errors++;
524                 VDBG(dev, "tx err %d\n", req->status);
525                 /* FALLTHROUGH */
526         case -ECONNRESET:               /* unlink */
527         case -ESHUTDOWN:                /* disconnect etc */
528                 break;
529         case 0:
530                 dev->net->stats.tx_bytes += skb->len;
531         }
532         dev->net->stats.tx_packets++;
533
534         spin_lock(&dev->req_lock);
535         list_add(&req->list, &dev->tx_reqs);
536         spin_unlock(&dev->req_lock);
537
538         dev_kfree_skb_any(skb);
539
540         atomic_dec(&dev->tx_qlen);
541         if (netif_carrier_ok(dev->net))
542                 netif_wake_queue(dev->net);
543 }
544
545 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
546                                         struct net_device *net)
547 {
548         struct eth_dev          *dev = netdev_priv(net);
549         int                     length = skb->len;
550         int                     retval;
551         struct usb_request      *req = NULL;
552         unsigned long           flags;
553         struct usb_ep           *in;
554         u16                     cdc_filter;
555
556         spin_lock_irqsave(&dev->lock, flags);
557         if (dev->port_usb) {
558                 in = dev->port_usb->in_ep;
559                 cdc_filter = dev->port_usb->cdc_filter;
560         } else {
561                 in = NULL;
562                 cdc_filter = 0;
563         }
564         spin_unlock_irqrestore(&dev->lock, flags);
565
566         if (!in) {
567                 dev_kfree_skb_any(skb);
568                 return NETDEV_TX_OK;
569         }
570
571         /* apply outgoing CDC or RNDIS filters */
572         if (!is_promisc(cdc_filter)) {
573                 u8              *dest = skb->data;
574
575                 if (is_multicast_ether_addr(dest)) {
576                         u16     type;
577
578                         /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
579                          * SET_ETHERNET_MULTICAST_FILTERS requests
580                          */
581                         if (is_broadcast_ether_addr(dest))
582                                 type = USB_CDC_PACKET_TYPE_BROADCAST;
583                         else
584                                 type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
585                         if (!(cdc_filter & type)) {
586                                 dev_kfree_skb_any(skb);
587                                 return NETDEV_TX_OK;
588                         }
589                 }
590                 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
591         }
592
593         spin_lock_irqsave(&dev->req_lock, flags);
594         /*
595          * this freelist can be empty if an interrupt triggered disconnect()
596          * and reconfigured the gadget (shutting down this queue) after the
597          * network stack decided to xmit but before we got the spinlock.
598          */
599         if (list_empty(&dev->tx_reqs)) {
600                 spin_unlock_irqrestore(&dev->req_lock, flags);
601                 return NETDEV_TX_BUSY;
602         }
603
604         req = container_of(dev->tx_reqs.next, struct usb_request, list);
605         list_del(&req->list);
606
607         /* temporarily stop TX queue when the freelist empties */
608         if (list_empty(&dev->tx_reqs))
609                 netif_stop_queue(net);
610         spin_unlock_irqrestore(&dev->req_lock, flags);
611
612         /* no buffer copies needed, unless the network stack did it
613          * or the hardware can't use skb buffers.
614          * or there's not enough space for extra headers we need
615          */
616         if (dev->wrap) {
617                 unsigned long   flags;
618
619                 spin_lock_irqsave(&dev->lock, flags);
620                 if (dev->port_usb)
621                         skb = dev->wrap(dev->port_usb, skb);
622                 spin_unlock_irqrestore(&dev->lock, flags);
623                 if (!skb)
624                         goto drop;
625
626                 length = skb->len;
627         }
628         /*
629          * Align data to 32bit if the dma controller requires it
630          */
631         if (gadget_dma32(dev->gadget)) {
632                 unsigned long align = (unsigned long)skb->data & 3;
633                 if (WARN_ON(skb_headroom(skb) < align)) {
634                         dev_kfree_skb_any(skb);
635                         goto drop;
636                 } else if (align) {
637                         u8 *data = skb->data;
638                         size_t len = skb_headlen(skb);
639                         skb->data -= align;
640                         memmove(skb->data, data, len);
641                         skb_set_tail_pointer(skb, len);
642                 }
643         }
644         req->buf = skb->data;
645         req->context = skb;
646         req->complete = tx_complete;
647
648         /* NCM requires no zlp if transfer is dwNtbInMaxSize */
649         if (dev->port_usb->is_fixed &&
650             length == dev->port_usb->fixed_in_len &&
651             (length % in->maxpacket) == 0)
652                 req->zero = 0;
653         else
654                 req->zero = 1;
655
656         /* use zlp framing on tx for strict CDC-Ether conformance,
657          * though any robust network rx path ignores extra padding.
658          * and some hardware doesn't like to write zlps.
659          */
660         if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
661                 length++;
662
663         req->length = length;
664
665         /* throttle high/super speed IRQ rate back slightly */
666         if (gadget_is_dualspeed(dev->gadget))
667                 req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
668                                      dev->gadget->speed == USB_SPEED_SUPER)
669                         ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
670                         : 0;
671
672         retval = usb_ep_queue(in, req, GFP_ATOMIC);
673         switch (retval) {
674         default:
675                 DBG(dev, "tx queue err %d\n", retval);
676                 break;
677         case 0:
678                 net->trans_start = jiffies;
679                 atomic_inc(&dev->tx_qlen);
680         }
681
682         if (retval) {
683                 dev_kfree_skb_any(skb);
684 drop:
685                 dev->net->stats.tx_dropped++;
686                 spin_lock_irqsave(&dev->req_lock, flags);
687                 if (list_empty(&dev->tx_reqs))
688                         netif_start_queue(net);
689                 list_add(&req->list, &dev->tx_reqs);
690                 spin_unlock_irqrestore(&dev->req_lock, flags);
691         }
692         return NETDEV_TX_OK;
693 }
694
695 #else
696 #define RNDIS_MSG_MAX_SIZE  (1516 + 44)
697 #define RNDIS_MSG_MAX_QUEUE 32
698 /* RNDIS_MSG_MAX_NUM IP datagrams in the worst case */
699 #define RNDIS_MSG_QUEUE_MAX_SIZE (RNDIS_MSG_MAX_NUM * RNDIS_MSG_MAX_SIZE)
700
701 #define RNDIS_QUEUE_IDLE    1
702 #define RNDIS_QUEUE_GATHER  2
703 #define RNDIS_QUEUE_FULL    3
704 #define RNDIS_QUEUE_SEND    4
705
706 #define idx_add_one(n)   (((n) + 1) % RNDIS_MSG_MAX_QUEUE)
707
708 struct pkt_msg {
709         u32 state;
710         u32 len;
711         u32 pkt_cnt;
712         u32 q_idx;
713         void *req;
714         void *skbs[RNDIS_MSG_MAX_NUM];
715         u32  skb_idx;
716         u32  bit_map;
717 };
718 struct rndis_msg {
719         struct eth_dev  *dev;
720         spinlock_t      buffer_lock;
721         u32     w_idx;
722         u32     r_idx;
723         u32     flow_stop;
724         u32     last_sent;
725         u32     last_complete;
726         struct pkt_msg q[RNDIS_MSG_MAX_QUEUE];
727 };
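/*
 * The q[] array is used as a ring: w_idx points at the pkt_msg currently
 * being gathered, r_idx trails it as completed transfers retire, and
 * idx_add_one() wraps both indices (e.g. idx_add_one(31) == 0 with
 * RNDIS_MSG_MAX_QUEUE == 32).  If advancing w_idx would make it collide
 * with r_idx, the netif queue is stopped (flow_stop) until a tx completion
 * frees a slot.
 */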
728
729 static struct rndis_msg s_rndis_msg;
730
731 static inline struct rndis_msg *get_rndis_msg(void)
732 {
733         return &s_rndis_msg;
734 }
735
736 static inline void pkt_msg_clean(struct pkt_msg *q)
737 {
738         int i;
739         struct sk_buff *skb;
740
741         q->len = 0;
742         q->pkt_cnt = 0;
743         q->req = NULL;
744         q->skb_idx = 0;
745         q->bit_map = 0;
746         for (i = 0; i < sizeof(q->skbs)/sizeof(q->skbs[0]); i++) {
747                 skb = (struct sk_buff *)q->skbs[i];
748                 q->skbs[i] = NULL;
749                 if (skb == NULL)
750                         continue;
751                 dev_kfree_skb_any(skb);
752         }
753         q->state = RNDIS_QUEUE_IDLE;
754 }
755
756 static inline uint32_t pkt_msg_full(struct eth_dev *dev, uint32_t map)
757 {
758         if (dev->port_usb->multi_pkt_xfer
759                 || dev->gadget->sg_supported)
760                 return map == ((1 << RNDIS_MSG_MAX_NUM) - 1);
761         else
762                 return !!map;
763 }
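/*
 * Example of the bit_map accounting checked above: each skb that has been
 * wrapped and stored in pkt_msg->skbs[i] sets bit i, so with a hypothetical
 * RNDIS_MSG_MAX_NUM of 4 a fully gathered entry has
 * bit_map == (1 << 4) - 1 == 0xf.  Without multi-packet transfer or
 * scatter/gather support a single wrapped skb (any non-zero map) is enough
 * to trigger a send.
 */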
764
765 static int pkt_msg_send(struct rndis_msg *msg, struct pkt_msg *q,
766                                 struct net_device *net)
767 {
768         struct eth_dev          *dev = netdev_priv(net);
769         struct usb_ep           *in = dev->port_usb->in_ep;
770         struct usb_request  *req = NULL;
771         unsigned long   flags;
772         int retval;
773         uint32_t i, len, length;
774         struct sk_buff *skb;
775
776         req = q->req;
777         req->context = q;
778         req->num_sgs = 0;
779         length = 0;
780
781         if (dev->gadget->sg_supported) {
782                 sg_init_table(req->sg, RNDIS_MSG_MAX_NUM);
783                 for (i = 0; i < q->skb_idx; i++) {
784                         skb = q->skbs[i];
785                         len = skb->len;
786                         /* use zlp framing on tx for strict CDC-Ether conformance,
787                         * though any robust network rx path ignores extra padding.
788                         * and some hardware doesn't like to write zlps.
789                         */
790                         if (dev->port_usb->is_fixed &&
791                                 skb->len == dev->port_usb->fixed_in_len &&
792                                 !(len & (in->maxpacket - 1)))
793                                 len++;
794                         sg_set_buf(req->sg + i, skb->data, len);
795                         length += len;
796                 }
797                 sg_mark_end(req->sg + i - 1);
798                 req->num_sgs = q->skb_idx;
799         } else {
800                 for (i = 0; i < q->skb_idx; i++) {
801                         skb = q->skbs[i];
802                         memcpy(req->buf + length, skb->data, skb->len);
803                         length += skb->len;
804                         /* use zlp framing on tx for strict CDC-Ether conformance,
805                         * though any robust network rx path ignores extra padding.
806                         * and some hardware doesn't like to write zlps.
807                         */
808                         if (dev->port_usb->is_fixed &&
809                                 skb->len == dev->port_usb->fixed_in_len &&
810                                 !(skb->len & (in->maxpacket - 1)))
811                                 length++;
812                 }
813         }
814         q->len = length;
815         q->bit_map = 0;
816
817         /* NCM requires no zlp if transfer is dwNtbInMaxSize */
818         if (dev->port_usb->is_fixed &&
819                 length == dev->port_usb->fixed_in_len &&
820                 (length % in->maxpacket) == 0)
821                 req->zero = 0;
822         else
823                 req->zero = 1;
824         /* Here zero means short packet */
825         if (dev->gadget->sg_supported
826                 && !dev->port_usb->multi_pkt_xfer)
827                 req->zero = 1;
828
829         /* use zlp framing on tx for strict CDC-Ether conformance,
830         * though any robust network rx path ignores extra padding.
831         * and some hardware doesn't like to write zlps.
832         */
833         if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
834                 length++;
835
836         req->length = length;
837         /* throttle high/super speed IRQ rate back slightly*/
838         if (gadget_is_dualspeed(dev->gadget))
839                 req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
840                                 dev->gadget->speed == USB_SPEED_SUPER)
841                                 ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
842                                 : 0;
843
844         retval = usb_ep_queue(in, req, GFP_ATOMIC);
845         switch (retval) {
846         default:
847                 printk("tx queue err %d\n", retval);
848                 break;
849         case 0:
850
851                 net->trans_start = jiffies;
852                 atomic_inc(&dev->tx_qlen);
853         }
854         if (retval) {
855                 printk("rndis gather, %u IP datagrams are lost! eth_start_xmit\n",
856                         q->pkt_cnt);
857                 net->stats.tx_dropped += q->skb_idx;
858                 pkt_msg_clean(q);
859                 spin_lock_irqsave(&dev->req_lock, flags);
860                 if (list_empty(&dev->tx_reqs)) {
861                         printk("pkt_msg_send, netif_start_queue [%u,%u],[%u,%u]\n",
862                                 msg->w_idx, msg->r_idx, msg->last_sent, msg->last_complete);
863                         netif_start_queue(net);
864                         msg->flow_stop = 0;
865                 }
866                 list_add(&req->list, &dev->tx_reqs);
867                 spin_unlock_irqrestore(&dev->req_lock, flags);
868         }
869         return retval;
870 }
871
872 static void rndis_msg_init(struct eth_dev *dev)
873 {
874         struct rndis_msg *msg = get_rndis_msg();
875         u32 i;
876
877         msg->dev = dev;
878         spin_lock_init(&msg->buffer_lock);
879         msg->w_idx = 0;
880         msg->r_idx = 0;
881         for (i = 0; i < RNDIS_MSG_MAX_QUEUE; i++) {
882                 msg->q[i].q_idx = i;
883                 memset(msg->q[i].skbs, 0, sizeof(msg->q[i].skbs));
884                 pkt_msg_clean(msg->q + i);
885         }
886 }
887
888 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
889 {
890
891         struct pkt_msg  *q = req->context;
892         struct eth_dev  *dev = ep->driver_data;
893         struct rndis_msg    *msg = get_rndis_msg();
894         struct pkt_msg *w_q;
895
896         switch (req->status) {
897         default:
898                 dev->net->stats.tx_errors++;
899                 VDBG(dev, "tx err %d\n", req->status);
900         /* FALLTHROUGH */
901         case -ECONNRESET:               /* unlink */
902         case -ESHUTDOWN:                /* disconnect etc */
903                 break;
904         case 0:
905                 dev->net->stats.tx_bytes += q->len;
906         }
907         dev->net->stats.tx_packets += q->skb_idx;
908         if (req != q->req)
909                 printk("tx_complete error: req != q->req [%p,%p]\n", req, q->req);
910
911         msg->last_complete = q->q_idx;
912         pkt_msg_clean(q);
913         if (q->q_idx != msg->w_idx)
914                 msg->r_idx = idx_add_one(q->q_idx);
915
916         atomic_dec(&dev->tx_qlen);
917
918         if (netif_carrier_ok(dev->net))
919                 netif_wake_queue(dev->net);
920         spin_lock(&dev->req_lock);
921         list_add(&req->list, &dev->tx_reqs);
922         spin_unlock(&dev->req_lock);
923         spin_lock(&msg->buffer_lock);
924         w_q = msg->q + msg->w_idx;
925         if (msg->w_idx == msg->r_idx &&
926                 w_q->state == RNDIS_QUEUE_GATHER &&
927                 w_q->skb_idx == w_q->pkt_cnt) {
928                 msg->last_sent = w_q->q_idx;
929                 w_q->state = RNDIS_QUEUE_SEND;
930                 spin_unlock(&msg->buffer_lock);
931                 pkt_msg_send(msg, w_q, dev->net);
932                 return;
933         }
934         spin_unlock(&msg->buffer_lock);
935 }
936
937
938 static netdev_tx_t eth_alloc_req(struct net_device *net,
939                 struct usb_request  **req,
940                 struct pkt_msg **w_q,
941                 struct sk_buff *skb)
942 {
943         unsigned long           flags;
944         struct eth_dev          *dev = netdev_priv(net);
945         struct rndis_msg    *msg = get_rndis_msg();
946         struct pkt_msg *q;
947
948         spin_lock_irqsave(&msg->buffer_lock, flags);
949         q = msg->q + msg->w_idx;
950         if (q->state == RNDIS_QUEUE_GATHER) {
951                 int next;
952                 q->skbs[q->pkt_cnt] = skb;
953                 q->pkt_cnt++;
954                 *w_q = q;
955                 *req = q->req;
956                 if (q->pkt_cnt >= RNDIS_MSG_MAX_NUM) {
957                         q->state = RNDIS_QUEUE_FULL;
958                         next = idx_add_one(msg->w_idx);
959                         if (msg->r_idx == next) {
960                                 netif_stop_queue(dev->net);
961                                 msg->flow_stop = 1;
962                         } else
963                                 msg->w_idx = next;
964                 }
965                 spin_unlock_irqrestore(&msg->buffer_lock, flags);
966                 return NETDEV_TX_OK;
967         }
968         spin_unlock_irqrestore(&msg->buffer_lock, flags);
969         *w_q = NULL;
970         spin_lock_irqsave(&dev->req_lock, flags);
971         /*
972          * this freelist can be empty if an interrupt triggered disconnect()
973          * and reconfigured the gadget (shutting down this queue) after the
974          * network stack decided to xmit but before we got the spinlock.
975          */
976         if (list_empty(&dev->tx_reqs)) {
977                 spin_unlock_irqrestore(&dev->req_lock, flags);
978                 *req = NULL;
979                 return NETDEV_TX_BUSY;
980         }
981
982         *req = container_of(dev->tx_reqs.next, struct usb_request, list);
983         __list_del_entry(&(*req)->list);
984
985         /* temporarily stop TX queue when the freelist empties */
986         if (list_empty(&dev->tx_reqs)) {
987                 netif_stop_queue(net);
988                 msg->flow_stop = 1;
989         }
990         spin_unlock_irqrestore(&dev->req_lock, flags);
991         return NETDEV_TX_OK;
992 }
993
994 int save_to_queue(struct eth_dev *dev, struct rndis_msg *msg,
995                 struct sk_buff *skb, struct usb_request *req,
996                 struct pkt_msg      **q)
997 {
998         unsigned long flags;
999         u32 next = 0;
1000         struct pkt_msg  *w_q;
1001
1002         spin_lock_irqsave(&msg->buffer_lock, flags);
1003         w_q = msg->q + msg->w_idx;
1004         next = idx_add_one(msg->w_idx);
1005         if (msg->r_idx == next) {
1006                 printk("save_to_queue, netif_stop_queue [%d,%d]\n",
1007                         msg->w_idx, msg->r_idx);
1008                 spin_unlock_irqrestore(&msg->buffer_lock, flags);
1009                 return 1;
1010         }
1011
1012         if (w_q->state >= RNDIS_QUEUE_FULL) {
1013                 msg->w_idx = next;
1014                 w_q = msg->q + msg->w_idx;
1015         }
1016
1017         if (w_q->state == RNDIS_QUEUE_IDLE)
1018                 w_q->req = req;
1019         else if (w_q->req != req) {
1020                 unsigned long flag;
1021                 spin_lock_irqsave(&dev->req_lock, flag);
1022                 if (list_empty(&dev->tx_reqs))
1023                         netif_start_queue(dev->net);
1024                 list_add(&req->list, &dev->tx_reqs);
1025                 spin_unlock_irqrestore(&dev->req_lock, flag);
1026         }
1027
1028         if (w_q->pkt_cnt < RNDIS_MSG_MAX_NUM) {
1029                 w_q->skbs[w_q->skb_idx] = skb;
1030                 w_q->bit_map |= (1<<w_q->skb_idx);
1031                 w_q->skb_idx++;
1032                 w_q->pkt_cnt++;
1033                 *q = w_q;
1034         }
1035         if (w_q->pkt_cnt >= RNDIS_MSG_MAX_NUM) {
1036                 w_q->state = RNDIS_QUEUE_FULL;
1037                 next = idx_add_one(msg->w_idx);
1038                 if (next == msg->r_idx) {
1039                         netif_stop_queue(dev->net);
1040                         msg->flow_stop = 1;
1041                 } else
1042                         msg->w_idx = next;
1043
1044         } else
1045                 w_q->state = RNDIS_QUEUE_GATHER;
1046         spin_unlock_irqrestore(&msg->buffer_lock, flags);
1047         return 0;
1048 }
1049 static void update_sbks_in_queue(struct pkt_msg *q,
1050                                 struct  sk_buff *skb_new,
1051                                 struct sk_buff *skb_old)
1052 {
1053         int i;
1054         unsigned long flags;
1055         struct rndis_msg    *msg = get_rndis_msg();
1056
1057         spin_lock_irqsave(&msg->buffer_lock, flags);
1058         for (i = 0; i < q->pkt_cnt; i++) {
1059                 if (q->skbs[i] == skb_old) {
1060                         q->skbs[i] = skb_new;
1061                         q->bit_map |= (1<<i);
1062                         q->skb_idx++;
1063                         break;
1064                 }
1065         }
1066         spin_unlock_irqrestore(&msg->buffer_lock, flags);
1067 }
1068 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
1069                                         struct net_device *net)
1070 {
1071         struct eth_dev          *dev = netdev_priv(net);
1072         int                     length = skb->len;
1073         int                     retval = 0;
1074         struct usb_request      *req = NULL;
1075         unsigned long           flags;
1076         struct usb_ep           *in;
1077         u16                     cdc_filter;
1078         struct rndis_msg    *msg = get_rndis_msg();
1079         struct pkt_msg      *w_q = NULL;
1080         struct sk_buff *skb_old;
1081
1082         spin_lock_irqsave(&dev->lock, flags);
1083         if (dev->port_usb) {
1084                 in = dev->port_usb->in_ep;
1085                 cdc_filter = dev->port_usb->cdc_filter;
1086         } else {
1087                 in = NULL;
1088                 cdc_filter = 0;
1089         }
1090         spin_unlock_irqrestore(&dev->lock, flags);
1091
1092         if (!in) {
1093                 dev_kfree_skb_any(skb);
1094                 return NETDEV_TX_OK;
1095         }
1096
1097         /* apply outgoing CDC or RNDIS filters */
1098         if (!is_promisc(cdc_filter)) {
1099                 u8              *dest = skb->data;
1100
1101                 if (is_multicast_ether_addr(dest)) {
1102                         u16     type;
1103
1104                         /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
1105                          * SET_ETHERNET_MULTICAST_FILTERS requests
1106                          */
1107                         if (is_broadcast_ether_addr(dest))
1108                                 type = USB_CDC_PACKET_TYPE_BROADCAST;
1109                         else
1110                                 type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
1111                         if (!(cdc_filter & type)) {
1112                                 dev_kfree_skb_any(skb);
1113                                 return NETDEV_TX_OK;
1114                         }
1115                 }
1116         /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
1117         }
1118         skb_old = skb;
1119         eth_alloc_req(net, &req, &w_q, skb);
1120         if (req == NULL)
1121                 return NETDEV_TX_BUSY;
1122         /* no buffer copies needed, unless the network stack did it
1123         * or the hardware can't use skb buffers.
1124         * or there's not enough space for extra headers we need
1125         */
1126         if (dev->wrap) {
1127                 unsigned long   flags;
1128
1129                 spin_lock_irqsave(&dev->lock, flags);
1130
1131                 if (dev->port_usb)
1132                         skb = dev->wrap(dev->port_usb, skb);
1133                 spin_unlock_irqrestore(&dev->lock, flags);
1134                 if (!skb)
1135                         goto drop;
1136
1137                 length = skb->len;
1138         }
1139         /*
1140         * Align data to 32bit if the dma controller requires it
1141         */
1142         if (gadget_dma32(dev->gadget)) {
1143                 unsigned long align = (unsigned long)skb->data & 3;
1144                 if (WARN_ON(skb_headroom(skb) < align)) {
1145                         dev_kfree_skb_any(skb);
1146                         goto drop;
1147                 } else if (align) {
1148                         u8 *data = skb->data;
1149                         size_t len = skb_headlen(skb);
1150                         skb->data -= align;
1151                         memmove(skb->data, data, len);
1152                         skb_set_tail_pointer(skb, len);
1153                 }
1154         }
1155
1156         retval = 0;
1157         if (w_q == NULL)
1158                 retval = save_to_queue(dev, msg, skb, req, &w_q);
1159         else if (skb != skb_old)
1160                 update_sbks_in_queue(w_q, skb, skb_old);
1161
1162         if (retval == 0) {
1163                 spin_lock_irqsave(&msg->buffer_lock, flags);
1164                 if (w_q &&
1165                         pkt_msg_full(dev, w_q->bit_map) &&
1166                         (w_q->state != RNDIS_QUEUE_SEND)) {
1167                         msg->last_sent = w_q->q_idx;
1168                         w_q->state = RNDIS_QUEUE_SEND;
1169                         spin_unlock_irqrestore(&msg->buffer_lock, flags);
1170                         pkt_msg_send(msg, w_q, net);
1171                         return NETDEV_TX_OK;
1172                 }
1173
1174                 w_q = msg->q + msg->w_idx;
1175                 if (pkt_msg_full(dev, w_q->bit_map)
1176                         && w_q->state != RNDIS_QUEUE_SEND) {
1177                         msg->last_sent = w_q->q_idx;
1178                         w_q->state = RNDIS_QUEUE_SEND;
1179                         spin_unlock_irqrestore(&msg->buffer_lock, flags);
1180                         pkt_msg_send(msg, w_q, net);
1181                         return NETDEV_TX_OK;
1182                 }
1183                 if (msg->w_idx == msg->r_idx &&
1184                         w_q->state == RNDIS_QUEUE_GATHER &&
1185                         w_q->skb_idx == w_q->pkt_cnt) {
1186                         msg->last_sent = w_q->q_idx;
1187                         w_q->state = RNDIS_QUEUE_SEND;
1188                         spin_unlock_irqrestore(&msg->buffer_lock, flags);
1189                         pkt_msg_send(msg, w_q, net);
1190                         return NETDEV_TX_OK;
1191                 }
1192                 spin_unlock_irqrestore(&msg->buffer_lock, flags);
1193                 return NETDEV_TX_OK;
1194         }
1195         if (retval) {
1196                 dev_kfree_skb_any(skb);
1197 drop:
1198                 printk("drop req\n");
1199                 dev->net->stats.tx_dropped++;
1200                 spin_lock_irqsave(&dev->req_lock, flags);
1201                 if (list_empty(&dev->tx_reqs))
1202                         netif_start_queue(net);
1203                 list_add(&req->list, &dev->tx_reqs);
1204                 spin_unlock_irqrestore(&dev->req_lock, flags);
1205         }
1206         return NETDEV_TX_OK;
1207 }
1208 #endif
1209 /*-------------------------------------------------------------------------*/
1210
1211 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
1212 {
1213         DBG(dev, "%s\n", __func__);
1214
1215         /* fill the rx queue */
1216         rx_fill(dev, gfp_flags);
1217
1218         /* and open the tx floodgates */
1219         atomic_set(&dev->tx_qlen, 0);
1220         netif_wake_queue(dev->net);
1221 #ifdef CONFIG_USB_SPRD_DWC
1222         rndis_msg_init(dev);
1223 #endif
1224 }
1225
1226 static int eth_open(struct net_device *net)
1227 {
1228         struct eth_dev  *dev = netdev_priv(net);
1229         struct gether   *link;
1230
1231         DBG(dev, "%s\n", __func__);
1232         if (netif_carrier_ok(dev->net))
1233                 eth_start(dev, GFP_KERNEL);
1234
1235         spin_lock_irq(&dev->lock);
1236         link = dev->port_usb;
1237         if (link && link->open)
1238                 link->open(link);
1239         spin_unlock_irq(&dev->lock);
1240
1241         return 0;
1242 }
1243
1244 static int eth_stop(struct net_device *net)
1245 {
1246         struct eth_dev  *dev = netdev_priv(net);
1247         unsigned long   flags;
1248
1249         VDBG(dev, "%s\n", __func__);
1250         netif_stop_queue(net);
1251
1252         DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
1253                 dev->net->stats.rx_packets, dev->net->stats.tx_packets,
1254                 dev->net->stats.rx_errors, dev->net->stats.tx_errors
1255                 );
1256
1257         /* ensure there are no more active requests */
1258         spin_lock_irqsave(&dev->lock, flags);
1259         if (dev->port_usb) {
1260                 struct gether   *link = dev->port_usb;
1261                 const struct usb_endpoint_descriptor *in;
1262                 const struct usb_endpoint_descriptor *out;
1263
1264                 if (link->close)
1265                         link->close(link);
1266
1267                 /* NOTE:  we have no abort-queue primitive we could use
1268                  * to cancel all pending I/O.  Instead, we disable then
1269                  * reenable the endpoints ... this idiom may leave toggle
1270                  * wrong, but that's a self-correcting error.
1271                  *
1272                  * REVISIT:  we *COULD* just let the transfers complete at
1273                  * their own pace; the network stack can handle old packets.
1274                  * For the moment we leave this here, since it works.
1275                  */
1276                 in = link->in_ep->desc;
1277                 out = link->out_ep->desc;
1278                 usb_ep_disable(link->in_ep);
1279                 usb_ep_disable(link->out_ep);
1280                 if (netif_carrier_ok(net)) {
1281                         DBG(dev, "host still using in/out endpoints\n");
1282                         link->in_ep->desc = in;
1283                         link->out_ep->desc = out;
1284                         usb_ep_enable(link->in_ep);
1285                         usb_ep_enable(link->out_ep);
1286                 }
1287         }
1288         spin_unlock_irqrestore(&dev->lock, flags);
1289
1290         return 0;
1291 }
1292
1293 /*-------------------------------------------------------------------------*/
1294
1295 /* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
1296 static char *dev_addr;
1297 module_param(dev_addr, charp, S_IRUGO);
1298 MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
1299
1300 /* this address is invisible to ifconfig */
1301 static char *host_addr;
1302 module_param(host_addr, charp, S_IRUGO);
1303 MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
1304
1305 static int get_ether_addr(const char *str, u8 *dev_addr)
1306 {
1307         if (str) {
1308                 unsigned        i;
1309
1310                 for (i = 0; i < 6; i++) {
1311                         unsigned char num;
1312
1313                         if ((*str == '.') || (*str == ':'))
1314                                 str++;
1315                         num = hex_to_bin(*str++) << 4;
1316                         num |= hex_to_bin(*str++);
1317                         dev_addr [i] = num;
1318                 }
1319                 if (is_valid_ether_addr(dev_addr))
1320                         return 0;
1321         }
1322         eth_random_addr(dev_addr);
1323         return 1;
1324 }
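/*
 * Example: instead of the random fallback, both addresses can be fixed at
 * module load time via the parameters defined above (the addresses shown
 * are made up):
 *
 *	modprobe g_ether dev_addr=8a:bc:de:f0:12:34 host_addr=8a:bc:de:f0:12:35
 *
 * Either ':' or '.' may separate the bytes; an unparsable or invalid string
 * falls back to a random locally administered address.
 */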
1325
1326 static const struct net_device_ops eth_netdev_ops = {
1327         .ndo_open               = eth_open,
1328         .ndo_stop               = eth_stop,
1329         .ndo_start_xmit         = eth_start_xmit,
1330         .ndo_change_mtu         = ueth_change_mtu,
1331         .ndo_set_mac_address    = eth_mac_addr,
1332         .ndo_validate_addr      = eth_validate_addr,
1333 };
1334
1335 static struct device_type gadget_type = {
1336         .name   = "gadget",
1337 };
1338
1339 /**
1340  * gether_setup_name - initialize one ethernet-over-usb link
1341  * @g: gadget to associate with this network link
1342  * @ethaddr: NULL, or a buffer in which the ethernet address of the
1343  *      host side of the link is recorded
1344  * @netname: name for network device (for example, "usb")
1345  * Context: may sleep
1346  *
1347  * This sets up the single network link that may be exported by a
1348  * gadget driver using this framework.  The link layer addresses are
1349  * set up using module parameters.
1350  *
1351  * Returns an eth_dev pointer on success, or an ERR_PTR on failure
1352  */
1353 struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
1354                 const char *netname)
1355 {
1356         struct eth_dev          *dev;
1357         struct net_device       *net;
1358         int                     status;
1359
1360         net = alloc_etherdev(sizeof *dev);
1361         if (!net)
1362                 return ERR_PTR(-ENOMEM);
1363
1364         dev = netdev_priv(net);
1365         spin_lock_init(&dev->lock);
1366         spin_lock_init(&dev->req_lock);
1367         INIT_WORK(&dev->work, eth_work);
1368         INIT_LIST_HEAD(&dev->tx_reqs);
1369         INIT_LIST_HEAD(&dev->rx_reqs);
1370
1371         skb_queue_head_init(&dev->rx_frames);
1372
1373         /* network device setup */
1374         dev->net = net;
1375         snprintf(net->name, sizeof(net->name), "%s%%d", netname);
1376
1377         if (get_ether_addr(dev_addr, net->dev_addr))
1378                 dev_warn(&g->dev,
1379                         "using random %s ethernet address\n", "self");
1380         if (get_ether_addr(host_addr, dev->host_mac))
1381                 dev_warn(&g->dev,
1382                         "using random %s ethernet address\n", "host");
1383
1384         if (ethaddr)
1385                 memcpy(ethaddr, dev->host_mac, ETH_ALEN);
1386
1387         net->netdev_ops = &eth_netdev_ops;
1388
1389         SET_ETHTOOL_OPS(net, &ops);
1390
1391         dev->gadget = g;
1392         SET_NETDEV_DEV(net, &g->dev);
1393         SET_NETDEV_DEVTYPE(net, &gadget_type);
1394
1395         status = register_netdev(net);
1396         if (status < 0) {
1397                 dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
1398                 free_netdev(net);
1399                 dev = ERR_PTR(status);
1400         } else {
1401                 INFO(dev, "MAC %pM\n", net->dev_addr);
1402                 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
1403
1404                 /* two kinds of host-initiated state changes:
1405                  *  - iff DATA transfer is active, carrier is "on"
1406                  *  - tx queueing enabled if open *and* carrier is "on"
1407                  */
1408                 netif_carrier_off(net);
1409         }
1410
1411         return dev;
1412 }
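/*
 * Minimal usage sketch (every name except gether_setup_name() and
 * gether_cleanup() is hypothetical): a gadget driver typically does
 *
 *	the_dev = gether_setup_name(cdev->gadget, host_ethaddr, "usb");
 *	if (IS_ERR(the_dev))
 *		return PTR_ERR(the_dev);
 *	...
 *	gether_cleanup(the_dev);	(at unbind time)
 *
 * so that exactly one "usbN" interface is registered per gadget.
 */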
1413
1414 /**
1415  * gether_cleanup - remove Ethernet-over-USB device
1416  * Context: may sleep
1417  *
1418  * This is called to free all resources allocated by @gether_setup().
1419  */
1420 void gether_cleanup(struct eth_dev *dev)
1421 {
1422         if (!dev)
1423                 return;
1424
1425         unregister_netdev(dev->net);
1426         flush_work(&dev->work);
1427         free_netdev(dev->net);
1428 }
1429
1430 /**
1431  * gether_connect - notify network layer that USB link is active
1432  * @link: the USB link, set up with endpoints, descriptors matching
1433  *      current device speed, and any framing wrapper(s) set up.
1434  * Context: irqs blocked
1435  *
1436  * This is called to activate endpoints and let the network layer know
1437  * the connection is active ("carrier detect").  It may cause the I/O
1438  * queues to open and start letting network packets flow, but will in
1439  * any case activate the endpoints so that they respond properly to the
1440  * USB host.
1441  *
1442  * Verify net_device pointer returned using IS_ERR().  If it doesn't
1443  * indicate some error code (negative errno), ep->driver_data values
1444  * have been overwritten.
1445  */
1446 struct net_device *gether_connect(struct gether *link)
1447 {
1448         struct eth_dev          *dev = link->ioport;
1449         int                     result = 0;
1450
1451         if (!dev)
1452                 return ERR_PTR(-EINVAL);
1453
1454         link->in_ep->driver_data = dev;
1455         result = usb_ep_enable(link->in_ep);
1456         if (result != 0) {
1457                 DBG(dev, "enable %s --> %d\n",
1458                         link->in_ep->name, result);
1459                 goto fail0;
1460         }
1461
1462         link->out_ep->driver_data = dev;
1463         result = usb_ep_enable(link->out_ep);
1464         if (result != 0) {
1465                 DBG(dev, "enable %s --> %d\n",
1466                         link->out_ep->name, result);
1467                 goto fail1;
1468         }
1469
1470         if (result == 0)
1471                 result = alloc_requests(dev, link, qlen(dev->gadget));
1472
1473         if (result == 0) {
1474                 dev->zlp = link->is_zlp_ok;
1475                 DBG(dev, "qlen %d\n", qlen(dev->gadget));
1476
1477                 dev->header_len = link->header_len;
1478                 dev->unwrap = link->unwrap;
1479                 dev->wrap = link->wrap;
1480
1481                 spin_lock(&dev->lock);
1482                 dev->port_usb = link;
1483                 if (netif_running(dev->net)) {
1484                         if (link->open)
1485                                 link->open(link);
1486                 } else {
1487                         if (link->close)
1488                                 link->close(link);
1489                 }
1490                 spin_unlock(&dev->lock);
1491
1492                 netif_carrier_on(dev->net);
1493                 if (netif_running(dev->net))
1494                         eth_start(dev, GFP_ATOMIC);
1495
1496         /* on error, disable any endpoints  */
1497         } else {
1498                 (void) usb_ep_disable(link->out_ep);
1499 fail1:
1500                 (void) usb_ep_disable(link->in_ep);
1501         }
1502 fail0:
1503         /* caller is responsible for cleanup on error */
1504         if (result < 0)
1505                 return ERR_PTR(result);
1506         return dev->net;
1507 }
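/*
 * Typical caller, sketched (names other than gether_connect() and
 * gether_disconnect() are illustrative): a function driver's set_alt()
 * handler for its data interface does roughly
 *
 *	net = gether_connect(&func->port);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *
 * and its disable() path later calls gether_disconnect(&func->port), after
 * which the endpoints are disabled and all queued requests are freed.
 */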
1508
1509 /**
1510  * gether_disconnect - notify network layer that USB link is inactive
1511  * @link: the USB link, on which gether_connect() was called
1512  * Context: irqs blocked
1513  *
1514  * This is called to deactivate endpoints and let the network layer know
1515  * the connection went inactive ("no carrier").
1516  *
1517  * On return, the state is as if gether_connect() had never been called.
1518  * The endpoints are inactive, and accordingly without active USB I/O.
1519  * Pointers to endpoint descriptors and endpoint private data are nulled.
1520  */
1521 void gether_disconnect(struct gether *link)
1522 {
1523         struct eth_dev          *dev = link->ioport;
1524         struct usb_request      *req;
1525
1526         WARN_ON(!dev);
1527         if (!dev)
1528                 return;
1529
1530         DBG(dev, "%s\n", __func__);
1531
1532         netif_stop_queue(dev->net);
1533         netif_carrier_off(dev->net);
1534
1535         /* disable endpoints, forcing (synchronous) completion
1536          * of all pending i/o.  then free the request objects
1537          * and forget about the endpoints.
1538          */
1539         usb_ep_disable(link->in_ep);
1540         spin_lock(&dev->req_lock);
1541         while (!list_empty(&dev->tx_reqs)) {
1542                 req = container_of(dev->tx_reqs.next,
1543                                         struct usb_request, list);
1544                 list_del(&req->list);
1545
1546                 spin_unlock(&dev->req_lock);
1547                 if (dev->gadget->sg_supported)
1548                         kfree(req->sg);
1549                 else
1550                         kfree(req->buf);
1551                 usb_ep_free_request(link->in_ep, req);
1552                 spin_lock(&dev->req_lock);
1553         }
1554         spin_unlock(&dev->req_lock);
1555         link->in_ep->driver_data = NULL;
1556         link->in_ep->desc = NULL;
1557
1558         usb_ep_disable(link->out_ep);
1559         spin_lock(&dev->req_lock);
1560         while (!list_empty(&dev->rx_reqs)) {
1561                 req = container_of(dev->rx_reqs.next,
1562                                         struct usb_request, list);
1563                 list_del(&req->list);
1564
1565                 spin_unlock(&dev->req_lock);
1566                 usb_ep_free_request(link->out_ep, req);
1567                 spin_lock(&dev->req_lock);
1568         }
1569         spin_unlock(&dev->req_lock);
1570         link->out_ep->driver_data = NULL;
1571         link->out_ep->desc = NULL;
1572
1573         /* finish forgetting about this USB link episode */
1574         dev->header_len = 0;
1575         dev->unwrap = NULL;
1576         dev->wrap = NULL;
1577
1578         spin_lock(&dev->lock);
1579         dev->port_usb = NULL;
1580         spin_unlock(&dev->lock);
1581 }