// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
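
/* Worked out: 15 * 1024 + 52 = 15412 bytes of MTU, so a maximal frame is
 * GETHER_MAX_ETH_FRAME_LEN = 15412 + ETH_HLEN (14) = 15426 bytes.  Together
 * with RX_EXTRA and any function-driver header that still stays below the
 * 16 KiB allocation size instead of spilling into 32 KiB blocks.
 */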

struct eth_dev {
	/* lock is held while accessing port_usb */
	spinlock_t		lock;
	struct gether		*port_usb;
	struct net_device	*net;
	struct usb_gadget	*gadget;
	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;
	struct sk_buff_head	rx_frames;
	unsigned		qmult;
	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);
	struct work_struct	work;
	unsigned long		todo;
#define	WORK_RX_MEMORY		0
	bool			zlp;
	bool			no_skb_reserve;
	bool			ifname_set;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed >= USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}

/*-------------------------------------------------------------------------*/
/* REVISIT there must be a better way than having two sets of debug calls ... */
#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 * - WOL (by tracking suspends and issuing remote wakeup)
 * - msglevel (implies updated messaging)
 * - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
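
/* Defer rx-memory recovery to process context.  The flag bit in dev->todo
 * makes the request idempotent: if work for this flag is already pending,
 * nothing new is scheduled.
 */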
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget	*g = dev->gadget;
	struct sk_buff		*skb;
	int			retval = -ENOMEM;
	size_t			size = 0;
	struct usb_ep		*out;
	unsigned long		flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
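
/* RX completion: on success the skb is either unwrapped by the function
 * driver (dev->unwrap) or queued to rx_frames as-is, then each recovered
 * frame is length-checked and handed to the network stack with netif_rx().
 * Shutdown/reset status codes free the skb; anything else counts as an rx
 * error.  Unless the interface has stopped running, the request is then
 * resubmitted with rx_submit().
 */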
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);
		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;
		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;
	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
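
/* Pre-allocate (or recycle) usb_requests so the freelist holds n entries
 * for the given endpoint; called with dev->req_lock held by the caller.
 * Entries beyond n left over from a larger earlier configuration are freed,
 * and -ENOMEM is returned only if the list would otherwise end up empty.
 */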
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	struct usb_request	*req;

	/* queue/recycle up to N requests */
	list_for_each_entry(req, list, list) {

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		return list_empty(list) ? -ENOMEM : 0;
	list_add(&req->list, list);

		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		req = container_of(next, struct usb_request, list);

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
		DBG(dev, "can't alloc requests\n");
	spin_unlock(&dev->req_lock);
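
/* Queue an skb-backed read on every free rx request.  req_lock is dropped
 * across each rx_submit() call (which takes dev->lock and allocates), then
 * re-taken to pull the next free request off rx_reqs.
 */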
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}
	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}
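
/* TX completion: update stats, return the request to the tx_reqs freelist,
 * decrement the in-flight counter and, while the link still has carrier,
 * wake the transmit queue again.
 */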
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		fallthrough;
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}
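
/* Hard-start-xmit path: apply the host-requested CDC/RNDIS packet filter,
 * take a request from the tx_reqs freelist (stopping the queue if that
 * empties it), let the function driver wrap() the skb in its framing, and
 * queue it on the IN endpoint.  ZLP handling follows CDC rules unless the
 * link uses fixed-size (e.g. NCM) transfers.
 */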
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		if (skb)
			dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
			dev->port_usb->is_fixed &&
			length == dev->port_usb->fixed_in_len &&
			(length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/
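
/* Parse a MAC address given as pairs of hex digits, optionally separated by
 * '.' or ':'.  If the string is missing or does not yield a valid unicast
 * address, fall back to a random address and report that by returning
 * nonzero, so callers can warn about it.
 */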
static int get_ether_addr(const char *str, u8 *dev_addr)
{
		for (i = 0; i < 6; i++) {
			if ((*str == '.') || (*str == ':'))
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
		if (is_valid_ether_addr(dev_addr))
	eth_random_addr(dev_addr);

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
	snprintf(str, len, "%pM", dev_addr);

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @dev_addr: text form of the MAC address to use for the device end of the
 *	link, or NULL to request a random address
 * @host_addr: text form of the MAC address to use for the host end of the
 *	link, or NULL to request a random address
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier applied at high/super speed
 * @netname: name for network device (for example, "usb")
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;
	u8			addr[ETH_ALEN];

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	eth_hw_addr_set(net, addr);
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);
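
/* gether_setup_name_default() is the two-step variant (used e.g. by
 * configfs-composed gadgets): it allocates and initialises the net_device
 * like gether_setup_name(), but leaves registration to a later
 * gether_register_netdev() call, so MAC addresses, qmult and the interface
 * name can still be changed through the gether_set_*() helpers first.
 */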
struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;
	net->addr_assign_type = NET_ADDR_RANDOM;
	eth_hw_addr_set(net, dev->dev_mac);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	}

	INFO(dev, "HOST MAC %pM\n", dev->host_mac);
	INFO(dev, "MAC %pM\n", dev->dev_mac);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_carrier_off(net);
	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;
	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;
	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);
	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev = netdev_priv(net);

	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);

	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	ret = scnprintf(name, len, "%s\n",
			dev->ifname_set ? net->name : netdev_name(net));
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

int gether_set_ifname(struct net_device *net, const char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	char tmp[IFNAMSIZ];
	const char *p;

	if (name[len - 1] == '\n')
		len--;
	if (len >= sizeof(tmp))
		return -E2BIG;

	strscpy(tmp, name, len + 1);
	if (!dev_valid_name(tmp))
		return -EINVAL;

	/* Require exactly one %d, so binding will not fail with EEXIST. */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	strncpy(net->name, tmp, sizeof(net->name));
	dev->ifname_set = true;
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * host side of the link.
 *
 * Verify the returned net_device pointer with IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);
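
/* Rough sketch of the expected calling sequence from a function driver's
 * set_alt()/disable() handlers (identifiers below are illustrative, not
 * defined in this file):
 *
 *	struct gether *link = &my_func->port;
 *
 *	link->in_ep and link->out_ep are already claimed and matched to the
 *	current speed's descriptors, and link->ioport points at the eth_dev
 *	for this link;
 *
 *	net = gether_connect(link);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	gether_disconnect(link);	// e.g. when the interface is disabled
 */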

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");