1 // SPDX-License-Identifier: GPL-2.0+
3 /* Copyright (C) 2015 Microchip Technology */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
33 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME "lan78xx"
37 #define TX_TIMEOUT_JIFFIES (5 * HZ)
38 #define THROTTLE_JIFFIES (HZ / 8)
39 #define UNLINK_TIMEOUT_MS 3
41 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
43 #define SS_USB_PKT_SIZE (1024)
44 #define HS_USB_PKT_SIZE (512)
45 #define FS_USB_PKT_SIZE (64)
47 #define MAX_RX_FIFO_SIZE (12 * 1024)
48 #define MAX_TX_FIFO_SIZE (12 * 1024)
50 #define FLOW_THRESHOLD(n) ((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off) ((FLOW_THRESHOLD(on) << 0) | \
52 (FLOW_THRESHOLD(off) << 8))
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS 9216
56 #define FLOW_ON_HS 8704
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS 4096
60 #define FLOW_OFF_HS 1024
62 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY (0x0800)
64 #define MAX_SINGLE_PACKET_SIZE (9000)
65 #define DEFAULT_TX_CSUM_ENABLE (true)
66 #define DEFAULT_RX_CSUM_ENABLE (true)
67 #define DEFAULT_TSO_CSUM_ENABLE (true)
68 #define DEFAULT_VLAN_FILTER_ENABLE (true)
69 #define DEFAULT_VLAN_RX_OFFLOAD (true)
70 #define TX_OVERHEAD (8)
73 #define LAN78XX_USB_VENDOR_ID (0x0424)
74 #define LAN7800_USB_PRODUCT_ID (0x7800)
75 #define LAN7850_USB_PRODUCT_ID (0x7850)
76 #define LAN7801_USB_PRODUCT_ID (0x7801)
77 #define LAN78XX_EEPROM_MAGIC (0x78A5)
78 #define LAN78XX_OTP_MAGIC (0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID (0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
85 #define EEPROM_INDICATOR (0xA5)
86 #define EEPROM_MAC_OFFSET (0x01)
87 #define MAX_EEPROM_SIZE 512
88 #define OTP_INDICATOR_1 (0xF3)
89 #define OTP_INDICATOR_2 (0xF7)
91 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
92 WAKE_MCAST | WAKE_BCAST | \
93 WAKE_ARP | WAKE_MAGIC)
95 /* USB related defines */
96 #define BULK_IN_PIPE 1
97 #define BULK_OUT_PIPE 2
99 /* default autosuspend delay (mSec)*/
100 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
102 /* statistic update interval (mSec) */
103 #define STAT_UPDATE_TIMER (1 * 1000)
105 /* time to wait for MAC or FCT to stop (jiffies) */
106 #define HW_DISABLE_TIMEOUT (HZ / 10)
108 /* time to wait between polling MAC or FCT state (ms) */
109 #define HW_DISABLE_DELAY_MS 1
111 /* defines interrupts from interrupt EP */
112 #define MAX_INT_EP (32)
113 #define INT_EP_INTEP (31)
114 #define INT_EP_OTP_WR_DONE (28)
115 #define INT_EP_EEE_TX_LPI_START (26)
116 #define INT_EP_EEE_TX_LPI_STOP (25)
117 #define INT_EP_EEE_RX_LPI (24)
118 #define INT_EP_MAC_RESET_TIMEOUT (23)
119 #define INT_EP_RDFO (22)
120 #define INT_EP_TXE (21)
121 #define INT_EP_USB_STATUS (20)
122 #define INT_EP_TX_DIS (19)
123 #define INT_EP_RX_DIS (18)
124 #define INT_EP_PHY (17)
125 #define INT_EP_DP (16)
126 #define INT_EP_MAC_ERR (15)
127 #define INT_EP_TDFU (14)
128 #define INT_EP_TDFO (13)
129 #define INT_EP_UTX (12)
130 #define INT_EP_GPIO_11 (11)
131 #define INT_EP_GPIO_10 (10)
132 #define INT_EP_GPIO_9 (9)
133 #define INT_EP_GPIO_8 (8)
134 #define INT_EP_GPIO_7 (7)
135 #define INT_EP_GPIO_6 (6)
136 #define INT_EP_GPIO_5 (5)
137 #define INT_EP_GPIO_4 (4)
138 #define INT_EP_GPIO_3 (3)
139 #define INT_EP_GPIO_2 (2)
140 #define INT_EP_GPIO_1 (1)
141 #define INT_EP_GPIO_0 (0)
143 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
145 "RX Alignment Errors",
146 "Rx Fragment Errors",
148 "RX Undersize Frame Errors",
149 "RX Oversize Frame Errors",
151 "RX Unicast Byte Count",
152 "RX Broadcast Byte Count",
153 "RX Multicast Byte Count",
155 "RX Broadcast Frames",
156 "RX Multicast Frames",
159 "RX 65 - 127 Byte Frames",
160 "RX 128 - 255 Byte Frames",
161 "RX 256 - 511 Bytes Frames",
162 "RX 512 - 1023 Byte Frames",
163 "RX 1024 - 1518 Byte Frames",
164 "RX Greater 1518 Byte Frames",
165 "EEE RX LPI Transitions",
168 "TX Excess Deferral Errors",
171 "TX Single Collisions",
172 "TX Multiple Collisions",
173 "TX Excessive Collision",
174 "TX Late Collisions",
175 "TX Unicast Byte Count",
176 "TX Broadcast Byte Count",
177 "TX Multicast Byte Count",
179 "TX Broadcast Frames",
180 "TX Multicast Frames",
183 "TX 65 - 127 Byte Frames",
184 "TX 128 - 255 Byte Frames",
185 "TX 256 - 511 Bytes Frames",
186 "TX 512 - 1023 Byte Frames",
187 "TX 1024 - 1518 Byte Frames",
188 "TX Greater 1518 Byte Frames",
189 "EEE TX LPI Transitions",
193 struct lan78xx_statstage {
195 u32 rx_alignment_errors;
196 u32 rx_fragment_errors;
197 u32 rx_jabber_errors;
198 u32 rx_undersize_frame_errors;
199 u32 rx_oversize_frame_errors;
200 u32 rx_dropped_frames;
201 u32 rx_unicast_byte_count;
202 u32 rx_broadcast_byte_count;
203 u32 rx_multicast_byte_count;
204 u32 rx_unicast_frames;
205 u32 rx_broadcast_frames;
206 u32 rx_multicast_frames;
208 u32 rx_64_byte_frames;
209 u32 rx_65_127_byte_frames;
210 u32 rx_128_255_byte_frames;
211 u32 rx_256_511_bytes_frames;
212 u32 rx_512_1023_byte_frames;
213 u32 rx_1024_1518_byte_frames;
214 u32 rx_greater_1518_byte_frames;
215 u32 eee_rx_lpi_transitions;
218 u32 tx_excess_deferral_errors;
219 u32 tx_carrier_errors;
220 u32 tx_bad_byte_count;
221 u32 tx_single_collisions;
222 u32 tx_multiple_collisions;
223 u32 tx_excessive_collision;
224 u32 tx_late_collisions;
225 u32 tx_unicast_byte_count;
226 u32 tx_broadcast_byte_count;
227 u32 tx_multicast_byte_count;
228 u32 tx_unicast_frames;
229 u32 tx_broadcast_frames;
230 u32 tx_multicast_frames;
232 u32 tx_64_byte_frames;
233 u32 tx_65_127_byte_frames;
234 u32 tx_128_255_byte_frames;
235 u32 tx_256_511_bytes_frames;
236 u32 tx_512_1023_byte_frames;
237 u32 tx_1024_1518_byte_frames;
238 u32 tx_greater_1518_byte_frames;
239 u32 eee_tx_lpi_transitions;
243 struct lan78xx_statstage64 {
245 u64 rx_alignment_errors;
246 u64 rx_fragment_errors;
247 u64 rx_jabber_errors;
248 u64 rx_undersize_frame_errors;
249 u64 rx_oversize_frame_errors;
250 u64 rx_dropped_frames;
251 u64 rx_unicast_byte_count;
252 u64 rx_broadcast_byte_count;
253 u64 rx_multicast_byte_count;
254 u64 rx_unicast_frames;
255 u64 rx_broadcast_frames;
256 u64 rx_multicast_frames;
258 u64 rx_64_byte_frames;
259 u64 rx_65_127_byte_frames;
260 u64 rx_128_255_byte_frames;
261 u64 rx_256_511_bytes_frames;
262 u64 rx_512_1023_byte_frames;
263 u64 rx_1024_1518_byte_frames;
264 u64 rx_greater_1518_byte_frames;
265 u64 eee_rx_lpi_transitions;
268 u64 tx_excess_deferral_errors;
269 u64 tx_carrier_errors;
270 u64 tx_bad_byte_count;
271 u64 tx_single_collisions;
272 u64 tx_multiple_collisions;
273 u64 tx_excessive_collision;
274 u64 tx_late_collisions;
275 u64 tx_unicast_byte_count;
276 u64 tx_broadcast_byte_count;
277 u64 tx_multicast_byte_count;
278 u64 tx_unicast_frames;
279 u64 tx_broadcast_frames;
280 u64 tx_multicast_frames;
282 u64 tx_64_byte_frames;
283 u64 tx_65_127_byte_frames;
284 u64 tx_128_255_byte_frames;
285 u64 tx_256_511_bytes_frames;
286 u64 tx_512_1023_byte_frames;
287 u64 tx_1024_1518_byte_frames;
288 u64 tx_greater_1518_byte_frames;
289 u64 eee_tx_lpi_transitions;
293 static u32 lan78xx_regs[] = {
315 #define PHY_REG_SIZE (32 * sizeof(u32))
319 struct lan78xx_priv {
320 struct lan78xx_net *dev;
322 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
323 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
324 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
325 struct mutex dataport_mutex; /* for dataport access */
326 spinlock_t rfe_ctl_lock; /* for rfe register access */
327 struct work_struct set_multicast;
328 struct work_struct set_vlan;
342 struct skb_data { /* skb->cb is one of these */
344 struct lan78xx_net *dev;
345 enum skb_state state;
351 struct usb_ctrlrequest req;
352 struct lan78xx_net *dev;
355 #define EVENT_TX_HALT 0
356 #define EVENT_RX_HALT 1
357 #define EVENT_RX_MEMORY 2
358 #define EVENT_STS_SPLIT 3
359 #define EVENT_LINK_RESET 4
360 #define EVENT_RX_PAUSED 5
361 #define EVENT_DEV_WAKING 6
362 #define EVENT_DEV_ASLEEP 7
363 #define EVENT_DEV_OPEN 8
364 #define EVENT_STAT_UPDATE 9
365 #define EVENT_DEV_DISCONNECT 10
368 struct mutex access_lock; /* for stats access */
369 struct lan78xx_statstage saved;
370 struct lan78xx_statstage rollover_count;
371 struct lan78xx_statstage rollover_max;
372 struct lan78xx_statstage64 curr_stat;
375 struct irq_domain_data {
376 struct irq_domain *irqdomain;
378 struct irq_chip *irqchip;
379 irq_flow_handler_t irq_handler;
381 struct mutex irq_lock; /* for irq bus access */
385 struct net_device *net;
386 struct usb_device *udev;
387 struct usb_interface *intf;
392 struct sk_buff_head rxq;
393 struct sk_buff_head txq;
394 struct sk_buff_head done;
395 struct sk_buff_head txq_pend;
397 struct tasklet_struct bh;
398 struct delayed_work wq;
402 struct urb *urb_intr;
403 struct usb_anchor deferred;
405 struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
406 struct mutex phy_mutex; /* for phy access */
407 unsigned int pipe_in, pipe_out, pipe_intr;
409 u32 hard_mtu; /* count any extra framing */
410 size_t rx_urb_size; /* size for rx urbs */
414 wait_queue_head_t *wait;
415 unsigned char suspend_count;
417 unsigned int maxpacket;
418 struct timer_list stat_monitor;
420 unsigned long data[5];
427 struct mii_bus *mdiobus;
428 phy_interface_t interface;
431 u8 fc_request_control;
434 struct statstage stats;
436 struct irq_domain_data domain_data;
439 /* define external phy id */
440 #define PHY_LAN8835 (0x0007C130)
441 #define PHY_KSZ9031RNX (0x00221620)
443 /* use ethtool to change the level for any given device */
444 static int msg_level = -1;
445 module_param(msg_level, int, 0);
446 MODULE_PARM_DESC(msg_level, "Override default message level");
448 /* TSO seems to be having some issue with Selective Acknowledge (SACK) that
449 * results in lost data never being retransmitted.
450 * Disable it by default now, but adds a module parameter to enable it for
451 * debug purposes (the full cause is not currently understood).
453 static bool enable_tso;
454 module_param(enable_tso, bool, 0644);
455 MODULE_PARM_DESC(enable_tso, "Enables TCP segmentation offload");
457 #define INT_URB_MICROFRAMES_PER_MS 8
458 static int int_urb_interval_ms = 8;
459 module_param(int_urb_interval_ms, int, 0);
460 MODULE_PARM_DESC(int_urb_interval_ms, "Override usb interrupt urb interval");
/* Read one 32-bit device register via a USB vendor control transfer.
 * Uses a kmalloc'd bounce buffer because control-transfer data must be
 * DMA-safe (not on the stack).  Returns >= 0 on success, negative on error.
 * NOTE(review): extraction dropped lines here (alloc check, le32 conversion
 * into *data, kfree, return) — gaps in the embedded numbering; verify
 * against the upstream driver.
 */
462 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
/* Fail fast once the device is gone; no point issuing USB traffic. */
467 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
470 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
474 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
475 			      USB_VENDOR_REQUEST_READ_REGISTER,
476 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
477 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
478 	if (likely(ret >= 0)) {
/* Rate-limit the warning: a wedged device would otherwise flood the log. */
481 	} else if (net_ratelimit()) {
482 		netdev_warn(dev->net,
483 			    "Failed to read register index 0x%08x. ret = %d",
/* Write one 32-bit device register via a USB vendor control transfer.
 * Mirror of lan78xx_read_reg(): DMA-safe bounce buffer, OUT direction.
 * NOTE(review): truncated extraction — alloc check, cpu_to_le32 store,
 * kfree and return are among the dropped lines.
 */
492 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
497 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
500 	buf = kmalloc(sizeof(u32), GFP_KERNEL);
507 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
508 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
509 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
510 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
511 	if (unlikely(ret < 0) &&
513 		netdev_warn(dev->net,
514 			    "Failed to write register index 0x%08x. ret = %d",
523 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
529 ret = lan78xx_read_reg(dev, reg, &buf);
534 buf |= (mask & data);
536 ret = lan78xx_write_reg(dev, reg, buf);
/* Fetch the hardware statistics block with a single vendor control read,
 * converting each little-endian counter to host order in place.
 * NOTE(review): truncated extraction — alloc check, the copy into @data,
 * kfree and return value handling were dropped.
 */
543 static int lan78xx_read_stats(struct lan78xx_net *dev,
544 			      struct lan78xx_statstage *data)
548 	struct lan78xx_statstage *stats;
/* Heap buffer: control-transfer payloads must be DMA-safe. */
552 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
556 	ret = usb_control_msg(dev->udev,
557 			      usb_rcvctrlpipe(dev->udev, 0),
558 			      USB_VENDOR_REQUEST_GET_STATS,
559 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
564 			      USB_CTRL_SET_TIMEOUT);
565 	if (likely(ret >= 0)) {
/* Byte-swap every u32 counter from device (LE) to CPU order. */
568 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
569 			le32_to_cpus(&src[i]);
573 		netdev_warn(dev->net,
574 			    "Failed to read stat ret = %d", ret);
/* If the freshly-read 32-bit counter is below the last saved snapshot the
 * hardware counter wrapped; bump the per-member rollover count.
 * NOTE(review): the do { ... } while (0) wrapper lines were dropped by the
 * extraction.
 */
582 #define check_counter_rollover(struct1, dev_stats, member)	\
584 	if ((struct1)->member < (dev_stats).saved.member) \
585 		(dev_stats).rollover_count.member++; \
/* Compare every hardware counter against the previous snapshot to detect
 * 32-bit wraparound, then save @stats as the new snapshot.  Caller holds
 * dev->stats.access_lock (see lan78xx_update_stats).
 */
588 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
589 					struct lan78xx_statstage *stats)
591 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
592 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
593 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
594 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
595 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
596 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
597 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
598 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
599 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
600 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
601 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
602 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
603 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
604 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
605 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
606 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
607 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
608 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
609 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
610 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
611 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
612 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
613 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
614 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
615 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
616 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
617 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
618 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
619 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
620 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
621 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
622 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
623 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
624 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
625 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
626 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
627 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
628 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
629 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
630 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
631 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
632 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
633 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
634 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
635 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
636 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
637 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
/* Snapshot becomes the baseline for the next rollover check. */
639 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh dev->stats.curr_stat: read the hardware counters, account for
 * 32-bit rollover, and widen each counter to 64 bits as
 * current + rollovers * (max + 1).  Takes an autopm reference so the
 * device is awake for the USB control transfer.
 */
642 static void lan78xx_update_stats(struct lan78xx_net *dev)
644 	u32 *p, *count, *max;
647 	struct lan78xx_statstage lan78xx_stats;
649 	if (usb_autopm_get_interface(dev->intf) < 0)
/* Treat the three statstage structs as parallel flat u32/u64 arrays. */
652 	p = (u32 *)&lan78xx_stats;
653 	count = (u32 *)&dev->stats.rollover_count;
654 	max = (u32 *)&dev->stats.rollover_max;
655 	data = (u64 *)&dev->stats.curr_stat;
657 	mutex_lock(&dev->stats.access_lock);
659 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
660 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
662 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
663 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
665 	mutex_unlock(&dev->stats.access_lock);
667 	usb_autopm_put_interface(dev->intf);
670 /* Loop until the read is completed with timeout called with phy_mutex held */
671 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
673 unsigned long start_time = jiffies;
678 ret = lan78xx_read_reg(dev, MII_ACC, &val);
679 if (unlikely(ret < 0))
682 if (!(val & MII_ACC_MII_BUSY_))
684 } while (!time_after(jiffies, start_time + HZ));
689 static inline u32 mii_access(int id, int index, int read)
693 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
694 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
696 ret |= MII_ACC_MII_READ_;
698 ret |= MII_ACC_MII_WRITE_;
699 ret |= MII_ACC_MII_BUSY_;
704 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
706 unsigned long start_time = jiffies;
711 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
712 if (unlikely(ret < 0))
715 if (!(val & E2P_CMD_EPC_BUSY_) ||
716 (val & E2P_CMD_EPC_TIMEOUT_))
718 usleep_range(40, 100);
719 } while (!time_after(jiffies, start_time + HZ));
721 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
722 netdev_warn(dev->net, "EEPROM read operation timeout");
729 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
731 unsigned long start_time = jiffies;
736 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
737 if (unlikely(ret < 0))
740 if (!(val & E2P_CMD_EPC_BUSY_))
743 usleep_range(40, 100);
744 } while (!time_after(jiffies, start_time + HZ));
746 netdev_warn(dev->net, "EEPROM is busy");
/* Read @length bytes from the configuration EEPROM, one byte per command.
 * On LAN7800 the EEPROM pins are shared with the LEDs, so the LED outputs
 * are disabled for the duration and HW_CFG is restored afterwards.
 * NOTE(review): truncated extraction — the saved-value capture, several
 * error exits and the final return were dropped.
 */
750 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
751 				   u32 length, u8 *data)
758 	/* depends on chip, some EEPROM pins are muxed with LED function.
759 	 * disable & restore LED function to access EEPROM.
761 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
763 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
764 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
765 		ret = lan78xx_write_reg(dev, HW_CFG, val);
768 	retval = lan78xx_eeprom_confirm_not_busy(dev);
/* One EEPROM READ command per byte; wait for completion each time. */
772 	for (i = 0; i < length; i++) {
773 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
774 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
775 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
776 		if (unlikely(ret < 0)) {
781 		retval = lan78xx_wait_eeprom(dev);
785 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
786 		if (unlikely(ret < 0)) {
/* Only the low byte of E2P_DATA is the EEPROM byte. */
791 		data[i] = val & 0xFF;
/* Restore the original LED configuration on LAN7800. */
797 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
798 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read from the EEPROM only if byte 0 carries the EEPROM_INDICATOR (0xA5)
 * signature; otherwise the part is absent/blank.
 * NOTE(review): truncated — the else/error path and return were dropped.
 */
803 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
804 			       u32 length, u8 *data)
809 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
810 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
811 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write @length bytes to the configuration EEPROM: issue EWEN (write
 * enable) once, then a WRITE command per byte, waiting for completion
 * after each.  As in the raw read, LAN7800 LED muxing is disabled and
 * restored around the access.
 * NOTE(review): truncated extraction — saved-value capture, data fill,
 * offset increment, error exits and final return were dropped.
 */
818 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
819 				    u32 length, u8 *data)
826 	/* depends on chip, some EEPROM pins are muxed with LED function.
827 	 * disable & restore LED function to access EEPROM.
829 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
831 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
832 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
833 		ret = lan78xx_write_reg(dev, HW_CFG, val);
836 	retval = lan78xx_eeprom_confirm_not_busy(dev);
840 	/* Issue write/erase enable command */
841 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
842 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
843 	if (unlikely(ret < 0)) {
848 	retval = lan78xx_wait_eeprom(dev);
852 	for (i = 0; i < length; i++) {
853 		/* Fill data register */
855 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
861 		/* Send "write" command */
862 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
863 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
864 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
870 		retval = lan78xx_wait_eeprom(dev);
/* Restore original LED configuration on LAN7800. */
879 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
880 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read @length bytes from one-time-programmable memory.  Powers the OTP
 * block up if it is in power-down, then per byte: program ADDR1/ADDR2,
 * issue a READ via OTP_FUNC_CMD + OTP_CMD_GO, poll OTP_STATUS until not
 * busy (1 s limit), and collect the byte from OTP_RD_DATA.
 * NOTE(review): truncated — the -EIO timeout returns, udelays and final
 * return were dropped by the extraction.
 */
885 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
886 				u32 length, u8 *data)
890 	unsigned long timeout;
892 	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
894 	if (buf & OTP_PWR_DN_PWRDN_N_) {
895 		/* clear it and wait to be cleared */
896 		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
898 		timeout = jiffies + HZ;
901 			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
902 			if (time_after(jiffies, timeout)) {
903 				netdev_warn(dev->net,
904 					    "timeout on OTP_PWR_DN");
907 		} while (buf & OTP_PWR_DN_PWRDN_N_);
910 	for (i = 0; i < length; i++) {
/* Address is split: high bits in ADDR1, low bits in ADDR2. */
911 		lan78xx_write_reg(dev, OTP_ADDR1,
912 				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
913 		lan78xx_write_reg(dev, OTP_ADDR2,
914 				  ((offset + i) & OTP_ADDR2_10_3));
916 		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
917 		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
919 		timeout = jiffies + HZ;
922 			lan78xx_read_reg(dev, OTP_STATUS, &buf);
923 			if (time_after(jiffies, timeout)) {
924 				netdev_warn(dev->net,
925 					    "timeout on OTP_STATUS");
928 		} while (buf & OTP_STATUS_BUSY_);
930 		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
932 		data[i] = (u8)(buf & 0xFF);
/* Program @length bytes into OTP memory.  Same power-up dance as the raw
 * read, then selects BYTE program mode and, per byte: program address and
 * data, issue PRGVRFY (program + verify) via OTP_CMD_GO, and poll
 * OTP_STATUS until complete (1 s limit).  OTP bits are one-time: callers
 * gate this behind the OTP magic/indicator checks.
 * NOTE(review): truncated — -EIO timeout returns and final return dropped.
 */
938 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
939 				 u32 length, u8 *data)
943 	unsigned long timeout;
945 	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
947 	if (buf & OTP_PWR_DN_PWRDN_N_) {
948 		/* clear it and wait to be cleared */
949 		lan78xx_write_reg(dev, OTP_PWR_DN, 0);
951 		timeout = jiffies + HZ;
954 			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
955 			if (time_after(jiffies, timeout)) {
956 				netdev_warn(dev->net,
957 					    "timeout on OTP_PWR_DN completion");
960 		} while (buf & OTP_PWR_DN_PWRDN_N_);
963 	/* set to BYTE program mode */
964 	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
966 	for (i = 0; i < length; i++) {
967 		lan78xx_write_reg(dev, OTP_ADDR1,
968 				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
969 		lan78xx_write_reg(dev, OTP_ADDR2,
970 				  ((offset + i) & OTP_ADDR2_10_3));
971 		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
972 		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
973 		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
975 		timeout = jiffies + HZ;
978 			lan78xx_read_reg(dev, OTP_STATUS, &buf);
979 			if (time_after(jiffies, timeout)) {
980 				netdev_warn(dev->net,
981 					    "Timeout on OTP_STATUS completion");
984 		} while (buf & OTP_STATUS_BUSY_);
/* Read OTP after validating the signature byte at offset 0.
 * OTP_INDICATOR_2 (0xF7) presumably selects the second image (offset
 * shift) while OTP_INDICATOR_1 (0xF3) is the first — TODO confirm; the
 * lines between the checks were dropped by the extraction.
 */
990 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
991 			    u32 length, u8 *data)
996 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
999 		if (sig == OTP_INDICATOR_2)
1001 		else if (sig != OTP_INDICATOR_1)
1004 		ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL up to 100 times (40-100 us apart) until the dataport is
 * ready (DPRDY set).  Returns on readiness or register-read failure;
 * warns and falls through to an error return on timeout.
 * NOTE(review): truncated — the success/error return lines were dropped.
 */
1010 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1014 	for (i = 0; i < 100; i++) {
1017 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1018 		if (unlikely(ret < 0))
1021 		if (dp_sel & DP_SEL_DPRDY_)
1024 		usleep_range(40, 100);
1027 	netdev_warn(dev->net, "%s timed out", __func__);
/* Write @length words into the selected internal RAM (@ram_select) via the
 * dataport: select the RAM in DP_SEL, then per word program DP_ADDR,
 * DP_DATA and issue DP_CMD_WRITE_, waiting for ready between commands.
 * Serialized by pdata->dataport_mutex; holds an autopm reference.
 * NOTE(review): truncated — error exits and the unlock-path labels were
 * dropped by the extraction.
 */
1032 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1033 				  u32 addr, u32 length, u32 *buf)
1035 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1039 	if (usb_autopm_get_interface(dev->intf) < 0)
1042 	mutex_lock(&pdata->dataport_mutex);
1044 	ret = lan78xx_dataport_wait_not_busy(dev);
1048 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1050 	dp_sel &= ~DP_SEL_RSEL_MASK_;
1051 	dp_sel |= ram_select;
1052 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1054 	for (i = 0; i < length; i++) {
1055 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1057 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1059 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1061 		ret = lan78xx_dataport_wait_not_busy(dev);
1067 	mutex_unlock(&pdata->dataport_mutex);
1068 	usb_autopm_put_interface(dev->intf);
/* Pack a MAC address into perfect-filter slot @index of the shadow table:
 * [1] = MAF_LO (addr bytes 0-3, little-end first), [0] = MAF_HI (bytes
 * 4-5 plus VALID and TYPE_DST flags).  Slot 0 is reserved for the device's
 * own address, hence index > 0.  Registers are written later by the
 * deferred multicast worker.
 * NOTE(review): the addr[3]/addr[5] packing lines were dropped.
 */
1073 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1074 				    int index, u8 addr[ETH_ALEN])
1078 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1080 		temp = addr[2] | (temp << 8);
1081 		temp = addr[1] | (temp << 8);
1082 		temp = addr[0] | (temp << 8);
1083 		pdata->pfilter_table[index][1] = temp;
1085 		temp = addr[4] | (temp << 8);
1086 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1087 		pdata->pfilter_table[index][0] = temp;
1091 /* returns hash bit number for given MAC address */
1092 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1094 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler: push the shadow multicast state to hardware.  Register and
 * dataport writes sleep, so lan78xx_set_multicast() defers them here.
 * Writes the hash table via the dataport, then each perfect-filter pair
 * (MAF_HI cleared first so the entry is never half-valid), then RFE_CTL.
 */
1097 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1099 	struct lan78xx_priv *pdata =
1100 			container_of(param, struct lan78xx_priv, set_multicast);
1101 	struct lan78xx_net *dev = pdata->dev;
1104 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1107 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1108 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1110 	for (i = 1; i < NUM_OF_MAF; i++) {
/* Invalidate the slot before updating LO, then write HI to re-validate. */
1111 		lan78xx_write_reg(dev, MAF_HI(i), 0);
1112 		lan78xx_write_reg(dev, MAF_LO(i),
1113 				  pdata->pfilter_table[i][1]);
1114 		lan78xx_write_reg(dev, MAF_HI(i),
1115 				  pdata->pfilter_table[i][0]);
1118 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode: rebuild the shadow RX filter state under the rfe_ctl
 * spinlock (this runs in atomic context), then schedule the deferred
 * worker to write it to hardware.  Promiscuous enables all unicast +
 * multicast; ALLMULTI enables all multicast; otherwise the first
 * NUM_OF_MAF-1 multicast addresses use perfect filters and the rest fall
 * back to the 512-bit hash filter.
 */
1121 static void lan78xx_set_multicast(struct net_device *netdev)
1123 	struct lan78xx_net *dev = netdev_priv(netdev);
1124 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1125 	unsigned long flags;
1128 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
/* Start from a clean slate: drop filter modes and clear shadow tables. */
1130 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1131 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1133 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1134 		pdata->mchash_table[i] = 0;
1136 	/* pfilter_table[0] has own HW address */
1137 	for (i = 1; i < NUM_OF_MAF; i++) {
1138 		pdata->pfilter_table[i][0] = 0;
1139 		pdata->pfilter_table[i][1] = 0;
/* Broadcast reception is always on. */
1142 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1144 	if (dev->net->flags & IFF_PROMISC) {
1145 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1146 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1148 		if (dev->net->flags & IFF_ALLMULTI) {
1149 			netif_dbg(dev, drv, dev->net,
1150 				  "receive all multicast enabled");
1151 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1155 	if (netdev_mc_count(dev->net)) {
1156 		struct netdev_hw_addr *ha;
1159 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1161 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1164 		netdev_for_each_mc_addr(ha, netdev) {
1165 			/* set first 32 into Perfect Filter */
1167 				lan78xx_set_addr_filter(pdata, i, ha->addr);
/* Overflow addresses go to the hash filter instead. */
1169 				u32 bitnum = lan78xx_hash(ha->addr);
1171 				pdata->mchash_table[bitnum / 32] |=
1172 						(1 << (bitnum % 32));
1173 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1179 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1181 	/* defer register writes to a sleepable context */
1182 	schedule_work(&pdata->set_multicast);
/* Program MAC pause (FLOW) and FIFO flow-control thresholds (FCT_FLOW)
 * after link resolution.  Capabilities come from autoneg resolution
 * (lcladv/rmtadv) or the user's fixed request.  TX pause uses the maximum
 * pause time (0xFFFF quanta); thresholds depend on USB bus speed (no
 * threshold programmed for full speed).
 */
1185 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1186 				      u16 lcladv, u16 rmtadv)
1188 	u32 flow = 0, fct_flow = 0;
1191 	if (dev->fc_autoneg)
1192 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1194 		cap = dev->fc_request_control;
1196 	if (cap & FLOW_CTRL_TX)
1197 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1199 	if (cap & FLOW_CTRL_RX)
1200 		flow |= FLOW_CR_RX_FCEN_;
1202 	if (dev->udev->speed == USB_SPEED_SUPER)
1203 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1204 	else if (dev->udev->speed == USB_SPEED_HIGH)
1205 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1207 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1208 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1209 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1211 	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1213 	/* threshold value should be set before enabling flow */
1214 	lan78xx_write_reg(dev, FLOW, flow);
/* Reset the MAC under phy_mutex.  Waits for the MDIO controller to go
 * idle first (a reset during an MDIO transaction can wedge the MAC),
 * sets MAC_CR_RST_, then polls MAC_CR until the self-clearing reset bit
 * drops (1 s limit) before allowing further MAC register access.
 * NOTE(review): truncated — the RST_ set, error-goto exits and the
 * timeout -EIO assignment were dropped.
 */
1219 static int lan78xx_mac_reset(struct lan78xx_net *dev)
1221 	unsigned long start_time = jiffies;
1225 	mutex_lock(&dev->phy_mutex);
1227 	/* Resetting the device while there is activity on the MDIO
1228 	 * bus can result in the MAC interface locking up and not
1229 	 * completing register access transactions.
1231 	ret = lan78xx_phy_wait_not_busy(dev);
1235 	ret = lan78xx_read_reg(dev, MAC_CR, &val);
1240 	ret = lan78xx_write_reg(dev, MAC_CR, val);
1244 	/* Wait for the reset to complete before allowing any further
1245 	 * MAC register accesses otherwise the MAC may lock up.
1248 		ret = lan78xx_read_reg(dev, MAC_CR, &val);
1252 		if (!(val & MAC_CR_RST_)) {
1256 	} while (!time_after(jiffies, start_time + HZ));
1260 	mutex_unlock(&dev->phy_mutex);
/* Handle a PHY link-change event (deferred from the interrupt URB).
 * Acks the chip and PHY interrupt sources, samples link state under the
 * phydev lock, then: on link down resets the MAC and stops the stats
 * timer; on link up tunes USB3 U1/U2 link-power-management for the
 * negotiated speed, updates flow control from the resolved advertisement,
 * (re)arms the stats timer and kicks the RX/TX tasklet.
 * NOTE(review): truncated — error exits after register accesses and some
 * closing braces were dropped.
 */
1265 static int lan78xx_link_reset(struct lan78xx_net *dev)
1267 	struct phy_device *phydev = dev->net->phydev;
1268 	struct ethtool_link_ksettings ecmd;
1269 	int ladv, radv, ret, link;
1272 	/* clear LAN78xx interrupt status */
1273 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1274 	if (unlikely(ret < 0))
1277 	/* Acknowledge any pending PHY interrupt, lest it be the last */
1278 	phy_read(phydev, LAN88XX_INT_STS);
1280 	mutex_lock(&phydev->lock);
1281 	phy_read_status(phydev);
1282 	link = phydev->link;
1283 	mutex_unlock(&phydev->lock);
1285 	if (!link && dev->link_on) {
1286 		dev->link_on = false;
1289 		ret = lan78xx_mac_reset(dev);
/* No link: no point updating stats periodically. */
1293 		del_timer(&dev->stat_monitor);
1294 	} else if (link && !dev->link_on) {
1295 		dev->link_on = true;
1297 		phy_ethtool_ksettings_get(phydev, &ecmd);
1299 		if (dev->udev->speed == USB_SPEED_SUPER) {
1300 			if (ecmd.base.speed == 1000) {
/* Gigabit on SuperSpeed: U2 exit latency is too high, keep U1 only. */
1302 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1305 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1306 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1310 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1313 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1314 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1318 				/* enable U1 & U2 */
1319 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1322 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1323 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1324 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1330 		ladv = phy_read(phydev, MII_ADVERTISE);
1334 		radv = phy_read(phydev, MII_LPA);
1338 		netif_dbg(dev, link, dev->net,
1339 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1340 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1342 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1347 		if (!timer_pending(&dev->stat_monitor)) {
1349 			mod_timer(&dev->stat_monitor,
1350 				  jiffies + STAT_UPDATE_TIMER);
1353 		tasklet_schedule(&dev->bh);
1359 /* some work can't be done in tasklets, so we use keventd
1361 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1362 * but tasklet_schedule() doesn't. hope the failure is rare.
1364 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1366 set_bit(work, &dev->flags);
1367 if (!schedule_delayed_work(&dev->wq, 0))
1368 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Completion handler payload parser for the interrupt-endpoint URB.
 * Expects a 4-byte little-endian status word.  A PHY interrupt bit defers
 * link handling to keventd and, if the PHY irq is mapped into the irq
 * domain, re-dispatches it as a normal interrupt (with local irqs off, as
 * generic_handle_irq() requires).
 * NOTE(review): truncated — early return and local_irq_enable/closing
 * braces were dropped.
 */
1371 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1375 	if (urb->actual_length != 4) {
1376 		netdev_warn(dev->net,
1377 			    "unexpected urb length %d", urb->actual_length);
1381 	intdata = get_unaligned_le32(urb->transfer_buffer);
1383 	if (intdata & INT_ENP_PHY_INT) {
1384 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1385 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1387 		if (dev->domain_data.phyirq > 0) {
1388 			local_irq_disable();
1389 			generic_handle_irq(dev->domain_data.phyirq);
1393 		netdev_warn(dev->net,
1394 			    "unexpected interrupt: 0x%08x\n", intdata);
1398 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1400 return MAX_EEPROM_SIZE;
1403 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1404 struct ethtool_eeprom *ee, u8 *data)
1406 struct lan78xx_net *dev = netdev_priv(netdev);
1409 ret = usb_autopm_get_interface(dev->intf);
1413 ee->magic = LAN78XX_EEPROM_MAGIC;
1415 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1417 usb_autopm_put_interface(dev->intf);
1422 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1423 struct ethtool_eeprom *ee, u8 *data)
1425 struct lan78xx_net *dev = netdev_priv(netdev);
1428 ret = usb_autopm_get_interface(dev->intf);
1432 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1433 * to load data from EEPROM
1435 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1436 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1437 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1438 (ee->offset == 0) &&
1440 (data[0] == OTP_INDICATOR_1))
1441 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1443 usb_autopm_put_interface(dev->intf);
1448 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1451 if (stringset == ETH_SS_STATS)
1452 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1455 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1457 if (sset == ETH_SS_STATS)
1458 return ARRAY_SIZE(lan78xx_gstrings);
1463 static void lan78xx_get_stats(struct net_device *netdev,
1464 struct ethtool_stats *stats, u64 *data)
1466 struct lan78xx_net *dev = netdev_priv(netdev);
1468 lan78xx_update_stats(dev);
1470 mutex_lock(&dev->stats.access_lock);
1471 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1472 mutex_unlock(&dev->stats.access_lock);
1475 static void lan78xx_get_wol(struct net_device *netdev,
1476 struct ethtool_wolinfo *wol)
1478 struct lan78xx_net *dev = netdev_priv(netdev);
1481 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1483 if (usb_autopm_get_interface(dev->intf) < 0)
1486 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1487 if (unlikely(ret < 0)) {
1491 if (buf & USB_CFG_RMT_WKP_) {
1492 wol->supported = WAKE_ALL;
1493 wol->wolopts = pdata->wol;
1500 usb_autopm_put_interface(dev->intf);
1503 static int lan78xx_set_wol(struct net_device *netdev,
1504 struct ethtool_wolinfo *wol)
1506 struct lan78xx_net *dev = netdev_priv(netdev);
1507 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1510 ret = usb_autopm_get_interface(dev->intf);
1514 if (wol->wolopts & ~WAKE_ALL)
1517 pdata->wol = wol->wolopts;
1519 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1521 phy_ethtool_set_wol(netdev->phydev, wol);
1523 usb_autopm_put_interface(dev->intf);
1528 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1530 struct lan78xx_net *dev = netdev_priv(net);
1531 struct phy_device *phydev = net->phydev;
1535 ret = usb_autopm_get_interface(dev->intf);
1539 ret = phy_ethtool_get_eee(phydev, edata);
1543 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1544 if (buf & MAC_CR_EEE_EN_) {
1545 edata->eee_enabled = true;
1546 edata->eee_active = !!(edata->advertised &
1547 edata->lp_advertised);
1548 edata->tx_lpi_enabled = true;
1549 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1550 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1551 edata->tx_lpi_timer = buf;
1553 edata->eee_enabled = false;
1554 edata->eee_active = false;
1555 edata->tx_lpi_enabled = false;
1556 edata->tx_lpi_timer = 0;
1561 usb_autopm_put_interface(dev->intf);
1566 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1568 struct lan78xx_net *dev = netdev_priv(net);
1572 ret = usb_autopm_get_interface(dev->intf);
1576 if (edata->eee_enabled) {
1577 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1578 buf |= MAC_CR_EEE_EN_;
1579 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1581 phy_ethtool_set_eee(net->phydev, edata);
1583 buf = (u32)edata->tx_lpi_timer;
1584 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1586 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1587 buf &= ~MAC_CR_EEE_EN_;
1588 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1591 usb_autopm_put_interface(dev->intf);
1596 static u32 lan78xx_get_link(struct net_device *net)
1600 mutex_lock(&net->phydev->lock);
1601 phy_read_status(net->phydev);
1602 link = net->phydev->link;
1603 mutex_unlock(&net->phydev->lock);
1608 static void lan78xx_get_drvinfo(struct net_device *net,
1609 struct ethtool_drvinfo *info)
1611 struct lan78xx_net *dev = netdev_priv(net);
1613 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1614 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1617 static u32 lan78xx_get_msglevel(struct net_device *net)
1619 struct lan78xx_net *dev = netdev_priv(net);
1621 return dev->msg_enable;
1624 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1626 struct lan78xx_net *dev = netdev_priv(net);
1628 dev->msg_enable = level;
1631 static int lan78xx_get_link_ksettings(struct net_device *net,
1632 struct ethtool_link_ksettings *cmd)
1634 struct lan78xx_net *dev = netdev_priv(net);
1635 struct phy_device *phydev = net->phydev;
1638 ret = usb_autopm_get_interface(dev->intf);
1642 phy_ethtool_ksettings_get(phydev, cmd);
1644 usb_autopm_put_interface(dev->intf);
1649 static int lan78xx_set_link_ksettings(struct net_device *net,
1650 const struct ethtool_link_ksettings *cmd)
1652 struct lan78xx_net *dev = netdev_priv(net);
1653 struct phy_device *phydev = net->phydev;
1657 ret = usb_autopm_get_interface(dev->intf);
1661 /* change speed & duplex */
1662 ret = phy_ethtool_ksettings_set(phydev, cmd);
1664 if (!cmd->base.autoneg) {
1665 /* force link down */
1666 temp = phy_read(phydev, MII_BMCR);
1667 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1669 phy_write(phydev, MII_BMCR, temp);
1672 usb_autopm_put_interface(dev->intf);
1677 static void lan78xx_get_pause(struct net_device *net,
1678 struct ethtool_pauseparam *pause)
1680 struct lan78xx_net *dev = netdev_priv(net);
1681 struct phy_device *phydev = net->phydev;
1682 struct ethtool_link_ksettings ecmd;
1684 phy_ethtool_ksettings_get(phydev, &ecmd);
1686 pause->autoneg = dev->fc_autoneg;
1688 if (dev->fc_request_control & FLOW_CTRL_TX)
1689 pause->tx_pause = 1;
1691 if (dev->fc_request_control & FLOW_CTRL_RX)
1692 pause->rx_pause = 1;
1695 static int lan78xx_set_pause(struct net_device *net,
1696 struct ethtool_pauseparam *pause)
1698 struct lan78xx_net *dev = netdev_priv(net);
1699 struct phy_device *phydev = net->phydev;
1700 struct ethtool_link_ksettings ecmd;
1703 phy_ethtool_ksettings_get(phydev, &ecmd);
1705 if (pause->autoneg && !ecmd.base.autoneg) {
1710 dev->fc_request_control = 0;
1711 if (pause->rx_pause)
1712 dev->fc_request_control |= FLOW_CTRL_RX;
1714 if (pause->tx_pause)
1715 dev->fc_request_control |= FLOW_CTRL_TX;
1717 if (ecmd.base.autoneg) {
1718 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1721 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1722 ecmd.link_modes.advertising);
1723 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1724 ecmd.link_modes.advertising);
1725 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1726 mii_adv_to_linkmode_adv_t(fc, mii_adv);
1727 linkmode_or(ecmd.link_modes.advertising, fc,
1728 ecmd.link_modes.advertising);
1730 phy_ethtool_ksettings_set(phydev, &ecmd);
1733 dev->fc_autoneg = pause->autoneg;
1740 static int lan78xx_get_regs_len(struct net_device *netdev)
1742 if (!netdev->phydev)
1743 return (sizeof(lan78xx_regs));
1745 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1749 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1754 struct lan78xx_net *dev = netdev_priv(netdev);
1756 /* Read Device/MAC registers */
1757 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1758 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1760 if (!netdev->phydev)
1763 /* Read PHY registers */
1764 for (j = 0; j < 32; i++, j++)
1765 data[i] = phy_read(netdev->phydev, j);
1768 static const struct ethtool_ops lan78xx_ethtool_ops = {
1769 .get_link = lan78xx_get_link,
1770 .nway_reset = phy_ethtool_nway_reset,
1771 .get_drvinfo = lan78xx_get_drvinfo,
1772 .get_msglevel = lan78xx_get_msglevel,
1773 .set_msglevel = lan78xx_set_msglevel,
1774 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1775 .get_eeprom = lan78xx_ethtool_get_eeprom,
1776 .set_eeprom = lan78xx_ethtool_set_eeprom,
1777 .get_ethtool_stats = lan78xx_get_stats,
1778 .get_sset_count = lan78xx_get_sset_count,
1779 .get_strings = lan78xx_get_strings,
1780 .get_wol = lan78xx_get_wol,
1781 .set_wol = lan78xx_set_wol,
1782 .get_ts_info = ethtool_op_get_ts_info,
1783 .get_eee = lan78xx_get_eee,
1784 .set_eee = lan78xx_set_eee,
1785 .get_pauseparam = lan78xx_get_pause,
1786 .set_pauseparam = lan78xx_set_pause,
1787 .get_link_ksettings = lan78xx_get_link_ksettings,
1788 .set_link_ksettings = lan78xx_set_link_ksettings,
1789 .get_regs_len = lan78xx_get_regs_len,
1790 .get_regs = lan78xx_get_regs,
1791 .get_ts_info = ethtool_op_get_ts_info,
1794 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1796 u32 addr_lo, addr_hi;
1799 lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1800 lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1802 addr[0] = addr_lo & 0xFF;
1803 addr[1] = (addr_lo >> 8) & 0xFF;
1804 addr[2] = (addr_lo >> 16) & 0xFF;
1805 addr[3] = (addr_lo >> 24) & 0xFF;
1806 addr[4] = addr_hi & 0xFF;
1807 addr[5] = (addr_hi >> 8) & 0xFF;
1809 if (!is_valid_ether_addr(addr)) {
1810 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1811 /* valid address present in Device Tree */
1812 netif_dbg(dev, ifup, dev->net,
1813 "MAC address read from Device Tree");
1814 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1815 ETH_ALEN, addr) == 0) ||
1816 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1817 ETH_ALEN, addr) == 0)) &&
1818 is_valid_ether_addr(addr)) {
1819 /* eeprom values are valid so use them */
1820 netif_dbg(dev, ifup, dev->net,
1821 "MAC address read from EEPROM");
1823 /* generate random MAC */
1824 eth_random_addr(addr);
1825 netif_dbg(dev, ifup, dev->net,
1826 "MAC address set to random addr");
1829 addr_lo = addr[0] | (addr[1] << 8) |
1830 (addr[2] << 16) | (addr[3] << 24);
1831 addr_hi = addr[4] | (addr[5] << 8);
1833 lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1834 lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1837 lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1838 lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1840 ether_addr_copy(dev->net->dev_addr, addr);
1843 /* MDIO read and write wrappers for phylib */
1844 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1846 struct lan78xx_net *dev = bus->priv;
1850 ret = usb_autopm_get_interface(dev->intf);
1854 mutex_lock(&dev->phy_mutex);
1856 /* confirm MII not busy */
1857 ret = lan78xx_phy_wait_not_busy(dev);
1861 /* set the address, index & direction (read from PHY) */
1862 addr = mii_access(phy_id, idx, MII_READ);
1863 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1865 ret = lan78xx_phy_wait_not_busy(dev);
1869 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1871 ret = (int)(val & 0xFFFF);
1874 mutex_unlock(&dev->phy_mutex);
1875 usb_autopm_put_interface(dev->intf);
1880 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1883 struct lan78xx_net *dev = bus->priv;
1887 ret = usb_autopm_get_interface(dev->intf);
1891 mutex_lock(&dev->phy_mutex);
1893 /* confirm MII not busy */
1894 ret = lan78xx_phy_wait_not_busy(dev);
1899 ret = lan78xx_write_reg(dev, MII_DATA, val);
1901 /* set the address, index & direction (write to PHY) */
1902 addr = mii_access(phy_id, idx, MII_WRITE);
1903 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1905 ret = lan78xx_phy_wait_not_busy(dev);
1910 mutex_unlock(&dev->phy_mutex);
1911 usb_autopm_put_interface(dev->intf);
1915 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1917 struct device_node *node;
1920 dev->mdiobus = mdiobus_alloc();
1921 if (!dev->mdiobus) {
1922 netdev_err(dev->net, "can't allocate MDIO bus\n");
1926 dev->mdiobus->priv = (void *)dev;
1927 dev->mdiobus->read = lan78xx_mdiobus_read;
1928 dev->mdiobus->write = lan78xx_mdiobus_write;
1929 dev->mdiobus->name = "lan78xx-mdiobus";
1930 dev->mdiobus->parent = &dev->udev->dev;
1932 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1933 dev->udev->bus->busnum, dev->udev->devnum);
1935 switch (dev->chipid) {
1936 case ID_REV_CHIP_ID_7800_:
1937 case ID_REV_CHIP_ID_7850_:
1938 /* set to internal PHY id */
1939 dev->mdiobus->phy_mask = ~(1 << 1);
1941 case ID_REV_CHIP_ID_7801_:
1942 /* scan thru PHYAD[2..0] */
1943 dev->mdiobus->phy_mask = ~(0xFF);
1947 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1948 ret = of_mdiobus_register(dev->mdiobus, node);
1951 netdev_err(dev->net, "can't register MDIO bus\n");
1955 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1958 mdiobus_free(dev->mdiobus);
1962 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1964 mdiobus_unregister(dev->mdiobus);
1965 mdiobus_free(dev->mdiobus);
1968 static void lan78xx_link_status_change(struct net_device *net)
1970 struct phy_device *phydev = net->phydev;
1973 /* At forced 100 F/H mode, chip may fail to set mode correctly
1974 * when cable is switched between long(~50+m) and short one.
1975 * As workaround, set to 10 before setting to 100
1976 * at forced 100 F/H mode.
1978 if (!phydev->autoneg && (phydev->speed == 100)) {
1979 /* disable phy interrupt */
1980 temp = phy_read(phydev, LAN88XX_INT_MASK);
1981 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1982 phy_write(phydev, LAN88XX_INT_MASK, temp);
1984 temp = phy_read(phydev, MII_BMCR);
1985 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1986 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1987 temp |= BMCR_SPEED100;
1988 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1990 /* clear pending interrupt generated while workaround */
1991 temp = phy_read(phydev, LAN88XX_INT_STS);
1993 /* enable phy interrupt back */
1994 temp = phy_read(phydev, LAN88XX_INT_MASK);
1995 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1996 phy_write(phydev, LAN88XX_INT_MASK, temp);
2000 static int irq_map(struct irq_domain *d, unsigned int irq,
2001 irq_hw_number_t hwirq)
2003 struct irq_domain_data *data = d->host_data;
2005 irq_set_chip_data(irq, data);
2006 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2007 irq_set_noprobe(irq);
2012 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2014 irq_set_chip_and_handler(irq, NULL, NULL);
2015 irq_set_chip_data(irq, NULL);
2018 static const struct irq_domain_ops chip_domain_ops = {
2023 static void lan78xx_irq_mask(struct irq_data *irqd)
2025 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2027 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2030 static void lan78xx_irq_unmask(struct irq_data *irqd)
2032 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2034 data->irqenable |= BIT(irqd_to_hwirq(irqd));
2037 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2039 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2041 mutex_lock(&data->irq_lock);
2044 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2046 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2047 struct lan78xx_net *dev =
2048 container_of(data, struct lan78xx_net, domain_data);
2051 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
2052 * are only two callbacks executed in non-atomic contex.
2054 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2055 if (buf != data->irqenable)
2056 lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2058 mutex_unlock(&data->irq_lock);
2061 static struct irq_chip lan78xx_irqchip = {
2062 .name = "lan78xx-irqs",
2063 .irq_mask = lan78xx_irq_mask,
2064 .irq_unmask = lan78xx_irq_unmask,
2065 .irq_bus_lock = lan78xx_irq_bus_lock,
2066 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
2069 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2071 struct device_node *of_node;
2072 struct irq_domain *irqdomain;
2073 unsigned int irqmap = 0;
2077 of_node = dev->udev->dev.parent->of_node;
2079 mutex_init(&dev->domain_data.irq_lock);
2081 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2082 dev->domain_data.irqenable = buf;
2084 dev->domain_data.irqchip = &lan78xx_irqchip;
2085 dev->domain_data.irq_handler = handle_simple_irq;
2087 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2088 &chip_domain_ops, &dev->domain_data);
2090 /* create mapping for PHY interrupt */
2091 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2093 irq_domain_remove(irqdomain);
2102 dev->domain_data.irqdomain = irqdomain;
2103 dev->domain_data.phyirq = irqmap;
2108 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2110 if (dev->domain_data.phyirq > 0) {
2111 irq_dispose_mapping(dev->domain_data.phyirq);
2113 if (dev->domain_data.irqdomain)
2114 irq_domain_remove(dev->domain_data.irqdomain);
2116 dev->domain_data.phyirq = 0;
2117 dev->domain_data.irqdomain = NULL;
2120 static int lan8835_fixup(struct phy_device *phydev)
2123 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2125 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2126 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2129 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2131 /* RGMII MAC TXC Delay Enable */
2132 lan78xx_write_reg(dev, MAC_RGMII_ID,
2133 MAC_RGMII_ID_TXC_DELAY_EN_);
2135 /* RGMII TX DLL Tune Adjust */
2136 lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2138 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2143 static int ksz9031rnx_fixup(struct phy_device *phydev)
2145 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2147 /* Micrel9301RNX PHY configuration */
2148 /* RGMII Control Signal Pad Skew */
2149 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2150 /* RGMII RX Data Pad Skew */
2151 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2152 /* RGMII RX Clock Pad Skew */
2153 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2155 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2160 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2164 struct fixed_phy_status fphy_status = {
2166 .speed = SPEED_1000,
2167 .duplex = DUPLEX_FULL,
2169 struct phy_device *phydev;
2171 phydev = phy_find_first(dev->mdiobus);
2173 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2174 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2175 if (IS_ERR(phydev)) {
2176 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2179 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2180 dev->interface = PHY_INTERFACE_MODE_RGMII;
2181 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2182 MAC_RGMII_ID_TXC_DELAY_EN_);
2183 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2184 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2185 buf |= HW_CFG_CLK125_EN_;
2186 buf |= HW_CFG_REFCLK25_EN_;
2187 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2190 netdev_err(dev->net, "no PHY driver found\n");
2193 dev->interface = PHY_INTERFACE_MODE_RGMII;
2194 /* external PHY fixup for KSZ9031RNX */
2195 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2198 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2201 /* external PHY fixup for LAN8835 */
2202 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2205 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2208 /* add more external PHY fixup here if needed */
2210 phydev->is_internal = false;
2215 static int lan78xx_phy_init(struct lan78xx_net *dev)
2217 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2220 struct phy_device *phydev;
2222 switch (dev->chipid) {
2223 case ID_REV_CHIP_ID_7801_:
2224 phydev = lan7801_phy_init(dev);
2226 netdev_err(dev->net, "lan7801: PHY Init Failed");
2231 case ID_REV_CHIP_ID_7800_:
2232 case ID_REV_CHIP_ID_7850_:
2233 phydev = phy_find_first(dev->mdiobus);
2235 netdev_err(dev->net, "no PHY found\n");
2238 phydev->is_internal = true;
2239 dev->interface = PHY_INTERFACE_MODE_GMII;
2243 netdev_err(dev->net, "Unknown CHIP ID found\n");
2247 /* if phyirq is not set, use polling mode in phylib */
2248 if (dev->domain_data.phyirq > 0)
2249 phydev->irq = dev->domain_data.phyirq;
2251 phydev->irq = PHY_POLL;
2252 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2254 /* set to AUTOMDIX */
2255 phydev->mdix = ETH_TP_MDI_AUTO;
2257 ret = phy_connect_direct(dev->net, phydev,
2258 lan78xx_link_status_change,
2261 netdev_err(dev->net, "can't attach PHY to %s\n",
2263 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2264 if (phy_is_pseudo_fixed_link(phydev)) {
2265 fixed_phy_unregister(phydev);
2267 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2269 phy_unregister_fixup_for_uid(PHY_LAN8835,
2276 /* MAC doesn't support 1000T Half */
2277 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2279 /* support both flow controls */
2280 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2281 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2282 phydev->advertising);
2283 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2284 phydev->advertising);
2285 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2286 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2287 linkmode_or(phydev->advertising, fc, phydev->advertising);
2289 if (of_property_read_bool(phydev->mdio.dev.of_node,
2290 "microchip,eee-enabled")) {
2291 struct ethtool_eee edata;
2292 memset(&edata, 0, sizeof(edata));
2293 edata.cmd = ETHTOOL_SEEE;
2294 edata.advertised = ADVERTISED_1000baseT_Full |
2295 ADVERTISED_100baseT_Full;
2296 edata.eee_enabled = true;
2297 edata.tx_lpi_enabled = true;
2298 if (of_property_read_u32(dev->udev->dev.of_node,
2299 "microchip,tx-lpi-timer",
2300 &edata.tx_lpi_timer))
2301 edata.tx_lpi_timer = 600; /* non-aggressive */
2302 (void)lan78xx_set_eee(dev->net, &edata);
2305 if (phydev->mdio.dev.of_node) {
2309 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2310 "microchip,led-modes",
2313 /* Ensure the appropriate LEDs are enabled */
2314 lan78xx_read_reg(dev, HW_CFG, ®);
2315 reg &= ~(HW_CFG_LED0_EN_ |
2319 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2320 (len > 1) * HW_CFG_LED1_EN_ |
2321 (len > 2) * HW_CFG_LED2_EN_ |
2322 (len > 3) * HW_CFG_LED3_EN_;
2323 lan78xx_write_reg(dev, HW_CFG, reg);
2327 genphy_config_aneg(phydev);
2329 dev->fc_autoneg = phydev->autoneg;
2334 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2339 lan78xx_read_reg(dev, MAC_RX, &buf);
2341 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2344 buf &= ~MAC_RX_RXEN_;
2345 lan78xx_write_reg(dev, MAC_RX, buf);
2348 /* add 4 to size for FCS */
2349 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2350 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2352 lan78xx_write_reg(dev, MAC_RX, buf);
2355 buf |= MAC_RX_RXEN_;
2356 lan78xx_write_reg(dev, MAC_RX, buf);
2362 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2364 struct sk_buff *skb;
2365 unsigned long flags;
2368 spin_lock_irqsave(&q->lock, flags);
2369 while (!skb_queue_empty(q)) {
2370 struct skb_data *entry;
2374 skb_queue_walk(q, skb) {
2375 entry = (struct skb_data *)skb->cb;
2376 if (entry->state != unlink_start)
2381 entry->state = unlink_start;
2384 /* Get reference count of the URB to avoid it to be
2385 * freed during usb_unlink_urb, which may trigger
2386 * use-after-free problem inside usb_unlink_urb since
2387 * usb_unlink_urb is always racing with .complete
2388 * handler(include defer_bh).
2391 spin_unlock_irqrestore(&q->lock, flags);
2392 /* during some PM-driven resume scenarios,
2393 * these (async) unlinks complete immediately
2395 ret = usb_unlink_urb(urb);
2396 if (ret != -EINPROGRESS && ret != 0)
2397 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2401 spin_lock_irqsave(&q->lock, flags);
2403 spin_unlock_irqrestore(&q->lock, flags);
2407 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2409 struct lan78xx_net *dev = netdev_priv(netdev);
2410 int ll_mtu = new_mtu + netdev->hard_header_len;
2411 int old_hard_mtu = dev->hard_mtu;
2412 int old_rx_urb_size = dev->rx_urb_size;
2415 /* no second zero-length packet read wanted after mtu-sized packets */
2416 if ((ll_mtu % dev->maxpacket) == 0)
2419 ret = usb_autopm_get_interface(dev->intf);
2423 lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2425 netdev->mtu = new_mtu;
2427 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2428 if (dev->rx_urb_size == old_hard_mtu) {
2429 dev->rx_urb_size = dev->hard_mtu;
2430 if (dev->rx_urb_size > old_rx_urb_size) {
2431 if (netif_running(dev->net)) {
2432 unlink_urbs(dev, &dev->rxq);
2433 tasklet_schedule(&dev->bh);
2438 usb_autopm_put_interface(dev->intf);
2443 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2445 struct lan78xx_net *dev = netdev_priv(netdev);
2446 struct sockaddr *addr = p;
2447 u32 addr_lo, addr_hi;
2449 if (netif_running(netdev))
2452 if (!is_valid_ether_addr(addr->sa_data))
2453 return -EADDRNOTAVAIL;
2455 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2457 addr_lo = netdev->dev_addr[0] |
2458 netdev->dev_addr[1] << 8 |
2459 netdev->dev_addr[2] << 16 |
2460 netdev->dev_addr[3] << 24;
2461 addr_hi = netdev->dev_addr[4] |
2462 netdev->dev_addr[5] << 8;
2464 lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2465 lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2467 /* Added to support MAC address changes */
2468 lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2469 lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2474 /* Enable or disable Rx checksum offload engine */
2475 static int lan78xx_set_features(struct net_device *netdev,
2476 netdev_features_t features)
2478 struct lan78xx_net *dev = netdev_priv(netdev);
2479 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2480 unsigned long flags;
2482 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2484 if (features & NETIF_F_RXCSUM) {
2485 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2486 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2488 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2489 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2492 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2493 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2495 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2497 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2498 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2500 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2502 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2504 lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2509 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2511 struct lan78xx_priv *pdata =
2512 container_of(param, struct lan78xx_priv, set_vlan);
2513 struct lan78xx_net *dev = pdata->dev;
2515 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2516 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2519 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2520 __be16 proto, u16 vid)
2522 struct lan78xx_net *dev = netdev_priv(netdev);
2523 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2525 u16 vid_dword_index;
2527 vid_dword_index = (vid >> 5) & 0x7F;
2528 vid_bit_index = vid & 0x1F;
2530 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2532 /* defer register writes to a sleepable context */
2533 schedule_work(&pdata->set_vlan);
2538 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2539 __be16 proto, u16 vid)
2541 struct lan78xx_net *dev = netdev_priv(netdev);
2542 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2544 u16 vid_dword_index;
2546 vid_dword_index = (vid >> 5) & 0x7F;
2547 vid_bit_index = vid & 0x1F;
2549 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2551 /* defer register writes to a sleepable context */
2552 schedule_work(&pdata->set_vlan);
2557 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2561 u32 regs[6] = { 0 };
2563 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2564 if (buf & USB_CFG1_LTM_ENABLE_) {
2566 /* Get values from EEPROM first */
2567 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2568 if (temp[0] == 24) {
2569 ret = lan78xx_read_raw_eeprom(dev,
2576 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2577 if (temp[0] == 24) {
2578 ret = lan78xx_read_raw_otp(dev,
2588 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2589 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2590 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2591 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2592 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2593 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2596 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2598 return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2601 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2604 unsigned long timeout;
2605 bool stopped = true;
2609 /* Stop the h/w block (if not already stopped) */
2611 ret = lan78xx_read_reg(dev, reg, &buf);
2615 if (buf & hw_enabled) {
2618 ret = lan78xx_write_reg(dev, reg, buf);
2623 timeout = jiffies + HW_DISABLE_TIMEOUT;
2625 ret = lan78xx_read_reg(dev, reg, &buf);
2629 if (buf & hw_disabled)
2632 msleep(HW_DISABLE_DELAY_MS);
2633 } while (!stopped && !time_after(jiffies, timeout));
2636 ret = stopped ? 0 : -ETIME;
2641 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2643 return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2646 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2650 netif_dbg(dev, drv, dev->net, "start tx path");
2652 /* Start the MAC transmitter */
2654 ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2658 /* Start the Tx FIFO */
2660 ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2667 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2671 netif_dbg(dev, drv, dev->net, "stop tx path");
2673 /* Stop the Tx FIFO */
2675 ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2679 /* Stop the MAC transmitter */
2681 ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2688 /* The caller must ensure the Tx path is stopped before calling
2689 * lan78xx_flush_tx_fifo().
2691 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2693 return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2696 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2700 netif_dbg(dev, drv, dev->net, "start rx path");
2702 /* Start the Rx FIFO */
2704 ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2708 /* Start the MAC receiver*/
2710 ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2717 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2721 netif_dbg(dev, drv, dev->net, "stop rx path");
2723 /* Stop the MAC receiver */
2725 ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2729 /* Stop the Rx FIFO */
2731 ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2738 /* The caller must ensure the Rx path is stopped before calling
2739 * lan78xx_flush_rx_fifo().
2741 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2743 return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
/* Full hardware (re)initialisation of the LAN78xx:
 *  - Lite Reset (HW_CFG_LRST_) with a 1 second timeout poll
 *  - MAC address setup and chip ID/revision capture from ID_REV
 *  - USB config (NAK on IN token, burst cap sized by bus speed)
 *  - FIFO sizing, interrupt/flow-control register clearing
 *  - receive filter (RFE), checksum offload and multicast setup
 *  - PHY reset via PMT_CTL with a 1 second timeout poll
 *  - MAC_CR speed/duplex policy depending on chip and EEPROM/OTP presence
 * Returns 0 on success or a negative errno from a register access.
 */
2746 static int lan78xx_reset(struct lan78xx_net *dev)
2748 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2749 unsigned long timeout;
/* Probe for valid EEPROM/OTP once; the results gate LED and
 * auto-negotiation defaults further down.
 */
2756 has_eeprom = !lan78xx_read_eeprom(dev, 0, 0, NULL);
2757 has_otp = !lan78xx_read_otp(dev, 0, 0, NULL);
2759 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2763 buf |= HW_CFG_LRST_;
2765 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* Wait up to 1s for the Lite Reset bit to self-clear. */
2769 timeout = jiffies + HZ;
2772 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2776 if (time_after(jiffies, timeout)) {
2777 netdev_warn(dev->net,
2778 "timeout on completion of LiteReset");
2782 } while (buf & HW_CFG_LRST_);
2784 lan78xx_init_mac_address(dev);
2786 /* save DEVID for later usage */
2787 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2791 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2792 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2794 /* Respond to the IN token with a NAK */
2795 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2799 buf |= USB_CFG_BIR_;
2801 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
/* Latency Tolerance Messaging setup (USB power management). */
2806 lan78xx_init_ltm(dev);
/* Size the bulk-in burst cap and URB queues by USB bus speed. */
2808 if (dev->udev->speed == USB_SPEED_SUPER) {
2809 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2810 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2813 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2814 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2815 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2816 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2817 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2819 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2820 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2825 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2829 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2833 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2839 /* If no valid EEPROM and no valid OTP, enable the LEDs by default */
2840 if (!has_eeprom && !has_otp)
2841 buf |= HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_;
2843 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* Enable burst cap (BCE) in the USB config. */
2847 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2851 buf |= USB_CFG_BCE_;
2853 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2857 /* set FIFO sizes */
2858 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2860 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2864 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2866 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
/* Clear all pending interrupt status and disable flow control. */
2870 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2874 ret = lan78xx_write_reg(dev, FLOW, 0);
2878 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2882 /* Don't need rfe_ctl_lock during initialisation */
2883 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2887 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2889 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2893 /* Enable or disable checksum offload engines */
2894 ret = lan78xx_set_features(dev->net, dev->net->features);
2898 lan78xx_set_multicast(dev->net);
/* Reset the PHY and wait (up to 1s) for reset-complete + READY. */
2901 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2905 buf |= PMT_CTL_PHY_RST_;
2907 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2911 timeout = jiffies + HZ;
2914 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2918 if (time_after(jiffies, timeout)) {
2919 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2923 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2925 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2929 /* LAN7801 only has RGMII mode */
2930 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2931 buf &= ~MAC_CR_GMII_EN_;
2933 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2934 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2935 if (!ret && sig != EEPROM_INDICATOR) {
2936 /* Implies there is no external eeprom. Set mac speed */
2937 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2938 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2941 /* If no valid EEPROM and no valid OTP, enable AUTO negotiation */
2942 if (!has_eeprom && !has_otp)
2943 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2944 ret = lan78xx_write_reg(dev, MAC_CR, buf);
/* Accept frames up to MTU + VLAN/Ethernet header. */
2948 ret = lan78xx_set_rx_max_frame_length(dev,
2949 dev->net->mtu + VLAN_ETH_HLEN);
/* Initialise the statistics rollover thresholds: default every counter's
 * rollover max to a 20-bit ceiling, then override the counters that are
 * full 32 bits wide, and kick off the first deferred stats update.
 */
2954 static void lan78xx_init_stats(struct lan78xx_net *dev)
2959 /* initialize for stats update
2960 * some counters are 20bits and some are 32bits
/* Treat rollover_max as a flat array of u32 and fill the defaults. */
2962 p = (u32 *)&dev->stats.rollover_max;
2963 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
/* These hardware counters are full 32-bit; their max is 0xFFFFFFFF. */
2966 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2967 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2968 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2969 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2970 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2971 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2972 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2973 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2974 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2975 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
/* Request a stats refresh from the deferred work handler. */
2977 set_bit(EVENT_STAT_UPDATE, &dev->flags);
/* ndo_open: bring the interface up.
 * Resumes the device (autopm), starts the PHY, submits the interrupt
 * URB used for link-change notification, flushes both FIFOs, starts the
 * Tx then Rx data paths, arms statistics collection and wakes the queue.
 * Returns 0 on success or a negative errno.
 */
2980 static int lan78xx_open(struct net_device *net)
2982 struct lan78xx_net *dev = netdev_priv(net);
2985 netif_dbg(dev, ifup, dev->net, "open device");
2987 ret = usb_autopm_get_interface(dev->intf);
2991 mutex_lock(&dev->dev_mutex);
2993 phy_start(net->phydev);
2995 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2997 /* for Link Check */
2998 if (dev->urb_intr) {
2999 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3001 netif_err(dev, ifup, dev->net,
3002 "intr submit %d\n", ret);
/* Data paths must be clean before (re)starting them. */
3007 ret = lan78xx_flush_rx_fifo(dev);
3010 ret = lan78xx_flush_tx_fifo(dev);
3014 ret = lan78xx_start_tx_path(dev);
3017 ret = lan78xx_start_rx_path(dev);
3021 lan78xx_init_stats(dev);
3023 set_bit(EVENT_DEV_OPEN, &dev->flags);
3025 netif_start_queue(net);
/* Force a link evaluation via the deferred worker. */
3027 dev->link_on = false;
3029 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
3031 mutex_unlock(&dev->dev_mutex);
3033 usb_autopm_put_interface(dev->intf);
/* Unlink all in-flight Tx/Rx URBs and wait (polling with short sleeps)
 * until both queues have drained, then free the URBs left on the done
 * queue. Uses an on-stack wait queue published via dev->wait so the
 * completion path can wake us.
 */
3038 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3040 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3041 DECLARE_WAITQUEUE(wait, current);
3044 /* ensure there are no more active urbs */
3045 add_wait_queue(&unlink_wakeup, &wait);
3046 set_current_state(TASK_UNINTERRUPTIBLE);
3047 dev->wait = &unlink_wakeup;
3048 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3050 /* maybe wait for deletions to finish. */
3051 while (!skb_queue_empty(&dev->rxq) ||
3052 !skb_queue_empty(&dev->txq)) {
3053 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3054 set_current_state(TASK_UNINTERRUPTIBLE);
3055 netif_dbg(dev, ifdown, dev->net,
3056 "waited for %d urb completions", temp);
3058 set_current_state(TASK_RUNNING);
3060 remove_wait_queue(&unlink_wakeup, &wait);
/* Drain the done queue and release each completed URB. */
3062 while (!skb_queue_empty(&dev->done)) {
3063 struct skb_data *entry;
3064 struct sk_buff *skb;
3066 skb = skb_dequeue(&dev->done);
3067 entry = (struct skb_data *)(skb->cb);
3068 usb_free_urb(entry->urb);
3073 static int lan78xx_stop(struct net_device *net)
3075 struct lan78xx_net *dev = netdev_priv(net);
3077 netif_dbg(dev, ifup, dev->net, "stop device");
3079 mutex_lock(&dev->dev_mutex);
3081 if (timer_pending(&dev->stat_monitor))
3082 del_timer_sync(&dev->stat_monitor);
3084 clear_bit(EVENT_DEV_OPEN, &dev->flags);
3085 netif_stop_queue(net);
3086 tasklet_kill(&dev->bh);
3088 lan78xx_terminate_urbs(dev);
3090 netif_info(dev, ifdown, dev->net,
3091 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3092 net->stats.rx_packets, net->stats.tx_packets,
3093 net->stats.rx_errors, net->stats.tx_errors);
3095 /* ignore errors that occur stopping the Tx and Rx data paths */
3096 lan78xx_stop_tx_path(dev);
3097 lan78xx_stop_rx_path(dev);
3100 phy_stop(net->phydev);
3102 usb_kill_urb(dev->urb_intr);
3104 /* deferred work (task, timer, softirq) must also stop.
3105 * can't flush_scheduled_work() until we drop rtnl (later),
3106 * else workers could deadlock; so make workers a NOP.
3108 clear_bit(EVENT_TX_HALT, &dev->flags);
3109 clear_bit(EVENT_RX_HALT, &dev->flags);
3110 clear_bit(EVENT_LINK_RESET, &dev->flags);
3111 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3113 cancel_delayed_work_sync(&dev->wq);
3115 usb_autopm_put_interface(dev->intf);
3117 mutex_unlock(&dev->dev_mutex);
/* Prepend the 8-byte hardware Tx command header (TX_CMD_A / TX_CMD_B)
 * to an outgoing skb: frame length + FCS insertion, checksum offload
 * flag when CHECKSUM_PARTIAL, MSS for GSO frames, and VLAN tag
 * insertion when a tag is present. Consumes and frees the skb on
 * headroom/linearize failure (returns NULL in that case - per the
 * visible early frees; the full return path is outside this view).
 */
3122 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
3123 struct sk_buff *skb, gfp_t flags)
3125 u32 tx_cmd_a, tx_cmd_b;
/* Make sure there is writable headroom for the 8-byte command header. */
3128 if (skb_cow_head(skb, TX_OVERHEAD)) {
3129 dev_kfree_skb_any(skb);
3133 if (skb_linearize(skb)) {
3134 dev_kfree_skb_any(skb);
3138 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3140 if (skb->ip_summed == CHECKSUM_PARTIAL)
3141 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3144 if (skb_is_gso(skb)) {
/* Hardware requires a minimum MSS; clamp small GSO sizes up. */
3145 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3147 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3149 tx_cmd_a |= TX_CMD_A_LSO_;
3152 if (skb_vlan_tag_present(skb)) {
3153 tx_cmd_a |= TX_CMD_A_IVTG_;
3154 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* Write the two little-endian command words in front of the payload. */
3157 ptr = skb_push(skb, 8);
3158 put_unaligned_le32(tx_cmd_a, ptr);
3159 put_unaligned_le32(tx_cmd_b, ptr + 4);
/* Move a completed skb from its active queue (rxq/txq) onto the done
 * queue with the new state, scheduling the bh tasklet when the done
 * queue transitions from empty. Returns the skb's previous state so
 * the caller can detect an in-progress unlink. Both queue locks are
 * taken under one irqsave/irqrestore pair.
 */
3164 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3165 struct sk_buff_head *list, enum skb_state state)
3167 unsigned long flags;
3168 enum skb_state old_state;
3169 struct skb_data *entry = (struct skb_data *)skb->cb;
3171 spin_lock_irqsave(&list->lock, flags);
3172 old_state = entry->state;
3173 entry->state = state;
3175 __skb_unlink(skb, list);
/* Hand-over-hand locking: drop the source lock, take done.lock,
 * restoring irq state only after the second unlock. */
3176 spin_unlock(&list->lock);
3177 spin_lock(&dev->done.lock);
3179 __skb_queue_tail(&dev->done, skb);
3180 if (skb_queue_len(&dev->done) == 1)
3181 tasklet_schedule(&dev->bh);
3182 spin_unlock_irqrestore(&dev->done.lock, flags);
/* URB completion handler for bulk-out transfers.
 * On success, credits tx packet/byte stats from the skb_data entry;
 * on failure, bumps tx_errors and reacts per status code (halt
 * recovery via EVENT_TX_HALT, quiet shutdown codes, queue stop on
 * throttling conditions). Finally releases the async autopm reference
 * and defers the skb to the bh tasklet as tx_done.
 */
3187 static void tx_complete(struct urb *urb)
3189 struct sk_buff *skb = (struct sk_buff *)urb->context;
3190 struct skb_data *entry = (struct skb_data *)skb->cb;
3191 struct lan78xx_net *dev = entry->dev;
3193 if (urb->status == 0) {
3194 dev->net->stats.tx_packets += entry->num_of_packet;
3195 dev->net->stats.tx_bytes += entry->length;
3197 dev->net->stats.tx_errors++;
3199 switch (urb->status) {
/* Endpoint stalled: schedule clear-halt from process context. */
3201 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3204 /* software-driven interface shutdown */
3207 netif_dbg(dev, tx_err, dev->net,
3208 "tx err interface gone %d\n",
3209 entry->urb->status);
3215 netif_stop_queue(dev->net);
3216 netif_dbg(dev, tx_err, dev->net,
3217 "tx err queue stopped %d\n",
3218 entry->urb->status);
3221 netif_dbg(dev, tx_err, dev->net,
3222 "unknown tx err %d\n",
3223 entry->urb->status);
3228 usb_autopm_put_interface_async(dev->intf);
3230 defer_bh(dev, skb, &dev->txq, tx_done);
/* Append an skb to a driver queue and record its lifecycle state in
 * the skb control block. Caller holds the queue lock where required.
 */
3233 static void lan78xx_queue_skb(struct sk_buff_head *list,
3234 struct sk_buff *newsk, enum skb_state state)
3236 struct skb_data *entry = (struct skb_data *)newsk->cb;
3238 __skb_queue_tail(list, newsk);
3239 entry->state = state;
/* ndo_start_xmit: queue a frame for transmission.
 * Wakes the deferred worker if the device is autosuspended, timestamps
 * the skb, prepends the Tx command header via lan78xx_tx_prep(), and
 * queues the result on txq_pend for the bh tasklet to batch into URBs.
 * On prep failure, counts the frame as error+dropped. Throttles the
 * netif queue for sub-SuperSpeed links once >10 frames are pending.
 * Always returns NETDEV_TX_OK (the skb is consumed either way).
 */
3243 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3245 struct lan78xx_net *dev = netdev_priv(net);
3246 struct sk_buff *skb2 = NULL;
3248 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3249 schedule_delayed_work(&dev->wq, 0);
3252 skb_tx_timestamp(skb);
3253 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
3257 skb_queue_tail(&dev->txq_pend, skb2);
3259 /* throttle TX patch at slower than SUPER SPEED USB */
3260 if ((dev->udev->speed < USB_SPEED_SUPER) &&
3261 (skb_queue_len(&dev->txq_pend) > 10))
3262 netif_stop_queue(net);
3264 netif_dbg(dev, tx_err, dev->net,
3265 "lan78xx_tx_prep return NULL\n");
3266 dev->net->stats.tx_errors++;
3267 dev->net->stats.tx_dropped++;
/* Kick the bh tasklet to drain txq_pend into bulk-out URBs. */
3270 tasklet_schedule(&dev->bh);
3272 return NETDEV_TX_OK;
/* Driver bind: allocate and initialise per-device private state.
 * Allocates lan78xx_priv (stored in dev->data[0]), sets up locks and
 * deferred-work items for multicast/VLAN writes, selects the default
 * netdev feature set from the DEFAULT_* policy macros, creates the
 * PHY interrupt IRQ domain, performs the full register init via
 * lan78xx_reset(), and initialises the MDIO bus. Error paths unwind
 * the IRQ domain and cancel the deferred work items.
 */
3275 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3277 struct lan78xx_priv *pdata = NULL;
3281 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3283 pdata = (struct lan78xx_priv *)(dev->data[0]);
3285 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3291 spin_lock_init(&pdata->rfe_ctl_lock);
3292 mutex_init(&pdata->dataport_mutex);
3294 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
/* Start with an empty VLAN filter table. */
3296 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3297 pdata->vlan_table[i] = 0;
3299 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
/* Build the default feature set from compile-time policy. */
3301 dev->net->features = 0;
3303 if (DEFAULT_TX_CSUM_ENABLE)
3304 dev->net->features |= NETIF_F_HW_CSUM;
3306 if (DEFAULT_RX_CSUM_ENABLE)
3307 dev->net->features |= NETIF_F_RXCSUM;
3309 if (DEFAULT_TSO_CSUM_ENABLE) {
3310 dev->net->features |= NETIF_F_SG;
3311 /* Use module parameter to control TCP segmentation offload as
3312 * it appears to cause issues.
3315 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6;
3318 if (DEFAULT_VLAN_RX_OFFLOAD)
3319 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3321 if (DEFAULT_VLAN_FILTER_ENABLE)
3322 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3324 dev->net->hw_features = dev->net->features;
3326 ret = lan78xx_setup_irq_domain(dev);
3328 netdev_warn(dev->net,
3329 "lan78xx_setup_irq_domain() failed : %d", ret);
/* Account for the 8-byte Tx command header in the hard MTU. */
3333 dev->net->hard_header_len += TX_OVERHEAD;
3334 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3336 /* Init all registers */
3337 ret = lan78xx_reset(dev);
3339 netdev_warn(dev->net, "Registers INIT FAILED....");
3343 ret = lan78xx_mdio_init(dev);
3345 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3349 dev->net->flags |= IFF_MULTICAST;
/* Default Wake-on-LAN policy: magic packet. */
3351 pdata->wol = WAKE_MAGIC;
3356 lan78xx_remove_irq_domain(dev);
3359 netdev_warn(dev->net, "Bind routine FAILED");
3360 cancel_work_sync(&pdata->set_multicast);
3361 cancel_work_sync(&pdata->set_vlan);
/* Driver unbind: tear down what lan78xx_bind() created - the IRQ
 * domain, the MDIO bus, and the deferred multicast/VLAN work items -
 * before the private data is freed.
 */
3366 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3368 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3370 lan78xx_remove_irq_domain(dev);
3372 lan78xx_remove_mdio(dev);
3375 cancel_work_sync(&pdata->set_multicast);
3376 cancel_work_sync(&pdata->set_vlan);
3377 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply hardware Rx checksum results to an skb.
 * Falls back to software checksumming (CHECKSUM_NONE) when RXCSUM is
 * off, the hardware flagged a checksum error (RX_CMD_A_ICSM_), or the
 * frame carries a VLAN tag that will not be stripped (see comment
 * below); otherwise takes the 16-bit checksum from rx_cmd_b and marks
 * the skb CHECKSUM_COMPLETE.
 */
3384 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3385 struct sk_buff *skb,
3386 u32 rx_cmd_a, u32 rx_cmd_b)
3388 /* HW Checksum offload appears to be flawed if used when not stripping
3389 * VLAN headers. Drop back to S/W checksums under these conditions.
3391 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3392 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3393 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3394 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3395 skb->ip_summed = CHECKSUM_NONE;
3397 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3398 skb->ip_summed = CHECKSUM_COMPLETE;
/* If VLAN Rx offload is enabled and the hardware flagged a tagged
 * frame (RX_CMD_A_FVTG_), attach the 802.1Q tag from the low 16 bits
 * of rx_cmd_b to the skb.
 */
3402 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3403 struct sk_buff *skb,
3404 u32 rx_cmd_a, u32 rx_cmd_b)
3406 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3407 (rx_cmd_a & RX_CMD_A_FVTG_))
3408 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3409 (rx_cmd_b & 0xffff));
/* Hand a fully-parsed received skb up to the network stack: update rx
 * stats, resolve the protocol, clear the driver's skb_data control
 * block, honour deferred Rx timestamping, and deliver via netif_rx().
 */
3412 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3416 dev->net->stats.rx_packets++;
3417 dev->net->stats.rx_bytes += skb->len;
3419 skb->protocol = eth_type_trans(skb, dev->net);
3421 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3422 skb->len + sizeof(struct ethhdr), skb->protocol);
/* Wipe the skb_data state before the stack reuses skb->cb. */
3423 memset(skb->cb, 0, sizeof(struct skb_data));
/* If a PHY/driver takes ownership for Rx timestamping, stop here. */
3425 if (skb_defer_rx_timestamp(skb))
3428 status = netif_rx(skb);
3429 if (status != NET_RX_SUCCESS)
3430 netif_dbg(dev, rx_err, dev->net,
3431 "netif_rx status %d\n", status);
/* Parse one bulk-in URB buffer, which may contain several frames.
 * Each frame is preceded by a 10-byte command block (rx_cmd_a,
 * rx_cmd_b, rx_cmd_c) and padded to a 4-byte boundary. Error frames
 * (RX_CMD_A_RED_) are logged; good frames get csum/VLAN offload
 * applied and are delivered. The last frame in the buffer reuses the
 * URB skb directly; earlier frames are cloned so each delivered skb
 * owns exactly one frame. Returns 0/non-zero for the caller's
 * drop/ok decision in rx_process().
 */
3434 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3436 if (skb->len < dev->net->hard_header_len)
3439 while (skb->len > 0) {
3440 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3442 struct sk_buff *skb2;
3443 unsigned char *packet;
/* Pull the three little-endian command words off the front. */
3445 rx_cmd_a = get_unaligned_le32(skb->data);
3446 skb_pull(skb, sizeof(rx_cmd_a));
3448 rx_cmd_b = get_unaligned_le32(skb->data);
3449 skb_pull(skb, sizeof(rx_cmd_b));
3451 rx_cmd_c = get_unaligned_le16(skb->data);
3452 skb_pull(skb, sizeof(rx_cmd_c));
3456 /* get the packet length */
3457 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
/* Frames are 4-byte aligned within the batch buffer. */
3458 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3460 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3461 netif_dbg(dev, rx_err, dev->net,
3462 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3464 /* last frame in this batch */
3465 if (skb->len == size) {
3466 lan78xx_rx_csum_offload(dev, skb,
3467 rx_cmd_a, rx_cmd_b);
3468 lan78xx_rx_vlan_offload(dev, skb,
3469 rx_cmd_a, rx_cmd_b);
3471 skb_trim(skb, skb->len - 4); /* remove fcs */
3472 skb->truesize = size + sizeof(struct sk_buff);
/* Not the last frame: clone and carve this frame out of the batch. */
3477 skb2 = skb_clone(skb, GFP_ATOMIC);
3478 if (unlikely(!skb2)) {
3479 netdev_warn(dev->net, "Error allocating skb");
3484 skb2->data = packet;
3485 skb_set_tail_pointer(skb2, size);
3487 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3488 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3490 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3491 skb2->truesize = size + sizeof(struct sk_buff);
3493 lan78xx_skb_return(dev, skb2);
3496 skb_pull(skb, size);
3498 /* padding bytes before the next frame starts */
3500 skb_pull(skb, align_count);
/* Dispatch one completed Rx URB buffer: parse it with lan78xx_rx();
 * on parse failure count an rx_error and drop, otherwise deliver the
 * (last-frame) skb. Dropped skbs are requeued on the done list for
 * the bh tasklet to clean up.
 */
3506 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3508 if (!lan78xx_rx(dev, skb)) {
3509 dev->net->stats.rx_errors++;
3514 lan78xx_skb_return(dev, skb);
3518 netif_dbg(dev, rx_err, dev->net, "drop\n");
3519 dev->net->stats.rx_errors++;
3521 skb_queue_tail(&dev->done, skb);
3524 static void rx_complete(struct urb *urb);
/* Allocate an rx_urb_size skb, bind it to the given URB and submit it
 * on the bulk-in pipe. Submission is skipped (and the state logged)
 * when the device is absent, not running, halted or asleep; -ENODEV
 * detaches the netdev, -EPIPE defers halt recovery. On any failure
 * the skb is freed and the error returned; on success the skb is
 * queued on rxq in rx_start state.
 */
3526 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3528 struct sk_buff *skb;
3529 struct skb_data *entry;
3530 unsigned long lockflags;
3531 size_t size = dev->rx_urb_size;
3534 skb = netdev_alloc_skb(dev->net, size);
3540 entry = (struct skb_data *)skb->cb;
3545 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3546 skb->data, size, rx_complete, skb);
/* rxq.lock also serialises the running/halted/asleep checks below. */
3548 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3550 if (netif_device_present(dev->net) &&
3551 netif_running(dev->net) &&
3552 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3553 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3554 ret = usb_submit_urb(urb, GFP_ATOMIC);
3557 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3560 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3564 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3565 netif_device_detach(dev->net);
3571 netif_dbg(dev, rx_err, dev->net,
3572 "rx submit, %d\n", ret);
3573 tasklet_schedule(&dev->bh);
3576 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3579 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3581 dev_kfree_skb_any(skb);
/* URB completion handler for bulk-in transfers.
 * Records the received length, classifies the URB status (success,
 * stall -> EVENT_RX_HALT, unlink/shutdown codes, overrun, transient
 * errors), moves the skb to the done queue via defer_bh(), and - when
 * the interface is still running and not halted or being unlinked -
 * resubmits the same URB for the next transfer.
 */
3587 static void rx_complete(struct urb *urb)
3589 struct sk_buff *skb = (struct sk_buff *)urb->context;
3590 struct skb_data *entry = (struct skb_data *)skb->cb;
3591 struct lan78xx_net *dev = entry->dev;
3592 int urb_status = urb->status;
3593 enum skb_state state;
3595 skb_put(skb, urb->actual_length);
3599 switch (urb_status) {
/* Success, but a runt buffer is counted as a length error. */
3601 if (skb->len < dev->net->hard_header_len) {
3603 dev->net->stats.rx_errors++;
3604 dev->net->stats.rx_length_errors++;
3605 netif_dbg(dev, rx_err, dev->net,
3606 "rx length %d\n", skb->len);
3608 usb_mark_last_busy(dev->udev);
3611 dev->net->stats.rx_errors++;
3612 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3614 case -ECONNRESET: /* async unlink */
3615 case -ESHUTDOWN: /* hardware gone */
3616 netif_dbg(dev, ifdown, dev->net,
3617 "rx shutdown, code %d\n", urb_status);
3625 dev->net->stats.rx_errors++;
3631 /* data overrun ... flush fifo? */
3633 dev->net->stats.rx_over_errors++;
3638 dev->net->stats.rx_errors++;
3639 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3643 state = defer_bh(dev, skb, &dev->rxq, state);
3646 if (netif_running(dev->net) &&
3647 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3648 state != unlink_start) {
3649 rx_submit(dev, urb, GFP_ATOMIC);
3654 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* Bottom-half Tx handler: drain txq_pend into a single bulk-out URB.
 * A GSO skb is sent alone (after any earlier packets); otherwise
 * pending packets are coalesced, each aligned to 4 bytes, into one
 * freshly allocated skb up to MAX_SINGLE_PACKET_SIZE. The combined
 * buffer is submitted under txq.lock with an async autopm reference;
 * if the device is autosuspended the URB is parked on the deferred
 * anchor for resume-time transmission. Submit errors either detach
 * the device, trigger EVENT_TX_HALT recovery, or drop the buffer.
 */
3657 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3660 struct urb *urb = NULL;
3661 struct skb_data *entry;
3662 unsigned long flags;
3663 struct sk_buff_head *tqp = &dev->txq_pend;
3664 struct sk_buff *skb, *skb2;
3667 int skb_totallen, pkt_cnt;
/* Pass 1 (under tqp lock): decide how many packets to coalesce. */
3673 spin_lock_irqsave(&tqp->lock, flags);
3674 skb_queue_walk(tqp, skb) {
3675 if (skb_is_gso(skb)) {
3676 if (!skb_queue_is_first(tqp, skb)) {
3677 /* handle previous packets first */
/* GSO frame at the head: send it by itself. */
3681 length = skb->len - TX_OVERHEAD;
3682 __skb_unlink(skb, tqp);
3683 spin_unlock_irqrestore(&tqp->lock, flags);
3687 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3689 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3692 spin_unlock_irqrestore(&tqp->lock, flags);
3694 /* copy to a single skb */
3695 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3699 skb_put(skb, skb_totallen);
/* Pass 2: copy each pending packet into the combined buffer,
 * 4-byte aligned, accumulating payload length (minus headers). */
3701 for (count = pos = 0; count < pkt_cnt; count++) {
3702 skb2 = skb_dequeue(tqp);
3704 length += (skb2->len - TX_OVERHEAD);
3705 memcpy(skb->data + pos, skb2->data, skb2->len);
3706 pos += roundup(skb2->len, sizeof(u32));
3707 dev_kfree_skb(skb2);
3712 urb = usb_alloc_urb(0, GFP_ATOMIC);
3716 entry = (struct skb_data *)skb->cb;
3719 entry->length = length;
3720 entry->num_of_packet = count;
3722 spin_lock_irqsave(&dev->txq.lock, flags);
3723 ret = usb_autopm_get_interface_async(dev->intf);
3725 spin_unlock_irqrestore(&dev->txq.lock, flags);
3729 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3730 skb->data, skb->len, tx_complete, skb);
3732 if (length % dev->maxpacket == 0) {
3733 /* send USB_ZERO_PACKET */
3734 urb->transfer_flags |= URB_ZERO_PACKET;
3738 /* if this triggers the device is still a sleep */
3739 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3740 /* transmission will be done in resume */
3741 usb_anchor_urb(urb, &dev->deferred);
3742 /* no use to process more packets */
3743 netif_stop_queue(dev->net);
3745 spin_unlock_irqrestore(&dev->txq.lock, flags);
3746 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3751 ret = usb_submit_urb(urb, GFP_ATOMIC);
3754 netif_trans_update(dev->net);
3755 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3756 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3757 netif_stop_queue(dev->net);
3760 netif_stop_queue(dev->net);
3761 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3762 usb_autopm_put_interface_async(dev->intf);
3766 netif_dbg(dev, tx_err, dev->net,
3767 "tx: submit urb err %d (disconnected?)", ret);
3768 netif_device_detach(dev->net);
3771 usb_autopm_put_interface_async(dev->intf);
3772 netif_dbg(dev, tx_err, dev->net,
3773 "tx: submit urb err %d\n", ret);
3777 spin_unlock_irqrestore(&dev->txq.lock, flags);
3780 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3782 dev->net->stats.tx_dropped++;
3784 dev_kfree_skb_any(skb);
3787 netif_dbg(dev, tx_queued, dev->net,
3788 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* Bottom-half Rx handler: top up the rxq with fresh bulk-in URBs (in
 * batches of up to 10 per invocation), rescheduling the tasklet if
 * the queue is still below rx_qlen, and wake the Tx queue when there
 * is room in txq again.
 */
3792 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3797 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3798 for (i = 0; i < 10; i++) {
3799 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3801 urb = usb_alloc_urb(0, GFP_ATOMIC);
/* -ENOLINK means the device was detached: stop refilling. */
3803 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3807 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3808 tasklet_schedule(&dev->bh);
3810 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3811 netif_wake_queue(dev->net);
/* Main bottom-half tasklet: drain the done queue - processing
 * received buffers and freeing URBs of completed/cleaned-up Tx and Rx
 * entries - then, if the device is up, rearm the stats timer delta
 * and run the Tx/Rx bottom halves.
 */
3814 static void lan78xx_bh(struct tasklet_struct *t)
3816 struct lan78xx_net *dev = from_tasklet(dev, t, bh);
3817 struct sk_buff *skb;
3818 struct skb_data *entry;
3820 while ((skb = skb_dequeue(&dev->done))) {
3821 entry = (struct skb_data *)(skb->cb);
3822 switch (entry->state) {
3824 entry->state = rx_cleanup;
3825 rx_process(dev, skb);
3828 usb_free_urb(entry->urb);
3832 usb_free_urb(entry->urb);
3836 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3841 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3842 /* reset update timer delta */
3843 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3845 mod_timer(&dev->stat_monitor,
3846 jiffies + STAT_UPDATE_TIMER);
3849 if (!skb_queue_empty(&dev->txq_pend))
3852 if (!test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred (process-context) event worker.
 * Handles the event bits raised elsewhere in the driver: clears Tx/Rx
 * endpoint halts (unlinking in-flight URBs first), performs link
 * reset, and runs the periodic statistics update with exponential
 * timer back-off (delta doubles up to 50). Bails out early on device
 * disconnect or if the interface cannot be resumed.
 */
3857 static void lan78xx_delayedwork(struct work_struct *work)
3860 struct lan78xx_net *dev;
3862 dev = container_of(work, struct lan78xx_net, wq.work);
3864 if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
3867 if (usb_autopm_get_interface(dev->intf) < 0)
3870 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3871 unlink_urbs(dev, &dev->txq);
3873 status = usb_clear_halt(dev->udev, dev->pipe_out);
3876 status != -ESHUTDOWN) {
3877 if (netif_msg_tx_err(dev))
3878 netdev_err(dev->net,
3879 "can't clear tx halt, status %d\n",
3882 clear_bit(EVENT_TX_HALT, &dev->flags);
3883 if (status != -ESHUTDOWN)
3884 netif_wake_queue(dev->net);
3888 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3889 unlink_urbs(dev, &dev->rxq);
3890 status = usb_clear_halt(dev->udev, dev->pipe_in);
3893 status != -ESHUTDOWN) {
3894 if (netif_msg_rx_err(dev))
3895 netdev_err(dev->net,
3896 "can't clear rx halt, status %d\n",
3899 clear_bit(EVENT_RX_HALT, &dev->flags);
3900 tasklet_schedule(&dev->bh);
3904 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3907 clear_bit(EVENT_LINK_RESET, &dev->flags);
3908 if (lan78xx_link_reset(dev) < 0) {
3909 netdev_info(dev->net, "link reset failed (%d)\n",
3914 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3915 lan78xx_update_stats(dev);
3917 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3919 mod_timer(&dev->stat_monitor,
3920 jiffies + (STAT_UPDATE_TIMER * dev->delta));
/* Back off the stats poll interval exponentially, capped at 50x. */
3922 dev->delta = min((dev->delta * 2), 50);
3925 usb_autopm_put_interface(dev->intf);
/* URB completion handler for the interrupt (status) endpoint.
 * On success, parses the status via lan78xx_status(); shutdown codes
 * are logged and end the cycle; other errors are logged but - unlike
 * the data endpoints - not throttled, since this endpoint polls
 * infrequently. If the device is still present and running, the
 * status buffer is cleared and the URB resubmitted; -ENODEV detaches
 * the netdev.
 */
3928 static void intr_complete(struct urb *urb)
3930 struct lan78xx_net *dev = urb->context;
3931 int status = urb->status;
3936 lan78xx_status(dev, urb);
3939 /* software-driven interface shutdown */
3940 case -ENOENT: /* urb killed */
3941 case -ENODEV: /* hardware gone */
3942 case -ESHUTDOWN: /* hardware gone */
3943 netif_dbg(dev, ifdown, dev->net,
3944 "intr shutdown, code %d\n", status);
3947 /* NOTE: not throttling like RX/TX, since this endpoint
3948 * already polls infrequently
3951 netdev_dbg(dev->net, "intr status %d\n", status);
3955 if (!netif_device_present(dev->net) ||
3956 !netif_running(dev->net)) {
3957 netdev_warn(dev->net, "not submitting new status URB");
3961 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3962 status = usb_submit_urb(urb, GFP_ATOMIC);
3969 netif_dbg(dev, timer, dev->net,
3970 "intr resubmit %d (disconnect?)", status);
3971 netif_device_detach(dev->net);
3974 netif_err(dev, timer, dev->net,
3975 "intr resubmit --> %d\n", status);
/* USB disconnect handler: unwind everything probe set up.
 * Marks the device disconnected, unregisters the netdev, cancels the
 * deferred worker, removes the PHY fixups and disconnects (and, for a
 * pseudo fixed-link, unregisters) the PHY, discards any URBs parked
 * on the deferred anchor, stops the stats timer, unbinds the private
 * state, and kills/frees the interrupt URB.
 */
3980 static void lan78xx_disconnect(struct usb_interface *intf)
3982 struct lan78xx_net *dev;
3983 struct usb_device *udev;
3984 struct net_device *net;
3985 struct phy_device *phydev;
3987 dev = usb_get_intfdata(intf);
3988 usb_set_intfdata(intf, NULL);
/* Make the deferred worker a no-op from here on. */
3992 set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
3994 udev = interface_to_usbdev(intf);
3997 unregister_netdev(net);
3999 cancel_delayed_work_sync(&dev->wq);
4001 phydev = net->phydev;
4003 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
4004 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
4006 phy_disconnect(net->phydev);
4008 if (phy_is_pseudo_fixed_link(phydev))
4009 fixed_phy_unregister(phydev);
4011 usb_scuttle_anchored_urbs(&dev->deferred);
4013 if (timer_pending(&dev->stat_monitor))
4014 del_timer_sync(&dev->stat_monitor);
4016 lan78xx_unbind(dev, intf);
4018 usb_kill_urb(dev->urb_intr);
4019 usb_free_urb(dev->urb_intr);
/* ndo_tx_timeout: recover from a stalled Tx watchdog by unlinking all
 * in-flight Tx URBs and letting the bh tasklet restart transmission.
 */
4025 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4027 struct lan78xx_net *dev = netdev_priv(net);
4029 unlink_urbs(dev, &dev->txq);
4030 tasklet_schedule(&dev->bh);
/* ndo_features_check: disable GSO for frames that, with the Tx command
 * header added, would exceed the single-packet hardware limit, then
 * apply the generic VLAN and VXLAN feature restrictions.
 */
4033 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4034 struct net_device *netdev,
4035 netdev_features_t features)
4037 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
4038 features &= ~NETIF_F_GSO_MASK;
4040 features = vlan_features_check(skb, features);
4041 features = vxlan_features_check(skb, features);
/* net_device_ops table wiring the driver's handlers into the netdev core. */
4046 static const struct net_device_ops lan78xx_netdev_ops = {
4047 .ndo_open = lan78xx_open,
4048 .ndo_stop = lan78xx_stop,
4049 .ndo_start_xmit = lan78xx_start_xmit,
4050 .ndo_tx_timeout = lan78xx_tx_timeout,
4051 .ndo_change_mtu = lan78xx_change_mtu,
4052 .ndo_set_mac_address = lan78xx_set_mac_addr,
4053 .ndo_validate_addr = eth_validate_addr,
4054 .ndo_eth_ioctl = phy_do_ioctl_running,
4055 .ndo_set_rx_mode = lan78xx_set_multicast,
4056 .ndo_set_features = lan78xx_set_features,
4057 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
4058 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
4059 .ndo_features_check = lan78xx_features_check,
/* Periodic statistics timer callback: defers the actual register reads
 * to the process-context worker via EVENT_STAT_UPDATE.
 */
4062 static void lan78xx_stat_monitor(struct timer_list *t)
4064 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4066 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* USB probe: allocate the netdev and driver state and bring the device
 * to a registered, runtime-PM-enabled state.
 *  - allocates the ether netdev and initialises queues/locks/tasklet/
 *    delayed work/stats timer
 *  - validates and resolves the three endpoints (bulk-in, bulk-out,
 *    interrupt-in), rejecting malformed descriptors
 *  - binds private state and hardware (lan78xx_bind), clamps the MTU
 *    to the hardware hard_mtu and sets GSO limits
 *  - sets up the interrupt URB (period optionally overridden by the
 *    int_urb_interval_ms module parameter), initialises the PHY, and
 *    registers the netdev
 *  - enables remote wakeup and a 10s autosuspend delay
 * Error paths unwind in reverse: PHY disconnect, URB free, unbind,
 * free_netdev.
 */
4069 static int lan78xx_probe(struct usb_interface *intf,
4070 const struct usb_device_id *id)
4072 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4073 struct lan78xx_net *dev;
4074 struct net_device *netdev;
4075 struct usb_device *udev;
4078 unsigned int period;
4081 udev = interface_to_usbdev(intf);
4082 udev = usb_get_dev(udev);
4084 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4086 dev_err(&intf->dev, "Error: OOM\n");
4091 /* netdev_printk() needs this */
4092 SET_NETDEV_DEV(netdev, &intf->dev);
4094 dev = netdev_priv(netdev);
4098 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4099 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
4101 skb_queue_head_init(&dev->rxq);
4102 skb_queue_head_init(&dev->txq);
4103 skb_queue_head_init(&dev->done);
4104 skb_queue_head_init(&dev->txq_pend);
4105 mutex_init(&dev->phy_mutex);
4106 mutex_init(&dev->dev_mutex);
4108 tasklet_setup(&dev->bh, lan78xx_bh);
4109 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4110 init_usb_anchor(&dev->deferred);
4112 netdev->netdev_ops = &lan78xx_netdev_ops;
4113 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4114 netdev->ethtool_ops = &lan78xx_ethtool_ops;
4117 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4119 mutex_init(&dev->stats.access_lock);
/* Require at least bulk-in, bulk-out and interrupt endpoints. */
4121 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4126 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4127 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4128 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4133 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4134 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4135 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4140 ep_intr = &intf->cur_altsetting->endpoint[2];
4141 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4146 dev->pipe_intr = usb_rcvintpipe(dev->udev,
4147 usb_endpoint_num(&ep_intr->desc));
4149 ret = lan78xx_bind(dev, intf);
4153 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
4154 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
4156 /* MTU range: 68 - 9000 */
4157 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4158 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
/* Interrupt URB period: descriptor bInterval unless overridden. */
4160 if (int_urb_interval_ms <= 0)
4161 period = ep_intr->desc.bInterval;
4163 period = int_urb_interval_ms * INT_URB_MICROFRAMES_PER_MS;
4165 netif_notice(dev, probe, netdev, "int urb period %d\n", period);
4167 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
4168 buf = kmalloc(maxp, GFP_KERNEL);
4170 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4171 if (!dev->urb_intr) {
4176 usb_fill_int_urb(dev->urb_intr, dev->udev,
4177 dev->pipe_intr, buf, maxp,
4178 intr_complete, dev, period);
/* Core frees 'buf' with the URB; no separate kfree needed. */
4179 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4183 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
4185 /* Reject broken descriptors. */
4186 if (dev->maxpacket == 0) {
4191 /* driver requires remote-wakeup capability during autosuspend. */
4192 intf->needs_remote_wakeup = 1;
4194 ret = lan78xx_phy_init(dev);
4198 ret = register_netdev(netdev);
4200 netif_err(dev, probe, netdev, "couldn't register the device\n");
4204 usb_set_intfdata(intf, dev);
4206 ret = device_set_wakeup_enable(&udev->dev, true);
4208 /* Default delay of 2sec has more overhead than advantage.
4209 * Set to 10sec as default.
4211 pm_runtime_set_autosuspend_delay(&udev->dev,
4212 DEFAULT_AUTOSUSPEND_DELAY);
4217 phy_disconnect(netdev->phydev);
4219 usb_free_urb(dev->urb_intr);
4221 lan78xx_unbind(dev, intf);
4223 free_netdev(netdev);
/* Compute the CRC-16 (polynomial 0x8005) over a wake-frame pattern,
 * processed bit by bit, as required by the hardware's wakeup-frame
 * filter (WUF) registers.
 */
4230 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4232 const u16 crc16poly = 0x8005;
4238 for (i = 0; i < len; i++) {
4240 for (bit = 0; bit < 8; bit++) {
/* XOR the CRC MSB with the incoming data bit to decide feedback. */
4244 if (msb ^ (u16)(data & 1)) {
4246 crc |= (u16)0x0001U;
/* Configure the chip for USB selective (auto) suspend.
 * Stops both data paths, clears all wake-up status/config registers,
 * enables remote-wakeup on good frames (RFE wake + store), programs
 * PMT_CTL for suspend mode 3 with PHY and WOL wake enables, clears the
 * wake-up status bits, and restarts the Rx path so wake frames can be
 * received while suspended. Returns 0 or a negative register error.
 */
4255 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4260 ret = lan78xx_stop_tx_path(dev);
4264 ret = lan78xx_stop_rx_path(dev);
4268 /* auto suspend (selective suspend) */
/* Clear wake-up control/status and acknowledge all wake sources. */
4270 ret = lan78xx_write_reg(dev, WUCSR, 0);
4273 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4276 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4280 /* set goodframe wakeup */
4282 ret = lan78xx_read_reg(dev, WUCSR, &buf);
4286 buf |= WUCSR_RFE_WAKE_EN_;
4287 buf |= WUCSR_STORE_WAKE_;
4289 ret = lan78xx_write_reg(dev, WUCSR, buf);
4293 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4297 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4298 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4299 buf |= PMT_CTL_PHY_WAKE_EN_;
4300 buf |= PMT_CTL_WOL_EN_;
4301 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4302 buf |= PMT_CTL_SUS_MODE_3_;
4304 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Write-1-to-clear any latched wake-up status. */
4308 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4312 buf |= PMT_CTL_WUPS_MASK_;
4314 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Keep Rx running so wake frames are seen during suspend. */
4318 ret = lan78xx_start_rx_path(dev);
/* lan78xx_set_suspend - program wake-on-LAN behaviour for a full
 * (non-auto) suspend.  @wol is the ethtool WAKE_* bitmask; each set bit
 * enables the matching hardware wake source and picks a PMT_CTL suspend
 * mode (mode 0 for frame/filter wakes, mode 3 for magic packet).
 * Multicast and ARP wakes are implemented as CRC-16 wakeup-frame
 * filters in the WUF_CFG/WUF_MASK register banks.
 * NOTE(review): error-check lines are elided in this extraction.
 */
4323 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* first bytes matched by the multicast/ARP wakeup-frame filters */
4325 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4326 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4327 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce both data paths before reprogramming wake configuration */
4335 ret = lan78xx_stop_tx_path(dev);
4338 ret = lan78xx_stop_rx_path(dev);
/* clear wakeup-control and latched wake-source status registers */
4342 ret = lan78xx_write_reg(dev, WUCSR, 0);
4345 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4348 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4356 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4360 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4361 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* disable every wakeup-frame filter before selectively re-enabling */
4363 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4364 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4370 if (wol & WAKE_PHY) {
4371 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4373 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4374 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4375 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4377 if (wol & WAKE_MAGIC) {
4378 temp_wucsr |= WUCSR_MPEN_;
/* magic packet wake requires the deeper suspend mode 3 */
4380 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4381 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4382 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4384 if (wol & WAKE_BCAST) {
4385 temp_wucsr |= WUCSR_BCST_EN_;
4387 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4388 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4389 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4391 if (wol & WAKE_MCAST) {
4392 temp_wucsr |= WUCSR_WAKE_EN_;
4394 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4395 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4396 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4398 WUF_CFGX_TYPE_MCAST_ |
4399 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4400 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 7 = match the first 3 bytes (01:00:5E prefix) */
4404 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4407 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4410 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4413 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4419 /* for IPv6 Multicast */
4420 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4421 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4423 WUF_CFGX_TYPE_MCAST_ |
4424 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4425 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 3 = match the first 2 bytes (33:33 prefix) */
4429 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4432 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4435 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4438 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4444 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4445 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4446 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4448 if (wol & WAKE_UCAST) {
4449 temp_wucsr |= WUCSR_PFDA_EN_;
4451 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4452 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4453 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4455 if (wol & WAKE_ARP) {
4456 temp_wucsr |= WUCSR_WAKE_EN_;
4458 /* set WUF_CFG & WUF_MASK
4459 * for packettype (offset 12,13) = ARP (0x0806)
4461 crc = lan78xx_wakeframe_crc16(arp_type, 2);
4462 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4464 WUF_CFGX_TYPE_ALL_ |
4465 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4466 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3000 = match bytes 12-13, the Ethernet EtherType field */
4470 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4473 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4476 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4479 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4485 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4486 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4487 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
/* commit the accumulated wake-source enables */
4490 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4494 /* when multiple WOL bits are set */
4495 if (hweight_long((unsigned long)wol) > 1) {
4496 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4497 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4498 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4500 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4505 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
/* WUPS bits appear to be write-1-to-clear; clear any stale wake-up
 * status before suspending - TODO confirm against the datasheet.
 */
4509 buf |= PMT_CTL_WUPS_MASK_;
4511 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Rx must stay running so a WOL frame can arrive */
4515 ret = lan78xx_start_rx_path(dev);
/* lan78xx_suspend - USB suspend callback.
 * For runtime (auto) suspend, refuses to suspend while Tx work is
 * pending.  Otherwise quiesces the device and programs either the
 * autosuspend wake configuration or full WOL per the saved pdata->wol
 * mask.  If the interface is down, all wake sources are cleared and the
 * chip is parked in suspend mode 3 so WOL/PHY events cannot wake the
 * host.  Serialized against open/stop via dev->dev_mutex.
 * NOTE(review): error-branch lines are elided in this extraction.
 */
4520 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4522 struct lan78xx_net *dev = usb_get_intfdata(intf);
4526 mutex_lock(&dev->dev_mutex);
4528 netif_dbg(dev, ifdown, dev->net,
4529 "suspending: pm event %#x", message.event);
4531 dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4534 spin_lock_irq(&dev->txq.lock);
4535 /* don't autosuspend while transmitting */
4536 if ((skb_queue_len(&dev->txq) ||
4537 skb_queue_len(&dev->txq_pend)) &&
4538 PMSG_IS_AUTO(message)) {
4539 spin_unlock_irq(&dev->txq.lock);
/* mark asleep while still holding txq.lock so the Tx path sees it */
4543 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4544 spin_unlock_irq(&dev->txq.lock);
4548 ret = lan78xx_stop_rx_path(dev);
4552 ret = lan78xx_flush_rx_fifo(dev);
4557 ret = lan78xx_stop_tx_path(dev);
4561 /* empty out the Rx and Tx queues */
4562 netif_device_detach(dev->net);
4563 lan78xx_terminate_urbs(dev);
4564 usb_kill_urb(dev->urb_intr);
4567 netif_device_attach(dev->net);
/* no stats polling while suspended */
4569 del_timer(&dev->stat_monitor);
4571 if (PMSG_IS_AUTO(message)) {
4572 ret = lan78xx_set_auto_suspend(dev);
4576 struct lan78xx_priv *pdata;
4578 pdata = (struct lan78xx_priv *)(dev->data[0]);
4579 netif_carrier_off(dev->net);
/* system suspend: apply the user-configured WOL mask */
4580 ret = lan78xx_set_suspend(dev, pdata->wol);
4585 /* Interface is down; don't allow WOL and PHY
4586 * events to wake up the host
4590 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4592 ret = lan78xx_write_reg(dev, WUCSR, 0);
4595 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4599 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4603 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4604 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4605 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4606 buf |= PMT_CTL_SUS_MODE_3_;
4608 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4612 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
/* WUPS bits appear to be write-1-to-clear - TODO confirm */
4616 buf |= PMT_CTL_WUPS_MASK_;
4618 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4625 mutex_unlock(&dev->dev_mutex);
/* lan78xx_submit_deferred_urbs - resubmit Tx URBs that were anchored on
 * dev->deferred while the device was asleep.  Returns true when the
 * bulk-out pipe was found halted (-EPIPE) so the caller can defer an
 * EVENT_TX_HALT recovery.  Called from lan78xx_resume() under
 * spin_lock_irq(&dev->txq.lock) - hence GFP_ATOMIC below.
 * NOTE(review): skb/urb cleanup lines are elided in this extraction.
 */
4630 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4632 bool pipe_halted = false;
4635 while ((urb = usb_get_from_anchor(&dev->deferred))) {
4636 struct sk_buff *skb = urb->context;
/* drop the deferred frame if the device/link went away meanwhile */
4639 if (!netif_device_present(dev->net) ||
4640 !netif_carrier_ok(dev->net) ||
4647 ret = usb_submit_urb(urb, GFP_ATOMIC);
4650 netif_trans_update(dev->net);
4651 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4656 if (ret == -EPIPE) {
/* halted pipe: stop the queue and report it to the caller */
4657 netif_stop_queue(dev->net);
4659 } else if (ret == -ENODEV) {
4660 netif_device_detach(dev->net);
/* lan78xx_resume - USB resume callback: restart the interrupt URB and
 * Tx path, resubmit Tx URBs deferred during sleep, restart the stats
 * timer, and rearm/clear the wake registers programmed at suspend time.
 * Serialized via dev->dev_mutex.
 * NOTE(review): error-branch lines are elided in this extraction.
 */
4668 static int lan78xx_resume(struct usb_interface *intf)
4670 struct lan78xx_net *dev = usb_get_intfdata(intf);
4674 mutex_lock(&dev->dev_mutex);
4676 netif_dbg(dev, ifup, dev->net, "resuming device");
4678 dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4681 bool pipe_halted = false;
4683 ret = lan78xx_flush_tx_fifo(dev);
/* restart the interrupt endpoint first so link events are seen */
4687 if (dev->urb_intr) {
4688 int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4692 netif_device_detach(dev->net);
4694 netdev_warn(dev->net, "Failed to submit intr URB");
4698 spin_lock_irq(&dev->txq.lock);
4700 if (netif_device_present(dev->net)) {
4701 pipe_halted = lan78xx_submit_deferred_urbs(dev);
/* a halted bulk-out pipe needs the kevent worker to clear it */
4704 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4707 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4709 spin_unlock_irq(&dev->txq.lock);
4712 netif_device_present(dev->net) &&
4713 (skb_queue_len(&dev->txq) < dev->tx_qlen))
4714 netif_start_queue(dev->net);
4716 ret = lan78xx_start_tx_path(dev);
4720 tasklet_schedule(&dev->bh);
/* resume periodic statistics collection stopped at suspend */
4722 if (!timer_pending(&dev->stat_monitor)) {
4724 mod_timer(&dev->stat_monitor,
4725 jiffies + STAT_UPDATE_TIMER);
4729 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
/* clear the wake configuration and latched wake sources */
4732 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4735 ret = lan78xx_write_reg(dev, WUCSR, 0);
4738 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
/* acknowledge (write-1-to-clear, presumably) latched wake reasons */
4742 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4744 WUCSR2_IPV6_TCPSYN_RCD_ |
4745 WUCSR2_IPV4_TCPSYN_RCD_);
4749 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4750 WUCSR_EEE_RX_WAKE_ |
4752 WUCSR_RFE_WAKE_FR_ |
4761 mutex_unlock(&dev->dev_mutex);
/* lan78xx_reset_resume - resume after the USB core reset the device:
 * re-run the full chip reset/initialisation, restart the PHY state
 * machine, then go through the normal resume path.
 */
4766 static int lan78xx_reset_resume(struct usb_interface *intf)
4768 struct lan78xx_net *dev = usb_get_intfdata(intf);
4771 netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
4773 ret = lan78xx_reset(dev);
4777 phy_start(dev->net->phydev);
4779 ret = lan78xx_resume(intf);
/* USB vendor/product ID match table for the supported LAN78xx-family
 * devices; exported to the module loader via MODULE_DEVICE_TABLE.
 */
4784 static const struct usb_device_id products[] = {
4786 /* LAN7800 USB Gigabit Ethernet Device */
4787 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4790 /* LAN7850 USB Gigabit Ethernet Device */
4791 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4794 /* LAN7801 USB Gigabit Ethernet Device */
4795 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4798 /* ATM2-AF USB Gigabit Ethernet Device */
4799 USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
4803 MODULE_DEVICE_TABLE(usb, products);
/* USB driver glue: probe/disconnect plus the suspend/resume callbacks
 * above.  Autosuspend is supported; hub-initiated link power management
 * is disabled for this hardware.
 */
4805 static struct usb_driver lan78xx_driver = {
4806 .name = DRIVER_NAME,
4807 .id_table = products,
4808 .probe = lan78xx_probe,
4809 .disconnect = lan78xx_disconnect,
4810 .suspend = lan78xx_suspend,
4811 .resume = lan78xx_resume,
4812 .reset_resume = lan78xx_reset_resume,
4813 .supports_autosuspend = 1,
4814 .disable_hub_initiated_lpm = 1,
/* registers the driver and generates module init/exit boilerplate */
4817 module_usb_driver(lan78xx_driver);
4819 MODULE_AUTHOR(DRIVER_AUTHOR);
4820 MODULE_DESCRIPTION(DRIVER_DESC);
4821 MODULE_LICENSE("GPL");