// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
33 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME "lan78xx"
37 #define TX_TIMEOUT_JIFFIES (5 * HZ)
38 #define THROTTLE_JIFFIES (HZ / 8)
39 #define UNLINK_TIMEOUT_MS 3
41 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
43 #define SS_USB_PKT_SIZE (1024)
44 #define HS_USB_PKT_SIZE (512)
45 #define FS_USB_PKT_SIZE (64)
47 #define MAX_RX_FIFO_SIZE (12 * 1024)
48 #define MAX_TX_FIFO_SIZE (12 * 1024)
50 #define FLOW_THRESHOLD(n) ((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off) ((FLOW_THRESHOLD(on) << 0) | \
52 (FLOW_THRESHOLD(off) << 8))
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS 9216
56 #define FLOW_ON_HS 8704
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS 4096
60 #define FLOW_OFF_HS 1024
62 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY (0x0800)
64 #define MAX_SINGLE_PACKET_SIZE (9000)
65 #define DEFAULT_TX_CSUM_ENABLE (true)
66 #define DEFAULT_RX_CSUM_ENABLE (true)
67 #define DEFAULT_TSO_CSUM_ENABLE (true)
68 #define DEFAULT_VLAN_FILTER_ENABLE (true)
69 #define DEFAULT_VLAN_RX_OFFLOAD (true)
70 #define TX_ALIGNMENT (4)
73 #define LAN78XX_USB_VENDOR_ID (0x0424)
74 #define LAN7800_USB_PRODUCT_ID (0x7800)
75 #define LAN7850_USB_PRODUCT_ID (0x7850)
76 #define LAN7801_USB_PRODUCT_ID (0x7801)
77 #define LAN78XX_EEPROM_MAGIC (0x78A5)
78 #define LAN78XX_OTP_MAGIC (0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID (0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
85 #define EEPROM_INDICATOR (0xA5)
86 #define EEPROM_MAC_OFFSET (0x01)
87 #define MAX_EEPROM_SIZE 512
88 #define OTP_INDICATOR_1 (0xF3)
89 #define OTP_INDICATOR_2 (0xF7)
91 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
92 WAKE_MCAST | WAKE_BCAST | \
93 WAKE_ARP | WAKE_MAGIC)
96 #define TX_SS_URB_NUM TX_URB_NUM
97 #define TX_HS_URB_NUM TX_URB_NUM
98 #define TX_FS_URB_NUM TX_URB_NUM
100 /* A single URB buffer must be large enough to hold a complete jumbo packet
102 #define TX_SS_URB_SIZE (32 * 1024)
103 #define TX_HS_URB_SIZE (16 * 1024)
104 #define TX_FS_URB_SIZE (10 * 1024)
106 #define RX_SS_URB_NUM 30
107 #define RX_HS_URB_NUM 10
108 #define RX_FS_URB_NUM 10
109 #define RX_SS_URB_SIZE TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE TX_FS_URB_SIZE
113 #define SS_BURST_CAP_SIZE RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY 0x2000
115 #define HS_BURST_CAP_SIZE RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY 0x2000
117 #define FS_BURST_CAP_SIZE RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY 0x2000
121 #define TX_SKB_MIN_LEN (TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev) ((dev)->tx_urb_size - TX_SKB_MIN_LEN)
124 #define RX_CMD_LEN 10
125 #define RX_SKB_MIN_LEN (RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu) ((mtu) + ETH_HLEN + VLAN_HLEN)
128 /* USB related defines */
129 #define BULK_IN_PIPE 1
130 #define BULK_OUT_PIPE 2
132 /* default autosuspend delay (mSec)*/
133 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
135 /* statistic update interval (mSec) */
136 #define STAT_UPDATE_TIMER (1 * 1000)
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT (HZ / 10)
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS 1
144 /* defines interrupts from interrupt EP */
145 #define MAX_INT_EP (32)
146 #define INT_EP_INTEP (31)
147 #define INT_EP_OTP_WR_DONE (28)
148 #define INT_EP_EEE_TX_LPI_START (26)
149 #define INT_EP_EEE_TX_LPI_STOP (25)
150 #define INT_EP_EEE_RX_LPI (24)
151 #define INT_EP_MAC_RESET_TIMEOUT (23)
152 #define INT_EP_RDFO (22)
153 #define INT_EP_TXE (21)
154 #define INT_EP_USB_STATUS (20)
155 #define INT_EP_TX_DIS (19)
156 #define INT_EP_RX_DIS (18)
157 #define INT_EP_PHY (17)
158 #define INT_EP_DP (16)
159 #define INT_EP_MAC_ERR (15)
160 #define INT_EP_TDFU (14)
161 #define INT_EP_TDFO (13)
162 #define INT_EP_UTX (12)
163 #define INT_EP_GPIO_11 (11)
164 #define INT_EP_GPIO_10 (10)
165 #define INT_EP_GPIO_9 (9)
166 #define INT_EP_GPIO_8 (8)
167 #define INT_EP_GPIO_7 (7)
168 #define INT_EP_GPIO_6 (6)
169 #define INT_EP_GPIO_5 (5)
170 #define INT_EP_GPIO_4 (4)
171 #define INT_EP_GPIO_3 (3)
172 #define INT_EP_GPIO_2 (2)
173 #define INT_EP_GPIO_1 (1)
174 #define INT_EP_GPIO_0 (0)
176 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
178 "RX Alignment Errors",
179 "Rx Fragment Errors",
181 "RX Undersize Frame Errors",
182 "RX Oversize Frame Errors",
184 "RX Unicast Byte Count",
185 "RX Broadcast Byte Count",
186 "RX Multicast Byte Count",
188 "RX Broadcast Frames",
189 "RX Multicast Frames",
192 "RX 65 - 127 Byte Frames",
193 "RX 128 - 255 Byte Frames",
194 "RX 256 - 511 Bytes Frames",
195 "RX 512 - 1023 Byte Frames",
196 "RX 1024 - 1518 Byte Frames",
197 "RX Greater 1518 Byte Frames",
198 "EEE RX LPI Transitions",
201 "TX Excess Deferral Errors",
204 "TX Single Collisions",
205 "TX Multiple Collisions",
206 "TX Excessive Collision",
207 "TX Late Collisions",
208 "TX Unicast Byte Count",
209 "TX Broadcast Byte Count",
210 "TX Multicast Byte Count",
212 "TX Broadcast Frames",
213 "TX Multicast Frames",
216 "TX 65 - 127 Byte Frames",
217 "TX 128 - 255 Byte Frames",
218 "TX 256 - 511 Bytes Frames",
219 "TX 512 - 1023 Byte Frames",
220 "TX 1024 - 1518 Byte Frames",
221 "TX Greater 1518 Byte Frames",
222 "EEE TX LPI Transitions",
226 struct lan78xx_statstage {
228 u32 rx_alignment_errors;
229 u32 rx_fragment_errors;
230 u32 rx_jabber_errors;
231 u32 rx_undersize_frame_errors;
232 u32 rx_oversize_frame_errors;
233 u32 rx_dropped_frames;
234 u32 rx_unicast_byte_count;
235 u32 rx_broadcast_byte_count;
236 u32 rx_multicast_byte_count;
237 u32 rx_unicast_frames;
238 u32 rx_broadcast_frames;
239 u32 rx_multicast_frames;
241 u32 rx_64_byte_frames;
242 u32 rx_65_127_byte_frames;
243 u32 rx_128_255_byte_frames;
244 u32 rx_256_511_bytes_frames;
245 u32 rx_512_1023_byte_frames;
246 u32 rx_1024_1518_byte_frames;
247 u32 rx_greater_1518_byte_frames;
248 u32 eee_rx_lpi_transitions;
251 u32 tx_excess_deferral_errors;
252 u32 tx_carrier_errors;
253 u32 tx_bad_byte_count;
254 u32 tx_single_collisions;
255 u32 tx_multiple_collisions;
256 u32 tx_excessive_collision;
257 u32 tx_late_collisions;
258 u32 tx_unicast_byte_count;
259 u32 tx_broadcast_byte_count;
260 u32 tx_multicast_byte_count;
261 u32 tx_unicast_frames;
262 u32 tx_broadcast_frames;
263 u32 tx_multicast_frames;
265 u32 tx_64_byte_frames;
266 u32 tx_65_127_byte_frames;
267 u32 tx_128_255_byte_frames;
268 u32 tx_256_511_bytes_frames;
269 u32 tx_512_1023_byte_frames;
270 u32 tx_1024_1518_byte_frames;
271 u32 tx_greater_1518_byte_frames;
272 u32 eee_tx_lpi_transitions;
276 struct lan78xx_statstage64 {
278 u64 rx_alignment_errors;
279 u64 rx_fragment_errors;
280 u64 rx_jabber_errors;
281 u64 rx_undersize_frame_errors;
282 u64 rx_oversize_frame_errors;
283 u64 rx_dropped_frames;
284 u64 rx_unicast_byte_count;
285 u64 rx_broadcast_byte_count;
286 u64 rx_multicast_byte_count;
287 u64 rx_unicast_frames;
288 u64 rx_broadcast_frames;
289 u64 rx_multicast_frames;
291 u64 rx_64_byte_frames;
292 u64 rx_65_127_byte_frames;
293 u64 rx_128_255_byte_frames;
294 u64 rx_256_511_bytes_frames;
295 u64 rx_512_1023_byte_frames;
296 u64 rx_1024_1518_byte_frames;
297 u64 rx_greater_1518_byte_frames;
298 u64 eee_rx_lpi_transitions;
301 u64 tx_excess_deferral_errors;
302 u64 tx_carrier_errors;
303 u64 tx_bad_byte_count;
304 u64 tx_single_collisions;
305 u64 tx_multiple_collisions;
306 u64 tx_excessive_collision;
307 u64 tx_late_collisions;
308 u64 tx_unicast_byte_count;
309 u64 tx_broadcast_byte_count;
310 u64 tx_multicast_byte_count;
311 u64 tx_unicast_frames;
312 u64 tx_broadcast_frames;
313 u64 tx_multicast_frames;
315 u64 tx_64_byte_frames;
316 u64 tx_65_127_byte_frames;
317 u64 tx_128_255_byte_frames;
318 u64 tx_256_511_bytes_frames;
319 u64 tx_512_1023_byte_frames;
320 u64 tx_1024_1518_byte_frames;
321 u64 tx_greater_1518_byte_frames;
322 u64 eee_tx_lpi_transitions;
326 static u32 lan78xx_regs[] = {
348 #define PHY_REG_SIZE (32 * sizeof(u32))
352 struct lan78xx_priv {
353 struct lan78xx_net *dev;
355 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
356 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
357 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
358 struct mutex dataport_mutex; /* for dataport access */
359 spinlock_t rfe_ctl_lock; /* for rfe register access */
360 struct work_struct set_multicast;
361 struct work_struct set_vlan;
375 struct skb_data { /* skb->cb is one of these */
377 struct lan78xx_net *dev;
378 enum skb_state state;
384 struct usb_ctrlrequest req;
385 struct lan78xx_net *dev;
388 #define EVENT_TX_HALT 0
389 #define EVENT_RX_HALT 1
390 #define EVENT_RX_MEMORY 2
391 #define EVENT_STS_SPLIT 3
392 #define EVENT_LINK_RESET 4
393 #define EVENT_RX_PAUSED 5
394 #define EVENT_DEV_WAKING 6
395 #define EVENT_DEV_ASLEEP 7
396 #define EVENT_DEV_OPEN 8
397 #define EVENT_STAT_UPDATE 9
398 #define EVENT_DEV_DISCONNECT 10
401 struct mutex access_lock; /* for stats access */
402 struct lan78xx_statstage saved;
403 struct lan78xx_statstage rollover_count;
404 struct lan78xx_statstage rollover_max;
405 struct lan78xx_statstage64 curr_stat;
408 struct irq_domain_data {
409 struct irq_domain *irqdomain;
411 struct irq_chip *irqchip;
412 irq_flow_handler_t irq_handler;
414 struct mutex irq_lock; /* for irq bus access */
418 struct net_device *net;
419 struct usb_device *udev;
420 struct usb_interface *intf;
423 unsigned int tx_pend_data_len;
429 struct sk_buff_head rxq_free;
430 struct sk_buff_head rxq;
431 struct sk_buff_head rxq_done;
432 struct sk_buff_head rxq_overflow;
433 struct sk_buff_head txq_free;
434 struct sk_buff_head txq;
435 struct sk_buff_head txq_pend;
437 struct napi_struct napi;
439 struct delayed_work wq;
443 struct urb *urb_intr;
444 struct usb_anchor deferred;
446 struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
447 struct mutex phy_mutex; /* for phy access */
448 unsigned int pipe_in, pipe_out, pipe_intr;
450 unsigned int bulk_in_delay;
451 unsigned int burst_cap;
455 wait_queue_head_t *wait;
456 unsigned char suspend_count;
458 unsigned int maxpacket;
459 struct timer_list stat_monitor;
461 unsigned long data[5];
468 struct mii_bus *mdiobus;
469 phy_interface_t interface;
472 u8 fc_request_control;
475 struct statstage stats;
477 struct irq_domain_data domain_data;
480 /* define external phy id */
481 #define PHY_LAN8835 (0x0007C130)
482 #define PHY_KSZ9031RNX (0x00221620)
484 /* use ethtool to change the level for any given device */
485 static int msg_level = -1;
486 module_param(msg_level, int, 0);
487 MODULE_PARM_DESC(msg_level, "Override default message level");
489 static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
491 if (skb_queue_empty(buf_pool))
494 return skb_dequeue(buf_pool);
497 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
500 buf->data = buf->head;
501 skb_reset_tail_pointer(buf);
506 skb_queue_tail(buf_pool, buf);
509 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
511 struct skb_data *entry;
514 while (!skb_queue_empty(buf_pool)) {
515 buf = skb_dequeue(buf_pool);
517 entry = (struct skb_data *)buf->cb;
518 usb_free_urb(entry->urb);
519 dev_kfree_skb_any(buf);
524 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
525 size_t n_urbs, size_t urb_size,
526 struct lan78xx_net *dev)
528 struct skb_data *entry;
533 skb_queue_head_init(buf_pool);
535 for (i = 0; i < n_urbs; i++) {
536 buf = alloc_skb(urb_size, GFP_ATOMIC);
540 if (skb_linearize(buf) != 0) {
541 dev_kfree_skb_any(buf);
545 urb = usb_alloc_urb(0, GFP_ATOMIC);
547 dev_kfree_skb_any(buf);
551 entry = (struct skb_data *)buf->cb;
555 entry->num_of_packet = 0;
557 skb_queue_tail(buf_pool, buf);
563 lan78xx_free_buf_pool(buf_pool);
568 static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
570 return lan78xx_get_buf(&dev->rxq_free);
573 static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
574 struct sk_buff *rx_buf)
576 lan78xx_release_buf(&dev->rxq_free, rx_buf);
579 static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
581 lan78xx_free_buf_pool(&dev->rxq_free);
584 static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
586 return lan78xx_alloc_buf_pool(&dev->rxq_free,
587 dev->n_rx_urbs, dev->rx_urb_size, dev);
590 static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
592 return lan78xx_get_buf(&dev->txq_free);
595 static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
596 struct sk_buff *tx_buf)
598 lan78xx_release_buf(&dev->txq_free, tx_buf);
601 static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
603 lan78xx_free_buf_pool(&dev->txq_free);
606 static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
608 return lan78xx_alloc_buf_pool(&dev->txq_free,
609 dev->n_tx_urbs, dev->tx_urb_size, dev);
/* TSO seems to be having some issue with Selective Acknowledge (SACK) that
 * results in lost data never being retransmitted.
 * Disable it by default now, but adds a module parameter to enable it for
 * debug purposes (the full cause is not currently understood).
 */
617 static bool enable_tso;
618 module_param(enable_tso, bool, 0644);
619 MODULE_PARM_DESC(enable_tso, "Enables TCP segmentation offload");
621 #define INT_URB_MICROFRAMES_PER_MS 8
622 static int int_urb_interval_ms = 8;
623 module_param(int_urb_interval_ms, int, 0);
624 MODULE_PARM_DESC(int_urb_interval_ms, "Override usb interrupt urb interval");
626 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
631 if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
634 buf = kmalloc(sizeof(u32), GFP_KERNEL);
638 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
639 USB_VENDOR_REQUEST_READ_REGISTER,
640 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
641 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
642 if (likely(ret >= 0)) {
645 } else if (net_ratelimit()) {
646 netdev_warn(dev->net,
647 "Failed to read register index 0x%08x. ret = %d",
656 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
661 if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
664 buf = kmalloc(sizeof(u32), GFP_KERNEL);
671 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
672 USB_VENDOR_REQUEST_WRITE_REGISTER,
673 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
674 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
675 if (unlikely(ret < 0) &&
677 netdev_warn(dev->net,
678 "Failed to write register index 0x%08x. ret = %d",
687 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
693 ret = lan78xx_read_reg(dev, reg, &buf);
698 buf |= (mask & data);
700 ret = lan78xx_write_reg(dev, reg, buf);
707 static int lan78xx_read_stats(struct lan78xx_net *dev,
708 struct lan78xx_statstage *data)
712 struct lan78xx_statstage *stats;
716 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
720 ret = usb_control_msg(dev->udev,
721 usb_rcvctrlpipe(dev->udev, 0),
722 USB_VENDOR_REQUEST_GET_STATS,
723 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
728 USB_CTRL_SET_TIMEOUT);
729 if (likely(ret >= 0)) {
732 for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
733 le32_to_cpus(&src[i]);
737 netdev_warn(dev->net,
738 "Failed to read stat ret = %d", ret);
/* If a freshly read 32-bit hardware counter is smaller than the last saved
 * snapshot, the counter wrapped: bump the 64-bit rollover count for that
 * member. Wrapped in do/while(0) so it behaves as a single statement.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
752 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
753 struct lan78xx_statstage *stats)
755 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
756 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
757 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
758 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
759 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
760 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
761 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
762 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
763 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
764 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
765 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
766 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
767 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
768 check_counter_rollover(stats, dev->stats, rx_pause_frames);
769 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
770 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
771 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
772 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
773 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
774 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
775 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
776 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
777 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
778 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
779 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
780 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
781 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
782 check_counter_rollover(stats, dev->stats, tx_single_collisions);
783 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
784 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
785 check_counter_rollover(stats, dev->stats, tx_late_collisions);
786 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
787 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
788 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
789 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
790 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
791 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
792 check_counter_rollover(stats, dev->stats, tx_pause_frames);
793 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
794 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
795 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
796 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
797 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
798 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
799 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
800 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
801 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
803 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
806 static void lan78xx_update_stats(struct lan78xx_net *dev)
808 u32 *p, *count, *max;
811 struct lan78xx_statstage lan78xx_stats;
813 if (usb_autopm_get_interface(dev->intf) < 0)
816 p = (u32 *)&lan78xx_stats;
817 count = (u32 *)&dev->stats.rollover_count;
818 max = (u32 *)&dev->stats.rollover_max;
819 data = (u64 *)&dev->stats.curr_stat;
821 mutex_lock(&dev->stats.access_lock);
823 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
824 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
826 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
827 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
829 mutex_unlock(&dev->stats.access_lock);
831 usb_autopm_put_interface(dev->intf);
834 /* Loop until the read is completed with timeout called with phy_mutex held */
835 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
837 unsigned long start_time = jiffies;
842 ret = lan78xx_read_reg(dev, MII_ACC, &val);
843 if (unlikely(ret < 0))
846 if (!(val & MII_ACC_MII_BUSY_))
848 } while (!time_after(jiffies, start_time + HZ));
853 static inline u32 mii_access(int id, int index, int read)
857 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
858 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
860 ret |= MII_ACC_MII_READ_;
862 ret |= MII_ACC_MII_WRITE_;
863 ret |= MII_ACC_MII_BUSY_;
868 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
870 unsigned long start_time = jiffies;
875 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
876 if (unlikely(ret < 0))
879 if (!(val & E2P_CMD_EPC_BUSY_) ||
880 (val & E2P_CMD_EPC_TIMEOUT_))
882 usleep_range(40, 100);
883 } while (!time_after(jiffies, start_time + HZ));
885 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
886 netdev_warn(dev->net, "EEPROM read operation timeout");
893 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
895 unsigned long start_time = jiffies;
900 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
901 if (unlikely(ret < 0))
904 if (!(val & E2P_CMD_EPC_BUSY_))
907 usleep_range(40, 100);
908 } while (!time_after(jiffies, start_time + HZ));
910 netdev_warn(dev->net, "EEPROM is busy");
914 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
915 u32 length, u8 *data)
922 /* depends on chip, some EEPROM pins are muxed with LED function.
923 * disable & restore LED function to access EEPROM.
925 ret = lan78xx_read_reg(dev, HW_CFG, &val);
927 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
928 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
929 ret = lan78xx_write_reg(dev, HW_CFG, val);
932 retval = lan78xx_eeprom_confirm_not_busy(dev);
936 for (i = 0; i < length; i++) {
937 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
938 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
939 ret = lan78xx_write_reg(dev, E2P_CMD, val);
940 if (unlikely(ret < 0)) {
945 retval = lan78xx_wait_eeprom(dev);
949 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
950 if (unlikely(ret < 0)) {
955 data[i] = val & 0xFF;
961 if (dev->chipid == ID_REV_CHIP_ID_7800_)
962 ret = lan78xx_write_reg(dev, HW_CFG, saved);
967 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
968 u32 length, u8 *data)
973 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
974 if ((ret == 0) && (sig == EEPROM_INDICATOR))
975 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
982 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
983 u32 length, u8 *data)
990 /* depends on chip, some EEPROM pins are muxed with LED function.
991 * disable & restore LED function to access EEPROM.
993 ret = lan78xx_read_reg(dev, HW_CFG, &val);
995 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
996 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
997 ret = lan78xx_write_reg(dev, HW_CFG, val);
1000 retval = lan78xx_eeprom_confirm_not_busy(dev);
1004 /* Issue write/erase enable command */
1005 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
1006 ret = lan78xx_write_reg(dev, E2P_CMD, val);
1007 if (unlikely(ret < 0)) {
1012 retval = lan78xx_wait_eeprom(dev);
1016 for (i = 0; i < length; i++) {
1017 /* Fill data register */
1019 ret = lan78xx_write_reg(dev, E2P_DATA, val);
1025 /* Send "write" command */
1026 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1027 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1028 ret = lan78xx_write_reg(dev, E2P_CMD, val);
1034 retval = lan78xx_wait_eeprom(dev);
1043 if (dev->chipid == ID_REV_CHIP_ID_7800_)
1044 ret = lan78xx_write_reg(dev, HW_CFG, saved);
1049 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1050 u32 length, u8 *data)
1054 unsigned long timeout;
1056 lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1058 if (buf & OTP_PWR_DN_PWRDN_N_) {
1059 /* clear it and wait to be cleared */
1060 lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1062 timeout = jiffies + HZ;
1064 usleep_range(1, 10);
1065 lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1066 if (time_after(jiffies, timeout)) {
1067 netdev_warn(dev->net,
1068 "timeout on OTP_PWR_DN");
1071 } while (buf & OTP_PWR_DN_PWRDN_N_);
1074 for (i = 0; i < length; i++) {
1075 lan78xx_write_reg(dev, OTP_ADDR1,
1076 ((offset + i) >> 8) & OTP_ADDR1_15_11);
1077 lan78xx_write_reg(dev, OTP_ADDR2,
1078 ((offset + i) & OTP_ADDR2_10_3));
1080 lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1081 lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1083 timeout = jiffies + HZ;
1086 lan78xx_read_reg(dev, OTP_STATUS, &buf);
1087 if (time_after(jiffies, timeout)) {
1088 netdev_warn(dev->net,
1089 "timeout on OTP_STATUS");
1092 } while (buf & OTP_STATUS_BUSY_);
1094 lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1096 data[i] = (u8)(buf & 0xFF);
1102 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1103 u32 length, u8 *data)
1107 unsigned long timeout;
1109 lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1111 if (buf & OTP_PWR_DN_PWRDN_N_) {
1112 /* clear it and wait to be cleared */
1113 lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1115 timeout = jiffies + HZ;
1118 lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1119 if (time_after(jiffies, timeout)) {
1120 netdev_warn(dev->net,
1121 "timeout on OTP_PWR_DN completion");
1124 } while (buf & OTP_PWR_DN_PWRDN_N_);
1127 /* set to BYTE program mode */
1128 lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1130 for (i = 0; i < length; i++) {
1131 lan78xx_write_reg(dev, OTP_ADDR1,
1132 ((offset + i) >> 8) & OTP_ADDR1_15_11);
1133 lan78xx_write_reg(dev, OTP_ADDR2,
1134 ((offset + i) & OTP_ADDR2_10_3));
1135 lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1136 lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1137 lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1139 timeout = jiffies + HZ;
1142 lan78xx_read_reg(dev, OTP_STATUS, &buf);
1143 if (time_after(jiffies, timeout)) {
1144 netdev_warn(dev->net,
1145 "Timeout on OTP_STATUS completion");
1148 } while (buf & OTP_STATUS_BUSY_);
1154 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1155 u32 length, u8 *data)
1160 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1163 if (sig == OTP_INDICATOR_2)
1165 else if (sig != OTP_INDICATOR_1)
1168 ret = lan78xx_read_raw_otp(dev, offset, length, data);
1174 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1178 for (i = 0; i < 100; i++) {
1181 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1182 if (unlikely(ret < 0))
1185 if (dp_sel & DP_SEL_DPRDY_)
1188 usleep_range(40, 100);
1191 netdev_warn(dev->net, "%s timed out", __func__);
1196 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1197 u32 addr, u32 length, u32 *buf)
1199 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1203 if (usb_autopm_get_interface(dev->intf) < 0)
1206 mutex_lock(&pdata->dataport_mutex);
1208 ret = lan78xx_dataport_wait_not_busy(dev);
1212 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1214 dp_sel &= ~DP_SEL_RSEL_MASK_;
1215 dp_sel |= ram_select;
1216 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1218 for (i = 0; i < length; i++) {
1219 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1221 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1223 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1225 ret = lan78xx_dataport_wait_not_busy(dev);
1231 mutex_unlock(&pdata->dataport_mutex);
1232 usb_autopm_put_interface(dev->intf);
1237 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1238 int index, u8 addr[ETH_ALEN])
1242 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1244 temp = addr[2] | (temp << 8);
1245 temp = addr[1] | (temp << 8);
1246 temp = addr[0] | (temp << 8);
1247 pdata->pfilter_table[index][1] = temp;
1249 temp = addr[4] | (temp << 8);
1250 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1251 pdata->pfilter_table[index][0] = temp;
1255 /* returns hash bit number for given MAC address */
1256 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1258 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1261 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1263 struct lan78xx_priv *pdata =
1264 container_of(param, struct lan78xx_priv, set_multicast);
1265 struct lan78xx_net *dev = pdata->dev;
1268 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1271 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1272 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1274 for (i = 1; i < NUM_OF_MAF; i++) {
1275 lan78xx_write_reg(dev, MAF_HI(i), 0);
1276 lan78xx_write_reg(dev, MAF_LO(i),
1277 pdata->pfilter_table[i][1]);
1278 lan78xx_write_reg(dev, MAF_HI(i),
1279 pdata->pfilter_table[i][0]);
1282 lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1285 static void lan78xx_set_multicast(struct net_device *netdev)
1287 struct lan78xx_net *dev = netdev_priv(netdev);
1288 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1289 unsigned long flags;
1292 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1294 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1295 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1297 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1298 pdata->mchash_table[i] = 0;
1300 /* pfilter_table[0] has own HW address */
1301 for (i = 1; i < NUM_OF_MAF; i++) {
1302 pdata->pfilter_table[i][0] = 0;
1303 pdata->pfilter_table[i][1] = 0;
1306 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1308 if (dev->net->flags & IFF_PROMISC) {
1309 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1310 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1312 if (dev->net->flags & IFF_ALLMULTI) {
1313 netif_dbg(dev, drv, dev->net,
1314 "receive all multicast enabled");
1315 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1319 if (netdev_mc_count(dev->net)) {
1320 struct netdev_hw_addr *ha;
1323 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1325 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1328 netdev_for_each_mc_addr(ha, netdev) {
1329 /* set first 32 into Perfect Filter */
1331 lan78xx_set_addr_filter(pdata, i, ha->addr);
1333 u32 bitnum = lan78xx_hash(ha->addr);
1335 pdata->mchash_table[bitnum / 32] |=
1336 (1 << (bitnum % 32));
1337 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1343 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1345 /* defer register writes to a sleepable context */
1346 schedule_work(&pdata->set_multicast);
1349 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1350 u16 lcladv, u16 rmtadv)
1352 u32 flow = 0, fct_flow = 0;
1355 if (dev->fc_autoneg)
1356 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1358 cap = dev->fc_request_control;
1360 if (cap & FLOW_CTRL_TX)
1361 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1363 if (cap & FLOW_CTRL_RX)
1364 flow |= FLOW_CR_RX_FCEN_;
1366 if (dev->udev->speed == USB_SPEED_SUPER)
1367 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1368 else if (dev->udev->speed == USB_SPEED_HIGH)
1369 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1371 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1372 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1373 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1375 lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1377 /* threshold value should be set before enabling flow */
1378 lan78xx_write_reg(dev, FLOW, flow);
1383 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
/* Reset the MAC and poll (bounded by ~1s) for the reset bit to self-clear.
 * Runs under phy_mutex so no MDIO transaction can race with the reset.
 * NOTE(review): excerpt omits declarations, error checks, the MAC_CR_RST_
 * set, and parts of the poll loop; comments only added here.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
	unsigned long start_time = jiffies;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	ret = lan78xx_write_reg(dev, MAC_CR, val);

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	/* reset done once MAC_CR_RST_ self-clears */
	if (!(val & MAC_CR_RST_)) {
	} while (!time_after(jiffies, start_time + HZ));

	mutex_unlock(&dev->phy_mutex);
/* Handle a PHY link change: on link-down reset the MAC and stop the stats
 * timer; on link-up configure USB U1/U2 power states, refresh flow control
 * from the negotiated advertisement, restart Rx URBs and kick NAPI.
 * NOTE(review): excerpt omits error-handling and a few declarations.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))

	/* Acknowledge any pending PHY interrupt, lest it be the last */
	phy_read(phydev, LAN88XX_INT_STS);

	/* snapshot link state under the phydev lock */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		ret = lan78xx_mac_reset(dev);

		/* no point updating stats while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 (keep U1) at gigabit speed */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);

		/* local and link-partner advertisements for pause resolution */
		ladv = phy_read(phydev, MII_ADVERTISE);
		radv = phy_read(phydev, MII_LPA);

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,

		if (!timer_pending(&dev->stat_monitor)) {
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);

		/* resume reception now that the link is up */
		lan78xx_rx_urb_submit_all(dev);

		napi_schedule(&dev->napi);
1527 /* some work can't be done in tasklets, so we use keventd
1529 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1530 * but tasklet_schedule() doesn't. hope the failure is rare.
1532 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1534 set_bit(work, &dev->flags);
1535 if (!schedule_delayed_work(&dev->wq, 0))
1536 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint URB completion: decode the 4-byte status word.
 * PHY interrupts are deferred to keventd (register access may sleep) and,
 * when an IRQ domain is set up, forwarded to the mapped PHY virq.
 * NOTE(review): excerpt omits the early-return and else structure.
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);

	/* status word arrives little-endian, possibly unaligned */
	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq_safe(dev->domain_data.phyirq);
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool: size of the EEPROM image exposed to userspace. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
	return MAX_EEPROM_SIZE;
/* ethtool: read a range of raw EEPROM bytes, holding a USB PM reference
 * so the device cannot autosuspend mid-transfer.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
	struct lan78xx_net *dev = netdev_priv(netdev);

	ret = usb_autopm_get_interface(dev->intf);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);
/* ethtool: write EEPROM or OTP depending on the magic supplied by the
 * caller.  OTP writes are further gated on offset 0 carrying a valid
 * OTP indicator byte.
 */
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
	struct lan78xx_net *dev = netdev_priv(netdev);

	ret = usb_autopm_get_interface(dev->intf);

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);
1613 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1616 if (stringset == ETH_SS_STATS)
1617 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
/* ethtool: number of entries in the statistics string set.
 * NOTE(review): the non-stats return path is not visible in this excerpt.
 */
static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool: refresh the hardware counters, then copy the current snapshot
 * out under the stats lock.  The snapshot layout matches lan78xx_gstrings.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
/* ethtool: report Wake-on-LAN capability and current settings.
 * WoL is only advertised when the device supports remote wakeup
 * (USB_CFG_RMT_WKP_ set in USB_CFG0).
 * NOTE(review): excerpt omits the error path and the no-wakeup else branch.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
	struct lan78xx_net *dev = netdev_priv(netdev);

	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {

	if (buf & USB_CFG_RMT_WKP_) {
		wol->supported = WAKE_ALL;
		wol->wolopts = pdata->wol;

	usb_autopm_put_interface(dev->intf);
/* ethtool: store the requested WoL options, arm/disarm USB wakeup and
 * propagate the request to the PHY.
 * NOTE(review): excerpt omits the -EINVAL return for unsupported options.
 */
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	ret = usb_autopm_get_interface(dev->intf);

	/* reject any option outside the supported WAKE_ALL mask */
	if (wol->wolopts & ~WAKE_ALL)

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);
/* ethtool: report Energy-Efficient-Ethernet state.  PHY-level data comes
 * from phylib; MAC-level enablement and the Tx LPI timer come from MAC_CR
 * and EEE_TX_LPI_REQ_DLY.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;

	ret = usb_autopm_get_interface(dev->intf);

	ret = phy_ethtool_get_eee(phydev, edata);

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both sides advertise a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;

	usb_autopm_put_interface(dev->intf);
/* ethtool: enable/disable EEE in the MAC, program the Tx LPI delay and
 * forward the advertisement to the PHY.
 * NOTE(review): excerpt omits the else structure and return value checks.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
	struct lan78xx_net *dev = netdev_priv(net);

	ret = usb_autopm_get_interface(dev->intf);

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the uSec unit */
		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

	usb_autopm_put_interface(dev->intf);
/* ethtool: return the current link state after a fresh PHY status read.
 * phydev->lock serializes against the phylib state machine.
 */
static u32 lan78xx_get_link(struct net_device *net)
	mutex_lock(&net->phydev->lock);
	phy_read_status(net->phydev);
	link = net->phydev->link;
	mutex_unlock(&net->phydev->lock);
1773 static void lan78xx_get_drvinfo(struct net_device *net,
1774 struct ethtool_drvinfo *info)
1776 struct lan78xx_net *dev = netdev_priv(net);
1778 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1779 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
/* ethtool: current netif message-level bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
/* ethtool: set the netif message-level bitmap. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
/* ethtool: fetch link settings from the PHY, holding a USB PM reference. */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;

	ret = usb_autopm_get_interface(dev->intf);

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);
/* ethtool: apply link settings via phylib.  When autoneg is off, bounce
 * the link (momentary BMCR loopback) so the forced mode takes effect.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;

	ret = usb_autopm_get_interface(dev->intf);

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		/* restore BMCR; link comes back in the forced mode */
		phy_write(phydev, MII_BMCR, temp);

	usb_autopm_put_interface(dev->intf);
/* ethtool: report the requested pause configuration (not the negotiated
 * result — that lives in lan78xx_update_flowcontrol()).
 */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
/* ethtool: record the requested pause configuration and, when autoneg is
 * active, rebuild the pause bits in the PHY advertisement and restart
 * negotiation via phylib.
 * NOTE(review): excerpt omits the error return for pause-autoneg without
 * link autoneg and the function's return.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg requires link autoneg to be enabled */
	if (pause->autoneg && !ecmd.base.autoneg) {

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };

		/* rebuild Pause/Asym_Pause bits from the new request */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);

	dev->fc_autoneg = pause->autoneg;
1905 static int lan78xx_get_regs_len(struct net_device *netdev)
1907 if (!netdev->phydev)
1908 return (sizeof(lan78xx_regs));
1910 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
/* ethtool: dump MAC registers (per lan78xx_regs[]) followed, when a PHY is
 * attached, by its 32 MII registers.
 * NOTE(review): excerpt omits part of the signature and local declarations.
 */
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
/* ethtool operations table; wired into the netdev at registration time. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
/* Establish the device MAC address.  Priority: address already in the
 * RX_ADDR registers, then platform/Device Tree, then EEPROM/OTP, and
 * finally a random address.  The chosen address is written back to the
 * RX_ADDR registers and perfect filter slot 0.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
	u32 addr_lo, addr_hi;

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the little-endian register pair into a byte array */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");

	addr_lo = addr[0] | (addr[1] << 8) |
		  (addr[2] << 16) | (addr[3] << 24);
	addr_hi = addr[4] | (addr[5] << 8);

	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	/* program perfect filter slot 0 with the own address */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	eth_hw_addr_set(dev->net, addr);
/* MDIO read and write wrappers for phylib */

/* Read one MII register through the MAC's MII_ACC/MII_DATA interface.
 * Serialized by phy_mutex; holds a USB PM reference for the duration.
 * NOTE(review): excerpt omits error-path gotos and the return.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
	struct lan78xx_net *dev = bus->priv;

	ret = usb_autopm_get_interface(dev->intf);

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* register value lives in the low 16 bits of MII_DATA */
	ret = (int)(val & 0xFFFF);

	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
/* Write one MII register: place the value in MII_DATA, then trigger the
 * transaction via MII_ACC.  Locking mirrors lan78xx_mdiobus_read().
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
	struct lan78xx_net *dev = bus->priv;

	ret = usb_autopm_get_interface(dev->intf);

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);

	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);

	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
/* Allocate and register the MDIO bus.  The scan mask depends on the chip:
 * LAN7800/7850 have an internal PHY fixed at address 1; LAN7801 scans
 * PHYAD[2..0].  The bus is registered against an optional "mdio" DT node.
 * NOTE(review): excerpt omits break statements and error-path structure.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
	struct device_node *node;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* bus id derived from the USB topology so it is unique per device */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
		netdev_err(dev->net, "can't register MDIO bus\n");

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);

	mdiobus_free(dev->mdiobus);
2126 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2128 mdiobus_unregister(dev->mdiobus);
2129 mdiobus_free(dev->mdiobus);
2132 static void lan78xx_link_status_change(struct net_device *net)
2134 struct phy_device *phydev = net->phydev;
2136 phy_print_status(phydev);
/* irq_domain .map callback: bind our irqchip and handler to a new virq.
 * NOTE(review): excerpt omits the return statement.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);
2151 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2153 irq_set_chip_and_handler(irq, NULL, NULL);
2154 irq_set_chip_data(irq, NULL);
/* irq_domain callbacks for the interrupt-endpoint IRQ domain.
 * NOTE(review): the member initializers are not visible in this excerpt.
 */
static const struct irq_domain_ops chip_domain_ops = {
2162 static void lan78xx_irq_mask(struct irq_data *irqd)
2164 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2166 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2169 static void lan78xx_irq_unmask(struct irq_data *irqd)
2171 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2173 data->irqenable |= BIT(irqd_to_hwirq(irqd));
2176 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2178 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2180 mutex_lock(&data->irq_lock);
/* irqchip .irq_bus_sync_unlock: write the shadow enable mask to INT_EP_CTL
 * (only if it changed) and release the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic contex.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
/* irqchip backing the interrupt-endpoint IRQ domain; mask/unmask only
 * touch the shadow state, bus_lock/bus_sync_unlock do the (sleeping)
 * register I/O.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
/* Create an IRQ domain over the device's interrupt endpoint and map the
 * PHY interrupt line.  The initial shadow mask is seeded from INT_EP_CTL.
 * NOTE(review): excerpt omits declarations, error handling and return.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the shadow enable mask with the current hardware state */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);

	/* create mapping for PHY interrupt */
	irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		irq_domain_remove(irqdomain);

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;
/* Tear down the interrupt-endpoint IRQ domain: dispose the PHY mapping,
 * remove the domain, and clear the bookkeeping so teardown is idempotent.
 */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);

	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
/* PHY fixup for the external LAN8835: route its multi-function pin to
 * IRQ_N and enable the MAC-side RGMII TXC delay / DLL tuning.
 * NOTE(review): excerpt omits the PCS register bit manipulation lines.
 */
static int lan8835_fixup(struct phy_device *phydev)
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	lan78xx_write_reg(dev, MAC_RGMII_ID,
			  MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
/* PHY fixup for the external KSZ9031RNX: program RGMII pad skews so the
 * PHY provides the Rx clock delay (MAC then runs RGMII_RXID).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* LAN7801-specific PHY setup: use the first PHY found on the MDIO bus; if
 * none, register a 1G fixed-link PHY and configure the MAC's RGMII clocks.
 * For a real external PHY, register the KSZ9031RNX / LAN8835 fixups.
 * NOTE(review): excerpt omits several branch and return lines.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
	struct fixed_phy_status fphy_status = {
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* MAC provides the TXC delay for the fixed link */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);

		netdev_err(dev->net, "no PHY driver found\n");

		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
2354 static int lan78xx_phy_init(struct lan78xx_net *dev)
2356 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2359 struct phy_device *phydev;
2361 switch (dev->chipid) {
2362 case ID_REV_CHIP_ID_7801_:
2363 phydev = lan7801_phy_init(dev);
2365 netdev_err(dev->net, "lan7801: PHY Init Failed");
2370 case ID_REV_CHIP_ID_7800_:
2371 case ID_REV_CHIP_ID_7850_:
2372 phydev = phy_find_first(dev->mdiobus);
2374 netdev_err(dev->net, "no PHY found\n");
2377 phydev->is_internal = true;
2378 dev->interface = PHY_INTERFACE_MODE_GMII;
2382 netdev_err(dev->net, "Unknown CHIP ID found\n");
2386 /* if phyirq is not set, use polling mode in phylib */
2387 if (dev->domain_data.phyirq > 0)
2388 phydev->irq = dev->domain_data.phyirq;
2390 phydev->irq = PHY_POLL;
2391 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2393 /* set to AUTOMDIX */
2394 phydev->mdix = ETH_TP_MDI_AUTO;
2396 ret = phy_connect_direct(dev->net, phydev,
2397 lan78xx_link_status_change,
2400 netdev_err(dev->net, "can't attach PHY to %s\n",
2402 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2403 if (phy_is_pseudo_fixed_link(phydev)) {
2404 fixed_phy_unregister(phydev);
2406 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2408 phy_unregister_fixup_for_uid(PHY_LAN8835,
2415 /* MAC doesn't support 1000T Half */
2416 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2418 /* support both flow controls */
2419 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2420 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2421 phydev->advertising);
2422 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2423 phydev->advertising);
2424 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2425 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2426 linkmode_or(phydev->advertising, fc, phydev->advertising);
2428 if (of_property_read_bool(phydev->mdio.dev.of_node,
2429 "microchip,eee-enabled")) {
2430 struct ethtool_eee edata;
2431 memset(&edata, 0, sizeof(edata));
2432 edata.cmd = ETHTOOL_SEEE;
2433 edata.advertised = ADVERTISED_1000baseT_Full |
2434 ADVERTISED_100baseT_Full;
2435 edata.eee_enabled = true;
2436 edata.tx_lpi_enabled = true;
2437 if (of_property_read_u32(dev->udev->dev.of_node,
2438 "microchip,tx-lpi-timer",
2439 &edata.tx_lpi_timer))
2440 edata.tx_lpi_timer = 600; /* non-aggressive */
2441 (void)lan78xx_set_eee(dev->net, &edata);
2444 if (phydev->mdio.dev.of_node) {
2448 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2449 "microchip,led-modes",
2452 /* Ensure the appropriate LEDs are enabled */
2453 lan78xx_read_reg(dev, HW_CFG, ®);
2454 reg &= ~(HW_CFG_LED0_EN_ |
2458 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2459 (len > 1) * HW_CFG_LED1_EN_ |
2460 (len > 2) * HW_CFG_LED2_EN_ |
2461 (len > 3) * HW_CFG_LED3_EN_;
2462 lan78xx_write_reg(dev, HW_CFG, reg);
2466 genphy_config_aneg(phydev);
2468 dev->fc_autoneg = phydev->autoneg;
/* Program the maximum Rx frame size.  The receiver is disabled around the
 * change when it was running, then re-enabled.
 * NOTE(review): excerpt omits declarations and the re-enable condition.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
	lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	/* receiver must be off while the max size field changes */
	buf &= ~MAC_RX_RXEN_;
	lan78xx_write_reg(dev, MAC_RX, buf);

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	lan78xx_write_reg(dev, MAC_RX, buf);

	buf |= MAC_RX_RXEN_;
	lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every not-yet-unlinked URB on the queue.  The
 * queue lock is dropped around usb_unlink_urb() because completion
 * handlers take the same lock; the walk restarts after each unlink.
 * NOTE(review): excerpt omits declarations, the usb_get_urb/put pair and
 * the count/return lines.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;

		/* find the first entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)

		entry->state = unlink_start;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */

		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);

		spin_lock_irqsave(&q->lock, flags);

	spin_unlock_irqrestore(&q->lock, flags);
/* netdev .ndo_change_mtu: update the hardware max frame length and the
 * netdev MTU, holding a USB PM reference for the register write.
 * NOTE(review): excerpt omits the error return for the ZLP-alignment check.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
	struct lan78xx_net *dev = netdev_priv(netdev);
	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((max_frame_len % dev->maxpacket) == 0)

	ret = usb_autopm_get_interface(dev->intf);

	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
	netdev->mtu = new_mtu;

	usb_autopm_put_interface(dev->intf);
/* netdev .ndo_set_mac_address: validate and program a new MAC address
 * into RX_ADDR and perfect filter slot 0.
 * NOTE(review): excerpt omits the busy-interface error return.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;

	/* address may not change while the interface is running */
	if (netif_running(netdev))

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	/* pack the byte address into the little-endian register pair */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	/* Added to support MAC address changes */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
/* Enable or disable Rx checksum offload engine */

/* netdev .ndo_set_features: translate RXCSUM / VLAN-strip / VLAN-filter
 * feature bits into the RFE_CTL shadow under the spinlock, then write
 * the register outside the lock.
 * NOTE(review): excerpt omits the else lines between branches.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2635 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2637 struct lan78xx_priv *pdata =
2638 container_of(param, struct lan78xx_priv, set_vlan);
2639 struct lan78xx_net *dev = pdata->dev;
2641 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2642 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* netdev .ndo_vlan_rx_add_vid: set the VID's bit in the shadow table
 * (one bit per VID, 32 VIDs per dword) and defer the hardware write.
 */
static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);
/* netdev .ndo_vlan_rx_kill_vid: clear the VID's bit in the shadow table
 * and defer the hardware write (mirror of lan78xx_vlan_rx_add_vid()).
 */
static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);
/* Initialize USB Latency Tolerance Messaging registers.  When LTM is
 * enabled in USB_CFG1, values are loaded from EEPROM (or OTP) if a valid
 * 24-byte block is present; otherwise the zeroed defaults are written.
 * NOTE(review): excerpt omits the read arguments and some branch lines.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {

		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {	/* 24 bytes = 6 LTM registers */
				ret = lan78xx_read_raw_eeprom(dev,
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Choose URB sizes/counts, bulk-in delay and burst cap based on the USB
 * bus speed (SuperSpeed / High / Full); unknown speeds are rejected.
 * NOTE(review): excerpt omits break statements and the return lines.
 */
static int lan78xx_urb_config_init(struct lan78xx_net *dev)
	switch (dev->udev->speed) {
	case USB_SPEED_SUPER:
		dev->rx_urb_size = RX_SS_URB_SIZE;
		dev->tx_urb_size = TX_SS_URB_SIZE;
		dev->n_rx_urbs = RX_SS_URB_NUM;
		dev->n_tx_urbs = TX_SS_URB_NUM;
		dev->bulk_in_delay = SS_BULK_IN_DELAY;
		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
	case USB_SPEED_HIGH:
		dev->rx_urb_size = RX_HS_URB_SIZE;
		dev->tx_urb_size = TX_HS_URB_SIZE;
		dev->n_rx_urbs = RX_HS_URB_NUM;
		dev->n_tx_urbs = TX_HS_URB_NUM;
		dev->bulk_in_delay = HS_BULK_IN_DELAY;
		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
	case USB_SPEED_FULL:
		dev->rx_urb_size = RX_FS_URB_SIZE;
		dev->tx_urb_size = TX_FS_URB_SIZE;
		dev->n_rx_urbs = RX_FS_URB_NUM;
		dev->n_tx_urbs = TX_FS_URB_NUM;
		dev->bulk_in_delay = FS_BULK_IN_DELAY;
		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;

		netdev_warn(dev->net, "USB bus speed not supported\n");
/* Enable a hardware block by setting its enable bit(s) via
 * read-modify-write of the given register.
 */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
/* Disable a hardware block: clear its enable bit(s) if set, then poll for
 * the corresponding disabled-status bit(s), bounded by HW_DISABLE_TIMEOUT.
 * Returns 0 on success, -ETIME if the block never reported disabled.
 * NOTE(review): excerpt omits declarations and several loop/branch lines.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
	unsigned long timeout;
	bool stopped = true;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);

	if (buf & hw_enabled) {
		ret = lan78xx_write_reg(dev, reg, buf);

		/* poll for the disabled-status bit(s) */
		timeout = jiffies + HW_DISABLE_TIMEOUT;
			ret = lan78xx_read_reg(dev, reg, &buf);

			if (buf & hw_disabled)
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));

	ret = stopped ? 0 : -ETIME;
2805 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2807 return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
/* Bring up the Tx data path: enable the MAC transmitter first, then the
 * Tx FIFO, so the FIFO never feeds a disabled MAC.
 *
 * NOTE(review): the error checks between the two enables and the final
 * return are elided in this extract.
 */
static int lan78xx_start_tx_path(struct lan78xx_net *dev)

	netif_dbg(dev, drv, dev->net, "start tx path");

	/* Start the MAC transmitter */
	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);

	/* Start the Tx FIFO */
	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
/* Tear down the Tx data path in the reverse order of
 * lan78xx_start_tx_path(): Tx FIFO first, then the MAC transmitter.
 */
static int lan78xx_stop_tx_path(struct lan78xx_net *dev)

	netif_dbg(dev, drv, dev->net, "stop tx path");

	/* Stop the Tx FIFO */
	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);

	/* Stop the MAC transmitter */
	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2852 /* The caller must ensure the Tx path is stopped before calling
2853 * lan78xx_flush_tx_fifo().
2855 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2857 return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
/* Bring up the Rx data path: enable the Rx FIFO first, then the MAC
 * receiver — the mirror image of the Tx start order.
 */
static int lan78xx_start_rx_path(struct lan78xx_net *dev)

	netif_dbg(dev, drv, dev->net, "start rx path");

	/* Start the Rx FIFO */
	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);

	/* Start the MAC receiver*/
	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
/* Tear down the Rx data path: stop the MAC receiver first so no new
 * frames enter, then stop the Rx FIFO.
 */
static int lan78xx_stop_rx_path(struct lan78xx_net *dev)

	netif_dbg(dev, drv, dev->net, "stop rx path");

	/* Stop the MAC receiver */
	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);

	/* Stop the Rx FIFO */
	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2902 /* The caller must ensure the Rx path is stopped before calling
2903 * lan78xx_flush_rx_fifo().
2905 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2907 return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
/* Full device initialisation after (re)attach.
 *
 * Performs a LiteReset, reloads the MAC address, caches the chip ID and
 * revision, configures USB/LTM/burst parameters, sizes the FIFOs, clears
 * interrupts and flow control, programs the receive filter, resets the
 * PHY and finally configures MAC_CR and the maximum Rx frame length.
 *
 * NOTE(review): per-call error checks, loop openers and the final return
 * are elided in this extract; the flow below shows only the visible steps.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;

	/* Probe for configuration sources (zero-length reads as presence tests) */
	has_eeprom = !lan78xx_read_eeprom(dev, 0, 0, NULL);
	has_otp = !lan78xx_read_otp(dev, 0, 0, NULL);

	/* Issue a LiteReset and poll until the self-clearing bit drops */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Latency Tolerance Messaging setup */
	lan78xx_init_ltm(dev);

	/* Per-speed URB/burst parameters chosen by lan78xx_urb_config_init() */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);

	/* If no valid EEPROM and no valid OTP, enable the LEDs by default */
	if (!has_eeprom && !has_otp)
		buf |= HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes (in 512-byte units, highest addressable entry) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* Clear any latched interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);

	ret = lan78xx_write_reg(dev, FLOW, 0);

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* Reset the PHY and wait for PHY_RST to clear and READY to assert */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;

	/* If no valid EEPROM and no valid OTP, enable AUTO negotiation */
	if (!has_eeprom && !has_otp)
		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));
/* Prime the statistics machinery before the first hardware readout.
 *
 * Seeds rollover_max for every counter (the loop fills the struct as a
 * flat u32 array — the elided loop body presumably stores the 20-bit
 * maximum; TODO confirm), then overrides the fields that are full 32-bit
 * counters in hardware, and schedules the first deferred stats update.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)

	/* These counters are full 32 bits wide in hardware */
	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	/* Ask the deferred worker to pull fresh counters from hardware */
	set_bit(EVENT_STAT_UPDATE, &dev->flags);
/* ndo_open: bring the interface up.
 *
 * Wakes the device (autopm), starts the PHY, submits the interrupt URB
 * used for link-change notification, flushes both FIFOs, starts the
 * Tx/Rx data paths, enables NAPI and defers a link reset to the worker.
 *
 * NOTE(review): the error-unwind labels between these steps are elided
 * in this extract.
 */
static int lan78xx_open(struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "open device");

	/* Keep the USB device awake while configuring it */
	ret = usb_autopm_get_interface(dev->intf);

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);

	ret = lan78xx_flush_rx_fifo(dev);
	ret = lan78xx_flush_tx_fifo(dev);

	ret = lan78xx_start_tx_path(dev);
	ret = lan78xx_start_rx_path(dev);

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* Assume link down until the deferred link reset reports otherwise */
	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

	mutex_unlock(&dev->dev_mutex);

	usb_autopm_put_interface(dev->intf);
/* Cancel all in-flight Tx/Rx URBs and drain the driver's SKB queues.
 *
 * Registers an on-stack waitqueue in dev->wait so URB completion paths
 * can wake us, unlinks everything in txq/rxq, then sleeps in short
 * UNLINK_TIMEOUT_MS intervals until both queues are empty. Finally
 * releases all buffers still sitting in rxq_done and purges the
 * overflow/pending queues.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		/* Return the URB buffer to the free pool, don't free it */
		lan78xx_release_rx_buf(dev, skb);

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
/* ndo_stop: take the interface down.
 *
 * Stops the stats timer, quiesces NAPI and the stack queue, cancels all
 * URBs, stops the Tx/Rx hardware paths and the PHY, then neuters the
 * deferred worker by clearing its event bits before cancelling it.
 */
static int lan78xx_stop(struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);
/* Move a completed Rx SKB from its active @list onto rxq_done and kick
 * NAPI if the done queue was previously empty.
 *
 * Records the new @state in the SKB's control block and returns the
 * previous state so the caller can detect unlinked buffers.
 *
 * Locking: list->lock is taken with IRQs saved; the lock is then swapped
 * for rxq_done.lock WITHOUT re-enabling interrupts — the saved @flags are
 * only restored on the final unlock. This asymmetric pairing is
 * intentional; do not "fix" it into matched save/restore pairs.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	/* Only reschedule NAPI on the empty -> non-empty transition */
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
/* Tx URB completion handler (interrupt context).
 *
 * Updates Tx statistics, classifies any error status, releases the URB
 * buffer back to the free pool and reschedules NAPI if more pending Tx
 * data is waiting with no URB in flight.
 *
 * NOTE(review): the switch framing and several case labels (e.g. the
 * -EPIPE halt case) are elided in this extract.
 */
static void tx_complete(struct urb *urb)
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
			/* Endpoint stalled: ask the worker to clear the halt */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);

		/* software-driven interface shutdown */
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);

			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);

	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	/* Return the URB buffer to the free pool */
	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
3350 static void lan78xx_queue_skb(struct sk_buff_head *list,
3351 struct sk_buff *newsk, enum skb_state state)
3353 struct skb_data *entry = (struct skb_data *)newsk->cb;
3355 __skb_queue_tail(list, newsk);
3356 entry->state = state;
3359 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3361 return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3364 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3366 return dev->tx_pend_data_len;
3369 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3370 struct sk_buff *skb,
3371 unsigned int *tx_pend_data_len)
3373 unsigned long flags;
3375 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3377 __skb_queue_tail(&dev->txq_pend, skb);
3379 dev->tx_pend_data_len += skb->len;
3380 *tx_pend_data_len = dev->tx_pend_data_len;
3382 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3385 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3386 struct sk_buff *skb,
3387 unsigned int *tx_pend_data_len)
3389 unsigned long flags;
3391 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3393 __skb_queue_head(&dev->txq_pend, skb);
3395 dev->tx_pend_data_len += skb->len;
3396 *tx_pend_data_len = dev->tx_pend_data_len;
3398 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3401 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3402 struct sk_buff **skb,
3403 unsigned int *tx_pend_data_len)
3405 unsigned long flags;
3407 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3409 *skb = __skb_dequeue(&dev->txq_pend);
3411 dev->tx_pend_data_len -= (*skb)->len;
3412 *tx_pend_data_len = dev->tx_pend_data_len;
3414 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
/* ndo_start_xmit: queue @skb on txq_pend and drive the Tx machinery.
 *
 * Wakes the deferred worker if the device is autosuspended, timestamps
 * the SKB, appends it to the pending queue and schedules NAPI to build
 * URBs. Throttles the stack queue when pending data exceeds the free
 * URB capacity. Always returns NETDEV_TX_OK — the SKB is owned by the
 * driver from here on.
 *
 * NOTE(review): the return-type line (netdev_tx_t) appears truncated by
 * extraction just above this signature.
 */
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* If resuming from autosuspend, let the worker run first */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);

	return NETDEV_TX_OK;
/* One-time driver/device binding: allocate private state, set up netdev
 * feature flags, the IRQ domain, registers (via lan78xx_reset) and the
 * MDIO bus.
 *
 * NOTE(review): error checks (e.g. the NULL check after the pdata
 * allocation — the netdev_warn below is clearly inside an elided
 * `if (!pdata)`), goto labels and returns are missing from this extract.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
	struct lan78xx_priv *pdata = NULL;

	/* Private data is carried in dev->data[0] as an opaque pointer */
	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	/* Build the feature set from the compile-time defaults */
	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE) {
		dev->net->features |= NETIF_F_SG;
		/* Use module parameter to control TCP segmentation offload as
		 * it appears to cause issues.
		 */
			dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (DEFAULT_VLAN_RX_OFFLOAD)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (DEFAULT_VLAN_FILTER_ENABLE)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Everything enabled by default is also user-toggleable */
	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);

	/* Init all registers */
	ret = lan78xx_reset(dev);
		netdev_warn(dev->net, "Registers INIT FAILED....");

	ret = lan78xx_mdio_init(dev);
		netdev_warn(dev->net, "MDIO INIT FAILED.....");

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	/* Error unwind: undo in reverse order of setup */
	lan78xx_remove_irq_domain(dev);

	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
/* Undo lan78xx_bind(): tear down the IRQ domain and MDIO bus, cancel the
 * deferred filter-update works and free the private data.
 */
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_irq_domain(dev);

	lan78xx_remove_mdio(dev);

	/* Works may reference pdata; stop them before freeing it */
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply the hardware Rx checksum result carried in the RX command words
 * to @skb, or fall back to software checksumming when the hardware
 * result cannot be trusted.
 *
 * NOTE(review): the `} else {` between the CHECKSUM_NONE assignment and
 * the CHECKSUM_COMPLETE path is elided in this extract.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
		/* Hardware checksum lives in the high 16 bits of RX_CMD_B */
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
3577 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3578 struct sk_buff *skb,
3579 u32 rx_cmd_a, u32 rx_cmd_b)
3581 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3582 (rx_cmd_a & RX_CMD_A_FVTG_))
3583 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3584 (rx_cmd_b & 0xffff));
/* Hand a fully-built Rx frame to the network stack via GRO.
 *
 * Updates Rx stats, resolves the protocol, clears the driver's control
 * block bookkeeping and lets the timestamping core defer delivery if it
 * claims the SKB.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	/* eth_type_trans() also pulls the ethernet header off skb->data */
	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* If the timestamping core takes ownership, delivery happens later */
	if (skb_defer_rx_timestamp(skb))

	napi_gro_receive(&dev->napi, skb);
/* Parse a completed Rx URB buffer which may contain multiple frames.
 *
 * Each frame is prefixed by three little-endian command words
 * (RX_CMD_A/B/C); frames are 4-byte aligned with RXW_PADDING applied.
 * Each good frame is copied into a fresh NAPI SKB and delivered, or
 * parked on rxq_overflow once the NAPI @budget is spent.
 *
 * NOTE(review): several early-return/`break` lines and the `} else {`
 * between the error case and the delivery path are elided here.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
	if (skb->len < RX_SKB_MIN_LEN)

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",

		/* RED flags a hardware-detected receive error for the frame */
		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",

			/* Strip the trailing frame check sequence */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				skb_queue_tail(&dev->rxq_overflow, skb2);

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		skb_pull(skb, align_count);
/* Wrapper around lan78xx_rx(): count a drop when the URB buffer could
 * not be parsed into frames.
 */
static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
			      int budget, int *work_done)
	if (!lan78xx_rx(dev, skb, budget, work_done)) {
		netif_dbg(dev, rx_err, dev->net, "drop\n");
		dev->net->stats.rx_errors++;
/* Rx URB completion handler (interrupt context).
 *
 * Classifies the URB status, updates error counters, and hands the
 * buffer to defer_bh() which moves it onto rxq_done and kicks NAPI.
 *
 * NOTE(review): the `state = rx_done/rx_cleanup` assignments, several
 * case labels, and `break` statements are elided in this extract.
 */
static void rx_complete(struct urb *urb)
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	/* Account for the bytes the controller actually transferred */
	skb_put(skb, urb->actual_length);

	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
		if (skb->len < RX_SKB_MIN_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		usb_mark_last_busy(dev->udev);

		/* Endpoint stalled: ask the worker to clear the halt */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);

		dev->net->stats.rx_errors++;

	/* data overrun ... flush fifo? */
		dev->net->stats.rx_over_errors++;

		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);

	state = defer_bh(dev, skb, &dev->rxq, state);
/* Fill and submit one Rx bulk URB for @skb's buffer.
 *
 * Only submits while the device is present, running and not halted or
 * asleep; classifies submit failures (halt -> defer worker, -ENODEV ->
 * detach). On any failure path the buffer is released back to the free
 * pool (visible at the bottom).
 *
 * NOTE(review): the switch framing around the error handling and the
 * final return are elided in this extract.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
	struct skb_data *entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
			/* Track the in-flight URB on rxq (lock already held) */
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			napi_schedule(&dev->napi);
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");

	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* Not submitted: return the buffer to the free pool */
	lan78xx_release_rx_buf(dev, skb);
/* Submit an Rx URB for every buffer currently in the free pool.
 *
 * NOTE(review): the loop-exit on rx_submit() failure (a `break`) is
 * elided after the `if` below.
 */
static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
	struct sk_buff *rx_buf;

	/* Ensure the maximum number of Rx URBs is submitted
	 */
	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
/* Recycle a processed Rx URB buffer: rewind its SKB data pointers to a
 * pristine, empty state and submit it again.
 *
 * NOTE(review): the `rx_buf->len = 0;` companion to the data_len reset
 * appears to be elided in this extract — confirm against full source.
 */
static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);

	rx_buf->data_len = 0;

	rx_submit(dev, rx_buf, GFP_ATOMIC);
/* Build the two little-endian Tx command words that prefix every frame
 * in a Tx URB buffer and store them at @buffer.
 *
 * TX_CMD_A carries the length, FCS request and checksum/LSO enables;
 * TX_CMD_B carries the LSO MSS and an optional VLAN tag to insert.
 */
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	if (skb_is_gso(skb)) {
		/* Hardware requires a minimum MSS */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;

	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
/* Pack as many pending Tx SKBs as fit into the URB buffer @tx_buf.
 *
 * Each frame is aligned to TX_ALIGNMENT, prefixed with the command words
 * from lan78xx_fill_tx_cmd_words(), then its payload is copied in. SKBs
 * that don't fit are pushed back to the head of txq_pend. Returns the
 * skb_data bookkeeping (packet count / byte length) for the built URB.
 *
 * NOTE(review): several control lines (the `if (!skb) break;` after the
 * dequeue, the fit check before the head_add, `continue` after the copy
 * failure, loop closer) are elided in this extract.
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;

	entry->num_of_packet = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there is room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		/* Bytes needed: alignment padding + command words + payload */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
		len = align_bytes + TX_CMD_LEN + skb->len;
			/* Doesn't fit: requeue at the head for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			/* Roll back the command words written for this SKB */
			tx_data -= TX_CMD_LEN;

		entry->length += len;
		/* LSO SKBs count as gso_segs wire packets */
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;

	skb_put(tx_buf, urb_len);
/* NAPI-context Tx bottom half.
 *
 * Wakes the stack queue when URB space has opened up, then repeatedly
 * fills free URB buffers from txq_pend and submits them, handling
 * autosuspend deferral and the usual submit error classes.
 *
 * NOTE(review): loop framing (`for (;;)`/`break` lines), the `goto out`
 * label structure and the switch framing around the error handling are
 * elided in this extract.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))

		tx_buf = lan78xx_get_tx_buf(dev);

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
			spin_unlock_irqrestore(&dev->txq.lock, flags);

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;

		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");

		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);

			/* Endpoint stalled: stop and let the worker clear it */
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);

			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);

			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		netdev_warn(dev->net, "failed to tx urb %d\n", ret);

		dev->net->stats.tx_dropped += entry->num_of_packet;
		lan78xx_release_tx_buf(dev, tx_buf);
/* NAPI bottom half body: deliver Rx frames and drive the URB machinery.
 *
 * Order of work: (1) flush last cycle's rxq_overflow frames, (2) snapshot
 * rxq_done into a local list, (3) process/resubmit each completed Rx URB
 * up to @budget, (4) splice any unprocessed URBs back to the FRONT of
 * rxq_done, (5) refill Rx URBs and run the Tx bottom half.
 * Returns the number of frames delivered (work_done).
 *
 * NOTE(review): the per-iteration work accounting, case labels inside the
 * state switch, the lan78xx_tx_bh() call and the return are elided here.
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
			rx_process(dev, rx_buf, budget, &work_done);
			netdev_dbg(dev->net, "rx buf state %d\n",

		lan78xx_rx_urb_resubmit(dev, rx_buf);

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */
/* NAPI poll callback.
 *
 * Skips all work while suspended, otherwise runs lan78xx_bh(). When the
 * budget is not exhausted it completes NAPI, then reschedules itself if
 * more Rx completions or pending Tx data arrived in the meantime — this
 * closes the race between napi_complete_done() and new work.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				netif_tx_unlock(dev->net);
/* Deferred-event worker (dev->wq).
 *
 * Services the event bits set by interrupt/completion context: clears
 * Tx/Rx endpoint halts, performs link resets and periodic statistics
 * updates with exponential backoff of the stats timer (delta doubles,
 * capped at 50 intervals). Holds an autopm reference for the duration.
 */
static void lan78xx_delayedwork(struct work_struct *work)
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))

	if (usb_autopm_get_interface(dev->intf) < 0)

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);

		status = usb_clear_halt(dev->udev, dev->pipe_out);
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);

	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
			clear_bit(EVENT_RX_HALT, &dev->flags);
			/* Restart Rx URB submission via NAPI */
			napi_schedule(&dev->napi);

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		if (lan78xx_link_reset(dev) < 0) {
			netdev_info(dev->net, "link reset failed (%d)\n",

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		/* Back off: double the polling interval, cap at 50 */
		dev->delta = min((dev->delta * 2), 50);

	usb_autopm_put_interface(dev->intf);
/* Interrupt-endpoint URB completion handler.
 *
 * On success, forwards the status words to lan78xx_status() (link-change
 * processing); on shutdown-class errors, returns without resubmitting.
 * Otherwise the URB is resubmitted so link events keep flowing.
 *
 * NOTE(review): the switch opener, the success `case 0:` label, and the
 * resubmit-error switch framing are elided in this extract.
 */
static void intr_complete(struct urb *urb)
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

		lan78xx_status(dev, urb);

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
		netdev_dbg(dev->net, "intr status %d\n", status);

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);

		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
/* lan78xx_disconnect - USB disconnect callback: unwind everything probe
 * set up (netdev, timers, deferred work, PHY, URBs, buffers).
 *
 * NOTE(review): lines are missing from this extraction (NULL checks,
 * the assignment of 'net', braces); comments cover only visible steps.
 */
4266 static void lan78xx_disconnect(struct usb_interface *intf)
4268 struct lan78xx_net *dev;
4269 struct usb_device *udev;
4270 struct net_device *net;
4271 struct phy_device *phydev;
/* Clear intfdata first so no late callback can look the device up. */
4273 dev = usb_get_intfdata(intf);
4274 usb_set_intfdata(intf, NULL);
4278 netif_napi_del(&dev->napi);
4280 udev = interface_to_usbdev(intf);
4283 unregister_netdev(net);
/* Quiesce the stat timer and deferred worker before freeing state. */
4285 timer_shutdown_sync(&dev->stat_monitor);
4286 set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4287 cancel_delayed_work_sync(&dev->wq);
4289 phydev = net->phydev;
/* Drop PHY fixups for these PHY IDs — presumably registered during
 * probe/bind; TODO confirm registration site in the full source.
 */
4291 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
4292 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
4294 phy_disconnect(net->phydev);
/* A pseudo fixed-link PHY is driver-created and must be unregistered. */
4296 if (phy_is_pseudo_fixed_link(phydev))
4297 fixed_phy_unregister(phydev);
/* Discard TX URBs that were parked in the deferred anchor. */
4299 usb_scuttle_anchored_urbs(&dev->deferred);
4301 lan78xx_unbind(dev, intf);
4303 lan78xx_free_tx_resources(dev);
4304 lan78xx_free_rx_resources(dev);
/* Kill before free: the intr URB may still be in flight. */
4306 usb_kill_urb(dev->urb_intr);
4307 usb_free_urb(dev->urb_intr);
4313 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4315 struct lan78xx_net *dev = netdev_priv(net);
4317 unlink_urbs(dev, &dev->txq);
4318 napi_schedule(&dev->napi);
4321 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4322 struct net_device *netdev,
4323 netdev_features_t features)
4325 struct lan78xx_net *dev = netdev_priv(netdev);
4327 if (skb->len > LAN78XX_TSO_SIZE(dev))
4328 features &= ~NETIF_F_GSO_MASK;
4330 features = vlan_features_check(skb, features);
4331 features = vxlan_features_check(skb, features);
/* Net device method table: standard open/stop/xmit hooks, MAC/MTU and
 * multicast management, VLAN filter add/kill, ioctl passthrough to the
 * PHY layer, and the per-skb features_check above.
 */
4336 static const struct net_device_ops lan78xx_netdev_ops = {
4337 .ndo_open = lan78xx_open,
4338 .ndo_stop = lan78xx_stop,
4339 .ndo_start_xmit = lan78xx_start_xmit,
4340 .ndo_tx_timeout = lan78xx_tx_timeout,
4341 .ndo_change_mtu = lan78xx_change_mtu,
4342 .ndo_set_mac_address = lan78xx_set_mac_addr,
4343 .ndo_validate_addr = eth_validate_addr,
4344 .ndo_eth_ioctl = phy_do_ioctl_running,
4345 .ndo_set_rx_mode = lan78xx_set_multicast,
4346 .ndo_set_features = lan78xx_set_features,
4347 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
4348 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
4349 .ndo_features_check = lan78xx_features_check,
4352 static void lan78xx_stat_monitor(struct timer_list *t)
4354 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4356 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* lan78xx_probe - USB probe: allocate the netdev, validate endpoints,
 * bind the hardware, set up the interrupt URB and register the device.
 *
 * NOTE(review): many lines are missing from this extraction (error
 * checks, goto labels, local declarations for ret/maxp/buf); comments
 * annotate only the visible sequence — confirm against the full file.
 */
4359 static int lan78xx_probe(struct usb_interface *intf,
4360 const struct usb_device_id *id)
4362 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4363 struct lan78xx_net *dev;
4364 struct net_device *netdev;
4365 struct usb_device *udev;
4368 unsigned int period;
/* Take a reference on the USB device for the lifetime of the netdev. */
4371 udev = interface_to_usbdev(intf);
4372 udev = usb_get_dev(udev);
4374 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4376 dev_err(&intf->dev, "Error: OOM\n");
4381 /* netdev_printk() needs this */
4382 SET_NETDEV_DEV(netdev, &intf->dev);
4384 dev = netdev_priv(netdev);
4388 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4389 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
/* Initialize all skb queues and locks before any URB can complete. */
4391 skb_queue_head_init(&dev->rxq);
4392 skb_queue_head_init(&dev->txq);
4393 skb_queue_head_init(&dev->rxq_done);
4394 skb_queue_head_init(&dev->txq_pend);
4395 skb_queue_head_init(&dev->rxq_overflow);
4396 mutex_init(&dev->phy_mutex);
4397 mutex_init(&dev->dev_mutex);
4399 ret = lan78xx_urb_config_init(dev);
4403 ret = lan78xx_alloc_tx_resources(dev);
4407 ret = lan78xx_alloc_rx_resources(dev);
4411 /* MTU range: 68 - 9000 */
4412 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4414 netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4416 netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4418 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4419 init_usb_anchor(&dev->deferred);
4421 netdev->netdev_ops = &lan78xx_netdev_ops;
4422 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4423 netdev->ethtool_ops = &lan78xx_ethtool_ops;
4426 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4428 mutex_init(&dev->stats.access_lock);
/* Need at least bulk-in, bulk-out and interrupt-in endpoints. */
4430 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
/* Validate that each expected endpoint has the right type/direction,
 * rather than trusting the descriptor layout blindly.
 */
4435 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4436 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4437 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4442 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4443 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4444 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4449 ep_intr = &intf->cur_altsetting->endpoint[2];
4450 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4455 dev->pipe_intr = usb_rcvintpipe(dev->udev,
4456 usb_endpoint_num(&ep_intr->desc));
4458 ret = lan78xx_bind(dev, intf);
/* Module parameter may override the descriptor's polling interval. */
4462 if (int_urb_interval_ms <= 0)
4463 period = ep_intr->desc.bInterval;
4465 period = int_urb_interval_ms * INT_URB_MICROFRAMES_PER_MS;
4467 netif_notice(dev, probe, netdev, "int urb period %d\n", period);
4469 maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4470 buf = kmalloc(maxp, GFP_KERNEL);
4476 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4477 if (!dev->urb_intr) {
4481 usb_fill_int_urb(dev->urb_intr, dev->udev,
4482 dev->pipe_intr, buf, maxp,
4483 intr_complete, dev, period);
/* URB core frees 'buf' with the URB; we must not free it ourselves. */
4484 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4487 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4489 /* Reject broken descriptors. */
4490 if (dev->maxpacket == 0) {
4495 /* driver requires remote-wakeup capability during autosuspend. */
4496 intf->needs_remote_wakeup = 1;
4498 ret = lan78xx_phy_init(dev);
4502 ret = register_netdev(netdev);
4504 netif_err(dev, probe, netdev, "couldn't register the device\n");
4508 usb_set_intfdata(intf, dev);
4510 ret = device_set_wakeup_enable(&udev->dev, true);
4512 /* Default delay of 2sec has more overhead than advantage.
4513 * Set to 10sec as default.
4515 pm_runtime_set_autosuspend_delay(&udev->dev,
4516 DEFAULT_AUTOSUSPEND_DELAY);
/* Error unwind: release in reverse order of acquisition (labels not
 * visible in this extraction).
 */
4521 phy_disconnect(netdev->phydev);
4523 usb_free_urb(dev->urb_intr);
4527 lan78xx_unbind(dev, intf);
4529 netif_napi_del(&dev->napi);
4530 lan78xx_free_rx_resources(dev);
4532 lan78xx_free_tx_resources(dev);
4534 free_netdev(netdev);
/* lan78xx_wakeframe_crc16 - bitwise CRC-16 (polynomial 0x8005) over a
 * byte buffer; the result programs wake-up frame filters (WUF_CFG).
 *
 * NOTE(review): several lines are missing from this extraction (crc
 * initial value, the data fetch, shift/xor steps, the return). It
 * appears to consume each byte LSB-first (data & 1), but the data
 * shift is not visible — confirm against the upstream implementation.
 */
4541 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4543 const u16 crc16poly = 0x8005;
/* Outer loop: one iteration per input byte. */
4549 for (i = 0; i < len; i++) {
/* Inner loop: fold each of the 8 bits into the running CRC. */
4551 for (bit = 0; bit < 8; bit++) {
4555 if (msb ^ (u16)(data & 1)) {
4557 crc |= (u16)0x0001U;
/* lan78xx_set_auto_suspend - configure selective (auto) suspend: stop
 * both data paths, clear stale wake state, enable good-frame wake-up,
 * program suspend mode 3 in PMT_CTL, then re-enable RX so wake-capable
 * frames can still be detected.
 *
 * NOTE(review): the 'ret < 0' checks and the final return are missing
 * from this extraction; annotated from the visible register sequence.
 */
4566 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4571 ret = lan78xx_stop_tx_path(dev);
4575 ret = lan78xx_stop_rx_path(dev);
4579 /* auto suspend (selective suspend) */
/* Clear wake-up control/status and latched wake sources. */
4581 ret = lan78xx_write_reg(dev, WUCSR, 0);
4584 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4587 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4591 /* set goodframe wakeup */
4593 ret = lan78xx_read_reg(dev, WUCSR, &buf);
4597 buf |= WUCSR_RFE_WAKE_EN_;
4598 buf |= WUCSR_STORE_WAKE_;
4600 ret = lan78xx_write_reg(dev, WUCSR, buf);
/* PMT_CTL: enable PHY/WOL wake, select suspend mode 3. */
4604 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4608 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4609 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4610 buf |= PMT_CTL_PHY_WAKE_EN_;
4611 buf |= PMT_CTL_WOL_EN_;
4612 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4613 buf |= PMT_CTL_SUS_MODE_3_;
4615 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Write-1-to-clear any pending wake-up status bits. */
4619 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4623 buf |= PMT_CTL_WUPS_MASK_;
4625 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* RX must keep running so the MAC can observe wake frames. */
4629 ret = lan78xx_start_rx_path(dev);
/* lan78xx_set_suspend - program Wake-on-LAN filters and suspend mode
 * according to the ethtool @wol bitmask (WAKE_PHY/MAGIC/BCAST/MCAST/
 * UCAST/ARP), then leave RX running so wake frames can be matched.
 *
 * NOTE(review): this extraction is missing lines (local declarations,
 * 'ret < 0' checks, mask_index increments, closing braces, return);
 * comments annotate only the visible register programming.
 */
4634 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* Match prefixes: 01:00:5E = IPv4 mcast, 33:33 = IPv6 mcast,
 * 0x0806 = ARP EtherType.
 */
4636 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4637 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4638 const u8 arp_type[2] = { 0x08, 0x06 };
4646 ret = lan78xx_stop_tx_path(dev);
4649 ret = lan78xx_stop_rx_path(dev);
/* Clear wake control/status and latched wake sources before setup. */
4653 ret = lan78xx_write_reg(dev, WUCSR, 0);
4656 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4659 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4667 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4671 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4672 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* Reset every wake-up frame filter slot before programming new ones. */
4674 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4675 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4681 if (wol & WAKE_PHY) {
4682 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4684 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4685 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4686 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4688 if (wol & WAKE_MAGIC) {
4689 temp_wucsr |= WUCSR_MPEN_;
4691 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4692 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
/* Magic packet alone allows the deeper suspend mode 3. */
4693 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4695 if (wol & WAKE_BCAST) {
4696 temp_wucsr |= WUCSR_BCST_EN_;
4698 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4699 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4700 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4702 if (wol & WAKE_MCAST) {
4703 temp_wucsr |= WUCSR_WAKE_EN_;
4705 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4706 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4707 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4709 WUF_CFGX_TYPE_MCAST_ |
4710 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4711 (crc & WUF_CFGX_CRC16_MASK_));
/* Mask 7 = compare the first 3 bytes of the destination address. */
4715 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4718 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4721 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4724 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4730 /* for IPv6 Multicast */
4731 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4732 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4734 WUF_CFGX_TYPE_MCAST_ |
4735 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4736 (crc & WUF_CFGX_CRC16_MASK_));
/* Mask 3 = compare the first 2 bytes (33:33 prefix). */
4740 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4743 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4746 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4749 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4755 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4756 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4757 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4759 if (wol & WAKE_UCAST) {
4760 temp_wucsr |= WUCSR_PFDA_EN_;
4762 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4763 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4764 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4766 if (wol & WAKE_ARP) {
4767 temp_wucsr |= WUCSR_WAKE_EN_;
4769 /* set WUF_CFG & WUF_MASK
4770 * for packettype (offset 12,13) = ARP (0x0806)
4772 crc = lan78xx_wakeframe_crc16(arp_type, 2);
4773 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4775 WUF_CFGX_TYPE_ALL_ |
4776 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4777 (crc & WUF_CFGX_CRC16_MASK_));
/* 0x3000 selects bytes 12-13 of the frame (the EtherType field). */
4781 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4784 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4787 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4790 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4796 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4797 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4798 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4801 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4805 /* when multiple WOL bits are set */
/* Mixed wake sources need the lighter suspend mode 0. */
4806 if (hweight_long((unsigned long)wol) > 1) {
4807 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4808 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4809 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4811 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* Write-1-to-clear any pending wake-up status bits. */
4816 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4820 buf |= PMT_CTL_WUPS_MASK_;
4822 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* RX stays enabled so the MAC can match the programmed wake frames. */
4826 ret = lan78xx_start_rx_path(dev);
/* lan78xx_suspend - USB suspend callback (both runtime autosuspend and
 * system sleep). Quiesces the data paths, then programs either
 * selective-suspend wake-up, WOL wake-up, or (interface down) a deep
 * suspend with all wake events disabled.
 *
 * NOTE(review): lines are missing from this extraction ('ret < 0'
 * checks, 'goto out' labels, the else/if structure around dev_open);
 * comments annotate only the visible sequence.
 */
4831 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4833 struct lan78xx_net *dev = usb_get_intfdata(intf);
4837 mutex_lock(&dev->dev_mutex);
4839 netif_dbg(dev, ifdown, dev->net,
4840 "suspending: pm event %#x", message.event);
4842 dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4845 spin_lock_irq(&dev->txq.lock);
4846 /* don't autosuspend while transmitting */
4847 if ((skb_queue_len(&dev->txq) ||
4848 skb_queue_len(&dev->txq_pend)) &&
4849 PMSG_IS_AUTO(message)) {
4850 spin_unlock_irq(&dev->txq.lock);
/* Mark asleep under txq.lock so the TX path sees a consistent flag. */
4854 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4855 spin_unlock_irq(&dev->txq.lock);
4859 ret = lan78xx_stop_rx_path(dev);
4863 ret = lan78xx_flush_rx_fifo(dev);
4868 ret = lan78xx_stop_tx_path(dev);
4872 /* empty out the Rx and Tx queues */
4873 netif_device_detach(dev->net);
4874 lan78xx_terminate_urbs(dev);
4875 usb_kill_urb(dev->urb_intr);
4878 netif_device_attach(dev->net);
4880 del_timer(&dev->stat_monitor);
4882 if (PMSG_IS_AUTO(message)) {
4883 ret = lan78xx_set_auto_suspend(dev);
/* System sleep with the interface open: honor the configured WOL. */
4887 struct lan78xx_priv *pdata;
4889 pdata = (struct lan78xx_priv *)(dev->data[0]);
4890 netif_carrier_off(dev->net);
4891 ret = lan78xx_set_suspend(dev, pdata->wol);
4896 /* Interface is down; don't allow WOL and PHY
4897 * events to wake up the host
4901 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
/* Disable all wake sources and enter suspend mode 3. */
4903 ret = lan78xx_write_reg(dev, WUCSR, 0);
4906 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4910 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4914 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4915 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4916 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4917 buf |= PMT_CTL_SUS_MODE_3_;
4919 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Write-1-to-clear any pending wake-up status bits. */
4923 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4927 buf |= PMT_CTL_WUPS_MASK_;
4929 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4936 mutex_unlock(&dev->dev_mutex);
/* lan78xx_submit_deferred_urbs - resubmit TX URBs that were anchored in
 * dev->deferred while the device was suspended. Presumably returns
 * true when a submission failed with -EPIPE so the caller can schedule
 * EVENT_TX_HALT — the pipe_halted assignment and return are not visible
 * in this extraction; confirm against the full source.
 *
 * Called with dev->txq.lock held (see lan78xx_resume's lock/unlock
 * around the call) — TODO confirm locking contract.
 */
4941 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4943 bool pipe_halted = false;
4946 while ((urb = usb_get_from_anchor(&dev->deferred))) {
4947 struct sk_buff *skb = urb->context;
/* Drop the buffer if the device vanished or the link is down. */
4950 if (!netif_device_present(dev->net) ||
4951 !netif_carrier_ok(dev->net) ||
4953 lan78xx_release_tx_buf(dev, skb);
4957 ret = usb_submit_urb(urb, GFP_ATOMIC);
/* Success: account the skb as an in-flight transmit. */
4960 netif_trans_update(dev->net);
4961 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4963 if (ret == -EPIPE) {
4964 netif_stop_queue(dev->net);
4966 } else if (ret == -ENODEV) {
4967 netif_device_detach(dev->net);
4970 lan78xx_release_tx_buf(dev, skb);
/* lan78xx_resume - USB resume callback: restart the interrupt URB,
 * flush deferred TX work, restore wake registers to their operational
 * settings and restart the data paths.
 *
 * NOTE(review): lines are missing from this extraction ('ret < 0'
 * checks, the if/else around dev_open, parts of the WUCSR bit lists);
 * comments annotate only the visible sequence.
 */
4977 static int lan78xx_resume(struct usb_interface *intf)
4979 struct lan78xx_net *dev = usb_get_intfdata(intf);
4983 mutex_lock(&dev->dev_mutex);
4985 netif_dbg(dev, ifup, dev->net, "resuming device");
4987 dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4990 bool pipe_halted = false;
4992 ret = lan78xx_flush_tx_fifo(dev);
/* Restart status polling; a failed submit means the device is gone. */
4996 if (dev->urb_intr) {
4997 int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5001 netif_device_detach(dev->net);
5002 netdev_warn(dev->net, "Failed to submit intr URB");
/* Under txq.lock: push out TX URBs deferred during suspend and clear
 * the asleep flag atomically with respect to the TX path.
 */
5006 spin_lock_irq(&dev->txq.lock);
5008 if (netif_device_present(dev->net)) {
5009 pipe_halted = lan78xx_submit_deferred_urbs(dev);
5012 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5015 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5017 spin_unlock_irq(&dev->txq.lock);
/* Wake the queue only when there is URB space for pending data. */
5020 netif_device_present(dev->net) &&
5021 (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5022 netif_start_queue(dev->net);
5024 ret = lan78xx_start_tx_path(dev);
5028 napi_schedule(&dev->napi);
5030 if (!timer_pending(&dev->stat_monitor)) {
5032 mod_timer(&dev->stat_monitor,
5033 jiffies + STAT_UPDATE_TIMER);
5037 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
/* Clear suspend-time wake config and latched wake sources... */
5040 ret = lan78xx_write_reg(dev, WUCSR2, 0);
5043 ret = lan78xx_write_reg(dev, WUCSR, 0);
5046 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
/* ...then acknowledge/arm the runtime wake event bits. */
5050 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5052 WUCSR2_IPV6_TCPSYN_RCD_ |
5053 WUCSR2_IPV4_TCPSYN_RCD_);
5057 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5058 WUCSR_EEE_RX_WAKE_ |
5060 WUCSR_RFE_WAKE_FR_ |
5069 mutex_unlock(&dev->dev_mutex);
/* lan78xx_reset_resume - resume after a USB bus reset: the chip lost
 * its state, so fully re-initialize it, restart the PHY, then run the
 * normal resume path.
 *
 * NOTE(review): the 'ret < 0' checks and return are not visible in
 * this extraction.
 */
5074 static int lan78xx_reset_resume(struct usb_interface *intf)
5076 struct lan78xx_net *dev = usb_get_intfdata(intf);
5079 netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5081 ret = lan78xx_reset(dev);
5085 phy_start(dev->net->phydev);
5087 ret = lan78xx_resume(intf);
/* USB ID table: the three Microchip LAN78xx parts plus one OEM board
 * (ATM2-AF) that embeds the same chip.
 */
5092 static const struct usb_device_id products[] = {
5094 /* LAN7800 USB Gigabit Ethernet Device */
5095 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5098 /* LAN7850 USB Gigabit Ethernet Device */
5099 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5102 /* LAN7801 USB Gigabit Ethernet Device */
5103 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5106 /* ATM2-AF USB Gigabit Ethernet Device */
5107 USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5111 MODULE_DEVICE_TABLE(usb, products);
/* USB driver descriptor: supports runtime autosuspend and provides a
 * reset_resume handler for post-reset recovery; hub-initiated LPM is
 * disabled.
 */
5113 static struct usb_driver lan78xx_driver = {
5114 .name = DRIVER_NAME,
5115 .id_table = products,
5116 .probe = lan78xx_probe,
5117 .disconnect = lan78xx_disconnect,
5118 .suspend = lan78xx_suspend,
5119 .resume = lan78xx_resume,
5120 .reset_resume = lan78xx_reset_resume,
5121 .supports_autosuspend = 1,
5122 .disable_hub_initiated_lpm = 1,
/* Standard module boilerplate: register/unregister the USB driver. */
5125 module_usb_driver(lan78xx_driver);
5127 MODULE_AUTHOR(DRIVER_AUTHOR);
5128 MODULE_DESCRIPTION(DRIVER_DESC);
5129 MODULE_LICENSE("GPL");