2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy_fixed.h>
40 #include <linux/of_mdio.h>
41 #include <linux/of_net.h>
44 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME "lan78xx"
48 #define TX_TIMEOUT_JIFFIES (5 * HZ)
49 #define THROTTLE_JIFFIES (HZ / 8)
50 #define UNLINK_TIMEOUT_MS 3
52 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
54 #define SS_USB_PKT_SIZE (1024)
55 #define HS_USB_PKT_SIZE (512)
56 #define FS_USB_PKT_SIZE (64)
58 #define MAX_RX_FIFO_SIZE (12 * 1024)
59 #define MAX_TX_FIFO_SIZE (12 * 1024)
60 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
61 #define DEFAULT_BULK_IN_DELAY (0x0800)
62 #define MAX_SINGLE_PACKET_SIZE (9000)
63 #define DEFAULT_TX_CSUM_ENABLE (true)
64 #define DEFAULT_RX_CSUM_ENABLE (true)
65 #define DEFAULT_TSO_CSUM_ENABLE (true)
66 #define DEFAULT_VLAN_FILTER_ENABLE (true)
67 #define DEFAULT_VLAN_RX_OFFLOAD (true)
68 #define TX_OVERHEAD (8)
71 #define LAN78XX_USB_VENDOR_ID (0x0424)
72 #define LAN7800_USB_PRODUCT_ID (0x7800)
73 #define LAN7850_USB_PRODUCT_ID (0x7850)
74 #define LAN7801_USB_PRODUCT_ID (0x7801)
75 #define LAN78XX_EEPROM_MAGIC (0x78A5)
76 #define LAN78XX_OTP_MAGIC (0x78F3)
81 #define EEPROM_INDICATOR (0xA5)
82 #define EEPROM_MAC_OFFSET (0x01)
83 #define MAX_EEPROM_SIZE 512
84 #define OTP_INDICATOR_1 (0xF3)
85 #define OTP_INDICATOR_2 (0xF7)
87 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
88 WAKE_MCAST | WAKE_BCAST | \
89 WAKE_ARP | WAKE_MAGIC)
91 /* USB related defines */
92 #define BULK_IN_PIPE 1
93 #define BULK_OUT_PIPE 2
95 /* default autosuspend delay (mSec)*/
96 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
98 /* statistic update interval (mSec) */
99 #define STAT_UPDATE_TIMER (1 * 1000)
101 /* defines interrupts from interrupt EP */
102 #define MAX_INT_EP (32)
103 #define INT_EP_INTEP (31)
104 #define INT_EP_OTP_WR_DONE (28)
105 #define INT_EP_EEE_TX_LPI_START (26)
106 #define INT_EP_EEE_TX_LPI_STOP (25)
107 #define INT_EP_EEE_RX_LPI (24)
108 #define INT_EP_MAC_RESET_TIMEOUT (23)
109 #define INT_EP_RDFO (22)
110 #define INT_EP_TXE (21)
111 #define INT_EP_USB_STATUS (20)
112 #define INT_EP_TX_DIS (19)
113 #define INT_EP_RX_DIS (18)
114 #define INT_EP_PHY (17)
115 #define INT_EP_DP (16)
116 #define INT_EP_MAC_ERR (15)
117 #define INT_EP_TDFU (14)
118 #define INT_EP_TDFO (13)
119 #define INT_EP_UTX (12)
120 #define INT_EP_GPIO_11 (11)
121 #define INT_EP_GPIO_10 (10)
122 #define INT_EP_GPIO_9 (9)
123 #define INT_EP_GPIO_8 (8)
124 #define INT_EP_GPIO_7 (7)
125 #define INT_EP_GPIO_6 (6)
126 #define INT_EP_GPIO_5 (5)
127 #define INT_EP_GPIO_4 (4)
128 #define INT_EP_GPIO_3 (3)
129 #define INT_EP_GPIO_2 (2)
130 #define INT_EP_GPIO_1 (1)
131 #define INT_EP_GPIO_0 (0)
/* ethtool statistics name table (ETH_SS_STATS).  The order of these
 * strings must match the member order of struct lan78xx_statstage /
 * lan78xx_statstage64, since lan78xx_get_stats() memcpy()s curr_stat
 * straight into the ethtool data array.
 * NOTE(review): this extraction is missing several entries (e.g. the
 * FCS-error strings) and the closing "};" — confirm against the full file.
 */
133 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
135 "RX Alignment Errors",
136 "Rx Fragment Errors",
138 "RX Undersize Frame Errors",
139 "RX Oversize Frame Errors",
141 "RX Unicast Byte Count",
142 "RX Broadcast Byte Count",
143 "RX Multicast Byte Count",
145 "RX Broadcast Frames",
146 "RX Multicast Frames",
149 "RX 65 - 127 Byte Frames",
150 "RX 128 - 255 Byte Frames",
151 "RX 256 - 511 Bytes Frames",
152 "RX 512 - 1023 Byte Frames",
153 "RX 1024 - 1518 Byte Frames",
154 "RX Greater 1518 Byte Frames",
155 "EEE RX LPI Transitions",
158 "TX Excess Deferral Errors",
161 "TX Single Collisions",
162 "TX Multiple Collisions",
163 "TX Excessive Collision",
164 "TX Late Collisions",
165 "TX Unicast Byte Count",
166 "TX Broadcast Byte Count",
167 "TX Multicast Byte Count",
169 "TX Broadcast Frames",
170 "TX Multicast Frames",
173 "TX 65 - 127 Byte Frames",
174 "TX 128 - 255 Byte Frames",
175 "TX 256 - 511 Bytes Frames",
176 "TX 512 - 1023 Byte Frames",
177 "TX 1024 - 1518 Byte Frames",
178 "TX Greater 1518 Byte Frames",
179 "EEE TX LPI Transitions",
/* Raw 32-bit hardware statistics counters, as returned by the device's
 * GET_STATS vendor request (see lan78xx_read_stats()).  Member order
 * must match both the device's stats layout and lan78xx_gstrings[].
 * These counters roll over; 64-bit accumulation happens in
 * struct lan78xx_statstage64.
 * NOTE(review): partial extraction — some members (e.g. rx_fcs_errors,
 * referenced by lan78xx_check_stat_rollover) are not visible here.
 */
183 struct lan78xx_statstage {
185 u32 rx_alignment_errors;
186 u32 rx_fragment_errors;
187 u32 rx_jabber_errors;
188 u32 rx_undersize_frame_errors;
189 u32 rx_oversize_frame_errors;
190 u32 rx_dropped_frames;
191 u32 rx_unicast_byte_count;
192 u32 rx_broadcast_byte_count;
193 u32 rx_multicast_byte_count;
194 u32 rx_unicast_frames;
195 u32 rx_broadcast_frames;
196 u32 rx_multicast_frames;
198 u32 rx_64_byte_frames;
199 u32 rx_65_127_byte_frames;
200 u32 rx_128_255_byte_frames;
201 u32 rx_256_511_bytes_frames;
202 u32 rx_512_1023_byte_frames;
203 u32 rx_1024_1518_byte_frames;
204 u32 rx_greater_1518_byte_frames;
205 u32 eee_rx_lpi_transitions;
208 u32 tx_excess_deferral_errors;
209 u32 tx_carrier_errors;
210 u32 tx_bad_byte_count;
211 u32 tx_single_collisions;
212 u32 tx_multiple_collisions;
213 u32 tx_excessive_collision;
214 u32 tx_late_collisions;
215 u32 tx_unicast_byte_count;
216 u32 tx_broadcast_byte_count;
217 u32 tx_multicast_byte_count;
218 u32 tx_unicast_frames;
219 u32 tx_broadcast_frames;
220 u32 tx_multicast_frames;
222 u32 tx_64_byte_frames;
223 u32 tx_65_127_byte_frames;
224 u32 tx_128_255_byte_frames;
225 u32 tx_256_511_bytes_frames;
226 u32 tx_512_1023_byte_frames;
227 u32 tx_1024_1518_byte_frames;
228 u32 tx_greater_1518_byte_frames;
229 u32 eee_tx_lpi_transitions;
/* 64-bit accumulated statistics: the 32-bit hardware counters plus
 * rollover_count * (rollover_max + 1), computed in
 * lan78xx_update_stats().  Must mirror struct lan78xx_statstage
 * member-for-member so the u32/u64 arrays index identically.
 */
233 struct lan78xx_statstage64 {
235 u64 rx_alignment_errors;
236 u64 rx_fragment_errors;
237 u64 rx_jabber_errors;
238 u64 rx_undersize_frame_errors;
239 u64 rx_oversize_frame_errors;
240 u64 rx_dropped_frames;
241 u64 rx_unicast_byte_count;
242 u64 rx_broadcast_byte_count;
243 u64 rx_multicast_byte_count;
244 u64 rx_unicast_frames;
245 u64 rx_broadcast_frames;
246 u64 rx_multicast_frames;
248 u64 rx_64_byte_frames;
249 u64 rx_65_127_byte_frames;
250 u64 rx_128_255_byte_frames;
251 u64 rx_256_511_bytes_frames;
252 u64 rx_512_1023_byte_frames;
253 u64 rx_1024_1518_byte_frames;
254 u64 rx_greater_1518_byte_frames;
255 u64 eee_rx_lpi_transitions;
258 u64 tx_excess_deferral_errors;
259 u64 tx_carrier_errors;
260 u64 tx_bad_byte_count;
261 u64 tx_single_collisions;
262 u64 tx_multiple_collisions;
263 u64 tx_excessive_collision;
264 u64 tx_late_collisions;
265 u64 tx_unicast_byte_count;
266 u64 tx_broadcast_byte_count;
267 u64 tx_multicast_byte_count;
268 u64 tx_unicast_frames;
269 u64 tx_broadcast_frames;
270 u64 tx_multicast_frames;
272 u64 tx_64_byte_frames;
273 u64 tx_65_127_byte_frames;
274 u64 tx_128_255_byte_frames;
275 u64 tx_256_511_bytes_frames;
276 u64 tx_512_1023_byte_frames;
277 u64 tx_1024_1518_byte_frames;
278 u64 tx_greater_1518_byte_frames;
279 u64 eee_tx_lpi_transitions;
283 static u32 lan78xx_regs[] = {
305 #define PHY_REG_SIZE (32 * sizeof(u32))
/* Per-device private state stored in lan78xx_net.data[0]; holds the
 * shadow copies of the multicast hash, perfect MAC filter and VLAN
 * tables that are flushed to hardware from deferred work items.
 * NOTE(review): partial extraction — trailing members (e.g. rfe_ctl,
 * wol, used elsewhere in this file) and the closing "};" are missing.
 */
309 struct lan78xx_priv {
310 struct lan78xx_net *dev;
312 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
313 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
314 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
315 struct mutex dataport_mutex; /* for dataport access */
316 spinlock_t rfe_ctl_lock; /* for rfe register access */
317 struct work_struct set_multicast;
318 struct work_struct set_vlan;
/* Per-skb bookkeeping overlaid on skb->cb for queued rx/tx buffers. */
332 struct skb_data { /* skb->cb is one of these */
334 struct lan78xx_net *dev;
335 enum skb_state state;
341 struct usb_ctrlrequest req;
342 struct lan78xx_net *dev;
345 #define EVENT_TX_HALT 0
346 #define EVENT_RX_HALT 1
347 #define EVENT_RX_MEMORY 2
348 #define EVENT_STS_SPLIT 3
349 #define EVENT_LINK_RESET 4
350 #define EVENT_RX_PAUSED 5
351 #define EVENT_DEV_WAKING 6
352 #define EVENT_DEV_ASLEEP 7
353 #define EVENT_DEV_OPEN 8
354 #define EVENT_STAT_UPDATE 9
357 struct mutex access_lock; /* for stats access */
358 struct lan78xx_statstage saved;
359 struct lan78xx_statstage rollover_count;
360 struct lan78xx_statstage rollover_max;
361 struct lan78xx_statstage64 curr_stat;
/* State for the driver-local IRQ domain used to deliver the device's
 * interrupt-EP events (notably the PHY interrupt) as virtual IRQs.
 */
364 struct irq_domain_data {
365 struct irq_domain *irqdomain;
367 struct irq_chip *irqchip;
368 irq_flow_handler_t irq_handler;
370 struct mutex irq_lock; /* for irq bus access */
374 struct net_device *net;
375 struct usb_device *udev;
376 struct usb_interface *intf;
381 struct sk_buff_head rxq;
382 struct sk_buff_head txq;
383 struct sk_buff_head done;
384 struct sk_buff_head rxq_pause;
385 struct sk_buff_head txq_pend;
387 struct tasklet_struct bh;
388 struct delayed_work wq;
390 struct usb_host_endpoint *ep_blkin;
391 struct usb_host_endpoint *ep_blkout;
392 struct usb_host_endpoint *ep_intr;
396 struct urb *urb_intr;
397 struct usb_anchor deferred;
399 struct mutex phy_mutex; /* for phy access */
400 unsigned pipe_in, pipe_out, pipe_intr;
402 u32 hard_mtu; /* count any extra framing */
403 size_t rx_urb_size; /* size for rx urbs */
407 wait_queue_head_t *wait;
408 unsigned char suspend_count;
411 struct timer_list delay;
412 struct timer_list stat_monitor;
414 unsigned long data[5];
421 struct mii_bus *mdiobus;
422 phy_interface_t interface;
425 u8 fc_request_control;
428 struct statstage stats;
430 struct irq_domain_data domain_data;
433 /* define external phy id */
434 #define PHY_LAN8835 (0x0007C130)
435 #define PHY_KSZ9031RNX (0x00221620)
/* Module parameters. */
437 /* use ethtool to change the level for any given device */
438 static int msg_level = -1;
439 module_param(msg_level, int, 0);
440 MODULE_PARM_DESC(msg_level, "Override default message level");
442 /* TSO seems to be having some issue with Selective Acknowledge (SACK) that
443 * results in lost data never being retransmitted.
444 * Disable it by default now, but adds a module parameter to enable it for
445 * debug purposes (the full cause is not currently understood).
 */
447 static bool enable_tso;
448 module_param(enable_tso, bool, 0644);
449 MODULE_PARM_DESC(enable_tso, "Enables TCP segmentation offload");
/* One USB microframe is 125 us, so 8 microframes per millisecond. */
451 #define INT_URB_MICROFRAMES_PER_MS 8
452 static int int_urb_interval_ms = 8;
453 module_param(int_urb_interval_ms, int, 0);
454 MODULE_PARM_DESC(int_urb_interval_ms, "Override usb interrupt urb interval");
/* Read one 32-bit device register via a USB vendor control-IN transfer.
 * A kmalloc'd bounce buffer is used because usb_control_msg() requires
 * DMA-able memory (stack buffers are not safe).
 * NOTE(review): partial extraction — the NULL-check on buf, the
 * le32_to_cpus()/copy-out on success, kfree() and return are missing here.
 */
456 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
458 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
464 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
465 USB_VENDOR_REQUEST_READ_REGISTER,
466 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
467 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
468 if (likely(ret >= 0)) {
472 netdev_warn(dev->net,
473 "Failed to read register index 0x%08x. ret = %d",
/* Write one 32-bit device register via a USB vendor control-OUT transfer.
 * Mirrors lan78xx_read_reg(): DMA-able bounce buffer, warn on failure.
 * NOTE(review): partial extraction — buf NULL-check, cpu_to_le32 store,
 * kfree() and return lines are not visible here.
 */
482 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
484 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
493 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
494 USB_VENDOR_REQUEST_WRITE_REGISTER,
495 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
496 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
497 if (unlikely(ret < 0)) {
498 netdev_warn(dev->net,
499 "Failed to write register index 0x%08x. ret = %d",
/* Fetch the whole hardware statistics block with one GET_STATS vendor
 * request and byte-swap each u32 counter in place (device is LE).
 * NOTE(review): partial extraction — allocation check, the copy into
 * *data, kfree() and return are missing from this view.
 */
508 static int lan78xx_read_stats(struct lan78xx_net *dev,
509 struct lan78xx_statstage *data)
513 struct lan78xx_statstage *stats;
517 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
521 ret = usb_control_msg(dev->udev,
522 usb_rcvctrlpipe(dev->udev, 0),
523 USB_VENDOR_REQUEST_GET_STATS,
524 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
529 USB_CTRL_SET_TIMEOUT);
530 if (likely(ret >= 0)) {
533 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
534 le32_to_cpus(&src[i]);
538 netdev_warn(dev->net,
539 "Failed to read stat ret = 0x%x", ret);
/* Detect a 32-bit counter wrap: if the freshly-read value is smaller
 * than the previously saved value, the hardware counter rolled over,
 * so bump the per-counter rollover count.
 * NOTE(review): brace-wrapped multi-statement macro — do { } while (0)
 * would be safer, but all call sites here use it as a full statement.
 */
547 #define check_counter_rollover(struct1, dev_stats, member) { \
548 if (struct1->member < dev_stats.saved.member) \
549 dev_stats.rollover_count.member++; \
/* Compare every counter in a fresh stats snapshot against the last
 * saved snapshot, bumping rollover counts where wraps occurred, then
 * save the new snapshot as the baseline for the next comparison.
 * Called from lan78xx_update_stats() under dev->stats.access_lock.
 */
552 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
553 struct lan78xx_statstage *stats)
555 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
556 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
557 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
558 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
559 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
560 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
561 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
562 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
563 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
564 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
565 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
566 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
567 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
568 check_counter_rollover(stats, dev->stats, rx_pause_frames);
569 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
570 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
571 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
572 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
573 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
574 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
575 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
576 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
577 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
578 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
579 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
580 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
581 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
582 check_counter_rollover(stats, dev->stats, tx_single_collisions);
583 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
584 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
585 check_counter_rollover(stats, dev->stats, tx_late_collisions);
586 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
587 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
588 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
589 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
590 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
591 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
592 check_counter_rollover(stats, dev->stats, tx_pause_frames);
593 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
594 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
595 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
596 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
597 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
598 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
599 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
600 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
601 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
/* New snapshot becomes the baseline for the next rollover check. */
603 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh the 64-bit accumulated statistics: read the 32-bit hardware
 * counters, account for rollovers, then recompute each 64-bit value as
 * raw + rollovers * (max + 1).  Treats the three statstage structs as
 * parallel u32/u64 arrays, which relies on their identical member order.
 * Takes a runtime-PM reference for the duration of the USB access.
 */
606 static void lan78xx_update_stats(struct lan78xx_net *dev)
608 u32 *p, *count, *max;
611 struct lan78xx_statstage lan78xx_stats;
613 if (usb_autopm_get_interface(dev->intf) < 0)
616 p = (u32 *)&lan78xx_stats;
617 count = (u32 *)&dev->stats.rollover_count;
618 max = (u32 *)&dev->stats.rollover_max;
619 data = (u64 *)&dev->stats.curr_stat;
621 mutex_lock(&dev->stats.access_lock);
623 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
624 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
626 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
627 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
629 mutex_unlock(&dev->stats.access_lock);
631 usb_autopm_put_interface(dev->intf);
634 /* Loop until the read is completed with timeout called with phy_mutex held */
635 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
637 unsigned long start_time = jiffies;
/* Poll MII_ACC until the busy bit clears, giving up after ~1 second. */
642 ret = lan78xx_read_reg(dev, MII_ACC, &val);
643 if (unlikely(ret < 0))
646 if (!(val & MII_ACC_MII_BUSY_))
648 } while (!time_after(jiffies, start_time + HZ));
/* Compose a MII_ACC register value for a PHY register access:
 * PHY address + register index + read/write direction + busy (go) bit.
 */
653 static inline u32 mii_access(int id, int index, int read)
657 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
658 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
660 ret |= MII_ACC_MII_READ_;
662 ret |= MII_ACC_MII_WRITE_;
663 ret |= MII_ACC_MII_BUSY_;
/* Wait (up to ~1 s) for an in-flight EEPROM command to finish: poll
 * E2P_CMD until busy clears or the controller reports a timeout.
 */
668 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
670 unsigned long start_time = jiffies;
675 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
676 if (unlikely(ret < 0))
679 if (!(val & E2P_CMD_EPC_BUSY_) ||
680 (val & E2P_CMD_EPC_TIMEOUT_))
682 usleep_range(40, 100);
683 } while (!time_after(jiffies, start_time + HZ));
685 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
686 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Confirm the EEPROM controller is idle before issuing a new command;
 * polls E2P_CMD busy for up to ~1 s, warning on timeout.
 */
693 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
695 unsigned long start_time = jiffies;
700 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
701 if (unlikely(ret < 0))
704 if (!(val & E2P_CMD_EPC_BUSY_))
707 usleep_range(40, 100);
708 } while (!time_after(jiffies, start_time + HZ));
710 netdev_warn(dev->net, "EEPROM is busy");
/* Read 'length' bytes from the EEPROM starting at 'offset', one byte
 * per E2P_CMD READ command.  On LAN7800 the EEPROM pins are muxed with
 * the LED function, so LEDs are disabled in HW_CFG around the access
 * and the saved value restored afterwards.
 */
714 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
715 u32 length, u8 *data)
722 /* depends on chip, some EEPROM pins are muxed with LED function.
723 * disable & restore LED function to access EEPROM.
 */
725 ret = lan78xx_read_reg(dev, HW_CFG, &val);
727 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
728 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
729 ret = lan78xx_write_reg(dev, HW_CFG, val);
732 retval = lan78xx_eeprom_confirm_not_busy(dev);
736 for (i = 0; i < length; i++) {
737 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
738 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
739 ret = lan78xx_write_reg(dev, E2P_CMD, val);
740 if (unlikely(ret < 0)) {
745 retval = lan78xx_wait_eeprom(dev);
749 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
750 if (unlikely(ret < 0)) {
/* Each E2P_DATA read yields one byte in the low 8 bits. */
755 data[i] = val & 0xFF;
761 if (dev->chipid == ID_REV_CHIP_ID_7800_)
762 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read from the EEPROM only if a valid indicator byte (EEPROM_INDICATOR)
 * is present at offset 0; otherwise the read is refused.
 */
767 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
768 u32 length, u8 *data)
773 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
774 if ((ret == 0) && (sig == EEPROM_INDICATOR))
775 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write 'length' bytes to the EEPROM at 'offset': issue a write-enable
 * (EWEN) command, then one E2P_DATA fill + WRITE command per byte,
 * waiting for completion each time.  LED pin muxing is handled the same
 * way as in lan78xx_read_raw_eeprom().
 */
782 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
783 u32 length, u8 *data)
790 /* depends on chip, some EEPROM pins are muxed with LED function.
791 * disable & restore LED function to access EEPROM.
 */
793 ret = lan78xx_read_reg(dev, HW_CFG, &val);
795 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
796 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
797 ret = lan78xx_write_reg(dev, HW_CFG, val);
800 retval = lan78xx_eeprom_confirm_not_busy(dev);
804 /* Issue write/erase enable command */
805 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
806 ret = lan78xx_write_reg(dev, E2P_CMD, val);
807 if (unlikely(ret < 0)) {
812 retval = lan78xx_wait_eeprom(dev);
816 for (i = 0; i < length; i++) {
817 /* Fill data register */
819 ret = lan78xx_write_reg(dev, E2P_DATA, val);
825 /* Send "write" command */
826 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
827 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
828 ret = lan78xx_write_reg(dev, E2P_CMD, val);
834 retval = lan78xx_wait_eeprom(dev);
843 if (dev->chipid == ID_REV_CHIP_ID_7800_)
844 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read 'length' bytes from the OTP array: power the OTP block up if it
 * is in power-down (waiting for PWRDN_N to clear), then for each byte
 * program the split address registers, issue a READ + GO, poll
 * OTP_STATUS busy, and collect the byte from OTP_RD_DATA.
 */
849 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
850 u32 length, u8 *data)
855 unsigned long timeout;
857 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
859 if (buf & OTP_PWR_DN_PWRDN_N_) {
860 /* clear it and wait to be cleared */
861 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0)
863 timeout = jiffies + HZ;
866 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
867 if (time_after(jiffies, timeout)) {
868 netdev_warn(dev->net,
869 "timeout on OTP_PWR_DN");
872 } while (buf & OTP_PWR_DN_PWRDN_N_);
875 for (i = 0; i < length; i++) {
/* OTP address is split: bits 15:11 in ADDR1, bits 10:3 in ADDR2. */
876 ret = lan78xx_write_reg(dev, OTP_ADDR1,
877 ((offset + i) >> 8) & OTP_ADDR1_15_11);
878 ret = lan78xx_write_reg(dev, OTP_ADDR2,
879 ((offset + i) & OTP_ADDR2_10_3));
881 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
882 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
884 timeout = jiffies + HZ;
887 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
888 if (time_after(jiffies, timeout)) {
889 netdev_warn(dev->net,
890 "timeout on OTP_STATUS");
893 } while (buf & OTP_STATUS_BUSY_);
895 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
897 data[i] = (u8)(buf & 0xFF);
/* Program 'length' bytes into the OTP array: power the block up if
 * needed, select BYTE program mode, then per byte set the split
 * address, load OTP_PRGM_DATA, issue program-verify + GO and poll
 * OTP_STATUS until not busy.  OTP programming is one-way.
 */
903 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
904 u32 length, u8 *data)
909 unsigned long timeout;
911 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
913 if (buf & OTP_PWR_DN_PWRDN_N_) {
914 /* clear it and wait to be cleared */
915 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
917 timeout = jiffies + HZ;
920 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
921 if (time_after(jiffies, timeout)) {
922 netdev_warn(dev->net,
923 "timeout on OTP_PWR_DN completion");
926 } while (buf & OTP_PWR_DN_PWRDN_N_);
929 /* set to BYTE program mode */
930 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
932 for (i = 0; i < length; i++) {
933 ret = lan78xx_write_reg(dev, OTP_ADDR1,
934 ((offset + i) >> 8) & OTP_ADDR1_15_11);
935 ret = lan78xx_write_reg(dev, OTP_ADDR2,
936 ((offset + i) & OTP_ADDR2_10_3));
937 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
938 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
939 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
941 timeout = jiffies + HZ;
944 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
945 if (time_after(jiffies, timeout)) {
946 netdev_warn(dev->net,
947 "Timeout on OTP_STATUS completion");
950 } while (buf & OTP_STATUS_BUSY_);
/* Validate the OTP indicator byte at offset 0 before reading.
 * NOTE(review): in the full driver the two indicator values select a
 * base offset for the read — the offset-adjust lines are missing here.
 */
956 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
957 u32 length, u8 *data)
962 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
965 if (sig == OTP_INDICATOR_1)
967 else if (sig == OTP_INDICATOR_2)
972 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL up to 100 times (40-100 us apart) waiting for the
 * dataport ready bit; warn and fail if it never becomes ready.
 */
978 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
982 for (i = 0; i < 100; i++) {
985 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
986 if (unlikely(ret < 0))
989 if (dp_sel & DP_SEL_DPRDY_)
992 usleep_range(40, 100);
995 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write a buffer of u32 words into one of the device's internal RAMs
 * (selected via DP_SEL) through the dataport: one DP_ADDR/DP_DATA/
 * DP_CMD-WRITE cycle per word, waiting for ready between words.
 * Serialized by pdata->dataport_mutex; holds a runtime-PM reference.
 */
1000 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1001 u32 addr, u32 length, u32 *buf)
1003 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1007 if (usb_autopm_get_interface(dev->intf) < 0)
1010 mutex_lock(&pdata->dataport_mutex);
1012 ret = lan78xx_dataport_wait_not_busy(dev);
1016 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1018 dp_sel &= ~DP_SEL_RSEL_MASK_;
1019 dp_sel |= ram_select;
1020 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1022 for (i = 0; i < length; i++) {
1023 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1025 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1027 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1029 ret = lan78xx_dataport_wait_not_busy(dev);
1035 mutex_unlock(&pdata->dataport_mutex);
1036 usb_autopm_put_interface(dev->intf);
/* Store a MAC address into perfect-filter slot 'index' (slot 0 is
 * reserved for the device's own address).  The 6 bytes are packed into
 * the MAF_LO/MAF_HI shadow words, with the valid + dst-match flags set
 * in the high word; the hardware write happens later in
 * lan78xx_deferred_multicast_write().
 */
1041 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1042 int index, u8 addr[ETH_ALEN])
1046 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1048 temp = addr[2] | (temp << 8);
1049 temp = addr[1] | (temp << 8);
1050 temp = addr[0] | (temp << 8);
1051 pdata->pfilter_table[index][1] = temp;
1053 temp = addr[4] | (temp << 8);
1054 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1055 pdata->pfilter_table[index][0] = temp;
1059 /* returns hash bit number for given MAC address */
/* Top 9 bits of the Ethernet CRC index into the 512-bit multicast hash. */
1060 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1062 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler that pushes the shadow multicast state to hardware:
 * the hash table goes through the dataport, the perfect-filter slots
 * via MAF_HI/MAF_LO (HI is cleared first so the slot is never valid
 * with a half-written address), and finally RFE_CTL is updated.
 * Runs in process context because register I/O may sleep.
 */
1065 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1067 struct lan78xx_priv *pdata =
1068 container_of(param, struct lan78xx_priv, set_multicast);
1069 struct lan78xx_net *dev = pdata->dev;
1073 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1076 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1077 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1079 for (i = 1; i < NUM_OF_MAF; i++) {
1080 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1081 ret = lan78xx_write_reg(dev, MAF_LO(i),
1082 pdata->pfilter_table[i][1]);
1083 ret = lan78xx_write_reg(dev, MAF_HI(i),
1084 pdata->pfilter_table[i][0]);
1087 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode handler.  Rebuilds the shadow receive-filter state
 * (rfe_ctl flags, multicast hash, perfect filters) under the rfe_ctl
 * spinlock — this runs in atomic context — and then defers the actual
 * USB register writes to the set_multicast work item.
 */
1090 static void lan78xx_set_multicast(struct net_device *netdev)
1092 struct lan78xx_net *dev = netdev_priv(netdev);
1093 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1094 unsigned long flags;
1097 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1099 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1100 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1102 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1103 pdata->mchash_table[i] = 0;
1104 /* pfilter_table[0] has own HW address */
1105 for (i = 1; i < NUM_OF_MAF; i++) {
1106 pdata->pfilter_table[i][0] =
1107 pdata->pfilter_table[i][1] = 0;
/* Broadcast reception is always on. */
1110 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1112 if (dev->net->flags & IFF_PROMISC) {
1113 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1114 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1116 if (dev->net->flags & IFF_ALLMULTI) {
1117 netif_dbg(dev, drv, dev->net,
1118 "receive all multicast enabled");
1119 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1123 if (netdev_mc_count(dev->net)) {
1124 struct netdev_hw_addr *ha;
1127 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1129 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1132 netdev_for_each_mc_addr(ha, netdev) {
1133 /* set first 32 into Perfect Filter */
1135 lan78xx_set_addr_filter(pdata, i, ha->addr);
/* Overflow addresses fall back to the 512-bit hash filter. */
1137 u32 bitnum = lan78xx_hash(ha->addr);
1139 pdata->mchash_table[bitnum / 32] |=
1140 (1 << (bitnum % 32));
1141 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1147 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1149 /* defer register writes to a sleepable context */
1150 schedule_work(&pdata->set_multicast);
/* Program pause-frame (flow control) settings after link resolution.
 * The capability mask comes from autonegotiation results when
 * fc_autoneg is set, otherwise from the user's requested control.
 * FCT_FLOW thresholds (speed-dependent; assignments not visible in
 * this extraction) must be written before enabling FLOW.
 */
1153 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1154 u16 lcladv, u16 rmtadv)
1156 u32 flow = 0, fct_flow = 0;
1160 if (dev->fc_autoneg)
1161 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1163 cap = dev->fc_request_control;
1165 if (cap & FLOW_CTRL_TX)
1166 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1168 if (cap & FLOW_CTRL_RX)
1169 flow |= FLOW_CR_RX_FCEN_;
1171 if (dev->udev->speed == USB_SPEED_SUPER)
1173 else if (dev->udev->speed == USB_SPEED_HIGH)
1176 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1177 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1178 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1180 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1182 /* threshold value should be set before enabling flow */
1183 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link change (deferred via EVENT_LINK_RESET).
 * Link down: update state and stop the stats timer.  Link up: read the
 * resolved link parameters, tune USB LPM (U1/U2) for SuperSpeed —
 * U2 is disabled at 1000 Mb/s, both enabled otherwise — reprogram flow
 * control from the advertisement registers, arm the stats timer and
 * kick the rx/tx tasklet.
 */
1188 static int lan78xx_link_reset(struct lan78xx_net *dev)
1190 struct phy_device *phydev = dev->net->phydev;
1191 struct ethtool_link_ksettings ecmd;
1192 int ladv, radv, ret;
1195 /* clear LAN78xx interrupt status */
1196 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1197 if (unlikely(ret < 0))
1200 phy_read_status(phydev);
1202 if (!phydev->link && dev->link_on) {
1203 dev->link_on = false;
1206 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1207 if (unlikely(ret < 0))
1210 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1211 if (unlikely(ret < 0))
1214 del_timer(&dev->stat_monitor);
1215 } else if (phydev->link && !dev->link_on) {
1216 dev->link_on = true;
1218 phy_ethtool_ksettings_get(phydev, &ecmd);
1220 if (dev->udev->speed == USB_SPEED_SUPER) {
1221 if (ecmd.base.speed == 1000) {
1223 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1224 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1225 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1227 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1228 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1229 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1231 /* enable U1 & U2 */
1232 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1233 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1234 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1235 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1239 ladv = phy_read(phydev, MII_ADVERTISE);
1243 radv = phy_read(phydev, MII_LPA);
1247 netif_dbg(dev, link, dev->net,
1248 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1249 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1251 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1254 if (!timer_pending(&dev->stat_monitor)) {
1256 mod_timer(&dev->stat_monitor,
1257 jiffies + STAT_UPDATE_TIMER);
1260 tasklet_schedule(&dev->bh);
1266 /* some work can't be done in tasklets, so we use keventd
1268 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1269 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
1271 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1273 set_bit(work, &dev->flags);
1274 if (!schedule_delayed_work(&dev->wq, 0))
1275 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-EP URB completion handler: decode the 4-byte little-endian
 * status word.  A PHY interrupt defers link handling to keventd and,
 * when a PHY IRQ domain is registered, re-dispatches the event as a
 * generic IRQ (interrupts disabled around generic_handle_irq, as this
 * may not run in true hard-IRQ context).
 */
1278 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1282 if (urb->actual_length != 4) {
1283 netdev_warn(dev->net,
1284 "unexpected urb length %d", urb->actual_length);
1288 memcpy(&intdata, urb->transfer_buffer, 4);
1289 le32_to_cpus(&intdata);
1291 if (intdata & INT_ENP_PHY_INT) {
1292 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1293 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1295 if (dev->domain_data.phyirq > 0) {
1296 local_irq_disable();
1297 generic_handle_irq(dev->domain_data.phyirq);
1301 netdev_warn(dev->net,
1302 "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool get_eeprom_len: fixed maximum EEPROM size in bytes. */
1305 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1307 return MAX_EEPROM_SIZE;
/* ethtool get_eeprom: raw EEPROM dump under a runtime-PM reference;
 * magic is set to LAN78XX_EEPROM_MAGIC so userspace can identify it.
 */
1310 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1311 struct ethtool_eeprom *ee, u8 *data)
1313 struct lan78xx_net *dev = netdev_priv(netdev);
1316 ret = usb_autopm_get_interface(dev->intf);
1320 ee->magic = LAN78XX_EEPROM_MAGIC;
1322 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1324 usb_autopm_put_interface(dev->intf);
/* ethtool set_eeprom: the magic selects the target — EEPROM_MAGIC
 * writes raw EEPROM; OTP_MAGIC programs OTP, but only for a full-image
 * write starting at offset 0 whose first byte is a valid OTP indicator
 * (OTP is one-time programmable, so the gate is deliberately strict).
 */
1329 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1330 struct ethtool_eeprom *ee, u8 *data)
1332 struct lan78xx_net *dev = netdev_priv(netdev);
1335 ret = usb_autopm_get_interface(dev->intf);
1339 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1340 * to load data from EEPROM
 */
1342 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1343 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1344 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1345 (ee->offset == 0) &&
1347 (data[0] == OTP_INDICATOR_1))
1348 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1350 usb_autopm_put_interface(dev->intf);
/* ethtool get_strings: copy out the statistics name table. */
1355 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1358 if (stringset == ETH_SS_STATS)
1359 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1362 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1364 if (sset == ETH_SS_STATS)
1365 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool: snapshot the cached hardware statistics into @data.
 * access_lock guards curr_stat against concurrent updater runs.
 */
1370 static void lan78xx_get_stats(struct net_device *netdev,
1371 struct ethtool_stats *stats, u64 *data)
1373 struct lan78xx_net *dev = netdev_priv(netdev);
/* refresh the cached counters from hardware first */
1375 lan78xx_update_stats(dev);
1377 mutex_lock(&dev->stats.access_lock);
1378 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1379 mutex_unlock(&dev->stats.access_lock);
/* ethtool: report Wake-on-LAN capability and current settings. WoL is only
 * offered when USB remote wakeup (USB_CFG_RMT_WKP_) is enabled in hardware.
 */
1382 static void lan78xx_get_wol(struct net_device *netdev,
1383 struct ethtool_wolinfo *wol)
1385 struct lan78xx_net *dev = netdev_priv(netdev);
1388 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
/* silently report nothing if the device cannot be resumed */
1390 if (usb_autopm_get_interface(dev->intf) < 0)
1393 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1394 if (unlikely(ret < 0)) {
1398 if (buf & USB_CFG_RMT_WKP_) {
1399 wol->supported = WAKE_ALL;
1400 wol->wolopts = pdata->wol;
/* NOTE(review): the elided else path presumably clears supported/wolopts
 * when remote wakeup is disabled — confirm against full source.
 */
1407 usb_autopm_put_interface(dev->intf);
1410 static int lan78xx_set_wol(struct net_device *netdev,
1411 struct ethtool_wolinfo *wol)
1413 struct lan78xx_net *dev = netdev_priv(netdev);
1414 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1417 ret = usb_autopm_get_interface(dev->intf);
1421 if (wol->wolopts & ~WAKE_ALL)
1424 pdata->wol = wol->wolopts;
1426 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1428 phy_ethtool_set_wol(netdev->phydev, wol);
1430 usb_autopm_put_interface(dev->intf);
/* ethtool: report Energy-Efficient-Ethernet state. Combines the PHY's EEE
 * advertisement with the MAC's MAC_CR_EEE_EN_ enable bit.
 */
1435 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1437 struct lan78xx_net *dev = netdev_priv(net);
1438 struct phy_device *phydev = net->phydev;
1442 ret = usb_autopm_get_interface(dev->intf);
1446 ret = phy_ethtool_get_eee(phydev, edata);
1450 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1451 if (buf & MAC_CR_EEE_EN_) {
1452 edata->eee_enabled = true;
/* EEE is active only if both ends advertise a common EEE mode */
1453 edata->eee_active = !!(edata->advertised &
1454 edata->lp_advertised);
1455 edata->tx_lpi_enabled = true;
1456 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1457 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1458 edata->tx_lpi_timer = buf;
1460 edata->eee_enabled = false;
1461 edata->eee_active = false;
1462 edata->tx_lpi_enabled = false;
1463 edata->tx_lpi_timer = 0;
1468 usb_autopm_put_interface(dev->intf);
/* ethtool: enable/disable Energy-Efficient-Ethernet in the MAC and PHY.
 * NOTE(review): intermediate register read/write errors are overwritten in
 * @ret rather than aborting — matches surrounding driver style, but means
 * partial configuration is not reported to the caller.
 */
1473 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1475 struct lan78xx_net *dev = netdev_priv(net);
1479 ret = usb_autopm_get_interface(dev->intf);
1483 if (edata->eee_enabled) {
1484 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1485 buf |= MAC_CR_EEE_EN_;
1486 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1488 phy_ethtool_set_eee(net->phydev, edata);
/* timer register and tx_lpi_timer share the same microsecond unit */
1490 buf = (u32)edata->tx_lpi_timer;
1491 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1493 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1494 buf &= ~MAC_CR_EEE_EN_;
1495 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1498 usb_autopm_put_interface(dev->intf);
/* ethtool: refresh and return the PHY link state (1 = up, 0 = down).
 * NOTE(review): phy_read_status() can fail, but its return value is
 * ignored here; on error a stale link state may be reported.
 */
1503 static u32 lan78xx_get_link(struct net_device *net)
1505 phy_read_status(net->phydev);
1507 return net->phydev->link;
1510 static void lan78xx_get_drvinfo(struct net_device *net,
1511 struct ethtool_drvinfo *info)
1513 struct lan78xx_net *dev = netdev_priv(net);
1515 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1516 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1519 static u32 lan78xx_get_msglevel(struct net_device *net)
1521 struct lan78xx_net *dev = netdev_priv(net);
1523 return dev->msg_enable;
1526 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1528 struct lan78xx_net *dev = netdev_priv(net);
1530 dev->msg_enable = level;
/* ethtool: fetch link settings from the PHY, holding an autopm reference
 * while the PHY (reached over USB-mapped MDIO) is queried.
 */
1533 static int lan78xx_get_link_ksettings(struct net_device *net,
1534 struct ethtool_link_ksettings *cmd)
1536 struct lan78xx_net *dev = netdev_priv(net);
1537 struct phy_device *phydev = net->phydev;
1540 ret = usb_autopm_get_interface(dev->intf);
1544 phy_ethtool_ksettings_get(phydev, cmd);
1546 usb_autopm_put_interface(dev->intf);
/* ethtool: apply link settings via the PHY. When autonegotiation is off,
 * the link is bounced so the forced speed/duplex takes effect.
 */
1551 static int lan78xx_set_link_ksettings(struct net_device *net,
1552 const struct ethtool_link_ksettings *cmd)
1554 struct lan78xx_net *dev = netdev_priv(net);
1555 struct phy_device *phydev = net->phydev;
1559 ret = usb_autopm_get_interface(dev->intf);
1563 /* change speed & duplex */
1564 ret = phy_ethtool_ksettings_set(phydev, cmd);
1566 if (!cmd->base.autoneg) {
1567 /* force link down */
/* briefly enable loopback in BMCR to drop the link, then restore the
 * original BMCR so the PHY re-establishes it with the new settings
 */
1568 temp = phy_read(phydev, MII_BMCR);
1569 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1571 phy_write(phydev, MII_BMCR, temp);
1574 usb_autopm_put_interface(dev->intf);
/* ethtool: report flow-control configuration from the driver's cached
 * request state (fc_autoneg / fc_request_control).
 * NOTE(review): ecmd is fetched but appears unused in the visible code —
 * likely a leftover; confirm against full source before removing.
 */
1579 static void lan78xx_get_pause(struct net_device *net,
1580 struct ethtool_pauseparam *pause)
1582 struct lan78xx_net *dev = netdev_priv(net);
1583 struct phy_device *phydev = net->phydev;
1584 struct ethtool_link_ksettings ecmd;
1586 phy_ethtool_ksettings_get(phydev, &ecmd);
1588 pause->autoneg = dev->fc_autoneg;
1590 if (dev->fc_request_control & FLOW_CTRL_TX)
1591 pause->tx_pause = 1;
1593 if (dev->fc_request_control & FLOW_CTRL_RX)
1594 pause->rx_pause = 1;
/* ethtool: configure flow control. Pause autoneg requires link autoneg to
 * be enabled; when autonegotiating, the pause bits are folded into the
 * PHY advertisement and renegotiated.
 */
1597 static int lan78xx_set_pause(struct net_device *net,
1598 struct ethtool_pauseparam *pause)
1600 struct lan78xx_net *dev = netdev_priv(net);
1601 struct phy_device *phydev = net->phydev;
1602 struct ethtool_link_ksettings ecmd;
1605 phy_ethtool_ksettings_get(phydev, &ecmd);
/* pause autoneg without link autoneg is invalid (error path elided) */
1607 if (pause->autoneg && !ecmd.base.autoneg) {
1612 dev->fc_request_control = 0;
1613 if (pause->rx_pause)
1614 dev->fc_request_control |= FLOW_CTRL_RX;
1616 if (pause->tx_pause)
1617 dev->fc_request_control |= FLOW_CTRL_TX;
1619 if (ecmd.base.autoneg) {
/* rebuild the Pause/Asym_Pause advertisement bits from the request */
1623 ethtool_convert_link_mode_to_legacy_u32(
1624 &advertising, ecmd.link_modes.advertising);
1626 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1627 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1628 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1630 ethtool_convert_legacy_u32_to_link_mode(
1631 ecmd.link_modes.advertising, advertising);
1633 phy_ethtool_ksettings_set(phydev, &ecmd);
1636 dev->fc_autoneg = pause->autoneg;
1643 static int lan78xx_get_regs_len(struct net_device *netdev)
1645 if (!netdev->phydev)
1646 return (sizeof(lan78xx_regs));
1648 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
/* ethtool: dump MAC registers listed in lan78xx_regs[], followed by the 32
 * standard PHY registers when a PHY is attached.
 */
1652 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1657 struct lan78xx_net *dev = netdev_priv(netdev);
1659 /* Read Device/MAC registers */
1660 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1661 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1663 if (!netdev->phydev)
1666 /* Read PHY registers */
/* i continues past the MAC block so PHY values land after it in data[] */
1667 for (j = 0; j < 32; i++, j++)
1668 data[i] = phy_read(netdev->phydev, j);
/* ethtool operation table wired into the net_device at probe time. */
1671 static const struct ethtool_ops lan78xx_ethtool_ops = {
1672 .get_link = lan78xx_get_link,
1673 .nway_reset = phy_ethtool_nway_reset,
1674 .get_drvinfo = lan78xx_get_drvinfo,
1675 .get_msglevel = lan78xx_get_msglevel,
1676 .set_msglevel = lan78xx_set_msglevel,
1677 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1678 .get_eeprom = lan78xx_ethtool_get_eeprom,
1679 .set_eeprom = lan78xx_ethtool_set_eeprom,
1680 .get_ethtool_stats = lan78xx_get_stats,
1681 .get_sset_count = lan78xx_get_sset_count,
1682 .get_strings = lan78xx_get_strings,
1683 .get_wol = lan78xx_get_wol,
1684 .set_wol = lan78xx_set_wol,
1685 .get_eee = lan78xx_get_eee,
1686 .set_eee = lan78xx_set_eee,
1687 .get_pauseparam = lan78xx_get_pause,
1688 .set_pauseparam = lan78xx_set_pause,
1689 .get_link_ksettings = lan78xx_get_link_ksettings,
1690 .set_link_ksettings = lan78xx_set_link_ksettings,
1691 .get_regs_len = lan78xx_get_regs_len,
1692 .get_regs = lan78xx_get_regs,
/* ndo_do_ioctl: pass MII ioctls through to the PHY; only valid while the
 * interface is up (error return for the down case elided by extraction).
 */
1695 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1697 if (!netif_running(netdev))
1700 return phy_mii_ioctl(netdev->phydev, rq, cmd);
/* Establish the device MAC address. Precedence: address already latched in
 * RX_ADDRL/H registers, then platform/Device Tree, then EEPROM/OTP, and
 * finally a random locally-administered address. The chosen address is
 * written back to the perfect-filter slot 0 and the net_device.
 */
1703 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1705 u32 addr_lo, addr_hi;
1709 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1710 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* unpack the little-endian register pair into a byte array */
1712 addr[0] = addr_lo & 0xFF;
1713 addr[1] = (addr_lo >> 8) & 0xFF;
1714 addr[2] = (addr_lo >> 16) & 0xFF;
1715 addr[3] = (addr_lo >> 24) & 0xFF;
1716 addr[4] = addr_hi & 0xFF;
1717 addr[5] = (addr_hi >> 8) & 0xFF;
1719 if (!is_valid_ether_addr(addr)) {
1720 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1721 /* valid address present in Device Tree */
1722 netif_dbg(dev, ifup, dev->net,
1723 "MAC address read from Device Tree");
1724 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1725 ETH_ALEN, addr) == 0) ||
1726 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1727 ETH_ALEN, addr) == 0)) &&
1728 is_valid_ether_addr(addr)) {
1729 /* eeprom values are valid so use them */
1730 netif_dbg(dev, ifup, dev->net,
1731 "MAC address read from EEPROM");
1733 /* generate random MAC */
1734 eth_random_addr(addr);
1735 netif_dbg(dev, ifup, dev->net,
1736 "MAC address set to random addr");
1739 addr_lo = addr[0] | (addr[1] << 8) |
1740 (addr[2] << 16) | (addr[3] << 24);
1741 addr_hi = addr[4] | (addr[5] << 8);
1743 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1744 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* also program perfect-filter slot 0 so unicast frames are accepted */
1747 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1748 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1750 ether_addr_copy(dev->net->dev_addr, addr);
1753 /* MDIO read and write wrappers for phylib */
/* Read one PHY register through the MAC's MII_ACC/MII_DATA interface.
 * phy_mutex serializes the busy-wait / command / busy-wait sequence;
 * autopm keeps the USB device awake during the transaction.
 */
1754 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1756 struct lan78xx_net *dev = bus->priv;
1760 ret = usb_autopm_get_interface(dev->intf);
1764 mutex_lock(&dev->phy_mutex);
1766 /* confirm MII not busy */
1767 ret = lan78xx_phy_wait_not_busy(dev);
1771 /* set the address, index & direction (read from PHY) */
1772 addr = mii_access(phy_id, idx, MII_READ);
1773 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1775 ret = lan78xx_phy_wait_not_busy(dev);
1779 ret = lan78xx_read_reg(dev, MII_DATA, &val);
/* PHY registers are 16 bits wide */
1781 ret = (int)(val & 0xFFFF);
1784 mutex_unlock(&dev->phy_mutex);
1785 usb_autopm_put_interface(dev->intf);
/* Write one PHY register: data is staged in MII_DATA, then the write is
 * triggered via MII_ACC. Locking/PM mirrors lan78xx_mdiobus_read().
 */
1790 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1793 struct lan78xx_net *dev = bus->priv;
1797 ret = usb_autopm_get_interface(dev->intf);
1801 mutex_lock(&dev->phy_mutex);
1803 /* confirm MII not busy */
1804 ret = lan78xx_phy_wait_not_busy(dev);
1809 ret = lan78xx_write_reg(dev, MII_DATA, val);
1811 /* set the address, index & direction (write to PHY) */
1812 addr = mii_access(phy_id, idx, MII_WRITE);
1813 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1815 ret = lan78xx_phy_wait_not_busy(dev);
1820 mutex_unlock(&dev->phy_mutex);
1821 usb_autopm_put_interface(dev->intf);
/* Allocate and register the MDIO bus backing the MAC's MII interface.
 * The probe mask depends on the chip: LAN7800/7850 have a fixed internal
 * PHY at address 1; LAN7801 scans external PHY addresses 0-7.
 */
1825 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1827 struct device_node *node;
1830 dev->mdiobus = mdiobus_alloc();
1831 if (!dev->mdiobus) {
1832 netdev_err(dev->net, "can't allocate MDIO bus\n");
1836 dev->mdiobus->priv = (void *)dev;
1837 dev->mdiobus->read = lan78xx_mdiobus_read;
1838 dev->mdiobus->write = lan78xx_mdiobus_write;
1839 dev->mdiobus->name = "lan78xx-mdiobus";
1840 dev->mdiobus->parent = &dev->udev->dev;
/* bus id derived from USB topology keeps it unique per device */
1842 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1843 dev->udev->bus->busnum, dev->udev->devnum);
1845 switch (dev->chipid) {
1846 case ID_REV_CHIP_ID_7800_:
1847 case ID_REV_CHIP_ID_7850_:
1848 /* set to internal PHY id */
1849 dev->mdiobus->phy_mask = ~(1 << 1);
1851 case ID_REV_CHIP_ID_7801_:
1852 /* scan thru PHYAD[2..0] */
1853 dev->mdiobus->phy_mask = ~(0xFF);
/* register against the optional "mdio" DT child node, if present */
1857 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1858 ret = of_mdiobus_register(dev->mdiobus, node);
1862 netdev_err(dev->net, "can't register MDIO bus\n");
1866 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* error path: free the bus allocated above */
1869 mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
1873 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1875 mdiobus_unregister(dev->mdiobus);
1876 mdiobus_free(dev->mdiobus);
/* phylib link-change callback; also applies a hardware workaround for
 * forced-100 mode (see comment below).
 */
1879 static void lan78xx_link_status_change(struct net_device *net)
1881 struct phy_device *phydev = net->phydev;
1884 /* At forced 100 F/H mode, chip may fail to set mode correctly
1885 * when cable is switched between long(~50+m) and short one.
1886 * As workaround, set to 10 before setting to 100
1887 * at forced 100 F/H mode.
1889 if (!phydev->autoneg && (phydev->speed == 100)) {
1890 /* disable phy interrupt */
/* masked so the intermediate 10->100 transitions don't raise events */
1891 temp = phy_read(phydev, LAN88XX_INT_MASK);
1892 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1893 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1895 temp = phy_read(phydev, MII_BMCR);
1896 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1897 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1898 temp |= BMCR_SPEED100;
1899 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1901 /* clear pending interrupt generated while workaround */
/* reading LAN88XX_INT_STS acknowledges any latched interrupt */
1902 temp = phy_read(phydev, LAN88XX_INT_STS);
1904 /* enable phy interrupt back */
1905 temp = phy_read(phydev, LAN88XX_INT_MASK);
1906 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1907 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
/* irq_domain .map: attach our irqchip and handler to a newly mapped virq. */
1911 static int irq_map(struct irq_domain *d, unsigned int irq,
1912 irq_hw_number_t hwirq)
1914 struct irq_domain_data *data = d->host_data;
1916 irq_set_chip_data(irq, data);
1917 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
/* never autoprobe a soft irq line */
1918 irq_set_noprobe(irq);
/* irq_domain .unmap: detach chip and handler from the virq. */
1923 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1925 irq_set_chip_and_handler(irq, NULL, NULL);
1926 irq_set_chip_data(irq, NULL);
/* irq_domain ops table (member initializers elided in this listing;
 * presumably .map = irq_map and .unmap = irq_unmap from above — confirm).
 */
1929 static const struct irq_domain_ops chip_domain_ops = {
/* irqchip .irq_mask: clear the enable bit in the cached INT_EP_CTL image;
 * the hardware write happens in irq_bus_sync_unlock().
 */
1934 static void lan78xx_irq_mask(struct irq_data *irqd)
1936 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1938 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
/* irqchip .irq_unmask: set the enable bit in the cached INT_EP_CTL image;
 * flushed to hardware in irq_bus_sync_unlock().
 */
1941 static void lan78xx_irq_unmask(struct irq_data *irqd)
1943 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1945 data->irqenable |= BIT(irqd_to_hwirq(irqd));
/* irqchip .irq_bus_lock: serialize slow-bus (USB) register updates. */
1948 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1950 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1952 mutex_lock(&data->irq_lock);
/* irqchip .irq_bus_sync_unlock: flush the cached enable mask to the
 * INT_EP_CTL register over USB, then release the bus lock.
 */
1955 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1957 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1958 struct lan78xx_net *dev =
1959 container_of(data, struct lan78xx_net, domain_data);
1963 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1964 * are only two callbacks executed in non-atomic context.
/* only write the register when the cached value actually changed */
1966 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1967 if (buf != data->irqenable)
1968 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1970 mutex_unlock(&data->irq_lock);
/* Software irqchip for interrupt sources reported via the device's
 * interrupt-IN endpoint (e.g. the PHY interrupt).
 */
1973 static struct irq_chip lan78xx_irqchip = {
1974 .name = "lan78xx-irqs",
1975 .irq_mask = lan78xx_irq_mask,
1976 .irq_unmask = lan78xx_irq_unmask,
1977 .irq_bus_lock = lan78xx_irq_bus_lock,
1978 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
/* Create the software irq domain and map the PHY interrupt so phylib can
 * use a real (soft) irq number instead of polling.
 */
1981 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1983 struct device_node *of_node;
1984 struct irq_domain *irqdomain;
1985 unsigned int irqmap = 0;
1989 of_node = dev->udev->dev.parent->of_node;
1991 mutex_init(&dev->domain_data.irq_lock);
/* seed the cached enable mask from the current hardware state */
1993 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1994 dev->domain_data.irqenable = buf;
1996 dev->domain_data.irqchip = &lan78xx_irqchip;
1997 dev->domain_data.irq_handler = handle_simple_irq;
1999 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2000 &chip_domain_ops, &dev->domain_data);
2002 /* create mapping for PHY interrupt */
2003 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
/* error path: tear the domain down again if mapping failed */
2005 irq_domain_remove(irqdomain);
2014 dev->domain_data.irqdomain = irqdomain;
2015 dev->domain_data.phyirq = irqmap;
/* Dispose of the PHY irq mapping and remove the soft irq domain. */
2020 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2022 if (dev->domain_data.phyirq > 0) {
2023 irq_dispose_mapping(dev->domain_data.phyirq);
2025 if (dev->domain_data.irqdomain)
2026 irq_domain_remove(dev->domain_data.irqdomain);
2028 dev->domain_data.phyirq = 0;
2029 dev->domain_data.irqdomain = NULL;
/* PHY fixup for external LAN8835 on LAN7801: route the multiplexed pin to
 * IRQ_N and configure RGMII TX clock delay on the MAC side.
 */
2032 static int lan8835_fixup(struct phy_device *phydev)
2036 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2038 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2039 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2042 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2044 /* RGMII MAC TXC Delay Enable */
2045 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2046 MAC_RGMII_ID_TXC_DELAY_EN_);
2048 /* RGMII TX DLL Tune Adjust */
2049 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2051 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
/* PHY fixup for external KSZ9031RNX on LAN7801: program RGMII pad skews so
 * the RX path provides the internal delay (RGMII_RXID).
 */
2056 static int ksz9031rnx_fixup(struct phy_device *phydev)
2058 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2060 /* Micrel9301RNX PHY configuration */
2061 /* RGMII Control Signal Pad Skew */
2062 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2063 /* RGMII RX Data Pad Skew */
2064 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2065 /* RGMII RX Clock Pad Skew */
2066 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2068 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* LAN7801-specific PHY discovery. Falls back to a fixed 1Gbps/full PHY
 * when no PHY answers on the bus (e.g. MAC-to-MAC wiring); otherwise
 * registers RGMII fixups for the supported external PHYs.
 */
2073 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2077 struct fixed_phy_status fphy_status = {
2079 .speed = SPEED_1000,
2080 .duplex = DUPLEX_FULL,
2082 struct phy_device *phydev;
2084 phydev = phy_find_first(dev->mdiobus);
2086 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2087 phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
2089 if (IS_ERR(phydev)) {
2090 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2093 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2094 dev->interface = PHY_INTERFACE_MODE_RGMII;
/* fixed-PHY path: enable MAC-side TXC delay and reference clocks */
2095 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2096 MAC_RGMII_ID_TXC_DELAY_EN_);
2097 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2098 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2099 buf |= HW_CFG_CLK125_EN_;
2100 buf |= HW_CFG_REFCLK25_EN_;
2101 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2104 netdev_err(dev->net, "no PHY driver found\n");
2107 dev->interface = PHY_INTERFACE_MODE_RGMII;
2108 /* external PHY fixup for KSZ9031RNX */
2109 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2112 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2115 /* external PHY fixup for LAN8835 */
2116 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2119 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2122 /* add more external PHY fixup here if needed */
2124 phydev->is_internal = false;
/* Locate and attach the PHY for this chip variant, wire up the link-change
 * callback, program flow-control advertisement, and apply optional Device
 * Tree EEE and LED configuration.
 */
2129 static int lan78xx_phy_init(struct lan78xx_net *dev)
2133 struct phy_device *phydev;
2135 switch (dev->chipid) {
2136 case ID_REV_CHIP_ID_7801_:
2137 phydev = lan7801_phy_init(dev);
2139 netdev_err(dev->net, "lan7801: PHY Init Failed");
2144 case ID_REV_CHIP_ID_7800_:
2145 case ID_REV_CHIP_ID_7850_:
2146 phydev = phy_find_first(dev->mdiobus);
2148 netdev_err(dev->net, "no PHY found\n");
2151 phydev->is_internal = true;
2152 dev->interface = PHY_INTERFACE_MODE_GMII;
2156 netdev_err(dev->net, "Unknown CHIP ID found\n");
2160 /* if phyirq is not set, use polling mode in phylib */
2161 if (dev->domain_data.phyirq > 0)
2162 phydev->irq = dev->domain_data.phyirq;
2165 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2167 /* set to AUTOMDIX */
2168 phydev->mdix = ETH_TP_MDI_AUTO;
2170 ret = phy_connect_direct(dev->net, phydev,
2171 lan78xx_link_status_change,
/* attach failure: undo fixed-PHY registration and fixups (7801 only) */
2174 netdev_err(dev->net, "can't attach PHY to %s\n",
2176 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2177 if (phy_is_pseudo_fixed_link(phydev)) {
2178 fixed_phy_unregister(phydev);
2180 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2182 phy_unregister_fixup_for_uid(PHY_LAN8835,
2189 /* MAC doesn't support 1000T Half */
2190 phydev->supported &= ~SUPPORTED_1000baseT_Half;
2192 /* support both flow controls */
2193 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2194 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2195 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2196 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
/* optional DT-driven EEE enable with configurable LPI timer */
2198 if (of_property_read_bool(phydev->mdio.dev.of_node,
2199 "microchip,eee-enabled")) {
2200 struct ethtool_eee edata;
2201 memset(&edata, 0, sizeof(edata));
2202 edata.cmd = ETHTOOL_SEEE;
2203 edata.advertised = ADVERTISED_1000baseT_Full |
2204 ADVERTISED_100baseT_Full;
2205 edata.eee_enabled = true;
2206 edata.tx_lpi_enabled = true;
2207 if (of_property_read_u32(dev->udev->dev.of_node,
2208 "microchip,tx-lpi-timer",
2209 &edata.tx_lpi_timer))
2210 edata.tx_lpi_timer = 600; /* non-aggressive */
2211 (void)lan78xx_set_eee(dev->net, &edata);
2214 if (phydev->mdio.dev.of_node) {
/* enable one LED per entry in the DT "microchip,led-modes" property */
2218 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2219 "microchip,led-modes",
2222 /* Ensure the appropriate LEDs are enabled */
/* NOTE(review): "®" below is a mojibake of "&reg" introduced by
 * extraction/encoding — restore from the pristine source.
 */
2223 lan78xx_read_reg(dev, HW_CFG, ®);
2224 reg &= ~(HW_CFG_LED0_EN_ |
2228 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2229 (len > 1) * HW_CFG_LED1_EN_ |
2230 (len > 2) * HW_CFG_LED2_EN_ |
2231 (len > 3) * HW_CFG_LED3_EN_;
2232 lan78xx_write_reg(dev, HW_CFG, reg);
2236 genphy_config_aneg(phydev);
2238 dev->fc_autoneg = phydev->autoneg;
/* Program the maximum accepted RX frame size. The receiver is disabled
 * around the update when it was running, then re-enabled.
 */
2243 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2249 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2251 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2254 buf &= ~MAC_RX_RXEN_;
2255 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2258 /* add 4 to size for FCS */
2259 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2260 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2262 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* restore the receiver only if it was enabled on entry */
2265 buf |= MAC_RX_RXEN_;
2266 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Unlink every in-flight URB on queue @q. Returns the number of URBs
 * unlinked. The queue lock is dropped around usb_unlink_urb() because the
 * URB completion handler also takes it (lock inversion otherwise).
 */
2272 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2274 struct sk_buff *skb;
2275 unsigned long flags;
2278 spin_lock_irqsave(&q->lock, flags);
2279 while (!skb_queue_empty(q)) {
2280 struct skb_data *entry;
/* find the next entry not yet in unlink_start state */
2284 skb_queue_walk(q, skb) {
2285 entry = (struct skb_data *)skb->cb;
2286 if (entry->state != unlink_start)
2291 entry->state = unlink_start;
2294 /* Get reference count of the URB to avoid it to be
2295 * freed during usb_unlink_urb, which may trigger
2296 * use-after-free problem inside usb_unlink_urb since
2297 * usb_unlink_urb is always racing with .complete
2298 * handler(include defer_bh).
2301 spin_unlock_irqrestore(&q->lock, flags);
2302 /* during some PM-driven resume scenarios,
2303 * these (async) unlinks complete immediately
2305 ret = usb_unlink_urb(urb);
2306 if (ret != -EINPROGRESS && ret != 0)
2307 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2311 spin_lock_irqsave(&q->lock, flags);
2313 spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: update the hardware max frame length and, if the RX URB
 * size grows, recycle in-flight RX URBs so new ones use the larger size.
 */
2317 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2319 struct lan78xx_net *dev = netdev_priv(netdev);
2320 int ll_mtu = new_mtu + netdev->hard_header_len;
2321 int old_hard_mtu = dev->hard_mtu;
2322 int old_rx_urb_size = dev->rx_urb_size;
2325 /* no second zero-length packet read wanted after mtu-sized packets */
2326 if ((ll_mtu % dev->maxpacket) == 0)
2329 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2331 netdev->mtu = new_mtu;
2333 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
/* rx_urb_size tracks hard_mtu only when it wasn't set independently */
2334 if (dev->rx_urb_size == old_hard_mtu) {
2335 dev->rx_urb_size = dev->hard_mtu;
2336 if (dev->rx_urb_size > old_rx_urb_size) {
2337 if (netif_running(dev->net)) {
2338 unlink_urbs(dev, &dev->rxq);
2339 tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: accept a new MAC only while the interface is down,
 * then program the RX address and perfect-filter slot 0.
 */
2347 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2349 struct lan78xx_net *dev = netdev_priv(netdev);
2350 struct sockaddr *addr = p;
2351 u32 addr_lo, addr_hi;
/* changing the address while running is refused (error return elided) */
2354 if (netif_running(netdev))
2357 if (!is_valid_ether_addr(addr->sa_data))
2358 return -EADDRNOTAVAIL;
2360 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2362 addr_lo = netdev->dev_addr[0] |
2363 netdev->dev_addr[1] << 8 |
2364 netdev->dev_addr[2] << 16 |
2365 netdev->dev_addr[3] << 24;
2366 addr_hi = netdev->dev_addr[4] |
2367 netdev->dev_addr[5] << 8;
2369 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2370 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2372 /* Added to support MAC address changes */
2373 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2374 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2379 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate netdev feature flags into RFE_CTL bits.
 * rfe_ctl_lock protects the cached value; the register write happens
 * outside the spinlock because USB access can sleep.
 */
2380 static int lan78xx_set_features(struct net_device *netdev,
2381 netdev_features_t features)
2383 struct lan78xx_net *dev = netdev_priv(netdev);
2384 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2385 unsigned long flags;
2388 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2390 if (features & NETIF_F_RXCSUM) {
2391 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2392 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2394 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2395 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2398 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2399 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2401 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2403 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2404 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2406 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2408 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2410 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* Workqueue handler: push the in-memory VLAN filter table to the device's
 * dataport (register access must happen in sleepable context).
 */
2415 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2417 struct lan78xx_priv *pdata =
2418 container_of(param, struct lan78xx_priv, set_vlan);
2419 struct lan78xx_net *dev = pdata->dev;
2421 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2422 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the VID's bit in the 128x32 filter bitmap and
 * defer the hardware update to a workqueue.
 */
2425 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2426 __be16 proto, u16 vid)
2428 struct lan78xx_net *dev = netdev_priv(netdev);
2429 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2431 u16 vid_dword_index;
/* bitmap addressing: word = vid / 32, bit = vid % 32 */
2433 vid_dword_index = (vid >> 5) & 0x7F;
2434 vid_bit_index = vid & 0x1F;
2436 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2438 /* defer register writes to a sleepable context */
2439 schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the VID's bit in the filter bitmap and defer
 * the hardware update, mirroring lan78xx_vlan_rx_add_vid().
 */
2444 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2445 __be16 proto, u16 vid)
2447 struct lan78xx_net *dev = netdev_priv(netdev);
2448 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2450 u16 vid_dword_index;
2452 vid_dword_index = (vid >> 5) & 0x7F;
2453 vid_bit_index = vid & 0x1F;
2455 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2457 /* defer register writes to a sleepable context */
2458 schedule_work(&pdata->set_vlan);
/* Initialize USB Latency Tolerance Messaging registers. When LTM is
 * enabled, values are loaded from EEPROM or OTP if a 24-byte LTM block is
 * present; otherwise the zeroed defaults are written.
 */
2463 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2467 u32 regs[6] = { 0 };
2469 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2470 if (buf & USB_CFG1_LTM_ENABLE_) {
2472 /* Get values from EEPROM first */
2473 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
/* temp[0] == 24: a full 6-register LTM block is stored */
2474 if (temp[0] == 24) {
2475 ret = lan78xx_read_raw_eeprom(dev,
2482 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2483 if (temp[0] == 24) {
2484 ret = lan78xx_read_raw_otp(dev,
2494 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2495 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2496 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2497 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2498 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2499 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device initialization: lite-reset the chip, then program MAC
 * address, USB/burst parameters, FIFOs, receive filtering, PHY reset and
 * finally enable the TX/RX datapaths.
 */
2502 static int lan78xx_reset(struct lan78xx_net *dev)
2504 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2507 unsigned long timeout;
/* probe for configuration sources; used below to pick safe defaults */
2512 has_eeprom = !lan78xx_read_eeprom(dev, 0, 0, NULL);
2513 has_otp = !lan78xx_read_otp(dev, 0, 0, NULL);
/* trigger a LiteReset and poll (1s timeout) for self-clear */
2515 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2516 buf |= HW_CFG_LRST_;
2517 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2519 timeout = jiffies + HZ;
2522 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2523 if (time_after(jiffies, timeout)) {
2524 netdev_warn(dev->net,
2525 "timeout on completion of LiteReset");
2528 } while (buf & HW_CFG_LRST_);
2530 lan78xx_init_mac_address(dev);
2532 /* save DEVID for later usage */
2533 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2534 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2535 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2537 /* Respond to the IN token with a NAK */
2538 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2539 buf |= USB_CFG_BIR_;
2540 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2543 lan78xx_init_ltm(dev);
/* size burst cap and queue lengths by negotiated USB speed */
2545 if (dev->udev->speed == USB_SPEED_SUPER) {
2546 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2547 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2550 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2551 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2552 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2553 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2554 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2556 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2557 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2562 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2563 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2565 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2567 /* If no valid EEPROM and no valid OTP, enable the LEDs by default */
2568 if (!has_eeprom && !has_otp)
2569 buf |= HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_;
2570 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* enable burst cap (BCE) */
2572 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2573 buf |= USB_CFG_BCE_;
2574 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2576 /* set FIFO sizes */
2577 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2578 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2580 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2581 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
/* clear pending interrupts and disable flow control initially */
2583 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2584 ret = lan78xx_write_reg(dev, FLOW, 0);
2585 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2587 /* Don't need rfe_ctl_lock during initialisation */
2588 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2589 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2590 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2592 /* Enable or disable checksum offload engines */
2593 lan78xx_set_features(dev->net, dev->net->features);
2595 lan78xx_set_multicast(dev->net);
/* reset the PHY and poll (1s timeout) until it reports ready */
2598 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2599 buf |= PMT_CTL_PHY_RST_;
2600 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2602 timeout = jiffies + HZ;
2605 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2606 if (time_after(jiffies, timeout)) {
2607 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2610 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2612 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2613 /* LAN7801 only has RGMII mode */
2614 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2615 buf &= ~MAC_CR_GMII_EN_;
2617 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2618 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2619 if (!ret && sig != EEPROM_INDICATOR) {
2620 /* Implies there is no external eeprom. Set mac speed */
2621 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2622 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2625 /* If no valid EEPROM and no valid OTP, enable AUTO negotiation */
2626 if (!has_eeprom && !has_otp)
2627 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2628 ret = lan78xx_write_reg(dev, MAC_CR, buf);
/* enable MAC and FIFO-controller TX/RX datapaths */
2630 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2631 buf |= MAC_TX_TXEN_;
2632 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2634 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2635 buf |= FCT_TX_CTL_EN_;
2636 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2638 ret = lan78xx_set_rx_max_frame_length(dev,
2639 dev->net->mtu + VLAN_ETH_HLEN);
2641 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2642 buf |= MAC_RX_RXEN_;
2643 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2645 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2646 buf |= FCT_RX_CTL_EN_;
2647 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* Prime the statistics rollover thresholds: 20-bit hardware counters get
 * the default max, byte/time counters are full 32-bit, then schedule the
 * first statistics refresh.
 */
2652 static void lan78xx_init_stats(struct lan78xx_net *dev)
2657 /* initialize for stats update
2658 * some counters are 20bits and some are 32bits
/* fill every rollover_max field with the (20-bit) default first */
2660 p = (u32 *)&dev->stats.rollover_max;
2661 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
/* then override the counters that are真 full 32-bit in hardware */
2664 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2665 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2666 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2667 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2668 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2669 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2670 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2671 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2672 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2673 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2675 set_bit(EVENT_STAT_UPDATE, &dev->flags);
/* ndo_open: wake the device (autopm), start the PHY, arm the interrupt
 * URB used for link-change notification, then open the TX queue and
 * kick a deferred link reset.
 */
2678 static int lan78xx_open(struct net_device *net)
2680 struct lan78xx_net *dev = netdev_priv(net);
/* Take a runtime-PM reference so the USB device stays resumed. */
2683 ret = usb_autopm_get_interface(dev->intf);
2687 phy_start(net->phydev);
2689 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2691 /* for Link Check */
2692 if (dev->urb_intr) {
2693 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
/* Submission failure is logged; error path lines are elided here. */
2695 netif_err(dev, ifup, dev->net,
2696 "intr submit %d\n", ret);
2701 lan78xx_init_stats(dev);
2703 set_bit(EVENT_DEV_OPEN, &dev->flags);
2705 netif_start_queue(net);
/* Force "link down" so the deferred worker re-evaluates link state. */
2707 dev->link_on = false;
2709 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
/* Balance the autopm reference taken above. */
2711 usb_autopm_put_interface(dev->intf);
/* Cancel all in-flight RX/TX URBs and poll until their completions have
 * drained.  Uses an on-stack waitqueue that the completion path wakes
 * through dev->wait.
 */
2717 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2719 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2720 DECLARE_WAITQUEUE(wait, current);
2723 /* ensure there are no more active urbs */
2724 add_wait_queue(&unlink_wakeup, &wait);
2725 set_current_state(TASK_UNINTERRUPTIBLE);
/* Publish the waitqueue so completions can wake us. */
2726 dev->wait = &unlink_wakeup;
2727 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2729 /* maybe wait for deletions to finish. */
/* NOTE(review): the && chain makes this loop exit as soon as ANY of the
 * three queues is empty, so it may stop waiting while URBs are still
 * outstanding on the others.  If the intent is "wait until rxq, txq and
 * done are all empty" this should be || — confirm against the analogous
 * usbnet terminate helper.
 */
2730 while (!skb_queue_empty(&dev->rxq) &&
2731 !skb_queue_empty(&dev->txq) &&
2732 !skb_queue_empty(&dev->done)) {
2733 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2734 set_current_state(TASK_UNINTERRUPTIBLE);
2735 netif_dbg(dev, ifdown, dev->net,
2736 "waited for %d urb completions\n", temp);
2738 set_current_state(TASK_RUNNING);
2740 remove_wait_queue(&unlink_wakeup, &wait);
/* ndo_stop: stop the stats timer and PHY, quiesce all URB traffic and
 * deferred work, then drop the runtime-PM reference held while open.
 */
2743 static int lan78xx_stop(struct net_device *net)
2745 struct lan78xx_net *dev = netdev_priv(net);
/* Stop the periodic statistics timer before tearing anything down. */
2747 if (timer_pending(&dev->stat_monitor))
2748 del_timer_sync(&dev->stat_monitor);
2751 phy_stop(net->phydev);
2753 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2754 netif_stop_queue(net);
2756 netif_info(dev, ifdown, dev->net,
2757 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2758 net->stats.rx_packets, net->stats.tx_packets,
2759 net->stats.rx_errors, net->stats.tx_errors);
/* Cancel and drain every outstanding bulk URB. */
2761 lan78xx_terminate_urbs(dev);
2763 usb_kill_urb(dev->urb_intr);
2765 skb_queue_purge(&dev->rxq_pause);
2767 /* deferred work (task, timer, softirq) must also stop.
2768 * can't flush_scheduled_work() until we drop rtnl (later),
2769 * else workers could deadlock; so make workers a NOP.
2772 cancel_delayed_work_sync(&dev->wq);
2773 tasklet_kill(&dev->bh);
/* Balance the autopm get taken in lan78xx_open(). */
2775 usb_autopm_put_interface(dev->intf);
/* Thin wrapper: flatten a paged skb into linear data so the TX command
 * words can be prepended by memcpy in lan78xx_tx_prep().
 */
2780 static int lan78xx_linearize(struct sk_buff *skb)
2782 return skb_linearize(skb);
/* Prepend the two 32-bit TX command words (TX_CMD_A/TX_CMD_B) to an
 * outgoing skb: frame length + FCS request, checksum-offload, LSO/MSS
 * and VLAN-insertion flags.  Returns the prepared skb, or NULL on
 * failure (skb freed).
 */
2785 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2786 struct sk_buff *skb, gfp_t flags)
2788 u32 tx_cmd_a, tx_cmd_b;
/* Make room for the two command words in the headroom. */
2790 if (skb_cow_head(skb, TX_OVERHEAD)) {
2791 dev_kfree_skb_any(skb);
2795 if (lan78xx_linearize(skb) < 0)
/* TX_CMD_A: payload length plus request for hardware FCS insertion. */
2798 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2800 if (skb->ip_summed == CHECKSUM_PARTIAL)
2801 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
/* LSO: encode MSS into TX_CMD_B (the non-GSO initialization of
 * tx_cmd_b is elided in this excerpt).
 */
2804 if (skb_is_gso(skb)) {
2805 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2807 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2809 tx_cmd_a |= TX_CMD_A_LSO_;
2812 if (skb_vlan_tag_present(skb)) {
2813 tx_cmd_a |= TX_CMD_A_IVTG_;
2814 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* Command words go on the wire little-endian, B then A pushed in front
 * (the skb_push calls are elided in this excerpt).
 */
2818 cpu_to_le32s(&tx_cmd_b);
2819 memcpy(skb->data, &tx_cmd_b, 4);
2822 cpu_to_le32s(&tx_cmd_a);
2823 memcpy(skb->data, &tx_cmd_a, 4);
/* Move a completed skb from its current queue onto dev->done and kick
 * the bottom-half tasklet if the done list was empty.  Returns the
 * skb's previous state so the caller can detect unlink races.
 */
2828 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2829 struct sk_buff_head *list, enum skb_state state)
2831 unsigned long flags;
2832 enum skb_state old_state;
2833 struct skb_data *entry = (struct skb_data *)skb->cb;
/* Split-lock pattern: irqs stay disabled across BOTH queue locks —
 * list->lock is dropped without restoring flags, and flags are only
 * restored when dev->done.lock is released.
 */
2835 spin_lock_irqsave(&list->lock, flags);
2836 old_state = entry->state;
2837 entry->state = state;
2839 __skb_unlink(skb, list);
2840 spin_unlock(&list->lock);
2841 spin_lock(&dev->done.lock);
2843 __skb_queue_tail(&dev->done, skb);
/* Only schedule the tasklet on the empty->non-empty transition. */
2844 if (skb_queue_len(&dev->done) == 1)
2845 tasklet_schedule(&dev->bh);
2846 spin_unlock_irqrestore(&dev->done.lock, flags);
/* Bulk-out URB completion: account TX stats (or classify the error),
 * release the async autopm reference taken at submit time, and hand the
 * skb to the bottom half via defer_bh().
 */
2851 static void tx_complete(struct urb *urb)
2853 struct sk_buff *skb = (struct sk_buff *)urb->context;
2854 struct skb_data *entry = (struct skb_data *)skb->cb;
2855 struct lan78xx_net *dev = entry->dev;
2857 if (urb->status == 0) {
2858 dev->net->stats.tx_packets += entry->num_of_packet;
2859 dev->net->stats.tx_bytes += entry->length;
2861 dev->net->stats.tx_errors++;
/* Error triage (several case labels are elided in this excerpt). */
2863 switch (urb->status) {
/* Endpoint stalled: let the deferred worker clear the halt. */
2865 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2868 /* software-driven interface shutdown */
/* Throttled/link errors stop the queue until recovery. */
2876 netif_stop_queue(dev->net);
2879 netif_dbg(dev, tx_err, dev->net,
2880 "tx err %d\n", entry->urb->status);
/* Drop the autopm reference taken when the URB was submitted. */
2885 usb_autopm_put_interface_async(dev->intf);
2887 defer_bh(dev, skb, &dev->txq, tx_done);
/* Append an skb to a queue and record its lifecycle state in the cb
 * area.  Caller must hold the queue lock (uses the unlocked __variant).
 */
2890 static void lan78xx_queue_skb(struct sk_buff_head *list,
2891 struct sk_buff *newsk, enum skb_state state)
2893 struct skb_data *entry = (struct skb_data *)newsk->cb;
2895 __skb_queue_tail(list, newsk);
2896 entry->state = state;
/* ndo_start_xmit: timestamp, prepend TX command words, park the frame
 * on txq_pend for the bottom half to batch into a bulk URB.  Always
 * returns NETDEV_TX_OK — on prep failure the frame is counted as
 * dropped instead of being requeued.
 */
2900 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2902 struct lan78xx_net *dev = netdev_priv(net);
2903 struct sk_buff *skb2 = NULL;
2906 skb_tx_timestamp(skb);
2907 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
/* Success path (the "if (skb2)" guard line is elided in this excerpt). */
2911 skb_queue_tail(&dev->txq_pend, skb2);
2913 /* throttle TX patch at slower than SUPER SPEED USB */
2914 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2915 (skb_queue_len(&dev->txq_pend) > 10))
2916 netif_stop_queue(net);
/* Prep failed: skb was already freed by lan78xx_tx_prep(). */
2918 netif_dbg(dev, tx_err, dev->net,
2919 "lan78xx_tx_prep return NULL\n");
2920 dev->net->stats.tx_errors++;
2921 dev->net->stats.tx_dropped++;
/* Bottom half drains txq_pend and submits the bulk-out URB. */
2924 tasklet_schedule(&dev->bh);
2926 return NETDEV_TX_OK;
/* Walk the interface's altsettings to locate the bulk-in, bulk-out and
 * interrupt-in endpoints, then derive dev->pipe_in/pipe_out and stash
 * the status endpoint.  Fails if no altsetting provides all of them.
 */
2930 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2933 struct usb_host_interface *alt = NULL;
2934 struct usb_host_endpoint *in = NULL, *out = NULL;
2935 struct usb_host_endpoint *status = NULL;
2937 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2943 alt = intf->altsetting + tmp;
2945 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2946 struct usb_host_endpoint *e;
2949 e = alt->endpoint + ep;
2950 switch (e->desc.bmAttributes) {
2951 case USB_ENDPOINT_XFER_INT:
/* Interrupt endpoint must be device-to-host. */
2952 if (!usb_endpoint_dir_in(&e->desc))
2956 case USB_ENDPOINT_XFER_BULK:
2961 if (usb_endpoint_dir_in(&e->desc)) {
2964 else if (intr && !status)
/* All three endpoints are required (assignment lines elided). */
2974 if (!alt || !in || !out)
2977 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2978 in->desc.bEndpointAddress &
2979 USB_ENDPOINT_NUMBER_MASK);
2980 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2981 out->desc.bEndpointAddress &
2982 USB_ENDPOINT_NUMBER_MASK);
2983 dev->ep_intr = status;
/* One-time driver/device binding: discover endpoints, allocate and wire
 * up private data (locks, deferred multicast/VLAN work), pick netdev
 * feature flags, set up the PHY IRQ domain, reset the chip and init
 * MDIO.  On failure the partially-initialized pieces are unwound in
 * reverse order (labels elided in this excerpt).
 */
2988 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2990 struct lan78xx_priv *pdata = NULL;
2994 ret = lan78xx_get_endpoints(dev, intf);
2996 netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
/* Private data is stashed as an unsigned long in dev->data[0]. */
3001 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3003 pdata = (struct lan78xx_priv *)(dev->data[0]);
3005 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3011 spin_lock_init(&pdata->rfe_ctl_lock);
3012 mutex_init(&pdata->dataport_mutex);
3014 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3016 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3017 pdata->vlan_table[i] = 0;
3019 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
/* Build the feature set from the compile-time defaults. */
3021 dev->net->features = 0;
3023 if (DEFAULT_TX_CSUM_ENABLE)
3024 dev->net->features |= NETIF_F_HW_CSUM;
3026 if (DEFAULT_RX_CSUM_ENABLE)
3027 dev->net->features |= NETIF_F_RXCSUM;
3029 if (DEFAULT_TSO_CSUM_ENABLE) {
3030 dev->net->features |= NETIF_F_SG;
3031 /* Use module parameter to control TCP segmentation offload as
3032 * it appears to cause issues.
3035 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6;
3038 if (DEFAULT_VLAN_RX_OFFLOAD)
3039 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3041 if (DEFAULT_VLAN_FILTER_ENABLE)
3042 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
/* Everything enabled by default is also user-toggleable. */
3044 dev->net->hw_features = dev->net->features;
3046 ret = lan78xx_setup_irq_domain(dev);
3048 netdev_warn(dev->net,
3049 "lan78xx_setup_irq_domain() failed : %d", ret);
/* Account for the prepended TX command words in the header length. */
3053 dev->net->hard_header_len += TX_OVERHEAD;
3054 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3056 /* Init all registers */
3057 ret = lan78xx_reset(dev);
3059 netdev_warn(dev->net, "Registers INIT FAILED....");
3063 ret = lan78xx_mdio_init(dev);
3065 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3069 dev->net->flags |= IFF_MULTICAST;
/* Default Wake-on-LAN policy: magic packet only. */
3071 pdata->wol = WAKE_MAGIC;
/* Error unwind path (labels elided in this excerpt). */
3076 lan78xx_remove_irq_domain(dev);
3079 netdev_warn(dev->net, "Bind routine FAILED");
3080 cancel_work_sync(&pdata->set_multicast);
3081 cancel_work_sync(&pdata->set_vlan);
/* Undo lan78xx_bind(): tear down the IRQ domain and MDIO bus, flush the
 * deferred multicast/VLAN writers, then free pdata (kfree elided in
 * this excerpt).
 */
3086 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3088 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3090 lan78xx_remove_irq_domain(dev);
3092 lan78xx_remove_mdio(dev);
3095 cancel_work_sync(&pdata->set_multicast);
3096 cancel_work_sync(&pdata->set_vlan);
3097 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply the hardware RX checksum result to the skb, falling back to
 * software checksumming (CHECKSUM_NONE) when offload is disabled, the
 * hardware flagged a checksum error (ICSM), or a VLAN tag is present
 * but not being stripped.
 */
3104 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3105 struct sk_buff *skb,
3106 u32 rx_cmd_a, u32 rx_cmd_b)
3108 /* HW Checksum offload appears to be flawed if used when not stripping
3109 * VLAN headers. Drop back to S/W checksums under these conditions.
3111 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3112 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3113 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3114 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3115 skb->ip_summed = CHECKSUM_NONE;
/* Hardware checksum lives in the upper 16 bits of RX_CMD_B. */
3117 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3118 skb->ip_summed = CHECKSUM_COMPLETE;
/* If VLAN RX offload is enabled and the hardware filtered a tag
 * (FVTG), attach the tag (low 16 bits of RX_CMD_B) to the skb.
 */
3122 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3123 struct sk_buff *skb,
3124 u32 rx_cmd_a, u32 rx_cmd_b)
3126 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3127 (rx_cmd_a & RX_CMD_A_FVTG_))
3128 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3129 (rx_cmd_b & 0xffff));
/* Hand a fully-parsed receive skb to the network stack, or park it on
 * rxq_pause while RX is paused.  Updates RX statistics and clears the
 * driver-private cb area before netif_rx().
 */
3132 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
/* While paused, queue for later replay instead of delivering now. */
3136 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3137 skb_queue_tail(&dev->rxq_pause, skb);
3141 dev->net->stats.rx_packets++;
3142 dev->net->stats.rx_bytes += skb->len;
3144 skb->protocol = eth_type_trans(skb, dev->net);
3146 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3147 skb->len + sizeof(struct ethhdr), skb->protocol);
/* Scrub skb_data state so the stack never sees stale driver info. */
3148 memset(skb->cb, 0, sizeof(struct skb_data));
3150 if (skb_defer_rx_timestamp(skb))
3153 status = netif_rx(skb);
3154 if (status != NET_RX_SUCCESS)
3155 netif_dbg(dev, rx_err, dev->net,
3156 "netif_rx status %d\n", status);
/* De-multiplex one bulk-in buffer that may contain several frames.
 * Each frame is preceded by RX_CMD_A (status/length), RX_CMD_B
 * (checksum/VLAN) and a 16-bit RX_CMD_C, and frames are padded to
 * 4-byte alignment.  The final frame reuses the URB's skb; earlier
 * frames are cloned.  Returns nonzero on success (per rx_process()
 * usage), 0 when the buffer is malformed.
 */
3159 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3161 if (skb->len < dev->net->hard_header_len)
3164 while (skb->len > 0) {
3165 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3167 struct sk_buff *skb2;
3168 unsigned char *packet;
/* Pull the three little-endian command words off the front. */
3170 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
3171 le32_to_cpus(&rx_cmd_a);
3172 skb_pull(skb, sizeof(rx_cmd_a));
3174 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
3175 le32_to_cpus(&rx_cmd_b);
3176 skb_pull(skb, sizeof(rx_cmd_b));
3178 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
3179 le16_to_cpus(&rx_cmd_c);
3180 skb_pull(skb, sizeof(rx_cmd_c));
3184 /* get the packet length */
3185 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
/* Inter-frame padding keeps each header 4-byte aligned. */
3186 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
/* RED flags a hardware receive error for this frame. */
3188 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3189 netif_dbg(dev, rx_err, dev->net,
3190 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3192 /* last frame in this batch */
3193 if (skb->len == size) {
3194 lan78xx_rx_csum_offload(dev, skb,
3195 rx_cmd_a, rx_cmd_b);
3196 lan78xx_rx_vlan_offload(dev, skb,
3197 rx_cmd_a, rx_cmd_b);
3199 skb_trim(skb, skb->len - 4); /* remove fcs */
3200 skb->truesize = size + sizeof(struct sk_buff);
/* Not the last frame: clone and carve out this one packet. */
3205 skb2 = skb_clone(skb, GFP_ATOMIC);
3206 if (unlikely(!skb2)) {
3207 netdev_warn(dev->net, "Error allocating skb");
3212 skb2->data = packet;
3213 skb_set_tail_pointer(skb2, size);
3215 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3216 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3218 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3219 skb2->truesize = size + sizeof(struct sk_buff);
3221 lan78xx_skb_return(dev, skb2);
/* Advance past this frame's payload and alignment padding. */
3224 skb_pull(skb, size);
3226 /* padding bytes before the next frame starts */
3228 skb_pull(skb, align_count);
/* Post-process a completed RX buffer: on parse failure count an error,
 * otherwise deliver the (single-frame) skb; on alloc pressure the skb
 * is requeued on dev->done for later cleanup.
 */
3234 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3236 if (!lan78xx_rx(dev, skb)) {
3237 dev->net->stats.rx_errors++;
3242 lan78xx_skb_return(dev, skb);
3246 netif_dbg(dev, rx_err, dev->net, "drop\n");
3247 dev->net->stats.rx_errors++;
/* Push back onto done so the bottom half frees it. */
3249 skb_queue_tail(&dev->done, skb);
3252 static void rx_complete(struct urb *urb);
/* Allocate an skb for one bulk-in URB and submit it, provided the
 * device is present, running, not halted and not asleep.  On any
 * non-retryable condition the skb is freed and the URB released by the
 * caller's error path.
 */
3254 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3256 struct sk_buff *skb;
3257 struct skb_data *entry;
3258 unsigned long lockflags;
3259 size_t size = dev->rx_urb_size;
3262 skb = netdev_alloc_skb(dev->net, size);
3268 entry = (struct skb_data *)skb->cb;
3273 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3274 skb->data, size, rx_complete, skb);
/* Submission is raced against ifdown/suspend under rxq.lock. */
3276 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3278 if (netif_device_present(dev->net) &&
3279 netif_running(dev->net) &&
3280 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3281 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3282 ret = usb_submit_urb(urb, GFP_ATOMIC);
/* Success: track the in-flight skb on rxq. */
3285 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
/* -EPIPE: endpoint stalled, clear it from process context. */
3288 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3291 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3292 netif_device_detach(dev->net);
/* Transient error: retry from the bottom half. */
3298 netif_dbg(dev, rx_err, dev->net,
3299 "rx submit, %d\n", ret);
3300 tasklet_schedule(&dev->bh);
3303 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3306 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
/* Failure path: the skb never went on a queue, free it here. */
3308 dev_kfree_skb_any(skb);
/* Bulk-in URB completion: classify the URB status, defer the skb to the
 * bottom half, and resubmit the URB unless the interface is stopping,
 * halted, or the skb was being unlinked.
 */
3314 static void rx_complete(struct urb *urb)
3316 struct sk_buff *skb = (struct sk_buff *)urb->context;
3317 struct skb_data *entry = (struct skb_data *)skb->cb;
3318 struct lan78xx_net *dev = entry->dev;
3319 int urb_status = urb->status;
3320 enum skb_state state;
/* Reflect however many bytes the controller actually transferred. */
3322 skb_put(skb, urb->actual_length);
/* Several case labels/state assignments are elided in this excerpt. */
3326 switch (urb_status) {
/* Success: sanity-check minimum frame size. */
3328 if (skb->len < dev->net->hard_header_len) {
3330 dev->net->stats.rx_errors++;
3331 dev->net->stats.rx_length_errors++;
3332 netif_dbg(dev, rx_err, dev->net,
3333 "rx length %d\n", skb->len);
3335 usb_mark_last_busy(dev->udev);
/* Stall: schedule halt clearing from process context. */
3338 dev->net->stats.rx_errors++;
3339 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3341 case -ECONNRESET: /* async unlink */
3342 case -ESHUTDOWN: /* hardware gone */
3343 netif_dbg(dev, ifdown, dev->net,
3344 "rx shutdown, code %d\n", urb_status);
3352 dev->net->stats.rx_errors++;
3358 /* data overrun ... flush fifo? */
3360 dev->net->stats.rx_over_errors++;
3365 dev->net->stats.rx_errors++;
3366 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
/* defer_bh() returns the pre-transition state to detect unlink races. */
3370 state = defer_bh(dev, skb, &dev->rxq, state);
3373 if (netif_running(dev->net) &&
3374 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3375 state != unlink_start) {
3376 rx_submit(dev, urb, GFP_ATOMIC);
3381 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: coalesce pending frames from txq_pend into one
 * contiguous buffer (each frame already carries its TX command words,
 * frames rounded up to 4-byte boundaries), then submit it as a single
 * bulk-out URB.  GSO frames are sent alone.  Handles autosuspend by
 * anchoring the URB on dev->deferred for transmission at resume.
 */
3384 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3387 struct urb *urb = NULL;
3388 struct skb_data *entry;
3389 unsigned long flags;
3390 struct sk_buff_head *tqp = &dev->txq_pend;
3391 struct sk_buff *skb, *skb2;
3394 int skb_totallen, pkt_cnt;
/* Pass 1 (under tqp->lock): decide how many frames to batch. */
3400 spin_lock_irqsave(&tqp->lock, flags);
3401 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3402 if (skb_is_gso(skb)) {
3404 /* handle previous packets first */
/* A GSO frame is pulled out on its own, bypassing coalescing. */
3408 length = skb->len - TX_OVERHEAD;
3409 __skb_unlink(skb, tqp);
3410 spin_unlock_irqrestore(&tqp->lock, flags);
/* Stop batching once the aggregate would exceed a single transfer. */
3414 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3416 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3419 spin_unlock_irqrestore(&tqp->lock, flags);
3421 /* copy to a single skb */
3422 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3426 skb_put(skb, skb_totallen);
/* Pass 2: copy each pending frame into the aggregate buffer. */
3428 for (count = pos = 0; count < pkt_cnt; count++) {
3429 skb2 = skb_dequeue(tqp);
3431 length += (skb2->len - TX_OVERHEAD);
3432 memcpy(skb->data + pos, skb2->data, skb2->len);
3433 pos += roundup(skb2->len, sizeof(u32));
3434 dev_kfree_skb(skb2);
3439 urb = usb_alloc_urb(0, GFP_ATOMIC);
3443 entry = (struct skb_data *)skb->cb;
3446 entry->length = length;
3447 entry->num_of_packet = count;
/* Autopm ref is released by tx_complete() or the error paths below. */
3449 spin_lock_irqsave(&dev->txq.lock, flags);
3450 ret = usb_autopm_get_interface_async(dev->intf);
3452 spin_unlock_irqrestore(&dev->txq.lock, flags);
3456 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3457 skb->data, skb->len, tx_complete, skb);
3459 if (length % dev->maxpacket == 0) {
3460 /* send USB_ZERO_PACKET */
3461 urb->transfer_flags |= URB_ZERO_PACKET;
3465 /* if this triggers the device is still a sleep */
3466 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3467 /* transmission will be done in resume */
3468 usb_anchor_urb(urb, &dev->deferred);
3469 /* no use to process more packets */
3470 netif_stop_queue(dev->net);
3472 spin_unlock_irqrestore(&dev->txq.lock, flags);
3473 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3478 ret = usb_submit_urb(urb, GFP_ATOMIC);
/* Success (case labels are elided in this excerpt). */
3481 netif_trans_update(dev->net);
3482 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3483 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3484 netif_stop_queue(dev->net);
/* -EPIPE: stall; stop the queue and clear halt from process context. */
3487 netif_stop_queue(dev->net);
3488 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3489 usb_autopm_put_interface_async(dev->intf);
/* Other errors: drop the autopm ref and log. */
3492 usb_autopm_put_interface_async(dev->intf);
3493 netif_dbg(dev, tx_err, dev->net,
3494 "tx: submit urb err %d\n", ret);
3498 spin_unlock_irqrestore(&dev->txq.lock, flags);
/* Drop path: account and free the aggregate skb. */
3501 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3503 dev->net->stats.tx_dropped++;
3505 dev_kfree_skb_any(skb);
3508 netif_dbg(dev, tx_queued, dev->net,
3509 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the pool of in-flight bulk-in URBs (up to 10
 * per invocation), rescheduling itself if rxq is still under target,
 * and wake the TX queue when there is room again.
 */
3512 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3517 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3518 for (i = 0; i < 10; i++) {
3519 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3521 urb = usb_alloc_urb(0, GFP_ATOMIC);
/* -ENOLINK means the device is gone: stop refilling. */
3523 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
/* Still short of rx_qlen: run again on the next tasklet pass. */
3527 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3528 tasklet_schedule(&dev->bh);
3530 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3531 netif_wake_queue(dev->net);
/* Main tasklet: drain dev->done — deliver completed RX skbs, free URBs
 * for finished states — then, if the device is up, restart the stats
 * timer cadence, flush pending TX and refill RX.
 */
3534 static void lan78xx_bh(unsigned long param)
3536 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3537 struct sk_buff *skb;
3538 struct skb_data *entry;
3540 while ((skb = skb_dequeue(&dev->done))) {
3541 entry = (struct skb_data *)(skb->cb);
/* Some case labels are elided in this excerpt. */
3542 switch (entry->state) {
/* rx_done: parse and deliver, then fall into cleanup. */
3544 entry->state = rx_cleanup;
3545 rx_process(dev, skb);
3548 usb_free_urb(entry->urb);
3552 usb_free_urb(entry->urb);
3556 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3561 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3562 /* reset update timer delta */
3563 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3565 mod_timer(&dev->stat_monitor,
3566 jiffies + STAT_UPDATE_TIMER);
/* Pending TX frames and RX refill (call lines elided). */
3569 if (!skb_queue_empty(&dev->txq_pend))
3572 if (!timer_pending(&dev->delay) &&
3573 !test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred-work handler: runs the slow-path recovery actions flagged by
 * lan78xx_defer_kevent() — clearing TX/RX endpoint halts, link reset,
 * and periodic statistics refresh — from process context where blocking
 * USB control transfers are allowed.
 */
3578 static void lan78xx_delayedwork(struct work_struct *work)
3581 struct lan78xx_net *dev;
3583 dev = container_of(work, struct lan78xx_net, wq.work);
/* TX endpoint stalled: unlink, clear the halt, then wake the queue. */
3585 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3586 unlink_urbs(dev, &dev->txq);
3587 status = usb_autopm_get_interface(dev->intf);
3590 status = usb_clear_halt(dev->udev, dev->pipe_out);
3591 usb_autopm_put_interface(dev->intf);
3594 status != -ESHUTDOWN) {
3595 if (netif_msg_tx_err(dev))
3597 netdev_err(dev->net,
3598 "can't clear tx halt, status %d\n",
3601 clear_bit(EVENT_TX_HALT, &dev->flags);
3602 if (status != -ESHUTDOWN)
3603 netif_wake_queue(dev->net);
/* RX endpoint stalled: same recovery, then restart the bottom half. */
3606 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3607 unlink_urbs(dev, &dev->rxq);
3608 status = usb_autopm_get_interface(dev->intf);
3611 status = usb_clear_halt(dev->udev, dev->pipe_in);
3612 usb_autopm_put_interface(dev->intf);
3615 status != -ESHUTDOWN) {
3616 if (netif_msg_rx_err(dev))
3618 netdev_err(dev->net,
3619 "can't clear rx halt, status %d\n",
3622 clear_bit(EVENT_RX_HALT, &dev->flags);
3623 tasklet_schedule(&dev->bh);
/* Link change: re-run link negotiation bookkeeping. */
3627 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3630 clear_bit(EVENT_LINK_RESET, &dev->flags);
3631 status = usb_autopm_get_interface(dev->intf);
3634 if (lan78xx_link_reset(dev) < 0) {
3635 usb_autopm_put_interface(dev->intf);
3637 netdev_info(dev->net, "link reset failed (%d)\n",
3640 usb_autopm_put_interface(dev->intf);
/* Stats: refresh counters and back off the poll interval (cap x50). */
3644 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3645 lan78xx_update_stats(dev);
3647 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3649 mod_timer(&dev->stat_monitor,
3650 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3652 dev->delta = min((dev->delta * 2), 50);
/* Interrupt-in URB completion: feed status data to lan78xx_status(),
 * then resubmit the URB (zeroing the buffer first) while the interface
 * is running.  No throttling — the endpoint polls infrequently.
 */
3656 static void intr_complete(struct urb *urb)
3658 struct lan78xx_net *dev = urb->context;
3659 int status = urb->status;
/* Success case (case label elided in this excerpt). */
3664 lan78xx_status(dev, urb);
3667 /* software-driven interface shutdown */
3668 case -ENOENT: /* urb killed */
3669 case -ESHUTDOWN: /* hardware gone */
3670 netif_dbg(dev, ifdown, dev->net,
3671 "intr shutdown, code %d\n", status);
3674 /* NOTE: not throttling like RX/TX, since this endpoint
3675 * already polls infrequently
3678 netdev_dbg(dev->net, "intr status %d\n", status);
3682 if (!netif_running(dev->net))
/* Clear stale status bytes before the next poll. */
3685 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3686 status = usb_submit_urb(urb, GFP_ATOMIC);
3688 netif_err(dev, timer, dev->net,
3689 "intr resubmit --> %d\n", status);
/* USB disconnect: unhook the PHY (including fixed-link and the fixups
 * registered at probe), unregister the netdev, stop deferred work and
 * anchored URBs, unbind, and release the interrupt URB.
 */
3692 static void lan78xx_disconnect(struct usb_interface *intf)
3694 struct lan78xx_net *dev;
3695 struct usb_device *udev;
3696 struct net_device *net;
3697 struct phy_device *phydev;
3699 dev = usb_get_intfdata(intf);
3700 usb_set_intfdata(intf, NULL);
3704 udev = interface_to_usbdev(intf);
3706 phydev = net->phydev;
/* Remove the PHY fixups installed during probe/phy_init. */
3708 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3709 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3711 phy_disconnect(net->phydev);
3713 if (phy_is_pseudo_fixed_link(phydev))
3714 fixed_phy_unregister(phydev);
3716 unregister_netdev(net);
3718 cancel_delayed_work_sync(&dev->wq);
/* Free any TX URBs deferred during autosuspend. */
3720 usb_scuttle_anchored_urbs(&dev->deferred);
3722 lan78xx_unbind(dev, intf);
3724 usb_kill_urb(dev->urb_intr);
3725 usb_free_urb(dev->urb_intr);
/* ndo_tx_timeout: cancel all in-flight TX URBs and let the bottom half
 * restart transmission.
 */
3731 static void lan78xx_tx_timeout(struct net_device *net)
3733 struct lan78xx_net *dev = netdev_priv(net);
3735 unlink_urbs(dev, &dev->txq);
3736 tasklet_schedule(&dev->bh);
/* net_device_ops vtable wiring the stack's callbacks to this driver. */
3739 static const struct net_device_ops lan78xx_netdev_ops = {
3740 .ndo_open = lan78xx_open,
3741 .ndo_stop = lan78xx_stop,
3742 .ndo_start_xmit = lan78xx_start_xmit,
3743 .ndo_tx_timeout = lan78xx_tx_timeout,
3744 .ndo_change_mtu = lan78xx_change_mtu,
3745 .ndo_set_mac_address = lan78xx_set_mac_addr,
3746 .ndo_validate_addr = eth_validate_addr,
3747 .ndo_do_ioctl = lan78xx_ioctl,
3748 .ndo_set_rx_mode = lan78xx_set_multicast,
3749 .ndo_set_features = lan78xx_set_features,
3750 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3751 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
/* Stats timer callback: defer the actual (blocking) counter read to the
 * workqueue via EVENT_STAT_UPDATE.
 */
3754 static void lan78xx_stat_monitor(struct timer_list *t)
3756 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3758 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* USB probe: allocate the netdev and driver state, bind to the device,
 * resolve endpoints/pipes, set up the interrupt URB, initialize the PHY
 * and register the netdev.  Errors unwind via the labels elided in this
 * excerpt.
 */
3761 static int lan78xx_probe(struct usb_interface *intf,
3762 const struct usb_device_id *id)
3764 struct lan78xx_net *dev;
3765 struct net_device *netdev;
3766 struct usb_device *udev;
3772 udev = interface_to_usbdev(intf);
/* Hold a reference on the USB device for the netdev's lifetime. */
3773 udev = usb_get_dev(udev);
3775 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3777 dev_err(&intf->dev, "Error: OOM\n");
3782 /* netdev_printk() needs this */
3783 SET_NETDEV_DEV(netdev, &intf->dev);
3785 dev = netdev_priv(netdev);
3789 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3790 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
/* Per-device queues, locks and deferred-work plumbing. */
3792 skb_queue_head_init(&dev->rxq);
3793 skb_queue_head_init(&dev->txq);
3794 skb_queue_head_init(&dev->done);
3795 skb_queue_head_init(&dev->rxq_pause);
3796 skb_queue_head_init(&dev->txq_pend);
3797 mutex_init(&dev->phy_mutex);
3799 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3800 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3801 init_usb_anchor(&dev->deferred);
3803 netdev->netdev_ops = &lan78xx_netdev_ops;
3804 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3805 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3808 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3810 mutex_init(&dev->stats.access_lock);
3812 ret = lan78xx_bind(dev, intf);
3815 strcpy(netdev->name, "eth%d");
/* Clamp MTU so frame + header never exceeds what bind() computed. */
3817 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3818 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3820 /* MTU range: 68 - 9000 */
3821 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
/* NOTE(review): endpoints are taken by fixed index from the current
 * altsetting without validating bNumEndpoints — confirm against the
 * upstream hardening of this probe path.
 */
3823 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3824 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3825 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3827 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3828 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3830 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3831 dev->ep_intr->desc.bEndpointAddress &
3832 USB_ENDPOINT_NUMBER_MASK);
/* Module parameter may override the descriptor's polling interval. */
3833 if (int_urb_interval_ms <= 0)
3834 period = dev->ep_intr->desc.bInterval;
3836 period = int_urb_interval_ms * INT_URB_MICROFRAMES_PER_MS;
3838 netif_notice(dev, probe, netdev, "int urb period %d\n", period);
3840 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3841 buf = kmalloc(maxp, GFP_KERNEL);
3843 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3844 if (!dev->urb_intr) {
3849 usb_fill_int_urb(dev->urb_intr, dev->udev,
3850 dev->pipe_intr, buf, maxp,
3851 intr_complete, dev, period);
3855 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3857 /* driver requires remote-wakeup capability during autosuspend. */
3858 intf->needs_remote_wakeup = 1;
3860 ret = lan78xx_phy_init(dev);
3864 ret = register_netdev(netdev);
3866 netif_err(dev, probe, netdev, "couldn't register the device\n");
3870 usb_set_intfdata(intf, dev);
3872 ret = device_set_wakeup_enable(&udev->dev, true);
3874 /* Default delay of 2sec has more overhead than advantage.
3875 * Set to 10sec as default.
3877 pm_runtime_set_autosuspend_delay(&udev->dev,
3878 DEFAULT_AUTOSUSPEND_DELAY);
/* Error unwind (labels elided in this excerpt). */
3883 phy_disconnect(netdev->phydev);
3885 usb_free_urb(dev->urb_intr);
3887 lan78xx_unbind(dev, intf);
3889 free_netdev(netdev);
/* Compute the 16-bit CRC (polynomial 0x8005, bit-serial, LSB-first data
 * per the "data & 1" test) over a wake-frame pattern, as required by
 * the WUF_CFGX_CRC16 field.  Several lines (accumulator init, shifts,
 * return) are elided in this excerpt.
 */
3896 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3898 const u16 crc16poly = 0x8005;
3904 for (i = 0; i < len; i++) {
3906 for (bit = 0; bit < 8; bit++) {
/* XOR of the CRC MSB and the current data bit selects poly feedback. */
3910 if (msb ^ (u16)(data & 1)) {
3912 crc |= (u16)0x0001U;
/* Program the chip for system suspend with the requested Wake-on-LAN
 * triggers: disables the MAC, clears stale wake status, builds WUF_CFG/
 * WUF_MASK wakeup-filter entries (IPv4/IPv6 multicast, ARP), selects
 * the PMT suspend mode per trigger, then re-enables RX so wake frames
 * can be seen.
 */
3921 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* First bytes of the patterns the wakeup filters match on. */
3929 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3930 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3931 const u8 arp_type[2] = { 0x08, 0x06 };
/* Quiesce the MAC in both directions before reprogramming wake logic. */
3933 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3934 buf &= ~MAC_TX_TXEN_;
3935 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3936 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3937 buf &= ~MAC_RX_RXEN_;
3938 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Clear wake controls and latch away any stale wake-source status. */
3940 ret = lan78xx_write_reg(dev, WUCSR, 0);
3941 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3942 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3947 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3948 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3949 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* Start from a clean slate of wakeup-filter slots. */
3951 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3952 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3955 if (wol & WAKE_PHY) {
3956 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3958 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3959 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3960 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3962 if (wol & WAKE_MAGIC) {
3963 temp_wucsr |= WUCSR_MPEN_;
3965 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3966 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
/* Magic-packet wake uses the deeper SUS_MODE_3. */
3967 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3969 if (wol & WAKE_BCAST) {
3970 temp_wucsr |= WUCSR_BCST_EN_;
3972 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3973 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3974 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3976 if (wol & WAKE_MCAST) {
3977 temp_wucsr |= WUCSR_WAKE_EN_;
3979 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3980 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3981 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3983 WUF_CFGX_TYPE_MCAST_ |
3984 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3985 (crc & WUF_CFGX_CRC16_MASK_));
/* Mask value 7 = match the first three bytes of the frame. */
3987 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3988 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3989 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3990 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3993 /* for IPv6 Multicast */
3994 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3995 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3997 WUF_CFGX_TYPE_MCAST_ |
3998 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3999 (crc & WUF_CFGX_CRC16_MASK_));
/* Mask value 3 = match the first two bytes (33:33 prefix). */
4001 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4002 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4003 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4004 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4007 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4008 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4009 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4011 if (wol & WAKE_UCAST) {
4012 temp_wucsr |= WUCSR_PFDA_EN_;
4014 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4015 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4016 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4018 if (wol & WAKE_ARP) {
4019 temp_wucsr |= WUCSR_WAKE_EN_;
4021 /* set WUF_CFG & WUF_MASK
4022 * for packettype (offset 12,13) = ARP (0x0806)
4024 crc = lan78xx_wakeframe_crc16(arp_type, 2);
4025 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4027 WUF_CFGX_TYPE_ALL_ |
4028 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4029 (crc & WUF_CFGX_CRC16_MASK_));
/* 0x3000 selects bytes 12-13 (the EtherType field). */
4031 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4032 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4033 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4034 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4037 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4038 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4039 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4042 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4044 /* when multiple WOL bits are set */
4045 if (hweight_long((unsigned long)wol) > 1) {
4046 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4047 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4048 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4050 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* Write-1-to-clear any pending wake-up status bits. */
4053 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4054 buf |= PMT_CTL_WUPS_MASK_;
4055 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Re-enable RX so the MAC can observe the configured wake frames. */
4057 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4058 buf |= MAC_RX_RXEN_;
4059 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* USB suspend handler.  On the first suspend: refuse autosuspend while
 * TX is in flight, quiesce the MAC and URB traffic, and detach the
 * netdev.  Then, for autosuspend, arm good-frame/PHY wake in SUS_MODE_3
 * with RX left enabled; for system suspend, delegate WoL programming to
 * lan78xx_set_suspend() using the user-configured pdata->wol.
 */
4064 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4066 struct lan78xx_net *dev = usb_get_intfdata(intf);
4067 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
4072 event = message.event;
/* Only the first of possibly-nested suspends does the teardown. */
4074 if (!dev->suspend_count++) {
4075 spin_lock_irq(&dev->txq.lock);
4076 /* don't autosuspend while transmitting */
4077 if ((skb_queue_len(&dev->txq) ||
4078 skb_queue_len(&dev->txq_pend)) &&
4079 PMSG_IS_AUTO(message)) {
4080 spin_unlock_irq(&dev->txq.lock);
/* Mark asleep so lan78xx_tx_bh defers new URBs to resume. */
4084 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4085 spin_unlock_irq(&dev->txq.lock);
/* Stop the MAC in both directions. */
4089 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4090 buf &= ~MAC_TX_TXEN_;
4091 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4092 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4093 buf &= ~MAC_RX_RXEN_;
4094 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4096 /* empty out the rx and queues */
4097 netif_device_detach(dev->net);
4098 lan78xx_terminate_urbs(dev);
4099 usb_kill_urb(dev->urb_intr);
/* Reattach (condition line elided in this excerpt). */
4102 netif_device_attach(dev->net);
4105 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4106 del_timer(&dev->stat_monitor);
4108 if (PMSG_IS_AUTO(message)) {
4109 /* auto suspend (selective suspend) */
4110 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4111 buf &= ~MAC_TX_TXEN_;
4112 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4113 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4114 buf &= ~MAC_RX_RXEN_;
4115 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Reset wake state and latch away stale wake sources. */
4117 ret = lan78xx_write_reg(dev, WUCSR, 0);
4118 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4119 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4121 /* set goodframe wakeup */
4122 ret = lan78xx_read_reg(dev, WUCSR, &buf);
4124 buf |= WUCSR_RFE_WAKE_EN_;
4125 buf |= WUCSR_STORE_WAKE_;
4127 ret = lan78xx_write_reg(dev, WUCSR, buf);
4129 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4131 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4132 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4134 buf |= PMT_CTL_PHY_WAKE_EN_;
4135 buf |= PMT_CTL_WOL_EN_;
4136 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4137 buf |= PMT_CTL_SUS_MODE_3_;
4139 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Write-1-to-clear pending wake-up status. */
4141 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4143 buf |= PMT_CTL_WUPS_MASK_;
4145 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Keep RX enabled so good frames can wake the device. */
4147 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4148 buf |= MAC_RX_RXEN_;
4149 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* System suspend: program the user's WoL selection. */
4151 lan78xx_set_suspend(dev, pdata->wol);
/* lan78xx_resume() — USB resume callback: restart the stats timer,
 * resubmit URBs deferred while asleep, disarm the wake logic and
 * re-enable the MAC transmitter.  View is elided: opening brace, local
 * declarations (struct urb *res; u32 buf; int ret; …) and the function
 * epilogue are on lines not shown here. */
4160 static int lan78xx_resume(struct usb_interface *intf)
4162 struct lan78xx_net *dev = usb_get_intfdata(intf);
4163 struct sk_buff *skb;
4168 if (!timer_pending(&dev->stat_monitor)) {
4170 mod_timer(&dev->stat_monitor,
4171 jiffies + STAT_UPDATE_TIMER);
/* Undo the outermost suspend only (count returns to 0). */
4174 if (!--dev->suspend_count) {
4175 /* resume interrupt URBs */
4176 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4177 usb_submit_urb(dev->urb_intr, GFP_NOIO);
/* Resubmit tx URBs that were anchored to dev->deferred while the
 * device was asleep. */
4179 spin_lock_irq(&dev->txq.lock);
4180 while ((res = usb_get_from_anchor(&dev->deferred))) {
4181 skb = (struct sk_buff *)res->context;
4182 ret = usb_submit_urb(res, GFP_ATOMIC);
/* (lines elided — on submit failure, drop the skb and release the
 * autopm reference; otherwise account it on the tx queue) */
4184 dev_kfree_skb_any(skb);
4186 usb_autopm_put_interface_async(dev->intf);
4188 netif_trans_update(dev->net);
4189 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4193 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4194 spin_unlock_irq(&dev->txq.lock);
4196 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
/* Restart the tx queue only if there is room for more skbs. */
4197 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
4198 netif_start_queue(dev->net);
4199 tasklet_schedule(&dev->bh);
/* Disarm wake sources and clear latched wake status. */
4203 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4204 ret = lan78xx_write_reg(dev, WUCSR, 0);
4205 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
/* Write-1-to-clear the "wake event received" status bits.
 * NOTE(review): ret from these register accessors is never checked. */
4207 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4209 WUCSR2_IPV6_TCPSYN_RCD_ |
4210 WUCSR2_IPV4_TCPSYN_RCD_);
4212 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4213 WUCSR_EEE_RX_WAKE_ |
4215 WUCSR_RFE_WAKE_FR_ |
/* Re-enable the MAC transmitter (receiver was left enabled by the
 * suspend path so wake frames could be matched). */
4220 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4221 buf |= MAC_TX_TXEN_;
4222 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* lan78xx_reset_resume() — resume after the device was reset while
 * suspended, so its state cannot be trusted: restart the PHY state
 * machine and then run the normal resume path.  The elided lines
 * between these statements presumably perform the hardware
 * re-initialization — confirm against the full source. */
4227 static int lan78xx_reset_resume(struct usb_interface *intf)
4229 struct lan78xx_net *dev = usb_get_intfdata(intf);
4233 phy_start(dev->net->phydev);
4235 return lan78xx_resume(intf);
/* USB VID/PID match table — binds this driver to the three Microchip
 * LAN78xx device variants.  (Entry braces and the terminating empty
 * entry are on elided lines.) */
4238 static const struct usb_device_id products[] = {
4240 /* LAN7800 USB Gigabit Ethernet Device */
4241 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4244 /* LAN7850 USB Gigabit Ethernet Device */
4245 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4248 /* LAN7801 USB Gigabit Ethernet Device */
4249 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
/* Export the table so hotplug/modprobe can autoload the module. */
4253 MODULE_DEVICE_TABLE(usb, products);
/* USB driver registration: wires the probe/disconnect and power
 * management callbacks defined above into the USB core.
 * module_usb_driver() generates the module init/exit boilerplate. */
4255 static struct usb_driver lan78xx_driver = {
4256 .name = DRIVER_NAME,
4257 .id_table = products,
4258 .probe = lan78xx_probe,
4259 .disconnect = lan78xx_disconnect,
4260 .suspend = lan78xx_suspend,
4261 .resume = lan78xx_resume,
4262 .reset_resume = lan78xx_reset_resume,
/* Allow runtime PM; the suspend callback vetoes it while transmitting. */
4263 .supports_autosuspend = 1,
/* Hub-initiated LPM is disabled for this device. */
4264 .disable_hub_initiated_lpm = 1,
4267 module_usb_driver(lan78xx_driver);
4269 MODULE_AUTHOR(DRIVER_AUTHOR);
4270 MODULE_DESCRIPTION(DRIVER_DESC);
4271 MODULE_LICENSE("GPL");