2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy.h>
/* ---- driver identification ---- */
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx"
#define DRIVER_VERSION "1.0.6"

/* TX watchdog / RX throttle / URB unlink timing */
#define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8)
#define UNLINK_TIMEOUT_MS 3

/* cap on queued RX memory (~60 maximum-size Ethernet frames) */
#define RX_MAX_QUEUE_MEMORY (60 * 1518)

/* USB bulk max packet size for Super/High/Full speed */
#define SS_USB_PKT_SIZE (1024)
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)

/* device FIFO sizing and default feature/tuning values */
#define MAX_RX_FIFO_SIZE (12 * 1024)
#define MAX_TX_FIFO_SIZE (12 * 1024)
#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY (0x0800)
#define MAX_SINGLE_PACKET_SIZE (9000)
#define DEFAULT_TX_CSUM_ENABLE (true)
#define DEFAULT_RX_CSUM_ENABLE (true)
#define DEFAULT_TSO_CSUM_ENABLE (true)
#define DEFAULT_VLAN_FILTER_ENABLE (true)
#define TX_OVERHEAD (8)

/* USB vendor/product IDs and ethtool EEPROM/OTP magic values */
#define LAN78XX_USB_VENDOR_ID (0x0424)
#define LAN7800_USB_PRODUCT_ID (0x7800)
#define LAN7850_USB_PRODUCT_ID (0x7850)
#define LAN7801_USB_PRODUCT_ID (0x7801)
#define LAN78XX_EEPROM_MAGIC (0x78A5)
#define LAN78XX_OTP_MAGIC (0x78F3)

/* on-media indicator bytes that mark a programmed EEPROM/OTP image */
#define EEPROM_INDICATOR (0xA5)
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE 512
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)

/* every wake-on-LAN trigger the hardware supports */
#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
		  WAKE_MCAST | WAKE_BCAST | \
		  WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE 1
#define BULK_OUT_PIPE 2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER (1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP (32)
#define INT_EP_INTEP (31)
#define INT_EP_OTP_WR_DONE (28)
#define INT_EP_EEE_TX_LPI_START (26)
#define INT_EP_EEE_TX_LPI_STOP (25)
#define INT_EP_EEE_RX_LPI (24)
#define INT_EP_MAC_RESET_TIMEOUT (23)
#define INT_EP_RDFO (22)
#define INT_EP_TXE (21)
#define INT_EP_USB_STATUS (20)
#define INT_EP_TX_DIS (19)
#define INT_EP_RX_DIS (18)
#define INT_EP_PHY (17)
#define INT_EP_DP (16)
#define INT_EP_MAC_ERR (15)
#define INT_EP_TDFU (14)
#define INT_EP_TDFO (13)
#define INT_EP_UTX (12)
#define INT_EP_GPIO_11 (11)
#define INT_EP_GPIO_10 (10)
#define INT_EP_GPIO_9 (9)
#define INT_EP_GPIO_8 (8)
#define INT_EP_GPIO_7 (7)
#define INT_EP_GPIO_6 (6)
#define INT_EP_GPIO_5 (5)
#define INT_EP_GPIO_4 (4)
#define INT_EP_GPIO_3 (3)
#define INT_EP_GPIO_2 (2)
#define INT_EP_GPIO_1 (1)
#define INT_EP_GPIO_0 (0)
/* ethtool statistics names; ordering must match struct lan78xx_statstage
 * field order, since lan78xx_get_stats() memcpy()s the stat struct out.
 * NOTE(review): several entries and the closing "};" are missing from this
 * excerpt — confirm against the full file before relying on the order.
 * NOTE(review): "Rx Fragment Errors" uses lowercase "x" unlike its siblings;
 * it is a user-visible string, so left untouched here.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"TX Excess Deferral Errors",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
/* Raw 32-bit hardware statistics block, read from the device in one
 * USB_VENDOR_REQUEST_GET_STATS transfer; field order mirrors the hardware
 * layout and the lan78xx_gstrings table.
 * NOTE(review): some fields and the closing "};" are missing from this
 * excerpt.
 */
struct lan78xx_statstage {
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
/* 64-bit accumulated statistics: same field order as lan78xx_statstage,
 * widened so rollover-corrected totals (see lan78xx_update_stats()) never
 * wrap. NOTE(review): some fields and the closing "};" are missing from
 * this excerpt.
 */
struct lan78xx_statstage64 {
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
/* Per-device private data hung off dev->data[0]; holds the receive-filter
 * shadow tables and the deferred-work items that flush them to hardware.
 * NOTE(review): additional fields (e.g. rfe_ctl, wol) and the closing "};"
 * are missing from this excerpt.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* runs lan78xx_deferred_multicast_write() */
	struct work_struct set_vlan;
/* Per-skb bookkeeping stored in skb->cb for in-flight URBs.
 * NOTE(review): fields (e.g. the urb pointer/length) and the closing "};"
 * are missing from this excerpt.
 */
struct skb_data { /* skb->cb is one of these */
	struct lan78xx_net *dev;
	enum skb_state state;
	/* NOTE(review): the two fields below belong to a different struct
	 * (a deferred usb_ctrlrequest context) whose declaration line is
	 * missing from this excerpt.
	 */
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
/* Bit numbers for dev->flags; set via lan78xx_defer_kevent() and serviced
 * by the delayed-work handler.
 */
#define EVENT_TX_HALT 0
#define EVENT_RX_HALT 1
#define EVENT_RX_MEMORY 2
#define EVENT_STS_SPLIT 3
#define EVENT_LINK_RESET 4
#define EVENT_RX_PAUSED 5
#define EVENT_DEV_WAKING 6
#define EVENT_DEV_ASLEEP 7
#define EVENT_DEV_OPEN 8
#define EVENT_STAT_UPDATE 9
	/* NOTE(review): these fields belong to the driver's stats container
	 * struct whose declaration line is missing from this excerpt.
	 */
	struct mutex access_lock; /* for stats access */
	struct lan78xx_statstage saved;          /* last raw snapshot, for rollover detection */
	struct lan78xx_statstage rollover_count; /* per-counter number of 32-bit wraps */
	struct lan78xx_statstage rollover_max;   /* per-counter wrap modulus - 1 */
	struct lan78xx_statstage64 curr_stat;    /* rollover-corrected 64-bit totals */

/* PHY interrupt demultiplexing state for the driver's own irq_domain.
 * NOTE(review): further fields and the closing "};" are missing from this
 * excerpt.
 */
struct irq_domain_data {
	struct irq_domain *irqdomain;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	struct mutex irq_lock; /* for irq bus access */
	/* NOTE(review): the fields below are the interior of the main
	 * per-device struct (lan78xx_net); its declaration line, several
	 * fields, and the closing "};" are missing from this excerpt.
	 */
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;

	/* skb queues for the RX/TX pipelines */
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh; /* bottom half for URB completion work */
	struct delayed_work wq;   /* kevent handler, see lan78xx_defer_kevent() */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu; /* count any extra framing */
	size_t rx_urb_size; /* size for rx urbs */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	struct timer_list delay;
	struct timer_list stat_monitor; /* periodic stats refresh, STAT_UPDATE_TIMER */

	unsigned long data[5]; /* data[0] holds struct lan78xx_priv * */

	struct mii_bus *mdiobus;
	phy_interface_t interface;

	u8 fc_request_control; /* requested flow-control when autoneg is off */

	struct statstage stats;

	struct irq_domain_data domain_data;
/* define external phy id */
#define PHY_LAN8835 (0x0007C130)
#define PHY_KSZ9031RNX (0x00221620)

/* use ethtool to change the level for any given device */
/* -1 means "use the netif_msg default" until overridden */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read a 32-bit device register via a vendor control-IN transfer.
 * The register value is fetched into a kmalloc'd bounce buffer (control
 * transfers need DMA-able memory, not stack) and warned about on failure.
 * NOTE(review): this excerpt omits lines — the opening brace, NULL check,
 * success path (le32 fixup into *data), kfree and return are not visible.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* failure branch (body lines missing from excerpt) */
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
/* Write a 32-bit device register via a vendor control-OUT transfer, using
 * a kmalloc'd bounce buffer for the little-endian payload.
 * NOTE(review): this excerpt omits lines — opening brace, NULL check,
 * cpu_to_le32 copy into buf, kfree and return are not visible.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
/* Fetch the whole hardware statistics block in one vendor control-IN
 * transfer, then byte-swap each 32-bit counter in place before handing it
 * back to the caller.
 * NOTE(review): this excerpt omits lines — allocation check, the transfer
 * length/dst arguments, the copy into *data, kfree and return are missing.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
	struct lan78xx_statstage *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* counters arrive little-endian; fix up word by word */
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
/* Detect a 32-bit hardware counter wrap: if the freshly read value is
 * below the previously saved snapshot, the counter rolled over since the
 * last read, so bump its rollover count.
 * NOTE(review): the macro's closing brace line is missing from this excerpt.
 */
#define check_counter_rollover(struct1, dev_stats, member) { \
	if (struct1->member < dev_stats.saved.member) \
		dev_stats.rollover_count.member++; \
/* Run the rollover check on every counter in the freshly read snapshot,
 * then store it as the new baseline in dev->stats.saved.
 * Caller must hold dev->stats.access_lock (see lan78xx_update_stats()).
 * NOTE(review): the function's braces are not visible in this excerpt.
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* new snapshot becomes the baseline for the next rollover check */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh the 64-bit accumulated statistics: read the hardware counters,
 * detect rollovers, then rebuild each 64-bit total as
 * current + (rollovers * (max + 1)), all under stats.access_lock.
 * Skips entirely if the interface cannot be resumed for USB I/O.
 * NOTE(review): this excerpt omits lines (declarations of i/data, braces).
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
	u32 *p, *count, *max;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)

	/* walk the three parallel stat structs as flat u32/u64 arrays */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
/* Loop until the read is completed with timeout called with phy_mutex held */
/* Polls MII_ACC until the BUSY bit clears or one second (HZ) elapses.
 * NOTE(review): this excerpt omits lines (braces, return statements).
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
	unsigned long start_time = jiffies;

	ret = lan78xx_read_reg(dev, MII_ACC, &val);
	if (unlikely(ret < 0))

	if (!(val & MII_ACC_MII_BUSY_))
	} while (!time_after(jiffies, start_time + HZ));
/* Compose a MII_ACC register value for one MDIO transaction: PHY address,
 * register index, read-vs-write direction, and the BUSY bit that kicks off
 * the access.
 * NOTE(review): this excerpt omits lines — the braces, the if/else around
 * the read/write bits, and the return are not visible.
 */
static inline u32 mii_access(int id, int index, int read)
	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	ret |= MII_ACC_MII_READ_;
	ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;
/* Poll E2P_CMD until the EEPROM controller reports done (BUSY cleared) or
 * flags a timeout, giving up after one second; warns and fails on timeout.
 * NOTE(review): this excerpt omits lines (braces, declarations, returns).
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
	unsigned long start_time = jiffies;

	ret = lan78xx_read_reg(dev, E2P_CMD, &val);
	if (unlikely(ret < 0))

	if (!(val & E2P_CMD_EPC_BUSY_) ||
	    (val & E2P_CMD_EPC_TIMEOUT_))
	usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
/* Verify the EEPROM controller is idle before starting a new command,
 * polling E2P_CMD for up to one second; warns "EEPROM is busy" on timeout.
 * NOTE(review): this excerpt omits lines (braces, declarations, returns).
 */
static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
	unsigned long start_time = jiffies;

	ret = lan78xx_read_reg(dev, E2P_CMD, &val);
	if (unlikely(ret < 0))

	if (!(val & E2P_CMD_EPC_BUSY_))

	usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
/* Read `length` bytes from the external EEPROM one byte at a time via
 * E2P_CMD/E2P_DATA. On LAN7800 the EEPROM pins are shared with the LEDs,
 * so LED outputs are disabled first and the saved HW_CFG restored at the
 * end.
 * NOTE(review): this excerpt omits lines (save of HW_CFG into `saved`,
 * error-exit bodies, offset increment, returns).
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);

	retval = lan78xx_eeprom_confirm_not_busy(dev);

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {

		retval = lan78xx_wait_eeprom(dev);

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {

		/* each E2P_DATA read yields one byte in the low bits */
		data[i] = val & 0xFF;

	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Validated EEPROM read: only proceeds when byte 0 carries the
 * EEPROM_INDICATOR signature marking a programmed part.
 * NOTE(review): this excerpt omits lines (braces, else/error path, return).
 */
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write `length` bytes to the external EEPROM: issue the EWEN (write
 * enable) command once, then a per-byte DATA + WRITE command pair, waiting
 * for the controller between steps. LED pins are borrowed/restored on
 * LAN7800 as in the raw read path.
 * NOTE(review): this excerpt omits lines (save of HW_CFG into `saved`,
 * data-byte load into val, error-exit bodies, offset increment, returns).
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);

	retval = lan78xx_eeprom_confirm_not_busy(dev);

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {

	retval = lan78xx_wait_eeprom(dev);

	for (i = 0; i < length; i++) {
		/* Fill data register */
		ret = lan78xx_write_reg(dev, E2P_DATA, val);

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);

		retval = lan78xx_wait_eeprom(dev);

	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read `length` bytes from on-chip OTP memory. First powers the OTP block
 * up (clearing OTP_PWR_DN and polling until it sticks), then per byte:
 * program the split address registers, issue READ + GO, poll OTP_STATUS
 * until not busy, and collect the byte from OTP_RD_DATA. Each poll loop is
 * bounded by a one-second jiffies timeout.
 * NOTE(review): this excerpt omits lines (declarations, do{ openers,
 * timeout-exit returns, final return).
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
		} while (buf & OTP_PWR_DN_PWRDN_N_);

	for (i = 0; i < length; i++) {
		/* OTP address is split: high bits in ADDR1, low in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
/* Program `length` bytes into on-chip OTP memory. Mirrors the raw read:
 * power the OTP block up, switch it to BYTE program mode, then per byte
 * write address + data, issue the program/verify test command + GO, and
 * poll OTP_STATUS with a one-second timeout.
 * NOTE(review): this excerpt omits lines (declarations, do{ openers,
 * timeout-exit returns, final return).
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
		} while (buf & OTP_PWR_DN_PWRDN_N_);

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
		} while (buf & OTP_STATUS_BUSY_);
/* Validated OTP read: checks byte 0 for one of the two programmed-image
 * indicators before reading the requested range.
 * NOTE(review): this excerpt omits lines — the branch bodies (which
 * apparently adjust the offset per indicator) and returns are missing.
 */
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (sig == OTP_INDICATOR_1)
	else if (sig == OTP_INDICATOR_2)

	ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL until the dataport signals ready (DPRDY set), at most 100
 * iterations with short sleeps; warns and fails on timeout.
 * NOTE(review): this excerpt omits lines (braces, success return, final
 * error return).
 */
static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
	for (i = 0; i < 100; i++) {

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))

		if (dp_sel & DP_SEL_DPRDY_)

		usleep_range(40, 100);

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write a buffer of 32-bit words into one of the device's internal RAMs
 * (selected via DP_SEL) through the DP_ADDR/DP_DATA/DP_CMD dataport,
 * waiting for ready between words. Serialized by pdata->dataport_mutex and
 * wrapped in a USB autopm get/put.
 * NOTE(review): this excerpt omits lines (error gotos, `done:` label,
 * return).
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);

	/* select the destination RAM, keeping the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);
/* Pack a MAC address into one perfect-filter (MAF) table slot: the low
 * four bytes into word [1], the top two bytes plus the VALID and DST-type
 * flags into word [0]. Slot 0 is reserved for the device's own address, so
 * only indices 1..NUM_OF_MAF-1 are accepted.
 * NOTE(review): this excerpt omits lines (initial temp load of addr[3]/
 * addr[5]).
 */
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;

		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
/* returns hash bit number for given MAC address */
/* Top 9 bits of the Ethernet CRC select one of 512 multicast hash bits
 * (matches the DP_SEL_VHF hash table size written by the deferred
 * multicast work).
 * NOTE(review): function braces are missing from this excerpt.
 */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler for pdata->set_multicast: pushes the shadow multicast hash
 * table to device RAM via the dataport, reloads every perfect-filter slot
 * (clearing MAF_HI first so the entry is never half-valid), then writes
 * the cached RFE_CTL. Runs in process context because the register writes
 * sleep — see lan78xx_set_multicast(), which only updates the shadows.
 * NOTE(review): this excerpt omits lines (declarations, braces).
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode handler. Rebuilds the shadow RX-filter state (rfe_ctl
 * bits, multicast hash table, perfect-filter table) under the rfe_ctl
 * spinlock — this runs in atomic context, so the actual register writes
 * are deferred to lan78xx_deferred_multicast_write() via schedule_work().
 * Promiscuous > allmulti > per-address filtering, with the first addresses
 * placed in perfect-filter slots and the rest hashed.
 * NOTE(review): this excerpt omits lines (braces, the slot-available test
 * in the mc-addr loop, the i++ bookkeeping).
 */
static void lan78xx_set_multicast(struct net_device *netdev)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: no unicast/multicast filtering bits */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;

		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			lan78xx_set_addr_filter(pdata, i, ha->addr);
			/* overflow addresses fall back to the 512-bit hash */
			u32 bitnum = lan78xx_hash(ha->addr);

			pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
			pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
/* Program MAC flow control after a link change. Resolves the pause
 * capability either from autoneg (local + link-partner advertisement) or
 * from the user-requested fc_request_control, then writes the FCT_FLOW
 * threshold register before enabling pause in FLOW — thresholds must be
 * valid before flow control is switched on.
 * NOTE(review): this excerpt omits lines (braces, the speed-specific
 * fct_flow threshold values, final return).
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
	u32 flow = 0, fct_flow = 0;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		/* low 16 bits carry the pause time */
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
	else if (dev->udev->speed == USB_SPEED_HIGH)

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link-state transition (run from the kevent worker after an
 * INT_STS_PHY_INT). Link down: update MAC_CR and stop the stats timer.
 * Link up: tune USB U1/U2 LPM for the negotiated speed on SuperSpeed
 * hosts (U1-only at gigabit, U1+U2 otherwise), reprogram flow control
 * from the resolved advertisement, restart the stats timer, and kick the
 * tasklet to resume traffic.
 * NOTE(review): this excerpt omits lines (MAC_CR bit manipulation, the
 * ladv/radv error checks, link_reset counter, braces, returns).
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))

		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))

		/* no link: stop the periodic stats refresh */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* gigabit: disable U2, allow only U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);

				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);

				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);

		ladv = phy_read(phydev, MII_ADVERTISE);

		radv = phy_read(phydev, MII_LPA);

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,

		if (!timer_pending(&dev->stat_monitor)) {
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);

		tasklet_schedule(&dev->bh);
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
/* Mark an EVENT_* bit in dev->flags and kick the delayed-work handler;
 * logs if the work item was already queued and the kick was dropped.
 * NOTE(review): function braces are missing from this excerpt.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint URB completion handler: decodes the 4-byte
 * little-endian status word. A PHY interrupt defers a link reset to the
 * kevent worker and forwards the IRQ into the driver's irq_domain when one
 * is registered; anything else is logged as unexpected.
 * NOTE(review): this excerpt omits lines (declarations, early return on
 * bad length, braces, else arm before the final netdev_warn).
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq(dev->domain_data.phyirq);
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool get_eeprom_len: fixed maximum EEPROM size exposed to userspace.
 * NOTE(review): function braces are missing from this excerpt.
 */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
	return MAX_EEPROM_SIZE;
/* ethtool get_eeprom: raw EEPROM dump for the requested offset/length,
 * tagging the result with LAN78XX_EEPROM_MAGIC; bracketed by USB autopm
 * get/put so the device is awake for the transfer.
 * NOTE(review): this excerpt omits lines (early-return on autopm failure,
 * braces, final return).
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
	struct lan78xx_net *dev = netdev_priv(netdev);

	ret = usb_autopm_get_interface(dev->intf);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);
/* ethtool set_eeprom: routes the write by magic — EEPROM_MAGIC goes to the
 * external EEPROM, OTP_MAGIC (only when the image starts at offset 0 with
 * the OTP_INDICATOR_1 byte) goes to on-chip OTP. Wrapped in USB autopm.
 * NOTE(review): this excerpt omits lines (early-return on autopm failure,
 * default ret for unmatched magic, final return).
 */
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
	struct lan78xx_net *dev = netdev_priv(netdev);

	ret = usb_autopm_get_interface(dev->intf);

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);
/* ethtool get_strings: copy out the statistics name table for ETH_SS_STATS.
 * NOTE(review): the data parameter and function braces are missing from
 * this excerpt.
 */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
/* ethtool get_sset_count: number of stat strings for ETH_SS_STATS.
 * NOTE(review): braces and the fallback return (for other string sets) are
 * missing from this excerpt.
 */
static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool get_ethtool_stats: refresh the 64-bit accumulated counters from
 * hardware, then copy them out under the stats lock. curr_stat's field
 * order matches lan78xx_gstrings, so a straight memcpy is valid.
 * NOTE(review): function braces are missing from this excerpt.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
/* ethtool get_wol: report WoL support/options. Remote wakeup must be
 * enabled in USB_CFG0 for any WoL to be offered; the current selection is
 * read back from the cached pdata->wol. Wrapped in USB autopm.
 * NOTE(review): this excerpt omits lines (the error-path body, the else
 * arm zeroing supported/wolopts, phy_ethtool_get_wol call, braces).
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {

	if (buf & USB_CFG_RMT_WKP_) {
		wol->supported = WAKE_ALL;
		wol->wolopts = pdata->wol;

	usb_autopm_put_interface(dev->intf);
/* ethtool set_wol: store the requested wake options, arm/disarm USB
 * wakeup on the device, and forward the request to the PHY.
 */
1367 static int lan78xx_set_wol(struct net_device *netdev,
1368 struct ethtool_wolinfo *wol)
1370 struct lan78xx_net *dev = netdev_priv(netdev);
1371 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1374 ret = usb_autopm_get_interface(dev->intf);
/* reject any wake flag outside the supported WAKE_ALL mask.
 * NOTE(review): the rejection body (likely ret = -EINVAL / goto) is not
 * visible in this extract.
 */
1378 if (wol->wolopts & ~WAKE_ALL)
1381 pdata->wol = wol->wolopts;
/* any non-zero wolopts means USB wakeup must be enabled */
1383 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1385 phy_ethtool_set_wol(netdev->phydev, wol);
1387 usb_autopm_put_interface(dev->intf);
/* ethtool get_eee: combine the PHY's EEE advertisement with the MAC's
 * EEE-enable bit (MAC_CR) and TX LPI timer (EEE_TX_LPI_REQ_DLY).
 */
1392 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1394 struct lan78xx_net *dev = netdev_priv(net);
1395 struct phy_device *phydev = net->phydev;
1399 ret = usb_autopm_get_interface(dev->intf);
1403 ret = phy_ethtool_get_eee(phydev, edata);
/* NOTE(review): the error checks between these calls are not visible in
 * this extract; lan78xx_read_reg's return is assigned but apparently
 * unchecked here.
 */
1407 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1408 if (buf & MAC_CR_EEE_EN_) {
1409 edata->eee_enabled = true;
/* EEE is active only when both ends advertise a common mode */
1410 edata->eee_active = !!(edata->advertised &
1411 edata->lp_advertised);
1412 edata->tx_lpi_enabled = true;
1413 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1414 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1415 edata->tx_lpi_timer = buf;
/* MAC EEE disabled: report everything off */
1417 edata->eee_enabled = false;
1418 edata->eee_active = false;
1419 edata->tx_lpi_enabled = false;
1420 edata->tx_lpi_timer = 0;
1425 usb_autopm_put_interface(dev->intf);
/* ethtool set_eee: enable/disable EEE in the MAC (MAC_CR), program the
 * TX LPI delay, and propagate the advertisement change to the PHY.
 */
1430 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1432 struct lan78xx_net *dev = netdev_priv(net);
1436 ret = usb_autopm_get_interface(dev->intf);
1440 if (edata->eee_enabled) {
/* set MAC_CR_EEE_EN_ before telling the PHY to advertise EEE */
1441 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1442 buf |= MAC_CR_EEE_EN_;
1443 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1445 phy_ethtool_set_eee(net->phydev, edata);
/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same uSec unit */
1447 buf = (u32)edata->tx_lpi_timer;
1448 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
/* else branch (not all lines visible): clear the MAC EEE enable bit */
1450 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1451 buf &= ~MAC_CR_EEE_EN_;
1452 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1455 usb_autopm_put_interface(dev->intf);
1460 static u32 lan78xx_get_link(struct net_device *net)
1462 phy_read_status(net->phydev);
1464 return net->phydev->link;
1467 static void lan78xx_get_drvinfo(struct net_device *net,
1468 struct ethtool_drvinfo *info)
1470 struct lan78xx_net *dev = netdev_priv(net);
1472 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1473 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1474 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1477 static u32 lan78xx_get_msglevel(struct net_device *net)
1479 struct lan78xx_net *dev = netdev_priv(net);
1481 return dev->msg_enable;
1484 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1486 struct lan78xx_net *dev = netdev_priv(net);
1488 dev->msg_enable = level;
1491 static int lan78xx_get_link_ksettings(struct net_device *net,
1492 struct ethtool_link_ksettings *cmd)
1494 struct lan78xx_net *dev = netdev_priv(net);
1495 struct phy_device *phydev = net->phydev;
1498 ret = usb_autopm_get_interface(dev->intf);
1502 phy_ethtool_ksettings_get(phydev, cmd);
1504 usb_autopm_put_interface(dev->intf);
/* ethtool hook: apply new link settings via the PHY.  When autoneg is
 * off, briefly force the link down (loopback bit) so the peer sees the
 * speed/duplex change.
 */
1509 static int lan78xx_set_link_ksettings(struct net_device *net,
1510 const struct ethtool_link_ksettings *cmd)
1512 struct lan78xx_net *dev = netdev_priv(net);
1513 struct phy_device *phydev = net->phydev;
1517 ret = usb_autopm_get_interface(dev->intf);
1521 /* change speed & duplex */
1522 ret = phy_ethtool_ksettings_set(phydev, cmd);
1524 if (!cmd->base.autoneg) {
1525 /* force link down */
1526 temp = phy_read(phydev, MII_BMCR);
1527 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
/* NOTE(review): a short delay between the two BMCR writes is not
 * visible in this extract — confirm against the full source.
 */
1529 phy_write(phydev, MII_BMCR, temp);
1532 usb_autopm_put_interface(dev->intf);
1537 static void lan78xx_get_pause(struct net_device *net,
1538 struct ethtool_pauseparam *pause)
1540 struct lan78xx_net *dev = netdev_priv(net);
1541 struct phy_device *phydev = net->phydev;
1542 struct ethtool_link_ksettings ecmd;
1544 phy_ethtool_ksettings_get(phydev, &ecmd);
1546 pause->autoneg = dev->fc_autoneg;
1548 if (dev->fc_request_control & FLOW_CTRL_TX)
1549 pause->tx_pause = 1;
1551 if (dev->fc_request_control & FLOW_CTRL_RX)
1552 pause->rx_pause = 1;
/* ethtool hook: configure flow control.  Pause autoneg is only allowed
 * when link autoneg is on; with autoneg the pause bits are folded into
 * the PHY advertisement, otherwise they are applied directly elsewhere.
 */
1555 static int lan78xx_set_pause(struct net_device *net,
1556 struct ethtool_pauseparam *pause)
1558 struct lan78xx_net *dev = netdev_priv(net);
1559 struct phy_device *phydev = net->phydev;
1560 struct ethtool_link_ksettings ecmd;
1563 phy_ethtool_ksettings_get(phydev, &ecmd);
/* pause autoneg without link autoneg is invalid — error path not fully
 * visible in this extract (likely ret = -EINVAL / goto exit).
 */
1565 if (pause->autoneg && !ecmd.base.autoneg) {
/* rebuild the requested flow-control bitmap from scratch */
1570 dev->fc_request_control = 0;
1571 if (pause->rx_pause)
1572 dev->fc_request_control |= FLOW_CTRL_RX;
1574 if (pause->tx_pause)
1575 dev->fc_request_control |= FLOW_CTRL_TX;
1577 if (ecmd.base.autoneg) {
/* replace the Pause/Asym_Pause advertisement bits with ones derived
 * from the requested flow control, then push to the PHY
 */
1581 ethtool_convert_link_mode_to_legacy_u32(
1582 &advertising, ecmd.link_modes.advertising);
1584 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1585 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1586 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1588 ethtool_convert_legacy_u32_to_link_mode(
1589 ecmd.link_modes.advertising, advertising);
1591 phy_ethtool_ksettings_set(phydev, &ecmd);
1594 dev->fc_autoneg = pause->autoneg;
/* ethtool operations table wired into the net_device at probe time */
1601 static const struct ethtool_ops lan78xx_ethtool_ops = {
1602 .get_link = lan78xx_get_link,
1603 .nway_reset = phy_ethtool_nway_reset,
1604 .get_drvinfo = lan78xx_get_drvinfo,
1605 .get_msglevel = lan78xx_get_msglevel,
1606 .set_msglevel = lan78xx_set_msglevel,
1607 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1608 .get_eeprom = lan78xx_ethtool_get_eeprom,
1609 .set_eeprom = lan78xx_ethtool_set_eeprom,
1610 .get_ethtool_stats = lan78xx_get_stats,
1611 .get_sset_count = lan78xx_get_sset_count,
1612 .get_strings = lan78xx_get_strings,
1613 .get_wol = lan78xx_get_wol,
1614 .set_wol = lan78xx_set_wol,
1615 .get_eee = lan78xx_get_eee,
1616 .set_eee = lan78xx_set_eee,
1617 .get_pauseparam = lan78xx_get_pause,
1618 .set_pauseparam = lan78xx_set_pause,
1619 .get_link_ksettings = lan78xx_get_link_ksettings,
1620 .set_link_ksettings = lan78xx_set_link_ksettings,
1623 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1625 if (!netif_running(netdev))
1628 return phy_mii_ioctl(netdev->phydev, rq, cmd);
/* Establish the device MAC address: prefer what the chip already has in
 * RX_ADDRL/RX_ADDRH, fall back to EEPROM/OTP, and finally to a random
 * address.  The chosen address is written back to the MAC and to perfect
 * filter slot 0, and copied into net_device.
 */
1631 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1633 u32 addr_lo, addr_hi;
1637 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1638 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* unpack the two little-endian registers into a 6-byte address */
1640 addr[0] = addr_lo & 0xFF;
1641 addr[1] = (addr_lo >> 8) & 0xFF;
1642 addr[2] = (addr_lo >> 16) & 0xFF;
1643 addr[3] = (addr_lo >> 24) & 0xFF;
1644 addr[4] = addr_hi & 0xFF;
1645 addr[5] = (addr_hi >> 8) & 0xFF;
1647 if (!is_valid_ether_addr(addr)) {
1648 /* reading mac address from EEPROM or OTP */
1649 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1651 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1653 if (is_valid_ether_addr(addr)) {
1654 /* eeprom values are valid so use them */
1655 netif_dbg(dev, ifup, dev->net,
1656 "MAC address read from EEPROM");
1658 /* generate random MAC */
1659 random_ether_addr(addr);
1660 netif_dbg(dev, ifup, dev->net,
1661 "MAC address set to random addr");
/* write the (EEPROM or random) address back into the MAC registers */
1664 addr_lo = addr[0] | (addr[1] << 8) |
1665 (addr[2] << 16) | (addr[3] << 24);
1666 addr_hi = addr[4] | (addr[5] << 8);
1668 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1669 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1671 /* generate random MAC */
1672 random_ether_addr(addr);
1673 netif_dbg(dev, ifup, dev->net,
1674 "MAC address set to random addr");
/* install the address in MAC address filter slot 0 */
1678 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1679 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1681 ether_addr_copy(dev->net->dev_addr, addr);
1684 /* MDIO read and write wrappers for phylib */
/* Read one 16-bit PHY register over the chip's MII interface.
 * Serialized by phy_mutex; holds a USB autopm reference for the access.
 * Returns the register value or a negative errno.
 */
1685 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1687 struct lan78xx_net *dev = bus->priv;
1691 ret = usb_autopm_get_interface(dev->intf);
1695 mutex_lock(&dev->phy_mutex);
1697 /* confirm MII not busy */
1698 ret = lan78xx_phy_wait_not_busy(dev);
/* NOTE(review): 'goto done' error exits between these steps are not
 * visible in this extract.
 */
1702 /* set the address, index & direction (read from PHY) */
1703 addr = mii_access(phy_id, idx, MII_READ);
1704 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1706 ret = lan78xx_phy_wait_not_busy(dev);
1710 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1712 ret = (int)(val & 0xFFFF);
1715 mutex_unlock(&dev->phy_mutex);
1716 usb_autopm_put_interface(dev->intf);
/* Write one 16-bit PHY register over the chip's MII interface.
 * Mirrors lan78xx_mdiobus_read(): data first, then the access command.
 */
1721 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1724 struct lan78xx_net *dev = bus->priv;
1728 ret = usb_autopm_get_interface(dev->intf);
1732 mutex_lock(&dev->phy_mutex);
1734 /* confirm MII not busy */
1735 ret = lan78xx_phy_wait_not_busy(dev);
/* data must be staged in MII_DATA before triggering the MII_ACC write */
1740 ret = lan78xx_write_reg(dev, MII_DATA, val);
1742 /* set the address, index & direction (write to PHY) */
1743 addr = mii_access(phy_id, idx, MII_WRITE);
1744 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1746 ret = lan78xx_phy_wait_not_busy(dev);
1751 mutex_unlock(&dev->phy_mutex);
1752 usb_autopm_put_interface(dev->intf);
/* Allocate and register the MDIO bus used to reach the internal (7800/
 * 7850) or external (7801) PHY.  phy_mask limits the probe scan.
 */
1756 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1760 dev->mdiobus = mdiobus_alloc();
1761 if (!dev->mdiobus) {
1762 netdev_err(dev->net, "can't allocate MDIO bus\n");
1766 dev->mdiobus->priv = (void *)dev;
1767 dev->mdiobus->read = lan78xx_mdiobus_read;
1768 dev->mdiobus->write = lan78xx_mdiobus_write;
1769 dev->mdiobus->name = "lan78xx-mdiobus";
/* unique bus id derived from the USB topology */
1771 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1772 dev->udev->bus->busnum, dev->udev->devnum);
1774 switch (dev->chipid) {
1775 case ID_REV_CHIP_ID_7800_:
1776 case ID_REV_CHIP_ID_7850_:
1777 /* set to internal PHY id */
1778 dev->mdiobus->phy_mask = ~(1 << 1);
1780 case ID_REV_CHIP_ID_7801_:
1781 /* scan thru PHYAD[2..0] */
1782 dev->mdiobus->phy_mask = ~(0xFF);
1786 ret = mdiobus_register(dev->mdiobus);
1788 netdev_err(dev->net, "can't register MDIO bus\n");
1792 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* error path: free the bus allocated above */
1795 mdiobus_free(dev->mdiobus);
1799 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1801 mdiobus_unregister(dev->mdiobus);
1802 mdiobus_free(dev->mdiobus);
/* phylib link-change callback.  Also applies a hardware workaround for
 * forced-100M mode (bounce the speed through 10M), with the PHY
 * interrupt masked so the intermediate transitions are not reported.
 */
1805 static void lan78xx_link_status_change(struct net_device *net)
1807 struct phy_device *phydev = net->phydev;
1810 /* At forced 100 F/H mode, chip may fail to set mode correctly
1811 * when cable is switched between long(~50+m) and short one.
1812 * As workaround, set to 10 before setting to 100
1813 * at forced 100 F/H mode.
1815 if (!phydev->autoneg && (phydev->speed == 100)) {
1816 /* disable phy interrupt */
1817 temp = phy_read(phydev, LAN88XX_INT_MASK);
1818 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1819 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1821 temp = phy_read(phydev, MII_BMCR);
1822 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1823 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1824 temp |= BMCR_SPEED100;
1825 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1827 /* clear pending interrupt generated while workaround */
1828 temp = phy_read(phydev, LAN88XX_INT_STS);
1830 /* enable phy interrupt back */
1831 temp = phy_read(phydev, LAN88XX_INT_MASK);
1832 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1833 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1837 static int irq_map(struct irq_domain *d, unsigned int irq,
1838 irq_hw_number_t hwirq)
1840 struct irq_domain_data *data = d->host_data;
1842 irq_set_chip_data(irq, data);
1843 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1844 irq_set_noprobe(irq);
1849 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1851 irq_set_chip_and_handler(irq, NULL, NULL);
1852 irq_set_chip_data(irq, NULL);
1855 static const struct irq_domain_ops chip_domain_ops = {
1860 static void lan78xx_irq_mask(struct irq_data *irqd)
1862 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1864 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1867 static void lan78xx_irq_unmask(struct irq_data *irqd)
1869 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1871 data->irqenable |= BIT(irqd_to_hwirq(irqd));
1874 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1876 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1878 mutex_lock(&data->irq_lock);
1881 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1883 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1884 struct lan78xx_net *dev =
1885 container_of(data, struct lan78xx_net, domain_data);
1889 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1890 * are only two callbacks executed in non-atomic contex.
1892 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1893 if (buf != data->irqenable)
1894 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1896 mutex_unlock(&data->irq_lock);
/* irqchip backing the device's internal interrupt lines (e.g. PHY IRQ);
 * mask changes are cached and flushed in irq_bus_sync_unlock.
 */
1899 static struct irq_chip lan78xx_irqchip = {
1900 .name = "lan78xx-irqs",
1901 .irq_mask = lan78xx_irq_mask,
1902 .irq_unmask = lan78xx_irq_unmask,
1903 .irq_bus_lock = lan78xx_irq_bus_lock,
1904 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
/* Build an irq_domain over the device's internal interrupt sources and
 * create the mapping for the PHY interrupt (INT_EP_PHY).  The current
 * INT_EP_CTL value seeds the cached enable mask.
 */
1907 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1909 struct device_node *of_node;
1910 struct irq_domain *irqdomain;
1911 unsigned int irqmap = 0;
1915 of_node = dev->udev->dev.parent->of_node;
1917 mutex_init(&dev->domain_data.irq_lock);
1919 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1920 dev->domain_data.irqenable = buf;
1922 dev->domain_data.irqchip = &lan78xx_irqchip;
1923 dev->domain_data.irq_handler = handle_simple_irq;
1925 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1926 &chip_domain_ops, &dev->domain_data);
1928 /* create mapping for PHY interrupt */
1929 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
/* error path: tear the domain back down if the mapping failed.
 * NOTE(review): the surrounding if/else lines are not visible in this
 * extract.
 */
1931 irq_domain_remove(irqdomain);
1940 dev->domain_data.irqdomain = irqdomain;
1941 dev->domain_data.phyirq = irqmap;
1946 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1948 if (dev->domain_data.phyirq > 0) {
1949 irq_dispose_mapping(dev->domain_data.phyirq);
1951 if (dev->domain_data.irqdomain)
1952 irq_domain_remove(dev->domain_data.irqdomain);
1954 dev->domain_data.phyirq = 0;
1955 dev->domain_data.irqdomain = NULL;
/* PHY fixup for the external LAN8835: route the multiplexed pin to
 * IRQ_N mode and configure RGMII TX clock delay on the MAC side.
 */
1958 static int lan8835_fixup(struct phy_device *phydev)
1962 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1964 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1965 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
/* NOTE(review): the bit-manipulation of 'buf' between the read and the
 * write is not visible in this extract.
 */
1968 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1970 /* RGMII MAC TXC Delay Enable */
1971 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1972 MAC_RGMII_ID_TXC_DELAY_EN_);
1974 /* RGMII TX DLL Tune Adjust */
1975 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1977 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1982 static int ksz9031rnx_fixup(struct phy_device *phydev)
1984 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1986 /* Micrel9301RNX PHY configuration */
1987 /* RGMII Control Signal Pad Skew */
1988 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
1989 /* RGMII RX Data Pad Skew */
1990 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
1991 /* RGMII RX Clock Pad Skew */
1992 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
1994 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* Find the PHY on the MDIO bus, select the MAC/PHY interface mode per
 * chip id (internal GMII for 7800/7850, external RGMII for 7801 with
 * per-PHY fixups), connect phylib, and set up flow-control defaults.
 */
1999 static int lan78xx_phy_init(struct lan78xx_net *dev)
2003 struct phy_device *phydev = dev->net->phydev;
2005 phydev = phy_find_first(dev->mdiobus);
2007 netdev_err(dev->net, "no PHY found\n");
2011 if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2012 (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2013 phydev->is_internal = true;
2014 dev->interface = PHY_INTERFACE_MODE_GMII;
2016 } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2018 netdev_err(dev->net, "no PHY driver found\n");
2022 dev->interface = PHY_INTERFACE_MODE_RGMII;
2024 /* external PHY fixup for KSZ9031RNX */
2025 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2028 netdev_err(dev->net, "fail to register fixup\n");
2031 /* external PHY fixup for LAN8835 */
2032 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2035 netdev_err(dev->net, "fail to register fixup\n");
2038 /* add more external PHY fixup here if needed */
2040 phydev->is_internal = false;
2042 netdev_err(dev->net, "unknown ID found\n");
2047 /* if phyirq is not set, use polling mode in phylib */
2048 if (dev->domain_data.phyirq > 0)
2049 phydev->irq = dev->domain_data.phyirq;
2052 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2054 /* set to AUTOMDIX */
2055 phydev->mdix = ETH_TP_MDI_AUTO;
2057 ret = phy_connect_direct(dev->net, phydev,
2058 lan78xx_link_status_change,
2061 netdev_err(dev->net, "can't attach PHY to %s\n",
2066 /* MAC doesn't support 1000T Half */
2067 phydev->supported &= ~SUPPORTED_1000baseT_Half;
2069 /* support both flow controls */
2070 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2071 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2072 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2073 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2075 genphy_config_aneg(phydev);
2077 dev->fc_autoneg = phydev->autoneg;
/* error path: drop the fixups registered above */
2082 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2083 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
/* Program MAC_RX's maximum frame size.  The receiver is disabled around
 * the change when it was running, and re-enabled afterwards.
 */
2088 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2094 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2096 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
/* if RX was enabled, turn it off before resizing (guard not fully
 * visible in this extract)
 */
2099 buf &= ~MAC_RX_RXEN_;
2100 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2103 /* add 4 to size for FCS */
2104 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2105 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2107 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* restore RX enable when it was on before the change */
2110 buf |= MAC_RX_RXEN_;
2111 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink all not-yet-unlinked URBs queued on @q.
 * Returns the number of unlink attempts.  The queue lock is dropped
 * around usb_unlink_urb() because completion handlers also take it.
 */
2117 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2119 struct sk_buff *skb;
2120 unsigned long flags;
2123 spin_lock_irqsave(&q->lock, flags);
2124 while (!skb_queue_empty(q)) {
2125 struct skb_data *entry;
/* find the first entry not already in unlink_start state */
2129 skb_queue_walk(q, skb) {
2130 entry = (struct skb_data *)skb->cb;
2131 if (entry->state != unlink_start)
2136 entry->state = unlink_start;
2139 /* Get reference count of the URB to avoid it to be
2140 * freed during usb_unlink_urb, which may trigger
2141 * use-after-free problem inside usb_unlink_urb since
2142 * usb_unlink_urb is always racing with .complete
2143 * handler(include defer_bh).
2146 spin_unlock_irqrestore(&q->lock, flags);
2147 /* during some PM-driven resume scenarios,
2148 * these (async) unlinks complete immediately
2150 ret = usb_unlink_urb(urb);
2151 if (ret != -EINPROGRESS && ret != 0)
2152 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
/* re-take the lock and rescan the queue from the start */
2156 spin_lock_irqsave(&q->lock, flags);
2158 spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: reprogram the MAC's max frame length and resize the
 * RX URB buffers; in-flight RX URBs are unlinked so new ones are queued
 * with the larger size.
 */
2162 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2164 struct lan78xx_net *dev = netdev_priv(netdev);
2165 int ll_mtu = new_mtu + netdev->hard_header_len;
2166 int old_hard_mtu = dev->hard_mtu;
2167 int old_rx_urb_size = dev->rx_urb_size;
2170 /* no second zero-length packet read wanted after mtu-sized packets */
2171 if ((ll_mtu % dev->maxpacket) == 0)
2174 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2176 netdev->mtu = new_mtu;
2178 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
/* rx_urb_size tracks hard_mtu unless it was separately enlarged */
2179 if (dev->rx_urb_size == old_hard_mtu) {
2180 dev->rx_urb_size = dev->hard_mtu;
2181 if (dev->rx_urb_size > old_rx_urb_size) {
2182 if (netif_running(dev->net)) {
2183 unlink_urbs(dev, &dev->rxq);
2184 tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: validate and program a new MAC address into
 * RX_ADDRL/RX_ADDRH.  Rejected while the interface is running.
 */
2192 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2194 struct lan78xx_net *dev = netdev_priv(netdev);
2195 struct sockaddr *addr = p;
2196 u32 addr_lo, addr_hi;
/* NOTE(review): the busy-return for a running interface is not visible
 * in this extract (presumably return -EBUSY — confirm).
 */
2199 if (netif_running(netdev))
2202 if (!is_valid_ether_addr(addr->sa_data))
2203 return -EADDRNOTAVAIL;
2205 ether_addr_copy(netdev->dev_addr, addr->sa_data);
/* pack the 6 bytes into the two little-endian address registers */
2207 addr_lo = netdev->dev_addr[0] |
2208 netdev->dev_addr[1] << 8 |
2209 netdev->dev_addr[2] << 16 |
2210 netdev->dev_addr[3] << 24;
2211 addr_hi = netdev->dev_addr[4] |
2212 netdev->dev_addr[5] << 8;
2214 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2215 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2220 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate RXCSUM and VLAN-filter feature flags into
 * RFE_CTL bits.  The cached rfe_ctl is updated under its spinlock; the
 * register write happens outside it.
 */
2221 static int lan78xx_set_features(struct net_device *netdev,
2222 netdev_features_t features)
2224 struct lan78xx_net *dev = netdev_priv(netdev);
2225 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2226 unsigned long flags;
2229 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2231 if (features & NETIF_F_RXCSUM) {
2232 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2233 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2235 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2236 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2239 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2240 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2242 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2244 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2246 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2251 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2253 struct lan78xx_priv *pdata =
2254 container_of(param, struct lan78xx_priv, set_vlan);
2255 struct lan78xx_net *dev = pdata->dev;
2257 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2258 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2261 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2262 __be16 proto, u16 vid)
2264 struct lan78xx_net *dev = netdev_priv(netdev);
2265 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2267 u16 vid_dword_index;
2269 vid_dword_index = (vid >> 5) & 0x7F;
2270 vid_bit_index = vid & 0x1F;
2272 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2274 /* defer register writes to a sleepable context */
2275 schedule_work(&pdata->set_vlan);
2280 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2281 __be16 proto, u16 vid)
2283 struct lan78xx_net *dev = netdev_priv(netdev);
2284 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2286 u16 vid_dword_index;
2288 vid_dword_index = (vid >> 5) & 0x7F;
2289 vid_bit_index = vid & 0x1F;
2291 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2293 /* defer register writes to a sleepable context */
2294 schedule_work(&pdata->set_vlan);
/* Program the USB Latency Tolerance Messaging (LTM) registers.  When
 * LTM is enabled, values are taken from EEPROM or OTP if a correctly
 * sized (24-byte) table is present; otherwise the zeroed defaults in
 * regs[] are written.
 */
2299 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2303 u32 regs[6] = { 0 };
2305 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2306 if (buf & USB_CFG1_LTM_ENABLE_) {
2308 /* Get values from EEPROM first */
2309 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2310 if (temp[0] == 24) {
2311 ret = lan78xx_read_raw_eeprom(dev,
2318 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2319 if (temp[0] == 24) {
2320 ret = lan78xx_read_raw_otp(dev,
2330 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2331 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2332 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2333 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2334 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2335 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device initialisation after (re)connect: lite reset, MAC address
 * setup, USB/burst configuration sized by the negotiated USB speed,
 * FIFO sizing, receive-filter defaults, PHY reset, and finally enabling
 * the MAC/FCT TX and RX paths.
 */
2338 static int lan78xx_reset(struct lan78xx_net *dev)
2340 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2343 unsigned long timeout;
/* issue a Lite Reset and poll (bounded by 1s) for self-clear */
2346 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2347 buf |= HW_CFG_LRST_;
2348 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2350 timeout = jiffies + HZ;
2353 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2354 if (time_after(jiffies, timeout)) {
2355 netdev_warn(dev->net,
2356 "timeout on completion of LiteReset");
2359 } while (buf & HW_CFG_LRST_);
2361 lan78xx_init_mac_address(dev);
2363 /* save DEVID for later usage */
2364 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2365 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2366 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2368 /* Respond to the IN token with a NAK */
2369 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2370 buf |= USB_CFG_BIR_;
2371 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2374 lan78xx_init_ltm(dev);
/* size the burst cap and RX/TX queue lengths for the USB link speed */
2376 if (dev->udev->speed == USB_SPEED_SUPER) {
2377 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2378 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2381 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2382 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2383 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2384 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2385 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2387 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2388 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2393 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2394 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2396 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2398 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* enable burst cap */
2400 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2401 buf |= USB_CFG_BCE_;
2402 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2404 /* set FIFO sizes */
2405 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2406 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2408 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2409 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
/* clear interrupts and disable flow control until link-up configures it */
2411 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2412 ret = lan78xx_write_reg(dev, FLOW, 0);
2413 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2415 /* Don't need rfe_ctl_lock during initialisation */
2416 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2417 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2418 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2420 /* Enable or disable checksum offload engines */
2421 lan78xx_set_features(dev->net, dev->net->features);
2423 lan78xx_set_multicast(dev->net);
/* reset the PHY and poll (bounded by 1s) for completion + ready */
2426 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2427 buf |= PMT_CTL_PHY_RST_;
2428 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2430 timeout = jiffies + HZ;
2433 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2434 if (time_after(jiffies, timeout)) {
2435 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2438 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2440 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2441 /* LAN7801 only has RGMII mode */
2442 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2443 buf &= ~MAC_CR_GMII_EN_;
2445 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2446 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2447 if (!ret && sig != EEPROM_INDICATOR) {
2448 /* Implies there is no external eeprom. Set mac speed */
2449 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2450 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2453 ret = lan78xx_write_reg(dev, MAC_CR, buf);
/* enable the TX path: MAC first, then the TX FIFO controller */
2455 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2456 buf |= MAC_TX_TXEN_;
2457 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2459 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2460 buf |= FCT_TX_CTL_EN_;
2461 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2463 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
/* enable the RX path: MAC first, then the RX FIFO controller */
2465 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2466 buf |= MAC_RX_RXEN_;
2467 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2469 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2470 buf |= FCT_RX_CTL_EN_;
2471 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* Seed the statistics rollover thresholds: most hardware counters are
 * 20-bit, but the byte counters and EEE LPI counters are full 32-bit,
 * so their maxima are overridden after the bulk fill.
 */
2476 static void lan78xx_init_stats(struct lan78xx_net *dev)
2481 /* initialize for stats update
2482 * some counters are 20bits and some are 32bits
2484 p = (u32 *)&dev->stats.rollover_max;
2485 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
/* the loop body (assigning the 20-bit maximum) is not visible in this
 * extract
 */
2488 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2489 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2490 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2491 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2492 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2493 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2494 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2495 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2496 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2497 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
/* kick off the first deferred statistics refresh */
2499 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* ndo_open: start the PHY, submit the interrupt URB used for link
 * events, initialise stats, and start the TX queue.  Link state is
 * resolved by the deferred EVENT_LINK_RESET work.
 */
2502 static int lan78xx_open(struct net_device *net)
2504 struct lan78xx_net *dev = netdev_priv(net);
2507 ret = usb_autopm_get_interface(dev->intf);
2511 phy_start(net->phydev);
2513 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2515 /* for Link Check */
2516 if (dev->urb_intr) {
2517 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2519 netif_err(dev, ifup, dev->net,
2520 "intr submit %d\n", ret);
2525 lan78xx_init_stats(dev);
2527 set_bit(EVENT_DEV_OPEN, &dev->flags);
2529 netif_start_queue(net);
2531 dev->link_on = false;
2533 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2535 usb_autopm_put_interface(dev->intf);
/* Unlink all pending TX/RX URBs and wait for their completions to
 * drain; completion handlers wake us through dev->wait.
 */
2541 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2543 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2544 DECLARE_WAITQUEUE(wait, current);
2547 /* ensure there are no more active urbs */
2548 add_wait_queue(&unlink_wakeup, &wait);
2549 set_current_state(TASK_UNINTERRUPTIBLE);
2550 dev->wait = &unlink_wakeup;
2551 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2553 /* maybe wait for deletions to finish. */
/* NOTE(review): this condition only waits while ALL three queues are
 * non-empty; waiting until each queue is empty would need '||' — the
 * analogous usbnet code was later changed for this reason.  Confirm
 * against current upstream before relying on it.
 */
2554 while (!skb_queue_empty(&dev->rxq) &&
2555 !skb_queue_empty(&dev->txq) &&
2556 !skb_queue_empty(&dev->done)) {
2557 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2558 set_current_state(TASK_UNINTERRUPTIBLE);
2559 netif_dbg(dev, ifdown, dev->net,
2560 "waited for %d urb completions\n", temp);
2562 set_current_state(TASK_RUNNING);
2564 remove_wait_queue(&unlink_wakeup, &wait);
/* ndo_stop: stop the stat timer and PHY, quiesce the queues, kill all
 * URBs and deferred work, then drop the autopm reference taken in
 * lan78xx_open().
 */
2567 static int lan78xx_stop(struct net_device *net)
2569 struct lan78xx_net *dev = netdev_priv(net);
2571 if (timer_pending(&dev->stat_monitor))
2572 del_timer_sync(&dev->stat_monitor);
2575 phy_stop(net->phydev);
2577 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2578 netif_stop_queue(net);
2580 netif_info(dev, ifdown, dev->net,
2581 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2582 net->stats.rx_packets, net->stats.tx_packets,
2583 net->stats.rx_errors, net->stats.tx_errors);
2585 lan78xx_terminate_urbs(dev);
2587 usb_kill_urb(dev->urb_intr);
2589 skb_queue_purge(&dev->rxq_pause);
2591 /* deferred work (task, timer, softirq) must also stop.
2592 * can't flush_scheduled_work() until we drop rtnl (later),
2593 * else workers could deadlock; so make workers a NOP.
2596 cancel_delayed_work_sync(&dev->wq);
2597 tasklet_kill(&dev->bh);
2599 usb_autopm_put_interface(dev->intf);
/* Flatten a possibly-fragmented skb into one linear buffer so the TX
 * command words can be prepended contiguously in lan78xx_tx_prep().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
/* Prepend the two TX command words (TX_CMD_A/TX_CMD_B) describing
 * length, checksum offload, LSO and VLAN tagging to the skb.  Consumes
 * the skb on failure and returns NULL.
 */
2609 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2610 struct sk_buff *skb, gfp_t flags)
2612 u32 tx_cmd_a, tx_cmd_b;
/* make sure there is writable headroom for the 8 command bytes */
2614 if (skb_cow_head(skb, TX_OVERHEAD)) {
2615 dev_kfree_skb_any(skb);
2619 if (lan78xx_linearize(skb) < 0)
2622 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2624 if (skb->ip_summed == CHECKSUM_PARTIAL)
2625 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2628 if (skb_is_gso(skb)) {
/* hardware requires a minimum MSS value */
2629 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2631 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2633 tx_cmd_a |= TX_CMD_A_LSO_;
2636 if (skb_vlan_tag_present(skb)) {
2637 tx_cmd_a |= TX_CMD_A_IVTG_;
2638 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* push TX_CMD_B, then TX_CMD_A, little-endian, in front of the data */
2642 cpu_to_le32s(&tx_cmd_b);
2643 memcpy(skb->data, &tx_cmd_b, 4);
2646 cpu_to_le32s(&tx_cmd_a);
2647 memcpy(skb->data, &tx_cmd_a, 4);
/* Move @skb from @list to the done queue, recording the new state, and
 * schedule the bottom half when the done queue transitions from empty.
 * Returns the skb's previous state.
 */
2652 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2653 struct sk_buff_head *list, enum skb_state state)
2655 unsigned long flags;
2656 enum skb_state old_state;
2657 struct skb_data *entry = (struct skb_data *)skb->cb;
/* irqsave on the first lock only; both queues share the same irq-off
 * critical section (hand-over-hand: unlock list, lock done)
 */
2659 spin_lock_irqsave(&list->lock, flags);
2660 old_state = entry->state;
2661 entry->state = state;
2663 __skb_unlink(skb, list);
2664 spin_unlock(&list->lock);
2665 spin_lock(&dev->done.lock);
2667 __skb_queue_tail(&dev->done, skb);
2668 if (skb_queue_len(&dev->done) == 1)
2669 tasklet_schedule(&dev->bh);
2670 spin_unlock_irqrestore(&dev->done.lock, flags);
/* Bulk-out URB completion callback: account TX statistics, classify
 * the error (the full switch arms are partly elided), release the
 * autopm reference taken at submit time, and defer the skb to the
 * bottom half in tx_done state.
 */
2675 static void tx_complete(struct urb *urb)
2677 struct sk_buff *skb = (struct sk_buff *)urb->context;
2678 struct skb_data *entry = (struct skb_data *)skb->cb;
2679 struct lan78xx_net *dev = entry->dev;
2681 if (urb->status == 0) {
/* one URB may carry several aggregated packets */
2682 dev->net->stats.tx_packets += entry->num_of_packet;
2683 dev->net->stats.tx_bytes += entry->length;
2685 dev->net->stats.tx_errors++;
2687 switch (urb->status) {
/* stalled endpoint: let the kevent worker clear the halt */
2689 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2692 /* software-driven interface shutdown */
2700 netif_stop_queue(dev->net);
2703 netif_dbg(dev, tx_err, dev->net,
2704 "tx err %d\n", entry->urb->status);
/* balance usb_autopm_get_interface_async() from lan78xx_tx_bh() */
2709 usb_autopm_put_interface_async(dev->intf);
2711 defer_bh(dev, skb, &dev->txq, tx_done);
/* Append @newsk to @list and record its lifecycle @state in the skb
 * control block.  Uses the unlocked __skb_queue_tail(), so the caller
 * must already hold @list->lock.
 */
2714 static void lan78xx_queue_skb(struct sk_buff_head *list,
2715 struct sk_buff *newsk, enum skb_state state)
2717 struct skb_data *entry = (struct skb_data *)newsk->cb;
2719 __skb_queue_tail(list, newsk);
2720 entry->state = state;
/* ndo_start_xmit: timestamp the skb, prepend the TX command header via
 * lan78xx_tx_prep(), queue the result on txq_pend for the bottom half
 * to batch into a bulk URB, and kick the tasklet.  Always returns
 * NETDEV_TX_OK; prep failure is counted as error + drop.
 */
2724 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2726 struct lan78xx_net *dev = netdev_priv(net);
2727 struct sk_buff *skb2 = NULL;
2730 skb_tx_timestamp(skb);
2731 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2735 skb_queue_tail(&dev->txq_pend, skb2);
2737 /* throttle TX patch at slower than SUPER SPEED USB */
2738 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2739 (skb_queue_len(&dev->txq_pend) > 10))
2740 netif_stop_queue(net);
2742 netif_dbg(dev, tx_err, dev->net,
2743 "lan78xx_tx_prep return NULL\n");
2744 dev->net->stats.tx_errors++;
2745 dev->net->stats.tx_dropped++;
2748 tasklet_schedule(&dev->bh);
2750 return NETDEV_TX_OK;
/* Scan the interface's altsettings for the three endpoints the driver
 * needs: bulk-in (RX), bulk-out (TX) and an interrupt-in status
 * endpoint.  On success, record the bulk pipes and the status endpoint
 * in @dev.  (Error returns are in elided lines.)
 */
2754 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2757 struct usb_host_interface *alt = NULL;
2758 struct usb_host_endpoint *in = NULL, *out = NULL;
2759 struct usb_host_endpoint *status = NULL;
2761 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2767 alt = intf->altsetting + tmp;
2769 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2770 struct usb_host_endpoint *e;
2773 e = alt->endpoint + ep;
2774 switch (e->desc.bmAttributes) {
2775 case USB_ENDPOINT_XFER_INT:
/* only an IN interrupt endpoint can carry device status */
2776 if (!usb_endpoint_dir_in(&e->desc))
2780 case USB_ENDPOINT_XFER_BULK:
2785 if (usb_endpoint_dir_in(&e->desc)) {
2788 else if (intr && !status)
/* a usable altsetting must provide both bulk directions */
2798 if (!alt || !in || !out)
2801 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2802 in->desc.bEndpointAddress &
2803 USB_ENDPOINT_NUMBER_MASK);
2804 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2805 out->desc.bEndpointAddress &
2806 USB_ENDPOINT_NUMBER_MASK);
2807 dev->ep_intr = status;
/* One-time driver/device binding: find the endpoints, allocate the
 * private lan78xx_priv area (stashed in dev->data[0]), initialize its
 * locks and deferred-work items, advertise netdev features, set up the
 * IRQ domain, reset the chip and bring up MDIO.  The labelled error
 * paths (mostly elided) unwind in reverse order.
 */
2812 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2814 struct lan78xx_priv *pdata = NULL;
2818 ret = lan78xx_get_endpoints(dev, intf);
/* private data is carried as an unsigned long in dev->data[0] */
2820 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2822 pdata = (struct lan78xx_priv *)(dev->data[0]);
2824 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2830 spin_lock_init(&pdata->rfe_ctl_lock);
2831 mutex_init(&pdata->dataport_mutex);
2833 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2835 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2836 pdata->vlan_table[i] = 0;
2838 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2840 dev->net->features = 0;
2842 if (DEFAULT_TX_CSUM_ENABLE)
2843 dev->net->features |= NETIF_F_HW_CSUM;
2845 if (DEFAULT_RX_CSUM_ENABLE)
2846 dev->net->features |= NETIF_F_RXCSUM;
2848 if (DEFAULT_TSO_CSUM_ENABLE)
2849 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
/* everything enabled by default is also user-toggleable */
2851 dev->net->hw_features = dev->net->features;
2853 ret = lan78xx_setup_irq_domain(dev);
2855 netdev_warn(dev->net,
2856 "lan78xx_setup_irq_domain() failed : %d", ret);
/* reserve room for the 8-byte TX command header in every frame */
2860 dev->net->hard_header_len += TX_OVERHEAD;
2861 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2863 /* Init all registers */
2864 ret = lan78xx_reset(dev);
2866 netdev_warn(dev->net, "Registers INIT FAILED....");
2870 ret = lan78xx_mdio_init(dev);
2872 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2876 dev->net->flags |= IFF_MULTICAST;
/* default Wake-on-LAN policy: magic packet only */
2878 pdata->wol = WAKE_MAGIC;
/* ---- error unwind (labels elided from this listing) ---- */
2883 lan78xx_remove_irq_domain(dev);
2886 netdev_warn(dev->net, "Bind routine FAILED");
2887 cancel_work_sync(&pdata->set_multicast);
2888 cancel_work_sync(&pdata->set_vlan);
/* Undo lan78xx_bind(): tear down the IRQ domain and MDIO bus, flush
 * the deferred multicast/VLAN writers, and (in elided lines) free the
 * private data area.
 */
2893 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2895 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2897 lan78xx_remove_irq_domain(dev);
2899 lan78xx_remove_mdio(dev);
/* make sure no deferred register writes run after teardown */
2902 cancel_work_sync(&pdata->set_multicast);
2903 cancel_work_sync(&pdata->set_vlan);
2904 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Propagate the hardware-computed RX checksum (carried in rx_cmd_b)
 * onto the skb.  If RX checksum offload is disabled, or the device
 * flags the checksum as not applicable (RX_CMD_A_ICSM_), mark the skb
 * CHECKSUM_NONE so the stack verifies it in software.
 */
2911 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2912 struct sk_buff *skb,
2913 u32 rx_cmd_a, u32 rx_cmd_b)
2915 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2916 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2917 skb->ip_summed = CHECKSUM_NONE;
/* checksum occupies the upper 16 bits of rx_cmd_b */
2919 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2920 skb->ip_summed = CHECKSUM_COMPLETE;
/* Hand a fully parsed RX frame to the network stack.  While RX is
 * paused the skb is parked on rxq_pause instead; otherwise statistics
 * are updated and the frame delivered via netif_rx().
 */
2924 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2928 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2929 skb_queue_tail(&dev->rxq_pause, skb);
2933 dev->net->stats.rx_packets++;
2934 dev->net->stats.rx_bytes += skb->len;
2936 skb->protocol = eth_type_trans(skb, dev->net);
2938 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2939 skb->len + sizeof(struct ethhdr), skb->protocol);
/* clear driver-private state so stale skb_data can't leak upward */
2940 memset(skb->cb, 0, sizeof(struct skb_data));
/* skb consumed by the RX timestamping core if it returns true */
2942 if (skb_defer_rx_timestamp(skb))
2945 status = netif_rx(skb);
2946 if (status != NET_RX_SUCCESS)
2947 netif_dbg(dev, rx_err, dev->net,
2948 "netif_rx status %d\n", status);
/* Parse one bulk-in buffer that may contain several Ethernet frames.
 * Each frame is preceded by three little-endian command words
 * (RX_CMD_A/B/C).  The final frame reuses @skb in place; earlier
 * frames are cloned out with skb_clone().  (rx_cmd_c's declaration
 * and some error/return lines are elided from this listing.)
 */
2951 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2953 if (skb->len < dev->net->hard_header_len)
2956 while (skb->len > 0) {
2957 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2959 struct sk_buff *skb2;
2960 unsigned char *packet;
/* peel off the three command words in front of the frame */
2962 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2963 le32_to_cpus(&rx_cmd_a);
2964 skb_pull(skb, sizeof(rx_cmd_a));
2966 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2967 le32_to_cpus(&rx_cmd_b);
2968 skb_pull(skb, sizeof(rx_cmd_b));
2970 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2971 le16_to_cpus(&rx_cmd_c);
2972 skb_pull(skb, sizeof(rx_cmd_c));
2976 /* get the packet length */
2977 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
/* frames are padded so the next header starts 4-byte aligned */
2978 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2980 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2981 netif_dbg(dev, rx_err, dev->net,
2982 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2984 /* last frame in this batch */
2985 if (skb->len == size) {
2986 lan78xx_rx_csum_offload(dev, skb,
2987 rx_cmd_a, rx_cmd_b);
2989 skb_trim(skb, skb->len - 4); /* remove fcs */
2990 skb->truesize = size + sizeof(struct sk_buff);
/* not the last frame: clone and carve this frame out of the buffer */
2995 skb2 = skb_clone(skb, GFP_ATOMIC);
2996 if (unlikely(!skb2)) {
2997 netdev_warn(dev->net, "Error allocating skb");
3002 skb2->data = packet;
3003 skb_set_tail_pointer(skb2, size);
3005 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3007 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3008 skb2->truesize = size + sizeof(struct sk_buff);
3010 lan78xx_skb_return(dev, skb2);
/* advance past this frame and its alignment padding */
3013 skb_pull(skb, size);
3015 /* padding bytes before the next frame starts */
3017 skb_pull(skb, align_count);
/* Dispatch one completed RX buffer: if lan78xx_rx() fails to parse it,
 * count an error and requeue the skb on dev->done (in rx_cleanup state
 * per the elided lines); otherwise deliver it to the stack.
 */
3023 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3025 if (!lan78xx_rx(dev, skb)) {
3026 dev->net->stats.rx_errors++;
3031 lan78xx_skb_return(dev, skb);
3035 netif_dbg(dev, rx_err, dev->net, "drop\n");
3036 dev->net->stats.rx_errors++;
3038 skb_queue_tail(&dev->done, skb);
3041 static void rx_complete(struct urb *urb);
/* Allocate an rx_urb_size skb, couple it to @urb and submit it on the
 * bulk-in pipe.  Submission is skipped (and the urb/skb freed, per the
 * elided tail) when the device is gone, the interface is down, RX is
 * halted, or the device is autosuspended.
 */
3043 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3045 struct sk_buff *skb;
3046 struct skb_data *entry;
3047 unsigned long lockflags;
3048 size_t size = dev->rx_urb_size;
3051 skb = netdev_alloc_skb_ip_align(dev->net, size);
3057 entry = (struct skb_data *)skb->cb;
3062 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3063 skb->data, size, rx_complete, skb);
/* rxq.lock guards both the submit decision and the queue insert */
3065 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3067 if (netif_device_present(dev->net) &&
3068 netif_running(dev->net) &&
3069 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3070 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3071 ret = usb_submit_urb(urb, GFP_ATOMIC);
3074 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
/* -EPIPE: stalled endpoint, let the worker clear it */
3077 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3080 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3081 netif_device_detach(dev->net);
3087 netif_dbg(dev, rx_err, dev->net,
3088 "rx submit, %d\n", ret);
3089 tasklet_schedule(&dev->bh);
3092 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3095 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3097 dev_kfree_skb_any(skb);
/* Bulk-in URB completion callback: classify the completion status,
 * defer the skb to the bottom half, and resubmit the urb for the next
 * transfer unless RX is halted or being unlinked.
 */
3103 static void rx_complete(struct urb *urb)
3105 struct sk_buff *skb = (struct sk_buff *)urb->context;
3106 struct skb_data *entry = (struct skb_data *)skb->cb;
3107 struct lan78xx_net *dev = entry->dev;
3108 int urb_status = urb->status;
3109 enum skb_state state;
/* reflect the actual number of received bytes in the skb */
3111 skb_put(skb, urb->actual_length);
3115 switch (urb_status) {
3117 if (skb->len < dev->net->hard_header_len) {
3119 dev->net->stats.rx_errors++;
3120 dev->net->stats.rx_length_errors++;
3121 netif_dbg(dev, rx_err, dev->net,
3122 "rx length %d\n", skb->len);
/* successful traffic resets the autosuspend idle timer */
3124 usb_mark_last_busy(dev->udev);
/* stall: defer halt clearing to the kevent worker */
3127 dev->net->stats.rx_errors++;
3128 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3130 case -ECONNRESET: /* async unlink */
3131 case -ESHUTDOWN: /* hardware gone */
3132 netif_dbg(dev, ifdown, dev->net,
3133 "rx shutdown, code %d\n", urb_status);
3141 dev->net->stats.rx_errors++;
3147 /* data overrun ... flush fifo? */
3149 dev->net->stats.rx_over_errors++;
3154 dev->net->stats.rx_errors++;
3155 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
/* defer_bh() reports whether an unlink raced with this completion */
3159 state = defer_bh(dev, skb, &dev->rxq, state);
3162 if (netif_running(dev->net) &&
3163 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3164 state != unlink_start) {
3165 rx_submit(dev, urb, GFP_ATOMIC);
3170 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: coalesce queued skbs from txq_pend into one linear
 * buffer (each already carries its TX command header from
 * lan78xx_tx_prep), wrap it in a bulk-out URB and submit it.  GSO skbs
 * are sent on their own.  When the device is autosuspended the URB is
 * anchored on dev->deferred and submitted later from resume.
 * NOTE(review): many lines (loop braces, error labels, length checks)
 * are elided; the aggregation bookkeeping below is only partially
 * visible -- consult the full source before changing it.
 */
3173 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3176 struct urb *urb = NULL;
3177 struct skb_data *entry;
3178 unsigned long flags;
3179 struct sk_buff_head *tqp = &dev->txq_pend;
3180 struct sk_buff *skb, *skb2;
3183 int skb_totallen, pkt_cnt;
/* pass 1 (under the pend-queue lock): decide how many packets fit */
3189 spin_lock_irqsave(&tqp->lock, flags);
3190 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3191 if (skb_is_gso(skb)) {
3193 /* handle previous packets first */
/* a GSO frame is transmitted alone, header bytes excluded from len */
3197 length = skb->len - TX_OVERHEAD;
3198 __skb_unlink(skb, tqp);
3199 spin_unlock_irqrestore(&tqp->lock, flags);
3203 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
/* each packet is placed at a 4-byte-aligned offset */
3205 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3208 spin_unlock_irqrestore(&tqp->lock, flags);
3210 /* copy to a single skb */
3211 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3215 skb_put(skb, skb_totallen);
/* pass 2: dequeue and pack the chosen packets into the big skb */
3217 for (count = pos = 0; count < pkt_cnt; count++) {
3218 skb2 = skb_dequeue(tqp);
3220 length += (skb2->len - TX_OVERHEAD);
3221 memcpy(skb->data + pos, skb2->data, skb2->len);
3222 pos += roundup(skb2->len, sizeof(u32));
3223 dev_kfree_skb_any(skb2);
3228 urb = usb_alloc_urb(0, GFP_ATOMIC);
3232 entry = (struct skb_data *)skb->cb;
/* stash payload length / packet count for tx_complete() accounting */
3235 entry->length = length;
3236 entry->num_of_packet = count;
3238 spin_lock_irqsave(&dev->txq.lock, flags);
3239 ret = usb_autopm_get_interface_async(dev->intf);
3241 spin_unlock_irqrestore(&dev->txq.lock, flags);
3245 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3246 skb->data, skb->len, tx_complete, skb);
3248 if (length % dev->maxpacket == 0) {
3249 /* send USB_ZERO_PACKET */
3250 urb->transfer_flags |= URB_ZERO_PACKET;
3254 /* if this triggers the device is still a sleep */
3255 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3256 /* transmission will be done in resume */
3257 usb_anchor_urb(urb, &dev->deferred);
3258 /* no use to process more packets */
3259 netif_stop_queue(dev->net);
3261 spin_unlock_irqrestore(&dev->txq.lock, flags);
3262 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3267 ret = usb_submit_urb(urb, GFP_ATOMIC);
3270 netif_trans_update(dev->net);
3271 lan78xx_queue_skb(&dev->txq, skb, tx_start);
/* apply backpressure once the in-flight queue is full */
3272 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3273 netif_stop_queue(dev->net);
3276 netif_stop_queue(dev->net);
3277 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3278 usb_autopm_put_interface_async(dev->intf);
3281 usb_autopm_put_interface_async(dev->intf);
3282 netif_dbg(dev, tx_err, dev->net,
3283 "tx: submit urb err %d\n", ret);
3287 spin_unlock_irqrestore(&dev->txq.lock, flags);
/* ---- drop path (labels elided) ---- */
3290 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3292 dev->net->stats.tx_dropped++;
3294 dev_kfree_skb_any(skb);
3297 netif_dbg(dev, tx_queued, dev->net,
3298 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the in-flight RX URB pool (up to 10 new URBs
 * per invocation) until rx_qlen are outstanding, rescheduling itself
 * if the pool is still short; also wakes the TX queue when the TX
 * in-flight count drops below its limit.
 */
3301 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3306 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3307 for (i = 0; i < 10; i++) {
3308 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3310 urb = usb_alloc_urb(0, GFP_ATOMIC);
/* -ENOLINK means the device is detached: stop refilling */
3312 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3316 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3317 tasklet_schedule(&dev->bh);
3319 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3320 netif_wake_queue(dev->net);
/* Main tasklet: drain dev->done, dispatching each skb according to its
 * lifecycle state (rx_done -> parse, tx_done/rx_cleanup -> free urb),
 * then refresh the stats timer and drive the TX/RX bottom halves.
 */
3323 static void lan78xx_bh(unsigned long param)
3325 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3326 struct sk_buff *skb;
3327 struct skb_data *entry;
3329 while ((skb = skb_dequeue(&dev->done))) {
3330 entry = (struct skb_data *)(skb->cb);
3331 switch (entry->state) {
3333 entry->state = rx_cleanup;
3334 rx_process(dev, skb);
3337 usb_free_urb(entry->urb);
3341 usb_free_urb(entry->urb);
3345 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3350 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3351 /* reset update timer delta */
3352 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3354 mod_timer(&dev->stat_monitor,
3355 jiffies + STAT_UPDATE_TIMER);
/* pending TX work means another pass of the tx bottom half */
3358 if (!skb_queue_empty(&dev->txq_pend))
3361 if (!timer_pending(&dev->delay) &&
3362 !test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred-event worker (dev->wq): services the EVENT_* bits that
 * interrupt/completion context cannot handle directly -- clearing TX
 * and RX endpoint halts, performing link resets, and running the
 * periodic statistics update.
 */
3367 static void lan78xx_delayedwork(struct work_struct *work)
3370 struct lan78xx_net *dev;
3372 dev = container_of(work, struct lan78xx_net, wq.work);
3374 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
/* unlink in-flight TX urbs before clearing the stall */
3375 unlink_urbs(dev, &dev->txq);
3376 status = usb_autopm_get_interface(dev->intf);
3379 status = usb_clear_halt(dev->udev, dev->pipe_out);
3380 usb_autopm_put_interface(dev->intf);
3383 status != -ESHUTDOWN) {
3384 if (netif_msg_tx_err(dev))
3386 netdev_err(dev->net,
3387 "can't clear tx halt, status %d\n",
3390 clear_bit(EVENT_TX_HALT, &dev->flags);
3391 if (status != -ESHUTDOWN)
3392 netif_wake_queue(dev->net);
3395 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3396 unlink_urbs(dev, &dev->rxq);
3397 status = usb_autopm_get_interface(dev->intf);
3400 status = usb_clear_halt(dev->udev, dev->pipe_in);
3401 usb_autopm_put_interface(dev->intf);
3404 status != -ESHUTDOWN) {
3405 if (netif_msg_rx_err(dev))
3407 netdev_err(dev->net,
3408 "can't clear rx halt, status %d\n",
3411 clear_bit(EVENT_RX_HALT, &dev->flags);
/* restart RX submissions via the bottom half */
3412 tasklet_schedule(&dev->bh);
3416 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3419 clear_bit(EVENT_LINK_RESET, &dev->flags);
3420 status = usb_autopm_get_interface(dev->intf);
3423 if (lan78xx_link_reset(dev) < 0) {
3424 usb_autopm_put_interface(dev->intf);
3426 netdev_info(dev->net, "link reset failed (%d)\n",
3429 usb_autopm_put_interface(dev->intf);
3433 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3434 lan78xx_update_stats(dev);
3436 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3438 mod_timer(&dev->stat_monitor,
3439 jiffies + (STAT_UPDATE_TIMER * dev->delta));
/* back off the stats period exponentially, capped at 50 intervals */
3441 dev->delta = min((dev->delta * 2), 50);
/* Interrupt-in URB completion: on success parse the status message via
 * lan78xx_status(); on shutdown-type errors just log; otherwise clear
 * the buffer and resubmit the urb while the interface is running.
 */
3445 static void intr_complete(struct urb *urb)
3447 struct lan78xx_net *dev = urb->context;
3448 int status = urb->status;
3453 lan78xx_status(dev, urb);
3456 /* software-driven interface shutdown */
3457 case -ENOENT: /* urb killed */
3458 case -ESHUTDOWN: /* hardware gone */
3459 netif_dbg(dev, ifdown, dev->net,
3460 "intr shutdown, code %d\n", status);
3463 /* NOTE: not throttling like RX/TX, since this endpoint
3464 * already polls infrequently
3465 */
3467 netdev_dbg(dev->net, "intr status %d\n", status);
3471 if (!netif_running(dev->net))
/* zero the buffer so stale status bytes are never re-parsed */
3474 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3475 status = usb_submit_urb(urb, GFP_ATOMIC);
3477 netif_err(dev, timer, dev->net,
3478 "intr resubmit --> %d\n", status);
/* USB disconnect callback: detach intfdata, unregister PHY fixups and
 * the PHY, unregister the netdev, flush deferred work and anchored
 * URBs, unbind, and kill/free the interrupt URB.  (free_netdev /
 * usb_put_dev live in elided lines.)
 */
3481 static void lan78xx_disconnect(struct usb_interface *intf)
3483 struct lan78xx_net *dev;
3484 struct usb_device *udev;
3485 struct net_device *net;
3487 dev = usb_get_intfdata(intf);
3488 usb_set_intfdata(intf, NULL);
3492 udev = interface_to_usbdev(intf);
/* drop the board-specific PHY fixups registered at phy init */
3495 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3496 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3498 phy_disconnect(net->phydev);
3500 unregister_netdev(net);
3502 cancel_delayed_work_sync(&dev->wq);
/* discard any TX urbs deferred while autosuspended */
3504 usb_scuttle_anchored_urbs(&dev->deferred);
3506 lan78xx_unbind(dev, intf);
3508 usb_kill_urb(dev->urb_intr);
3509 usb_free_urb(dev->urb_intr);
/* ndo_tx_timeout: the stack's watchdog fired -- unlink all in-flight
 * TX urbs and let the bottom half restart transmission.
 */
3515 static void lan78xx_tx_timeout(struct net_device *net)
3517 struct lan78xx_net *dev = netdev_priv(net);
3519 unlink_urbs(dev, &dev->txq);
3520 tasklet_schedule(&dev->bh);
/* net_device callback table wired up in lan78xx_probe(). */
3523 static const struct net_device_ops lan78xx_netdev_ops = {
3524 .ndo_open = lan78xx_open,
3525 .ndo_stop = lan78xx_stop,
3526 .ndo_start_xmit = lan78xx_start_xmit,
3527 .ndo_tx_timeout = lan78xx_tx_timeout,
3528 .ndo_change_mtu = lan78xx_change_mtu,
3529 .ndo_set_mac_address = lan78xx_set_mac_addr,
3530 .ndo_validate_addr = eth_validate_addr,
3531 .ndo_do_ioctl = lan78xx_ioctl,
3532 .ndo_set_rx_mode = lan78xx_set_multicast,
3533 .ndo_set_features = lan78xx_set_features,
3534 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3535 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
/* Periodic stats timer callback: runs in timer (atomic) context, so it
 * only flags EVENT_STAT_UPDATE for the delayed worker to service.
 */
3538 static void lan78xx_stat_monitor(unsigned long param)
3540 struct lan78xx_net *dev;
3542 dev = (struct lan78xx_net *)param;
3544 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* USB probe: allocate the netdev + private state, initialize queues,
 * tasklet, work items and timers, bind to the hardware, set up the
 * endpoints/pipes and interrupt URB, register the netdev and finally
 * initialize the PHY.  The labelled error unwind is largely elided.
 */
3547 static int lan78xx_probe(struct usb_interface *intf,
3548 const struct usb_device_id *id)
3550 struct lan78xx_net *dev;
3551 struct net_device *netdev;
3552 struct usb_device *udev;
3558 udev = interface_to_usbdev(intf);
/* hold a reference on the usb_device for the life of the netdev */
3559 udev = usb_get_dev(udev);
3561 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3563 dev_err(&intf->dev, "Error: OOM\n");
3568 /* netdev_printk() needs this */
3569 SET_NETDEV_DEV(netdev, &intf->dev);
3571 dev = netdev_priv(netdev);
3575 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3576 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3578 skb_queue_head_init(&dev->rxq);
3579 skb_queue_head_init(&dev->txq);
3580 skb_queue_head_init(&dev->done);
3581 skb_queue_head_init(&dev->rxq_pause);
3582 skb_queue_head_init(&dev->txq_pend);
3583 mutex_init(&dev->phy_mutex);
3585 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3586 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3587 init_usb_anchor(&dev->deferred);
3589 netdev->netdev_ops = &lan78xx_netdev_ops;
3590 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3591 netdev->ethtool_ops = &lan78xx_ethtool_ops;
/* NOTE(review): function/data are assigned before init_timer() --
 * relies on init_timer() not clearing those fields; confirm against
 * the kernel version this targets.
 */
3593 dev->stat_monitor.function = lan78xx_stat_monitor;
3594 dev->stat_monitor.data = (unsigned long)dev;
3596 init_timer(&dev->stat_monitor);
3598 mutex_init(&dev->stats.access_lock);
3600 ret = lan78xx_bind(dev, intf);
3603 strcpy(netdev->name, "eth%d");
/* hard_mtu (set in bind) bounds the initial MTU */
3605 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3606 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3608 /* MTU range: 68 - 9000 */
3609 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
/* endpoint layout assumed: 0 = bulk-in, 1 = bulk-out, 2 = interrupt */
3611 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3612 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3613 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3615 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3616 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3618 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3619 dev->ep_intr->desc.bEndpointAddress &
3620 USB_ENDPOINT_NUMBER_MASK);
3621 period = dev->ep_intr->desc.bInterval;
3623 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3624 buf = kmalloc(maxp, GFP_KERNEL);
3626 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3627 if (!dev->urb_intr) {
3632 usb_fill_int_urb(dev->urb_intr, dev->udev,
3633 dev->pipe_intr, buf, maxp,
3634 intr_complete, dev, period);
3638 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3640 /* driver requires remote-wakeup capability during autosuspend. */
3641 intf->needs_remote_wakeup = 1;
3643 ret = register_netdev(netdev);
3645 netif_err(dev, probe, netdev, "couldn't register the device\n");
3649 usb_set_intfdata(intf, dev);
3651 ret = device_set_wakeup_enable(&udev->dev, true);
3653 /* Default delay of 2sec has more overhead than advantage.
3654 * Set to 10sec as default.
3655 */
3656 pm_runtime_set_autosuspend_delay(&udev->dev,
3657 DEFAULT_AUTOSUSPEND_DELAY);
3659 ret = lan78xx_phy_init(dev);
/* ---- error unwind (labels elided) ---- */
3666 unregister_netdev(netdev);
3668 lan78xx_unbind(dev, intf);
3670 free_netdev(netdev);
/* Bitwise CRC-16 (polynomial 0x8005) over @buf, used to program the
 * device's wake-up-frame filter registers.  Most of the shift/XOR loop
 * body is elided from this listing.
 */
3677 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3679 const u16 crc16poly = 0x8005;
3685 for (i = 0; i < len; i++) {
3687 for (bit = 0; bit < 8; bit++) {
/* shift in one data bit; XOR the polynomial when the MSB differs */
3691 if (msb ^ (u16)(data & 1)) {
3693 crc |= (u16)0x0001U;
/* Program the chip for suspend with the requested Wake-on-LAN modes
 * (@wol is a WAKE_* bitmask): stop the MAC, clear old wake state, set
 * up WUF_CFG/WUF_MASK pattern filters for multicast/ARP wakes, choose
 * the PMT suspend mode, and finally re-enable the receiver so wake
 * frames can be seen.
 */
3702 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* well-known prefixes / ethertype used to build wakeup filters */
3710 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3711 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3712 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce the MAC in both directions before reprogramming wake state */
3714 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3715 buf &= ~MAC_TX_TXEN_;
3716 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3717 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3718 buf &= ~MAC_RX_RXEN_;
3719 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* clear previous wake-up configuration and latched wake sources */
3721 ret = lan78xx_write_reg(dev, WUCSR, 0);
3722 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3723 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3728 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3729 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3730 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* wipe all wake-up-frame filters before installing new ones */
3732 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3733 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3736 if (wol & WAKE_PHY) {
3737 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3739 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3740 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3741 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3743 if (wol & WAKE_MAGIC) {
3744 temp_wucsr |= WUCSR_MPEN_;
3746 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3747 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
/* magic-packet wake uses the deepest suspend mode (mode 3) */
3748 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3750 if (wol & WAKE_BCAST) {
3751 temp_wucsr |= WUCSR_BCST_EN_;
3753 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3754 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3755 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3757 if (wol & WAKE_MCAST) {
3758 temp_wucsr |= WUCSR_WAKE_EN_;
3760 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3761 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3762 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3764 WUF_CFGX_TYPE_MCAST_ |
3765 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3766 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x7: match the first 3 bytes of the destination address */
3768 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3769 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3770 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3771 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3774 /* for IPv6 Multicast */
3775 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3776 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3778 WUF_CFGX_TYPE_MCAST_ |
3779 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3780 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3: match the 33:33 IPv6 multicast prefix */
3782 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3783 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3784 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3785 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3788 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3789 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3790 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3792 if (wol & WAKE_UCAST) {
3793 temp_wucsr |= WUCSR_PFDA_EN_;
3795 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3796 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3797 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3799 if (wol & WAKE_ARP) {
3800 temp_wucsr |= WUCSR_WAKE_EN_;
3802 /* set WUF_CFG & WUF_MASK
3803 * for packettype (offset 12,13) = ARP (0x0806)
3804 */
3805 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3806 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3808 WUF_CFGX_TYPE_ALL_ |
3809 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3810 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3000: match bytes 12-13 (the EtherType field) */
3812 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3813 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3814 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3815 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3818 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3819 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3820 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3823 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3825 /* when multiple WOL bits are set */
3826 if (hweight_long((unsigned long)wol) > 1) {
3827 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3828 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3829 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3831 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear any wake-up status left over from a previous suspend */
3834 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3835 buf |= PMT_CTL_WUPS_MASK_;
3836 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* receiver back on so the chip can observe wake frames */
3838 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3839 buf |= MAC_RX_RXEN_;
3840 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* USB suspend callback.  On the first suspend: refuse autosuspend if
 * TX is in flight, quiesce the MAC and all URBs.  Then program either
 * link-up/good-frame wake (autosuspend) or the user's WoL settings
 * (system suspend) before the bus suspends.
 */
3845 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3847 struct lan78xx_net *dev = usb_get_intfdata(intf);
3848 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3853 event = message.event;
3855 if (!dev->suspend_count++) {
3856 spin_lock_irq(&dev->txq.lock);
3857 /* don't autosuspend while transmitting */
3858 if ((skb_queue_len(&dev->txq) ||
3859 skb_queue_len(&dev->txq_pend)) &&
3860 PMSG_IS_AUTO(message)) {
3861 spin_unlock_irq(&dev->txq.lock);
3865 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3866 spin_unlock_irq(&dev->txq.lock);
/* stop the MAC in both directions */
3870 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3871 buf &= ~MAC_TX_TXEN_;
3872 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3873 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3874 buf &= ~MAC_RX_RXEN_;
3875 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3877 /* empty out the rx and queues */
3878 netif_device_detach(dev->net);
3879 lan78xx_terminate_urbs(dev);
3880 usb_kill_urb(dev->urb_intr);
3883 netif_device_attach(dev->net);
3886 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3887 del_timer(&dev->stat_monitor);
3889 if (PMSG_IS_AUTO(message)) {
3890 /* auto suspend (selective suspend) */
3891 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3892 buf &= ~MAC_TX_TXEN_;
3893 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3894 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3895 buf &= ~MAC_RX_RXEN_;
3896 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* reset previously configured wake sources */
3898 ret = lan78xx_write_reg(dev, WUCSR, 0);
3899 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3900 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3902 /* set goodframe wakeup */
3903 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3905 buf |= WUCSR_RFE_WAKE_EN_;
3906 buf |= WUCSR_STORE_WAKE_;
3908 ret = lan78xx_write_reg(dev, WUCSR, buf);
3910 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3912 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3913 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3915 buf |= PMT_CTL_PHY_WAKE_EN_;
3916 buf |= PMT_CTL_WOL_EN_;
3917 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3918 buf |= PMT_CTL_SUS_MODE_3_;
3920 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* clear residual wake-up status */
3922 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3924 buf |= PMT_CTL_WUPS_MASK_;
3926 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* receiver stays on so wake frames are detected while asleep */
3928 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3929 buf |= MAC_RX_RXEN_;
3930 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* system suspend: program the user-selected WoL modes instead */
3932 lan78xx_set_suspend(dev, pdata->wol);
/* USB resume callback: restart the stats timer, resubmit the interrupt
 * URB, flush the TX urbs anchored while asleep, clear the wake-up
 * configuration registers and re-enable the transmitter.
 */
3941 static int lan78xx_resume(struct usb_interface *intf)
3943 struct lan78xx_net *dev = usb_get_intfdata(intf);
3944 struct sk_buff *skb;
3949 if (!timer_pending(&dev->stat_monitor)) {
3951 mod_timer(&dev->stat_monitor,
3952 jiffies + STAT_UPDATE_TIMER);
/* only the final nested resume actually restarts traffic */
3955 if (!--dev->suspend_count) {
3956 /* resume interrupt URBs */
3957 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3958 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3960 spin_lock_irq(&dev->txq.lock);
/* submit every TX urb that was deferred while autosuspended */
3961 while ((res = usb_get_from_anchor(&dev->deferred))) {
3962 skb = (struct sk_buff *)res->context;
3963 ret = usb_submit_urb(res, GFP_ATOMIC);
3965 dev_kfree_skb_any(skb);
/* submit failed: drop the autopm ref taken when it was queued */
3967 usb_autopm_put_interface_async(dev->intf);
3969 netif_trans_update(dev->net);
3970 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3974 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3975 spin_unlock_irq(&dev->txq.lock);
3977 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3978 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3979 netif_start_queue(dev->net);
3980 tasklet_schedule(&dev->bh);
/* disarm all wake sources now that we are awake */
3984 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3985 ret = lan78xx_write_reg(dev, WUCSR, 0);
3986 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3988 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3990 WUCSR2_IPV6_TCPSYN_RCD_ |
3991 WUCSR2_IPV4_TCPSYN_RCD_);
3993 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3994 WUCSR_EEE_RX_WAKE_ |
3996 WUCSR_RFE_WAKE_FR_ |
/* re-enable the transmitter (receiver handling is in elided lines) */
4001 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4002 buf |= MAC_TX_TXEN_;
4003 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* Resume after a bus reset: re-run full hardware init (the
 * lan78xx_reset() call is in an elided line), restart the PHY, then
 * fall through to the normal resume path.
 */
4008 static int lan78xx_reset_resume(struct usb_interface *intf)
4010 struct lan78xx_net *dev = usb_get_intfdata(intf);
4014 phy_start(dev->net->phydev);
4016 return lan78xx_resume(intf);
/* USB vendor/product IDs this driver binds to. */
4019 static const struct usb_device_id products[] = {
4021 /* LAN7800 USB Gigabit Ethernet Device */
4022 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4025 /* LAN7850 USB Gigabit Ethernet Device */
4026 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4029 /* LAN7801 USB Gigabit Ethernet Device */
4030 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4034 MODULE_DEVICE_TABLE(usb, products);
/* USB driver registration: supports autosuspend and opts out of
 * hub-initiated link power management.
 */
4036 static struct usb_driver lan78xx_driver = {
4037 .name = DRIVER_NAME,
4038 .id_table = products,
4039 .probe = lan78xx_probe,
4040 .disconnect = lan78xx_disconnect,
4041 .suspend = lan78xx_suspend,
4042 .resume = lan78xx_resume,
4043 .reset_resume = lan78xx_reset_resume,
4044 .supports_autosuspend = 1,
4045 .disable_hub_initiated_lpm = 1,
4048 module_usb_driver(lan78xx_driver);
4050 MODULE_AUTHOR(DRIVER_AUTHOR);
4051 MODULE_DESCRIPTION(DRIVER_DESC);
4052 MODULE_LICENSE("GPL");