2ccda40a7d0a7d481ca0c4e82391ee5a096f65c2
[platform/kernel/linux-rpi.git] / drivers / net / usb / lan78xx.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32
33 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME     "lan78xx"
36
37 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
38 #define THROTTLE_JIFFIES                (HZ / 8)
39 #define UNLINK_TIMEOUT_MS               3
40
41 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
42
43 #define SS_USB_PKT_SIZE                 (1024)
44 #define HS_USB_PKT_SIZE                 (512)
45 #define FS_USB_PKT_SIZE                 (64)
46
47 #define MAX_RX_FIFO_SIZE                (12 * 1024)
48 #define MAX_TX_FIFO_SIZE                (12 * 1024)
49
50 #define FLOW_THRESHOLD(n)               ((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)    ((FLOW_THRESHOLD(on)  << 0) | \
52                                          (FLOW_THRESHOLD(off) << 8))
53
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS                      9216
56 #define FLOW_ON_HS                      8704
57
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS                     4096
60 #define FLOW_OFF_HS                     1024
61
62 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY           (0x0800)
64 #define MAX_SINGLE_PACKET_SIZE          (9000)
65 #define DEFAULT_TX_CSUM_ENABLE          (true)
66 #define DEFAULT_RX_CSUM_ENABLE          (true)
67 #define DEFAULT_TSO_CSUM_ENABLE         (true)
68 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
69 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
70 #define TX_OVERHEAD                     (8)
71 #define RXW_PADDING                     2
72
73 #define LAN78XX_USB_VENDOR_ID           (0x0424)
74 #define LAN7800_USB_PRODUCT_ID          (0x7800)
75 #define LAN7850_USB_PRODUCT_ID          (0x7850)
76 #define LAN7801_USB_PRODUCT_ID          (0x7801)
77 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
78 #define LAN78XX_OTP_MAGIC               (0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID          (0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
81
82 #define MII_READ                        1
83 #define MII_WRITE                       0
84
85 #define EEPROM_INDICATOR                (0xA5)
86 #define EEPROM_MAC_OFFSET               (0x01)
87 #define MAX_EEPROM_SIZE                 512
88 #define OTP_INDICATOR_1                 (0xF3)
89 #define OTP_INDICATOR_2                 (0xF7)
90
91 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
92                                          WAKE_MCAST | WAKE_BCAST | \
93                                          WAKE_ARP | WAKE_MAGIC)
94
95 /* USB related defines */
96 #define BULK_IN_PIPE                    1
97 #define BULK_OUT_PIPE                   2
98
99 /* default autosuspend delay (mSec)*/
100 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
101
102 /* statistic update interval (mSec) */
103 #define STAT_UPDATE_TIMER               (1 * 1000)
104
105 /* time to wait for MAC or FCT to stop (jiffies) */
106 #define HW_DISABLE_TIMEOUT              (HZ / 10)
107
108 /* time to wait between polling MAC or FCT state (ms) */
109 #define HW_DISABLE_DELAY_MS             1
110
111 /* defines interrupts from interrupt EP */
112 #define MAX_INT_EP                      (32)
113 #define INT_EP_INTEP                    (31)
114 #define INT_EP_OTP_WR_DONE              (28)
115 #define INT_EP_EEE_TX_LPI_START         (26)
116 #define INT_EP_EEE_TX_LPI_STOP          (25)
117 #define INT_EP_EEE_RX_LPI               (24)
118 #define INT_EP_MAC_RESET_TIMEOUT        (23)
119 #define INT_EP_RDFO                     (22)
120 #define INT_EP_TXE                      (21)
121 #define INT_EP_USB_STATUS               (20)
122 #define INT_EP_TX_DIS                   (19)
123 #define INT_EP_RX_DIS                   (18)
124 #define INT_EP_PHY                      (17)
125 #define INT_EP_DP                       (16)
126 #define INT_EP_MAC_ERR                  (15)
127 #define INT_EP_TDFU                     (14)
128 #define INT_EP_TDFO                     (13)
129 #define INT_EP_UTX                      (12)
130 #define INT_EP_GPIO_11                  (11)
131 #define INT_EP_GPIO_10                  (10)
132 #define INT_EP_GPIO_9                   (9)
133 #define INT_EP_GPIO_8                   (8)
134 #define INT_EP_GPIO_7                   (7)
135 #define INT_EP_GPIO_6                   (6)
136 #define INT_EP_GPIO_5                   (5)
137 #define INT_EP_GPIO_4                   (4)
138 #define INT_EP_GPIO_3                   (3)
139 #define INT_EP_GPIO_2                   (2)
140 #define INT_EP_GPIO_1                   (1)
141 #define INT_EP_GPIO_0                   (0)
142
/* Statistic name strings (ETH_GSTRING_LEN each), presumably reported to
 * userspace via ethtool get_strings — these are user-visible ABI, do not
 * rename. The index order matches the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 one-to-one.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
        "RX FCS Errors",
        "RX Alignment Errors",
        "Rx Fragment Errors",
        "RX Jabber Errors",
        "RX Undersize Frame Errors",
        "RX Oversize Frame Errors",
        "RX Dropped Frames",
        "RX Unicast Byte Count",
        "RX Broadcast Byte Count",
        "RX Multicast Byte Count",
        "RX Unicast Frames",
        "RX Broadcast Frames",
        "RX Multicast Frames",
        "RX Pause Frames",
        "RX 64 Byte Frames",
        "RX 65 - 127 Byte Frames",
        "RX 128 - 255 Byte Frames",
        "RX 256 - 511 Bytes Frames",
        "RX 512 - 1023 Byte Frames",
        "RX 1024 - 1518 Byte Frames",
        "RX Greater 1518 Byte Frames",
        "EEE RX LPI Transitions",
        "EEE RX LPI Time",
        "TX FCS Errors",
        "TX Excess Deferral Errors",
        "TX Carrier Errors",
        "TX Bad Byte Count",
        "TX Single Collisions",
        "TX Multiple Collisions",
        "TX Excessive Collision",
        "TX Late Collisions",
        "TX Unicast Byte Count",
        "TX Broadcast Byte Count",
        "TX Multicast Byte Count",
        "TX Unicast Frames",
        "TX Broadcast Frames",
        "TX Multicast Frames",
        "TX Pause Frames",
        "TX 64 Byte Frames",
        "TX 65 - 127 Byte Frames",
        "TX 128 - 255 Byte Frames",
        "TX 256 - 511 Bytes Frames",
        "TX 512 - 1023 Byte Frames",
        "TX 1024 - 1518 Byte Frames",
        "TX Greater 1518 Byte Frames",
        "EEE TX LPI Transitions",
        "EEE TX LPI Time",
};
192
/* Raw 32-bit hardware statistics counters as returned by the device in
 * response to USB_VENDOR_REQUEST_GET_STATS (see lan78xx_read_stats()).
 * The device sends this block little-endian; field order is part of the
 * wire format and also keyed to lan78xx_gstrings[] — do not reorder.
 * Counters are 32-bit and roll over; see lan78xx_check_stat_rollover().
 */
struct lan78xx_statstage {
        u32 rx_fcs_errors;
        u32 rx_alignment_errors;
        u32 rx_fragment_errors;
        u32 rx_jabber_errors;
        u32 rx_undersize_frame_errors;
        u32 rx_oversize_frame_errors;
        u32 rx_dropped_frames;
        u32 rx_unicast_byte_count;
        u32 rx_broadcast_byte_count;
        u32 rx_multicast_byte_count;
        u32 rx_unicast_frames;
        u32 rx_broadcast_frames;
        u32 rx_multicast_frames;
        u32 rx_pause_frames;
        u32 rx_64_byte_frames;
        u32 rx_65_127_byte_frames;
        u32 rx_128_255_byte_frames;
        u32 rx_256_511_bytes_frames;
        u32 rx_512_1023_byte_frames;
        u32 rx_1024_1518_byte_frames;
        u32 rx_greater_1518_byte_frames;
        u32 eee_rx_lpi_transitions;
        u32 eee_rx_lpi_time;
        u32 tx_fcs_errors;
        u32 tx_excess_deferral_errors;
        u32 tx_carrier_errors;
        u32 tx_bad_byte_count;
        u32 tx_single_collisions;
        u32 tx_multiple_collisions;
        u32 tx_excessive_collision;
        u32 tx_late_collisions;
        u32 tx_unicast_byte_count;
        u32 tx_broadcast_byte_count;
        u32 tx_multicast_byte_count;
        u32 tx_unicast_frames;
        u32 tx_broadcast_frames;
        u32 tx_multicast_frames;
        u32 tx_pause_frames;
        u32 tx_64_byte_frames;
        u32 tx_65_127_byte_frames;
        u32 tx_128_255_byte_frames;
        u32 tx_256_511_bytes_frames;
        u32 tx_512_1023_byte_frames;
        u32 tx_1024_1518_byte_frames;
        u32 tx_greater_1518_byte_frames;
        u32 eee_tx_lpi_transitions;
        u32 eee_tx_lpi_time;
};
242
/* 64-bit accumulated statistics. Must declare exactly the same counters
 * in exactly the same order as struct lan78xx_statstage:
 * lan78xx_update_stats() iterates both structs as flat arrays in
 * parallel, so any divergence silently corrupts the totals.
 */
struct lan78xx_statstage64 {
        u64 rx_fcs_errors;
        u64 rx_alignment_errors;
        u64 rx_fragment_errors;
        u64 rx_jabber_errors;
        u64 rx_undersize_frame_errors;
        u64 rx_oversize_frame_errors;
        u64 rx_dropped_frames;
        u64 rx_unicast_byte_count;
        u64 rx_broadcast_byte_count;
        u64 rx_multicast_byte_count;
        u64 rx_unicast_frames;
        u64 rx_broadcast_frames;
        u64 rx_multicast_frames;
        u64 rx_pause_frames;
        u64 rx_64_byte_frames;
        u64 rx_65_127_byte_frames;
        u64 rx_128_255_byte_frames;
        u64 rx_256_511_bytes_frames;
        u64 rx_512_1023_byte_frames;
        u64 rx_1024_1518_byte_frames;
        u64 rx_greater_1518_byte_frames;
        u64 eee_rx_lpi_transitions;
        u64 eee_rx_lpi_time;
        u64 tx_fcs_errors;
        u64 tx_excess_deferral_errors;
        u64 tx_carrier_errors;
        u64 tx_bad_byte_count;
        u64 tx_single_collisions;
        u64 tx_multiple_collisions;
        u64 tx_excessive_collision;
        u64 tx_late_collisions;
        u64 tx_unicast_byte_count;
        u64 tx_broadcast_byte_count;
        u64 tx_multicast_byte_count;
        u64 tx_unicast_frames;
        u64 tx_broadcast_frames;
        u64 tx_multicast_frames;
        u64 tx_pause_frames;
        u64 tx_64_byte_frames;
        u64 tx_65_127_byte_frames;
        u64 tx_128_255_byte_frames;
        u64 tx_256_511_bytes_frames;
        u64 tx_512_1023_byte_frames;
        u64 tx_1024_1518_byte_frames;
        u64 tx_greater_1518_byte_frames;
        u64 eee_tx_lpi_transitions;
        u64 eee_tx_lpi_time;
};
292
/* Register addresses dumped in this order — presumably the backing table
 * for an ethtool get_regs implementation (the consumer is outside this
 * chunk; confirm against the regs-dump callback).
 */
static u32 lan78xx_regs[] = {
        ID_REV,
        INT_STS,
        HW_CFG,
        PMT_CTL,
        E2P_CMD,
        E2P_DATA,
        USB_STATUS,
        VLAN_TYPE,
        MAC_CR,
        MAC_RX,
        MAC_TX,
        FLOW,
        ERR_STS,
        MII_ACC,
        MII_DATA,
        EEE_TX_LPI_REQ_DLY,
        EEE_TW_TX_SYS,
        EEE_TX_LPI_REM_DLY,
        WUCSR
};
314
315 #define PHY_REG_SIZE (32 * sizeof(u32))
316
317 struct lan78xx_net;
318
/* Driver-private receive-filter state; presumably hung off
 * lan78xx_net::driver_priv (allocation site is outside this chunk).
 */
struct lan78xx_priv {
        struct lan78xx_net *dev;        /* back-pointer to owning device */
        u32 rfe_ctl;                    /* RFE control value; see rfe_ctl_lock */
        u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
        u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
        u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN filter table */
        struct mutex dataport_mutex; /* for dataport access */
        spinlock_t rfe_ctl_lock; /* for rfe register access */
        struct work_struct set_multicast; /* deferred multicast filter update */
        struct work_struct set_vlan;      /* deferred VLAN filter update */
        u32 wol;                        /* Wake-on-LAN configuration */
};
331
/* Lifecycle state of a URB-carrying skb; stored in skb_data::state
 * (skb->cb). `illegal` is deliberately 0 so a zeroed cb is invalid.
 */
enum skb_state {
        illegal = 0,
        tx_start,
        tx_done,
        rx_start,
        rx_done,
        rx_cleanup,
        unlink_start
};
341
struct skb_data {               /* skb->cb is one of these */
        struct urb *urb;        /* URB associated with this skb's transfer */
        struct lan78xx_net *dev;
        enum skb_state state;   /* current TX/RX lifecycle stage */
        size_t length;          /* transfer length in bytes */
        int num_of_packet;      /* packet count for this transfer — presumably
                                 * >1 for batched TX; confirm at producer */
};
349
/* Context carried by asynchronous control requests: the request header
 * plus the device it targets (consumer is outside this chunk).
 */
struct usb_context {
        struct usb_ctrlrequest req;
        struct lan78xx_net *dev;
};
354
355 #define EVENT_TX_HALT                   0
356 #define EVENT_RX_HALT                   1
357 #define EVENT_RX_MEMORY                 2
358 #define EVENT_STS_SPLIT                 3
359 #define EVENT_LINK_RESET                4
360 #define EVENT_RX_PAUSED                 5
361 #define EVENT_DEV_WAKING                6
362 #define EVENT_DEV_ASLEEP                7
363 #define EVENT_DEV_OPEN                  8
364 #define EVENT_STAT_UPDATE               9
365 #define EVENT_DEV_DISCONNECT            10
366
/* Statistics aggregation state. Hardware counters are 32-bit and wrap,
 * so the last raw snapshot, the number of wraps seen per counter, and
 * the derived 64-bit totals are all tracked (see lan78xx_update_stats()
 * and lan78xx_check_stat_rollover()).
 */
struct statstage {
        struct mutex                    access_lock;    /* for stats access */
        struct lan78xx_statstage        saved;          /* last raw snapshot */
        struct lan78xx_statstage        rollover_count; /* wraps per counter */
        struct lan78xx_statstage        rollover_max;   /* max value before wrap */
        struct lan78xx_statstage64      curr_stat;      /* accumulated totals */
};
374
/* State for demultiplexing device interrupts into a Linux irq_domain —
 * presumably so the PHY interrupt (INT_EP_PHY) can be handled through
 * the generic IRQ layer; confirm against the domain setup code.
 */
struct irq_domain_data {
        struct irq_domain       *irqdomain;
        unsigned int            phyirq;         /* mapped PHY interrupt number */
        struct irq_chip         *irqchip;
        irq_flow_handler_t      irq_handler;
        u32                     irqenable;      /* cached interrupt enable mask */
        struct mutex            irq_lock;               /* for irq bus access */
};
383
/* Per-device driver context. NOTE(review): annotations below are drawn
 * from how the fields are used in this chunk; fields whose users are
 * elsewhere in the file are left unannotated or hedged.
 */
struct lan78xx_net {
        struct net_device       *net;
        struct usb_device       *udev;
        struct usb_interface    *intf;
        void                    *driver_priv;   /* presumably struct lan78xx_priv */

        int                     rx_qlen;
        int                     tx_qlen;
        struct sk_buff_head     rxq;
        struct sk_buff_head     txq;
        struct sk_buff_head     done;
        struct sk_buff_head     txq_pend;

        struct tasklet_struct   bh;
        struct delayed_work     wq;

        int                     msg_enable;

        struct urb              *urb_intr;
        struct usb_anchor       deferred;

        struct mutex            dev_mutex; /* serialise open/stop wrt suspend/resume */
        struct mutex            phy_mutex; /* for phy access */
        unsigned int            pipe_in, pipe_out, pipe_intr;

        u32                     hard_mtu;       /* count any extra framing */
        size_t                  rx_urb_size;    /* size for rx urbs */

        unsigned long           flags;          /* EVENT_* bits */

        wait_queue_head_t       *wait;
        unsigned char           suspend_count;

        unsigned int            maxpacket;
        struct timer_list       stat_monitor;

        unsigned long           data[5];

        int                     link_on;
        u8                      mdix_ctrl;

        u32                     chipid;         /* compared against ID_REV_CHIP_ID_* */
        u32                     chiprev;
        struct mii_bus          *mdiobus;
        phy_interface_t         interface;

        int                     fc_autoneg;
        u8                      fc_request_control;

        int                     delta;
        struct statstage        stats;          /* aggregated statistics state */

        struct irq_domain_data  domain_data;
};
438
439 /* define external phy id */
440 #define PHY_LAN8835                     (0x0007C130)
441 #define PHY_KSZ9031RNX                  (0x00221620)
442
443 /* use ethtool to change the level for any given device */
444 static int msg_level = -1;
445 module_param(msg_level, int, 0);
446 MODULE_PARM_DESC(msg_level, "Override default message level");
447
448 /* TSO seems to be having some issue with Selective Acknowledge (SACK) that
449  * results in lost data never being retransmitted.
450  * Disable it by default now, but adds a module parameter to enable it for
451  * debug purposes (the full cause is not currently understood).
452  */
453 static bool enable_tso;
454 module_param(enable_tso, bool, 0644);
455 MODULE_PARM_DESC(enable_tso, "Enables TCP segmentation offload");
456
457 #define INT_URB_MICROFRAMES_PER_MS      8
458 static int int_urb_interval_ms = 8;
459 module_param(int_urb_interval_ms, int, 0);
460 MODULE_PARM_DESC(int_urb_interval_ms, "Override usb interrupt urb interval");
461
462 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
463 {
464         u32 *buf;
465         int ret;
466
467         if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
468                 return -ENODEV;
469
470         buf = kmalloc(sizeof(u32), GFP_KERNEL);
471         if (!buf)
472                 return -ENOMEM;
473
474         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
475                               USB_VENDOR_REQUEST_READ_REGISTER,
476                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
477                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
478         if (likely(ret >= 0)) {
479                 le32_to_cpus(buf);
480                 *data = *buf;
481         } else if (net_ratelimit()) {
482                 netdev_warn(dev->net,
483                             "Failed to read register index 0x%08x. ret = %d",
484                             index, ret);
485         }
486
487         kfree(buf);
488
489         return ret;
490 }
491
492 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
493 {
494         u32 *buf;
495         int ret;
496
497         if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
498                 return -ENODEV;
499
500         buf = kmalloc(sizeof(u32), GFP_KERNEL);
501         if (!buf)
502                 return -ENOMEM;
503
504         *buf = data;
505         cpu_to_le32s(buf);
506
507         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
508                               USB_VENDOR_REQUEST_WRITE_REGISTER,
509                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
510                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
511         if (unlikely(ret < 0) &&
512             net_ratelimit()) {
513                 netdev_warn(dev->net,
514                             "Failed to write register index 0x%08x. ret = %d",
515                             index, ret);
516         }
517
518         kfree(buf);
519
520         return ret;
521 }
522
523 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
524                               u32 data)
525 {
526         int ret;
527         u32 buf;
528
529         ret = lan78xx_read_reg(dev, reg, &buf);
530         if (ret < 0)
531                 return ret;
532
533         buf &= ~mask;
534         buf |= (mask & data);
535
536         ret = lan78xx_write_reg(dev, reg, buf);
537         if (ret < 0)
538                 return ret;
539
540         return 0;
541 }
542
543 static int lan78xx_read_stats(struct lan78xx_net *dev,
544                               struct lan78xx_statstage *data)
545 {
546         int ret = 0;
547         int i;
548         struct lan78xx_statstage *stats;
549         u32 *src;
550         u32 *dst;
551
552         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
553         if (!stats)
554                 return -ENOMEM;
555
556         ret = usb_control_msg(dev->udev,
557                               usb_rcvctrlpipe(dev->udev, 0),
558                               USB_VENDOR_REQUEST_GET_STATS,
559                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
560                               0,
561                               0,
562                               (void *)stats,
563                               sizeof(*stats),
564                               USB_CTRL_SET_TIMEOUT);
565         if (likely(ret >= 0)) {
566                 src = (u32 *)stats;
567                 dst = (u32 *)data;
568                 for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
569                         le32_to_cpus(&src[i]);
570                         dst[i] = src[i];
571                 }
572         } else {
573                 netdev_warn(dev->net,
574                             "Failed to read stat ret = %d", ret);
575         }
576
577         kfree(stats);
578
579         return ret;
580 }
581
/* Bump the per-counter rollover count when the fresh hardware reading is
 * smaller than the previously saved one — i.e. the 32-bit counter wrapped
 * since the last snapshot.
 */
#define check_counter_rollover(struct1, dev_stats, member)              \
        do {                                                            \
                if ((struct1)->member < (dev_stats).saved.member)       \
                        (dev_stats).rollover_count.member++;            \
        } while (0)

/* Detect 32-bit wrap for every hardware statistic in @stats, then save
 * @stats as the reference snapshot for the next comparison. Called with
 * dev->stats.access_lock held (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
                                        struct lan78xx_statstage *stats)
{
        check_counter_rollover(stats, dev->stats, rx_fcs_errors);
        check_counter_rollover(stats, dev->stats, rx_alignment_errors);
        check_counter_rollover(stats, dev->stats, rx_fragment_errors);
        check_counter_rollover(stats, dev->stats, rx_jabber_errors);
        check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
        check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
        check_counter_rollover(stats, dev->stats, rx_dropped_frames);
        check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
        check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
        check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
        check_counter_rollover(stats, dev->stats, rx_unicast_frames);
        check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
        check_counter_rollover(stats, dev->stats, rx_multicast_frames);
        check_counter_rollover(stats, dev->stats, rx_pause_frames);
        check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
        check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
        check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
        check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
        check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
        check_counter_rollover(stats, dev->stats, tx_fcs_errors);
        check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
        check_counter_rollover(stats, dev->stats, tx_carrier_errors);
        check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
        check_counter_rollover(stats, dev->stats, tx_single_collisions);
        check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
        check_counter_rollover(stats, dev->stats, tx_excessive_collision);
        check_counter_rollover(stats, dev->stats, tx_late_collisions);
        check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
        check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
        check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
        check_counter_rollover(stats, dev->stats, tx_unicast_frames);
        check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
        check_counter_rollover(stats, dev->stats, tx_multicast_frames);
        check_counter_rollover(stats, dev->stats, tx_pause_frames);
        check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
        check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
        check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
        check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
        check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

        memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
641
642 static void lan78xx_update_stats(struct lan78xx_net *dev)
643 {
644         u32 *p, *count, *max;
645         u64 *data;
646         int i;
647         struct lan78xx_statstage lan78xx_stats;
648
649         if (usb_autopm_get_interface(dev->intf) < 0)
650                 return;
651
652         p = (u32 *)&lan78xx_stats;
653         count = (u32 *)&dev->stats.rollover_count;
654         max = (u32 *)&dev->stats.rollover_max;
655         data = (u64 *)&dev->stats.curr_stat;
656
657         mutex_lock(&dev->stats.access_lock);
658
659         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
660                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
661
662         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
663                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
664
665         mutex_unlock(&dev->stats.access_lock);
666
667         usb_autopm_put_interface(dev->intf);
668 }
669
670 /* Loop until the read is completed with timeout called with phy_mutex held */
671 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
672 {
673         unsigned long start_time = jiffies;
674         u32 val;
675         int ret;
676
677         do {
678                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
679                 if (unlikely(ret < 0))
680                         return -EIO;
681
682                 if (!(val & MII_ACC_MII_BUSY_))
683                         return 0;
684         } while (!time_after(jiffies, start_time + HZ));
685
686         return -EIO;
687 }
688
689 static inline u32 mii_access(int id, int index, int read)
690 {
691         u32 ret;
692
693         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
694         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
695         if (read)
696                 ret |= MII_ACC_MII_READ_;
697         else
698                 ret |= MII_ACC_MII_WRITE_;
699         ret |= MII_ACC_MII_BUSY_;
700
701         return ret;
702 }
703
704 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
705 {
706         unsigned long start_time = jiffies;
707         u32 val;
708         int ret;
709
710         do {
711                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
712                 if (unlikely(ret < 0))
713                         return -EIO;
714
715                 if (!(val & E2P_CMD_EPC_BUSY_) ||
716                     (val & E2P_CMD_EPC_TIMEOUT_))
717                         break;
718                 usleep_range(40, 100);
719         } while (!time_after(jiffies, start_time + HZ));
720
721         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
722                 netdev_warn(dev->net, "EEPROM read operation timeout");
723                 return -EIO;
724         }
725
726         return 0;
727 }
728
729 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
730 {
731         unsigned long start_time = jiffies;
732         u32 val;
733         int ret;
734
735         do {
736                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
737                 if (unlikely(ret < 0))
738                         return -EIO;
739
740                 if (!(val & E2P_CMD_EPC_BUSY_))
741                         return 0;
742
743                 usleep_range(40, 100);
744         } while (!time_after(jiffies, start_time + HZ));
745
746         netdev_warn(dev->net, "EEPROM is busy");
747         return -EIO;
748 }
749
750 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
751                                    u32 length, u8 *data)
752 {
753         u32 val;
754         u32 saved;
755         int i, ret;
756         int retval;
757
758         /* depends on chip, some EEPROM pins are muxed with LED function.
759          * disable & restore LED function to access EEPROM.
760          */
761         ret = lan78xx_read_reg(dev, HW_CFG, &val);
762         saved = val;
763         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
764                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
765                 ret = lan78xx_write_reg(dev, HW_CFG, val);
766         }
767
768         retval = lan78xx_eeprom_confirm_not_busy(dev);
769         if (retval)
770                 return retval;
771
772         for (i = 0; i < length; i++) {
773                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
774                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
775                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
776                 if (unlikely(ret < 0)) {
777                         retval = -EIO;
778                         goto exit;
779                 }
780
781                 retval = lan78xx_wait_eeprom(dev);
782                 if (retval < 0)
783                         goto exit;
784
785                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
786                 if (unlikely(ret < 0)) {
787                         retval = -EIO;
788                         goto exit;
789                 }
790
791                 data[i] = val & 0xFF;
792                 offset++;
793         }
794
795         retval = 0;
796 exit:
797         if (dev->chipid == ID_REV_CHIP_ID_7800_)
798                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
799
800         return retval;
801 }
802
803 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
804                                u32 length, u8 *data)
805 {
806         u8 sig;
807         int ret;
808
809         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
810         if ((ret == 0) && (sig == EEPROM_INDICATOR))
811                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
812         else
813                 ret = -EINVAL;
814
815         return ret;
816 }
817
/* Write @length bytes to the EEPROM starting at @offset.
 *
 * Sequence: optionally park the LED mux (LAN7800), wait for the
 * controller to go idle, issue a write/erase-enable (EWEN) command,
 * then for each byte load E2P_DATA and issue a WRITE command, waiting
 * for completion after every step. HW_CFG is restored on all paths.
 *
 * `ret` tracks raw register-access results; `retval` is the value
 * returned to the caller: 0 on success or a negative errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
                                    u32 length, u8 *data)
{
        u32 val;
        u32 saved;
        int i, ret;
        int retval;

        /* depends on chip, some EEPROM pins are muxed with LED function.
         * disable & restore LED function to access EEPROM.
         */
        ret = lan78xx_read_reg(dev, HW_CFG, &val);
        saved = val;
        if (dev->chipid == ID_REV_CHIP_ID_7800_) {
                val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
                ret = lan78xx_write_reg(dev, HW_CFG, val);
        }

        retval = lan78xx_eeprom_confirm_not_busy(dev);
        if (retval)
                goto exit;

        /* Issue write/erase enable command */
        val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
        ret = lan78xx_write_reg(dev, E2P_CMD, val);
        if (unlikely(ret < 0)) {
                retval = -EIO;
                goto exit;
        }

        retval = lan78xx_wait_eeprom(dev);
        if (retval < 0)
                goto exit;

        for (i = 0; i < length; i++) {
                /* Fill data register */
                val = data[i];
                ret = lan78xx_write_reg(dev, E2P_DATA, val);
                if (ret < 0) {
                        retval = -EIO;
                        goto exit;
                }

                /* Send "write" command */
                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
                ret = lan78xx_write_reg(dev, E2P_CMD, val);
                if (ret < 0) {
                        retval = -EIO;
                        goto exit;
                }

                retval = lan78xx_wait_eeprom(dev);
                if (retval < 0)
                        goto exit;

                offset++;
        }

        retval = 0;
exit:
        /* restore the LED configuration saved on entry (LAN7800 only) */
        if (dev->chipid == ID_REV_CHIP_ID_7800_)
                ret = lan78xx_write_reg(dev, HW_CFG, saved);

        return retval;
}
884
/* Read @length bytes of raw OTP memory into @data starting at @offset.
 *
 * If the OTP block is powered down, it is powered up first and the
 * function polls (up to ~1s) for the power-down bit to clear.  Each
 * byte is then read via an ADDR1/ADDR2 + READ + GO register sequence,
 * polling OTP_STATUS until the controller goes idle.
 * Returns 0 on success or -EIO on a poll timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* the byte address is split across two registers */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
937
/* Program @length bytes from @data into raw OTP memory at @offset.
 *
 * Powers the OTP block up if needed, selects BYTE program mode, then
 * programs one byte per ADDR/DATA/GO cycle, polling OTP_STATUS after
 * each byte.  Returns 0 on success or -EIO on a poll timeout.
 *
 * NOTE(review): programming is kicked off via OTP_TST_CMD
 * (OTP_TST_CMD_PRGVRFY_) while the read path uses OTP_FUNC_CMD —
 * confirm against the LAN78xx datasheet that this is the intended
 * command register for production programming.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* the byte address is split across two registers */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
989
990 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
991                             u32 length, u8 *data)
992 {
993         u8 sig;
994         int ret;
995
996         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
997
998         if (ret == 0) {
999                 if (sig == OTP_INDICATOR_2)
1000                         offset += 0x100;
1001                 else if (sig != OTP_INDICATOR_1)
1002                         ret = -EINVAL;
1003                 if (!ret)
1004                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
1005         }
1006
1007         return ret;
1008 }
1009
1010 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1011 {
1012         int i, ret;
1013
1014         for (i = 0; i < 100; i++) {
1015                 u32 dp_sel;
1016
1017                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1018                 if (unlikely(ret < 0))
1019                         return -EIO;
1020
1021                 if (dp_sel & DP_SEL_DPRDY_)
1022                         return 0;
1023
1024                 usleep_range(40, 100);
1025         }
1026
1027         netdev_warn(dev->net, "%s timed out", __func__);
1028
1029         return -EIO;
1030 }
1031
1032 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1033                                   u32 addr, u32 length, u32 *buf)
1034 {
1035         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1036         u32 dp_sel;
1037         int i, ret;
1038
1039         if (usb_autopm_get_interface(dev->intf) < 0)
1040                 return 0;
1041
1042         mutex_lock(&pdata->dataport_mutex);
1043
1044         ret = lan78xx_dataport_wait_not_busy(dev);
1045         if (ret < 0)
1046                 goto done;
1047
1048         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1049
1050         dp_sel &= ~DP_SEL_RSEL_MASK_;
1051         dp_sel |= ram_select;
1052         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1053
1054         for (i = 0; i < length; i++) {
1055                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1056
1057                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1058
1059                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1060
1061                 ret = lan78xx_dataport_wait_not_busy(dev);
1062                 if (ret < 0)
1063                         goto done;
1064         }
1065
1066 done:
1067         mutex_unlock(&pdata->dataport_mutex);
1068         usb_autopm_put_interface(dev->intf);
1069
1070         return ret;
1071 }
1072
1073 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1074                                     int index, u8 addr[ETH_ALEN])
1075 {
1076         u32 temp;
1077
1078         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1079                 temp = addr[3];
1080                 temp = addr[2] | (temp << 8);
1081                 temp = addr[1] | (temp << 8);
1082                 temp = addr[0] | (temp << 8);
1083                 pdata->pfilter_table[index][1] = temp;
1084                 temp = addr[5];
1085                 temp = addr[4] | (temp << 8);
1086                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1087                 pdata->pfilter_table[index][0] = temp;
1088         }
1089 }
1090
/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	/* bits 31..23 of the Ethernet CRC select one of 512 hash bits */
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
1096
1097 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1098 {
1099         struct lan78xx_priv *pdata =
1100                         container_of(param, struct lan78xx_priv, set_multicast);
1101         struct lan78xx_net *dev = pdata->dev;
1102         int i;
1103
1104         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1105                   pdata->rfe_ctl);
1106
1107         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1108                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1109
1110         for (i = 1; i < NUM_OF_MAF; i++) {
1111                 lan78xx_write_reg(dev, MAF_HI(i), 0);
1112                 lan78xx_write_reg(dev, MAF_LO(i),
1113                                   pdata->pfilter_table[i][1]);
1114                 lan78xx_write_reg(dev, MAF_HI(i),
1115                                   pdata->pfilter_table[i][0]);
1116         }
1117
1118         lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1119 }
1120
/* ndo_set_rx_mode handler: rebuild the RX filter configuration
 * (promisc/allmulti flags, perfect address filters and multicast hash
 * table) in pdata under the rfe_ctl spinlock, then defer the actual
 * register writes to a workqueue — this runs in atomic context where
 * USB register access would sleep.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: clear the filter-mode bits, the
	 * hash table and all perfect-filter entries
	 */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast reception is always enabled */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow addresses fall back to the
				 * 512-bit multicast hash filter
				 */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1184
/* Program MAC flow-control registers from the resolved pause
 * capabilities — negotiated from @lcladv/@rmtadv when fc_autoneg is
 * set, otherwise taken from the user-requested settings — and set the
 * speed-dependent RX FIFO thresholds.  Always returns 0.
 * (@duplex is currently unused.)
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* low 16 bits carry the pause time; use the maximum (0xFFFF) */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* on/off thresholds depend on USB link speed; other speeds get
	 * no threshold programming (fct_flow stays 0)
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1218
/* Reset the MAC by setting the self-clearing MAC_CR_RST_ bit and
 * polling (up to ~1s) for it to drop.  phy_mutex is held throughout
 * and the MDIO bus is drained first, because resetting while an MDIO
 * transaction is in flight can lock up the MAC interface.
 * Returns 0 on success, -ETIMEDOUT if the bit never clears, or the
 * register-access error code.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}
1264
/* Handle a PHY link event: acknowledge the interrupt sources, sample
 * the current link state, and on a transition either reset the MAC
 * (link went down) or configure USB power-state settings and MAC flow
 * control for the new speed (link came up).
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	/* Acknowledge any pending PHY interrupt, lest it be the last */
	phy_read(phydev, LAN88XX_INT_STS);

	/* sample link state under the PHY lock */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		/* no point collecting stats while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* tune USB3 U1/U2 low-power states to the line rate */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		/* restart the stats monitor now that the link is up */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return 0;
}
1358
1359 /* some work can't be done in tasklets, so we use keventd
1360  *
1361  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1362  * but tasklet_schedule() doesn't.      hope the failure is rare.
1363  */
1364 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1365 {
1366         set_bit(work, &dev->flags);
1367         if (!schedule_delayed_work(&dev->wq, 0))
1368                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1369 }
1370
1371 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1372 {
1373         u32 intdata;
1374
1375         if (urb->actual_length != 4) {
1376                 netdev_warn(dev->net,
1377                             "unexpected urb length %d", urb->actual_length);
1378                 return;
1379         }
1380
1381         intdata = get_unaligned_le32(urb->transfer_buffer);
1382
1383         if (intdata & INT_ENP_PHY_INT) {
1384                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1385                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1386
1387                 if (dev->domain_data.phyirq > 0) {
1388                         local_irq_disable();
1389                         generic_handle_irq(dev->domain_data.phyirq);
1390                         local_irq_enable();
1391                 }
1392         } else {
1393                 netdev_warn(dev->net,
1394                             "unexpected interrupt: 0x%08x\n", intdata);
1395         }
1396 }
1397
/* ethtool get_eeprom_len: report the accessible EEPROM size in bytes */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1402
1403 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1404                                       struct ethtool_eeprom *ee, u8 *data)
1405 {
1406         struct lan78xx_net *dev = netdev_priv(netdev);
1407         int ret;
1408
1409         ret = usb_autopm_get_interface(dev->intf);
1410         if (ret)
1411                 return ret;
1412
1413         ee->magic = LAN78XX_EEPROM_MAGIC;
1414
1415         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1416
1417         usb_autopm_put_interface(dev->intf);
1418
1419         return ret;
1420 }
1421
1422 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1423                                       struct ethtool_eeprom *ee, u8 *data)
1424 {
1425         struct lan78xx_net *dev = netdev_priv(netdev);
1426         int ret;
1427
1428         ret = usb_autopm_get_interface(dev->intf);
1429         if (ret)
1430                 return ret;
1431
1432         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1433          * to load data from EEPROM
1434          */
1435         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1436                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1437         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1438                  (ee->offset == 0) &&
1439                  (ee->len == 512) &&
1440                  (data[0] == OTP_INDICATOR_1))
1441                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1442
1443         usb_autopm_put_interface(dev->intf);
1444
1445         return ret;
1446 }
1447
/* ethtool get_strings handler: export statistic names for ETH_SS_STATS */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1454
1455 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1456 {
1457         if (sset == ETH_SS_STATS)
1458                 return ARRAY_SIZE(lan78xx_gstrings);
1459         else
1460                 return -EOPNOTSUPP;
1461 }
1462
/* ethtool get_ethtool_stats handler: refresh the hardware counters,
 * then copy the cached snapshot out under the stats access lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1474
/* ethtool get_wol handler: report Wake-on-LAN support and the
 * currently configured wake options.  WoL is reported as supported
 * only when USB remote wakeup is enabled in USB_CFG0; on a register
 * read failure (or if the device cannot be resumed) everything is
 * reported as unsupported/disabled.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1502
1503 static int lan78xx_set_wol(struct net_device *netdev,
1504                            struct ethtool_wolinfo *wol)
1505 {
1506         struct lan78xx_net *dev = netdev_priv(netdev);
1507         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1508         int ret;
1509
1510         ret = usb_autopm_get_interface(dev->intf);
1511         if (ret < 0)
1512                 return ret;
1513
1514         if (wol->wolopts & ~WAKE_ALL)
1515                 return -EINVAL;
1516
1517         pdata->wol = wol->wolopts;
1518
1519         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1520
1521         phy_ethtool_set_wol(netdev->phydev, wol);
1522
1523         usb_autopm_put_interface(dev->intf);
1524
1525         return ret;
1526 }
1527
/* ethtool get_eee handler: combine the PHY's EEE capabilities with
 * the MAC's EEE enable state.  When MAC EEE is enabled, eee_active is
 * derived from the advertised/link-partner overlap and the LPI timer
 * is read back from the hardware.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1565
/* ethtool set_eee handler: toggle the MAC-level EEE enable bit,
 * forward the advertisement to the PHY, and program the LPI request
 * delay (same microsecond unit as tx_lpi_timer).
 *
 * NOTE(review): register access return codes are collected in ret but
 * never checked, and the function unconditionally returns 0 — confirm
 * whether errors should be propagated to the caller.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1595
1596 static u32 lan78xx_get_link(struct net_device *net)
1597 {
1598         u32 link;
1599
1600         mutex_lock(&net->phydev->lock);
1601         phy_read_status(net->phydev);
1602         link = net->phydev->link;
1603         mutex_unlock(&net->phydev->lock);
1604
1605         return link;
1606 }
1607
/* ethtool get_drvinfo handler: report driver name and USB bus path.
 * NOTE(review): strncpy does not guarantee NUL termination; this is
 * safe only as long as DRIVER_NAME ("lan78xx") is shorter than
 * info->driver — confirm if the name ever changes.
 */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1616
/* ethtool get_msglevel handler: return the netif message-level mask */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1623
/* ethtool set_msglevel handler: set the netif message-level mask */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1630
1631 static int lan78xx_get_link_ksettings(struct net_device *net,
1632                                       struct ethtool_link_ksettings *cmd)
1633 {
1634         struct lan78xx_net *dev = netdev_priv(net);
1635         struct phy_device *phydev = net->phydev;
1636         int ret;
1637
1638         ret = usb_autopm_get_interface(dev->intf);
1639         if (ret < 0)
1640                 return ret;
1641
1642         phy_ethtool_ksettings_get(phydev, cmd);
1643
1644         usb_autopm_put_interface(dev->intf);
1645
1646         return ret;
1647 }
1648
/* ethtool set_link_ksettings handler: apply speed/duplex/autoneg via
 * phylib.  When autoneg is disabled, the link is bounced by briefly
 * setting BMCR loopback and restoring BMCR, so the forced mode takes
 * effect.
 * NOTE(review): the loopback toggle assumes the PHY drops link while
 * BMCR_LOOPBACK is set — confirm for the attached PHY.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1676
1677 static void lan78xx_get_pause(struct net_device *net,
1678                               struct ethtool_pauseparam *pause)
1679 {
1680         struct lan78xx_net *dev = netdev_priv(net);
1681         struct phy_device *phydev = net->phydev;
1682         struct ethtool_link_ksettings ecmd;
1683
1684         phy_ethtool_ksettings_get(phydev, &ecmd);
1685
1686         pause->autoneg = dev->fc_autoneg;
1687
1688         if (dev->fc_request_control & FLOW_CTRL_TX)
1689                 pause->tx_pause = 1;
1690
1691         if (dev->fc_request_control & FLOW_CTRL_RX)
1692                 pause->rx_pause = 1;
1693 }
1694
/* ethtool set_pauseparam handler: record the requested flow-control
 * configuration and, when link autonegotiation is active, update the
 * advertised Pause/Asym-Pause bits and restart negotiation.
 * Returns -EINVAL when pause autoneg is requested while link autoneg
 * is disabled; 0 otherwise.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg only makes sense if link autoneg is on */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* replace the currently advertised pause bits with the
		 * ones matching the new request
		 */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1739
1740 static int lan78xx_get_regs_len(struct net_device *netdev)
1741 {
1742         if (!netdev->phydev)
1743                 return (sizeof(lan78xx_regs));
1744         else
1745                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1746 }
1747
1748 static void
1749 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1750                  void *buf)
1751 {
1752         u32 *data = buf;
1753         int i, j;
1754         struct lan78xx_net *dev = netdev_priv(netdev);
1755
1756         /* Read Device/MAC registers */
1757         for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1758                 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1759
1760         if (!netdev->phydev)
1761                 return;
1762
1763         /* Read PHY registers */
1764         for (j = 0; j < 32; i++, j++)
1765                 data[i] = phy_read(netdev->phydev, j);
1766 }
1767
1768 static const struct ethtool_ops lan78xx_ethtool_ops = {
1769         .get_link       = lan78xx_get_link,
1770         .nway_reset     = phy_ethtool_nway_reset,
1771         .get_drvinfo    = lan78xx_get_drvinfo,
1772         .get_msglevel   = lan78xx_get_msglevel,
1773         .set_msglevel   = lan78xx_set_msglevel,
1774         .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1775         .get_eeprom     = lan78xx_ethtool_get_eeprom,
1776         .set_eeprom     = lan78xx_ethtool_set_eeprom,
1777         .get_ethtool_stats = lan78xx_get_stats,
1778         .get_sset_count = lan78xx_get_sset_count,
1779         .get_strings    = lan78xx_get_strings,
1780         .get_wol        = lan78xx_get_wol,
1781         .set_wol        = lan78xx_set_wol,
1782         .get_ts_info    = ethtool_op_get_ts_info,
1783         .get_eee        = lan78xx_get_eee,
1784         .set_eee        = lan78xx_set_eee,
1785         .get_pauseparam = lan78xx_get_pause,
1786         .set_pauseparam = lan78xx_set_pause,
1787         .get_link_ksettings = lan78xx_get_link_ksettings,
1788         .set_link_ksettings = lan78xx_set_link_ksettings,
1789         .get_regs_len   = lan78xx_get_regs_len,
1790         .get_regs       = lan78xx_get_regs,
1791         .get_ts_info    = ethtool_op_get_ts_info,
1792 };
1793
1794 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1795 {
1796         u32 addr_lo, addr_hi;
1797         u8 addr[6];
1798
1799         lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1800         lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1801
1802         addr[0] = addr_lo & 0xFF;
1803         addr[1] = (addr_lo >> 8) & 0xFF;
1804         addr[2] = (addr_lo >> 16) & 0xFF;
1805         addr[3] = (addr_lo >> 24) & 0xFF;
1806         addr[4] = addr_hi & 0xFF;
1807         addr[5] = (addr_hi >> 8) & 0xFF;
1808
1809         if (!is_valid_ether_addr(addr)) {
1810                 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1811                         /* valid address present in Device Tree */
1812                         netif_dbg(dev, ifup, dev->net,
1813                                   "MAC address read from Device Tree");
1814                 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1815                                                  ETH_ALEN, addr) == 0) ||
1816                             (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1817                                               ETH_ALEN, addr) == 0)) &&
1818                            is_valid_ether_addr(addr)) {
1819                         /* eeprom values are valid so use them */
1820                         netif_dbg(dev, ifup, dev->net,
1821                                   "MAC address read from EEPROM");
1822                 } else {
1823                         /* generate random MAC */
1824                         eth_random_addr(addr);
1825                         netif_dbg(dev, ifup, dev->net,
1826                                   "MAC address set to random addr");
1827                 }
1828
1829                 addr_lo = addr[0] | (addr[1] << 8) |
1830                           (addr[2] << 16) | (addr[3] << 24);
1831                 addr_hi = addr[4] | (addr[5] << 8);
1832
1833                 lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1834                 lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1835         }
1836
1837         lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1838         lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1839
1840         ether_addr_copy(dev->net->dev_addr, addr);
1841 }
1842
1843 /* MDIO read and write wrappers for phylib */
1844 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1845 {
1846         struct lan78xx_net *dev = bus->priv;
1847         u32 val, addr;
1848         int ret;
1849
1850         ret = usb_autopm_get_interface(dev->intf);
1851         if (ret < 0)
1852                 return ret;
1853
1854         mutex_lock(&dev->phy_mutex);
1855
1856         /* confirm MII not busy */
1857         ret = lan78xx_phy_wait_not_busy(dev);
1858         if (ret < 0)
1859                 goto done;
1860
1861         /* set the address, index & direction (read from PHY) */
1862         addr = mii_access(phy_id, idx, MII_READ);
1863         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1864
1865         ret = lan78xx_phy_wait_not_busy(dev);
1866         if (ret < 0)
1867                 goto done;
1868
1869         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1870
1871         ret = (int)(val & 0xFFFF);
1872
1873 done:
1874         mutex_unlock(&dev->phy_mutex);
1875         usb_autopm_put_interface(dev->intf);
1876
1877         return ret;
1878 }
1879
1880 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1881                                  u16 regval)
1882 {
1883         struct lan78xx_net *dev = bus->priv;
1884         u32 val, addr;
1885         int ret;
1886
1887         ret = usb_autopm_get_interface(dev->intf);
1888         if (ret < 0)
1889                 return ret;
1890
1891         mutex_lock(&dev->phy_mutex);
1892
1893         /* confirm MII not busy */
1894         ret = lan78xx_phy_wait_not_busy(dev);
1895         if (ret < 0)
1896                 goto done;
1897
1898         val = (u32)regval;
1899         ret = lan78xx_write_reg(dev, MII_DATA, val);
1900
1901         /* set the address, index & direction (write to PHY) */
1902         addr = mii_access(phy_id, idx, MII_WRITE);
1903         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1904
1905         ret = lan78xx_phy_wait_not_busy(dev);
1906         if (ret < 0)
1907                 goto done;
1908
1909 done:
1910         mutex_unlock(&dev->phy_mutex);
1911         usb_autopm_put_interface(dev->intf);
1912         return 0;
1913 }
1914
1915 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1916 {
1917         struct device_node *node;
1918         int ret;
1919
1920         dev->mdiobus = mdiobus_alloc();
1921         if (!dev->mdiobus) {
1922                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1923                 return -ENOMEM;
1924         }
1925
1926         dev->mdiobus->priv = (void *)dev;
1927         dev->mdiobus->read = lan78xx_mdiobus_read;
1928         dev->mdiobus->write = lan78xx_mdiobus_write;
1929         dev->mdiobus->name = "lan78xx-mdiobus";
1930         dev->mdiobus->parent = &dev->udev->dev;
1931
1932         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1933                  dev->udev->bus->busnum, dev->udev->devnum);
1934
1935         switch (dev->chipid) {
1936         case ID_REV_CHIP_ID_7800_:
1937         case ID_REV_CHIP_ID_7850_:
1938                 /* set to internal PHY id */
1939                 dev->mdiobus->phy_mask = ~(1 << 1);
1940                 break;
1941         case ID_REV_CHIP_ID_7801_:
1942                 /* scan thru PHYAD[2..0] */
1943                 dev->mdiobus->phy_mask = ~(0xFF);
1944                 break;
1945         }
1946
1947         node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1948         ret = of_mdiobus_register(dev->mdiobus, node);
1949         of_node_put(node);
1950         if (ret) {
1951                 netdev_err(dev->net, "can't register MDIO bus\n");
1952                 goto exit1;
1953         }
1954
1955         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1956         return 0;
1957 exit1:
1958         mdiobus_free(dev->mdiobus);
1959         return ret;
1960 }
1961
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1967
/* phylib link-change callback; applies a forced-100M retrain workaround.
 * The PHY interrupt is masked around the speed toggle so the extra BMCR
 * writes do not generate spurious link events.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);

		/* clear both speed bits, then force 10 Mb/s first */
		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1999
/* irq_domain map callback: wire a new virq to our irqchip and handler.
 * The chip data gives the handlers access to the per-device
 * irq_domain_data stashed in host_data.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
2011
/* irq_domain unmap callback: undo what irq_map() set up. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2017
/* irq_domain operations for the device's internal interrupt sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2022
/* irqchip mask: clear the bit in the cached enable mask; the hardware
 * register is updated later in irq_bus_sync_unlock (atomic context here).
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2029
/* irqchip unmask: set the bit in the cached enable mask; flushed to the
 * hardware in irq_bus_sync_unlock.
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2036
/* Start of a slow-bus irqchip transaction; released in
 * lan78xx_irq_bus_sync_unlock() after the register update.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2043
/* End of a slow-bus irqchip transaction: push the cached enable mask to
 * INT_EP_CTL (only if it changed) and drop the lock taken in
 * lan78xx_irq_bus_lock().
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic context.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
2060
/* irqchip for the device's interrupt-endpoint sources; mask/unmask only
 * touch the cached enable word, the bus_lock pair does the USB I/O.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2068
2069 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2070 {
2071         struct device_node *of_node;
2072         struct irq_domain *irqdomain;
2073         unsigned int irqmap = 0;
2074         u32 buf;
2075         int ret = 0;
2076
2077         of_node = dev->udev->dev.parent->of_node;
2078
2079         mutex_init(&dev->domain_data.irq_lock);
2080
2081         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2082         dev->domain_data.irqenable = buf;
2083
2084         dev->domain_data.irqchip = &lan78xx_irqchip;
2085         dev->domain_data.irq_handler = handle_simple_irq;
2086
2087         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2088                                           &chip_domain_ops, &dev->domain_data);
2089         if (irqdomain) {
2090                 /* create mapping for PHY interrupt */
2091                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2092                 if (!irqmap) {
2093                         irq_domain_remove(irqdomain);
2094
2095                         irqdomain = NULL;
2096                         ret = -EINVAL;
2097                 }
2098         } else {
2099                 ret = -EINVAL;
2100         }
2101
2102         dev->domain_data.irqdomain = irqdomain;
2103         dev->domain_data.phyirq = irqmap;
2104
2105         return ret;
2106 }
2107
2108 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2109 {
2110         if (dev->domain_data.phyirq > 0) {
2111                 irq_dispose_mapping(dev->domain_data.phyirq);
2112
2113                 if (dev->domain_data.irqdomain)
2114                         irq_domain_remove(dev->domain_data.irqdomain);
2115         }
2116         dev->domain_data.phyirq = 0;
2117         dev->domain_data.irqdomain = NULL;
2118 }
2119
2120 static int lan8835_fixup(struct phy_device *phydev)
2121 {
2122         int buf;
2123         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2124
2125         /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2126         buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2127         buf &= ~0x1800;
2128         buf |= 0x0800;
2129         phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2130
2131         /* RGMII MAC TXC Delay Enable */
2132         lan78xx_write_reg(dev, MAC_RGMII_ID,
2133                           MAC_RGMII_ID_TXC_DELAY_EN_);
2134
2135         /* RGMII TX DLL Tune Adjust */
2136         lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2137
2138         dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2139
2140         return 1;
2141 }
2142
/* PHY fixup for an external KSZ9031RNX: program RGMII pad skews so the
 * RX clock delay is handled by the PHY.  Returns 1 so phylib treats the
 * fixup as applied.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2159
2160 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2161 {
2162         u32 buf;
2163         int ret;
2164         struct fixed_phy_status fphy_status = {
2165                 .link = 1,
2166                 .speed = SPEED_1000,
2167                 .duplex = DUPLEX_FULL,
2168         };
2169         struct phy_device *phydev;
2170
2171         phydev = phy_find_first(dev->mdiobus);
2172         if (!phydev) {
2173                 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2174                 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2175                 if (IS_ERR(phydev)) {
2176                         netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2177                         return NULL;
2178                 }
2179                 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2180                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2181                 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2182                                         MAC_RGMII_ID_TXC_DELAY_EN_);
2183                 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2184                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2185                 buf |= HW_CFG_CLK125_EN_;
2186                 buf |= HW_CFG_REFCLK25_EN_;
2187                 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2188         } else {
2189                 if (!phydev->drv) {
2190                         netdev_err(dev->net, "no PHY driver found\n");
2191                         return NULL;
2192                 }
2193                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2194                 /* external PHY fixup for KSZ9031RNX */
2195                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2196                                                  ksz9031rnx_fixup);
2197                 if (ret < 0) {
2198                         netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2199                         return NULL;
2200                 }
2201                 /* external PHY fixup for LAN8835 */
2202                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2203                                                  lan8835_fixup);
2204                 if (ret < 0) {
2205                         netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2206                         return NULL;
2207                 }
2208                 /* add more external PHY fixup here if needed */
2209
2210                 phydev->is_internal = false;
2211         }
2212         return phydev;
2213 }
2214
/* Locate and connect the PHY for the detected chip, then configure
 * flow-control advertisement and optional DT-driven EEE/LED settings.
 * Returns 0 on success or a negative errno; on connect failure the
 * LAN7801 fixed PHY / fixups registered by lan7801_phy_init() are
 * unwound.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo the LAN7801-specific setup done above */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	/* optionally enable EEE when requested via Device Tree */
	if (of_property_read_bool(phydev->mdio.dev.of_node,
				  "microchip,eee-enabled")) {
		struct ethtool_eee edata;
		memset(&edata, 0, sizeof(edata));
		edata.cmd = ETHTOOL_SEEE;
		edata.advertised = ADVERTISED_1000baseT_Full |
				   ADVERTISED_100baseT_Full;
		edata.eee_enabled = true;
		edata.tx_lpi_enabled = true;
		if (of_property_read_u32(dev->udev->dev.of_node,
					 "microchip,tx-lpi-timer",
					 &edata.tx_lpi_timer))
			edata.tx_lpi_timer = 600; /* non-aggressive */
		(void)lan78xx_set_eee(dev->net, &edata);
	}

	/* enable only the LEDs listed in the DT "microchip,led-modes" */
	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2333
2334 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2335 {
2336         u32 buf;
2337         bool rxenabled;
2338
2339         lan78xx_read_reg(dev, MAC_RX, &buf);
2340
2341         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2342
2343         if (rxenabled) {
2344                 buf &= ~MAC_RX_RXEN_;
2345                 lan78xx_write_reg(dev, MAC_RX, buf);
2346         }
2347
2348         /* add 4 to size for FCS */
2349         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2350         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2351
2352         lan78xx_write_reg(dev, MAC_RX, buf);
2353
2354         if (rxenabled) {
2355                 buf |= MAC_RX_RXEN_;
2356                 lan78xx_write_reg(dev, MAC_RX, buf);
2357         }
2358
2359         return 0;
2360 }
2361
/* Unlink every URB on queue q that is not already being unlinked.
 * Returns the number of URBs successfully submitted for unlinking.
 * The queue lock must be dropped around usb_unlink_urb() (which may
 * sleep/complete synchronously), so the walk restarts from the head
 * each time to cope with concurrent completion removing entries.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2406
2407 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2408 {
2409         struct lan78xx_net *dev = netdev_priv(netdev);
2410         int ll_mtu = new_mtu + netdev->hard_header_len;
2411         int old_hard_mtu = dev->hard_mtu;
2412         int old_rx_urb_size = dev->rx_urb_size;
2413         int ret;
2414
2415         /* no second zero-length packet read wanted after mtu-sized packets */
2416         if ((ll_mtu % dev->maxpacket) == 0)
2417                 return -EDOM;
2418
2419         ret = usb_autopm_get_interface(dev->intf);
2420         if (ret < 0)
2421                 return ret;
2422
2423         lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2424
2425         netdev->mtu = new_mtu;
2426
2427         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2428         if (dev->rx_urb_size == old_hard_mtu) {
2429                 dev->rx_urb_size = dev->hard_mtu;
2430                 if (dev->rx_urb_size > old_rx_urb_size) {
2431                         if (netif_running(dev->net)) {
2432                                 unlink_urbs(dev, &dev->rxq);
2433                                 tasklet_schedule(&dev->bh);
2434                         }
2435                 }
2436         }
2437
2438         usb_autopm_put_interface(dev->intf);
2439
2440         return 0;
2441 }
2442
2443 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2444 {
2445         struct lan78xx_net *dev = netdev_priv(netdev);
2446         struct sockaddr *addr = p;
2447         u32 addr_lo, addr_hi;
2448
2449         if (netif_running(netdev))
2450                 return -EBUSY;
2451
2452         if (!is_valid_ether_addr(addr->sa_data))
2453                 return -EADDRNOTAVAIL;
2454
2455         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2456
2457         addr_lo = netdev->dev_addr[0] |
2458                   netdev->dev_addr[1] << 8 |
2459                   netdev->dev_addr[2] << 16 |
2460                   netdev->dev_addr[3] << 24;
2461         addr_hi = netdev->dev_addr[4] |
2462                   netdev->dev_addr[5] << 8;
2463
2464         lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2465         lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2466
2467         /* Added to support MAC address changes */
2468         lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2469         lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2470
2471         return 0;
2472 }
2473
2474 /* Enable or disable Rx checksum offload engine */
2475 static int lan78xx_set_features(struct net_device *netdev,
2476                                 netdev_features_t features)
2477 {
2478         struct lan78xx_net *dev = netdev_priv(netdev);
2479         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2480         unsigned long flags;
2481
2482         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2483
2484         if (features & NETIF_F_RXCSUM) {
2485                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2486                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2487         } else {
2488                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2489                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2490         }
2491
2492         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2493                 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2494         else
2495                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2496
2497         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2498                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2499         else
2500                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2501
2502         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2503
2504         lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2505
2506         return 0;
2507 }
2508
/* Workqueue handler: flush the software VLAN filter table to the device.
 * Runs in process context because the dataport write sleeps; scheduled
 * from the (atomic) vid add/kill callbacks.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2518
2519 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2520                                    __be16 proto, u16 vid)
2521 {
2522         struct lan78xx_net *dev = netdev_priv(netdev);
2523         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2524         u16 vid_bit_index;
2525         u16 vid_dword_index;
2526
2527         vid_dword_index = (vid >> 5) & 0x7F;
2528         vid_bit_index = vid & 0x1F;
2529
2530         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2531
2532         /* defer register writes to a sleepable context */
2533         schedule_work(&pdata->set_vlan);
2534
2535         return 0;
2536 }
2537
2538 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2539                                     __be16 proto, u16 vid)
2540 {
2541         struct lan78xx_net *dev = netdev_priv(netdev);
2542         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2543         u16 vid_bit_index;
2544         u16 vid_dword_index;
2545
2546         vid_dword_index = (vid >> 5) & 0x7F;
2547         vid_bit_index = vid & 0x1F;
2548
2549         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2550
2551         /* defer register writes to a sleepable context */
2552         schedule_work(&pdata->set_vlan);
2553
2554         return 0;
2555 }
2556
2557 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2558 {
2559         int ret;
2560         u32 buf;
2561         u32 regs[6] = { 0 };
2562
2563         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2564         if (buf & USB_CFG1_LTM_ENABLE_) {
2565                 u8 temp[2];
2566                 /* Get values from EEPROM first */
2567                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2568                         if (temp[0] == 24) {
2569                                 ret = lan78xx_read_raw_eeprom(dev,
2570                                                               temp[1] * 2,
2571                                                               24,
2572                                                               (u8 *)regs);
2573                                 if (ret < 0)
2574                                         return;
2575                         }
2576                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2577                         if (temp[0] == 24) {
2578                                 ret = lan78xx_read_raw_otp(dev,
2579                                                            temp[1] * 2,
2580                                                            24,
2581                                                            (u8 *)regs);
2582                                 if (ret < 0)
2583                                         return;
2584                         }
2585                 }
2586         }
2587
2588         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2589         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2590         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2591         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2592         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2593         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2594 }
2595
2596 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2597 {
2598         return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2599 }
2600
2601 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2602                            u32 hw_disabled)
2603 {
2604         unsigned long timeout;
2605         bool stopped = true;
2606         int ret;
2607         u32 buf;
2608
2609         /* Stop the h/w block (if not already stopped) */
2610
2611         ret = lan78xx_read_reg(dev, reg, &buf);
2612         if (ret < 0)
2613                 return ret;
2614
2615         if (buf & hw_enabled) {
2616                 buf &= ~hw_enabled;
2617
2618                 ret = lan78xx_write_reg(dev, reg, buf);
2619                 if (ret < 0)
2620                         return ret;
2621
2622                 stopped = false;
2623                 timeout = jiffies + HW_DISABLE_TIMEOUT;
2624                 do  {
2625                         ret = lan78xx_read_reg(dev, reg, &buf);
2626                         if (ret < 0)
2627                                 return ret;
2628
2629                         if (buf & hw_disabled)
2630                                 stopped = true;
2631                         else
2632                                 msleep(HW_DISABLE_DELAY_MS);
2633                 } while (!stopped && !time_after(jiffies, timeout));
2634         }
2635
2636         ret = stopped ? 0 : -ETIME;
2637
2638         return ret;
2639 }
2640
2641 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2642 {
2643         return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2644 }
2645
2646 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2647 {
2648         int ret;
2649
2650         netif_dbg(dev, drv, dev->net, "start tx path");
2651
2652         /* Start the MAC transmitter */
2653
2654         ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2655         if (ret < 0)
2656                 return ret;
2657
2658         /* Start the Tx FIFO */
2659
2660         ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2661         if (ret < 0)
2662                 return ret;
2663
2664         return 0;
2665 }
2666
2667 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2668 {
2669         int ret;
2670
2671         netif_dbg(dev, drv, dev->net, "stop tx path");
2672
2673         /* Stop the Tx FIFO */
2674
2675         ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2676         if (ret < 0)
2677                 return ret;
2678
2679         /* Stop the MAC transmitter */
2680
2681         ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2682         if (ret < 0)
2683                 return ret;
2684
2685         return 0;
2686 }
2687
2688 /* The caller must ensure the Tx path is stopped before calling
2689  * lan78xx_flush_tx_fifo().
2690  */
2691 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2692 {
2693         return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2694 }
2695
2696 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2697 {
2698         int ret;
2699
2700         netif_dbg(dev, drv, dev->net, "start rx path");
2701
2702         /* Start the Rx FIFO */
2703
2704         ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2705         if (ret < 0)
2706                 return ret;
2707
2708         /* Start the MAC receiver*/
2709
2710         ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2711         if (ret < 0)
2712                 return ret;
2713
2714         return 0;
2715 }
2716
2717 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2718 {
2719         int ret;
2720
2721         netif_dbg(dev, drv, dev->net, "stop rx path");
2722
2723         /* Stop the MAC receiver */
2724
2725         ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2726         if (ret < 0)
2727                 return ret;
2728
2729         /* Stop the Rx FIFO */
2730
2731         ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2732         if (ret < 0)
2733                 return ret;
2734
2735         return 0;
2736 }
2737
2738 /* The caller must ensure the Rx path is stopped before calling
2739  * lan78xx_flush_rx_fifo().
2740  */
2741 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2742 {
2743         return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
2744 }
2745
/* Full device initialisation: lite-reset the chip, size USB bursts and
 * queues for the negotiated link speed, configure FIFOs, receive filters
 * and checksum offload, then reset the PHY.  Returns 0 on success or a
 * negative error code.  Register access order matters here; do not
 * reorder the steps.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;
	bool has_eeprom;
	bool has_otp;

	/* A zero-length probe read succeeds only when the corresponding
	 * configuration source is present and valid.
	 */
	has_eeprom = !lan78xx_read_eeprom(dev, 0, 0, NULL);
	has_otp = !lan78xx_read_otp(dev, 0, 0, NULL);

	/* Trigger a Lite Reset by setting HW_CFG_LRST_ */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* Wait (up to ~1s) for the h/w to clear the self-resetting LRST bit */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* Size the bulk-in burst cap and URB queue depths for the
	 * negotiated USB speed.
	 */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	/* MEF — presumably "multiple ethernet frames" per USB transfer;
	 * confirm against the LAN78xx datasheet.
	 */
	buf |= HW_CFG_MEF_;

	/* If no valid EEPROM and no valid OTP, enable the LEDs by default */
	if (!has_eeprom && !has_otp)
	    buf |= HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* BCE — burst cap enable (pairs with the BURST_CAP value above) */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes (register values are in 512-byte units) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* Clear any latched interrupt status and disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Accept broadcasts and perfect-match unicast by default */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Wait (up to ~1s) for the PHY reset bit to self-clear AND the
	 * device to report READY.
	 */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	/* If no valid EEPROM and no valid OTP, enable AUTO negotiation */
	if (!has_eeprom && !has_otp)
	    buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	/* Allow room for a VLAN tag on top of the MTU */
	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	return ret;
}
2953
2954 static void lan78xx_init_stats(struct lan78xx_net *dev)
2955 {
2956         u32 *p;
2957         int i;
2958
2959         /* initialize for stats update
2960          * some counters are 20bits and some are 32bits
2961          */
2962         p = (u32 *)&dev->stats.rollover_max;
2963         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2964                 p[i] = 0xFFFFF;
2965
2966         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2967         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2968         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2969         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2970         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2971         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2972         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2973         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2974         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2975         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2976
2977         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2978 }
2979
/* ndo_open: power up the device, start the PHY and interrupt URB, flush
 * and enable both data paths, then open the netdev queue.  Returns 0 on
 * success or a negative error code.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	/* Resume the interface; the matching put is at the end of this
	 * function and the open-state reference handling happens elsewhere
	 * — NOTE(review): confirm the autopm balance against lan78xx_stop().
	 */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	/* Discard stale FIFO contents before enabling the data paths */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* Assume link down until the deferred link-reset work says otherwise */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
3037
/* Cancel all in-flight Tx/Rx URBs and wait for their completions, then
 * free anything left on the done queue.  Uses an on-stack waitqueue
 * published through dev->wait; the completion paths presumably wake it —
 * NOTE(review): confirm against the URB completion handlers.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		/* bounded sleep; loop re-checks the queues each pass */
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;	/* un-publish before the stack frame dies */
	remove_wait_queue(&unlink_wakeup, &wait);

	/* Free skbs that reached the done queue but were never processed
	 * by the bottom half.
	 */
	while (!skb_queue_empty(&dev->done)) {
		struct skb_data *entry;
		struct sk_buff *skb;

		skb = skb_dequeue(&dev->done);
		entry = (struct skb_data *)(skb->cb);
		usb_free_urb(entry->urb);
		dev_kfree_skb(skb);
	}
}
3072
/* ndo_stop: quiesce deferred work, cancel URBs, stop the data paths and
 * PHY, and drop the power-management reference taken at open.  Always
 * returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	/* Stop the periodic statistics timer before tearing anything down */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	tasklet_kill(&dev->bh);

	/* Cancel in-flight Tx/Rx URBs and drain the done queue */
	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* Balances the usb_autopm_get_interface() — NOTE(review): confirm
	 * which get this put pairs with (open vs. elsewhere).
	 */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3121
3122 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
3123                                        struct sk_buff *skb, gfp_t flags)
3124 {
3125         u32 tx_cmd_a, tx_cmd_b;
3126         void *ptr;
3127
3128         if (skb_cow_head(skb, TX_OVERHEAD)) {
3129                 dev_kfree_skb_any(skb);
3130                 return NULL;
3131         }
3132
3133         if (skb_linearize(skb)) {
3134                 dev_kfree_skb_any(skb);
3135                 return NULL;
3136         }
3137
3138         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3139
3140         if (skb->ip_summed == CHECKSUM_PARTIAL)
3141                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3142
3143         tx_cmd_b = 0;
3144         if (skb_is_gso(skb)) {
3145                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3146
3147                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3148
3149                 tx_cmd_a |= TX_CMD_A_LSO_;
3150         }
3151
3152         if (skb_vlan_tag_present(skb)) {
3153                 tx_cmd_a |= TX_CMD_A_IVTG_;
3154                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3155         }
3156
3157         ptr = skb_push(skb, 8);
3158         put_unaligned_le32(tx_cmd_a, ptr);
3159         put_unaligned_le32(tx_cmd_b, ptr + 4);
3160
3161         return skb;
3162 }
3163
/* Move 'skb' from 'list' to dev->done, record its new state, and kick the
 * bottom half when the done queue transitions from empty to non-empty.
 * Returns the state the skb was in before the move.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	/* Lock handoff: interrupts stay disabled while list->lock is
	 * dropped and done.lock is taken, so the plain (non-irqsave)
	 * variants are safe here; the final unlock restores 'flags'.
	 */
	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the 0 -> 1 transition needs to schedule the tasklet */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
3186
/* Bulk-out URB completion callback: account the transfer, classify any
 * error, release the async PM reference and defer skb cleanup to the
 * bottom half.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		/* NOTE(review): the messages below print entry->urb->status
		 * while the switch tests urb->status — presumably the same
		 * URB; confirm entry->urb == urb on the submit path.
		 */
		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear the halt from process
			 * context via the deferred kevent worker
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* low-level USB errors: stop the Tx queue */
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	/* Balances the async autopm get taken at submit time —
	 * NOTE(review): confirm against the Tx submit path.
	 */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
3232
3233 static void lan78xx_queue_skb(struct sk_buff_head *list,
3234                               struct sk_buff *newsk, enum skb_state state)
3235 {
3236         struct skb_data *entry = (struct skb_data *)newsk->cb;
3237
3238         __skb_queue_tail(list, newsk);
3239         entry->state = state;
3240 }
3241
3242 static netdev_tx_t
3243 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3244 {
3245         struct lan78xx_net *dev = netdev_priv(net);
3246         struct sk_buff *skb2 = NULL;
3247
3248         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3249                 schedule_delayed_work(&dev->wq, 0);
3250
3251         if (skb) {
3252                 skb_tx_timestamp(skb);
3253                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
3254         }
3255
3256         if (skb2) {
3257                 skb_queue_tail(&dev->txq_pend, skb2);
3258
3259                 /* throttle TX patch at slower than SUPER SPEED USB */
3260                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
3261                     (skb_queue_len(&dev->txq_pend) > 10))
3262                         netif_stop_queue(net);
3263         } else {
3264                 netif_dbg(dev, tx_err, dev->net,
3265                           "lan78xx_tx_prep return NULL\n");
3266                 dev->net->stats.tx_errors++;
3267                 dev->net->stats.tx_dropped++;
3268         }
3269
3270         tasklet_schedule(&dev->bh);
3271
3272         return NETDEV_TX_OK;
3273 }
3274
3275 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3276 {
3277         struct lan78xx_priv *pdata = NULL;
3278         int ret;
3279         int i;
3280
3281         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3282
3283         pdata = (struct lan78xx_priv *)(dev->data[0]);
3284         if (!pdata) {
3285                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3286                 return -ENOMEM;
3287         }
3288
3289         pdata->dev = dev;
3290
3291         spin_lock_init(&pdata->rfe_ctl_lock);
3292         mutex_init(&pdata->dataport_mutex);
3293
3294         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3295
3296         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3297                 pdata->vlan_table[i] = 0;
3298
3299         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3300
3301         dev->net->features = 0;
3302
3303         if (DEFAULT_TX_CSUM_ENABLE)
3304                 dev->net->features |= NETIF_F_HW_CSUM;
3305
3306         if (DEFAULT_RX_CSUM_ENABLE)
3307                 dev->net->features |= NETIF_F_RXCSUM;
3308
3309         if (DEFAULT_TSO_CSUM_ENABLE) {
3310                 dev->net->features |= NETIF_F_SG;
3311                 /* Use module parameter to control TCP segmentation offload as
3312                  * it appears to cause issues.
3313                  */
3314                 if (enable_tso)
3315                         dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6;
3316         }
3317
3318         if (DEFAULT_VLAN_RX_OFFLOAD)
3319                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3320
3321         if (DEFAULT_VLAN_FILTER_ENABLE)
3322                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3323
3324         dev->net->hw_features = dev->net->features;
3325
3326         ret = lan78xx_setup_irq_domain(dev);
3327         if (ret < 0) {
3328                 netdev_warn(dev->net,
3329                             "lan78xx_setup_irq_domain() failed : %d", ret);
3330                 goto out1;
3331         }
3332
3333         dev->net->hard_header_len += TX_OVERHEAD;
3334         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3335
3336         /* Init all registers */
3337         ret = lan78xx_reset(dev);
3338         if (ret) {
3339                 netdev_warn(dev->net, "Registers INIT FAILED....");
3340                 goto out2;
3341         }
3342
3343         ret = lan78xx_mdio_init(dev);
3344         if (ret) {
3345                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3346                 goto out2;
3347         }
3348
3349         dev->net->flags |= IFF_MULTICAST;
3350
3351         pdata->wol = WAKE_MAGIC;
3352
3353         return ret;
3354
3355 out2:
3356         lan78xx_remove_irq_domain(dev);
3357
3358 out1:
3359         netdev_warn(dev->net, "Bind routine FAILED");
3360         cancel_work_sync(&pdata->set_multicast);
3361         cancel_work_sync(&pdata->set_vlan);
3362         kfree(pdata);
3363         return ret;
3364 }
3365
3366 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3367 {
3368         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3369
3370         lan78xx_remove_irq_domain(dev);
3371
3372         lan78xx_remove_mdio(dev);
3373
3374         if (pdata) {
3375                 cancel_work_sync(&pdata->set_multicast);
3376                 cancel_work_sync(&pdata->set_vlan);
3377                 netif_dbg(dev, ifdown, dev->net, "free pdata");
3378                 kfree(pdata);
3379                 pdata = NULL;
3380                 dev->data[0] = 0;
3381         }
3382 }
3383
3384 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3385                                     struct sk_buff *skb,
3386                                     u32 rx_cmd_a, u32 rx_cmd_b)
3387 {
3388         /* HW Checksum offload appears to be flawed if used when not stripping
3389          * VLAN headers. Drop back to S/W checksums under these conditions.
3390          */
3391         if (!(dev->net->features & NETIF_F_RXCSUM) ||
3392             unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3393             ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3394              !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3395                 skb->ip_summed = CHECKSUM_NONE;
3396         } else {
3397                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3398                 skb->ip_summed = CHECKSUM_COMPLETE;
3399         }
3400 }
3401
3402 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3403                                     struct sk_buff *skb,
3404                                     u32 rx_cmd_a, u32 rx_cmd_b)
3405 {
3406         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3407             (rx_cmd_a & RX_CMD_A_FVTG_))
3408                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3409                                        (rx_cmd_b & 0xffff));
3410 }
3411
/* Account a received frame and deliver it to the network stack.
 *
 * Ordering matters here: rx_bytes is accumulated from skb->len *before*
 * eth_type_trans() pulls the Ethernet header, so the byte counter
 * includes the link-level header.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	/* Sets skb->protocol and pulls the Ethernet header */
	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* Clear the driver-private control block (struct skb_data) before
	 * the skb leaves the driver.
	 */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* skb may be consumed here for deferred RX timestamping; if so it
	 * is re-injected later and we must not touch it again.
	 */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
3433
/* Split one bulk-in URB buffer into individual Ethernet frames.
 *
 * Each frame in the buffer is preceded by three little-endian command
 * words (RX_CMD_A/B/C) and, except for the last one, padded out to a
 * 4-byte boundary.  Every frame but the last is skb_clone()d and pushed
 * up immediately; the last frame reuses the original skb, which is
 * trimmed in place and left for the caller (rx_process) to deliver.
 *
 * Returns 1 on success (caller inspects skb->len to decide delivery),
 * 0 on a too-short buffer or clone failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* Strip the three RX command words in front of the frame */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* NOTE(review): "size" comes straight from the hardware
		 * descriptor and is not validated against the remaining
		 * skb->len before the pulls/trims below — confirm the
		 * device cannot report a length larger than the buffer.
		 */
		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* Hardware flagged a receive error: skip this
			 * frame but keep parsing the rest of the batch.
			 */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* Not the last frame: clone the skb header and
			 * point the clone at this frame's payload.
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			/* NOTE(review): truesize set to frame size plus
			 * sizeof(struct sk_buff) only — does not account
			 * for the shared data buffer; verify intent.
			 */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		/* Advance past this frame's payload ... */
		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3505
3506 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3507 {
3508         if (!lan78xx_rx(dev, skb)) {
3509                 dev->net->stats.rx_errors++;
3510                 goto done;
3511         }
3512
3513         if (skb->len) {
3514                 lan78xx_skb_return(dev, skb);
3515                 return;
3516         }
3517
3518         netif_dbg(dev, rx_err, dev->net, "drop\n");
3519         dev->net->stats.rx_errors++;
3520 done:
3521         skb_queue_tail(&dev->done, skb);
3522 }
3523
3524 static void rx_complete(struct urb *urb);
3525
/* Allocate an rx skb, bind it to @urb and submit the URB on the bulk-in
 * pipe.
 *
 * Ownership: on success the URB is owned by the USB core and the skb is
 * queued on dev->rxq; on any failure both the skb and the URB are freed
 * here and a negative errno is returned.  Called from BH/completion
 * context, hence GFP_ATOMIC submission.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* Driver-private state rides in skb->cb */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock guards both the submit decision and the queueing so a
	 * concurrent stop/halt cannot slip between them.
	 */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* Endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* Transient failure: let the BH retry the refill */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		/* Submission never happened or failed: reclaim both */
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3586
/* Bulk-in URB completion handler.
 *
 * Classifies the completion status into an skb_state, defers the skb to
 * the BH via defer_bh(), and resubmits the URB when the interface is
 * still up.  For fatal statuses the URB is parked back in entry->urb
 * (and the local pointer NULLed) so the cleanup path frees it together
 * with the skb.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	/* Expose the bytes the controller actually transferred */
	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* Runt buffer: count it and recycle via cleanup */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* Hand the URB back to the skb for joint cleanup; do not
		 * resubmit below.
		 */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* Queue the skb for the BH; returns the (possibly updated) state */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* rx_submit consumes the URB on both paths */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3656
/* Transmit bottom half: coalesce pending tx skbs into one bulk-out URB.
 *
 * Non-GSO packets queued on dev->txq_pend are batched (each padded to a
 * 4-byte boundary) into a single freshly-allocated skb up to
 * MAX_SINGLE_PACKET_SIZE.  A GSO skb is never batched: if it is first in
 * the queue it is sent alone, otherwise the packets ahead of it are
 * flushed first.  The resulting skb is submitted on the bulk-out pipe;
 * while the device is autosuspended the URB is parked on dev->deferred
 * and submitted at resume time instead.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* First pass (under the queue lock): decide how many packets fit
	 * into one transfer.
	 */
	spin_lock_irqsave(&tqp->lock, flags);
	skb_queue_walk(tqp, skb) {
		if (skb_is_gso(skb)) {
			if (!skb_queue_is_first(tqp, skb)) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			/* entry->length excludes the TX command words */
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* Second pass: dequeue the counted packets and pack them, each
	 * aligned to a u32 boundary.
	 */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	/* Hold an async PM reference for the in-flight URB; dropped on
	 * submit failure or in tx_complete.
	 */
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		/* anchor holds its own reference; drop ours */
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* Stalled endpoint: clear it from process context */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d (disconnected?)", ret);
		netif_device_detach(dev->net);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		/* usb_free_urb(NULL) is a no-op on the early drop paths */
		usb_free_urb(urb);
	} else {
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
	}
}
3791
3792 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3793 {
3794         struct urb *urb;
3795         int i;
3796
3797         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3798                 for (i = 0; i < 10; i++) {
3799                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3800                                 break;
3801                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3802                         if (urb)
3803                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3804                                         return;
3805                 }
3806
3807                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3808                         tasklet_schedule(&dev->bh);
3809         }
3810         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3811                 netif_wake_queue(dev->net);
3812 }
3813
/* Main driver tasklet.
 *
 * Drains dev->done — processing finished rx buffers and releasing
 * completed/cancelled tx and rx resources — then, if the interface is
 * up, kicks the tx and rx bottom halves and resets the statistics
 * timer back to its shortest interval.
 */
static void lan78xx_bh(struct tasklet_struct *t)
{
	struct lan78xx_net *dev = from_tasklet(dev, t, bh);
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* Mark for cleanup in case rx_process requeues it */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* Unexpected state: stop draining this pass.
			 * NOTE(review): the skb is neither freed nor
			 * requeued here — confirm this path is unreachable.
			 */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3856
3857 static void lan78xx_delayedwork(struct work_struct *work)
3858 {
3859         int status;
3860         struct lan78xx_net *dev;
3861
3862         dev = container_of(work, struct lan78xx_net, wq.work);
3863
3864         if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
3865                 return;
3866
3867         if (usb_autopm_get_interface(dev->intf) < 0)
3868                 return;
3869
3870         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3871                 unlink_urbs(dev, &dev->txq);
3872
3873                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3874                 if (status < 0 &&
3875                     status != -EPIPE &&
3876                     status != -ESHUTDOWN) {
3877                         if (netif_msg_tx_err(dev))
3878                                 netdev_err(dev->net,
3879                                            "can't clear tx halt, status %d\n",
3880                                            status);
3881                 } else {
3882                         clear_bit(EVENT_TX_HALT, &dev->flags);
3883                         if (status != -ESHUTDOWN)
3884                                 netif_wake_queue(dev->net);
3885                 }
3886         }
3887
3888         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3889                 unlink_urbs(dev, &dev->rxq);
3890                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3891                 if (status < 0 &&
3892                     status != -EPIPE &&
3893                     status != -ESHUTDOWN) {
3894                         if (netif_msg_rx_err(dev))
3895                                 netdev_err(dev->net,
3896                                            "can't clear rx halt, status %d\n",
3897                                            status);
3898                 } else {
3899                         clear_bit(EVENT_RX_HALT, &dev->flags);
3900                         tasklet_schedule(&dev->bh);
3901                 }
3902         }
3903
3904         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3905                 int ret = 0;
3906
3907                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3908                 if (lan78xx_link_reset(dev) < 0) {
3909                         netdev_info(dev->net, "link reset failed (%d)\n",
3910                                     ret);
3911                 }
3912         }
3913
3914         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3915                 lan78xx_update_stats(dev);
3916
3917                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3918
3919                 mod_timer(&dev->stat_monitor,
3920                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3921
3922                 dev->delta = min((dev->delta * 2), 50);
3923         }
3924
3925         usb_autopm_put_interface(dev->intf);
3926 }
3927
/* Interrupt-endpoint URB completion handler.
 *
 * On success the status words are handed to lan78xx_status(); the URB
 * is then resubmitted unless the interface has gone down or the URB was
 * killed.  The transfer buffer is zeroed before resubmission so stale
 * status is never reprocessed.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
3979
/* USB disconnect callback: tear down the interface.
 *
 * Ordering: intfdata is cleared and EVENT_DEV_DISCONNECT set first so
 * the delayed work bails out, the netdev is unregistered before the PHY
 * is disconnected, and the interrupt URB is killed after unbind.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* Keep a reference to the phydev; net->phydev is invalid after
	 * phy_disconnect().
	 */
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* Fixed-link PHYs were registered by the driver, so drop them */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	/* Free any tx URBs parked while the device was autosuspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4024
/* ndo_tx_timeout: the stack saw no tx progress within watchdog_timeo.
 * Unlink all in-flight tx URBs and kick the BH to restart transmission.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
4032
4033 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4034                                                 struct net_device *netdev,
4035                                                 netdev_features_t features)
4036 {
4037         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
4038                 features &= ~NETIF_F_GSO_MASK;
4039
4040         features = vlan_features_check(skb, features);
4041         features = vxlan_features_check(skb, features);
4042
4043         return features;
4044 }
4045
/* net_device callbacks for the lan78xx interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4061
/* Statistics timer callback (atomic context): defer the actual register
 * reads to the keventd worker, which may sleep.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4068
/* USB probe: allocate and register the network device.
 *
 * Validates the three expected endpoints (bulk-in, bulk-out, interrupt-
 * in), binds the hardware, sets up the interrupt URB, initializes the
 * PHY and registers the netdev.  Error paths unwind in reverse order
 * via the out1..out5 labels.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	/* Hold a device reference for the lifetime of the netdev */
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);
	mutex_init(&dev->dev_mutex);

	tasklet_setup(&dev->bh, lan78xx_bh);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* Reject interfaces without the three expected endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	/* Allow the module parameter to override the descriptor's
	 * interrupt polling interval.
	 */
	if (int_urb_interval_ms <= 0)
		period = ep_intr->desc.bInterval;
	else
		period = int_urb_interval_ms * INT_URB_MICROFRAMES_PER_MS;

	netif_notice(dev, probe, netdev, "int urb period %d\n", period);

	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	/* NOTE(review): kmalloc failure here is silently tolerated and
	 * probe continues with dev->urb_intr == NULL — confirm the open
	 * path handles a missing interrupt URB.
	 */
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			/* buf is freed together with the URB */
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): return value deliberately overwritten below;
	 * wakeup-enable failure is non-fatal here.
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4229
/* Bit-serial CRC-16 over @len bytes of @buf, LSB-first per byte, with
 * polynomial 0x8005 and initial value 0xFFFF.  Used to program the
 * hardware wake-frame filters.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	int i, bit;

	for (i = 0; i < len; i++) {
		u8 byte = buf[i];

		for (bit = 0; bit < 8; bit++) {
			u16 top = crc >> 15;

			crc <<= 1;
			/* feed the next input bit against the shifted-out
			 * msb; on mismatch apply the polynomial (whose low
			 * bit forces crc bit 0 to 1)
			 */
			if (top != (u16)(byte & 1))
				crc = (crc ^ poly) | 0x0001;
			byte >>= 1;
		}
	}

	return crc;
}
4254
/* Configure the device for USB autosuspend (selective suspend).
 *
 * Stops both data paths, clears stale wake status, arms good-frame and
 * PHY wake sources, selects suspend mode 3 in PMT_CTL, clears the
 * wake-up status bits and finally restarts the rx path so a wake frame
 * can be received.  Returns 0 or a negative register-access error.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* Clear wake-up control and any latched wake-source status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* Enable PHY and WOL wake, select suspend mode 3 */
	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* Write-1-to-clear the wake-up status field */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Rx must be running so the device can see a wake frame */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4322
/* Program wake-on-LAN sources for system suspend.
 *
 * @wol is a bitmask of ethtool WAKE_* flags.  Both data paths are
 * stopped and stale wake state cleared, then WUCSR, the WUF_CFGn/
 * WUF_MASKn wake-frame filters and PMT_CTL are programmed per @wol.
 * Rx is restarted at the end so the selected wake frames can be seen.
 *
 * Suspend-mode selection: WAKE_MAGIC alone uses SUS_MODE_3; any other
 * wake source — or any combination of more than one — forces SUS_MODE_0
 * (see the hweight check near the end).
 *
 * Returns 0 on success or a negative error code from the first failing
 * register access.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* Leading frame bytes matched by the wake filters below:
	 * IPv4 multicast MAC prefix, IPv6 multicast MAC prefix, and the
	 * ARP EtherType (0x0806).
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* Clear wake-up control/status and acknowledge latched sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* Disable all wake-frame filters before selectively enabling */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* Mask 7: match the first 3 frame bytes (the CRC input) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* Mask 3: match the first 2 frame bytes (33:33 prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* Mask 0x3000: match frame bytes 12 and 13 (EtherType) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Rx must stay running so wake frames can be received */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4519
/* USB suspend callback — handles both runtime (auto) suspend and system
 * suspend.
 *
 * If the interface is open: refuses autosuspend with -EBUSY while tx is
 * pending, otherwise quiesces both data paths, kills outstanding URBs,
 * stops the stats timer, and programs the hardware either for selective
 * suspend (autosuspend) or for the user's WoL settings (system suspend).
 * If the interface is down, all wake sources are disabled so the device
 * cannot wake the host.
 *
 * Returns 0 on success, -EBUSY to veto autosuspend, or a negative error
 * code from the hardware configuration.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* mark asleep under the lock so tx paths see it */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* runtime PM: wake on any good frame */
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			/* system sleep: honor the configured WoL mask */
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		/* disable all wake-up sources */
		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* enter suspend mode 3 with wake status cleared */
		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* clear WUPS (presumably write-one-to-clear — confirm
		 * against the datasheet)
		 */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4629
/* Submit tx URBs that were deferred while the device was suspended.
 *
 * Called from lan78xx_resume() with dev->txq.lock held and interrupts
 * disabled (hence GFP_ATOMIC).  URBs are drained from the deferred
 * anchor; each is either submitted (and its skb queued on txq) or freed
 * together with its skb when the device is gone, the carrier is down,
 * or an earlier submission stalled the pipe.
 *
 * Returns true if a submission failed with -EPIPE (tx pipe halted), so
 * the caller can schedule EVENT_TX_HALT recovery.
 */
static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
{
	bool pipe_halted = false;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		/* skb was stashed in urb->context when the URB was deferred */
		struct sk_buff *skb = urb->context;
		int ret;

		/* once the pipe stalls, drop the remaining deferred URBs */
		if (!netif_device_present(dev->net) ||
		    !netif_carrier_ok(dev->net) ||
		    pipe_halted) {
			usb_free_urb(urb);
			dev_kfree_skb(skb);
			continue;
		}

		ret = usb_submit_urb(urb, GFP_ATOMIC);

		if (ret == 0) {
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, skb, tx_start);
		} else {
			usb_free_urb(urb);
			dev_kfree_skb(skb);

			if (ret == -EPIPE) {
				netif_stop_queue(dev->net);
				pipe_halted = true;
			} else if (ret == -ENODEV) {
				netif_device_detach(dev->net);
			}
		}
	}

	return pipe_halted;
}
4667
/* USB resume callback — mirror of lan78xx_suspend().
 *
 * If the interface is open: flushes the tx FIFO, resubmits the
 * interrupt URB, replays deferred tx URBs under txq.lock, restarts the
 * tx path and the stats timer.  In all cases the wake-up registers are
 * cleared and then written with their latched-status bits to
 * acknowledge any wake events (presumably write-one-to-clear — confirm
 * against the datasheet).
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			/* shadows the outer ret intentionally: an intr URB
			 * failure is only warned about, not fatal to resume
			 */
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);

			/* NOTE(review): mis-indented but inside the
			 * if (ret < 0) block — warns on any failure
			 */
			netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		/* wake flag cleared under the lock, mirroring suspend */
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (skb_queue_len(&dev->txq) < dev->tx_qlen))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		tasklet_schedule(&dev->bh);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	/* disable wake sources and acknowledge any latched wake events */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4765
4766 static int lan78xx_reset_resume(struct usb_interface *intf)
4767 {
4768         struct lan78xx_net *dev = usb_get_intfdata(intf);
4769         int ret;
4770
4771         netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
4772
4773         ret = lan78xx_reset(dev);
4774         if (ret < 0)
4775                 return ret;
4776
4777         phy_start(dev->net->phydev);
4778
4779         ret = lan78xx_resume(intf);
4780
4781         return ret;
4782 }
4783
/* USB vendor/product IDs handled by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
4804
/* USB driver registration: supports runtime autosuspend (see
 * lan78xx_suspend/lan78xx_resume) and disables hub-initiated LPM.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");