Linux 5.15.57
[platform/kernel/linux-rpi.git] / drivers / net / usb / lan78xx.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32
33 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME     "lan78xx"
36
37 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
38 #define THROTTLE_JIFFIES                (HZ / 8)
39 #define UNLINK_TIMEOUT_MS               3
40
41 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
42
43 #define SS_USB_PKT_SIZE                 (1024)
44 #define HS_USB_PKT_SIZE                 (512)
45 #define FS_USB_PKT_SIZE                 (64)
46
47 #define MAX_RX_FIFO_SIZE                (12 * 1024)
48 #define MAX_TX_FIFO_SIZE                (12 * 1024)
49
50 #define FLOW_THRESHOLD(n)               ((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)    ((FLOW_THRESHOLD(on)  << 0) | \
52                                          (FLOW_THRESHOLD(off) << 8))
53
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS                      9216
56 #define FLOW_ON_HS                      8704
57
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS                     4096
60 #define FLOW_OFF_HS                     1024
61
62 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY           (0x0800)
64 #define MAX_SINGLE_PACKET_SIZE          (9000)
65 #define DEFAULT_TX_CSUM_ENABLE          (true)
66 #define DEFAULT_RX_CSUM_ENABLE          (true)
67 #define DEFAULT_TSO_CSUM_ENABLE         (true)
68 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
69 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
70 #define TX_OVERHEAD                     (8)
71 #define RXW_PADDING                     2
72
73 #define LAN78XX_USB_VENDOR_ID           (0x0424)
74 #define LAN7800_USB_PRODUCT_ID          (0x7800)
75 #define LAN7850_USB_PRODUCT_ID          (0x7850)
76 #define LAN7801_USB_PRODUCT_ID          (0x7801)
77 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
78 #define LAN78XX_OTP_MAGIC               (0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID          (0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
81
82 #define MII_READ                        1
83 #define MII_WRITE                       0
84
85 #define EEPROM_INDICATOR                (0xA5)
86 #define EEPROM_MAC_OFFSET               (0x01)
87 #define MAX_EEPROM_SIZE                 512
88 #define OTP_INDICATOR_1                 (0xF3)
89 #define OTP_INDICATOR_2                 (0xF7)
90
91 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
92                                          WAKE_MCAST | WAKE_BCAST | \
93                                          WAKE_ARP | WAKE_MAGIC)
94
95 /* USB related defines */
96 #define BULK_IN_PIPE                    1
97 #define BULK_OUT_PIPE                   2
98
99 /* default autosuspend delay (mSec)*/
100 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
101
102 /* statistic update interval (mSec) */
103 #define STAT_UPDATE_TIMER               (1 * 1000)
104
105 /* time to wait for MAC or FCT to stop (jiffies) */
106 #define HW_DISABLE_TIMEOUT              (HZ / 10)
107
108 /* time to wait between polling MAC or FCT state (ms) */
109 #define HW_DISABLE_DELAY_MS             1
110
111 /* defines interrupts from interrupt EP */
112 #define MAX_INT_EP                      (32)
113 #define INT_EP_INTEP                    (31)
114 #define INT_EP_OTP_WR_DONE              (28)
115 #define INT_EP_EEE_TX_LPI_START         (26)
116 #define INT_EP_EEE_TX_LPI_STOP          (25)
117 #define INT_EP_EEE_RX_LPI               (24)
118 #define INT_EP_MAC_RESET_TIMEOUT        (23)
119 #define INT_EP_RDFO                     (22)
120 #define INT_EP_TXE                      (21)
121 #define INT_EP_USB_STATUS               (20)
122 #define INT_EP_TX_DIS                   (19)
123 #define INT_EP_RX_DIS                   (18)
124 #define INT_EP_PHY                      (17)
125 #define INT_EP_DP                       (16)
126 #define INT_EP_MAC_ERR                  (15)
127 #define INT_EP_TDFU                     (14)
128 #define INT_EP_TDFO                     (13)
129 #define INT_EP_UTX                      (12)
130 #define INT_EP_GPIO_11                  (11)
131 #define INT_EP_GPIO_10                  (10)
132 #define INT_EP_GPIO_9                   (9)
133 #define INT_EP_GPIO_8                   (8)
134 #define INT_EP_GPIO_7                   (7)
135 #define INT_EP_GPIO_6                   (6)
136 #define INT_EP_GPIO_5                   (5)
137 #define INT_EP_GPIO_4                   (4)
138 #define INT_EP_GPIO_3                   (3)
139 #define INT_EP_GPIO_2                   (2)
140 #define INT_EP_GPIO_1                   (1)
141 #define INT_EP_GPIO_0                   (0)
142
/* ethtool statistics strings (ETH_TOOL get_strings).
 * Order must match the field order of struct lan78xx_statstage /
 * lan78xx_statstage64 exactly — stats are copied out positionally.
 * These strings are userspace-visible ABI; do not "fix" spelling or
 * capitalization (e.g. the lowercase "Rx Fragment Errors") without
 * considering scripts that match on them.
 */
143 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
144         "RX FCS Errors",
145         "RX Alignment Errors",
146         "Rx Fragment Errors",
147         "RX Jabber Errors",
148         "RX Undersize Frame Errors",
149         "RX Oversize Frame Errors",
150         "RX Dropped Frames",
151         "RX Unicast Byte Count",
152         "RX Broadcast Byte Count",
153         "RX Multicast Byte Count",
154         "RX Unicast Frames",
155         "RX Broadcast Frames",
156         "RX Multicast Frames",
157         "RX Pause Frames",
158         "RX 64 Byte Frames",
159         "RX 65 - 127 Byte Frames",
160         "RX 128 - 255 Byte Frames",
161         "RX 256 - 511 Bytes Frames",
162         "RX 512 - 1023 Byte Frames",
163         "RX 1024 - 1518 Byte Frames",
164         "RX Greater 1518 Byte Frames",
165         "EEE RX LPI Transitions",
166         "EEE RX LPI Time",
167         "TX FCS Errors",
168         "TX Excess Deferral Errors",
169         "TX Carrier Errors",
170         "TX Bad Byte Count",
171         "TX Single Collisions",
172         "TX Multiple Collisions",
173         "TX Excessive Collision",
174         "TX Late Collisions",
175         "TX Unicast Byte Count",
176         "TX Broadcast Byte Count",
177         "TX Multicast Byte Count",
178         "TX Unicast Frames",
179         "TX Broadcast Frames",
180         "TX Multicast Frames",
181         "TX Pause Frames",
182         "TX 64 Byte Frames",
183         "TX 65 - 127 Byte Frames",
184         "TX 128 - 255 Byte Frames",
185         "TX 256 - 511 Bytes Frames",
186         "TX 512 - 1023 Byte Frames",
187         "TX 1024 - 1518 Byte Frames",
188         "TX Greater 1518 Byte Frames",
189         "EEE TX LPI Transitions",
190         "EEE TX LPI Time",
191 };
192
/* Raw hardware statistics snapshot, in device wire order.
 * lan78xx_read_stats() fills this struct directly from the
 * USB_VENDOR_REQUEST_GET_STATS control transfer and walks it as a flat
 * u32 array, so field order must match the device layout, the
 * lan78xx_gstrings[] table and struct lan78xx_statstage64.
 * All counters are 32-bit and may roll over; rollover is compensated
 * in lan78xx_check_stat_rollover()/lan78xx_update_stats().
 */
193 struct lan78xx_statstage {
194         u32 rx_fcs_errors;
195         u32 rx_alignment_errors;
196         u32 rx_fragment_errors;
197         u32 rx_jabber_errors;
198         u32 rx_undersize_frame_errors;
199         u32 rx_oversize_frame_errors;
200         u32 rx_dropped_frames;
201         u32 rx_unicast_byte_count;
202         u32 rx_broadcast_byte_count;
203         u32 rx_multicast_byte_count;
204         u32 rx_unicast_frames;
205         u32 rx_broadcast_frames;
206         u32 rx_multicast_frames;
207         u32 rx_pause_frames;
208         u32 rx_64_byte_frames;
209         u32 rx_65_127_byte_frames;
210         u32 rx_128_255_byte_frames;
211         u32 rx_256_511_bytes_frames;
212         u32 rx_512_1023_byte_frames;
213         u32 rx_1024_1518_byte_frames;
214         u32 rx_greater_1518_byte_frames;
215         u32 eee_rx_lpi_transitions;
216         u32 eee_rx_lpi_time;
217         u32 tx_fcs_errors;
218         u32 tx_excess_deferral_errors;
219         u32 tx_carrier_errors;
220         u32 tx_bad_byte_count;
221         u32 tx_single_collisions;
222         u32 tx_multiple_collisions;
223         u32 tx_excessive_collision;
224         u32 tx_late_collisions;
225         u32 tx_unicast_byte_count;
226         u32 tx_broadcast_byte_count;
227         u32 tx_multicast_byte_count;
228         u32 tx_unicast_frames;
229         u32 tx_broadcast_frames;
230         u32 tx_multicast_frames;
231         u32 tx_pause_frames;
232         u32 tx_64_byte_frames;
233         u32 tx_65_127_byte_frames;
234         u32 tx_128_255_byte_frames;
235         u32 tx_256_511_bytes_frames;
236         u32 tx_512_1023_byte_frames;
237         u32 tx_1024_1518_byte_frames;
238         u32 tx_greater_1518_byte_frames;
239         u32 eee_tx_lpi_transitions;
240         u32 eee_tx_lpi_time;
241 };
242
/* 64-bit accumulated statistics (dev->stats.curr_stat).
 * Same field order as struct lan78xx_statstage — lan78xx_update_stats()
 * indexes both structs as parallel flat arrays (u32 vs u64), so any
 * field added/removed here must be mirrored there and in
 * lan78xx_gstrings[].
 */
243 struct lan78xx_statstage64 {
244         u64 rx_fcs_errors;
245         u64 rx_alignment_errors;
246         u64 rx_fragment_errors;
247         u64 rx_jabber_errors;
248         u64 rx_undersize_frame_errors;
249         u64 rx_oversize_frame_errors;
250         u64 rx_dropped_frames;
251         u64 rx_unicast_byte_count;
252         u64 rx_broadcast_byte_count;
253         u64 rx_multicast_byte_count;
254         u64 rx_unicast_frames;
255         u64 rx_broadcast_frames;
256         u64 rx_multicast_frames;
257         u64 rx_pause_frames;
258         u64 rx_64_byte_frames;
259         u64 rx_65_127_byte_frames;
260         u64 rx_128_255_byte_frames;
261         u64 rx_256_511_bytes_frames;
262         u64 rx_512_1023_byte_frames;
263         u64 rx_1024_1518_byte_frames;
264         u64 rx_greater_1518_byte_frames;
265         u64 eee_rx_lpi_transitions;
266         u64 eee_rx_lpi_time;
267         u64 tx_fcs_errors;
268         u64 tx_excess_deferral_errors;
269         u64 tx_carrier_errors;
270         u64 tx_bad_byte_count;
271         u64 tx_single_collisions;
272         u64 tx_multiple_collisions;
273         u64 tx_excessive_collision;
274         u64 tx_late_collisions;
275         u64 tx_unicast_byte_count;
276         u64 tx_broadcast_byte_count;
277         u64 tx_multicast_byte_count;
278         u64 tx_unicast_frames;
279         u64 tx_broadcast_frames;
280         u64 tx_multicast_frames;
281         u64 tx_pause_frames;
282         u64 tx_64_byte_frames;
283         u64 tx_65_127_byte_frames;
284         u64 tx_128_255_byte_frames;
285         u64 tx_256_511_bytes_frames;
286         u64 tx_512_1023_byte_frames;
287         u64 tx_1024_1518_byte_frames;
288         u64 tx_greater_1518_byte_frames;
289         u64 eee_tx_lpi_transitions;
290         u64 eee_tx_lpi_time;
291 };
292
/* Device registers included in a register dump — presumably consumed
 * by the ethtool get_regs path; TODO confirm against the callers,
 * which are outside this chunk.
 */
293 static u32 lan78xx_regs[] = {
294         ID_REV,
295         INT_STS,
296         HW_CFG,
297         PMT_CTL,
298         E2P_CMD,
299         E2P_DATA,
300         USB_STATUS,
301         VLAN_TYPE,
302         MAC_CR,
303         MAC_RX,
304         MAC_TX,
305         FLOW,
306         ERR_STS,
307         MII_ACC,
308         MII_DATA,
309         EEE_TX_LPI_REQ_DLY,
310         EEE_TW_TX_SYS,
311         EEE_TX_LPI_REM_DLY,
312         WUCSR
313 };
314
/* Space for a dump of the 32 standard PHY registers (32 x u32). */
315 #define PHY_REG_SIZE (32 * sizeof(u32))
316
317 struct lan78xx_net;
318
/* Per-device private state for receive filtering and VLAN handling.
 * Hangs off lan78xx_net::driver_priv.
 */
319 struct lan78xx_priv {
320         struct lan78xx_net *dev;
        /* shadow of the RFE control register; rfe_ctl_lock guards it
         * together with the register itself (per the lock comment below)
         */
321         u32 rfe_ctl;
322         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
323         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
        /* VLAN membership bitmap pushed to the device dataport */
324         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
325         struct mutex dataport_mutex; /* for dataport access */
326         spinlock_t rfe_ctl_lock; /* for rfe register access */
        /* deferred work: filter/VLAN updates need USB I/O, which cannot
         * be done from the contexts that request them
         */
327         struct work_struct set_multicast;
328         struct work_struct set_vlan;
        /* Wake-on-LAN configuration — presumably WAKE_* bits; confirm
         * against the ethtool set_wol handler (outside this chunk)
         */
329         u32 wol;
330 };
331
/* Lifecycle state of an skb/URB in flight, stored in skb_data::state
 * (i.e. in skb->cb). "illegal" is deliberately 0 so a zeroed cb is
 * detectable as never-initialized.
 */
332 enum skb_state {
333         illegal = 0,
334         tx_start,
335         tx_done,
336         rx_start,
337         rx_done,
338         rx_cleanup,
339         unlink_start
340 };
341
/* Per-skb bookkeeping overlaid on skb->cb; must stay within
 * sizeof(skb->cb) (48 bytes).
 */
342 struct skb_data {               /* skb->cb is one of these */
343         struct urb *urb;
344         struct lan78xx_net *dev;
345         enum skb_state state;
346         size_t length;
        /* number of network packets carried in this (possibly
         * aggregated) USB transfer
         */
347         int num_of_packet;
348 };
349
/* Context for an asynchronous USB control request. */
350 struct usb_context {
351         struct usb_ctrlrequest req;
352         struct lan78xx_net *dev;
353 };
354
/* Bit numbers for lan78xx_net::flags (set/tested with test_bit() and
 * friends — see e.g. the EVENT_DEV_DISCONNECT check in
 * lan78xx_read_reg()).
 */
355 #define EVENT_TX_HALT                   0
356 #define EVENT_RX_HALT                   1
357 #define EVENT_RX_MEMORY                 2
358 #define EVENT_STS_SPLIT                 3
359 #define EVENT_LINK_RESET                4
360 #define EVENT_RX_PAUSED                 5
361 #define EVENT_DEV_WAKING                6
362 #define EVENT_DEV_ASLEEP                7
363 #define EVENT_DEV_OPEN                  8
364 #define EVENT_STAT_UPDATE               9
365 #define EVENT_DEV_DISCONNECT            10
366
/* Statistics bookkeeping: the raw 32-bit hardware counters wrap, so we
 * keep the last snapshot ("saved"), a wrap count per counter
 * ("rollover_count"), the per-counter span ("rollover_max") and the
 * reconstructed 64-bit totals ("curr_stat"). All guarded by
 * access_lock; see lan78xx_update_stats().
 */
367 struct statstage {
368         struct mutex                    access_lock;    /* for stats access */
369         struct lan78xx_statstage        saved;
370         struct lan78xx_statstage        rollover_count;
371         struct lan78xx_statstage        rollover_max;
372         struct lan78xx_statstage64      curr_stat;
373 };
374
/* State for the chained IRQ domain that fans the device's interrupt
 * endpoint out to clients such as the PHY — NOTE(review): usage is
 * outside this chunk; confirm against the irq domain setup code.
 */
375 struct irq_domain_data {
376         struct irq_domain       *irqdomain;
377         unsigned int            phyirq;
378         struct irq_chip         *irqchip;
379         irq_flow_handler_t      irq_handler;
380         u32                     irqenable;
381         struct mutex            irq_lock;               /* for irq bus access */
382 };
383
/* Main per-device state, allocated per USB interface. */
384 struct lan78xx_net {
385         struct net_device       *net;
386         struct usb_device       *udev;
387         struct usb_interface    *intf;
        /* points at struct lan78xx_priv */
388         void                    *driver_priv;
389
        /* queue depth limits and the skb queues that move frames between
         * the net stack and the USB completion paths
         */
390         int                     rx_qlen;
391         int                     tx_qlen;
392         struct sk_buff_head     rxq;
393         struct sk_buff_head     txq;
394         struct sk_buff_head     done;
395         struct sk_buff_head     txq_pend;
396
397         struct tasklet_struct   bh;
398         struct delayed_work     wq;
399
400         int                     msg_enable;
401
402         struct urb              *urb_intr;
403         struct usb_anchor       deferred;
404
405         struct mutex            dev_mutex; /* serialise open/stop wrt suspend/resume */
406         struct mutex            phy_mutex; /* for phy access */
407         unsigned int            pipe_in, pipe_out, pipe_intr;
408
409         u32                     hard_mtu;       /* count any extra framing */
410         size_t                  rx_urb_size;    /* size for rx urbs */
411
        /* EVENT_* bit flags, accessed with test_bit()/set_bit() */
412         unsigned long           flags;
413
414         wait_queue_head_t       *wait;
415         unsigned char           suspend_count;
416
417         unsigned int            maxpacket;
        /* periodic trigger for lan78xx_update_stats() */
418         struct timer_list       stat_monitor;
419
420         unsigned long           data[5];
421
422         int                     link_on;
423         u8                      mdix_ctrl;
424
        /* chip identification read from ID_REV; gates chip-specific
         * quirks such as the LAN7800 EEPROM/LED pin mux
         */
425         u32                     chipid;
426         u32                     chiprev;
427         struct mii_bus          *mdiobus;
428         phy_interface_t         interface;
429
        /* flow control configuration */
430         int                     fc_autoneg;
431         u8                      fc_request_control;
432
433         int                     delta;
434         struct statstage        stats;
435
436         struct irq_domain_data  domain_data;
437 };
438
439 /* define external phy id */
440 #define PHY_LAN8835                     (0x0007C130)
441 #define PHY_KSZ9031RNX                  (0x00221620)
442
443 /* use ethtool to change the level for any given device */
/* -1 means "no override": keep the driver's default message level —
 * presumably resolved via netif_msg_init(); confirm at the probe site.
 */
444 static int msg_level = -1;
445 module_param(msg_level, int, 0);
446 MODULE_PARM_DESC(msg_level, "Override default message level");
447
448 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
449 {
450         u32 *buf;
451         int ret;
452
453         if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
454                 return -ENODEV;
455
456         buf = kmalloc(sizeof(u32), GFP_KERNEL);
457         if (!buf)
458                 return -ENOMEM;
459
460         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
461                               USB_VENDOR_REQUEST_READ_REGISTER,
462                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
463                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
464         if (likely(ret >= 0)) {
465                 le32_to_cpus(buf);
466                 *data = *buf;
467         } else if (net_ratelimit()) {
468                 netdev_warn(dev->net,
469                             "Failed to read register index 0x%08x. ret = %d",
470                             index, ret);
471         }
472
473         kfree(buf);
474
475         return ret;
476 }
477
478 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
479 {
480         u32 *buf;
481         int ret;
482
483         if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
484                 return -ENODEV;
485
486         buf = kmalloc(sizeof(u32), GFP_KERNEL);
487         if (!buf)
488                 return -ENOMEM;
489
490         *buf = data;
491         cpu_to_le32s(buf);
492
493         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
494                               USB_VENDOR_REQUEST_WRITE_REGISTER,
495                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
496                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
497         if (unlikely(ret < 0) &&
498             net_ratelimit()) {
499                 netdev_warn(dev->net,
500                             "Failed to write register index 0x%08x. ret = %d",
501                             index, ret);
502         }
503
504         kfree(buf);
505
506         return ret;
507 }
508
509 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
510                               u32 data)
511 {
512         int ret;
513         u32 buf;
514
515         ret = lan78xx_read_reg(dev, reg, &buf);
516         if (ret < 0)
517                 return ret;
518
519         buf &= ~mask;
520         buf |= (mask & data);
521
522         ret = lan78xx_write_reg(dev, reg, buf);
523         if (ret < 0)
524                 return ret;
525
526         return 0;
527 }
528
529 static int lan78xx_read_stats(struct lan78xx_net *dev,
530                               struct lan78xx_statstage *data)
531 {
532         int ret = 0;
533         int i;
534         struct lan78xx_statstage *stats;
535         u32 *src;
536         u32 *dst;
537
538         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
539         if (!stats)
540                 return -ENOMEM;
541
542         ret = usb_control_msg(dev->udev,
543                               usb_rcvctrlpipe(dev->udev, 0),
544                               USB_VENDOR_REQUEST_GET_STATS,
545                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
546                               0,
547                               0,
548                               (void *)stats,
549                               sizeof(*stats),
550                               USB_CTRL_SET_TIMEOUT);
551         if (likely(ret >= 0)) {
552                 src = (u32 *)stats;
553                 dst = (u32 *)data;
554                 for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
555                         le32_to_cpus(&src[i]);
556                         dst[i] = src[i];
557                 }
558         } else {
559                 netdev_warn(dev->net,
560                             "Failed to read stat ret = %d", ret);
561         }
562
563         kfree(stats);
564
565         return ret;
566 }
567
/* If the freshly-read 32-bit counter is smaller than the previously
 * saved value, the hardware counter wrapped — bump its per-counter
 * rollover count. Must be used with dev->stats.access_lock held (the
 * only caller, lan78xx_update_stats(), holds it).
 */
568 #define check_counter_rollover(struct1, dev_stats, member)              \
569         do {                                                            \
570                 if ((struct1)->member < (dev_stats).saved.member)       \
571                         (dev_stats).rollover_count.member++;            \
572         } while (0)
573
/* Detect 32-bit wrap for every hardware counter in @stats against the
 * previous snapshot, then make @stats the new snapshot
 * (dev->stats.saved). One check per lan78xx_statstage field.
 */
574 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
575                                         struct lan78xx_statstage *stats)
576 {
577         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
578         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
579         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
580         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
581         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
582         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
583         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
584         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
585         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
586         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
587         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
588         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
589         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
590         check_counter_rollover(stats, dev->stats, rx_pause_frames);
591         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
592         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
593         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
594         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
595         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
596         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
597         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
598         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
599         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
600         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
601         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
602         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
603         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
604         check_counter_rollover(stats, dev->stats, tx_single_collisions);
605         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
606         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
607         check_counter_rollover(stats, dev->stats, tx_late_collisions);
608         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
609         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
610         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
611         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
612         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
613         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
614         check_counter_rollover(stats, dev->stats, tx_pause_frames);
615         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
616         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
617         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
618         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
619         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
620         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
621         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
622         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
623         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
624
625         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
626 }
627
/* Refresh the 64-bit accumulated statistics (dev->stats.curr_stat).
 * Reads the raw 32-bit counters from the device, updates the rollover
 * counts, then reconstructs each 64-bit total as
 *   raw + rollovers * (per-counter max + 1).
 * Silently returns if the device cannot be resumed.
 */
628 static void lan78xx_update_stats(struct lan78xx_net *dev)
629 {
630         u32 *p, *count, *max;
631         u64 *data;
632         int i;
633         struct lan78xx_statstage lan78xx_stats;
634
635         if (usb_autopm_get_interface(dev->intf) < 0)
636                 return;
637
        /* View the parallel stat structs as flat arrays. This relies on
         * lan78xx_statstage (u32) and lan78xx_statstage64 (u64) having
         * identical field order.
         */
638         p = (u32 *)&lan78xx_stats;
639         count = (u32 *)&dev->stats.rollover_count;
640         max = (u32 *)&dev->stats.rollover_max;
641         data = (u64 *)&dev->stats.curr_stat;
642
643         mutex_lock(&dev->stats.access_lock);
644
        /* > 0 means bytes were actually transferred; on failure the
         * previous raw values in lan78xx_stats are stack garbage, but
         * rollover state is then left untouched
         */
645         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
646                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
647
648         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
649                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
650
651         mutex_unlock(&dev->stats.access_lock);
652
653         usb_autopm_put_interface(dev->intf);
654 }
655
656 /* Loop until the read is completed with timeout called with phy_mutex held */
657 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
658 {
659         unsigned long start_time = jiffies;
660         u32 val;
661         int ret;
662
663         do {
664                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
665                 if (unlikely(ret < 0))
666                         return -EIO;
667
668                 if (!(val & MII_ACC_MII_BUSY_))
669                         return 0;
670         } while (!time_after(jiffies, start_time + HZ));
671
672         return -EIO;
673 }
674
675 static inline u32 mii_access(int id, int index, int read)
676 {
677         u32 ret;
678
679         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
680         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
681         if (read)
682                 ret |= MII_ACC_MII_READ_;
683         else
684                 ret |= MII_ACC_MII_WRITE_;
685         ret |= MII_ACC_MII_BUSY_;
686
687         return ret;
688 }
689
690 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
691 {
692         unsigned long start_time = jiffies;
693         u32 val;
694         int ret;
695
696         do {
697                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
698                 if (unlikely(ret < 0))
699                         return -EIO;
700
701                 if (!(val & E2P_CMD_EPC_BUSY_) ||
702                     (val & E2P_CMD_EPC_TIMEOUT_))
703                         break;
704                 usleep_range(40, 100);
705         } while (!time_after(jiffies, start_time + HZ));
706
707         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
708                 netdev_warn(dev->net, "EEPROM read operation timeout");
709                 return -EIO;
710         }
711
712         return 0;
713 }
714
715 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
716 {
717         unsigned long start_time = jiffies;
718         u32 val;
719         int ret;
720
721         do {
722                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
723                 if (unlikely(ret < 0))
724                         return -EIO;
725
726                 if (!(val & E2P_CMD_EPC_BUSY_))
727                         return 0;
728
729                 usleep_range(40, 100);
730         } while (!time_after(jiffies, start_time + HZ));
731
732         netdev_warn(dev->net, "EEPROM is busy");
733         return -EIO;
734 }
735
736 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
737                                    u32 length, u8 *data)
738 {
739         u32 val;
740         u32 saved;
741         int i, ret;
742         int retval;
743
744         /* depends on chip, some EEPROM pins are muxed with LED function.
745          * disable & restore LED function to access EEPROM.
746          */
747         ret = lan78xx_read_reg(dev, HW_CFG, &val);
748         saved = val;
749         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
750                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
751                 ret = lan78xx_write_reg(dev, HW_CFG, val);
752         }
753
754         retval = lan78xx_eeprom_confirm_not_busy(dev);
755         if (retval)
756                 return retval;
757
758         for (i = 0; i < length; i++) {
759                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
760                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
761                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
762                 if (unlikely(ret < 0)) {
763                         retval = -EIO;
764                         goto exit;
765                 }
766
767                 retval = lan78xx_wait_eeprom(dev);
768                 if (retval < 0)
769                         goto exit;
770
771                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
772                 if (unlikely(ret < 0)) {
773                         retval = -EIO;
774                         goto exit;
775                 }
776
777                 data[i] = val & 0xFF;
778                 offset++;
779         }
780
781         retval = 0;
782 exit:
783         if (dev->chipid == ID_REV_CHIP_ID_7800_)
784                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
785
786         return retval;
787 }
788
789 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
790                                u32 length, u8 *data)
791 {
792         u8 sig;
793         int ret;
794
795         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
796         if ((ret == 0) && (sig == EEPROM_INDICATOR))
797                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
798         else
799                 ret = -EINVAL;
800
801         return ret;
802 }
803
/* Write @length bytes from @data to the EEPROM at @offset.
 * Sequence: optionally disable LED pin muxing (LAN7800), confirm the
 * controller is idle, issue write-enable (EWEN), then one
 * fill-data/WRITE/wait cycle per byte. The saved HW_CFG (LED) value is
 * restored on every exit path. Returns 0 on success, negative on error.
 */
804 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
805                                     u32 length, u8 *data)
806 {
807         u32 val;
808         u32 saved;
809         int i, ret;
810         int retval;
811
812         /* depends on chip, some EEPROM pins are muxed with LED function.
813          * disable & restore LED function to access EEPROM.
814          */
815         ret = lan78xx_read_reg(dev, HW_CFG, &val);
816         saved = val;
817         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
818                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
819                 ret = lan78xx_write_reg(dev, HW_CFG, val);
820         }
821
822         retval = lan78xx_eeprom_confirm_not_busy(dev);
823         if (retval)
824                 goto exit;
825
826         /* Issue write/erase enable command */
827         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
828         ret = lan78xx_write_reg(dev, E2P_CMD, val);
829         if (unlikely(ret < 0)) {
830                 retval = -EIO;
831                 goto exit;
832         }
833
834         retval = lan78xx_wait_eeprom(dev);
835         if (retval < 0)
836                 goto exit;
837
838         for (i = 0; i < length; i++) {
839                 /* Fill data register */
840                 val = data[i];
841                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
842                 if (ret < 0) {
843                         retval = -EIO;
844                         goto exit;
845                 }
846
847                 /* Send "write" command */
848                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
849                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
850                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
851                 if (ret < 0) {
852                         retval = -EIO;
853                         goto exit;
854                 }
855
                /* each byte must complete before the next is queued */
856                 retval = lan78xx_wait_eeprom(dev);
857                 if (retval < 0)
858                         goto exit;
859
860                 offset++;
861         }
862
863         retval = 0;
864 exit:
        /* restore saved LED configuration on every path */
865         if (dev->chipid == ID_REV_CHIP_ID_7800_)
866                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
867
868         return retval;
869 }
870
/* Read @length bytes from the OTP memory starting at @offset into @data.
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set), power it
 * up first and wait (up to ~1s) for the bit to clear.  Each byte is
 * fetched by programming the 16-bit address across OTP_ADDR1/OTP_ADDR2,
 * issuing a READ command, and busy-polling OTP_STATUS.
 *
 * Returns 0 on success, -EIO on timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split: bits 15:11 in ADDR1, bits 10:3 in ADDR2 */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		/* wait for the read to complete */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
923
/* Program @length bytes from @data into OTP memory starting at @offset.
 *
 * Mirrors lan78xx_read_raw_otp(): powers up the OTP block if needed,
 * then selects BYTE program mode and writes one byte per iteration,
 * busy-polling OTP_STATUS after each program/verify command.
 *
 * OTP is one-time programmable: bits can only be set, never cleared.
 *
 * Returns 0 on success, -EIO on timeout.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* address is split: bits 15:11 in ADDR1, bits 10:3 in ADDR2 */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		/* wait for program/verify of this byte to complete */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
975
976 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
977                             u32 length, u8 *data)
978 {
979         u8 sig;
980         int ret;
981
982         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
983
984         if (ret == 0) {
985                 if (sig == OTP_INDICATOR_2)
986                         offset += 0x100;
987                 else if (sig != OTP_INDICATOR_1)
988                         ret = -EINVAL;
989                 if (!ret)
990                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
991         }
992
993         return ret;
994 }
995
996 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
997 {
998         int i, ret;
999
1000         for (i = 0; i < 100; i++) {
1001                 u32 dp_sel;
1002
1003                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1004                 if (unlikely(ret < 0))
1005                         return -EIO;
1006
1007                 if (dp_sel & DP_SEL_DPRDY_)
1008                         return 0;
1009
1010                 usleep_range(40, 100);
1011         }
1012
1013         netdev_warn(dev->net, "%s timed out", __func__);
1014
1015         return -EIO;
1016 }
1017
/* Write @length 32-bit words from @buf into internal RAM bank
 * @ram_select starting at @addr, via the chip's data-port interface.
 *
 * Serialized by pdata->dataport_mutex and holds a USB autopm reference
 * for the duration of the transfer.
 *
 * NOTE(review): a failed usb_autopm_get_interface() returns 0 (success)
 * without writing anything, and the intermediate lan78xx_read_reg/
 * lan78xx_write_reg return codes inside the setup and loop are not
 * checked — pre-existing behavior left as-is.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM bank, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	/* one address/data/command cycle per word, waiting in between */
	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1058
1059 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1060                                     int index, u8 addr[ETH_ALEN])
1061 {
1062         u32 temp;
1063
1064         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1065                 temp = addr[3];
1066                 temp = addr[2] | (temp << 8);
1067                 temp = addr[1] | (temp << 8);
1068                 temp = addr[0] | (temp << 8);
1069                 pdata->pfilter_table[index][1] = temp;
1070                 temp = addr[5];
1071                 temp = addr[4] | (temp << 8);
1072                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1073                 pdata->pfilter_table[index][0] = temp;
1074         }
1075 }
1076
1077 /* returns hash bit number for given MAC address */
1078 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1079 {
1080         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1081 }
1082
/* Deferred worker: push the shadow RX-filter state built by
 * lan78xx_set_multicast() into hardware — the 512-bit multicast hash
 * table (via the data port), the perfect-filter MAF registers, and
 * finally RFE_CTL.  Runs in process context because USB register
 * access sleeps.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 holds the device's own address; start at 1.  MAF_HI is
	 * cleared first so the entry is invalid while LO/HI are updated.
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1106
/* ndo_set_rx_mode handler: rebuild the shadow RX filter state (RFE_CTL
 * flags, multicast hash table, perfect-filter table) under the rfe_ctl
 * spinlock, then defer the actual register writes to a work item
 * because USB register access sleeps and this can be called in atomic
 * context.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: unicast/multicast off, filters cleared */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast is always accepted */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow: fall back to the 512-bit hash */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1170
1171 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1172                                       u16 lcladv, u16 rmtadv)
1173 {
1174         u32 flow = 0, fct_flow = 0;
1175         u8 cap;
1176
1177         if (dev->fc_autoneg)
1178                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1179         else
1180                 cap = dev->fc_request_control;
1181
1182         if (cap & FLOW_CTRL_TX)
1183                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1184
1185         if (cap & FLOW_CTRL_RX)
1186                 flow |= FLOW_CR_RX_FCEN_;
1187
1188         if (dev->udev->speed == USB_SPEED_SUPER)
1189                 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1190         else if (dev->udev->speed == USB_SPEED_HIGH)
1191                 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1192
1193         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1194                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1195                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1196
1197         lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1198
1199         /* threshold value should be set before enabling flow */
1200         lan78xx_write_reg(dev, FLOW, flow);
1201
1202         return 0;
1203 }
1204
/* Reset the MAC by setting MAC_CR_RST_ and polling (up to ~1s) for the
 * bit to self-clear.  Serialized against MDIO activity via phy_mutex.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset bit never clears, or a
 * negative error from the register accessors.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}
1250
/* Handle a PHY link-state change (scheduled from the interrupt URB).
 *
 * On link down: reset the MAC and stop the statistics timer.
 * On link up: tune USB3 U1/U2 low-power states according to the
 * negotiated speed, resolve flow control from the advertisement
 * registers, restart the statistics timer and kick the bottom half.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	/* sample link state under the phydev lock */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		/* read local/remote advertisement to resolve pause config */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return 0;
}
1341
1342 /* some work can't be done in tasklets, so we use keventd
1343  *
1344  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1345  * but tasklet_schedule() doesn't.      hope the failure is rare.
1346  */
1347 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1348 {
1349         set_bit(work, &dev->flags);
1350         if (!schedule_delayed_work(&dev->wq, 0))
1351                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1352 }
1353
/* Parse a completed interrupt-endpoint URB.  The device reports a
 * single 32-bit little-endian status word; a PHY interrupt defers a
 * link reset to keventd and, when the PHY irq domain is active,
 * re-dispatches the event as a nested interrupt.
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	/* status word may be unaligned within the transfer buffer */
	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		/* generic_handle_irq() must run with interrupts disabled */
		if (dev->domain_data.phyirq > 0) {
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}
1380
/* ethtool get_eeprom_len: always expose the full EEPROM address space */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1385
1386 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1387                                       struct ethtool_eeprom *ee, u8 *data)
1388 {
1389         struct lan78xx_net *dev = netdev_priv(netdev);
1390         int ret;
1391
1392         ret = usb_autopm_get_interface(dev->intf);
1393         if (ret)
1394                 return ret;
1395
1396         ee->magic = LAN78XX_EEPROM_MAGIC;
1397
1398         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1399
1400         usb_autopm_put_interface(dev->intf);
1401
1402         return ret;
1403 }
1404
1405 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1406                                       struct ethtool_eeprom *ee, u8 *data)
1407 {
1408         struct lan78xx_net *dev = netdev_priv(netdev);
1409         int ret;
1410
1411         ret = usb_autopm_get_interface(dev->intf);
1412         if (ret)
1413                 return ret;
1414
1415         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1416          * to load data from EEPROM
1417          */
1418         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1419                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1420         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1421                  (ee->offset == 0) &&
1422                  (ee->len == 512) &&
1423                  (data[0] == OTP_INDICATOR_1))
1424                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1425
1426         usb_autopm_put_interface(dev->intf);
1427
1428         return ret;
1429 }
1430
1431 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1432                                 u8 *data)
1433 {
1434         if (stringset == ETH_SS_STATS)
1435                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1436 }
1437
1438 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1439 {
1440         if (sset == ETH_SS_STATS)
1441                 return ARRAY_SIZE(lan78xx_gstrings);
1442         else
1443                 return -EOPNOTSUPP;
1444 }
1445
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy
 * the current snapshot to @data under the stats access lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1457
/* ethtool get_wol: report Wake-on-LAN capability and the currently
 * selected wake options.  WoL is only reported as supported when the
 * USB configuration advertises remote wakeup (USB_CFG_RMT_WKP_).
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		/* can't read the register: report WoL unavailable */
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1485
1486 static int lan78xx_set_wol(struct net_device *netdev,
1487                            struct ethtool_wolinfo *wol)
1488 {
1489         struct lan78xx_net *dev = netdev_priv(netdev);
1490         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1491         int ret;
1492
1493         ret = usb_autopm_get_interface(dev->intf);
1494         if (ret < 0)
1495                 return ret;
1496
1497         if (wol->wolopts & ~WAKE_ALL)
1498                 return -EINVAL;
1499
1500         pdata->wol = wol->wolopts;
1501
1502         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1503
1504         phy_ethtool_set_wol(netdev->phydev, wol);
1505
1506         usb_autopm_put_interface(dev->intf);
1507
1508         return ret;
1509 }
1510
/* ethtool get_eee: combine PHY EEE status with the MAC's EEE enable
 * bit and LPI request delay register.
 *
 * NOTE(review): the lan78xx_read_reg() results for MAC_CR and
 * EEE_TX_LPI_REQ_DLY are not checked before use — pre-existing
 * behavior kept as-is.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both sides advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1548
1549 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1550 {
1551         struct lan78xx_net *dev = netdev_priv(net);
1552         int ret;
1553         u32 buf;
1554
1555         ret = usb_autopm_get_interface(dev->intf);
1556         if (ret < 0)
1557                 return ret;
1558
1559         if (edata->eee_enabled) {
1560                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1561                 buf |= MAC_CR_EEE_EN_;
1562                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1563
1564                 phy_ethtool_set_eee(net->phydev, edata);
1565
1566                 buf = (u32)edata->tx_lpi_timer;
1567                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1568         } else {
1569                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1570                 buf &= ~MAC_CR_EEE_EN_;
1571                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1572         }
1573
1574         usb_autopm_put_interface(dev->intf);
1575
1576         return 0;
1577 }
1578
1579 static u32 lan78xx_get_link(struct net_device *net)
1580 {
1581         u32 link;
1582
1583         mutex_lock(&net->phydev->lock);
1584         phy_read_status(net->phydev);
1585         link = net->phydev->link;
1586         mutex_unlock(&net->phydev->lock);
1587
1588         return link;
1589 }
1590
1591 static void lan78xx_get_drvinfo(struct net_device *net,
1592                                 struct ethtool_drvinfo *info)
1593 {
1594         struct lan78xx_net *dev = netdev_priv(net);
1595
1596         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1597         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1598 }
1599
/* ethtool get_msglevel: return the driver's message-enable bitmap */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1606
/* ethtool set_msglevel: set the driver's message-enable bitmap */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1613
1614 static int lan78xx_get_link_ksettings(struct net_device *net,
1615                                       struct ethtool_link_ksettings *cmd)
1616 {
1617         struct lan78xx_net *dev = netdev_priv(net);
1618         struct phy_device *phydev = net->phydev;
1619         int ret;
1620
1621         ret = usb_autopm_get_interface(dev->intf);
1622         if (ret < 0)
1623                 return ret;
1624
1625         phy_ethtool_ksettings_get(phydev, cmd);
1626
1627         usb_autopm_put_interface(dev->intf);
1628
1629         return ret;
1630 }
1631
/* ethtool set_link_ksettings: apply speed/duplex/autoneg via phylib.
 *
 * When autoneg is disabled, the link is bounced so the forced
 * parameters take effect: BMCR loopback is set briefly, which drops
 * the link, then the original BMCR value is restored.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1659
1660 static void lan78xx_get_pause(struct net_device *net,
1661                               struct ethtool_pauseparam *pause)
1662 {
1663         struct lan78xx_net *dev = netdev_priv(net);
1664         struct phy_device *phydev = net->phydev;
1665         struct ethtool_link_ksettings ecmd;
1666
1667         phy_ethtool_ksettings_get(phydev, &ecmd);
1668
1669         pause->autoneg = dev->fc_autoneg;
1670
1671         if (dev->fc_request_control & FLOW_CTRL_TX)
1672                 pause->tx_pause = 1;
1673
1674         if (dev->fc_request_control & FLOW_CTRL_RX)
1675                 pause->rx_pause = 1;
1676 }
1677
/* ethtool set_pauseparam: record the requested flow-control settings.
 *
 * Pause autoneg can only be requested while link autoneg is enabled.
 * With autoneg on, the PHY advertisement is rewritten to reflect the
 * request and renegotiation is triggered; with autoneg off, the
 * settings are applied when the link next comes up
 * (lan78xx_update_flowcontrol()).
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg requires link autoneg */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* replace the Pause/Asym_Pause advertisement bits with
		 * ones matching the new request
		 */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1722
1723 static int lan78xx_get_regs_len(struct net_device *netdev)
1724 {
1725         if (!netdev->phydev)
1726                 return (sizeof(lan78xx_regs));
1727         else
1728                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1729 }
1730
/* ethtool get_regs: fill @buf with the MAC registers listed in lan78xx_regs,
 * then (if a PHY is attached) PHY registers 0..31.
 * NOTE(review): read errors from lan78xx_read_reg()/phy_read() are ignored,
 * so individual slots may contain stale or error-encoded values.
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}
1750
/* ethtool operations supported by the LAN78xx family */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link       = lan78xx_get_link,
	.nway_reset     = phy_ethtool_nway_reset,
	.get_drvinfo    = lan78xx_get_drvinfo,
	.get_msglevel   = lan78xx_get_msglevel,
	.set_msglevel   = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom     = lan78xx_ethtool_get_eeprom,
	.set_eeprom     = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings    = lan78xx_get_strings,
	.get_wol        = lan78xx_get_wol,
	.set_wol        = lan78xx_set_wol,
	.get_ts_info    = ethtool_op_get_ts_info,
	.get_eee        = lan78xx_get_eee,
	.set_eee        = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len   = lan78xx_get_regs_len,
	.get_regs       = lan78xx_get_regs,
};
1775
/* Establish the device MAC address at init time.
 * Precedence: address already programmed in RX_ADDRL/RX_ADDRH; otherwise
 * platform/Device Tree; otherwise EEPROM/OTP; otherwise a random address.
 * The chosen address is written back to the MAC and to perfect filter 0.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* Unpack the two registers into byte order (LSB of addr_lo first) */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* Repack and program the MAC with the chosen address */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* Mirror the address into perfect filter slot 0 so unicast frames
	 * addressed to us pass the receive filter.
	 */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1824
1825 /* MDIO read and write wrappers for phylib */
1826 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1827 {
1828         struct lan78xx_net *dev = bus->priv;
1829         u32 val, addr;
1830         int ret;
1831
1832         ret = usb_autopm_get_interface(dev->intf);
1833         if (ret < 0)
1834                 return ret;
1835
1836         mutex_lock(&dev->phy_mutex);
1837
1838         /* confirm MII not busy */
1839         ret = lan78xx_phy_wait_not_busy(dev);
1840         if (ret < 0)
1841                 goto done;
1842
1843         /* set the address, index & direction (read from PHY) */
1844         addr = mii_access(phy_id, idx, MII_READ);
1845         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1846
1847         ret = lan78xx_phy_wait_not_busy(dev);
1848         if (ret < 0)
1849                 goto done;
1850
1851         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1852
1853         ret = (int)(val & 0xFFFF);
1854
1855 done:
1856         mutex_unlock(&dev->phy_mutex);
1857         usb_autopm_put_interface(dev->intf);
1858
1859         return ret;
1860 }
1861
1862 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1863                                  u16 regval)
1864 {
1865         struct lan78xx_net *dev = bus->priv;
1866         u32 val, addr;
1867         int ret;
1868
1869         ret = usb_autopm_get_interface(dev->intf);
1870         if (ret < 0)
1871                 return ret;
1872
1873         mutex_lock(&dev->phy_mutex);
1874
1875         /* confirm MII not busy */
1876         ret = lan78xx_phy_wait_not_busy(dev);
1877         if (ret < 0)
1878                 goto done;
1879
1880         val = (u32)regval;
1881         ret = lan78xx_write_reg(dev, MII_DATA, val);
1882
1883         /* set the address, index & direction (write to PHY) */
1884         addr = mii_access(phy_id, idx, MII_WRITE);
1885         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1886
1887         ret = lan78xx_phy_wait_not_busy(dev);
1888         if (ret < 0)
1889                 goto done;
1890
1891 done:
1892         mutex_unlock(&dev->phy_mutex);
1893         usb_autopm_put_interface(dev->intf);
1894         return 0;
1895 }
1896
/* Allocate and register the MDIO bus used to reach the (internal or
 * external) PHY. Returns 0 on success or a negative errno; on failure the
 * allocated bus is freed.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* Unique bus id derived from the USB bus/device numbers */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	/* Restrict probing to the addresses each chip can respond on */
	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	/* Register against the optional "mdio" DT child node, if present */
	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1943
/* Teardown counterpart of lan78xx_mdio_init(): unregister and free the bus */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1949
/* phylib link-change callback. Applies a chip workaround for forced-100
 * mode; no other action is taken on link changes here.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);

		/* Bounce BMCR through 10 Mbps before re-forcing 100 Mbps */
		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1981
1982 static int irq_map(struct irq_domain *d, unsigned int irq,
1983                    irq_hw_number_t hwirq)
1984 {
1985         struct irq_domain_data *data = d->host_data;
1986
1987         irq_set_chip_data(irq, data);
1988         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1989         irq_set_noprobe(irq);
1990
1991         return 0;
1992 }
1993
/* irq_domain .unmap callback: undo irq_map() — detach chip/handler and
 * drop the chip-data pointer.
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1999
/* irq_domain operations for the chip's interrupt endpoint sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2004
2005 static void lan78xx_irq_mask(struct irq_data *irqd)
2006 {
2007         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2008
2009         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2010 }
2011
2012 static void lan78xx_irq_unmask(struct irq_data *irqd)
2013 {
2014         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2015
2016         data->irqenable |= BIT(irqd_to_hwirq(irqd));
2017 }
2018
2019 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2020 {
2021         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2022
2023         mutex_lock(&data->irq_lock);
2024 }
2025
/* irq_chip .irq_bus_sync_unlock: flush the cached enable mask to the
 * INT_EP_CTL register (if it changed) and release the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context, and USB
	 * register access sleeps.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
2042
/* irq_chip backing the interrupt-endpoint sources; mask state is cached
 * and flushed over USB in the bus-lock callbacks above.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2050
/* Create the irq domain for the chip's interrupt-endpoint sources and map
 * the PHY interrupt. On failure the domain is torn down and -EINVAL is
 * returned; dev->domain_data.phyirq stays 0 so phylib falls back to polling.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* Seed the cached enable mask from the current hardware state */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2089
2090 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2091 {
2092         if (dev->domain_data.phyirq > 0) {
2093                 irq_dispose_mapping(dev->domain_data.phyirq);
2094
2095                 if (dev->domain_data.irqdomain)
2096                         irq_domain_remove(dev->domain_data.irqdomain);
2097         }
2098         dev->domain_data.phyirq = 0;
2099         dev->domain_data.irqdomain = NULL;
2100 }
2101
/* PHY fixup for an external LAN8835 on LAN7801: route the shared pin to
 * IRQ_N mode and enable the MAC-side RGMII TX clock delay.
 * Register 0x8010 and the 0x3D00 DLL tune value are vendor-specific —
 * presumably per the LAN8835 datasheet; TODO confirm against it.
 * Returns 1 so phylib treats the fixup as applied.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	lan78xx_write_reg(dev, MAC_RGMII_ID,
			  MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2124
/* PHY fixup for an external Micrel KSZ9031RNX on LAN7801: program RGMII pad
 * skews via MMD and record the resulting RGMII RXID interface mode.
 * Skew values are board-tuned — presumably per the KSZ9031 datasheet;
 * TODO confirm. Returns 1 so phylib treats the fixup as applied.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2141
/* LAN7801-specific PHY setup: find a PHY on the MDIO bus, or fall back to
 * registering a 1 Gbps fixed-link PHY when none is present. For a real
 * external PHY, install the KSZ9031RNX/LAN8835 fixups. Returns the PHY
 * device on success or NULL on any failure.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* Enable TXC delay, tune the TX DLL and turn on the RGMII
		 * reference clocks for the fixed-link configuration.
		 * NOTE(review): ret from these register accesses is ignored.
		 */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}
2196
/* Locate, connect and configure the PHY for the detected chip.
 * - LAN7801: external or fixed PHY via lan7801_phy_init()
 * - LAN7800/7850: internal GMII PHY
 * Also wires up the PHY interrupt (or polling), advertises pause modes,
 * honours the optional "microchip,led-modes" DT property and kicks off
 * autonegotiation. Returns 0 on success, -EIO on any failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* Undo whatever lan7801_phy_init() registered */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			/* Enable LED n iff the property lists > n modes */
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2299
/* Program the maximum RX frame length (@size, excluding FCS) into MAC_RX.
 * The receiver is temporarily disabled around the update if it was running,
 * and re-enabled afterwards. Always returns 0.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	u32 buf;
	bool rxenabled;

	lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	/* The max-size field must not be changed while RX is enabled */
	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	lan78xx_write_reg(dev, MAC_RX, buf);

	/* Restore the receiver if we stopped it above */
	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
2327
/* Asynchronously unlink every URB queued on @q that is not already being
 * unlinked. Returns the number of URBs for which usb_unlink_urb() was
 * issued successfully (or completed synchronously).
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* Restart the walk each pass: the lock is dropped below, so
		 * the queue may change under us. Skip entries already being
		 * unlinked.
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2372
/* ndo_change_mtu: update the hardware max frame length and, when the RX
 * URB size tracked the old hard MTU and must grow, flush in-flight RX URBs
 * so they are resubmitted at the new size. Returns 0, -EDOM for an MTU
 * whose link-layer size is a multiple of the USB packet size, or a
 * negative errno from runtime-PM wakeup.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	/* rx_urb_size only tracked hard_mtu if they were equal before */
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				/* drop too-small in-flight RX URBs and let
				 * the bottom half refill the queue
				 */
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2408
/* ndo_set_mac_address: program a new MAC address into RX_ADDRL/RX_ADDRH and
 * perfect filter slot 0. Only allowed while the interface is down.
 * Returns 0, -EBUSY if running, or -EADDRNOTAVAIL for an invalid address.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* Pack bytes little-endian-first into the two address registers */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	/* Added to support MAC address changes */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	return 0;
}
2439
/* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate NETIF_F_RXCSUM and the VLAN CTAG features
 * into RFE_CTL bits. The shadow rfe_ctl is updated under a spinlock; the
 * register write happens outside it (USB access sleeps). Always returns 0.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2474
/* Workqueue handler: push the shadow VLAN filter table to the device's
 * dataport. Runs in process context because the register access sleeps.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2484
2485 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2486                                    __be16 proto, u16 vid)
2487 {
2488         struct lan78xx_net *dev = netdev_priv(netdev);
2489         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2490         u16 vid_bit_index;
2491         u16 vid_dword_index;
2492
2493         vid_dword_index = (vid >> 5) & 0x7F;
2494         vid_bit_index = vid & 0x1F;
2495
2496         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2497
2498         /* defer register writes to a sleepable context */
2499         schedule_work(&pdata->set_vlan);
2500
2501         return 0;
2502 }
2503
2504 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2505                                     __be16 proto, u16 vid)
2506 {
2507         struct lan78xx_net *dev = netdev_priv(netdev);
2508         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2509         u16 vid_bit_index;
2510         u16 vid_dword_index;
2511
2512         vid_dword_index = (vid >> 5) & 0x7F;
2513         vid_bit_index = vid & 0x1F;
2514
2515         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2516
2517         /* defer register writes to a sleepable context */
2518         schedule_work(&pdata->set_vlan);
2519
2520         return 0;
2521 }
2522
/* Initialize the USB Latency Tolerance Messaging (LTM) registers.
 * If LTM is enabled in USB_CFG1 and the EEPROM/OTP carries a 24-byte LTM
 * block (indicated at offset 0x3F), use those values; otherwise program
 * all six registers to zero.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				/* temp[1] is a word offset into the EEPROM */
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2561
/* Enable a hardware block: set the @hw_enable bit(s) in @reg via a
 * read-modify-write. Returns 0 or a negative register-access error.
 */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
2566
/* Disable a hardware block and wait until it reports itself stopped.
 *
 * @reg:         control/status register of the block
 * @hw_enabled:  enable bit(s); cleared if currently set
 * @hw_disabled: status bit(s) the hardware raises once actually stopped
 *
 * Returns 0 once stopped, -ETIME if @hw_disabled is not observed within
 * HW_DISABLE_TIMEOUT, or a negative error from a failed register access.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		/* Poll for the disabled-status bit, sleeping between reads */
		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do  {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	ret = stopped ? 0 : -ETIME;

	return ret;
}
2606
/* Flush a FIFO by setting its @fifo_flush (reset) bit in @reg.
 * The caller must stop the corresponding data path first (see the
 * comments on the tx/rx flush wrappers below).
 */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
2611
2612 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2613 {
2614         int ret;
2615
2616         netif_dbg(dev, drv, dev->net, "start tx path");
2617
2618         /* Start the MAC transmitter */
2619
2620         ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2621         if (ret < 0)
2622                 return ret;
2623
2624         /* Start the Tx FIFO */
2625
2626         ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2627         if (ret < 0)
2628                 return ret;
2629
2630         return 0;
2631 }
2632
2633 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2634 {
2635         int ret;
2636
2637         netif_dbg(dev, drv, dev->net, "stop tx path");
2638
2639         /* Stop the Tx FIFO */
2640
2641         ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2642         if (ret < 0)
2643                 return ret;
2644
2645         /* Stop the MAC transmitter */
2646
2647         ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2648         if (ret < 0)
2649                 return ret;
2650
2651         return 0;
2652 }
2653
/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	/* Pulse the Tx FIFO reset bit */
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
2661
2662 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2663 {
2664         int ret;
2665
2666         netif_dbg(dev, drv, dev->net, "start rx path");
2667
2668         /* Start the Rx FIFO */
2669
2670         ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2671         if (ret < 0)
2672                 return ret;
2673
2674         /* Start the MAC receiver*/
2675
2676         ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2677         if (ret < 0)
2678                 return ret;
2679
2680         return 0;
2681 }
2682
2683 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2684 {
2685         int ret;
2686
2687         netif_dbg(dev, drv, dev->net, "stop rx path");
2688
2689         /* Stop the MAC receiver */
2690
2691         ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2692         if (ret < 0)
2693                 return ret;
2694
2695         /* Stop the Rx FIFO */
2696
2697         ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2698         if (ret < 0)
2699                 return ret;
2700
2701         return 0;
2702 }
2703
/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	/* Pulse the Rx FIFO reset bit */
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
2711
/* Perform a "lite" reset of the chip and reinitialise all registers:
 * MAC address, USB burst/delay configuration, LTM, FIFO sizes, flow
 * control, receive-filter engine, offload features, and finally the
 * PHY. Returns 0 on success or a negative errno (including -ETIMEDOUT
 * if the reset or PHY-reset completion bits never clear).
 *
 * Register accesses are strictly ordered; do not reorder the steps.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	/* Trigger a LiteReset via HW_CFG and poll (up to 1s) for the
	 * self-clearing LRST bit.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* Size the bulk-in burst and URB queues for the negotiated USB
	 * speed; BURST_CAP is expressed in USB packets.
	 */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
	if (ret < 0)
		return ret;

	/* Enable multiple ethernet frames per USB transfer (MEF) */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* Enable burst cap (BCE) */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* Clear any stale interrupts and disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Wait (up to 1s) for PHY reset to complete AND the device to
	 * report ready again.
	 */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	return ret;
}
2907
/* Prime the rollover-detection limits used by the statistics update
 * machinery and request an initial statistics refresh.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	/* The byte counters and EEE LPI counters are full 32-bit wide;
	 * override the 20-bit default set above for those fields.
	 */
	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	/* Ask the kevent machinery to fetch fresh hardware counters */
	set_bit(EVENT_STAT_UPDATE, &dev->flags);
}
2933
/* ndo_open handler: wake the device (autopm), start the PHY, submit the
 * interrupt URB used for link-change notification, flush and start both
 * data paths, then enable the Tx queue and kick a link reset.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	/* Resume the interface; dropped again at the end of this function */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	/* Flush stale data before (re)starting the paths; the paths are
	 * stopped at this point, as lan78xx_flush_*_fifo() requires.
	 */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* Force a link evaluation via the deferred kevent handler */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
2991
/* Cancel all in-flight Rx/Tx URBs and wait for their completions.
 *
 * An on-stack waitqueue is published through dev->wait; completion
 * handlers wake it as URBs finish. We sleep in UNLINK_TIMEOUT_MS slices
 * until both the rxq and txq queues drain, then free everything that
 * accumulated on the done queue.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	/* Unpublish the waitqueue before it goes out of scope */
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* Release completed skbs and their URBs */
	while (!skb_queue_empty(&dev->done)) {
		struct skb_data *entry;
		struct sk_buff *skb;

		skb = skb_dequeue(&dev->done);
		entry = (struct skb_data *)(skb->cb);
		usb_free_urb(entry->urb);
		dev_kfree_skb(skb);
	}
}
3026
/* ndo_stop handler: quiesce the device in the reverse order of open —
 * stop the stats timer, the queue and the bottom-half, cancel URBs,
 * stop both data paths and the PHY, then neutralise deferred work.
 * Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	tasklet_kill(&dev->bh);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* Balances the autopm get done in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3075
3076 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
3077                                        struct sk_buff *skb, gfp_t flags)
3078 {
3079         u32 tx_cmd_a, tx_cmd_b;
3080         void *ptr;
3081
3082         if (skb_cow_head(skb, TX_OVERHEAD)) {
3083                 dev_kfree_skb_any(skb);
3084                 return NULL;
3085         }
3086
3087         if (skb_linearize(skb)) {
3088                 dev_kfree_skb_any(skb);
3089                 return NULL;
3090         }
3091
3092         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3093
3094         if (skb->ip_summed == CHECKSUM_PARTIAL)
3095                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3096
3097         tx_cmd_b = 0;
3098         if (skb_is_gso(skb)) {
3099                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3100
3101                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3102
3103                 tx_cmd_a |= TX_CMD_A_LSO_;
3104         }
3105
3106         if (skb_vlan_tag_present(skb)) {
3107                 tx_cmd_a |= TX_CMD_A_IVTG_;
3108                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3109         }
3110
3111         ptr = skb_push(skb, 8);
3112         put_unaligned_le32(tx_cmd_a, ptr);
3113         put_unaligned_le32(tx_cmd_b, ptr + 4);
3114
3115         return skb;
3116 }
3117
/* Move @skb from @list to dev->done, setting its state to @state and
 * returning the previous state.
 *
 * Locking: the two queue locks are taken hand-over-hand — interrupts
 * are disabled once via spin_lock_irqsave() on @list and only restored
 * by the final spin_unlock_irqrestore() on dev->done, so the transfer
 * is atomic with respect to both queues.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* Only schedule the bottom half on the empty -> non-empty
	 * transition; further entries will be drained by the same run.
	 */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
3140
/* URB completion handler for bulk-out transfers (runs in interrupt
 * context). Updates Tx statistics, triggers recovery for halted
 * endpoints, releases the autopm reference taken for the transfer and
 * hands the skb to the bottom half via the done queue.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* Endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* Link-level trouble: stop feeding the device */
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
3186
/* Append @newsk to @list and record @state in its skb_data.
 * Uses the unlocked __skb_queue_tail(), so the caller is expected to
 * hold the list lock.
 */
static void lan78xx_queue_skb(struct sk_buff_head *list,
			      struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *)newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}
3195
/* ndo_start_xmit handler: prepend the hardware Tx command words and
 * queue the frame on txq_pend; the bottom-half tasklet performs the
 * actual URB submission. Always returns NETDEV_TX_OK (the skb is
 * consumed either way).
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	/* Device is suspended: kick the deferred work to resume it */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	if (skb) {
		skb_tx_timestamp(skb);
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle the Tx path when slower than SuperSpeed USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		/* lan78xx_tx_prep() freed the skb on failure */
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
3228
3229 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3230 {
3231         struct lan78xx_priv *pdata = NULL;
3232         int ret;
3233         int i;
3234
3235         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3236
3237         pdata = (struct lan78xx_priv *)(dev->data[0]);
3238         if (!pdata) {
3239                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3240                 return -ENOMEM;
3241         }
3242
3243         pdata->dev = dev;
3244
3245         spin_lock_init(&pdata->rfe_ctl_lock);
3246         mutex_init(&pdata->dataport_mutex);
3247
3248         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3249
3250         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3251                 pdata->vlan_table[i] = 0;
3252
3253         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3254
3255         dev->net->features = 0;
3256
3257         if (DEFAULT_TX_CSUM_ENABLE)
3258                 dev->net->features |= NETIF_F_HW_CSUM;
3259
3260         if (DEFAULT_RX_CSUM_ENABLE)
3261                 dev->net->features |= NETIF_F_RXCSUM;
3262
3263         if (DEFAULT_TSO_CSUM_ENABLE)
3264                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3265
3266         if (DEFAULT_VLAN_RX_OFFLOAD)
3267                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3268
3269         if (DEFAULT_VLAN_FILTER_ENABLE)
3270                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3271
3272         dev->net->hw_features = dev->net->features;
3273
3274         ret = lan78xx_setup_irq_domain(dev);
3275         if (ret < 0) {
3276                 netdev_warn(dev->net,
3277                             "lan78xx_setup_irq_domain() failed : %d", ret);
3278                 goto out1;
3279         }
3280
3281         dev->net->hard_header_len += TX_OVERHEAD;
3282         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3283
3284         /* Init all registers */
3285         ret = lan78xx_reset(dev);
3286         if (ret) {
3287                 netdev_warn(dev->net, "Registers INIT FAILED....");
3288                 goto out2;
3289         }
3290
3291         ret = lan78xx_mdio_init(dev);
3292         if (ret) {
3293                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3294                 goto out2;
3295         }
3296
3297         dev->net->flags |= IFF_MULTICAST;
3298
3299         pdata->wol = WAKE_MAGIC;
3300
3301         return ret;
3302
3303 out2:
3304         lan78xx_remove_irq_domain(dev);
3305
3306 out1:
3307         netdev_warn(dev->net, "Bind routine FAILED");
3308         cancel_work_sync(&pdata->set_multicast);
3309         cancel_work_sync(&pdata->set_vlan);
3310         kfree(pdata);
3311         return ret;
3312 }
3313
3314 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3315 {
3316         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3317
3318         lan78xx_remove_irq_domain(dev);
3319
3320         lan78xx_remove_mdio(dev);
3321
3322         if (pdata) {
3323                 cancel_work_sync(&pdata->set_multicast);
3324                 cancel_work_sync(&pdata->set_vlan);
3325                 netif_dbg(dev, ifdown, dev->net, "free pdata");
3326                 kfree(pdata);
3327                 pdata = NULL;
3328                 dev->data[0] = 0;
3329         }
3330 }
3331
/* Apply the hardware-computed receive checksum from the Rx command
 * words to @skb, or fall back to software checksumming when the
 * hardware result cannot be trusted.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		/* Upper 16 bits of rx_cmd_b carry the hardware checksum */
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3349
3350 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3351                                     struct sk_buff *skb,
3352                                     u32 rx_cmd_a, u32 rx_cmd_b)
3353 {
3354         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3355             (rx_cmd_a & RX_CMD_A_FVTG_))
3356                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3357                                        (rx_cmd_b & 0xffff));
3358 }
3359
/* Hand a fully parsed receive frame up to the network stack, updating
 * Rx statistics and clearing our private skb->cb state first.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* cb held our skb_data; wipe it before the stack reuses it */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* PHC timestamping may consume the skb; if so we are done */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
3381
/* Parse a bulk-in buffer that may contain several Ethernet frames.
 *
 * Each frame is preceded by a 10-byte header: two little-endian 32-bit
 * command words (rx_cmd_a, rx_cmd_b) and one 16-bit word (rx_cmd_c).
 * Frames other than the last are cloned out of @skb and passed up
 * individually; the final frame reuses @skb itself and is returned to
 * the caller with a non-zero result (rx_process() then delivers it).
 *
 * Returns 0 only on error (short buffer or clone failure); 1 otherwise.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* Strip the three per-frame command words */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* Receive-error flag set: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* Not the last frame: clone the header and point
			 * the clone's data at this frame within skb.
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3453
3454 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3455 {
3456         if (!lan78xx_rx(dev, skb)) {
3457                 dev->net->stats.rx_errors++;
3458                 goto done;
3459         }
3460
3461         if (skb->len) {
3462                 lan78xx_skb_return(dev, skb);
3463                 return;
3464         }
3465
3466         netif_dbg(dev, rx_err, dev->net, "drop\n");
3467         dev->net->stats.rx_errors++;
3468 done:
3469         skb_queue_tail(&dev->done, skb);
3470 }
3471
3472 static void rx_complete(struct urb *urb);
3473
/* Allocate a receive skb and submit @urb on the bulk-in pipe.
 *
 * On success the skb is queued on dev->rxq (state rx_start).  On any
 * failure both the skb and the urb are freed before returning.
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOLINK
 * when the interface is stopped/asleep or the device is unreachable.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* per-skb bookkeeping lives in the skb control buffer */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, running and neither
	 * RX-halted nor autosuspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* NOTE(review): @flags is accepted but the submit below
		 * always uses GFP_ATOMIC (rxq.lock is held here).
		 */
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled; let the workqueue clear it */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* retry later from the bottom-half tasklet */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		/* submission failed: caller's urb and our skb are released */
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3534
/* Bulk-in completion handler.
 *
 * Classifies urb->status, hands the skb to the bottom half via
 * defer_bh(), and resubmits the URB when the status allows it.  For
 * fatal/shutdown statuses the URB pointer is parked in entry->urb
 * (and the local set to NULL) so the tasklet frees it with the skb.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success, but runt transfers are dropped */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stall: workqueue will clear the halt and restart RX */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* give URB ownership to the tasklet; don't resubmit */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		/* low-level USB errors: clean up without resubmitting */
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		/* reuse this URB for the next receive when possible */
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3604
3605 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3606 {
3607         int length;
3608         struct urb *urb = NULL;
3609         struct skb_data *entry;
3610         unsigned long flags;
3611         struct sk_buff_head *tqp = &dev->txq_pend;
3612         struct sk_buff *skb, *skb2;
3613         int ret;
3614         int count, pos;
3615         int skb_totallen, pkt_cnt;
3616
3617         skb_totallen = 0;
3618         pkt_cnt = 0;
3619         count = 0;
3620         length = 0;
3621         spin_lock_irqsave(&tqp->lock, flags);
3622         skb_queue_walk(tqp, skb) {
3623                 if (skb_is_gso(skb)) {
3624                         if (!skb_queue_is_first(tqp, skb)) {
3625                                 /* handle previous packets first */
3626                                 break;
3627                         }
3628                         count = 1;
3629                         length = skb->len - TX_OVERHEAD;
3630                         __skb_unlink(skb, tqp);
3631                         spin_unlock_irqrestore(&tqp->lock, flags);
3632                         goto gso_skb;
3633                 }
3634
3635                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3636                         break;
3637                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3638                 pkt_cnt++;
3639         }
3640         spin_unlock_irqrestore(&tqp->lock, flags);
3641
3642         /* copy to a single skb */
3643         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3644         if (!skb)
3645                 goto drop;
3646
3647         skb_put(skb, skb_totallen);
3648
3649         for (count = pos = 0; count < pkt_cnt; count++) {
3650                 skb2 = skb_dequeue(tqp);
3651                 if (skb2) {
3652                         length += (skb2->len - TX_OVERHEAD);
3653                         memcpy(skb->data + pos, skb2->data, skb2->len);
3654                         pos += roundup(skb2->len, sizeof(u32));
3655                         dev_kfree_skb(skb2);
3656                 }
3657         }
3658
3659 gso_skb:
3660         urb = usb_alloc_urb(0, GFP_ATOMIC);
3661         if (!urb)
3662                 goto drop;
3663
3664         entry = (struct skb_data *)skb->cb;
3665         entry->urb = urb;
3666         entry->dev = dev;
3667         entry->length = length;
3668         entry->num_of_packet = count;
3669
3670         spin_lock_irqsave(&dev->txq.lock, flags);
3671         ret = usb_autopm_get_interface_async(dev->intf);
3672         if (ret < 0) {
3673                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3674                 goto drop;
3675         }
3676
3677         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3678                           skb->data, skb->len, tx_complete, skb);
3679
3680         if (length % dev->maxpacket == 0) {
3681                 /* send USB_ZERO_PACKET */
3682                 urb->transfer_flags |= URB_ZERO_PACKET;
3683         }
3684
3685 #ifdef CONFIG_PM
3686         /* if this triggers the device is still a sleep */
3687         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3688                 /* transmission will be done in resume */
3689                 usb_anchor_urb(urb, &dev->deferred);
3690                 /* no use to process more packets */
3691                 netif_stop_queue(dev->net);
3692                 usb_put_urb(urb);
3693                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3694                 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3695                 return;
3696         }
3697 #endif
3698
3699         ret = usb_submit_urb(urb, GFP_ATOMIC);
3700         switch (ret) {
3701         case 0:
3702                 netif_trans_update(dev->net);
3703                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3704                 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3705                         netif_stop_queue(dev->net);
3706                 break;
3707         case -EPIPE:
3708                 netif_stop_queue(dev->net);
3709                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3710                 usb_autopm_put_interface_async(dev->intf);
3711                 break;
3712         case -ENODEV:
3713         case -ENOENT:
3714                 netif_dbg(dev, tx_err, dev->net,
3715                           "tx: submit urb err %d (disconnected?)", ret);
3716                 netif_device_detach(dev->net);
3717                 break;
3718         default:
3719                 usb_autopm_put_interface_async(dev->intf);
3720                 netif_dbg(dev, tx_err, dev->net,
3721                           "tx: submit urb err %d\n", ret);
3722                 break;
3723         }
3724
3725         spin_unlock_irqrestore(&dev->txq.lock, flags);
3726
3727         if (ret) {
3728                 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3729 drop:
3730                 dev->net->stats.tx_dropped++;
3731                 if (skb)
3732                         dev_kfree_skb_any(skb);
3733                 usb_free_urb(urb);
3734         } else {
3735                 netif_dbg(dev, tx_queued, dev->net,
3736                           "> tx, len %d, type 0x%x\n", length, skb->protocol);
3737         }
3738 }
3739
3740 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3741 {
3742         struct urb *urb;
3743         int i;
3744
3745         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3746                 for (i = 0; i < 10; i++) {
3747                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3748                                 break;
3749                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3750                         if (urb)
3751                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3752                                         return;
3753                 }
3754
3755                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3756                         tasklet_schedule(&dev->bh);
3757         }
3758         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3759                 netif_wake_queue(dev->net);
3760 }
3761
/* Bottom-half tasklet: drain the done queue of completed RX/TX buffers,
 * then, while the device is up, kick pending TX and refill RX.
 */
static void lan78xx_bh(struct tasklet_struct *t)
{
	struct lan78xx_net *dev = from_tasklet(dev, t, bh);
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* rx_process() either passes the skb up or requeues
			 * it on dev->done (now marked rx_cleanup)
			 */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: abort this tasklet run */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* don't refill while the RX endpoint halt is pending */
		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3804
3805 static void lan78xx_delayedwork(struct work_struct *work)
3806 {
3807         int status;
3808         struct lan78xx_net *dev;
3809
3810         dev = container_of(work, struct lan78xx_net, wq.work);
3811
3812         if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
3813                 return;
3814
3815         if (usb_autopm_get_interface(dev->intf) < 0)
3816                 return;
3817
3818         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3819                 unlink_urbs(dev, &dev->txq);
3820
3821                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3822                 if (status < 0 &&
3823                     status != -EPIPE &&
3824                     status != -ESHUTDOWN) {
3825                         if (netif_msg_tx_err(dev))
3826                                 netdev_err(dev->net,
3827                                            "can't clear tx halt, status %d\n",
3828                                            status);
3829                 } else {
3830                         clear_bit(EVENT_TX_HALT, &dev->flags);
3831                         if (status != -ESHUTDOWN)
3832                                 netif_wake_queue(dev->net);
3833                 }
3834         }
3835
3836         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3837                 unlink_urbs(dev, &dev->rxq);
3838                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3839                 if (status < 0 &&
3840                     status != -EPIPE &&
3841                     status != -ESHUTDOWN) {
3842                         if (netif_msg_rx_err(dev))
3843                                 netdev_err(dev->net,
3844                                            "can't clear rx halt, status %d\n",
3845                                            status);
3846                 } else {
3847                         clear_bit(EVENT_RX_HALT, &dev->flags);
3848                         tasklet_schedule(&dev->bh);
3849                 }
3850         }
3851
3852         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3853                 int ret = 0;
3854
3855                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3856                 if (lan78xx_link_reset(dev) < 0) {
3857                         netdev_info(dev->net, "link reset failed (%d)\n",
3858                                     ret);
3859                 }
3860         }
3861
3862         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3863                 lan78xx_update_stats(dev);
3864
3865                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3866
3867                 mod_timer(&dev->stat_monitor,
3868                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3869
3870                 dev->delta = min((dev->delta * 2), 50);
3871         }
3872
3873         usb_autopm_put_interface(dev->intf);
3874 }
3875
/* Interrupt-endpoint completion: forward status events to
 * lan78xx_status() and resubmit the URB while the interface is up.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* clear stale data before handing the buffer back to the device */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
3927
/* USB disconnect handler: unregister the netdev, detach the PHY,
 * release deferred URBs and timers, and unbind from the device.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	/* stop delayedwork from re-running once cancelled below */
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* save the pointer: phy_disconnect() clears net->phydev, and a
	 * pseudo fixed link still needs unregistering afterwards
	 */
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	/* drop any TX URBs deferred while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3972
/* ndo_tx_timeout: the TX watchdog fired; cancel in-flight TX URBs and
 * let the bottom-half tasklet restart transmission.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3980
3981 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3982                                                 struct net_device *netdev,
3983                                                 netdev_features_t features)
3984 {
3985         if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3986                 features &= ~NETIF_F_GSO_MASK;
3987
3988         features = vlan_features_check(skb, features);
3989         features = vxlan_features_check(skb, features);
3990
3991         return features;
3992 }
3993
/* net_device callbacks for the LAN78xx USB Ethernet adapter */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4009
/* Statistics timer callback: defer the actual update to the kevent
 * workqueue (process context) via EVENT_STAT_UPDATE.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4016
/* Probe: allocate the netdev, validate the expected endpoint layout
 * (bulk-in, bulk-out, interrupt-in), bind the hardware, set up the
 * interrupt URB and register the network interface.  Error paths
 * unwind in reverse order through the out1..out5 labels.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);
	mutex_init(&dev->dev_mutex);

	tasklet_setup(&dev->bh, lan78xx_bh);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* validate the descriptor-provided endpoints before using them
	 * (broken or malicious devices may not match the expected layout)
	 */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	/* set up the interrupt URB; if the buffer allocation fails the
	 * probe continues without one
	 */
	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			/* buf ownership transfers to the URB here */
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): the return value of device_set_wakeup_enable()
	 * is stored in ret but deliberately not treated as fatal
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4171
/* Compute the CRC-16 (polynomial 0x8005, LSB-first input bits, seed
 * 0xFFFF) of @len bytes at @buf, as used by the chip's wakeup-frame
 * filter registers.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	int i, bit;

	for (i = 0; i < len; i++) {
		u8 byte = buf[i];

		for (bit = 0; bit < 8; bit++) {
			/* feedback is the outgoing MSB xor'd with the
			 * next (least significant) input bit
			 */
			u16 feedback = (u16)((crc >> 15) ^ (byte & 1));

			crc <<= 1;
			if (feedback) {
				crc ^= poly;
				crc |= 0x0001;
			}
			byte >>= 1;
		}
	}

	return crc;
}
4196
/* Configure the chip for USB selective (auto) suspend: stop the MAC
 * TX/RX paths, arm wake-on-good-frame, select suspend mode 3, then
 * restart RX so wake frames can still be detected.  Returns 0 or the
 * negative errno of the first failing register access.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* reset wakeup control/status and acknowledge all wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* enable PHY/WOL wake sources and select suspend mode 3 */
	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* setting the WUPS bits presumably clears latched wakeup status
	 * (write-1-to-clear) — confirm against the LAN78xx datasheet
	 */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* RX stays enabled so incoming wake frames are seen */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4264
/* lan78xx_set_suspend - program Wake-on-LAN sources and enter suspend mode
 * @dev: driver context
 * @wol: WAKE_* flags (WAKE_PHY / WAKE_MAGIC / WAKE_BCAST / WAKE_MCAST /
 *       WAKE_UCAST / WAKE_ARP) selecting which events may wake the host
 *
 * Stops the Tx and Rx paths, clears stale wake-up enables and status,
 * programs the wake-up frame filters (WUF_CFG/WUF_MASK) and WUCSR enables
 * matching @wol, selects a PMT suspend mode, clears the WUPS status bits
 * and restarts the Rx path so wake packets can be received while suspended.
 *
 * Return: 0 on success or a negative errno from register access.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* Leading bytes of IPv4 (01:00:5E) and IPv6 (33:33) multicast MAC
	 * addresses, used as wake-up frame match patterns.
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	/* EtherType 0x0806 (ARP), matched at frame offsets 12/13 */
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	/* quiesce the MAC before reprogramming the wake-up hardware */
	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* clear all wake-up enables and latched wake-source status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable every wake-up frame filter before enabling selected ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	/* mask_index tracks the next free WUF filter slot below */
	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x7: the CRC covers the first 3 bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3: the CRC covers the first 2 bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3000: bits 12 and 13 select the EtherType bytes */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set, fall back to suspend mode 0 */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS: the status mask bits are written back as ones */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Rx must keep running so wake-up frames can be seen */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4461
/* lan78xx_suspend - power-management suspend callback
 * @intf: USB interface being suspended
 * @message: PM event; PMSG_IS_AUTO() distinguishes runtime autosuspend
 *           from system sleep
 *
 * If the netdev is open: vetoes autosuspend while Tx work is pending,
 * stops the Rx/Tx paths, flushes the Rx FIFO, kills outstanding URBs,
 * then arms either autosuspend wake-up (runtime PM) or the configured
 * Wake-on-LAN sources (system sleep). If the netdev is closed, all
 * wake-up sources are disabled so the device cannot wake the host.
 *
 * Return: 0 on success, -EBUSY to veto autosuspend, or a negative errno.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* mark asleep while still holding the txq lock so
			 * the Tx path sees a consistent state
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		/* statistics polling is pointless while suspended */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			/* arm the user-configured Wake-on-LAN sources */
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		/* disable all wake-up sources */
		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* select suspend mode 3 with no wake enables set */
		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		/* clear WUPS status by writing the mask bits back as ones */
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4571
4572 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4573 {
4574         bool pipe_halted = false;
4575         struct urb *urb;
4576
4577         while ((urb = usb_get_from_anchor(&dev->deferred))) {
4578                 struct sk_buff *skb = urb->context;
4579                 int ret;
4580
4581                 if (!netif_device_present(dev->net) ||
4582                     !netif_carrier_ok(dev->net) ||
4583                     pipe_halted) {
4584                         usb_free_urb(urb);
4585                         dev_kfree_skb(skb);
4586                         continue;
4587                 }
4588
4589                 ret = usb_submit_urb(urb, GFP_ATOMIC);
4590
4591                 if (ret == 0) {
4592                         netif_trans_update(dev->net);
4593                         lan78xx_queue_skb(&dev->txq, skb, tx_start);
4594                 } else {
4595                         usb_free_urb(urb);
4596                         dev_kfree_skb(skb);
4597
4598                         if (ret == -EPIPE) {
4599                                 netif_stop_queue(dev->net);
4600                                 pipe_halted = true;
4601                         } else if (ret == -ENODEV) {
4602                                 netif_device_detach(dev->net);
4603                         }
4604                 }
4605         }
4606
4607         return pipe_halted;
4608 }
4609
4610 static int lan78xx_resume(struct usb_interface *intf)
4611 {
4612         struct lan78xx_net *dev = usb_get_intfdata(intf);
4613         bool dev_open;
4614         int ret;
4615
4616         mutex_lock(&dev->dev_mutex);
4617
4618         netif_dbg(dev, ifup, dev->net, "resuming device");
4619
4620         dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4621
4622         if (dev_open) {
4623                 bool pipe_halted = false;
4624
4625                 ret = lan78xx_flush_tx_fifo(dev);
4626                 if (ret < 0)
4627                         goto out;
4628
4629                 if (dev->urb_intr) {
4630                         int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4631
4632                         if (ret < 0) {
4633                                 if (ret == -ENODEV)
4634                                         netif_device_detach(dev->net);
4635
4636                         netdev_warn(dev->net, "Failed to submit intr URB");
4637                         }
4638                 }
4639
4640                 spin_lock_irq(&dev->txq.lock);
4641
4642                 if (netif_device_present(dev->net)) {
4643                         pipe_halted = lan78xx_submit_deferred_urbs(dev);
4644
4645                         if (pipe_halted)
4646                                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4647                 }
4648
4649                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4650
4651                 spin_unlock_irq(&dev->txq.lock);
4652
4653                 if (!pipe_halted &&
4654                     netif_device_present(dev->net) &&
4655                     (skb_queue_len(&dev->txq) < dev->tx_qlen))
4656                         netif_start_queue(dev->net);
4657
4658                 ret = lan78xx_start_tx_path(dev);
4659                 if (ret < 0)
4660                         goto out;
4661
4662                 tasklet_schedule(&dev->bh);
4663
4664                 if (!timer_pending(&dev->stat_monitor)) {
4665                         dev->delta = 1;
4666                         mod_timer(&dev->stat_monitor,
4667                                   jiffies + STAT_UPDATE_TIMER);
4668                 }
4669
4670         } else {
4671                 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4672         }
4673
4674         ret = lan78xx_write_reg(dev, WUCSR2, 0);
4675         if (ret < 0)
4676                 goto out;
4677         ret = lan78xx_write_reg(dev, WUCSR, 0);
4678         if (ret < 0)
4679                 goto out;
4680         ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4681         if (ret < 0)
4682                 goto out;
4683
4684         ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4685                                              WUCSR2_ARP_RCD_ |
4686                                              WUCSR2_IPV6_TCPSYN_RCD_ |
4687                                              WUCSR2_IPV4_TCPSYN_RCD_);
4688         if (ret < 0)
4689                 goto out;
4690
4691         ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4692                                             WUCSR_EEE_RX_WAKE_ |
4693                                             WUCSR_PFDA_FR_ |
4694                                             WUCSR_RFE_WAKE_FR_ |
4695                                             WUCSR_WUFR_ |
4696                                             WUCSR_MPR_ |
4697                                             WUCSR_BCST_FR_);
4698         if (ret < 0)
4699                 goto out;
4700
4701         ret = 0;
4702 out:
4703         mutex_unlock(&dev->dev_mutex);
4704
4705         return ret;
4706 }
4707
4708 static int lan78xx_reset_resume(struct usb_interface *intf)
4709 {
4710         struct lan78xx_net *dev = usb_get_intfdata(intf);
4711         int ret;
4712
4713         netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
4714
4715         ret = lan78xx_reset(dev);
4716         if (ret < 0)
4717                 return ret;
4718
4719         phy_start(dev->net->phydev);
4720
4721         ret = lan78xx_resume(intf);
4722
4723         return ret;
4724 }
4725
/* USB vendor/product IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
4746
/* USB driver glue: probe/disconnect plus the suspend/resume callbacks
 * defined above; autosuspend is supported and hub-initiated LPM disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4758
/* generates module init/exit that register/deregister the USB driver */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");