Merge remote-tracking branch 'stable/linux-4.19.y' into rpi-4.19.y
[platform/kernel/linux-rpi.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy_fixed.h>
40 #include <linux/of_mdio.h>
41 #include <linux/of_net.h>
42 #include "lan78xx.h"
43
44 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME     "lan78xx"
47
48 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
49 #define THROTTLE_JIFFIES                (HZ / 8)
50 #define UNLINK_TIMEOUT_MS               3
51
52 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
53
54 #define SS_USB_PKT_SIZE                 (1024)
55 #define HS_USB_PKT_SIZE                 (512)
56 #define FS_USB_PKT_SIZE                 (64)
57
58 #define MAX_RX_FIFO_SIZE                (12 * 1024)
59 #define MAX_TX_FIFO_SIZE                (12 * 1024)
60 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
61 #define DEFAULT_BULK_IN_DELAY           (0x0800)
62 #define MAX_SINGLE_PACKET_SIZE          (9000)
63 #define DEFAULT_TX_CSUM_ENABLE          (true)
64 #define DEFAULT_RX_CSUM_ENABLE          (true)
65 #define DEFAULT_TSO_CSUM_ENABLE         (true)
66 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
67 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
68 #define TX_OVERHEAD                     (8)
69 #define RXW_PADDING                     2
70
71 #define LAN78XX_USB_VENDOR_ID           (0x0424)
72 #define LAN7800_USB_PRODUCT_ID          (0x7800)
73 #define LAN7850_USB_PRODUCT_ID          (0x7850)
74 #define LAN7801_USB_PRODUCT_ID          (0x7801)
75 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
76 #define LAN78XX_OTP_MAGIC               (0x78F3)
77
78 #define MII_READ                        1
79 #define MII_WRITE                       0
80
81 #define EEPROM_INDICATOR                (0xA5)
82 #define EEPROM_MAC_OFFSET               (0x01)
83 #define MAX_EEPROM_SIZE                 512
84 #define OTP_INDICATOR_1                 (0xF3)
85 #define OTP_INDICATOR_2                 (0xF7)
86
87 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
88                                          WAKE_MCAST | WAKE_BCAST | \
89                                          WAKE_ARP | WAKE_MAGIC)
90
91 /* USB related defines */
92 #define BULK_IN_PIPE                    1
93 #define BULK_OUT_PIPE                   2
94
95 /* default autosuspend delay (mSec)*/
96 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
97
98 /* statistic update interval (mSec) */
99 #define STAT_UPDATE_TIMER               (1 * 1000)
100
101 /* defines interrupts from interrupt EP */
102 #define MAX_INT_EP                      (32)
103 #define INT_EP_INTEP                    (31)
104 #define INT_EP_OTP_WR_DONE              (28)
105 #define INT_EP_EEE_TX_LPI_START         (26)
106 #define INT_EP_EEE_TX_LPI_STOP          (25)
107 #define INT_EP_EEE_RX_LPI               (24)
108 #define INT_EP_MAC_RESET_TIMEOUT        (23)
109 #define INT_EP_RDFO                     (22)
110 #define INT_EP_TXE                      (21)
111 #define INT_EP_USB_STATUS               (20)
112 #define INT_EP_TX_DIS                   (19)
113 #define INT_EP_RX_DIS                   (18)
114 #define INT_EP_PHY                      (17)
115 #define INT_EP_DP                       (16)
116 #define INT_EP_MAC_ERR                  (15)
117 #define INT_EP_TDFU                     (14)
118 #define INT_EP_TDFO                     (13)
119 #define INT_EP_UTX                      (12)
120 #define INT_EP_GPIO_11                  (11)
121 #define INT_EP_GPIO_10                  (10)
122 #define INT_EP_GPIO_9                   (9)
123 #define INT_EP_GPIO_8                   (8)
124 #define INT_EP_GPIO_7                   (7)
125 #define INT_EP_GPIO_6                   (6)
126 #define INT_EP_GPIO_5                   (5)
127 #define INT_EP_GPIO_4                   (4)
128 #define INT_EP_GPIO_3                   (3)
129 #define INT_EP_GPIO_2                   (2)
130 #define INT_EP_GPIO_1                   (1)
131 #define INT_EP_GPIO_0                   (0)
132
/* Statistics name strings, ETH_GSTRING_LEN bytes each.
 * The order here must match the member order of struct lan78xx_statstage
 * and struct lan78xx_statstage64: the stats code walks those structs as
 * flat u32/u64 arrays, so index i of this table names element i of both.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
182
/* One raw snapshot of the hardware statistics block, as returned by the
 * USB_VENDOR_REQUEST_GET_STATS vendor command.  All hardware counters are
 * 32 bits wide.  Member order must match lan78xx_gstrings[] and
 * struct lan78xx_statstage64 — the stats code treats this struct as a
 * flat u32 array (see lan78xx_read_stats()/lan78xx_update_stats()).
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
232
/* 64-bit running totals for the same counters as struct lan78xx_statstage,
 * accumulated across 32-bit hardware counter rollovers.  Member order must
 * stay in lockstep with lan78xx_statstage and lan78xx_gstrings[] — see
 * lan78xx_update_stats(), which walks this struct as a flat u64 array.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
282
/* Device register offsets dumped in this order for a register dump.
 * NOTE(review): the consumer is outside this chunk — presumably the
 * ethtool get_regs implementation; confirm against the ethtool ops.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
304
305 #define PHY_REG_SIZE (32 * sizeof(u32))
306
307 struct lan78xx_net;
308
/* Driver-private receive-filter/VLAN state with the deferred work used
 * to push filter updates to the hardware dataport.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;	/* back-pointer to owning device */
	u32 rfe_ctl;			/* shadow of RFE control settings */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN membership bitmap */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast filter update */
	struct work_struct set_vlan;	/* deferred VLAN table update */
	u32 wol;			/* configured Wake-on-LAN modes (WAKE_*) */
};
321
/* Lifecycle state of an skb on the driver's URB queues; kept in the
 * skb_data control block (struct skb_data below).
 */
enum skb_state {
	illegal = 0,	/* not yet assigned a valid state */
	tx_start,	/* tx URB submitted */
	tx_done,	/* tx URB completed */
	rx_start,	/* rx URB submitted */
	rx_done,	/* rx URB completed, data pending processing */
	rx_cleanup,	/* rx buffer being torn down */
	unlink_start	/* URB unlink in progress */
};
331
/* Per-skb bookkeeping stored in skb->cb (must fit in sizeof(skb->cb)) */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb, if any */
	struct lan78xx_net *dev;
	enum skb_state state;	/* current queue/lifecycle state */
	size_t length;		/* buffer length used for accounting */
	int num_of_packet;	/* number of frames carried by this urb */
};
339
/* Context attached to asynchronous vendor control requests */
struct usb_context {
	struct usb_ctrlrequest req;	/* setup packet for the control URB */
	struct lan78xx_net *dev;
};
344
345 #define EVENT_TX_HALT                   0
346 #define EVENT_RX_HALT                   1
347 #define EVENT_RX_MEMORY                 2
348 #define EVENT_STS_SPLIT                 3
349 #define EVENT_LINK_RESET                4
350 #define EVENT_RX_PAUSED                 5
351 #define EVENT_DEV_WAKING                6
352 #define EVENT_DEV_ASLEEP                7
353 #define EVENT_DEV_OPEN                  8
354 #define EVENT_STAT_UPDATE               9
355
/* Aggregate statistics state: the last raw hardware snapshot plus the
 * per-counter rollover bookkeeping used by lan78xx_update_stats() to
 * reconstruct 64-bit running totals from 32-bit hardware counters.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* last snapshot read */
	struct lan78xx_statstage	rollover_count;	/* wraps seen, per counter */
	struct lan78xx_statstage	rollover_max;	/* per-counter max before wrap */
	struct lan78xx_statstage64	curr_stat;	/* 64-bit running totals */
};
363
/* State for the driver-private IRQ domain (PHY interrupt delivery) */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* virq mapped for the PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* mask of enabled interrupt sources */
	struct mutex		irq_lock;		/* for irq bus access */
};
372
/* Per-device driver state */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* struct lan78xx_priv — TODO confirm */

	int			rx_qlen;	/* rx URB queue depth */
	int			tx_qlen;	/* tx URB queue depth */
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;		/* completed, awaiting processing */
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;		/* rx/tx bottom half */
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif msg level bitmap */

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits, presumably — verify */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats refresh timer */

	unsigned long		data[5];

	int			link_on;	/* last observed link state */
	u8			mdix_ctrl;

	u32			chipid;		/* from ID_REV (e.g. 7800/7850/7801) */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;	/* flow control via autonegotiation */
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;		/* statistics aggregation state */

	struct irq_domain_data	domain_data;
};
432
/* define external phy id (PHY identifier register values) */
#define PHY_LAN8835			(0x0007C130)
#define PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

/* TSO seems to be having some issue with Selective Acknowledge (SACK) that
 * results in lost data never being retransmitted.
 * Disable it by default now, but adds a module parameter to enable it for
 * debug purposes (the full cause is not currently understood).
 */
static bool enable_tso;
module_param(enable_tso, bool, 0644);
MODULE_PARM_DESC(enable_tso, "Enables TCP segmentation offload");

/* Interrupt URB interval, in milliseconds; 8 microframes make up 1 ms at
 * high/super speed.  Default 8 ms; overridable at module load time.
 */
#define INT_URB_MICROFRAMES_PER_MS	8
static int int_urb_interval_ms = 8;
module_param(int_urb_interval_ms, int, 0);
MODULE_PARM_DESC(int_urb_interval_ms, "Override usb interrupt urb interval");
455
456 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
457 {
458         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
459         int ret;
460
461         if (!buf)
462                 return -ENOMEM;
463
464         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
465                               USB_VENDOR_REQUEST_READ_REGISTER,
466                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
467                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
468         if (likely(ret >= 0)) {
469                 le32_to_cpus(buf);
470                 *data = *buf;
471         } else {
472                 netdev_warn(dev->net,
473                             "Failed to read register index 0x%08x. ret = %d",
474                             index, ret);
475         }
476
477         kfree(buf);
478
479         return ret;
480 }
481
482 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
483 {
484         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
485         int ret;
486
487         if (!buf)
488                 return -ENOMEM;
489
490         *buf = data;
491         cpu_to_le32s(buf);
492
493         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
494                               USB_VENDOR_REQUEST_WRITE_REGISTER,
495                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
496                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
497         if (unlikely(ret < 0)) {
498                 netdev_warn(dev->net,
499                             "Failed to write register index 0x%08x. ret = %d",
500                             index, ret);
501         }
502
503         kfree(buf);
504
505         return ret;
506 }
507
508 static int lan78xx_read_stats(struct lan78xx_net *dev,
509                               struct lan78xx_statstage *data)
510 {
511         int ret = 0;
512         int i;
513         struct lan78xx_statstage *stats;
514         u32 *src;
515         u32 *dst;
516
517         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
518         if (!stats)
519                 return -ENOMEM;
520
521         ret = usb_control_msg(dev->udev,
522                               usb_rcvctrlpipe(dev->udev, 0),
523                               USB_VENDOR_REQUEST_GET_STATS,
524                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
525                               0,
526                               0,
527                               (void *)stats,
528                               sizeof(*stats),
529                               USB_CTRL_SET_TIMEOUT);
530         if (likely(ret >= 0)) {
531                 src = (u32 *)stats;
532                 dst = (u32 *)data;
533                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
534                         le32_to_cpus(&src[i]);
535                         dst[i] = src[i];
536                 }
537         } else {
538                 netdev_warn(dev->net,
539                             "Failed to read stat ret = 0x%x", ret);
540         }
541
542         kfree(stats);
543
544         return ret;
545 }
546
547 #define check_counter_rollover(struct1, dev_stats, member) {    \
548         if (struct1->member < dev_stats.saved.member)           \
549                 dev_stats.rollover_count.member++;              \
550         }
551
552 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
553                                         struct lan78xx_statstage *stats)
554 {
555         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
556         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
557         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
558         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
559         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
560         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
561         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
562         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
563         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
564         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
565         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
566         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
567         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
568         check_counter_rollover(stats, dev->stats, rx_pause_frames);
569         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
570         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
571         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
572         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
573         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
574         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
575         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
576         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
577         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
578         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
579         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
580         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
581         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
582         check_counter_rollover(stats, dev->stats, tx_single_collisions);
583         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
584         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
585         check_counter_rollover(stats, dev->stats, tx_late_collisions);
586         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
587         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
588         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
589         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
590         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
591         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
592         check_counter_rollover(stats, dev->stats, tx_pause_frames);
593         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
594         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
595         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
596         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
597         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
598         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
599         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
600         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
601         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
602
603         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
604 }
605
/* Pull a fresh statistics snapshot from the device and rebuild the 64-bit
 * running totals.
 *
 * The statstage structs are walked as flat u32/u64 arrays, so this relies
 * on lan78xx_statstage and lan78xx_statstage64 having identical member
 * order.  Each 64-bit value is reconstructed as:
 *     curr = snapshot + rollover_count * (rollover_max + 1)
 * NOTE(review): rollover_max is assumed to be initialized elsewhere in the
 * driver to each counter's wrap value — confirm against the probe path.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	/* device must be resumed before issuing the vendor request */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* read_stats returns the transfer length (> 0) on success */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
633
634 /* Loop until the read is completed with timeout called with phy_mutex held */
635 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
636 {
637         unsigned long start_time = jiffies;
638         u32 val;
639         int ret;
640
641         do {
642                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
643                 if (unlikely(ret < 0))
644                         return -EIO;
645
646                 if (!(val & MII_ACC_MII_BUSY_))
647                         return 0;
648         } while (!time_after(jiffies, start_time + HZ));
649
650         return -EIO;
651 }
652
653 static inline u32 mii_access(int id, int index, int read)
654 {
655         u32 ret;
656
657         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
658         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
659         if (read)
660                 ret |= MII_ACC_MII_READ_;
661         else
662                 ret |= MII_ACC_MII_WRITE_;
663         ret |= MII_ACC_MII_BUSY_;
664
665         return ret;
666 }
667
668 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
669 {
670         unsigned long start_time = jiffies;
671         u32 val;
672         int ret;
673
674         do {
675                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
676                 if (unlikely(ret < 0))
677                         return -EIO;
678
679                 if (!(val & E2P_CMD_EPC_BUSY_) ||
680                     (val & E2P_CMD_EPC_TIMEOUT_))
681                         break;
682                 usleep_range(40, 100);
683         } while (!time_after(jiffies, start_time + HZ));
684
685         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
686                 netdev_warn(dev->net, "EEPROM read operation timeout");
687                 return -EIO;
688         }
689
690         return 0;
691 }
692
693 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
694 {
695         unsigned long start_time = jiffies;
696         u32 val;
697         int ret;
698
699         do {
700                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
701                 if (unlikely(ret < 0))
702                         return -EIO;
703
704                 if (!(val & E2P_CMD_EPC_BUSY_))
705                         return 0;
706
707                 usleep_range(40, 100);
708         } while (!time_after(jiffies, start_time + HZ));
709
710         netdev_warn(dev->net, "EEPROM is busy");
711         return -EIO;
712 }
713
714 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
715                                    u32 length, u8 *data)
716 {
717         u32 val;
718         u32 saved;
719         int i, ret;
720         int retval;
721
722         /* depends on chip, some EEPROM pins are muxed with LED function.
723          * disable & restore LED function to access EEPROM.
724          */
725         ret = lan78xx_read_reg(dev, HW_CFG, &val);
726         saved = val;
727         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
728                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
729                 ret = lan78xx_write_reg(dev, HW_CFG, val);
730         }
731
732         retval = lan78xx_eeprom_confirm_not_busy(dev);
733         if (retval)
734                 return retval;
735
736         for (i = 0; i < length; i++) {
737                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
738                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
739                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
740                 if (unlikely(ret < 0)) {
741                         retval = -EIO;
742                         goto exit;
743                 }
744
745                 retval = lan78xx_wait_eeprom(dev);
746                 if (retval < 0)
747                         goto exit;
748
749                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
750                 if (unlikely(ret < 0)) {
751                         retval = -EIO;
752                         goto exit;
753                 }
754
755                 data[i] = val & 0xFF;
756                 offset++;
757         }
758
759         retval = 0;
760 exit:
761         if (dev->chipid == ID_REV_CHIP_ID_7800_)
762                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
763
764         return retval;
765 }
766
767 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
768                                u32 length, u8 *data)
769 {
770         u8 sig;
771         int ret;
772
773         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
774         if ((ret == 0) && (sig == EEPROM_INDICATOR))
775                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
776         else
777                 ret = -EINVAL;
778
779         return ret;
780 }
781
782 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
783                                     u32 length, u8 *data)
784 {
785         u32 val;
786         u32 saved;
787         int i, ret;
788         int retval;
789
790         /* depends on chip, some EEPROM pins are muxed with LED function.
791          * disable & restore LED function to access EEPROM.
792          */
793         ret = lan78xx_read_reg(dev, HW_CFG, &val);
794         saved = val;
795         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
796                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
797                 ret = lan78xx_write_reg(dev, HW_CFG, val);
798         }
799
800         retval = lan78xx_eeprom_confirm_not_busy(dev);
801         if (retval)
802                 goto exit;
803
804         /* Issue write/erase enable command */
805         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
806         ret = lan78xx_write_reg(dev, E2P_CMD, val);
807         if (unlikely(ret < 0)) {
808                 retval = -EIO;
809                 goto exit;
810         }
811
812         retval = lan78xx_wait_eeprom(dev);
813         if (retval < 0)
814                 goto exit;
815
816         for (i = 0; i < length; i++) {
817                 /* Fill data register */
818                 val = data[i];
819                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
820                 if (ret < 0) {
821                         retval = -EIO;
822                         goto exit;
823                 }
824
825                 /* Send "write" command */
826                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
827                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
828                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
829                 if (ret < 0) {
830                         retval = -EIO;
831                         goto exit;
832                 }
833
834                 retval = lan78xx_wait_eeprom(dev);
835                 if (retval < 0)
836                         goto exit;
837
838                 offset++;
839         }
840
841         retval = 0;
842 exit:
843         if (dev->chipid == ID_REV_CHIP_ID_7800_)
844                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
845
846         return retval;
847 }
848
/* Read @length bytes of raw OTP memory starting at @offset into @data.
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set), power it
 * up first and poll (up to ~1s) for the bit to clear.  Each byte is
 * then read by programming the two address registers, issuing a READ
 * function command, kicking the GO bit and polling OTP_STATUS until
 * the controller is no longer busy.
 *
 * Returns 0 on success or -EIO on a poll timeout.  Note that the
 * return codes of the individual register accesses are not checked.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
                                u32 length, u8 *data)
{
        int i;
        int ret;
        u32 buf;
        unsigned long timeout;

        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

        if (buf & OTP_PWR_DN_PWRDN_N_) {
                /* clear it and wait to be cleared */
                ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

                timeout = jiffies + HZ;
                do {
                        usleep_range(1, 10);
                        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
                        if (time_after(jiffies, timeout)) {
                                netdev_warn(dev->net,
                                            "timeout on OTP_PWR_DN");
                                return -EIO;
                        }
                } while (buf & OTP_PWR_DN_PWRDN_N_);
        }

        for (i = 0; i < length; i++) {
                /* byte address is split across the two address registers */
                ret = lan78xx_write_reg(dev, OTP_ADDR1,
                                        ((offset + i) >> 8) & OTP_ADDR1_15_11);
                ret = lan78xx_write_reg(dev, OTP_ADDR2,
                                        ((offset + i) & OTP_ADDR2_10_3));

                ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
                ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

                timeout = jiffies + HZ;
                do {
                        udelay(1);
                        ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
                        if (time_after(jiffies, timeout)) {
                                netdev_warn(dev->net,
                                            "timeout on OTP_STATUS");
                                return -EIO;
                        }
                } while (buf & OTP_STATUS_BUSY_);

                ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

                data[i] = (u8)(buf & 0xFF);
        }

        return 0;
}
902
/* Program @length bytes from @data into raw OTP memory at @offset.
 *
 * Powers up the OTP block if needed (polling OTP_PWR_DN for up to
 * ~1s), selects BYTE program mode, then for each byte programs the
 * address and data registers, issues the program/verify test command,
 * kicks the GO bit and polls OTP_STATUS until not busy.
 *
 * Returns 0 on success or -EIO on a poll timeout.  Individual
 * register-access return codes are not checked.  NOTE(review): OTP is
 * one-time programmable; callers are expected to validate the data
 * before invoking this.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
                                 u32 length, u8 *data)
{
        int i;
        int ret;
        u32 buf;
        unsigned long timeout;

        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

        if (buf & OTP_PWR_DN_PWRDN_N_) {
                /* clear it and wait to be cleared */
                ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

                timeout = jiffies + HZ;
                do {
                        udelay(1);
                        ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
                        if (time_after(jiffies, timeout)) {
                                netdev_warn(dev->net,
                                            "timeout on OTP_PWR_DN completion");
                                return -EIO;
                        }
                } while (buf & OTP_PWR_DN_PWRDN_N_);
        }

        /* set to BYTE program mode */
        ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

        for (i = 0; i < length; i++) {
                /* byte address is split across the two address registers */
                ret = lan78xx_write_reg(dev, OTP_ADDR1,
                                        ((offset + i) >> 8) & OTP_ADDR1_15_11);
                ret = lan78xx_write_reg(dev, OTP_ADDR2,
                                        ((offset + i) & OTP_ADDR2_10_3));
                ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
                ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
                ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

                timeout = jiffies + HZ;
                do {
                        udelay(1);
                        ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
                        if (time_after(jiffies, timeout)) {
                                netdev_warn(dev->net,
                                            "Timeout on OTP_STATUS completion");
                                return -EIO;
                        }
                } while (buf & OTP_STATUS_BUSY_);
        }

        return 0;
}
955
956 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
957                             u32 length, u8 *data)
958 {
959         u8 sig;
960         int ret;
961
962         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
963
964         if (ret == 0) {
965                 if (sig == OTP_INDICATOR_1)
966                         offset = offset;
967                 else if (sig == OTP_INDICATOR_2)
968                         offset += 0x100;
969                 else
970                         ret = -EINVAL;
971                 if (!ret)
972                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
973         }
974
975         return ret;
976 }
977
978 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
979 {
980         int i, ret;
981
982         for (i = 0; i < 100; i++) {
983                 u32 dp_sel;
984
985                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
986                 if (unlikely(ret < 0))
987                         return -EIO;
988
989                 if (dp_sel & DP_SEL_DPRDY_)
990                         return 0;
991
992                 usleep_range(40, 100);
993         }
994
995         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
996
997         return -EIO;
998 }
999
/* Write @length 32-bit words from @buf into the internal data-port
 * RAM selected by @ram_select, starting at word address @addr.
 *
 * Serialized against other data-port users by pdata->dataport_mutex.
 * Each word is written as: set DP_ADDR, set DP_DATA, issue
 * DP_CMD_WRITE_, then wait for the port to become ready again.
 *
 * Returns 0 on success or a negative error from the busy-wait.
 * NOTE(review): returns 0 without doing anything when the autopm
 * reference cannot be taken, so callers cannot distinguish that case
 * from success - confirm this is intended.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
                                  u32 addr, u32 length, u32 *buf)
{
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        u32 dp_sel;
        int i, ret;

        if (usb_autopm_get_interface(dev->intf) < 0)
                        return 0;

        mutex_lock(&pdata->dataport_mutex);

        ret = lan78xx_dataport_wait_not_busy(dev);
        if (ret < 0)
                goto done;

        ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

        /* select the target RAM, preserving the other DP_SEL bits */
        dp_sel &= ~DP_SEL_RSEL_MASK_;
        dp_sel |= ram_select;
        ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

        for (i = 0; i < length; i++) {
                ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

                ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

                ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

                ret = lan78xx_dataport_wait_not_busy(dev);
                if (ret < 0)
                        goto done;
        }

done:
        mutex_unlock(&pdata->dataport_mutex);
        usb_autopm_put_interface(dev->intf);

        return ret;
}
1040
1041 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1042                                     int index, u8 addr[ETH_ALEN])
1043 {
1044         u32     temp;
1045
1046         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1047                 temp = addr[3];
1048                 temp = addr[2] | (temp << 8);
1049                 temp = addr[1] | (temp << 8);
1050                 temp = addr[0] | (temp << 8);
1051                 pdata->pfilter_table[index][1] = temp;
1052                 temp = addr[5];
1053                 temp = addr[4] | (temp << 8);
1054                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1055                 pdata->pfilter_table[index][0] = temp;
1056         }
1057 }
1058
1059 /* returns hash bit number for given MAC address */
1060 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1061 {
1062         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1063 }
1064
/* Workqueue handler that pushes the software filter state built by
 * lan78xx_set_multicast() to the hardware: the multicast hash table
 * via the data port, the perfect-filter (MAF) registers, and finally
 * RFE_CTL.  Runs in process context because USB register access can
 * sleep.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
        struct lan78xx_priv *pdata =
                        container_of(param, struct lan78xx_priv, set_multicast);
        struct lan78xx_net *dev = pdata->dev;
        int i;
        int ret;

        netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
                  pdata->rfe_ctl);

        lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
                               DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

        for (i = 1; i < NUM_OF_MAF; i++) {
                /* clear MAF_HI first so the slot is invalid while its
                 * address words are being updated
                 */
                ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
                ret = lan78xx_write_reg(dev, MAF_LO(i),
                                        pdata->pfilter_table[i][1]);
                ret = lan78xx_write_reg(dev, MAF_HI(i),
                                        pdata->pfilter_table[i][0]);
        }

        ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1089
1090 static void lan78xx_set_multicast(struct net_device *netdev)
1091 {
1092         struct lan78xx_net *dev = netdev_priv(netdev);
1093         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1094         unsigned long flags;
1095         int i;
1096
1097         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1098
1099         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1100                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1101
1102         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1103                         pdata->mchash_table[i] = 0;
1104         /* pfilter_table[0] has own HW address */
1105         for (i = 1; i < NUM_OF_MAF; i++) {
1106                         pdata->pfilter_table[i][0] =
1107                         pdata->pfilter_table[i][1] = 0;
1108         }
1109
1110         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1111
1112         if (dev->net->flags & IFF_PROMISC) {
1113                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1114                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1115         } else {
1116                 if (dev->net->flags & IFF_ALLMULTI) {
1117                         netif_dbg(dev, drv, dev->net,
1118                                   "receive all multicast enabled");
1119                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1120                 }
1121         }
1122
1123         if (netdev_mc_count(dev->net)) {
1124                 struct netdev_hw_addr *ha;
1125                 int i;
1126
1127                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1128
1129                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1130
1131                 i = 1;
1132                 netdev_for_each_mc_addr(ha, netdev) {
1133                         /* set first 32 into Perfect Filter */
1134                         if (i < 33) {
1135                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1136                         } else {
1137                                 u32 bitnum = lan78xx_hash(ha->addr);
1138
1139                                 pdata->mchash_table[bitnum / 32] |=
1140                                                         (1 << (bitnum % 32));
1141                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1142                         }
1143                         i++;
1144                 }
1145         }
1146
1147         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1148
1149         /* defer register writes to a sleepable context */
1150         schedule_work(&pdata->set_multicast);
1151 }
1152
1153 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1154                                       u16 lcladv, u16 rmtadv)
1155 {
1156         u32 flow = 0, fct_flow = 0;
1157         int ret;
1158         u8 cap;
1159
1160         if (dev->fc_autoneg)
1161                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1162         else
1163                 cap = dev->fc_request_control;
1164
1165         if (cap & FLOW_CTRL_TX)
1166                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1167
1168         if (cap & FLOW_CTRL_RX)
1169                 flow |= FLOW_CR_RX_FCEN_;
1170
1171         if (dev->udev->speed == USB_SPEED_SUPER)
1172                 fct_flow = 0x817;
1173         else if (dev->udev->speed == USB_SPEED_HIGH)
1174                 fct_flow = 0x211;
1175
1176         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1177                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1178                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1179
1180         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1181
1182         /* threshold value should be set before enabling flow */
1183         ret = lan78xx_write_reg(dev, FLOW, flow);
1184
1185         return 0;
1186 }
1187
/* Handle a PHY interrupt / link change event (EVENT_LINK_RESET).
 *
 * Clears the chip's PHY interrupt status, refreshes the PHY state and:
 *  - on link down: resets the MAC and stops the statistics timer;
 *  - on link up: tunes USB3 U1/U2 link power states according to the
 *    negotiated speed, updates flow control from the advertisement
 *    registers, restarts the statistics timer and kicks the bottom
 *    half.
 *
 * Returns 0 or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
        struct phy_device *phydev = dev->net->phydev;
        struct ethtool_link_ksettings ecmd;
        int ladv, radv, ret;
        u32 buf;

        /* clear LAN78xx interrupt status */
        ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
        if (unlikely(ret < 0))
                return -EIO;

        phy_read_status(phydev);

        if (!phydev->link && dev->link_on) {
                dev->link_on = false;

                /* reset MAC */
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                if (unlikely(ret < 0))
                        return -EIO;
                buf |= MAC_CR_RST_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
                if (unlikely(ret < 0))
                        return -EIO;

                del_timer(&dev->stat_monitor);
        } else if (phydev->link && !dev->link_on) {
                dev->link_on = true;

                phy_ethtool_ksettings_get(phydev, &ecmd);

                if (dev->udev->speed == USB_SPEED_SUPER) {
                        /* at gigabit only U1 is allowed; otherwise
                         * both U1 and U2 low-power states are enabled
                         */
                        if (ecmd.base.speed == 1000) {
                                /* disable U2 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                                /* enable U1 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                        } else {
                                /* enable U1 & U2 */
                                ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
                                buf |= USB_CFG1_DEV_U2_INIT_EN_;
                                buf |= USB_CFG1_DEV_U1_INIT_EN_;
                                ret = lan78xx_write_reg(dev, USB_CFG1, buf);
                        }
                }

                ladv = phy_read(phydev, MII_ADVERTISE);
                if (ladv < 0)
                        return ladv;

                radv = phy_read(phydev, MII_LPA);
                if (radv < 0)
                        return radv;

                netif_dbg(dev, link, dev->net,
                          "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
                          ecmd.base.speed, ecmd.base.duplex, ladv, radv);

                ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
                                                 radv);

                /* restart periodic statistics collection */
                if (!timer_pending(&dev->stat_monitor)) {
                        dev->delta = 1;
                        mod_timer(&dev->stat_monitor,
                                  jiffies + STAT_UPDATE_TIMER);
                }

                tasklet_schedule(&dev->bh);
        }

        return ret;
}
1265
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.      hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
        /* record which event is pending, then kick the delayed work;
         * a failed schedule means the work was already queued, so the
         * event flag set above will still be seen when it runs
         */
        set_bit(work, &dev->flags);
        if (!schedule_delayed_work(&dev->wq, 0))
                netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1277
1278 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1279 {
1280         u32 intdata;
1281
1282         if (urb->actual_length != 4) {
1283                 netdev_warn(dev->net,
1284                             "unexpected urb length %d", urb->actual_length);
1285                 return;
1286         }
1287
1288         memcpy(&intdata, urb->transfer_buffer, 4);
1289         le32_to_cpus(&intdata);
1290
1291         if (intdata & INT_ENP_PHY_INT) {
1292                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1293                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1294
1295                 if (dev->domain_data.phyirq > 0) {
1296                         local_irq_disable();
1297                         generic_handle_irq(dev->domain_data.phyirq);
1298                         local_irq_enable();
1299                 }
1300         } else
1301                 netdev_warn(dev->net,
1302                             "unexpected interrupt: 0x%08x\n", intdata);
1303 }
1304
/* ethtool get_eeprom_len: advertise the maximum EEPROM size */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
        return MAX_EEPROM_SIZE;
}
1309
1310 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1311                                       struct ethtool_eeprom *ee, u8 *data)
1312 {
1313         struct lan78xx_net *dev = netdev_priv(netdev);
1314         int ret;
1315
1316         ret = usb_autopm_get_interface(dev->intf);
1317         if (ret)
1318                 return ret;
1319
1320         ee->magic = LAN78XX_EEPROM_MAGIC;
1321
1322         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1323
1324         usb_autopm_put_interface(dev->intf);
1325
1326         return ret;
1327 }
1328
1329 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1330                                       struct ethtool_eeprom *ee, u8 *data)
1331 {
1332         struct lan78xx_net *dev = netdev_priv(netdev);
1333         int ret;
1334
1335         ret = usb_autopm_get_interface(dev->intf);
1336         if (ret)
1337                 return ret;
1338
1339         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1340          * to load data from EEPROM
1341          */
1342         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1343                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1344         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1345                  (ee->offset == 0) &&
1346                  (ee->len == 512) &&
1347                  (data[0] == OTP_INDICATOR_1))
1348                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1349
1350         usb_autopm_put_interface(dev->intf);
1351
1352         return ret;
1353 }
1354
/* ethtool get_strings: export statistics names for ETH_SS_STATS */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
                                u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1361
1362 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1363 {
1364         if (sset == ETH_SS_STATS)
1365                 return ARRAY_SIZE(lan78xx_gstrings);
1366         else
1367                 return -EOPNOTSUPP;
1368 }
1369
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy
 * the current snapshot out under the stats lock
 */
static void lan78xx_get_stats(struct net_device *netdev,
                              struct ethtool_stats *stats, u64 *data)
{
        struct lan78xx_net *dev = netdev_priv(netdev);

        lan78xx_update_stats(dev);

        mutex_lock(&dev->stats.access_lock);
        memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
        mutex_unlock(&dev->stats.access_lock);
}
1381
1382 static void lan78xx_get_wol(struct net_device *netdev,
1383                             struct ethtool_wolinfo *wol)
1384 {
1385         struct lan78xx_net *dev = netdev_priv(netdev);
1386         int ret;
1387         u32 buf;
1388         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1389
1390         if (usb_autopm_get_interface(dev->intf) < 0)
1391                         return;
1392
1393         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1394         if (unlikely(ret < 0)) {
1395                 wol->supported = 0;
1396                 wol->wolopts = 0;
1397         } else {
1398                 if (buf & USB_CFG_RMT_WKP_) {
1399                         wol->supported = WAKE_ALL;
1400                         wol->wolopts = pdata->wol;
1401                 } else {
1402                         wol->supported = 0;
1403                         wol->wolopts = 0;
1404                 }
1405         }
1406
1407         usb_autopm_put_interface(dev->intf);
1408 }
1409
1410 static int lan78xx_set_wol(struct net_device *netdev,
1411                            struct ethtool_wolinfo *wol)
1412 {
1413         struct lan78xx_net *dev = netdev_priv(netdev);
1414         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1415         int ret;
1416
1417         ret = usb_autopm_get_interface(dev->intf);
1418         if (ret < 0)
1419                 return ret;
1420
1421         if (wol->wolopts & ~WAKE_ALL)
1422                 return -EINVAL;
1423
1424         pdata->wol = wol->wolopts;
1425
1426         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1427
1428         phy_ethtool_set_wol(netdev->phydev, wol);
1429
1430         usb_autopm_put_interface(dev->intf);
1431
1432         return ret;
1433 }
1434
/* ethtool get_eee: report Energy Efficient Ethernet state.
 *
 * PHY-level advertisement data comes from phy_ethtool_get_eee();
 * MAC-level enablement is taken from MAC_CR_EEE_EN_, and when enabled
 * the TX LPI request delay register is reported as the LPI timer
 * (both are in microseconds).  Returns 0 or a negative error code.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        ret = phy_ethtool_get_eee(phydev, edata);
        if (ret < 0)
                goto exit;

        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
        if (buf & MAC_CR_EEE_EN_) {
                edata->eee_enabled = true;
                /* EEE is active only if both sides advertise it */
                edata->eee_active = !!(edata->advertised &
                                       edata->lp_advertised);
                edata->tx_lpi_enabled = true;
                /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
                ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
                edata->tx_lpi_timer = buf;
        } else {
                edata->eee_enabled = false;
                edata->eee_active = false;
                edata->tx_lpi_enabled = false;
                edata->tx_lpi_timer = 0;
        }

        ret = 0;
exit:
        usb_autopm_put_interface(dev->intf);

        return ret;
}
1472
/* ethtool set_eee: enable/disable Energy Efficient Ethernet.
 *
 * Enabling sets MAC_CR_EEE_EN_, pushes the advertisement to the PHY
 * and programs the TX LPI request delay; disabling just clears the
 * MAC enable bit.  NOTE(review): register write errors are ignored
 * and the function always returns 0 - confirm this is intended.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
        struct lan78xx_net *dev = netdev_priv(net);
        int ret;
        u32 buf;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        if (edata->eee_enabled) {
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf |= MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);

                phy_ethtool_set_eee(net->phydev, edata);

                /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same
                 * microsecond unit
                 */
                buf = (u32)edata->tx_lpi_timer;
                ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
        } else {
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf &= ~MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
        }

        usb_autopm_put_interface(dev->intf);

        return 0;
}
1502
/* ethtool get_link: refresh the PHY status and report link state */
static u32 lan78xx_get_link(struct net_device *net)
{
        phy_read_status(net->phydev);

        return net->phydev->link;
}
1509
1510 static void lan78xx_get_drvinfo(struct net_device *net,
1511                                 struct ethtool_drvinfo *info)
1512 {
1513         struct lan78xx_net *dev = netdev_priv(net);
1514
1515         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1516         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1517 }
1518
/* ethtool get_msglevel: return the driver's message-enable bitmap */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
        struct lan78xx_net *dev = netdev_priv(net);

        return dev->msg_enable;
}
1525
/* ethtool set_msglevel: set the driver's message-enable bitmap */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
        struct lan78xx_net *dev = netdev_priv(net);

        dev->msg_enable = level;
}
1532
1533 static int lan78xx_get_link_ksettings(struct net_device *net,
1534                                       struct ethtool_link_ksettings *cmd)
1535 {
1536         struct lan78xx_net *dev = netdev_priv(net);
1537         struct phy_device *phydev = net->phydev;
1538         int ret;
1539
1540         ret = usb_autopm_get_interface(dev->intf);
1541         if (ret < 0)
1542                 return ret;
1543
1544         phy_ethtool_ksettings_get(phydev, cmd);
1545
1546         usb_autopm_put_interface(dev->intf);
1547
1548         return ret;
1549 }
1550
/* ethtool set_link_ksettings: apply speed/duplex/autoneg to the PHY.
 *
 * When autonegotiation is disabled the link is briefly bounced by
 * toggling BMCR_LOOPBACK around a 1ms delay so the newly forced
 * settings take effect.  NOTE(review): using the loopback bit to
 * force the link down is unusual - confirm against the PHY datasheet.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
                                      const struct ethtool_link_ksettings *cmd)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        int ret = 0;
        int temp;

        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;

        /* change speed & duplex */
        ret = phy_ethtool_ksettings_set(phydev, cmd);

        if (!cmd->base.autoneg) {
                /* force link down */
                temp = phy_read(phydev, MII_BMCR);
                phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
                mdelay(1);
                phy_write(phydev, MII_BMCR, temp);
        }

        usb_autopm_put_interface(dev->intf);

        return ret;
}
1578
/* ethtool get_pauseparam: report the flow-control configuration the
 * user requested (not necessarily the negotiated result).
 * NOTE(review): the phy_ethtool_ksettings_get() result in ecmd is
 * never used here - looks like dead code; confirm before removing.
 */
static void lan78xx_get_pause(struct net_device *net,
                              struct ethtool_pauseparam *pause)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        struct ethtool_link_ksettings ecmd;

        phy_ethtool_ksettings_get(phydev, &ecmd);

        pause->autoneg = dev->fc_autoneg;

        if (dev->fc_request_control & FLOW_CTRL_TX)
                pause->tx_pause = 1;

        if (dev->fc_request_control & FLOW_CTRL_RX)
                pause->rx_pause = 1;
}
1596
/* ethtool set_pauseparam: record the requested flow-control settings
 * and, when autonegotiation is active, rewrite the PHY's pause
 * advertisement bits accordingly.
 *
 * Returns 0 on success or -EINVAL when pause autoneg is requested
 * while link autonegotiation is disabled.
 */
static int lan78xx_set_pause(struct net_device *net,
                             struct ethtool_pauseparam *pause)
{
        struct lan78xx_net *dev = netdev_priv(net);
        struct phy_device *phydev = net->phydev;
        struct ethtool_link_ksettings ecmd;
        int ret;

        phy_ethtool_ksettings_get(phydev, &ecmd);

        /* pause autoneg only makes sense with link autoneg enabled */
        if (pause->autoneg && !ecmd.base.autoneg) {
                ret = -EINVAL;
                goto exit;
        }

        dev->fc_request_control = 0;
        if (pause->rx_pause)
                dev->fc_request_control |= FLOW_CTRL_RX;

        if (pause->tx_pause)
                dev->fc_request_control |= FLOW_CTRL_TX;

        if (ecmd.base.autoneg) {
                u32 mii_adv;
                u32 advertising;

                /* replace the pause bits in the current advertisement
                 * with the newly requested configuration
                 */
                ethtool_convert_link_mode_to_legacy_u32(
                        &advertising, ecmd.link_modes.advertising);

                advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
                mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
                advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

                ethtool_convert_legacy_u32_to_link_mode(
                        ecmd.link_modes.advertising, advertising);

                phy_ethtool_ksettings_set(phydev, &ecmd);
        }

        dev->fc_autoneg = pause->autoneg;

        ret = 0;
exit:
        return ret;
}
1642
1643 static int lan78xx_get_regs_len(struct net_device *netdev)
1644 {
1645         if (!netdev->phydev)
1646                 return (sizeof(lan78xx_regs));
1647         else
1648                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1649 }
1650
/* ethtool get_regs: dump all device/MAC registers and, when a PHY is
 * attached, its first 32 MII registers.  The index `i` deliberately
 * keeps counting across both loops so the PHY values land directly
 * after the MAC registers in @buf.
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
                 void *buf)
{
        u32 *data = buf;
        int i, j;
        struct lan78xx_net *dev = netdev_priv(netdev);

        /* Read Device/MAC registers */
        for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
                lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

        if (!netdev->phydev)
                return;

        /* Read PHY registers */
        for (j = 0; j < 32; i++, j++)
                data[i] = phy_read(netdev->phydev, j);
}
1670
/* ethtool operations supported by this driver */
static const struct ethtool_ops lan78xx_ethtool_ops = {
        .get_link       = lan78xx_get_link,
        .nway_reset     = phy_ethtool_nway_reset,
        .get_drvinfo    = lan78xx_get_drvinfo,
        .get_msglevel   = lan78xx_get_msglevel,
        .set_msglevel   = lan78xx_set_msglevel,
        .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
        .get_eeprom     = lan78xx_ethtool_get_eeprom,
        .set_eeprom     = lan78xx_ethtool_set_eeprom,
        .get_ethtool_stats = lan78xx_get_stats,
        .get_sset_count = lan78xx_get_sset_count,
        .get_strings    = lan78xx_get_strings,
        .get_wol        = lan78xx_get_wol,
        .set_wol        = lan78xx_set_wol,
        .get_eee        = lan78xx_get_eee,
        .set_eee        = lan78xx_set_eee,
        .get_pauseparam = lan78xx_get_pause,
        .set_pauseparam = lan78xx_set_pause,
        .get_link_ksettings = lan78xx_get_link_ksettings,
        .set_link_ksettings = lan78xx_set_link_ksettings,
        .get_regs_len   = lan78xx_get_regs_len,
        .get_regs       = lan78xx_get_regs,
};
1694
1695 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1696 {
1697         if (!netif_running(netdev))
1698                 return -EINVAL;
1699
1700         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1701 }
1702
1703 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1704 {
1705         u32 addr_lo, addr_hi;
1706         int ret;
1707         u8 addr[6];
1708
1709         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1710         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1711
1712         addr[0] = addr_lo & 0xFF;
1713         addr[1] = (addr_lo >> 8) & 0xFF;
1714         addr[2] = (addr_lo >> 16) & 0xFF;
1715         addr[3] = (addr_lo >> 24) & 0xFF;
1716         addr[4] = addr_hi & 0xFF;
1717         addr[5] = (addr_hi >> 8) & 0xFF;
1718
1719         if (!is_valid_ether_addr(addr)) {
1720                 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1721                         /* valid address present in Device Tree */
1722                         netif_dbg(dev, ifup, dev->net,
1723                                   "MAC address read from Device Tree");
1724                 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1725                                                  ETH_ALEN, addr) == 0) ||
1726                             (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1727                                               ETH_ALEN, addr) == 0)) &&
1728                            is_valid_ether_addr(addr)) {
1729                         /* eeprom values are valid so use them */
1730                         netif_dbg(dev, ifup, dev->net,
1731                                   "MAC address read from EEPROM");
1732                 } else {
1733                         /* generate random MAC */
1734                         eth_random_addr(addr);
1735                         netif_dbg(dev, ifup, dev->net,
1736                                   "MAC address set to random addr");
1737                 }
1738
1739                 addr_lo = addr[0] | (addr[1] << 8) |
1740                           (addr[2] << 16) | (addr[3] << 24);
1741                 addr_hi = addr[4] | (addr[5] << 8);
1742
1743                 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1744                 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1745         }
1746
1747         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1748         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1749
1750         ether_addr_copy(dev->net->dev_addr, addr);
1751 }
1752
1753 /* MDIO read and write wrappers for phylib */
/* MDIO read and write wrappers for phylib */
/* Read one PHY register through the chip's MII controller.
 *
 * Called by phylib as mii_bus->read.  Serializes against other MII
 * accesses with phy_mutex and keeps the USB interface resumed for the
 * duration of the transaction.
 *
 * Returns the 16-bit register value on success or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the read cycle to complete before fetching the data */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	/* NOTE(review): if the MII_DATA read above fails, val may be
	 * uninitialized here — presumably rare; verify error handling.
	 */
	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1789
1790 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1791                                  u16 regval)
1792 {
1793         struct lan78xx_net *dev = bus->priv;
1794         u32 val, addr;
1795         int ret;
1796
1797         ret = usb_autopm_get_interface(dev->intf);
1798         if (ret < 0)
1799                 return ret;
1800
1801         mutex_lock(&dev->phy_mutex);
1802
1803         /* confirm MII not busy */
1804         ret = lan78xx_phy_wait_not_busy(dev);
1805         if (ret < 0)
1806                 goto done;
1807
1808         val = (u32)regval;
1809         ret = lan78xx_write_reg(dev, MII_DATA, val);
1810
1811         /* set the address, index & direction (write to PHY) */
1812         addr = mii_access(phy_id, idx, MII_WRITE);
1813         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1814
1815         ret = lan78xx_phy_wait_not_busy(dev);
1816         if (ret < 0)
1817                 goto done;
1818
1819 done:
1820         mutex_unlock(&dev->phy_mutex);
1821         usb_autopm_put_interface(dev->intf);
1822         return 0;
1823 }
1824
1825 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1826 {
1827         struct device_node *node;
1828         int ret;
1829
1830         dev->mdiobus = mdiobus_alloc();
1831         if (!dev->mdiobus) {
1832                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1833                 return -ENOMEM;
1834         }
1835
1836         dev->mdiobus->priv = (void *)dev;
1837         dev->mdiobus->read = lan78xx_mdiobus_read;
1838         dev->mdiobus->write = lan78xx_mdiobus_write;
1839         dev->mdiobus->name = "lan78xx-mdiobus";
1840         dev->mdiobus->parent = &dev->udev->dev;
1841
1842         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1843                  dev->udev->bus->busnum, dev->udev->devnum);
1844
1845         switch (dev->chipid) {
1846         case ID_REV_CHIP_ID_7800_:
1847         case ID_REV_CHIP_ID_7850_:
1848                 /* set to internal PHY id */
1849                 dev->mdiobus->phy_mask = ~(1 << 1);
1850                 break;
1851         case ID_REV_CHIP_ID_7801_:
1852                 /* scan thru PHYAD[2..0] */
1853                 dev->mdiobus->phy_mask = ~(0xFF);
1854                 break;
1855         }
1856
1857         node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1858         ret = of_mdiobus_register(dev->mdiobus, node);
1859         if (node)
1860                 of_node_put(node);
1861         if (ret) {
1862                 netdev_err(dev->net, "can't register MDIO bus\n");
1863                 goto exit1;
1864         }
1865
1866         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1867         return 0;
1868 exit1:
1869         mdiobus_free(dev->mdiobus);
1870         return ret;
1871 }
1872
/* Unregister and free the MDIO bus created by lan78xx_mdio_init().
 * Order matters: the bus must be unregistered before it is freed.
 */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1878
/* phylib link-change callback; applies a forced-100M cabling workaround.
 *
 * Registered via phy_connect_direct().  The only work done here is a
 * hardware workaround: at forced 100 Mb/s, bounce BMCR through 10 Mb/s
 * with PHY interrupts masked so the chip re-latches the speed correctly.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		/* NOTE(review): phy_read()/phy_write() return values are
		 * not checked here; a failed read would feed a negative
		 * errno into the mask arithmetic — verify acceptability.
		 */
		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1910
1911 static int irq_map(struct irq_domain *d, unsigned int irq,
1912                    irq_hw_number_t hwirq)
1913 {
1914         struct irq_domain_data *data = d->host_data;
1915
1916         irq_set_chip_data(irq, data);
1917         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1918         irq_set_noprobe(irq);
1919
1920         return 0;
1921 }
1922
/* irq_domain .unmap callback: detach the virq from our irqchip.
 * Mirrors irq_map() in reverse order: handler first, then chip data.
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1928
/* irq_domain operations for the chip's interrupt endpoint */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1933
1934 static void lan78xx_irq_mask(struct irq_data *irqd)
1935 {
1936         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1937
1938         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1939 }
1940
1941 static void lan78xx_irq_unmask(struct irq_data *irqd)
1942 {
1943         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1944
1945         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1946 }
1947
/* irqchip .irq_bus_lock: taken around mask/unmask updates so the cached
 * enable mask and the deferred register write in irq_bus_sync_unlock()
 * stay consistent.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1954
1955 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1956 {
1957         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1958         struct lan78xx_net *dev =
1959                         container_of(data, struct lan78xx_net, domain_data);
1960         u32 buf;
1961         int ret;
1962
1963         /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1964          * are only two callbacks executed in non-atomic contex.
1965          */
1966         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1967         if (buf != data->irqenable)
1968                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1969
1970         mutex_unlock(&data->irq_lock);
1971 }
1972
/* irqchip for interrupts delivered through the USB interrupt endpoint;
 * mask/unmask only touch a cached word, hardware sync happens in the
 * bus_lock/bus_sync_unlock pair (sleepable context).
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1980
/* Create an irq_domain for the chip's interrupt endpoint and map the
 * PHY interrupt (INT_EP_PHY) so phylib can use a real virq.
 *
 * On success dev->domain_data.phyirq holds the mapped virq; on failure
 * it stays 0 and the caller falls back to PHY polling.
 * Returns 0 on success, -EINVAL on domain/mapping failure.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable mask from the current hardware state */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2019
2020 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2021 {
2022         if (dev->domain_data.phyirq > 0) {
2023                 irq_dispose_mapping(dev->domain_data.phyirq);
2024
2025                 if (dev->domain_data.irqdomain)
2026                         irq_domain_remove(dev->domain_data.irqdomain);
2027         }
2028         dev->domain_data.phyirq = 0;
2029         dev->domain_data.irqdomain = NULL;
2030 }
2031
/* PHY fixup for the external LAN8835: route the shared pin to IRQ_N
 * mode and enable the MAC-side RGMII TX clock delay.
 *
 * Registered via phy_register_fixup_for_uid(); returning 1 here means
 * "fixup applied".
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	/* NOTE(review): a failed phy_read_mmd() returns a negative errno
	 * which would be masked and written back — verify acceptability.
	 */
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2055
/* PHY fixup for the external Micrel KSZ9031RNX: program RGMII pad skews
 * and record the RX-delayed RGMII mode on the MAC side.
 *
 * Registered via phy_register_fixup_for_uid(); returning 1 here means
 * "fixup applied".
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2072
/* LAN7801-specific PHY setup.
 *
 * If no PHY is found on the MDIO bus, register a 1 Gb/s full-duplex
 * fixed-link pseudo-PHY and configure the MAC's RGMII clocking for it.
 * Otherwise register per-model fixups (KSZ9031RNX, LAN8835) for the
 * external PHY that was found.
 *
 * Returns the phy_device to attach, or NULL on failure.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
					    NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* MAC-side RGMII TX delay and DLL tuning for the fixed link */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}
2128
/* Locate, configure and attach the PHY for this chip.
 *
 * LAN7800/LAN7850 use the internal GMII PHY; LAN7801 uses an external
 * RGMII PHY (or a fixed-link pseudo-PHY) prepared by lan7801_phy_init().
 * After attaching, advertises symmetric flow control, applies optional
 * Device Tree EEE and LED-mode configuration, and starts autonegotiation.
 *
 * Returns 0 on success, -EIO on any failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo what lan7801_phy_init() registered */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

	/* optional: enable EEE when requested via Device Tree */
	if (of_property_read_bool(phydev->mdio.dev.of_node,
				  "microchip,eee-enabled")) {
		struct ethtool_eee edata;
		memset(&edata, 0, sizeof(edata));
		edata.cmd = ETHTOOL_SEEE;
		edata.advertised = ADVERTISED_1000baseT_Full |
				   ADVERTISED_100baseT_Full;
		edata.eee_enabled = true;
		edata.tx_lpi_enabled = true;
		if (of_property_read_u32(dev->udev->dev.of_node,
					 "microchip,tx-lpi-timer",
					 &edata.tx_lpi_timer))
			edata.tx_lpi_timer = 600; /* non-aggressive */
		(void)lan78xx_set_eee(dev->net, &edata);
	}

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			/* enable LEDn iff more than n modes are listed */
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2242
2243 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2244 {
2245         int ret = 0;
2246         u32 buf;
2247         bool rxenabled;
2248
2249         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2250
2251         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2252
2253         if (rxenabled) {
2254                 buf &= ~MAC_RX_RXEN_;
2255                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2256         }
2257
2258         /* add 4 to size for FCS */
2259         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2260         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2261
2262         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2263
2264         if (rxenabled) {
2265                 buf |= MAC_RX_RXEN_;
2266                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2267         }
2268
2269         return 0;
2270 }
2271
/* Asynchronously unlink all not-yet-unlinked URBs on queue @q.
 *
 * Walks the queue under its lock looking for entries not already in
 * unlink_start state, marks them, and calls usb_unlink_urb() with the
 * lock dropped (the completion handler takes the same lock).  Restarts
 * the walk after each unlink because the queue may have changed.
 *
 * Returns the number of URBs whose unlink was successfully initiated.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry that hasn't been unlinked yet */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2316
/* net_device .ndo_change_mtu: update MTU and the MAC's RX frame limit.
 *
 * Rejects MTUs whose link-layer size is an exact multiple of the USB
 * endpoint packet size (would require an extra zero-length packet).
 * If the RX URB size grows as a result, in-flight RX URBs are unlinked
 * and the bottom half rescheduled so new, larger URBs get submitted.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2346
/* net_device .ndo_set_mac_address: validate and program a new MAC address.
 *
 * Only permitted while the interface is down.  The address is written to
 * RX_ADDRL/RX_ADDRH and mirrored into perfect-filter slot 0 so unicast
 * frames for the new address are accepted.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* pack the 6 address bytes little-endian into two registers */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	/* Added to support MAC address changes */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	return 0;
}
2378
/* Enable or disable Rx checksum offload engine */
/* net_device .ndo_set_features: translate RX checksum and VLAN feature
 * flags into the cached RFE_CTL value (under the spinlock) and then
 * write the register outside the lock (register access may sleep).
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2414
/* Workqueue handler: flush the in-memory VLAN filter table to the
 * device's dataport.  Scheduled from the (atomic) add/kill vid
 * callbacks because the register access may sleep.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2424
2425 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2426                                    __be16 proto, u16 vid)
2427 {
2428         struct lan78xx_net *dev = netdev_priv(netdev);
2429         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2430         u16 vid_bit_index;
2431         u16 vid_dword_index;
2432
2433         vid_dword_index = (vid >> 5) & 0x7F;
2434         vid_bit_index = vid & 0x1F;
2435
2436         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2437
2438         /* defer register writes to a sleepable context */
2439         schedule_work(&pdata->set_vlan);
2440
2441         return 0;
2442 }
2443
2444 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2445                                     __be16 proto, u16 vid)
2446 {
2447         struct lan78xx_net *dev = netdev_priv(netdev);
2448         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2449         u16 vid_bit_index;
2450         u16 vid_dword_index;
2451
2452         vid_dword_index = (vid >> 5) & 0x7F;
2453         vid_bit_index = vid & 0x1F;
2454
2455         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2456
2457         /* defer register writes to a sleepable context */
2458         schedule_work(&pdata->set_vlan);
2459
2460         return 0;
2461 }
2462
/* Initialize the USB Latency Tolerance Messaging (LTM) registers.
 *
 * If LTM is enabled in USB_CFG1, a 24-byte LTM parameter block may be
 * supplied via EEPROM or OTP (a 2-byte descriptor at offset 0x3F gives
 * length and a word offset to the block).  When no valid block exists,
 * all six LTM registers are programmed with zeros.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				/* keep whatever was read so far; bail
				 * without touching the LTM registers
				 */
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2501
/* Soft-reset the chip and (re)program the MAC/USB registers to their
 * operational defaults. Called from lan78xx_bind().
 * Returns 0 on success or -EIO if a reset-completion poll times out.
 *
 * NOTE(review): the return codes of the individual register accesses
 * are assigned to 'ret' but never checked, so I/O errors during init
 * are silently ignored — confirm this is acceptable.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;
	bool has_eeprom;
	bool has_otp;

	/* Zero-length probe reads detect whether a configuration EEPROM
	 * or OTP image is present; used below to choose safe defaults.
	 */
	has_eeprom = !lan78xx_read_eeprom(dev, 0, 0, NULL);
	has_otp = !lan78xx_read_otp(dev, 0, 0, NULL);

	/* Trigger a Lite reset and poll (up to 1s) for it to self-clear. */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* Size the bulk-in burst cap and URB queue depths by link speed. */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* Enable multiple ethernet frames per USB transfer (MEF). */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	/* If no valid EEPROM and no valid OTP, enable the LEDs by default */
	if (!has_eeprom && !has_otp)
	    buf |= HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* Clear pending interrupts; leave flow control disabled for now. */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* Poll (up to 1s) until the PHY reset bit self-clears AND the
	 * device reports ready.
	 */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	/* If no valid EEPROM and no valid OTP, enable AUTO negotiation */
	if (!has_eeprom && !has_otp)
	    buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* Enable the MAC and FIFO transmit paths. */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	/* Accept frames up to MTU plus a VLAN-tagged Ethernet header. */
	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	/* Enable the MAC and FIFO receive paths. */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2651
2652 static void lan78xx_init_stats(struct lan78xx_net *dev)
2653 {
2654         u32 *p;
2655         int i;
2656
2657         /* initialize for stats update
2658          * some counters are 20bits and some are 32bits
2659          */
2660         p = (u32 *)&dev->stats.rollover_max;
2661         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2662                 p[i] = 0xFFFFF;
2663
2664         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2665         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2666         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2667         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2668         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2669         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2670         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2671         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2672         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2673         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2674
2675         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2676 }
2677
/* ndo_open: bring the interface up. Resumes the device via runtime PM,
 * starts the PHY, submits the interrupt (link status) URB, and kicks
 * off a deferred link reset. Returns 0 or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			/* NOTE(review): on this failure the PHY is left
			 * started while an error is returned — confirm
			 * that is intended.
			 */
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force the deferred worker to re-evaluate link state */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	/* drop the runtime-PM reference taken at the top */
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2716
/* Cancel all in-flight RX and TX URBs and briefly wait for their
 * completions to drain. Runs in process context from lan78xx_stop();
 * dev->wait is published so completion paths can wake us.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): the '&&' chain exits as soon as ANY of the three
	 * queues becomes empty; waiting for all of them to drain would
	 * need '||'. This matches the historical driver code — confirm
	 * the early exit is intended.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2742
/* ndo_stop: quiesce the device — stop the statistics timer and the
 * PHY, drain and kill in-flight URBs, neuter deferred work, and drop
 * the runtime-PM reference. Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	/* stop the periodic statistics refresh before teardown */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2779
/* Thin wrapper around skb_linearize(), kept as a named hook for the
 * TX path. Returns 0 on success or a negative errno.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2784
2785 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2786                                        struct sk_buff *skb, gfp_t flags)
2787 {
2788         u32 tx_cmd_a, tx_cmd_b;
2789
2790         if (skb_cow_head(skb, TX_OVERHEAD)) {
2791                 dev_kfree_skb_any(skb);
2792                 return NULL;
2793         }
2794
2795         if (lan78xx_linearize(skb) < 0)
2796                 return NULL;
2797
2798         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2799
2800         if (skb->ip_summed == CHECKSUM_PARTIAL)
2801                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2802
2803         tx_cmd_b = 0;
2804         if (skb_is_gso(skb)) {
2805                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2806
2807                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2808
2809                 tx_cmd_a |= TX_CMD_A_LSO_;
2810         }
2811
2812         if (skb_vlan_tag_present(skb)) {
2813                 tx_cmd_a |= TX_CMD_A_IVTG_;
2814                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2815         }
2816
2817         skb_push(skb, 4);
2818         cpu_to_le32s(&tx_cmd_b);
2819         memcpy(skb->data, &tx_cmd_b, 4);
2820
2821         skb_push(skb, 4);
2822         cpu_to_le32s(&tx_cmd_a);
2823         memcpy(skb->data, &tx_cmd_a, 4);
2824
2825         return skb;
2826 }
2827
/* Move @skb from @list to dev->done, recording its new lifecycle
 * @state in the skb control block, and schedule the bottom-half
 * tasklet when the done queue transitions from empty.
 *
 * Locking: interrupts are disabled by the initial irqsave and remain
 * disabled across both queue locks; the saved flags are restored only
 * when dev->done.lock is released.
 *
 * Returns the state the skb had before this call.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2850
/* Completion handler for bulk-out (TX) URBs; runs in interrupt
 * context. Updates TX statistics, reacts to fatal USB errors, releases
 * the async PM reference and defers the skb to the bottom half.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: schedule a halt-clear */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* USB protocol-level errors: stop the TX queue */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* pairs with the PM reference taken when the URB was submitted */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2889
2890 static void lan78xx_queue_skb(struct sk_buff_head *list,
2891                               struct sk_buff *newsk, enum skb_state state)
2892 {
2893         struct skb_data *entry = (struct skb_data *)newsk->cb;
2894
2895         __skb_queue_tail(list, newsk);
2896         entry->state = state;
2897 }
2898
/* ndo_start_xmit: timestamp the frame, prepend the TX command header
 * and queue it on txq_pend for the bottom half to submit over USB.
 * Always returns NETDEV_TX_OK; preparation failures count as drops.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		/* a NULL return means the frame could not be prepared */
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	/* let the bottom half push pending frames to the hardware */
	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2928
/* Scan the interface altsettings for the first one that provides both
 * a bulk-in and a bulk-out endpoint; an interrupt-in (status) endpoint
 * is also recorded when present. Configures dev->pipe_in/pipe_out.
 * Returns 0 on success, -EINVAL if no suitable altsetting exists.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints qualify */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* remember the first endpoint of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	/* NOTE(review): 'status' may legitimately be NULL here, so users
	 * of dev->ep_intr must tolerate a missing interrupt endpoint —
	 * confirm callers check before dereferencing.
	 */
	dev->ep_intr = status;

	return 0;
}
2987
/* Driver bind: discover USB endpoints, allocate and initialise the
 * per-device private data, pick default offload features, set up the
 * PHY interrupt domain, reset the chip and initialise MDIO.
 * Returns 0 on success or a negative errno with partial state undone.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	ret = lan78xx_get_endpoints(dev, intf);
	if (ret) {
		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
			    ret);
		return ret;
	}

	/* private data lives in dev->data[0], stored as an unsigned long */
	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	/* build the default feature set from compile-time defaults */
	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE) {
		dev->net->features |= NETIF_F_SG;
		/* Use module parameter to control TCP segmentation offload as
		 * it appears to cause issues.
		 */
		if (enable_tso)
			dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6;
	}

	if (DEFAULT_VLAN_RX_OFFLOAD)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (DEFAULT_VLAN_FILTER_ENABLE)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* room for the 8-byte TX command header pushed by tx_prep */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	kfree(pdata);
	return ret;
}
3085
3086 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3087 {
3088         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3089
3090         lan78xx_remove_irq_domain(dev);
3091
3092         lan78xx_remove_mdio(dev);
3093
3094         if (pdata) {
3095                 cancel_work_sync(&pdata->set_multicast);
3096                 cancel_work_sync(&pdata->set_vlan);
3097                 netif_dbg(dev, ifdown, dev->net, "free pdata");
3098                 kfree(pdata);
3099                 pdata = NULL;
3100                 dev->data[0] = 0;
3101         }
3102 }
3103
3104 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3105                                     struct sk_buff *skb,
3106                                     u32 rx_cmd_a, u32 rx_cmd_b)
3107 {
3108         /* HW Checksum offload appears to be flawed if used when not stripping
3109          * VLAN headers. Drop back to S/W checksums under these conditions.
3110          */
3111         if (!(dev->net->features & NETIF_F_RXCSUM) ||
3112             unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3113             ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3114              !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3115                 skb->ip_summed = CHECKSUM_NONE;
3116         } else {
3117                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3118                 skb->ip_summed = CHECKSUM_COMPLETE;
3119         }
3120 }
3121
3122 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3123                                     struct sk_buff *skb,
3124                                     u32 rx_cmd_a, u32 rx_cmd_b)
3125 {
3126         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3127             (rx_cmd_a & RX_CMD_A_FVTG_))
3128                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3129                                        (rx_cmd_b & 0xffff));
3130 }
3131
/* Deliver a fully-parsed receive skb to the network stack, or park it
 * on rxq_pause while RX is paused. Updates RX packet/byte counters
 * and gives the timestamping layer first claim on the skb.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* clear driver bookkeeping before the skb leaves our control */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* deferred RX timestamping may consume the skb entirely */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
3158
/* Parse a bulk-in buffer containing one or more received frames, each
 * preceded by RX command words A and B (32-bit) and C (16-bit).
 * Intermediate frames are cloned and handed up via lan78xx_skb_return;
 * the final frame stays in @skb for the caller to deliver.
 * Returns 1 on success, 0 on a short buffer or clone failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three little-endian command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		/* frames are padded so the next header is 4-byte aligned */
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware reported a receive error: skip the frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				/* NOTE(review): truesize is set from the
				 * frame size rather than the allocated
				 * URB buffer — confirm this accounting
				 * is intended.
				 */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data; point the clone at just
			 * this frame and hand it up the stack
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		/* advance past this frame's payload */
		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3233
3234 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3235 {
3236         if (!lan78xx_rx(dev, skb)) {
3237                 dev->net->stats.rx_errors++;
3238                 goto done;
3239         }
3240
3241         if (skb->len) {
3242                 lan78xx_skb_return(dev, skb);
3243                 return;
3244         }
3245
3246         netif_dbg(dev, rx_err, dev->net, "drop\n");
3247         dev->net->stats.rx_errors++;
3248 done:
3249         skb_queue_tail(&dev->done, skb);
3250 }
3251
3252 static void rx_complete(struct urb *urb);
3253
/* Allocate a receive skb sized for one bulk-in transfer and submit
 * @urb on the RX pipe. On any failure (no memory, device gone, RX
 * halted or device asleep) the skb and URB are freed here.
 * Returns 0 on success or a negative errno.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock serializes the state checks against unlink/teardown */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: have the kevent worker clear it */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bottom half retry filling the RX ring */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3313
/* Completion handler for bulk-in (RX) URBs; runs in interrupt context.
 * Classifies the URB status, defers skb processing to the bottom half
 * via the done queue, and resubmits the URB while the device is up.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success, but discard runt buffers */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stall: schedule a halt-clear, then treat as shutdown */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the URB attached so the BH frees it with the skb */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* resubmit with a fresh skb unless the device is going down */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3383
/* Tx bottom half: coalesce pending skbs from dev->txq_pend into a single
 * bulk-out urb (each frame padded to a 32-bit boundary), or take one GSO
 * skb as-is, then submit the urb.
 *
 * Locking: tqp->lock guards the walk over txq_pend; dev->txq.lock is held
 * across autopm-get, urb fill and submit so suspend cannot race with the
 * submission.  The `drop:` label lives inside the trailing `if (ret)`
 * block; the early `goto drop`s deliberately jump into it to share the
 * skb/urb release code (urb may still be NULL there, usb_free_urb(NULL)
 * is a no-op).
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	/* size up how many queued frames fit in one MAX_SINGLE_PACKET_SIZE
	 * transfer; a GSO skb is always sent alone
	 */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle the stack once enough urbs are in flight */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		/* NOTE(review): skb->protocol is read here after the urb was
		 * submitted; the skb is still alive (freed in tx_complete/bh)
		 * but this races with completion — debug-only, verify upstream
		 */
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3511
3512 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3513 {
3514         struct urb *urb;
3515         int i;
3516
3517         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3518                 for (i = 0; i < 10; i++) {
3519                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3520                                 break;
3521                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3522                         if (urb)
3523                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3524                                         return;
3525                 }
3526
3527                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3528                         tasklet_schedule(&dev->bh);
3529         }
3530         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3531                 netif_wake_queue(dev->net);
3532 }
3533
3534 static void lan78xx_bh(unsigned long param)
3535 {
3536         struct lan78xx_net *dev = (struct lan78xx_net *)param;
3537         struct sk_buff *skb;
3538         struct skb_data *entry;
3539
3540         while ((skb = skb_dequeue(&dev->done))) {
3541                 entry = (struct skb_data *)(skb->cb);
3542                 switch (entry->state) {
3543                 case rx_done:
3544                         entry->state = rx_cleanup;
3545                         rx_process(dev, skb);
3546                         continue;
3547                 case tx_done:
3548                         usb_free_urb(entry->urb);
3549                         dev_kfree_skb(skb);
3550                         continue;
3551                 case rx_cleanup:
3552                         usb_free_urb(entry->urb);
3553                         dev_kfree_skb(skb);
3554                         continue;
3555                 default:
3556                         netdev_dbg(dev->net, "skb state %d\n", entry->state);
3557                         return;
3558                 }
3559         }
3560
3561         if (netif_device_present(dev->net) && netif_running(dev->net)) {
3562                 /* reset update timer delta */
3563                 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3564                         dev->delta = 1;
3565                         mod_timer(&dev->stat_monitor,
3566                                   jiffies + STAT_UPDATE_TIMER);
3567                 }
3568
3569                 if (!skb_queue_empty(&dev->txq_pend))
3570                         lan78xx_tx_bh(dev);
3571
3572                 if (!timer_pending(&dev->delay) &&
3573                     !test_bit(EVENT_RX_HALT, &dev->flags))
3574                         lan78xx_rx_bh(dev);
3575         }
3576 }
3577
/* Deferred (keventd) work: handles the slow-path events flagged by the
 * interrupt/completion handlers — clearing tx/rx endpoint halts, link
 * reset, and periodic statistics refresh.
 *
 * NOTE(review): the fail_pipe/fail_halt/skip_reset labels sit *inside*
 * conditional bodies; the early `goto`s jump into those blocks to reuse
 * the error printout.  Legal C, but fragile — preserved as-is.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		/* cancel in-flight tx urbs, then clear the endpoint halt */
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		/* same recovery for the bulk-in endpoint */
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
				goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		/* NOTE(review): ret stays 0 here, so the "failed (%d)"
		 * message always prints 0 rather than the real error
		 */
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		/* back off the stats refresh exponentially, capped at 50 */
		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}
}
3655
3656 static void intr_complete(struct urb *urb)
3657 {
3658         struct lan78xx_net *dev = urb->context;
3659         int status = urb->status;
3660
3661         switch (status) {
3662         /* success */
3663         case 0:
3664                 lan78xx_status(dev, urb);
3665                 break;
3666
3667         /* software-driven interface shutdown */
3668         case -ENOENT:                   /* urb killed */
3669         case -ESHUTDOWN:                /* hardware gone */
3670                 netif_dbg(dev, ifdown, dev->net,
3671                           "intr shutdown, code %d\n", status);
3672                 return;
3673
3674         /* NOTE:  not throttling like RX/TX, since this endpoint
3675          * already polls infrequently
3676          */
3677         default:
3678                 netdev_dbg(dev->net, "intr status %d\n", status);
3679                 break;
3680         }
3681
3682         if (!netif_running(dev->net))
3683                 return;
3684
3685         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3686         status = usb_submit_urb(urb, GFP_ATOMIC);
3687         if (status != 0)
3688                 netif_err(dev, timer, dev->net,
3689                           "intr resubmit --> %d\n", status);
3690 }
3691
/* USB disconnect callback: tear down in dependency order — detach the
 * PHY, unregister the netdev, stop deferred work and anchored urbs,
 * unbind driver state, kill/free the interrupt urb, then drop the
 * netdev and the device reference taken in probe.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net		*dev;
	struct usb_device		*udev;
	struct net_device		*net;
	struct phy_device		*phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	phydev = net->phydev;

	/* drop the PHY fixups registered during probe/phy init */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* fixed-link setups allocate a pseudo phydev that we must free */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* release tx urbs deferred while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3730
3731 static void lan78xx_tx_timeout(struct net_device *net)
3732 {
3733         struct lan78xx_net *dev = netdev_priv(net);
3734
3735         unlink_urbs(dev, &dev->txq);
3736         tasklet_schedule(&dev->bh);
3737 }
3738
/* net_device callbacks exported to the network stack */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3753
/* Periodic statistics timer.  Runs in softirq context, where USB
 * register reads are not allowed, so defer the actual stats update to
 * the keventd work queue via EVENT_STAT_UPDATE.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3760
3761 static int lan78xx_probe(struct usb_interface *intf,
3762                          const struct usb_device_id *id)
3763 {
3764         struct lan78xx_net *dev;
3765         struct net_device *netdev;
3766         struct usb_device *udev;
3767         int ret;
3768         unsigned maxp;
3769         unsigned period;
3770         u8 *buf = NULL;
3771
3772         udev = interface_to_usbdev(intf);
3773         udev = usb_get_dev(udev);
3774
3775         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3776         if (!netdev) {
3777                 dev_err(&intf->dev, "Error: OOM\n");
3778                 ret = -ENOMEM;
3779                 goto out1;
3780         }
3781
3782         /* netdev_printk() needs this */
3783         SET_NETDEV_DEV(netdev, &intf->dev);
3784
3785         dev = netdev_priv(netdev);
3786         dev->udev = udev;
3787         dev->intf = intf;
3788         dev->net = netdev;
3789         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3790                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3791
3792         skb_queue_head_init(&dev->rxq);
3793         skb_queue_head_init(&dev->txq);
3794         skb_queue_head_init(&dev->done);
3795         skb_queue_head_init(&dev->rxq_pause);
3796         skb_queue_head_init(&dev->txq_pend);
3797         mutex_init(&dev->phy_mutex);
3798
3799         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3800         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3801         init_usb_anchor(&dev->deferred);
3802
3803         netdev->netdev_ops = &lan78xx_netdev_ops;
3804         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3805         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3806
3807         dev->delta = 1;
3808         timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3809
3810         mutex_init(&dev->stats.access_lock);
3811
3812         ret = lan78xx_bind(dev, intf);
3813         if (ret < 0)
3814                 goto out2;
3815         strcpy(netdev->name, "eth%d");
3816
3817         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3818                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3819
3820         /* MTU range: 68 - 9000 */
3821         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3822
3823         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3824         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3825         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3826
3827         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3828         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3829
3830         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3831                                         dev->ep_intr->desc.bEndpointAddress &
3832                                         USB_ENDPOINT_NUMBER_MASK);
3833         if (int_urb_interval_ms <= 0)
3834                 period = dev->ep_intr->desc.bInterval;
3835         else
3836                 period = int_urb_interval_ms * INT_URB_MICROFRAMES_PER_MS;
3837
3838         netif_notice(dev, probe, netdev, "int urb period %d\n", period);
3839
3840         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3841         buf = kmalloc(maxp, GFP_KERNEL);
3842         if (buf) {
3843                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3844                 if (!dev->urb_intr) {
3845                         ret = -ENOMEM;
3846                         kfree(buf);
3847                         goto out3;
3848                 } else {
3849                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3850                                          dev->pipe_intr, buf, maxp,
3851                                          intr_complete, dev, period);
3852                 }
3853         }
3854
3855         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3856
3857         /* driver requires remote-wakeup capability during autosuspend. */
3858         intf->needs_remote_wakeup = 1;
3859
3860         ret = lan78xx_phy_init(dev);
3861         if (ret < 0)
3862                 goto out4;
3863
3864         ret = register_netdev(netdev);
3865         if (ret != 0) {
3866                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3867                 goto out5;
3868         }
3869
3870         usb_set_intfdata(intf, dev);
3871
3872         ret = device_set_wakeup_enable(&udev->dev, true);
3873
3874          /* Default delay of 2sec has more overhead than advantage.
3875           * Set to 10sec as default.
3876           */
3877         pm_runtime_set_autosuspend_delay(&udev->dev,
3878                                          DEFAULT_AUTOSUSPEND_DELAY);
3879
3880         return 0;
3881
3882 out5:
3883         phy_disconnect(netdev->phydev);
3884 out4:
3885         usb_free_urb(dev->urb_intr);
3886 out3:
3887         lan78xx_unbind(dev, intf);
3888 out2:
3889         free_netdev(netdev);
3890 out1:
3891         usb_put_dev(udev);
3892
3893         return ret;
3894 }
3895
/* Compute the 16-bit wakeup-frame CRC used by the WUF_CFGx filters:
 * polynomial 0x8005, seed 0xFFFF, data consumed LSB-first, and the low
 * bit forced to 1 whenever the polynomial is applied (hardware-matching
 * variant, not a standard CRC-16).
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	int byte;

	for (byte = 0; byte < len; byte++) {
		u8 octet = buf[byte];
		int bit;

		for (bit = 0; bit < 8; bit++) {
			u16 top = crc >> 15;

			crc <<= 1;
			if (top ^ (u16)(octet & 1)) {
				crc ^= poly;
				crc |= (u16)0x0001U;
			}
			octet >>= 1;
		}
	}

	return crc;
}
3920
/* Program wake-on-LAN for system suspend: stop tx/rx, clear old wakeup
 * state, then build WUCSR flags and WUF_CFGx/WUF_MASKx frame filters
 * from the WAKE_* bits in @wol, pick the matching PMT_CTL suspend mode,
 * and finally re-enable the receiver so wake frames can be detected.
 *
 * NOTE(review): the register accessors' return codes are assigned to
 * ret but never checked; errors are silently ignored and 0 is always
 * returned.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* byte prefixes used to build multicast/ARP wakeup-frame filters */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while we reprogram wakeup state */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start with all wakeup-frame filters disabled */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask = 7: match the first 3 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask = 3: match the first 2 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: match bytes 12-13 (EtherType field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames are seen */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
4063
/* USB suspend callback (both autosuspend and system sleep).
 *
 * On the first suspend level: refuse autosuspend while tx is busy, mark
 * the device asleep, stop the MAC, detach the netdev and kill all urbs.
 * Then arm the wakeup source: for autosuspend, good-frame/PHY wakeup in
 * suspend mode 3; for system sleep, the user's WOL config via
 * lan78xx_set_suspend().
 *
 * NOTE(review): most register accessor return codes are assigned to ret
 * but not checked, matching the rest of this file.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no stats updates while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* reset wakeup status and sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any stale wakeup status */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* receiver back on so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: honour the configured WOL options */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
4159
/* USB resume callback: bring the device back from (auto)suspend.
 *
 * Restarts the statistics monitor timer, resubmits the interrupt URB and
 * any TX URBs that were deferred while the device was asleep, clears the
 * latched wakeup status, and re-enables the MAC transmitter (which the
 * suspend path disabled).
 *
 * NOTE(review): every lan78xx_read_reg()/lan78xx_write_reg() result is
 * assigned to 'ret' but never checked, and the function unconditionally
 * returns 0 — register I/O failures during resume go unreported.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* Restart periodic statistics collection if suspend stopped it;
	 * delta = 1 requests the shortest update interval initially.
	 */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	/* Only fully wake the device once suspends are balanced, i.e.
	 * suspend_count drops back to zero.
	 */
	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* Resubmit TX URBs parked on the 'deferred' anchor while
		 * asleep.  On submit failure the skb/urb are dropped and
		 * the async autopm reference (presumably taken when the
		 * URB was deferred — verify against the suspend path) is
		 * released; on success the skb re-enters the txq as an
		 * in-flight transmit.
		 */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* Restart the TX queue unless it is already full */
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* Clear wakeup enables and stale wake-source status left over
	 * from suspend.
	 */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* Writing the *_RCD_ / *_FR_ / *_WUFR_ status bits back —
	 * presumably write-1-to-clear of the latched wake-event flags;
	 * confirm against the LAN78xx datasheet.
	 */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* Re-enable the MAC transmitter (disabled on suspend) */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4226
4227 static int lan78xx_reset_resume(struct usb_interface *intf)
4228 {
4229         struct lan78xx_net *dev = usb_get_intfdata(intf);
4230
4231         lan78xx_reset(dev);
4232
4233         phy_start(dev->net->phydev);
4234
4235         return lan78xx_resume(intf);
4236 }
4237
/* USB vendor/product IDs handled by this driver.  Exported via
 * MODULE_DEVICE_TABLE below so udev/modprobe can autoload the module
 * when a matching device is plugged in.
 */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(usb, products);
4254
/* USB driver glue: probe/disconnect and power-management entry points.
 * supports_autosuspend enables USB runtime PM (selective suspend);
 * disable_hub_initiated_lpm opts the device out of hub-initiated link
 * power management.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
4266
/* Registers lan78xx_driver with the USB core and generates the module
 * init/exit boilerplate.
 */
module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");