net: lan78xx: Ack pending PHY interrupts when resetting
[platform/kernel/linux-rpi.git] drivers/net/usb/lan78xx.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32
33 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME     "lan78xx"
36
37 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
38 #define THROTTLE_JIFFIES                (HZ / 8)
39 #define UNLINK_TIMEOUT_MS               3
40
41 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
42
43 #define SS_USB_PKT_SIZE                 (1024)
44 #define HS_USB_PKT_SIZE                 (512)
45 #define FS_USB_PKT_SIZE                 (64)
46
47 #define MAX_RX_FIFO_SIZE                (12 * 1024)
48 #define MAX_TX_FIFO_SIZE                (12 * 1024)
49
50 #define FLOW_THRESHOLD(n)               ((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)    ((FLOW_THRESHOLD(on)  << 0) | \
52                                          (FLOW_THRESHOLD(off) << 8))
53
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS                      9216
56 #define FLOW_ON_HS                      8704
57
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS                     4096
60 #define FLOW_OFF_HS                     1024
61
62 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY           (0x0800)
64 #define MAX_SINGLE_PACKET_SIZE          (9000)
65 #define DEFAULT_TX_CSUM_ENABLE          (true)
66 #define DEFAULT_RX_CSUM_ENABLE          (true)
67 #define DEFAULT_TSO_CSUM_ENABLE         (true)
68 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
69 #define DEFAULT_VLAN_RX_OFFLOAD         (true)
70 #define TX_ALIGNMENT                    (4)
71 #define RXW_PADDING                     2
72
73 #define LAN78XX_USB_VENDOR_ID           (0x0424)
74 #define LAN7800_USB_PRODUCT_ID          (0x7800)
75 #define LAN7850_USB_PRODUCT_ID          (0x7850)
76 #define LAN7801_USB_PRODUCT_ID          (0x7801)
77 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
78 #define LAN78XX_OTP_MAGIC               (0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID          (0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
81
82 #define MII_READ                        1
83 #define MII_WRITE                       0
84
85 #define EEPROM_INDICATOR                (0xA5)
86 #define EEPROM_MAC_OFFSET               (0x01)
87 #define MAX_EEPROM_SIZE                 512
88 #define OTP_INDICATOR_1                 (0xF3)
89 #define OTP_INDICATOR_2                 (0xF7)
90
91 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
92                                          WAKE_MCAST | WAKE_BCAST | \
93                                          WAKE_ARP | WAKE_MAGIC)
94
95 #define TX_URB_NUM                      10
96 #define TX_SS_URB_NUM                   TX_URB_NUM
97 #define TX_HS_URB_NUM                   TX_URB_NUM
98 #define TX_FS_URB_NUM                   TX_URB_NUM
99
100 /* A single URB buffer must be large enough to hold a complete jumbo packet
101  */
102 #define TX_SS_URB_SIZE                  (32 * 1024)
103 #define TX_HS_URB_SIZE                  (16 * 1024)
104 #define TX_FS_URB_SIZE                  (10 * 1024)
105
106 #define RX_SS_URB_NUM                   30
107 #define RX_HS_URB_NUM                   10
108 #define RX_FS_URB_NUM                   10
109 #define RX_SS_URB_SIZE                  TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE                  TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE                  TX_FS_URB_SIZE
112
113 #define SS_BURST_CAP_SIZE               RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY                0x2000
115 #define HS_BURST_CAP_SIZE               RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY                0x2000
117 #define FS_BURST_CAP_SIZE               RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY                0x2000
119
120 #define TX_CMD_LEN                      8
121 #define TX_SKB_MIN_LEN                  (TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev)           ((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123
124 #define RX_CMD_LEN                      10
125 #define RX_SKB_MIN_LEN                  (RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu)           ((mtu) + ETH_HLEN + VLAN_HLEN)
127
128 /* USB related defines */
129 #define BULK_IN_PIPE                    1
130 #define BULK_OUT_PIPE                   2
131
132 /* default autosuspend delay (mSec)*/
133 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
134
135 /* statistic update interval (mSec) */
136 #define STAT_UPDATE_TIMER               (1 * 1000)
137
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT              (HZ / 10)
140
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS             1
143
144 /* defines interrupts from interrupt EP */
145 #define MAX_INT_EP                      (32)
146 #define INT_EP_INTEP                    (31)
147 #define INT_EP_OTP_WR_DONE              (28)
148 #define INT_EP_EEE_TX_LPI_START         (26)
149 #define INT_EP_EEE_TX_LPI_STOP          (25)
150 #define INT_EP_EEE_RX_LPI               (24)
151 #define INT_EP_MAC_RESET_TIMEOUT        (23)
152 #define INT_EP_RDFO                     (22)
153 #define INT_EP_TXE                      (21)
154 #define INT_EP_USB_STATUS               (20)
155 #define INT_EP_TX_DIS                   (19)
156 #define INT_EP_RX_DIS                   (18)
157 #define INT_EP_PHY                      (17)
158 #define INT_EP_DP                       (16)
159 #define INT_EP_MAC_ERR                  (15)
160 #define INT_EP_TDFU                     (14)
161 #define INT_EP_TDFO                     (13)
162 #define INT_EP_UTX                      (12)
163 #define INT_EP_GPIO_11                  (11)
164 #define INT_EP_GPIO_10                  (10)
165 #define INT_EP_GPIO_9                   (9)
166 #define INT_EP_GPIO_8                   (8)
167 #define INT_EP_GPIO_7                   (7)
168 #define INT_EP_GPIO_6                   (6)
169 #define INT_EP_GPIO_5                   (5)
170 #define INT_EP_GPIO_4                   (4)
171 #define INT_EP_GPIO_3                   (3)
172 #define INT_EP_GPIO_2                   (2)
173 #define INT_EP_GPIO_1                   (1)
174 #define INT_EP_GPIO_0                   (0)
175
/* ethtool statistics strings.  The order mirrors the counter fields of
 * struct lan78xx_statstage / lan78xx_statstage64 below, so the two must
 * be kept in sync when counters are added or removed.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
225
/* Raw 32-bit hardware statistics counters.
 *
 * Field order and width must match the device's response to
 * USB_VENDOR_REQUEST_GET_STATS exactly: lan78xx_read_stats() copies the
 * response into this struct one little-endian u32 at a time.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
275
/* 64-bit accumulated counterparts of struct lan78xx_statstage.
 *
 * Field order must match lan78xx_statstage exactly: lan78xx_update_stats()
 * walks both structs index by index when folding in rollovers.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
325
/* Device registers captured for a register dump.
 * NOTE(review): presumably consumed by the ethtool get_regs path —
 * confirm against the ethtool ops elsewhere in this file.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
347
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349
350 struct lan78xx_net;
351
/* Receive-filter and VLAN state, plus the workqueue items that push it
 * to the hardware.  NOTE(review): presumably hung off
 * lan78xx_net::driver_priv — confirm at the allocation site.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;	/* back pointer to owning device */
	u32 rfe_ctl;			/* shadow of the RFE control register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;			/* configured Wake-on-LAN options */
};
364
/* Lifecycle state of an in-flight buffer; stored in skb->cb via
 * struct skb_data.
 */
enum skb_state {
	illegal = 0,	/* buffer not in any known state */
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
374
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB paired with this buffer */
	struct lan78xx_net *dev;
	enum skb_state state;	/* current point in the tx/rx lifecycle */
	size_t length;
	int num_of_packet;
};
382
/* Pairs a USB control request with its owning device for async use. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
387
388 #define EVENT_TX_HALT                   0
389 #define EVENT_RX_HALT                   1
390 #define EVENT_RX_MEMORY                 2
391 #define EVENT_STS_SPLIT                 3
392 #define EVENT_LINK_RESET                4
393 #define EVENT_RX_PAUSED                 5
394 #define EVENT_DEV_WAKING                6
395 #define EVENT_DEV_ASLEEP                7
396 #define EVENT_DEV_OPEN                  8
397 #define EVENT_STAT_UPDATE               9
398 #define EVENT_DEV_DISCONNECT            10
399
/* Aggregate statistics state: the last raw hardware snapshot, the
 * per-counter rollover bookkeeping, and the accumulated 64-bit totals.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* last raw snapshot */
	struct lan78xx_statstage	rollover_count;	/* wraps seen per counter */
	struct lan78xx_statstage	rollover_max;	/* used as (max + 1) wrap modulus in lan78xx_update_stats() */
	struct lan78xx_statstage64	curr_stat;	/* accumulated totals */
};
407
/* Bookkeeping for the driver's IRQ domain.
 * NOTE(review): phyirq suggests this demultiplexes the PHY interrupt —
 * confirm against the irq domain setup code.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* currently enabled interrupt bits */
	struct mutex		irq_lock;		/* for irq bus access */
};
416
/* Per-device driver state. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;	/* pool of unused RX buffers */
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;	/* pool of unused TX buffers */
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;	/* netif message level bitmap */

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
479
480 /* define external phy id */
481 #define PHY_LAN8835                     (0x0007C130)
482 #define PHY_KSZ9031RNX                  (0x00221620)
483
484 /* use ethtool to change the level for any given device */
485 static int msg_level = -1;
486 module_param(msg_level, int, 0);
487 MODULE_PARM_DESC(msg_level, "Override default message level");
488
/* Fetch a pre-allocated URB buffer from @buf_pool.
 *
 * Returns the buffer, or NULL if the pool is currently empty.
 * skb_dequeue() takes the queue lock and returns NULL on an empty
 * queue itself, so the previous separate (unlocked, racy) emptiness
 * check was redundant and has been dropped.
 */
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	return skb_dequeue(buf_pool);
}
496
497 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
498                                 struct sk_buff *buf)
499 {
500         buf->data = buf->head;
501         skb_reset_tail_pointer(buf);
502
503         buf->len = 0;
504         buf->data_len = 0;
505
506         skb_queue_tail(buf_pool, buf);
507 }
508
509 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
510 {
511         struct skb_data *entry;
512         struct sk_buff *buf;
513
514         while (!skb_queue_empty(buf_pool)) {
515                 buf = skb_dequeue(buf_pool);
516                 if (buf) {
517                         entry = (struct skb_data *)buf->cb;
518                         usb_free_urb(entry->urb);
519                         dev_kfree_skb_any(buf);
520                 }
521         }
522 }
523
524 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
525                                   size_t n_urbs, size_t urb_size,
526                                   struct lan78xx_net *dev)
527 {
528         struct skb_data *entry;
529         struct sk_buff *buf;
530         struct urb *urb;
531         int i;
532
533         skb_queue_head_init(buf_pool);
534
535         for (i = 0; i < n_urbs; i++) {
536                 buf = alloc_skb(urb_size, GFP_ATOMIC);
537                 if (!buf)
538                         goto error;
539
540                 if (skb_linearize(buf) != 0) {
541                         dev_kfree_skb_any(buf);
542                         goto error;
543                 }
544
545                 urb = usb_alloc_urb(0, GFP_ATOMIC);
546                 if (!urb) {
547                         dev_kfree_skb_any(buf);
548                         goto error;
549                 }
550
551                 entry = (struct skb_data *)buf->cb;
552                 entry->urb = urb;
553                 entry->dev = dev;
554                 entry->length = 0;
555                 entry->num_of_packet = 0;
556
557                 skb_queue_tail(buf_pool, buf);
558         }
559
560         return 0;
561
562 error:
563         lan78xx_free_buf_pool(buf_pool);
564
565         return -ENOMEM;
566 }
567
/* Take a free RX buffer from the pool; NULL if none are available. */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}
572
/* Return @rx_buf to the free RX buffer pool for reuse. */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}
578
/* Free all RX buffers and their URBs. */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}
583
/* Allocate the RX buffer pool sized for this device's link speed. */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
589
/* Take a free TX buffer from the pool; NULL if none are available. */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}
594
/* Return @tx_buf to the free TX buffer pool for reuse. */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}
600
/* Free all TX buffers and their URBs. */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}
605
/* Allocate the TX buffer pool sized for this device's link speed. */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
611
612 /* TSO seems to be having some issue with Selective Acknowledge (SACK) that
613  * results in lost data never being retransmitted.
614  * Disable it by default now, but adds a module parameter to enable it for
615  * debug purposes (the full cause is not currently understood).
616  */
617 static bool enable_tso;
618 module_param(enable_tso, bool, 0644);
619 MODULE_PARM_DESC(enable_tso, "Enables TCP segmentation offload");
620
621 #define INT_URB_MICROFRAMES_PER_MS      8
622 static int int_urb_interval_ms = 8;
623 module_param(int_urb_interval_ms, int, 0);
624 MODULE_PARM_DESC(int_urb_interval_ms, "Override usb interrupt urb interval");
625
/* Read one 32-bit device register over the USB control pipe.
 *
 * The transfer buffer is heap-allocated rather than taken from the
 * stack because USB transfer buffers must be DMA-able.
 *
 * Returns the usb_control_msg() result (>= 0) on success or a negative
 * errno; *data is only valid on success.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	/* Don't touch the hardware once the device is gone. */
	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* Register values are little-endian on the wire. */
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
655
/* Write one 32-bit device register over the USB control pipe.
 *
 * The value is staged in a heap buffer (USB transfer buffers must be
 * DMA-able) and converted to little-endian before transfer.
 *
 * Returns the usb_control_msg() result (>= 0) on success or a negative
 * errno on failure.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	/* Don't touch the hardware once the device is gone. */
	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
686
687 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
688                               u32 data)
689 {
690         int ret;
691         u32 buf;
692
693         ret = lan78xx_read_reg(dev, reg, &buf);
694         if (ret < 0)
695                 return ret;
696
697         buf &= ~mask;
698         buf |= (mask & data);
699
700         ret = lan78xx_write_reg(dev, reg, buf);
701         if (ret < 0)
702                 return ret;
703
704         return 0;
705 }
706
707 static int lan78xx_read_stats(struct lan78xx_net *dev,
708                               struct lan78xx_statstage *data)
709 {
710         int ret = 0;
711         int i;
712         struct lan78xx_statstage *stats;
713         u32 *src;
714         u32 *dst;
715
716         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
717         if (!stats)
718                 return -ENOMEM;
719
720         ret = usb_control_msg(dev->udev,
721                               usb_rcvctrlpipe(dev->udev, 0),
722                               USB_VENDOR_REQUEST_GET_STATS,
723                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
724                               0,
725                               0,
726                               (void *)stats,
727                               sizeof(*stats),
728                               USB_CTRL_SET_TIMEOUT);
729         if (likely(ret >= 0)) {
730                 src = (u32 *)stats;
731                 dst = (u32 *)data;
732                 for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
733                         le32_to_cpus(&src[i]);
734                         dst[i] = src[i];
735                 }
736         } else {
737                 netdev_warn(dev->net,
738                             "Failed to read stat ret = %d", ret);
739         }
740
741         kfree(stats);
742
743         return ret;
744 }
745
/* Compare the freshly-read @member against the last saved snapshot; a
 * value that went backwards means the hardware counter wrapped, so
 * record one more rollover for that member.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
751
/* Detect 32-bit wraps on every hardware counter since the previous
 * snapshot, then save @stats as the new reference snapshot.
 * Called with the fresh reading from lan78xx_read_stats().
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* This reading becomes the baseline for the next wrap check. */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
805
806 static void lan78xx_update_stats(struct lan78xx_net *dev)
807 {
808         u32 *p, *count, *max;
809         u64 *data;
810         int i;
811         struct lan78xx_statstage lan78xx_stats;
812
813         if (usb_autopm_get_interface(dev->intf) < 0)
814                 return;
815
816         p = (u32 *)&lan78xx_stats;
817         count = (u32 *)&dev->stats.rollover_count;
818         max = (u32 *)&dev->stats.rollover_max;
819         data = (u64 *)&dev->stats.curr_stat;
820
821         mutex_lock(&dev->stats.access_lock);
822
823         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
824                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
825
826         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
827                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
828
829         mutex_unlock(&dev->stats.access_lock);
830
831         usb_autopm_put_interface(dev->intf);
832 }
833
834 /* Loop until the read is completed with timeout called with phy_mutex held */
835 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
836 {
837         unsigned long start_time = jiffies;
838         u32 val;
839         int ret;
840
841         do {
842                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
843                 if (unlikely(ret < 0))
844                         return -EIO;
845
846                 if (!(val & MII_ACC_MII_BUSY_))
847                         return 0;
848         } while (!time_after(jiffies, start_time + HZ));
849
850         return -EIO;
851 }
852
853 static inline u32 mii_access(int id, int index, int read)
854 {
855         u32 ret;
856
857         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
858         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
859         if (read)
860                 ret |= MII_ACC_MII_READ_;
861         else
862                 ret |= MII_ACC_MII_WRITE_;
863         ret |= MII_ACC_MII_BUSY_;
864
865         return ret;
866 }
867
868 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
869 {
870         unsigned long start_time = jiffies;
871         u32 val;
872         int ret;
873
874         do {
875                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
876                 if (unlikely(ret < 0))
877                         return -EIO;
878
879                 if (!(val & E2P_CMD_EPC_BUSY_) ||
880                     (val & E2P_CMD_EPC_TIMEOUT_))
881                         break;
882                 usleep_range(40, 100);
883         } while (!time_after(jiffies, start_time + HZ));
884
885         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
886                 netdev_warn(dev->net, "EEPROM read operation timeout");
887                 return -EIO;
888         }
889
890         return 0;
891 }
892
893 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
894 {
895         unsigned long start_time = jiffies;
896         u32 val;
897         int ret;
898
899         do {
900                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
901                 if (unlikely(ret < 0))
902                         return -EIO;
903
904                 if (!(val & E2P_CMD_EPC_BUSY_))
905                         return 0;
906
907                 usleep_range(40, 100);
908         } while (!time_after(jiffies, start_time + HZ));
909
910         netdev_warn(dev->net, "EEPROM is busy");
911         return -EIO;
912 }
913
914 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
915                                    u32 length, u8 *data)
916 {
917         u32 val;
918         u32 saved;
919         int i, ret;
920         int retval;
921
922         /* depends on chip, some EEPROM pins are muxed with LED function.
923          * disable & restore LED function to access EEPROM.
924          */
925         ret = lan78xx_read_reg(dev, HW_CFG, &val);
926         saved = val;
927         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
928                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
929                 ret = lan78xx_write_reg(dev, HW_CFG, val);
930         }
931
932         retval = lan78xx_eeprom_confirm_not_busy(dev);
933         if (retval)
934                 return retval;
935
936         for (i = 0; i < length; i++) {
937                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
938                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
939                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
940                 if (unlikely(ret < 0)) {
941                         retval = -EIO;
942                         goto exit;
943                 }
944
945                 retval = lan78xx_wait_eeprom(dev);
946                 if (retval < 0)
947                         goto exit;
948
949                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
950                 if (unlikely(ret < 0)) {
951                         retval = -EIO;
952                         goto exit;
953                 }
954
955                 data[i] = val & 0xFF;
956                 offset++;
957         }
958
959         retval = 0;
960 exit:
961         if (dev->chipid == ID_REV_CHIP_ID_7800_)
962                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
963
964         return retval;
965 }
966
967 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
968                                u32 length, u8 *data)
969 {
970         u8 sig;
971         int ret;
972
973         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
974         if ((ret == 0) && (sig == EEPROM_INDICATOR))
975                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
976         else
977                 ret = -EINVAL;
978
979         return ret;
980 }
981
982 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
983                                     u32 length, u8 *data)
984 {
985         u32 val;
986         u32 saved;
987         int i, ret;
988         int retval;
989
990         /* depends on chip, some EEPROM pins are muxed with LED function.
991          * disable & restore LED function to access EEPROM.
992          */
993         ret = lan78xx_read_reg(dev, HW_CFG, &val);
994         saved = val;
995         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
996                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
997                 ret = lan78xx_write_reg(dev, HW_CFG, val);
998         }
999
1000         retval = lan78xx_eeprom_confirm_not_busy(dev);
1001         if (retval)
1002                 goto exit;
1003
1004         /* Issue write/erase enable command */
1005         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
1006         ret = lan78xx_write_reg(dev, E2P_CMD, val);
1007         if (unlikely(ret < 0)) {
1008                 retval = -EIO;
1009                 goto exit;
1010         }
1011
1012         retval = lan78xx_wait_eeprom(dev);
1013         if (retval < 0)
1014                 goto exit;
1015
1016         for (i = 0; i < length; i++) {
1017                 /* Fill data register */
1018                 val = data[i];
1019                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
1020                 if (ret < 0) {
1021                         retval = -EIO;
1022                         goto exit;
1023                 }
1024
1025                 /* Send "write" command */
1026                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1027                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1028                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
1029                 if (ret < 0) {
1030                         retval = -EIO;
1031                         goto exit;
1032                 }
1033
1034                 retval = lan78xx_wait_eeprom(dev);
1035                 if (retval < 0)
1036                         goto exit;
1037
1038                 offset++;
1039         }
1040
1041         retval = 0;
1042 exit:
1043         if (dev->chipid == ID_REV_CHIP_ID_7800_)
1044                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
1045
1046         return retval;
1047 }
1048
1049 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1050                                 u32 length, u8 *data)
1051 {
1052         int i;
1053         u32 buf;
1054         unsigned long timeout;
1055
1056         lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1057
1058         if (buf & OTP_PWR_DN_PWRDN_N_) {
1059                 /* clear it and wait to be cleared */
1060                 lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1061
1062                 timeout = jiffies + HZ;
1063                 do {
1064                         usleep_range(1, 10);
1065                         lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1066                         if (time_after(jiffies, timeout)) {
1067                                 netdev_warn(dev->net,
1068                                             "timeout on OTP_PWR_DN");
1069                                 return -EIO;
1070                         }
1071                 } while (buf & OTP_PWR_DN_PWRDN_N_);
1072         }
1073
1074         for (i = 0; i < length; i++) {
1075                 lan78xx_write_reg(dev, OTP_ADDR1,
1076                                   ((offset + i) >> 8) & OTP_ADDR1_15_11);
1077                 lan78xx_write_reg(dev, OTP_ADDR2,
1078                                   ((offset + i) & OTP_ADDR2_10_3));
1079
1080                 lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1081                 lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1082
1083                 timeout = jiffies + HZ;
1084                 do {
1085                         udelay(1);
1086                         lan78xx_read_reg(dev, OTP_STATUS, &buf);
1087                         if (time_after(jiffies, timeout)) {
1088                                 netdev_warn(dev->net,
1089                                             "timeout on OTP_STATUS");
1090                                 return -EIO;
1091                         }
1092                 } while (buf & OTP_STATUS_BUSY_);
1093
1094                 lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1095
1096                 data[i] = (u8)(buf & 0xFF);
1097         }
1098
1099         return 0;
1100 }
1101
1102 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1103                                  u32 length, u8 *data)
1104 {
1105         int i;
1106         u32 buf;
1107         unsigned long timeout;
1108
1109         lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1110
1111         if (buf & OTP_PWR_DN_PWRDN_N_) {
1112                 /* clear it and wait to be cleared */
1113                 lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1114
1115                 timeout = jiffies + HZ;
1116                 do {
1117                         udelay(1);
1118                         lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1119                         if (time_after(jiffies, timeout)) {
1120                                 netdev_warn(dev->net,
1121                                             "timeout on OTP_PWR_DN completion");
1122                                 return -EIO;
1123                         }
1124                 } while (buf & OTP_PWR_DN_PWRDN_N_);
1125         }
1126
1127         /* set to BYTE program mode */
1128         lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1129
1130         for (i = 0; i < length; i++) {
1131                 lan78xx_write_reg(dev, OTP_ADDR1,
1132                                   ((offset + i) >> 8) & OTP_ADDR1_15_11);
1133                 lan78xx_write_reg(dev, OTP_ADDR2,
1134                                   ((offset + i) & OTP_ADDR2_10_3));
1135                 lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1136                 lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1137                 lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1138
1139                 timeout = jiffies + HZ;
1140                 do {
1141                         udelay(1);
1142                         lan78xx_read_reg(dev, OTP_STATUS, &buf);
1143                         if (time_after(jiffies, timeout)) {
1144                                 netdev_warn(dev->net,
1145                                             "Timeout on OTP_STATUS completion");
1146                                 return -EIO;
1147                         }
1148                 } while (buf & OTP_STATUS_BUSY_);
1149         }
1150
1151         return 0;
1152 }
1153
1154 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1155                             u32 length, u8 *data)
1156 {
1157         u8 sig;
1158         int ret;
1159
1160         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1161
1162         if (ret == 0) {
1163                 if (sig == OTP_INDICATOR_2)
1164                         offset += 0x100;
1165                 else if (sig != OTP_INDICATOR_1)
1166                         ret = -EINVAL;
1167                 if (!ret)
1168                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
1169         }
1170
1171         return ret;
1172 }
1173
1174 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1175 {
1176         int i, ret;
1177
1178         for (i = 0; i < 100; i++) {
1179                 u32 dp_sel;
1180
1181                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1182                 if (unlikely(ret < 0))
1183                         return -EIO;
1184
1185                 if (dp_sel & DP_SEL_DPRDY_)
1186                         return 0;
1187
1188                 usleep_range(40, 100);
1189         }
1190
1191         netdev_warn(dev->net, "%s timed out", __func__);
1192
1193         return -EIO;
1194 }
1195
1196 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
1197                                   u32 addr, u32 length, u32 *buf)
1198 {
1199         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1200         u32 dp_sel;
1201         int i, ret;
1202
1203         if (usb_autopm_get_interface(dev->intf) < 0)
1204                 return 0;
1205
1206         mutex_lock(&pdata->dataport_mutex);
1207
1208         ret = lan78xx_dataport_wait_not_busy(dev);
1209         if (ret < 0)
1210                 goto done;
1211
1212         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1213
1214         dp_sel &= ~DP_SEL_RSEL_MASK_;
1215         dp_sel |= ram_select;
1216         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1217
1218         for (i = 0; i < length; i++) {
1219                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1220
1221                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1222
1223                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1224
1225                 ret = lan78xx_dataport_wait_not_busy(dev);
1226                 if (ret < 0)
1227                         goto done;
1228         }
1229
1230 done:
1231         mutex_unlock(&pdata->dataport_mutex);
1232         usb_autopm_put_interface(dev->intf);
1233
1234         return ret;
1235 }
1236
1237 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1238                                     int index, u8 addr[ETH_ALEN])
1239 {
1240         u32 temp;
1241
1242         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1243                 temp = addr[3];
1244                 temp = addr[2] | (temp << 8);
1245                 temp = addr[1] | (temp << 8);
1246                 temp = addr[0] | (temp << 8);
1247                 pdata->pfilter_table[index][1] = temp;
1248                 temp = addr[5];
1249                 temp = addr[4] | (temp << 8);
1250                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1251                 pdata->pfilter_table[index][0] = temp;
1252         }
1253 }
1254
1255 /* returns hash bit number for given MAC address */
1256 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1257 {
1258         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1259 }
1260
1261 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1262 {
1263         struct lan78xx_priv *pdata =
1264                         container_of(param, struct lan78xx_priv, set_multicast);
1265         struct lan78xx_net *dev = pdata->dev;
1266         int i;
1267
1268         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1269                   pdata->rfe_ctl);
1270
1271         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1272                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1273
1274         for (i = 1; i < NUM_OF_MAF; i++) {
1275                 lan78xx_write_reg(dev, MAF_HI(i), 0);
1276                 lan78xx_write_reg(dev, MAF_LO(i),
1277                                   pdata->pfilter_table[i][1]);
1278                 lan78xx_write_reg(dev, MAF_HI(i),
1279                                   pdata->pfilter_table[i][0]);
1280         }
1281
1282         lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1283 }
1284
/* ndo_set_rx_mode handler: rebuild the RX filter shadow state (perfect
 * filters, 512-bit multicast hash, rfe_ctl flags) from the netdev flags
 * and multicast list, then defer the register writes to a workqueue
 * because this callback runs in atomic context.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* Start from a clean slate: clear all filtering modes and tables */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	/* Broadcast is always accepted */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		/* The first 32 multicast addresses use the perfect-match
		 * filters; any overflow falls back to the hash table.
		 */
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1348
1349 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1350                                       u16 lcladv, u16 rmtadv)
1351 {
1352         u32 flow = 0, fct_flow = 0;
1353         u8 cap;
1354
1355         if (dev->fc_autoneg)
1356                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1357         else
1358                 cap = dev->fc_request_control;
1359
1360         if (cap & FLOW_CTRL_TX)
1361                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1362
1363         if (cap & FLOW_CTRL_RX)
1364                 flow |= FLOW_CR_RX_FCEN_;
1365
1366         if (dev->udev->speed == USB_SPEED_SUPER)
1367                 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
1368         else if (dev->udev->speed == USB_SPEED_HIGH)
1369                 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
1370
1371         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1372                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1373                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1374
1375         lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1376
1377         /* threshold value should be set before enabling flow */
1378         lan78xx_write_reg(dev, FLOW, flow);
1379
1380         return 0;
1381 }
1382
1383 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1384
/* Reset the MAC via the self-clearing MAC_CR_RST_ bit, holding phy_mutex
 * so no MDIO transaction is in flight during the reset, and wait up to
 * ~1s for completion.  Returns 0 on success, a negative register-access
 * error, or -ETIMEDOUT.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		/* Reset bit self-clears when the reset is finished */
		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}
1430
/* Handle a PHY interrupt / link-change event (keventd EVENT_LINK_RESET).
 *
 * Acks the chip-level and PHY-level interrupt sources, samples the link
 * state, then on an up->down transition resets the MAC and stops the
 * statistics timer; on down->up it reprograms USB U1/U2 LPM for the
 * negotiated speed, updates flow control, restarts stats polling and
 * kicks the RX path.  Returns 0 or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	/* Acknowledge any pending PHY interrupt, lest it be the last */
	phy_read(phydev, LAN88XX_INT_STS);

	/* Sample the link state under the PHY lock so it cannot change
	 * between phy_read_status() and reading phydev->link.
	 */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		/* no point polling counters while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* Gigabit links cannot tolerate U2 exit latency; slower
		 * links may use both U1 and U2 low-power states.
		 */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		/* restart statistics polling now that the link is up */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		napi_schedule(&dev->napi);
	}

	return 0;
}
1526
1527 /* some work can't be done in tasklets, so we use keventd
1528  *
1529  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1530  * but tasklet_schedule() doesn't.      hope the failure is rare.
1531  */
1532 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1533 {
1534         set_bit(work, &dev->flags);
1535         if (!schedule_delayed_work(&dev->wq, 0))
1536                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1537 }
1538
1539 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1540 {
1541         u32 intdata;
1542
1543         if (urb->actual_length != 4) {
1544                 netdev_warn(dev->net,
1545                             "unexpected urb length %d", urb->actual_length);
1546                 return;
1547         }
1548
1549         intdata = get_unaligned_le32(urb->transfer_buffer);
1550
1551         if (intdata & INT_ENP_PHY_INT) {
1552                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1553                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1554
1555                 if (dev->domain_data.phyirq > 0)
1556                         generic_handle_irq_safe(dev->domain_data.phyirq);
1557         } else {
1558                 netdev_warn(dev->net,
1559                             "unexpected interrupt: 0x%08x\n", intdata);
1560         }
1561 }
1562
/* ethtool get_eeprom_len: size of the EEPROM region exposed to userspace */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1567
1568 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1569                                       struct ethtool_eeprom *ee, u8 *data)
1570 {
1571         struct lan78xx_net *dev = netdev_priv(netdev);
1572         int ret;
1573
1574         ret = usb_autopm_get_interface(dev->intf);
1575         if (ret)
1576                 return ret;
1577
1578         ee->magic = LAN78XX_EEPROM_MAGIC;
1579
1580         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1581
1582         usb_autopm_put_interface(dev->intf);
1583
1584         return ret;
1585 }
1586
1587 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1588                                       struct ethtool_eeprom *ee, u8 *data)
1589 {
1590         struct lan78xx_net *dev = netdev_priv(netdev);
1591         int ret;
1592
1593         ret = usb_autopm_get_interface(dev->intf);
1594         if (ret)
1595                 return ret;
1596
1597         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1598          * to load data from EEPROM
1599          */
1600         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1601                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1602         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1603                  (ee->offset == 0) &&
1604                  (ee->len == 512) &&
1605                  (data[0] == OTP_INDICATOR_1))
1606                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1607
1608         usb_autopm_put_interface(dev->intf);
1609
1610         return ret;
1611 }
1612
1613 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1614                                 u8 *data)
1615 {
1616         if (stringset == ETH_SS_STATS)
1617                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1618 }
1619
1620 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1621 {
1622         if (sset == ETH_SS_STATS)
1623                 return ARRAY_SIZE(lan78xx_gstrings);
1624         else
1625                 return -EOPNOTSUPP;
1626 }
1627
1628 static void lan78xx_get_stats(struct net_device *netdev,
1629                               struct ethtool_stats *stats, u64 *data)
1630 {
1631         struct lan78xx_net *dev = netdev_priv(netdev);
1632
1633         lan78xx_update_stats(dev);
1634
1635         mutex_lock(&dev->stats.access_lock);
1636         memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1637         mutex_unlock(&dev->stats.access_lock);
1638 }
1639
1640 static void lan78xx_get_wol(struct net_device *netdev,
1641                             struct ethtool_wolinfo *wol)
1642 {
1643         struct lan78xx_net *dev = netdev_priv(netdev);
1644         int ret;
1645         u32 buf;
1646         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1647
1648         if (usb_autopm_get_interface(dev->intf) < 0)
1649                 return;
1650
1651         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1652         if (unlikely(ret < 0)) {
1653                 wol->supported = 0;
1654                 wol->wolopts = 0;
1655         } else {
1656                 if (buf & USB_CFG_RMT_WKP_) {
1657                         wol->supported = WAKE_ALL;
1658                         wol->wolopts = pdata->wol;
1659                 } else {
1660                         wol->supported = 0;
1661                         wol->wolopts = 0;
1662                 }
1663         }
1664
1665         usb_autopm_put_interface(dev->intf);
1666 }
1667
1668 static int lan78xx_set_wol(struct net_device *netdev,
1669                            struct ethtool_wolinfo *wol)
1670 {
1671         struct lan78xx_net *dev = netdev_priv(netdev);
1672         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1673         int ret;
1674
1675         ret = usb_autopm_get_interface(dev->intf);
1676         if (ret < 0)
1677                 return ret;
1678
1679         if (wol->wolopts & ~WAKE_ALL)
1680                 return -EINVAL;
1681
1682         pdata->wol = wol->wolopts;
1683
1684         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1685
1686         phy_ethtool_set_wol(netdev->phydev, wol);
1687
1688         usb_autopm_put_interface(dev->intf);
1689
1690         return ret;
1691 }
1692
/* ethtool .get_eee: report Energy-Efficient-Ethernet state.
 *
 * Queries the PHY first, then overlays the MAC-side EEE enable bit and the
 * Tx LPI request delay from device registers.  Returns 0 on success or a
 * negative errno from autopm/PHY access.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	/* keep the device resumed while touching registers */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	/* NOTE(review): read result is not checked before using buf —
	 * on a failed USB transfer buf may be stale; confirm intent.
	 */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only when both ends advertise a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1730
1731 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1732 {
1733         struct lan78xx_net *dev = netdev_priv(net);
1734         int ret;
1735         u32 buf;
1736
1737         ret = usb_autopm_get_interface(dev->intf);
1738         if (ret < 0)
1739                 return ret;
1740
1741         if (edata->eee_enabled) {
1742                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1743                 buf |= MAC_CR_EEE_EN_;
1744                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1745
1746                 phy_ethtool_set_eee(net->phydev, edata);
1747
1748                 buf = (u32)edata->tx_lpi_timer;
1749                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1750         } else {
1751                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1752                 buf &= ~MAC_CR_EEE_EN_;
1753                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1754         }
1755
1756         usb_autopm_put_interface(dev->intf);
1757
1758         return 0;
1759 }
1760
1761 static u32 lan78xx_get_link(struct net_device *net)
1762 {
1763         u32 link;
1764
1765         mutex_lock(&net->phydev->lock);
1766         phy_read_status(net->phydev);
1767         link = net->phydev->link;
1768         mutex_unlock(&net->phydev->lock);
1769
1770         return link;
1771 }
1772
1773 static void lan78xx_get_drvinfo(struct net_device *net,
1774                                 struct ethtool_drvinfo *info)
1775 {
1776         struct lan78xx_net *dev = netdev_priv(net);
1777
1778         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1779         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1780 }
1781
/* ethtool .get_msglevel: return the current netif message-enable mask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1788
/* ethtool .set_msglevel: set the netif message-enable mask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1795
1796 static int lan78xx_get_link_ksettings(struct net_device *net,
1797                                       struct ethtool_link_ksettings *cmd)
1798 {
1799         struct lan78xx_net *dev = netdev_priv(net);
1800         struct phy_device *phydev = net->phydev;
1801         int ret;
1802
1803         ret = usb_autopm_get_interface(dev->intf);
1804         if (ret < 0)
1805                 return ret;
1806
1807         phy_ethtool_ksettings_get(phydev, cmd);
1808
1809         usb_autopm_put_interface(dev->intf);
1810
1811         return ret;
1812 }
1813
/* ethtool .set_link_ksettings: apply speed/duplex/autoneg to the PHY.
 *
 * When autonegotiation is disabled the link is bounced by briefly setting
 * BMCR loopback so the partner re-acquires the forced parameters.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		/* pulse loopback for ~1ms, then restore original BMCR */
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1841
1842 static void lan78xx_get_pause(struct net_device *net,
1843                               struct ethtool_pauseparam *pause)
1844 {
1845         struct lan78xx_net *dev = netdev_priv(net);
1846         struct phy_device *phydev = net->phydev;
1847         struct ethtool_link_ksettings ecmd;
1848
1849         phy_ethtool_ksettings_get(phydev, &ecmd);
1850
1851         pause->autoneg = dev->fc_autoneg;
1852
1853         if (dev->fc_request_control & FLOW_CTRL_TX)
1854                 pause->tx_pause = 1;
1855
1856         if (dev->fc_request_control & FLOW_CTRL_RX)
1857                 pause->rx_pause = 1;
1858 }
1859
/* ethtool .set_pauseparam: apply flow-control configuration.
 *
 * Stores the requested Rx/Tx pause flags in the driver; when link
 * autonegotiation is active the Pause/Asym_Pause advertisement bits are
 * rebuilt from the request and renegotiated via the PHY.  Returns 0 or
 * -EINVAL when pause autoneg is requested while link autoneg is off.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg only makes sense while link autoneg is enabled */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* drop old pause bits, then OR in the linkmode equivalent
		 * of the requested flow-control mode
		 */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1904
1905 static int lan78xx_get_regs_len(struct net_device *netdev)
1906 {
1907         if (!netdev->phydev)
1908                 return (sizeof(lan78xx_regs));
1909         else
1910                 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1911 }
1912
1913 static void
1914 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1915                  void *buf)
1916 {
1917         u32 *data = buf;
1918         int i, j;
1919         struct lan78xx_net *dev = netdev_priv(netdev);
1920
1921         /* Read Device/MAC registers */
1922         for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1923                 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1924
1925         if (!netdev->phydev)
1926                 return;
1927
1928         /* Read PHY registers */
1929         for (j = 0; j < 32; i++, j++)
1930                 data[i] = phy_read(netdev->phydev, j);
1931 }
1932
/* ethtool operations table; everything is delegated to the lan78xx_*
 * helpers above or to generic phylib implementations.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link       = lan78xx_get_link,
	.nway_reset     = phy_ethtool_nway_reset,
	.get_drvinfo    = lan78xx_get_drvinfo,
	.get_msglevel   = lan78xx_get_msglevel,
	.set_msglevel   = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom     = lan78xx_ethtool_get_eeprom,
	.set_eeprom     = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings    = lan78xx_get_strings,
	.get_wol        = lan78xx_get_wol,
	.set_wol        = lan78xx_set_wol,
	.get_ts_info    = ethtool_op_get_ts_info,
	.get_eee        = lan78xx_get_eee,
	.set_eee        = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len   = lan78xx_get_regs_len,
	.get_regs       = lan78xx_get_regs,
};
1957
/* Establish the device MAC address at init time.
 *
 * Precedence: address already latched in RX_ADDRL/H registers, then
 * platform/Device Tree, then EEPROM/OTP, then a random address.  Whatever
 * wins is written back to the RX address registers, mirrored into MAC
 * address filter slot 0, and published to the net_device.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* registers hold the address little-endian: low word first */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* mirror into perfect-filter slot 0 so unicast RX matches */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	eth_hw_addr_set(dev->net, addr);
}
2006
2007 /* MDIO read and write wrappers for phylib */
2008 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
2009 {
2010         struct lan78xx_net *dev = bus->priv;
2011         u32 val, addr;
2012         int ret;
2013
2014         ret = usb_autopm_get_interface(dev->intf);
2015         if (ret < 0)
2016                 return ret;
2017
2018         mutex_lock(&dev->phy_mutex);
2019
2020         /* confirm MII not busy */
2021         ret = lan78xx_phy_wait_not_busy(dev);
2022         if (ret < 0)
2023                 goto done;
2024
2025         /* set the address, index & direction (read from PHY) */
2026         addr = mii_access(phy_id, idx, MII_READ);
2027         ret = lan78xx_write_reg(dev, MII_ACC, addr);
2028
2029         ret = lan78xx_phy_wait_not_busy(dev);
2030         if (ret < 0)
2031                 goto done;
2032
2033         ret = lan78xx_read_reg(dev, MII_DATA, &val);
2034
2035         ret = (int)(val & 0xFFFF);
2036
2037 done:
2038         mutex_unlock(&dev->phy_mutex);
2039         usb_autopm_put_interface(dev->intf);
2040
2041         return ret;
2042 }
2043
2044 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2045                                  u16 regval)
2046 {
2047         struct lan78xx_net *dev = bus->priv;
2048         u32 val, addr;
2049         int ret;
2050
2051         ret = usb_autopm_get_interface(dev->intf);
2052         if (ret < 0)
2053                 return ret;
2054
2055         mutex_lock(&dev->phy_mutex);
2056
2057         /* confirm MII not busy */
2058         ret = lan78xx_phy_wait_not_busy(dev);
2059         if (ret < 0)
2060                 goto done;
2061
2062         val = (u32)regval;
2063         ret = lan78xx_write_reg(dev, MII_DATA, val);
2064
2065         /* set the address, index & direction (write to PHY) */
2066         addr = mii_access(phy_id, idx, MII_WRITE);
2067         ret = lan78xx_write_reg(dev, MII_ACC, addr);
2068
2069         ret = lan78xx_phy_wait_not_busy(dev);
2070         if (ret < 0)
2071                 goto done;
2072
2073 done:
2074         mutex_unlock(&dev->phy_mutex);
2075         usb_autopm_put_interface(dev->intf);
2076         return 0;
2077 }
2078
/* Allocate and register the MDIO bus for this device.
 *
 * The PHY scan mask depends on the chip: LAN7800/7850 have an internal
 * PHY at address 1; LAN7801 scans external addresses 0-7.  An optional
 * "mdio" Device Tree child node is honored.  Returns 0 or a negative
 * errno (the bus is freed on registration failure).
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* unique bus id derived from the USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
2125
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2131
/* phylib link-change callback: just log the new link state. */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;

	phy_print_status(phydev);
}
2138
/* irq_domain .map: bind a virtual IRQ to our chip data and handler. */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	/* not probeable: these IRQs come in over the USB interrupt EP */
	irq_set_noprobe(irq);

	return 0;
}
2150
/* irq_domain .unmap: undo irq_map() for a virtual IRQ. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2156
/* Operations for the device's internal interrupt domain. */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2161
/* irqchip .irq_mask: clear the enable bit in the cached mask only;
 * the hardware register is written later in irq_bus_sync_unlock().
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2168
/* irqchip .irq_unmask: set the enable bit in the cached mask only;
 * the hardware register is written later in irq_bus_sync_unlock().
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2175
/* irqchip .irq_bus_lock: begin a slow-bus (USB) update transaction. */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2182
/* irqchip .irq_bus_sync_unlock: flush the cached enable mask to the
 * INT_EP_CTL register, then release the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic contex.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
2199
/* irqchip using the bus_lock/bus_sync_unlock pattern, since enable-mask
 * updates require (sleeping) USB register access.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2207
/* Create the IRQ domain for device-internal interrupt sources and map
 * the PHY interrupt.
 *
 * Seeds the cached enable mask from the current INT_EP_CTL value and
 * records the resulting domain/virq in domain_data.  Returns 0 or
 * -EINVAL if the domain or the PHY mapping cannot be created.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* start from whatever the hardware currently has enabled */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2246
/* Dispose of the PHY IRQ mapping and tear down the IRQ domain. */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
2258
/* PHY fixup for an external LAN8835 on LAN7801: route the shared pin to
 * IRQ_N mode and set up the RGMII TX clock delay on the MAC side.
 * Returning 1 tells phylib the fixup applied.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	/* vendor MMD register 0x8010; bits [12:11] select the pin mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	lan78xx_write_reg(dev, MAC_RGMII_ID,
			  MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2281
/* PHY fixup for an external KSZ9031RNX on LAN7801: program RGMII pad
 * skews so the PHY provides the RX clock delay.  Returning 1 tells
 * phylib the fixup applied.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2298
/* LAN7801-specific PHY discovery.
 *
 * If no PHY answers on the bus, register a 1 Gbps full-duplex fixed PHY
 * and configure the MAC's RGMII clocking for it.  Otherwise register the
 * KSZ9031RNX/LAN8835 fixups for the external PHY found.  Returns the
 * phy_device, or NULL on failure.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* MAC provides the TX clock delay; tune the TX DLL and
		 * enable the 125 MHz / 25 MHz reference clocks
		 */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}
2353
/* Find, connect and configure the PHY for this chip.
 *
 * Chip-specific discovery (internal PHY on 7800/7850, external or fixed
 * PHY on 7801) is followed by: IRQ selection (dedicated phyirq or
 * polling), AUTOMDIX, attaching via phy_connect_direct(), dropping
 * 1000T-half, advertising both pause directions, optional EEE and LED
 * setup from Device Tree, and finally kicking off autonegotiation.
 * Returns 0 or -EIO.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* on 7801, undo whatever lan7801_phy_init() registered */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	/* optional EEE enable via the PHY's Device Tree node */
	if (of_property_read_bool(phydev->mdio.dev.of_node,
				  "microchip,eee-enabled")) {
		struct ethtool_eee edata;
		memset(&edata, 0, sizeof(edata));
		edata.cmd = ETHTOOL_SEEE;
		edata.advertised = ADVERTISED_1000baseT_Full |
				   ADVERTISED_100baseT_Full;
		edata.eee_enabled = true;
		edata.tx_lpi_enabled = true;
		if (of_property_read_u32(dev->udev->dev.of_node,
					 "microchip,tx-lpi-timer",
					 &edata.tx_lpi_timer))
			edata.tx_lpi_timer = 600; /* non-aggressive */
		(void)lan78xx_set_eee(dev->net, &edata);
	}

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			/* enable LEDn iff the property lists a mode for it */
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2472
/* Program the MAC's maximum RX frame size (size + 4 bytes for FCS).
 *
 * The receiver is temporarily disabled around the update when it was
 * running, since MAC_RX must not be resized while RX is enabled.
 * Always returns 0.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	u32 buf;
	bool rxenabled;

	lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	lan78xx_write_reg(dev, MAC_RX, buf);

	/* re-enable RX if it was running before */
	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
2500
/* Asynchronously unlink every URB queued on q that is not already being
 * unlinked.  Returns the number of URBs whose unlink was initiated.
 *
 * The queue lock is dropped around usb_unlink_urb() (which may sleep or
 * complete synchronously), so the walk restarts from the head each time;
 * the unlink_start state marks entries already handled.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2545
2546 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2547 {
2548         struct lan78xx_net *dev = netdev_priv(netdev);
2549         int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2550         int ret;
2551
2552         /* no second zero-length packet read wanted after mtu-sized packets */
2553         if ((max_frame_len % dev->maxpacket) == 0)
2554                 return -EDOM;
2555
2556         ret = usb_autopm_get_interface(dev->intf);
2557         if (ret < 0)
2558                 return ret;
2559
2560         ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2561         if (!ret)
2562                 netdev->mtu = new_mtu;
2563
2564         usb_autopm_put_interface(dev->intf);
2565
2566         return ret;
2567 }
2568
2569 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2570 {
2571         struct lan78xx_net *dev = netdev_priv(netdev);
2572         struct sockaddr *addr = p;
2573         u32 addr_lo, addr_hi;
2574
2575         if (netif_running(netdev))
2576                 return -EBUSY;
2577
2578         if (!is_valid_ether_addr(addr->sa_data))
2579                 return -EADDRNOTAVAIL;
2580
2581         eth_hw_addr_set(netdev, addr->sa_data);
2582
2583         addr_lo = netdev->dev_addr[0] |
2584                   netdev->dev_addr[1] << 8 |
2585                   netdev->dev_addr[2] << 16 |
2586                   netdev->dev_addr[3] << 24;
2587         addr_hi = netdev->dev_addr[4] |
2588                   netdev->dev_addr[5] << 8;
2589
2590         lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2591         lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2592
2593         /* Added to support MAC address changes */
2594         lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2595         lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2596
2597         return 0;
2598 }
2599
2600 /* Enable or disable Rx checksum offload engine */
2601 static int lan78xx_set_features(struct net_device *netdev,
2602                                 netdev_features_t features)
2603 {
2604         struct lan78xx_net *dev = netdev_priv(netdev);
2605         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2606         unsigned long flags;
2607
2608         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2609
2610         if (features & NETIF_F_RXCSUM) {
2611                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2612                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2613         } else {
2614                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2615                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2616         }
2617
2618         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2619                 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2620         else
2621                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2622
2623         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2624                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2625         else
2626                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2627
2628         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2629
2630         lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2631
2632         return 0;
2633 }
2634
2635 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2636 {
2637         struct lan78xx_priv *pdata =
2638                         container_of(param, struct lan78xx_priv, set_vlan);
2639         struct lan78xx_net *dev = pdata->dev;
2640
2641         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2642                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2643 }
2644
2645 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2646                                    __be16 proto, u16 vid)
2647 {
2648         struct lan78xx_net *dev = netdev_priv(netdev);
2649         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2650         u16 vid_bit_index;
2651         u16 vid_dword_index;
2652
2653         vid_dword_index = (vid >> 5) & 0x7F;
2654         vid_bit_index = vid & 0x1F;
2655
2656         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2657
2658         /* defer register writes to a sleepable context */
2659         schedule_work(&pdata->set_vlan);
2660
2661         return 0;
2662 }
2663
2664 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2665                                     __be16 proto, u16 vid)
2666 {
2667         struct lan78xx_net *dev = netdev_priv(netdev);
2668         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2669         u16 vid_bit_index;
2670         u16 vid_dword_index;
2671
2672         vid_dword_index = (vid >> 5) & 0x7F;
2673         vid_bit_index = vid & 0x1F;
2674
2675         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2676
2677         /* defer register writes to a sleepable context */
2678         schedule_work(&pdata->set_vlan);
2679
2680         return 0;
2681 }
2682
2683 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2684 {
2685         int ret;
2686         u32 buf;
2687         u32 regs[6] = { 0 };
2688
2689         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2690         if (buf & USB_CFG1_LTM_ENABLE_) {
2691                 u8 temp[2];
2692                 /* Get values from EEPROM first */
2693                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2694                         if (temp[0] == 24) {
2695                                 ret = lan78xx_read_raw_eeprom(dev,
2696                                                               temp[1] * 2,
2697                                                               24,
2698                                                               (u8 *)regs);
2699                                 if (ret < 0)
2700                                         return;
2701                         }
2702                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2703                         if (temp[0] == 24) {
2704                                 ret = lan78xx_read_raw_otp(dev,
2705                                                            temp[1] * 2,
2706                                                            24,
2707                                                            (u8 *)regs);
2708                                 if (ret < 0)
2709                                         return;
2710                         }
2711                 }
2712         }
2713
2714         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2715         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2716         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2717         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2718         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2719         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2720 }
2721
2722 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
2723 {
2724         int result = 0;
2725
2726         switch (dev->udev->speed) {
2727         case USB_SPEED_SUPER:
2728                 dev->rx_urb_size = RX_SS_URB_SIZE;
2729                 dev->tx_urb_size = TX_SS_URB_SIZE;
2730                 dev->n_rx_urbs = RX_SS_URB_NUM;
2731                 dev->n_tx_urbs = TX_SS_URB_NUM;
2732                 dev->bulk_in_delay = SS_BULK_IN_DELAY;
2733                 dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2734                 break;
2735         case USB_SPEED_HIGH:
2736                 dev->rx_urb_size = RX_HS_URB_SIZE;
2737                 dev->tx_urb_size = TX_HS_URB_SIZE;
2738                 dev->n_rx_urbs = RX_HS_URB_NUM;
2739                 dev->n_tx_urbs = TX_HS_URB_NUM;
2740                 dev->bulk_in_delay = HS_BULK_IN_DELAY;
2741                 dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2742                 break;
2743         case USB_SPEED_FULL:
2744                 dev->rx_urb_size = RX_FS_URB_SIZE;
2745                 dev->tx_urb_size = TX_FS_URB_SIZE;
2746                 dev->n_rx_urbs = RX_FS_URB_NUM;
2747                 dev->n_tx_urbs = TX_FS_URB_NUM;
2748                 dev->bulk_in_delay = FS_BULK_IN_DELAY;
2749                 dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2750                 break;
2751         default:
2752                 netdev_warn(dev->net, "USB bus speed not supported\n");
2753                 result = -EIO;
2754                 break;
2755         }
2756
2757         return result;
2758 }
2759
/* Start a hardware block by setting the @hw_enable bit(s) in @reg. */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
2764
/* Stop a hardware block and wait for it to report disabled.
 *
 * @reg: control/status register of the block
 * @hw_enabled: enable bit(s); cleared here if currently set
 * @hw_disabled: bit(s) the hardware asserts once the block has stopped
 *
 * If the block was already disabled this returns immediately.  Otherwise
 * the disabled status is polled (sleeping HW_DISABLE_DELAY_MS between
 * reads) for up to HW_DISABLE_TIMEOUT.
 *
 * Returns 0 when stopped, -ETIME on poll timeout, or a negative error
 * from register access.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do  {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	ret = stopped ? 0 : -ETIME;

	return ret;
}
2804
/* Flush a FIFO by setting its @fifo_flush (reset) bit in @reg.
 * The corresponding data path must already be stopped.
 */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
2809
2810 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2811 {
2812         int ret;
2813
2814         netif_dbg(dev, drv, dev->net, "start tx path");
2815
2816         /* Start the MAC transmitter */
2817
2818         ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2819         if (ret < 0)
2820                 return ret;
2821
2822         /* Start the Tx FIFO */
2823
2824         ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2825         if (ret < 0)
2826                 return ret;
2827
2828         return 0;
2829 }
2830
2831 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2832 {
2833         int ret;
2834
2835         netif_dbg(dev, drv, dev->net, "stop tx path");
2836
2837         /* Stop the Tx FIFO */
2838
2839         ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2840         if (ret < 0)
2841                 return ret;
2842
2843         /* Stop the MAC transmitter */
2844
2845         ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2846         if (ret < 0)
2847                 return ret;
2848
2849         return 0;
2850 }
2851
/* Reset (flush) the Tx FIFO via FCT_TX_CTL_RST_.
 *
 * The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
2859
2860 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2861 {
2862         int ret;
2863
2864         netif_dbg(dev, drv, dev->net, "start rx path");
2865
2866         /* Start the Rx FIFO */
2867
2868         ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2869         if (ret < 0)
2870                 return ret;
2871
2872         /* Start the MAC receiver*/
2873
2874         ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2875         if (ret < 0)
2876                 return ret;
2877
2878         return 0;
2879 }
2880
2881 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2882 {
2883         int ret;
2884
2885         netif_dbg(dev, drv, dev->net, "stop rx path");
2886
2887         /* Stop the MAC receiver */
2888
2889         ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2890         if (ret < 0)
2891                 return ret;
2892
2893         /* Stop the Rx FIFO */
2894
2895         ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2896         if (ret < 0)
2897                 return ret;
2898
2899         return 0;
2900 }
2901
/* Reset (flush) the Rx FIFO via FCT_RX_CTL_RST_.
 *
 * The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
2909
/* Perform a "lite" chip reset and reinitialise the MAC, USB and filter
 * registers, then reset the PHY and wait for the device to report ready.
 *
 * Statement order matters throughout: each register block is configured
 * before the next is touched, mirroring the datasheet bring-up sequence.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;
	bool has_eeprom;
	bool has_otp;

	/* Probe for a configuration source; a zero-length read succeeds
	 * only when a valid EEPROM/OTP is present.
	 */
	has_eeprom = !lan78xx_read_eeprom(dev, 0, 0, NULL);
	has_otp = !lan78xx_read_otp(dev, 0, 0, NULL);

	/* Trigger the LiteReset and poll (1 ms busy-wait, 1 s cap) for
	 * the self-clearing HW_CFG_LRST_ bit.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	/* The reset cleared the MAC address registers; reprogram them */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* Program per-speed burst cap and bulk-in delay chosen by
	 * lan78xx_urb_config_init().
	 */
	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;

	/* If no valid EEPROM and no valid OTP, enable the LEDs by default */
	if (!has_eeprom && !has_otp)
	    buf |= HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	/* Enable bulk-in concatenation (multiple frames per bulk-in) */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* Ack any interrupt status latched before/through the reset */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	/* Flow control off until link parameters are known */
	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Wait for the PHY reset to self-clear AND the device to report
	 * ready (1 ms busy-wait, 1 s cap).
	 */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* NOTE(review): PHY interrupts latched before this reset are not
	 * acked here — confirm whether pending PHY ints must be cleared
	 * after the PHY reset completes.
	 */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	/* If no valid EEPROM and no valid OTP, enable AUTO negotiation */
	if (!has_eeprom && !has_otp)
	    buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}
3100
3101 static void lan78xx_init_stats(struct lan78xx_net *dev)
3102 {
3103         u32 *p;
3104         int i;
3105
3106         /* initialize for stats update
3107          * some counters are 20bits and some are 32bits
3108          */
3109         p = (u32 *)&dev->stats.rollover_max;
3110         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3111                 p[i] = 0xFFFFF;
3112
3113         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3114         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3115         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3116         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3117         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3118         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3119         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3120         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3121         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3122         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3123
3124         set_bit(EVENT_STAT_UPDATE, &dev->flags);
3125 }
3126
/* ndo_open handler: start the PHY, submit the interrupt URB, flush and
 * start the Tx/Rx data paths, then enable the queue and NAPI.
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): on any failure after phy_start() (intr URB submit, FIFO
 * flush or path start), the function returns the error without stopping
 * the PHY or killing the intr URB — confirm whether fuller unwinding is
 * intended here.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	/* keep the device resumed while we bring it up */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	/* discard any stale FIFO contents before enabling the paths */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* link state will be established by the deferred link reset */
	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
3186
/* Cancel all in-flight Tx/Rx URBs and drain the driver's skb queues.
 *
 * A wait queue on the stack is published via dev->wait so URB completion
 * (defer_bh path) can wake us; we then sleep in short intervals until
 * both txq and rxq are empty.  Finally the Rx done, Rx overflow and Tx
 * pending queues are emptied.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	/* done waiting: unpublish the wait queue before it leaves scope */
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3222
/* ndo_stop handler: quiesce the device, cancel URBs and deferred work.
 * Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	/* stop periodic statistics collection */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	/* cancel in-flight URBs and drain the skb queues */
	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* drop the PM reference taken in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3271
/* Move @skb from @list to dev->rxq_done, recording its new @state, and
 * kick NAPI if rxq_done was empty.  Returns the skb's previous state.
 *
 * Lock handoff: @list->lock is taken with IRQs saved; it is released
 * WITHOUT restoring the IRQ state, rxq_done.lock is taken, and the saved
 * flags are only restored when rxq_done.lock is dropped — so IRQs stay
 * off across the whole transfer.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	/* schedule NAPI only on the empty -> non-empty transition */
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3295
/* Tx URB completion handler (interrupt context).
 *
 * Updates Tx statistics, classifies the error if the URB failed,
 * releases the Tx buffer, and reschedules NAPI when more pending Tx data
 * is waiting with no URB in flight.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: defer a halt-clear to process
			 * context
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* bus-level problem: stop feeding the device */
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	/* balances the async PM get taken when the URB was submitted */
	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
}
3349
3350 static void lan78xx_queue_skb(struct sk_buff_head *list,
3351                               struct sk_buff *newsk, enum skb_state state)
3352 {
3353         struct skb_data *entry = (struct skb_data *)newsk->cb;
3354
3355         __skb_queue_tail(list, newsk);
3356         entry->state = state;
3357 }
3358
/* Bytes of Tx buffer capacity currently available: free URBs x URB size. */
static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
{
	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
}
3363
/* Total payload bytes queued on txq_pend awaiting URB submission. */
static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
{
	return dev->tx_pend_data_len;
}
3368
3369 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3370                                     struct sk_buff *skb,
3371                                     unsigned int *tx_pend_data_len)
3372 {
3373         unsigned long flags;
3374
3375         spin_lock_irqsave(&dev->txq_pend.lock, flags);
3376
3377         __skb_queue_tail(&dev->txq_pend, skb);
3378
3379         dev->tx_pend_data_len += skb->len;
3380         *tx_pend_data_len = dev->tx_pend_data_len;
3381
3382         spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3383 }
3384
3385 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3386                                          struct sk_buff *skb,
3387                                          unsigned int *tx_pend_data_len)
3388 {
3389         unsigned long flags;
3390
3391         spin_lock_irqsave(&dev->txq_pend.lock, flags);
3392
3393         __skb_queue_head(&dev->txq_pend, skb);
3394
3395         dev->tx_pend_data_len += skb->len;
3396         *tx_pend_data_len = dev->tx_pend_data_len;
3397
3398         spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3399 }
3400
3401 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3402                                     struct sk_buff **skb,
3403                                     unsigned int *tx_pend_data_len)
3404 {
3405         unsigned long flags;
3406
3407         spin_lock_irqsave(&dev->txq_pend.lock, flags);
3408
3409         *skb = __skb_dequeue(&dev->txq_pend);
3410         if (*skb)
3411                 dev->tx_pend_data_len -= (*skb)->len;
3412         *tx_pend_data_len = dev->tx_pend_data_len;
3413
3414         spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3415 }
3416
/* ndo_start_xmit handler: queue @skb on the Tx pending list and let NAPI
 * pack pending skbs into Tx URBs.  Always returns NETDEV_TX_OK (the skb
 * is owned by txq_pend from here on).
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* device is autosuspended: schedule the work that resumes it */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3452
3453 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3454 {
3455         struct lan78xx_priv *pdata = NULL;
3456         int ret;
3457         int i;
3458
3459         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3460
3461         pdata = (struct lan78xx_priv *)(dev->data[0]);
3462         if (!pdata) {
3463                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3464                 return -ENOMEM;
3465         }
3466
3467         pdata->dev = dev;
3468
3469         spin_lock_init(&pdata->rfe_ctl_lock);
3470         mutex_init(&pdata->dataport_mutex);
3471
3472         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3473
3474         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3475                 pdata->vlan_table[i] = 0;
3476
3477         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3478
3479         dev->net->features = 0;
3480
3481         if (DEFAULT_TX_CSUM_ENABLE)
3482                 dev->net->features |= NETIF_F_HW_CSUM;
3483
3484         if (DEFAULT_RX_CSUM_ENABLE)
3485                 dev->net->features |= NETIF_F_RXCSUM;
3486
3487         if (DEFAULT_TSO_CSUM_ENABLE) {
3488                 dev->net->features |= NETIF_F_SG;
3489                 /* Use module parameter to control TCP segmentation offload as
3490                  * it appears to cause issues.
3491                  */
3492                 if (enable_tso)
3493                         dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6;
3494         }
3495
3496         if (DEFAULT_VLAN_RX_OFFLOAD)
3497                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3498
3499         if (DEFAULT_VLAN_FILTER_ENABLE)
3500                 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3501
3502         dev->net->hw_features = dev->net->features;
3503
3504         ret = lan78xx_setup_irq_domain(dev);
3505         if (ret < 0) {
3506                 netdev_warn(dev->net,
3507                             "lan78xx_setup_irq_domain() failed : %d", ret);
3508                 goto out1;
3509         }
3510
3511         /* Init all registers */
3512         ret = lan78xx_reset(dev);
3513         if (ret) {
3514                 netdev_warn(dev->net, "Registers INIT FAILED....");
3515                 goto out2;
3516         }
3517
3518         ret = lan78xx_mdio_init(dev);
3519         if (ret) {
3520                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3521                 goto out2;
3522         }
3523
3524         dev->net->flags |= IFF_MULTICAST;
3525
3526         pdata->wol = WAKE_MAGIC;
3527
3528         return ret;
3529
3530 out2:
3531         lan78xx_remove_irq_domain(dev);
3532
3533 out1:
3534         netdev_warn(dev->net, "Bind routine FAILED");
3535         cancel_work_sync(&pdata->set_multicast);
3536         cancel_work_sync(&pdata->set_vlan);
3537         kfree(pdata);
3538         return ret;
3539 }
3540
/* Release everything acquired by lan78xx_bind(): IRQ domain, MDIO bus
 * and the private data hung off dev->data[0].
 */
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_irq_domain(dev);

	lan78xx_remove_mdio(dev);

	if (pdata) {
		/* Flush deferred register writes before freeing their context */
		cancel_work_sync(&pdata->set_multicast);
		cancel_work_sync(&pdata->set_vlan);
		netif_dbg(dev, ifdown, dev->net, "free pdata");
		kfree(pdata);
		pdata = NULL;
		dev->data[0] = 0;
	}
}
3558
/* Set the checksum state of an Rx SKB from the hardware Rx command words.
 *
 * The device-computed checksum (upper half of rx_cmd_b) is only trusted
 * when Rx checksum offload is enabled, the "ignore checksum" flag is not
 * set, and any VLAN tag present is being stripped by hardware.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
3576
3577 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3578                                     struct sk_buff *skb,
3579                                     u32 rx_cmd_a, u32 rx_cmd_b)
3580 {
3581         if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3582             (rx_cmd_a & RX_CMD_A_FVTG_))
3583                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3584                                        (rx_cmd_b & 0xffff));
3585 }
3586
/* Hand a received frame to the network stack via GRO, updating the
 * interface Rx counters. Frames claimed by the Rx timestamping core
 * (skb_defer_rx_timestamp) are delivered later by that core instead.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* Clear our skb_data bookkeeping before the stack reuses skb->cb */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}
3603
/* Parse a completed Rx URB buffer, which may contain several frames,
 * each preceded by three little-endian command words (rx_cmd_a/b/c)
 * and followed by padding to a 4-byte boundary.
 *
 * Each good frame is copied into a fresh NAPI SKB and delivered (or
 * queued on rxq_overflow once the budget is exhausted, since parsing
 * of a URB buffer cannot be suspended part-way).
 *
 * Returns 1 when the buffer was parsed, 0 when it was malformed and
 * should be counted as an Rx error by the caller.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		/* padding after the frame rounds (size + 2) up to 4 bytes */
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* frame length must not run past the end of the buffer */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive-error flag set: skip the frame data below */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			/* the hardware appends the FCS; strip it */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3689
3690 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3691                               int budget, int *work_done)
3692 {
3693         if (!lan78xx_rx(dev, skb, budget, work_done)) {
3694                 netif_dbg(dev, rx_err, dev->net, "drop\n");
3695                 dev->net->stats.rx_errors++;
3696         }
3697 }
3698
/* Rx bulk URB completion handler (interrupt context).
 *
 * Classifies the URB status into either rx_done (hand the buffer to
 * NAPI for frame extraction) or rx_cleanup (recycle the buffer), then
 * defers further processing to the bottom half via defer_bh().
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	/* sanity check: the SKB's bookkeeping should reference this URB */
	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* endpoint stalled: clear the halt from process context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
3758
/* Submit one Rx buffer as a bulk-in URB.
 *
 * Submission is skipped (-ENOLINK) when the device is detached, the
 * interface is down, the Rx endpoint is halted, or the device is
 * autosuspended. On any failure the buffer is returned to the free
 * pool. Returns 0 on successful submission or a negative errno.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock also serializes the state checks against teardown */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* on failure put the buffer back on the free list for later reuse */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
3810
3811 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3812 {
3813         struct sk_buff *rx_buf;
3814
3815         /* Ensure the maximum number of Rx URBs is submitted
3816          */
3817         while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3818                 if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3819                         break;
3820         }
3821 }
3822
/* Rewind a processed Rx buffer so its full capacity is available again,
 * then resubmit it to the device.
 */
static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
{
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);
	rx_buf->len = 0;
	rx_buf->data_len = 0;

	rx_submit(dev, rx_buf, GFP_ATOMIC);
}
3835
3836 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3837 {
3838         u32 tx_cmd_a;
3839         u32 tx_cmd_b;
3840
3841         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3842
3843         if (skb->ip_summed == CHECKSUM_PARTIAL)
3844                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3845
3846         tx_cmd_b = 0;
3847         if (skb_is_gso(skb)) {
3848                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3849
3850                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3851
3852                 tx_cmd_a |= TX_CMD_A_LSO_;
3853         }
3854
3855         if (skb_vlan_tag_present(skb)) {
3856                 tx_cmd_a |= TX_CMD_A_IVTG_;
3857                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3858         }
3859
3860         put_unaligned_le32(tx_cmd_a, buffer);
3861         put_unaligned_le32(tx_cmd_b, buffer + 4);
3862 }
3863
/* Pack as many pending Tx SKBs as will fit into a Tx URB buffer.
 *
 * Each frame is preceded by the 8-byte Tx command words and aligned to
 * TX_ALIGNMENT within the buffer. An SKB that does not fit is pushed
 * back to the head of the pending queue for the next URB.
 *
 * Returns the buffer's skb_data with num_of_packet / length set for
 * the completion-time accounting.
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* pad so the command words start on a TX_ALIGNMENT boundary */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* no room: requeue at the head to preserve ordering */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* drop the frame and reuse the command-word space */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		/* GSO SKBs count as one packet per segment */
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
3929
/* Tx bottom half: wake the stack queue if space has freed up, then
 * drain the pending queue into bulk-out URBs until no data, no free
 * URB buffers, or a submission error.
 *
 * Runs from NAPI poll context.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			/* jumps into the error block below: drop and release */
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: clear halt from process context */
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
4022
/* NAPI bottom half: deliver overflow frames from the previous cycle,
 * process completed Rx URBs up to @budget, resubmit Rx buffers, and
 * run the Tx bottom half.
 *
 * Returns the number of frames delivered (the NAPI work done).
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* nothing to deliver; just recycle the buffer */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4098
/* NAPI poll callback.
 *
 * Runs the bottom half and, when the budget was not exhausted,
 * completes NAPI — rescheduling itself if Rx completions or pending
 * Tx data arrived while completing, so no work is stranded.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}
4142
4143 static void lan78xx_delayedwork(struct work_struct *work)
4144 {
4145         int status;
4146         struct lan78xx_net *dev;
4147
4148         dev = container_of(work, struct lan78xx_net, wq.work);
4149
4150         if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4151                 return;
4152
4153         if (usb_autopm_get_interface(dev->intf) < 0)
4154                 return;
4155
4156         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4157                 unlink_urbs(dev, &dev->txq);
4158
4159                 status = usb_clear_halt(dev->udev, dev->pipe_out);
4160                 if (status < 0 &&
4161                     status != -EPIPE &&
4162                     status != -ESHUTDOWN) {
4163                         if (netif_msg_tx_err(dev))
4164                                 netdev_err(dev->net,
4165                                            "can't clear tx halt, status %d\n",
4166                                            status);
4167                 } else {
4168                         clear_bit(EVENT_TX_HALT, &dev->flags);
4169                         if (status != -ESHUTDOWN)
4170                                 netif_wake_queue(dev->net);
4171                 }
4172         }
4173
4174         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4175                 unlink_urbs(dev, &dev->rxq);
4176                 status = usb_clear_halt(dev->udev, dev->pipe_in);
4177                 if (status < 0 &&
4178                     status != -EPIPE &&
4179                     status != -ESHUTDOWN) {
4180                         if (netif_msg_rx_err(dev))
4181                                 netdev_err(dev->net,
4182                                            "can't clear rx halt, status %d\n",
4183                                            status);
4184                 } else {
4185                         clear_bit(EVENT_RX_HALT, &dev->flags);
4186                         napi_schedule(&dev->napi);
4187                 }
4188         }
4189
4190         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4191                 int ret = 0;
4192
4193                 clear_bit(EVENT_LINK_RESET, &dev->flags);
4194                 if (lan78xx_link_reset(dev) < 0) {
4195                         netdev_info(dev->net, "link reset failed (%d)\n",
4196                                     ret);
4197                 }
4198         }
4199
4200         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4201                 lan78xx_update_stats(dev);
4202
4203                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4204
4205                 mod_timer(&dev->stat_monitor,
4206                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
4207
4208                 dev->delta = min((dev->delta * 2), 50);
4209         }
4210
4211         usb_autopm_put_interface(dev->intf);
4212 }
4213
/* Interrupt-endpoint URB completion handler.
 *
 * On success hands the status buffer to lan78xx_status(), then
 * resubmits the URB unless the interface is shutting down.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	/* zero the buffer so stale status bits are never re-processed */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case  0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}
4265
/* USB disconnect callback: tear everything down in the reverse order
 * of probe — netdev first, then timers/work, PHY, deferred URBs,
 * private data, buffer pools, and finally the interrupt URB.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	/* stop new work before cancelling, so nothing re-arms the timer */
	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	/* drop Tx URBs that were deferred while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4312
4313 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4314 {
4315         struct lan78xx_net *dev = netdev_priv(net);
4316
4317         unlink_urbs(dev, &dev->txq);
4318         napi_schedule(&dev->napi);
4319 }
4320
4321 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4322                                                 struct net_device *netdev,
4323                                                 netdev_features_t features)
4324 {
4325         struct lan78xx_net *dev = netdev_priv(netdev);
4326
4327         if (skb->len > LAN78XX_TSO_SIZE(dev))
4328                 features &= ~NETIF_F_GSO_MASK;
4329
4330         features = vlan_features_check(skb, features);
4331         features = vxlan_features_check(skb, features);
4332
4333         return features;
4334 }
4335
/* Standard net_device callbacks; ioctls are routed to phylib via
 * phy_do_ioctl_running.
 */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4351
4352 static void lan78xx_stat_monitor(struct timer_list *t)
4353 {
4354         struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4355
4356         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4357 }
4358
4359 static int lan78xx_probe(struct usb_interface *intf,
4360                          const struct usb_device_id *id)
4361 {
4362         struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4363         struct lan78xx_net *dev;
4364         struct net_device *netdev;
4365         struct usb_device *udev;
4366         int ret;
4367         unsigned int maxp;
4368         unsigned int period;
4369         u8 *buf = NULL;
4370
4371         udev = interface_to_usbdev(intf);
4372         udev = usb_get_dev(udev);
4373
4374         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4375         if (!netdev) {
4376                 dev_err(&intf->dev, "Error: OOM\n");
4377                 ret = -ENOMEM;
4378                 goto out1;
4379         }
4380
4381         /* netdev_printk() needs this */
4382         SET_NETDEV_DEV(netdev, &intf->dev);
4383
4384         dev = netdev_priv(netdev);
4385         dev->udev = udev;
4386         dev->intf = intf;
4387         dev->net = netdev;
4388         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4389                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
4390
4391         skb_queue_head_init(&dev->rxq);
4392         skb_queue_head_init(&dev->txq);
4393         skb_queue_head_init(&dev->rxq_done);
4394         skb_queue_head_init(&dev->txq_pend);
4395         skb_queue_head_init(&dev->rxq_overflow);
4396         mutex_init(&dev->phy_mutex);
4397         mutex_init(&dev->dev_mutex);
4398
4399         ret = lan78xx_urb_config_init(dev);
4400         if (ret < 0)
4401                 goto out2;
4402
4403         ret = lan78xx_alloc_tx_resources(dev);
4404         if (ret < 0)
4405                 goto out2;
4406
4407         ret = lan78xx_alloc_rx_resources(dev);
4408         if (ret < 0)
4409                 goto out3;
4410
4411         /* MTU range: 68 - 9000 */
4412         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4413
4414         netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4415
4416         netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4417
4418         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4419         init_usb_anchor(&dev->deferred);
4420
4421         netdev->netdev_ops = &lan78xx_netdev_ops;
4422         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4423         netdev->ethtool_ops = &lan78xx_ethtool_ops;
4424
4425         dev->delta = 1;
4426         timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4427
4428         mutex_init(&dev->stats.access_lock);
4429
4430         if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4431                 ret = -ENODEV;
4432                 goto out4;
4433         }
4434
4435         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4436         ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4437         if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4438                 ret = -ENODEV;
4439                 goto out4;
4440         }
4441
4442         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4443         ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4444         if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4445                 ret = -ENODEV;
4446                 goto out4;
4447         }
4448
4449         ep_intr = &intf->cur_altsetting->endpoint[2];
4450         if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4451                 ret = -ENODEV;
4452                 goto out4;
4453         }
4454
4455         dev->pipe_intr = usb_rcvintpipe(dev->udev,
4456                                         usb_endpoint_num(&ep_intr->desc));
4457
4458         ret = lan78xx_bind(dev, intf);
4459         if (ret < 0)
4460                 goto out4;
4461
4462         if (int_urb_interval_ms <= 0)
4463                 period = ep_intr->desc.bInterval;
4464         else
4465                 period = int_urb_interval_ms * INT_URB_MICROFRAMES_PER_MS;
4466
4467         netif_notice(dev, probe, netdev, "int urb period %d\n", period);
4468
4469         maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4470         buf = kmalloc(maxp, GFP_KERNEL);
4471         if (!buf) {
4472                 ret = -ENOMEM;
4473                 goto out5;
4474         }
4475
4476         dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4477         if (!dev->urb_intr) {
4478                 ret = -ENOMEM;
4479                 goto out6;
4480         } else {
4481                 usb_fill_int_urb(dev->urb_intr, dev->udev,
4482                                  dev->pipe_intr, buf, maxp,
4483                                  intr_complete, dev, period);
4484                 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4485         }
4486
4487         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4488
4489         /* Reject broken descriptors. */
4490         if (dev->maxpacket == 0) {
4491                 ret = -ENODEV;
4492                 goto out6;
4493         }
4494
4495         /* driver requires remote-wakeup capability during autosuspend. */
4496         intf->needs_remote_wakeup = 1;
4497
4498         ret = lan78xx_phy_init(dev);
4499         if (ret < 0)
4500                 goto out7;
4501
4502         ret = register_netdev(netdev);
4503         if (ret != 0) {
4504                 netif_err(dev, probe, netdev, "couldn't register the device\n");
4505                 goto out8;
4506         }
4507
4508         usb_set_intfdata(intf, dev);
4509
4510         ret = device_set_wakeup_enable(&udev->dev, true);
4511
4512          /* Default delay of 2sec has more overhead than advantage.
4513           * Set to 10sec as default.
4514           */
4515         pm_runtime_set_autosuspend_delay(&udev->dev,
4516                                          DEFAULT_AUTOSUSPEND_DELAY);
4517
4518         return 0;
4519
4520 out8:
4521         phy_disconnect(netdev->phydev);
4522 out7:
4523         usb_free_urb(dev->urb_intr);
4524 out6:
4525         kfree(buf);
4526 out5:
4527         lan78xx_unbind(dev, intf);
4528 out4:
4529         netif_napi_del(&dev->napi);
4530         lan78xx_free_rx_resources(dev);
4531 out3:
4532         lan78xx_free_tx_resources(dev);
4533 out2:
4534         free_netdev(netdev);
4535 out1:
4536         usb_put_dev(udev);
4537
4538         return ret;
4539 }
4540
4541 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4542 {
4543         const u16 crc16poly = 0x8005;
4544         int i;
4545         u16 bit, crc, msb;
4546         u8 data;
4547
4548         crc = 0xFFFF;
4549         for (i = 0; i < len; i++) {
4550                 data = *buf++;
4551                 for (bit = 0; bit < 8; bit++) {
4552                         msb = crc >> 15;
4553                         crc <<= 1;
4554
4555                         if (msb ^ (u16)(data & 1)) {
4556                                 crc ^= crc16poly;
4557                                 crc |= (u16)0x0001U;
4558                         }
4559                         data >>= 1;
4560                 }
4561         }
4562
4563         return crc;
4564 }
4565
/* Prepare the MAC for USB autosuspend (selective suspend).
 *
 * Stops both data paths, clears wake-up control and latched wake sources,
 * arms "good frame" (RFE) wake-up with packet store plus PHY-event wake-up,
 * selects suspend mode 3 in PMT_CTL, then restarts the RX path so wake
 * packets can still be detected while suspended.  Returns 0 or a negative
 * error code from the first failing register access.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* clear wake-up control and any latched wake-up sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	/* enable WOL + PHY wake, select suspend mode 3 */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* clear stale wake-up status (WUPS bits assumed write-one-to-clear
	 * — verify against the LAN78xx datasheet)
	 */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4633
/* Arm the requested Wake-on-LAN sources ('wol' is the ethtool WAKE_* mask)
 * before a system suspend.
 *
 * Stops both data paths, clears stale wake status, then programs WUCSR and,
 * for filter-based sources (multicast, ARP), one wake-up frame filter slot
 * (WUF_CFG/WUF_MASK) per pattern.  PMT_CTL accumulates the suspend mode in
 * temp_pmt_ctl; finally RX is restarted so wake packets can be detected.
 * Returns 0 or the first register-access error.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* pattern prefixes matched by the wake-up frame filters */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* clear wake-up control and latched wake-up sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wake-up frame filters before programming new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x7: match the first 3 bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3: match the first 2 bytes of the frame */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3000: match bytes 12-13 (the EtherType field) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* restart RX so wake-up packets can be received while suspended */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4830
/* USB suspend callback.
 *
 * Open interface: refuse autosuspend while TX work is pending, otherwise
 * quiesce RX/TX and all URBs, then arm either the autosuspend wake-up
 * configuration (runtime PM) or the user's WOL settings (system suspend).
 * Closed interface: disable all wake sources and enter suspend mode 3 so
 * neither WOL nor PHY events can wake the host.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* ASLEEP is set under txq.lock so the TX path sees
			 * it consistently with the queue state
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			/* arm the WOL sources the user set via ethtool */
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		/* clear stale wake-up status (WUPS assumed
		 * write-one-to-clear — verify against datasheet)
		 */
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4940
4941 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4942 {
4943         bool pipe_halted = false;
4944         struct urb *urb;
4945
4946         while ((urb = usb_get_from_anchor(&dev->deferred))) {
4947                 struct sk_buff *skb = urb->context;
4948                 int ret;
4949
4950                 if (!netif_device_present(dev->net) ||
4951                     !netif_carrier_ok(dev->net) ||
4952                     pipe_halted) {
4953                         lan78xx_release_tx_buf(dev, skb);
4954                         continue;
4955                 }
4956
4957                 ret = usb_submit_urb(urb, GFP_ATOMIC);
4958
4959                 if (ret == 0) {
4960                         netif_trans_update(dev->net);
4961                         lan78xx_queue_skb(&dev->txq, skb, tx_start);
4962                 } else {
4963                         if (ret == -EPIPE) {
4964                                 netif_stop_queue(dev->net);
4965                                 pipe_halted = true;
4966                         } else if (ret == -ENODEV) {
4967                                 netif_device_detach(dev->net);
4968                         }
4969
4970                         lan78xx_release_tx_buf(dev, skb);
4971                 }
4972         }
4973
4974         return pipe_halted;
4975 }
4976
/* USB resume callback (normal resume; see lan78xx_reset_resume for the
 * reset variant).
 *
 * If the interface was open: flush the TX FIFO, resubmit the interrupt URB
 * and any TX URBs deferred while asleep, restart the TX path, NAPI and the
 * stats timer.  In all cases the wake-up sources armed at suspend time are
 * disabled and latched wake event flags are cleared.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			/* NOTE: the inner 'ret' deliberately shadows the
			 * outer one — a failed intr URB submission is
			 * treated as non-fatal (warn, detach on -ENODEV)
			 * and must not abort the resume
			 */
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		/* cleared under txq.lock, mirroring how suspend set it */
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	/* disable the wake-up sources armed for suspend */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	/* clear latched wake event flags (*_RCD_/*_FR_ bits assumed
	 * write-one-to-clear — verify against the LAN78xx datasheet)
	 */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5073
5074 static int lan78xx_reset_resume(struct usb_interface *intf)
5075 {
5076         struct lan78xx_net *dev = usb_get_intfdata(intf);
5077         int ret;
5078
5079         netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5080
5081         ret = lan78xx_reset(dev);
5082         if (ret < 0)
5083                 return ret;
5084
5085         phy_start(dev->net->phydev);
5086
5087         ret = lan78xx_resume(intf);
5088
5089         return ret;
5090 }
5091
/* USB vendor/product IDs this driver binds to. */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
5112
/* USB driver glue: supports runtime autosuspend and reset_resume. */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
5124
5125 module_usb_driver(lan78xx_driver);
5126
5127 MODULE_AUTHOR(DRIVER_AUTHOR);
5128 MODULE_DESCRIPTION(DRIVER_DESC);
5129 MODULE_LICENSE("GPL");