lan78xx: Don't reset the interface on open
[platform/kernel/linux-rpi.git] / drivers / net / usb / lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
39 #include <linux/phy.h>
40 #include "lan78xx.h"
41
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.6"

/* netdev watchdog / throttling / unlink timing */
#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

/* USB bulk packet sizes for SuperSpeed / HighSpeed / FullSpeed links */
#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

/* device FIFO sizing and default feature switches */
#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

/* USB IDs and EEPROM/OTP magic values */
#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define MII_READ			1
#define MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)
130
/* ethtool statistics names; entry order must match the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 below, since the stats
 * structs are walked as flat arrays when reported.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
180
/* Raw 32-bit hardware statistics block, in the exact layout returned by
 * the USB_VENDOR_REQUEST_GET_STATS control transfer.  Field order must
 * match lan78xx_gstrings and struct lan78xx_statstage64.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
230
/* 64-bit accumulated statistics, rollover-corrected from the 32-bit
 * hardware counters by lan78xx_update_stats().  Field-for-field mirror
 * of struct lan78xx_statstage.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
280
281 struct lan78xx_net;
282
/* Software-private receive-filter / VLAN / wake-on-LAN state. */
struct lan78xx_priv {
	struct lan78xx_net *dev;	/* back-pointer to owning adapter */
	u32 rfe_ctl;			/* shadow of the rfe control register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; /* VLAN membership bits */
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast-filter update */
	struct work_struct set_vlan;	  /* deferred VLAN-table update */
	u32 wol;			/* enabled wake-on-LAN modes */
};
295
/* Lifecycle state of an skb/URB pair tracked via struct skb_data. */
enum skb_state {
	illegal = 0,	/* not in any queue / uninitialized */
	tx_start,	/* submitted for transmit */
	tx_done,	/* transmit URB completed */
	rx_start,	/* submitted for receive */
	rx_done,	/* receive URB completed */
	rx_cleanup,	/* receive buffer being torn down */
	unlink_start	/* URB unlink in progress */
};
305
/* Per-buffer bookkeeping overlaid on skb->cb for in-flight RX/TX skbs. */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this buffer */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where this buffer is in its lifecycle */
	size_t length;		/* payload length for this transfer */
	int num_of_packet;	/* packets aggregated in this buffer */
};
313
/* Context attached to an asynchronous USB control request. */
struct usb_context {
	struct usb_ctrlrequest req;	/* the control setup packet */
	struct lan78xx_net *dev;	/* adapter the request belongs to */
};
318
/* Deferred-event bit numbers (presumably set/tested atomically in
 * lan78xx_net.flags and serviced from the work queue — confirm at the
 * use sites, which are outside this chunk).
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
329
/* Statistics bookkeeping: last raw snapshot, per-counter rollover info
 * and the rollover-corrected 64-bit running totals.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;		/* last raw hw snapshot */
	struct lan78xx_statstage	rollover_count;	/* wraps seen per counter */
	struct lan78xx_statstage	rollover_max;	/* per-counter max before wrap */
	struct lan78xx_statstage64	curr_stat;	/* accumulated 64-bit totals */
};
337
/* State for the driver-provided IRQ domain that maps device interrupt
 * sources (e.g. the PHY interrupt) onto Linux virtual IRQs.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* virq assigned to the PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* enabled-source mask shadow */
	struct mutex		irq_lock;		/* for irq bus access */
};
346
/* Per-adapter driver state. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* presumably struct lan78xx_priv — confirm at alloc site */

	int			rx_qlen;	/* target RX queue depth */
	int			tx_qlen;	/* target TX queue depth */
	struct sk_buff_head	rxq;		/* RX URBs in flight */
	struct sk_buff_head	txq;		/* TX URBs in flight */
	struct sk_buff_head	done;		/* completed, awaiting bh */
	struct sk_buff_head	rxq_pause;	/* RX held while paused */
	struct sk_buff_head	txq_pend;	/* TX waiting for a URB */

	struct tasklet_struct	bh;		/* completion bottom half */
	struct delayed_work	wq;		/* deferred event work */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif message level bitmap */

	struct urb		*urb_intr;	/* interrupt endpoint URB */
	struct usb_anchor	deferred;	/* URBs deferred across suspend */

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* NOTE(review): likely EVENT_* bits — confirm at use sites */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;	/* periodic stats refresh */

	unsigned long		data[5];	/* NOTE(review): purpose not visible in this chunk */

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* ID_REV chip id (7800/7850/7801) */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;	/* flow control autoneg enabled */
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
406
/* define external phy id */
#define PHY_LAN8835			(0x0007C130)
#define PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;	/* -1: keep the per-device default */
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
415
416 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
417 {
418         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
419         int ret;
420
421         if (!buf)
422                 return -ENOMEM;
423
424         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
425                               USB_VENDOR_REQUEST_READ_REGISTER,
426                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
427                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
428         if (likely(ret >= 0)) {
429                 le32_to_cpus(buf);
430                 *data = *buf;
431         } else {
432                 netdev_warn(dev->net,
433                             "Failed to read register index 0x%08x. ret = %d",
434                             index, ret);
435         }
436
437         kfree(buf);
438
439         return ret;
440 }
441
442 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
443 {
444         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
445         int ret;
446
447         if (!buf)
448                 return -ENOMEM;
449
450         *buf = data;
451         cpu_to_le32s(buf);
452
453         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
454                               USB_VENDOR_REQUEST_WRITE_REGISTER,
455                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
456                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
457         if (unlikely(ret < 0)) {
458                 netdev_warn(dev->net,
459                             "Failed to write register index 0x%08x. ret = %d",
460                             index, ret);
461         }
462
463         kfree(buf);
464
465         return ret;
466 }
467
468 static int lan78xx_read_stats(struct lan78xx_net *dev,
469                               struct lan78xx_statstage *data)
470 {
471         int ret = 0;
472         int i;
473         struct lan78xx_statstage *stats;
474         u32 *src;
475         u32 *dst;
476
477         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
478         if (!stats)
479                 return -ENOMEM;
480
481         ret = usb_control_msg(dev->udev,
482                               usb_rcvctrlpipe(dev->udev, 0),
483                               USB_VENDOR_REQUEST_GET_STATS,
484                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
485                               0,
486                               0,
487                               (void *)stats,
488                               sizeof(*stats),
489                               USB_CTRL_SET_TIMEOUT);
490         if (likely(ret >= 0)) {
491                 src = (u32 *)stats;
492                 dst = (u32 *)data;
493                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
494                         le32_to_cpus(&src[i]);
495                         dst[i] = src[i];
496                 }
497         } else {
498                 netdev_warn(dev->net,
499                             "Failed to read stat ret = 0x%x", ret);
500         }
501
502         kfree(stats);
503
504         return ret;
505 }
506
507 #define check_counter_rollover(struct1, dev_stats, member) {    \
508         if (struct1->member < dev_stats.saved.member)           \
509                 dev_stats.rollover_count.member++;              \
510         }
511
512 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
513                                         struct lan78xx_statstage *stats)
514 {
515         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
516         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
517         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
518         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
519         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
520         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
521         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
522         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
523         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
524         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
525         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
526         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
527         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
528         check_counter_rollover(stats, dev->stats, rx_pause_frames);
529         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
530         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
531         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
532         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
533         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
534         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
535         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
536         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
537         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
538         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
539         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
540         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
541         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
542         check_counter_rollover(stats, dev->stats, tx_single_collisions);
543         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
544         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
545         check_counter_rollover(stats, dev->stats, tx_late_collisions);
546         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
547         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
548         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
549         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
550         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
551         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
552         check_counter_rollover(stats, dev->stats, tx_pause_frames);
553         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
554         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
555         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
556         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
557         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
558         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
559         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
560         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
561         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
562
563         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
564 }
565
/* Read the hardware counters and fold them into the 64-bit running
 * totals, correcting for 32-bit counter rollover.  Holds the device
 * awake (autopm) for the duration; silently skips if resume fails.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* The stats structs share one field layout, so they are walked
	 * in parallel as flat u32/u64 arrays.
	 */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* lan78xx_read_stats() returns bytes transferred; > 0 = success */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* total = current + wraps * (max + 1), all widened to 64 bit */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
593
594 /* Loop until the read is completed with timeout called with phy_mutex held */
595 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
596 {
597         unsigned long start_time = jiffies;
598         u32 val;
599         int ret;
600
601         do {
602                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
603                 if (unlikely(ret < 0))
604                         return -EIO;
605
606                 if (!(val & MII_ACC_MII_BUSY_))
607                         return 0;
608         } while (!time_after(jiffies, start_time + HZ));
609
610         return -EIO;
611 }
612
613 static inline u32 mii_access(int id, int index, int read)
614 {
615         u32 ret;
616
617         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
618         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
619         if (read)
620                 ret |= MII_ACC_MII_READ_;
621         else
622                 ret |= MII_ACC_MII_WRITE_;
623         ret |= MII_ACC_MII_BUSY_;
624
625         return ret;
626 }
627
628 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
629 {
630         unsigned long start_time = jiffies;
631         u32 val;
632         int ret;
633
634         do {
635                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
636                 if (unlikely(ret < 0))
637                         return -EIO;
638
639                 if (!(val & E2P_CMD_EPC_BUSY_) ||
640                     (val & E2P_CMD_EPC_TIMEOUT_))
641                         break;
642                 usleep_range(40, 100);
643         } while (!time_after(jiffies, start_time + HZ));
644
645         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
646                 netdev_warn(dev->net, "EEPROM read operation timeout");
647                 return -EIO;
648         }
649
650         return 0;
651 }
652
653 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
654 {
655         unsigned long start_time = jiffies;
656         u32 val;
657         int ret;
658
659         do {
660                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
661                 if (unlikely(ret < 0))
662                         return -EIO;
663
664                 if (!(val & E2P_CMD_EPC_BUSY_))
665                         return 0;
666
667                 usleep_range(40, 100);
668         } while (!time_after(jiffies, start_time + HZ));
669
670         netdev_warn(dev->net, "EEPROM is busy");
671         return -EIO;
672 }
673
674 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
675                                    u32 length, u8 *data)
676 {
677         u32 val;
678         u32 saved;
679         int i, ret;
680         int retval;
681
682         /* depends on chip, some EEPROM pins are muxed with LED function.
683          * disable & restore LED function to access EEPROM.
684          */
685         ret = lan78xx_read_reg(dev, HW_CFG, &val);
686         saved = val;
687         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
688                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
689                 ret = lan78xx_write_reg(dev, HW_CFG, val);
690         }
691
692         retval = lan78xx_eeprom_confirm_not_busy(dev);
693         if (retval)
694                 return retval;
695
696         for (i = 0; i < length; i++) {
697                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
698                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
699                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
700                 if (unlikely(ret < 0)) {
701                         retval = -EIO;
702                         goto exit;
703                 }
704
705                 retval = lan78xx_wait_eeprom(dev);
706                 if (retval < 0)
707                         goto exit;
708
709                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
710                 if (unlikely(ret < 0)) {
711                         retval = -EIO;
712                         goto exit;
713                 }
714
715                 data[i] = val & 0xFF;
716                 offset++;
717         }
718
719         retval = 0;
720 exit:
721         if (dev->chipid == ID_REV_CHIP_ID_7800_)
722                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
723
724         return retval;
725 }
726
727 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
728                                u32 length, u8 *data)
729 {
730         u8 sig;
731         int ret;
732
733         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
734         if ((ret == 0) && (sig == EEPROM_INDICATOR))
735                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
736         else
737                 ret = -EINVAL;
738
739         return ret;
740 }
741
/* Write @length bytes from @data to the EEPROM starting at @offset.
 *
 * Issues a write/erase-enable command first, then one data-load plus
 * write command per byte.  On LAN7800 the LED function (muxed with the
 * EEPROM pins) is disabled during the access and restored on exit.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration disabled above */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
808
/* Read @length bytes of OTP memory starting at @offset into @data.
 *
 * Powers up the OTP block if needed (OTP_PWR_DN cleared and polled),
 * then issues one read command per byte, polling OTP_STATUS for
 * completion each time.
 *
 * Returns 0 on success or -EIO on a poll timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* byte address is split across the two address registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* kick off a single-byte read and poll for completion */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
862
/* Write @length bytes from @data to raw OTP memory starting at @offset.
 *
 * Mirrors lan78xx_read_raw_otp(): powers up the OTP block if needed,
 * selects BYTE program mode, then programs one byte per iteration and
 * polls OTP_STATUS for completion.  OTP cells are one-time programmable,
 * so this permanently alters the device.
 *
 * Returns 0 on success or -EIO on a polling timeout.
 * NOTE(review): as in the read path, individual register-access return
 * values are not checked.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		/* allow at most one second for power-up */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* byte address is split: high bits in OTP_ADDR1,
		 * low bits in OTP_ADDR2
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		/* poll for program/verify completion, one second max */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
915
916 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
917                             u32 length, u8 *data)
918 {
919         u8 sig;
920         int ret;
921
922         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
923
924         if (ret == 0) {
925                 if (sig == OTP_INDICATOR_1)
926                         offset = offset;
927                 else if (sig == OTP_INDICATOR_2)
928                         offset += 0x100;
929                 else
930                         ret = -EINVAL;
931                 if (!ret)
932                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
933         }
934
935         return ret;
936 }
937
938 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
939 {
940         int i, ret;
941
942         for (i = 0; i < 100; i++) {
943                 u32 dp_sel;
944
945                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
946                 if (unlikely(ret < 0))
947                         return -EIO;
948
949                 if (dp_sel & DP_SEL_DPRDY_)
950                         return 0;
951
952                 usleep_range(40, 100);
953         }
954
955         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
956
957         return -EIO;
958 }
959
/* Write @length words from @buf into internal RAM selected by
 * @ram_select, starting at word address @addr, via the dataport.
 *
 * Serialized against other dataport users with dataport_mutex; each
 * word write waits for the dataport to become ready again.
 *
 * NOTE(review): an autopm failure returns 0 (success) without doing
 * any work - presumably deliberate (device suspended, nothing to do),
 * but worth confirming against the callers.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
			return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		/* wait for this word to be committed before the next */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1000
1001 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1002                                     int index, u8 addr[ETH_ALEN])
1003 {
1004         u32     temp;
1005
1006         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1007                 temp = addr[3];
1008                 temp = addr[2] | (temp << 8);
1009                 temp = addr[1] | (temp << 8);
1010                 temp = addr[0] | (temp << 8);
1011                 pdata->pfilter_table[index][1] = temp;
1012                 temp = addr[5];
1013                 temp = addr[4] | (temp << 8);
1014                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1015                 pdata->pfilter_table[index][0] = temp;
1016         }
1017 }
1018
1019 /* returns hash bit number for given MAC address */
1020 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1021 {
1022         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1023 }
1024
/* Work handler: push the shadow multicast state to hardware.
 *
 * Runs in process context (scheduled from lan78xx_set_multicast())
 * because the register and dataport accesses may sleep.  Writes the
 * hash table, the perfect-filter entries 1..NUM_OF_MAF-1, and finally
 * RFE_CTL.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		/* MAF_HI is cleared first - presumably to invalidate the
		 * entry while its address words change; confirm against
		 * the datasheet
		 */
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1049
/* ndo_set_rx_mode: rebuild the receive-filter shadow state.
 *
 * May be called in atomic context, so the shadow tables and rfe_ctl
 * are rebuilt under the rfe_ctl spinlock and the actual (sleeping)
 * register writes are deferred to lan78xx_deferred_multicast_write().
 *
 * The first 32 multicast addresses go into the perfect filter (slots
 * 1..32); any further addresses fall back to the 512-bit hash filter.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: drop unicast/multicast/perfect/hash
	 * enables, clear the hash table and all perfect-filter slots
	 */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
			pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
			pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	/* broadcast is always accepted */
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1112
/* Program MAC flow control after link-up.
 *
 * Resolves the pause capability either from autonegotiation
 * (@lcladv/@rmtadv) or from the user-requested settings, then writes
 * the FCT_FLOW threshold register before enabling pause in FLOW.
 * Always returns 0.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);	/* max pause time */

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO flow thresholds per USB link speed - magic values,
	 * presumably from the vendor reference driver; confirm against
	 * the datasheet
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1147
/* Handle a PHY link-state change (scheduled from the interrupt URB).
 *
 * On link-down: resets the MAC and stops the statistics timer.
 * On link-up: tunes USB3 U1/U2 power states for the negotiated speed,
 * reprograms flow control from the advertisement registers, restarts
 * the statistics timer and kicks the bottom half.
 *
 * Returns 0/negative errno; a negative PHY read is propagated as-is.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* no point updating stats while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* resume tx/rx processing now that the link is up */
		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1225
/* Defer work that cannot run in tasklet context to the keventd
 * workqueue: set the event bit in dev->flags and (re)schedule the
 * delayed work.
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.      hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1237
1238 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1239 {
1240         u32 intdata;
1241
1242         if (urb->actual_length != 4) {
1243                 netdev_warn(dev->net,
1244                             "unexpected urb length %d", urb->actual_length);
1245                 return;
1246         }
1247
1248         memcpy(&intdata, urb->transfer_buffer, 4);
1249         le32_to_cpus(&intdata);
1250
1251         if (intdata & INT_ENP_PHY_INT) {
1252                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1253                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1254
1255                 if (dev->domain_data.phyirq > 0)
1256                         generic_handle_irq(dev->domain_data.phyirq);
1257         } else
1258                 netdev_warn(dev->net,
1259                             "unexpected interrupt: 0x%08x\n", intdata);
1260 }
1261
/* ethtool get_eeprom_len: fixed maximum EEPROM size in bytes. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1266
1267 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1268                                       struct ethtool_eeprom *ee, u8 *data)
1269 {
1270         struct lan78xx_net *dev = netdev_priv(netdev);
1271         int ret;
1272
1273         ret = usb_autopm_get_interface(dev->intf);
1274         if (ret)
1275                 return ret;
1276
1277         ee->magic = LAN78XX_EEPROM_MAGIC;
1278
1279         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1280
1281         usb_autopm_put_interface(dev->intf);
1282
1283         return ret;
1284 }
1285
1286 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1287                                       struct ethtool_eeprom *ee, u8 *data)
1288 {
1289         struct lan78xx_net *dev = netdev_priv(netdev);
1290         int ret;
1291
1292         ret = usb_autopm_get_interface(dev->intf);
1293         if (ret)
1294                 return ret;
1295
1296         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1297          * to load data from EEPROM
1298          */
1299         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1300                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1301         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1302                  (ee->offset == 0) &&
1303                  (ee->len == 512) &&
1304                  (data[0] == OTP_INDICATOR_1))
1305                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1306
1307         usb_autopm_put_interface(dev->intf);
1308
1309         return ret;
1310 }
1311
1312 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1313                                 u8 *data)
1314 {
1315         if (stringset == ETH_SS_STATS)
1316                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1317 }
1318
1319 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1320 {
1321         if (sset == ETH_SS_STATS)
1322                 return ARRAY_SIZE(lan78xx_gstrings);
1323         else
1324                 return -EOPNOTSUPP;
1325 }
1326
/* ethtool get_ethtool_stats: refresh hardware counters, then copy the
 * current snapshot out under the stats lock so readers never see a
 * half-updated set.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1338
/* ethtool get_wol: report Wake-on-LAN capability and current options.
 *
 * WoL is reported as supported only when USB remote wakeup is enabled
 * in USB_CFG0; the active options come from the cached pdata->wol.
 * On any failure (autopm or register read) the function reports
 * "not supported" rather than an error - ethtool get_wol is void.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
			return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1366
1367 static int lan78xx_set_wol(struct net_device *netdev,
1368                            struct ethtool_wolinfo *wol)
1369 {
1370         struct lan78xx_net *dev = netdev_priv(netdev);
1371         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1372         int ret;
1373
1374         ret = usb_autopm_get_interface(dev->intf);
1375         if (ret < 0)
1376                 return ret;
1377
1378         if (wol->wolopts & ~WAKE_ALL)
1379                 return -EINVAL;
1380
1381         pdata->wol = wol->wolopts;
1382
1383         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1384
1385         phy_ethtool_set_wol(netdev->phydev, wol);
1386
1387         usb_autopm_put_interface(dev->intf);
1388
1389         return ret;
1390 }
1391
/* ethtool get_eee: report Energy-Efficient Ethernet state.
 *
 * Combines the PHY's EEE advertisement (via phy_ethtool_get_eee) with
 * the MAC enable bit in MAC_CR; "active" is derived from the overlap
 * of local and link-partner advertisements.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both ends advertise a common mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1429
/* ethtool set_eee: enable or disable Energy-Efficient Ethernet.
 *
 * Enabling sets MAC_CR_EEE_EN_, forwards the request to the PHY and
 * programs the LPI request delay from tx_lpi_timer (same microsecond
 * unit); disabling only clears the MAC enable bit.
 *
 * NOTE(review): register-access results in "ret" are ignored and the
 * function always returns 0 - errors here go unreported.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1459
1460 static u32 lan78xx_get_link(struct net_device *net)
1461 {
1462         phy_read_status(net->phydev);
1463
1464         return net->phydev->link;
1465 }
1466
/* ethtool get_drvinfo: fill in driver name, version and USB bus path.
 *
 * NOTE(review): strncpy() does not guarantee NUL termination when the
 * source fills the buffer; fine here only because DRIVER_NAME and
 * DRIVER_VERSION are much shorter than the ethtool fields - strlcpy/
 * strscpy would be the defensive choice.
 */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1476
/* ethtool get_msglevel: current netif debug message bitmask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1483
/* ethtool set_msglevel: set the netif debug message bitmask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1490
1491 static int lan78xx_get_link_ksettings(struct net_device *net,
1492                                       struct ethtool_link_ksettings *cmd)
1493 {
1494         struct lan78xx_net *dev = netdev_priv(net);
1495         struct phy_device *phydev = net->phydev;
1496         int ret;
1497
1498         ret = usb_autopm_get_interface(dev->intf);
1499         if (ret < 0)
1500                 return ret;
1501
1502         phy_ethtool_ksettings_get(phydev, cmd);
1503
1504         usb_autopm_put_interface(dev->intf);
1505
1506         return ret;
1507 }
1508
/* ethtool set_link_ksettings: apply speed/duplex/autoneg via the PHY.
 *
 * When autonegotiation is off, BMCR loopback is pulsed for ~1ms -
 * presumably to force the link down so the newly forced mode takes
 * effect immediately (vendor workaround; confirm against the PHY
 * datasheet).
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1536
1537 static void lan78xx_get_pause(struct net_device *net,
1538                               struct ethtool_pauseparam *pause)
1539 {
1540         struct lan78xx_net *dev = netdev_priv(net);
1541         struct phy_device *phydev = net->phydev;
1542         struct ethtool_link_ksettings ecmd;
1543
1544         phy_ethtool_ksettings_get(phydev, &ecmd);
1545
1546         pause->autoneg = dev->fc_autoneg;
1547
1548         if (dev->fc_request_control & FLOW_CTRL_TX)
1549                 pause->tx_pause = 1;
1550
1551         if (dev->fc_request_control & FLOW_CTRL_RX)
1552                 pause->rx_pause = 1;
1553 }
1554
/* ethtool set_pauseparam: update the requested flow-control settings.
 *
 * Rejects pause autoneg when link autonegotiation is disabled.  The
 * request is cached in fc_request_control (applied to hardware on the
 * next link-up via lan78xx_update_flowcontrol); when autoneg is
 * active, the PHY advertisement is also rewritten so the new pause
 * capability is negotiated.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg requires link autoneg */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		/* replace the pause bits in the current advertisement
		 * with the newly requested ones and push to the PHY
		 */
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1600
/* ethtool operations exported by the driver; link settings and
 * nway_reset are serviced by phylib helpers, everything else by the
 * lan78xx_* callbacks above.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1622
/* ndo_do_ioctl: forward MII ioctls to the PHY; only valid while the
 * interface is up.
 */
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
1630
1631 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1632 {
1633         u32 addr_lo, addr_hi;
1634         int ret;
1635         u8 addr[6];
1636
1637         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1638         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1639
1640         addr[0] = addr_lo & 0xFF;
1641         addr[1] = (addr_lo >> 8) & 0xFF;
1642         addr[2] = (addr_lo >> 16) & 0xFF;
1643         addr[3] = (addr_lo >> 24) & 0xFF;
1644         addr[4] = addr_hi & 0xFF;
1645         addr[5] = (addr_hi >> 8) & 0xFF;
1646
1647         if (!is_valid_ether_addr(addr)) {
1648                 /* reading mac address from EEPROM or OTP */
1649                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1650                                          addr) == 0) ||
1651                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1652                                       addr) == 0)) {
1653                         if (is_valid_ether_addr(addr)) {
1654                                 /* eeprom values are valid so use them */
1655                                 netif_dbg(dev, ifup, dev->net,
1656                                           "MAC address read from EEPROM");
1657                         } else {
1658                                 /* generate random MAC */
1659                                 random_ether_addr(addr);
1660                                 netif_dbg(dev, ifup, dev->net,
1661                                           "MAC address set to random addr");
1662                         }
1663
1664                         addr_lo = addr[0] | (addr[1] << 8) |
1665                                   (addr[2] << 16) | (addr[3] << 24);
1666                         addr_hi = addr[4] | (addr[5] << 8);
1667
1668                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1669                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1670                 } else {
1671                         /* generate random MAC */
1672                         random_ether_addr(addr);
1673                         netif_dbg(dev, ifup, dev->net,
1674                                   "MAC address set to random addr");
1675                 }
1676         }
1677
1678         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1679         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1680
1681         ether_addr_copy(dev->net->dev_addr, addr);
1682 }
1683
1684 /* MDIO read and write wrappers for phylib */
1685 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1686 {
1687         struct lan78xx_net *dev = bus->priv;
1688         u32 val, addr;
1689         int ret;
1690
1691         ret = usb_autopm_get_interface(dev->intf);
1692         if (ret < 0)
1693                 return ret;
1694
1695         mutex_lock(&dev->phy_mutex);
1696
1697         /* confirm MII not busy */
1698         ret = lan78xx_phy_wait_not_busy(dev);
1699         if (ret < 0)
1700                 goto done;
1701
1702         /* set the address, index & direction (read from PHY) */
1703         addr = mii_access(phy_id, idx, MII_READ);
1704         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1705
1706         ret = lan78xx_phy_wait_not_busy(dev);
1707         if (ret < 0)
1708                 goto done;
1709
1710         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1711
1712         ret = (int)(val & 0xFFFF);
1713
1714 done:
1715         mutex_unlock(&dev->phy_mutex);
1716         usb_autopm_put_interface(dev->intf);
1717
1718         return ret;
1719 }
1720
1721 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1722                                  u16 regval)
1723 {
1724         struct lan78xx_net *dev = bus->priv;
1725         u32 val, addr;
1726         int ret;
1727
1728         ret = usb_autopm_get_interface(dev->intf);
1729         if (ret < 0)
1730                 return ret;
1731
1732         mutex_lock(&dev->phy_mutex);
1733
1734         /* confirm MII not busy */
1735         ret = lan78xx_phy_wait_not_busy(dev);
1736         if (ret < 0)
1737                 goto done;
1738
1739         val = (u32)regval;
1740         ret = lan78xx_write_reg(dev, MII_DATA, val);
1741
1742         /* set the address, index & direction (write to PHY) */
1743         addr = mii_access(phy_id, idx, MII_WRITE);
1744         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1745
1746         ret = lan78xx_phy_wait_not_busy(dev);
1747         if (ret < 0)
1748                 goto done;
1749
1750 done:
1751         mutex_unlock(&dev->phy_mutex);
1752         usb_autopm_put_interface(dev->intf);
1753         return 0;
1754 }
1755
/* Allocate and register the MDIO bus used to reach the PHY.
 *
 * phy_mask limits probing: the internal-PHY parts (LAN7800/LAN7850)
 * expose only address 1, while LAN7801 scans addresses 0..7 for an
 * external PHY.
 *
 * Returns 0 on success or a negative errno; the bus is freed on the
 * registration failure path.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* bus id is made unique by the device's USB topology position */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1798
/* Undo lan78xx_mdio_init(): unregister, then free the MDIO bus. */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1804
/* phylib link-change callback.
 *
 * Implements only a hardware workaround for forced-100 mode (see the
 * comment below): PHY interrupts are masked, speed is bounced through
 * 10 Mbit back to 100 Mbit, the interrupt status generated by the
 * bounce is read back (and thereby cleared), then interrupts are
 * re-enabled.
 *
 * NOTE(review): phy_read() failures are not checked and 'ret' is
 * assigned but never inspected — presumably best-effort by design;
 * confirm before tightening.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1836
1837 static int irq_map(struct irq_domain *d, unsigned int irq,
1838                    irq_hw_number_t hwirq)
1839 {
1840         struct irq_domain_data *data = d->host_data;
1841
1842         irq_set_chip_data(irq, data);
1843         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1844         irq_set_noprobe(irq);
1845
1846         return 0;
1847 }
1848
/* irq_domain unmap callback: detach the chip/handler and then clear
 * the chip data set by irq_map(), in that order.
 */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1854
/* Domain ops that wire PHY interrupt mappings onto lan78xx_irqchip. */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1859
1860 static void lan78xx_irq_mask(struct irq_data *irqd)
1861 {
1862         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1863
1864         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1865 }
1866
1867 static void lan78xx_irq_unmask(struct irq_data *irqd)
1868 {
1869         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1870
1871         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1872 }
1873
/* irq_chip .irq_bus_lock: serialize mask/unmask updates. The matching
 * irq_bus_sync_unlock flushes the cached enable bits to hardware; the
 * pair exists because USB register access can sleep and must not run
 * in atomic context.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1880
1881 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1882 {
1883         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1884         struct lan78xx_net *dev =
1885                         container_of(data, struct lan78xx_net, domain_data);
1886         u32 buf;
1887         int ret;
1888
1889         /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1890          * are only two callbacks executed in non-atomic contex.
1891          */
1892         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1893         if (buf != data->irqenable)
1894                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1895
1896         mutex_unlock(&data->irq_lock);
1897 }
1898
/* irq_chip backed by the device's INT_EP_CTL register. mask/unmask
 * only touch a cached bitmap; the bus_lock/bus_sync_unlock pair does
 * the (sleepable) USB register write.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1906
/* Create a simple IRQ domain over the device's interrupt endpoint and
 * map the PHY interrupt line (INT_EP_PHY) into it.
 *
 * On success dev->domain_data.phyirq holds the virq handed to phylib;
 * on failure both irqdomain and phyirq are left NULL/0 and -EINVAL is
 * returned.
 *
 * NOTE(review): the initial lan78xx_read_reg() result is not checked;
 * irqenable is seeded from whatever INT_EP_CTL read back.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable bits from the current hardware state */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
1945
1946 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1947 {
1948         if (dev->domain_data.phyirq > 0) {
1949                 irq_dispose_mapping(dev->domain_data.phyirq);
1950
1951                 if (dev->domain_data.irqdomain)
1952                         irq_domain_remove(dev->domain_data.irqdomain);
1953         }
1954         dev->domain_data.phyirq = 0;
1955         dev->domain_data.irqdomain = NULL;
1956 }
1957
/* PHY fixup for the external LAN8835 (LAN7801 only).
 *
 * Routes the shared LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode,
 * enables the MAC-side RGMII TXC delay and tunes the TX DLL, then
 * records RGMII-TXID as the interface mode.
 *
 * Returns 1 (non-zero tells phylib the fixup ran).
 * NOTE(review): the MMD read/write and register-write results are not
 * checked.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
1981
/* PHY fixup for the external Micrel KSZ9031RNX (LAN7801 only).
 *
 * Programs the RGMII pad-skew registers (control, RX data, RX clock)
 * and records RGMII-RXID as the interface mode.
 *
 * Returns 1 (non-zero tells phylib the fixup ran).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
1998
1999 static int lan78xx_phy_init(struct lan78xx_net *dev)
2000 {
2001         int ret;
2002         u32 mii_adv;
2003         struct phy_device *phydev = dev->net->phydev;
2004
2005         phydev = phy_find_first(dev->mdiobus);
2006         if (!phydev) {
2007                 netdev_err(dev->net, "no PHY found\n");
2008                 return -EIO;
2009         }
2010
2011         if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2012             (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2013                 phydev->is_internal = true;
2014                 dev->interface = PHY_INTERFACE_MODE_GMII;
2015
2016         } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2017                 if (!phydev->drv) {
2018                         netdev_err(dev->net, "no PHY driver found\n");
2019                         return -EIO;
2020                 }
2021
2022                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2023
2024                 /* external PHY fixup for KSZ9031RNX */
2025                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2026                                                  ksz9031rnx_fixup);
2027                 if (ret < 0) {
2028                         netdev_err(dev->net, "fail to register fixup\n");
2029                         return ret;
2030                 }
2031                 /* external PHY fixup for LAN8835 */
2032                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2033                                                  lan8835_fixup);
2034                 if (ret < 0) {
2035                         netdev_err(dev->net, "fail to register fixup\n");
2036                         return ret;
2037                 }
2038                 /* add more external PHY fixup here if needed */
2039
2040                 phydev->is_internal = false;
2041         } else {
2042                 netdev_err(dev->net, "unknown ID found\n");
2043                 ret = -EIO;
2044                 goto error;
2045         }
2046
2047         /* if phyirq is not set, use polling mode in phylib */
2048         if (dev->domain_data.phyirq > 0)
2049                 phydev->irq = dev->domain_data.phyirq;
2050         else
2051                 phydev->irq = 0;
2052         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2053
2054         /* set to AUTOMDIX */
2055         phydev->mdix = ETH_TP_MDI_AUTO;
2056
2057         ret = phy_connect_direct(dev->net, phydev,
2058                                  lan78xx_link_status_change,
2059                                  dev->interface);
2060         if (ret) {
2061                 netdev_err(dev->net, "can't attach PHY to %s\n",
2062                            dev->mdiobus->id);
2063                 return -EIO;
2064         }
2065
2066         /* MAC doesn't support 1000T Half */
2067         phydev->supported &= ~SUPPORTED_1000baseT_Half;
2068
2069         /* support both flow controls */
2070         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2071         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2072         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2073         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2074
2075         genphy_config_aneg(phydev);
2076
2077         dev->fc_autoneg = phydev->autoneg;
2078
2079         return 0;
2080
2081 error:
2082         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2083         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2084
2085         return ret;
2086 }
2087
/* Program the MAC's maximum RX frame size.
 *
 * @size: maximum frame size excluding the 4-byte FCS (added below).
 *
 * If the receiver is currently enabled it is stopped before the size
 * field is rewritten and restarted afterwards.
 *
 * Always returns 0; register-access results are assigned to 'ret' but
 * not checked.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	/* quiesce the receiver before changing the max frame size */
	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* restore the receiver if it was running */
	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
2116
/* Initiate asynchronous unlink of every URB on queue @q that has not
 * already entered unlink_start state.
 *
 * Returns the number of URBs whose unlink was successfully started.
 *
 * The queue lock must be dropped around usb_unlink_urb(), so each
 * iteration restarts the walk from the head looking for the next
 * not-yet-unlinked entry; marking entries unlink_start makes the scan
 * converge. A reference is taken on the URB across the unlink call —
 * see the comment below.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2161
/* ndo_change_mtu: update the MAC max frame length and, when the RX
 * URB buffers were sized to the old hard_mtu, grow them to match and
 * recycle in-flight RX URBs.
 *
 * Returns -EDOM when the link-layer MTU is an exact multiple of the
 * USB max packet size (which would require a trailing zero-length
 * packet), otherwise 0.
 *
 * NOTE(review): the lan78xx_set_rx_max_frame_length() result is
 * assigned to 'ret' but not checked.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* outstanding RX URBs are too small now; unlink
			 * them so they get resubmitted at the new size
			 */
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2191
2192 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2193 {
2194         struct lan78xx_net *dev = netdev_priv(netdev);
2195         struct sockaddr *addr = p;
2196         u32 addr_lo, addr_hi;
2197         int ret;
2198
2199         if (netif_running(netdev))
2200                 return -EBUSY;
2201
2202         if (!is_valid_ether_addr(addr->sa_data))
2203                 return -EADDRNOTAVAIL;
2204
2205         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2206
2207         addr_lo = netdev->dev_addr[0] |
2208                   netdev->dev_addr[1] << 8 |
2209                   netdev->dev_addr[2] << 16 |
2210                   netdev->dev_addr[3] << 24;
2211         addr_hi = netdev->dev_addr[4] |
2212                   netdev->dev_addr[5] << 8;
2213
2214         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2215         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2216
2217         return 0;
2218 }
2219
2220 /* Enable or disable Rx checksum offload engine */
2221 static int lan78xx_set_features(struct net_device *netdev,
2222                                 netdev_features_t features)
2223 {
2224         struct lan78xx_net *dev = netdev_priv(netdev);
2225         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2226         unsigned long flags;
2227         int ret;
2228
2229         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2230
2231         if (features & NETIF_F_RXCSUM) {
2232                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2233                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2234         } else {
2235                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2236                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2237         }
2238
2239         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2240                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2241         else
2242                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2243
2244         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2245
2246         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2247
2248         return 0;
2249 }
2250
2251 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2252 {
2253         struct lan78xx_priv *pdata =
2254                         container_of(param, struct lan78xx_priv, set_vlan);
2255         struct lan78xx_net *dev = pdata->dev;
2256
2257         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2258                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2259 }
2260
2261 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2262                                    __be16 proto, u16 vid)
2263 {
2264         struct lan78xx_net *dev = netdev_priv(netdev);
2265         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2266         u16 vid_bit_index;
2267         u16 vid_dword_index;
2268
2269         vid_dword_index = (vid >> 5) & 0x7F;
2270         vid_bit_index = vid & 0x1F;
2271
2272         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2273
2274         /* defer register writes to a sleepable context */
2275         schedule_work(&pdata->set_vlan);
2276
2277         return 0;
2278 }
2279
2280 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2281                                     __be16 proto, u16 vid)
2282 {
2283         struct lan78xx_net *dev = netdev_priv(netdev);
2284         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2285         u16 vid_bit_index;
2286         u16 vid_dword_index;
2287
2288         vid_dword_index = (vid >> 5) & 0x7F;
2289         vid_bit_index = vid & 0x1F;
2290
2291         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2292
2293         /* defer register writes to a sleepable context */
2294         schedule_work(&pdata->set_vlan);
2295
2296         return 0;
2297 }
2298
/* Initialise the USB Latency Tolerance Messaging (LTM) registers.
 *
 * When LTM is enabled in USB_CFG1, EEPROM (or OTP) offset 0x3F may
 * hold a {length, word-pointer} pair locating 24 bytes of LTM
 * register values; those are loaded into regs[]. Otherwise — or when
 * the raw read fails partway (the function bails without writing) —
 * the six LTM registers are programmed with zeros.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2337
/* Full device (lite) reset and re-initialisation.
 *
 * Sequence: HW_CFG LiteReset (polled, 1s timeout) -> restore MAC
 * address -> cache chip id/rev -> USB config (NAK on IN, LTM, burst
 * cap and queue lengths per USB speed) -> FIFO thresholds -> clear
 * interrupts/flow control -> RX filtering defaults and feature bits ->
 * PHY reset (polled, 1s timeout) -> MAC_CR setup -> enable TX then RX
 * paths.
 *
 * Returns 0 on success, -EIO if either reset bit fails to self-clear
 * in time. NOTE(review): intermediate register-access results are
 * assigned to 'ret' but not checked.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* wait for the self-clearing LiteReset bit */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size burst cap and rx/tx queue depth by negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* wait for PHY reset to self-clear and the device to report ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable TX path: MAC first, then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable RX path: MAC first, then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2475
/* Seed statistics rollover detection and kick off the first deferred
 * statistics update.
 *
 * Most hardware counters are 20 bits wide, so every rollover_max
 * field defaults to 0xFFFFF; the byte-count and EEE LPI counters
 * listed below are full 32-bit counters and are overridden.
 *
 * NOTE(review): the u32* sweep assumes rollover_max is a plain struct
 * of u32 counters with no padding — holds for the stats layout this
 * driver defines, but re-verify if that struct changes.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
2501
/* ndo_open: bring the interface up.
 *
 * Starts the PHY state machine, submits the interrupt URB used for
 * link-change notification, seeds the statistics machinery, starts
 * the TX queue and defers the initial link configuration to the
 * EVENT_LINK_RESET worker.
 *
 * Returns 0 on success or a negative errno from autopm / URB
 * submission.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* link state is unknown until the deferred link reset runs */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2540
2541 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2542 {
2543         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2544         DECLARE_WAITQUEUE(wait, current);
2545         int temp;
2546
2547         /* ensure there are no more active urbs */
2548         add_wait_queue(&unlink_wakeup, &wait);
2549         set_current_state(TASK_UNINTERRUPTIBLE);
2550         dev->wait = &unlink_wakeup;
2551         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2552
2553         /* maybe wait for deletions to finish. */
2554         while (!skb_queue_empty(&dev->rxq) &&
2555                !skb_queue_empty(&dev->txq) &&
2556                !skb_queue_empty(&dev->done)) {
2557                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2558                 set_current_state(TASK_UNINTERRUPTIBLE);
2559                 netif_dbg(dev, ifdown, dev->net,
2560                           "waited for %d urb completions\n", temp);
2561         }
2562         set_current_state(TASK_RUNNING);
2563         dev->wait = NULL;
2564         remove_wait_queue(&unlink_wakeup, &wait);
2565 }
2566
/* ndo_stop handler: quiesce the interface.
 *
 * Order matters here: stop the statistics timer and PHY first, mark
 * the device closed so deferred workers become no-ops, drain in-flight
 * bulk URBs, then silence the interrupt URB and remaining deferred
 * work.  Drops the autopm reference taken in lan78xx_open().
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net		*dev = netdev_priv(net);

	/* stop the periodic hardware statistics refresh */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* wait for outstanding tx/rx URBs to complete or be unlinked */
	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	/* frames parked while rx was paused are no longer wanted */
	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2603
/* Flatten a (possibly fragmented) skb into one linear buffer so the TX
 * command words can be prepended and the frame sent as a single URB.
 * Returns 0 on success or a negative errno from skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2608
2609 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2610                                        struct sk_buff *skb, gfp_t flags)
2611 {
2612         u32 tx_cmd_a, tx_cmd_b;
2613
2614         if (skb_cow_head(skb, TX_OVERHEAD)) {
2615                 dev_kfree_skb_any(skb);
2616                 return NULL;
2617         }
2618
2619         if (lan78xx_linearize(skb) < 0)
2620                 return NULL;
2621
2622         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2623
2624         if (skb->ip_summed == CHECKSUM_PARTIAL)
2625                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2626
2627         tx_cmd_b = 0;
2628         if (skb_is_gso(skb)) {
2629                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2630
2631                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2632
2633                 tx_cmd_a |= TX_CMD_A_LSO_;
2634         }
2635
2636         if (skb_vlan_tag_present(skb)) {
2637                 tx_cmd_a |= TX_CMD_A_IVTG_;
2638                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2639         }
2640
2641         skb_push(skb, 4);
2642         cpu_to_le32s(&tx_cmd_b);
2643         memcpy(skb->data, &tx_cmd_b, 4);
2644
2645         skb_push(skb, 4);
2646         cpu_to_le32s(&tx_cmd_a);
2647         memcpy(skb->data, &tx_cmd_a, 4);
2648
2649         return skb;
2650 }
2651
/* Move @skb from @list (rxq or txq) onto dev->done, recording its new
 * state, and schedule the bottom half when the done queue transitions
 * from empty to non-empty.  Returns the state the skb had before the
 * move so callers (e.g. rx_complete) can detect an unlink in progress.
 *
 * Locking: takes list->lock with IRQs disabled, then hands over to
 * dev->done.lock before re-enabling IRQs -- hence the deliberately
 * asymmetric spin_lock_irqsave()/spin_unlock() pairing below.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the empty->non-empty transition needs a kick; the
	 * tasklet drains the whole queue once it runs
	 */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2674
/* Bulk-out URB completion handler (runs in interrupt context).
 *
 * On success, credits the aggregated packet/byte counts recorded in
 * the skb's skb_data.  On failure, classifies the USB status: -EPIPE
 * schedules an endpoint-halt clear, link-level errors (-EPROTO,
 * -ETIME, -EILSEQ) stop the queue, and shutdown codes are ignored.
 * The skb is then handed to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled; recover from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* link-level errors: stop the queue until recovery */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* drop the async PM reference taken when the URB was submitted */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2713
2714 static void lan78xx_queue_skb(struct sk_buff_head *list,
2715                               struct sk_buff *newsk, enum skb_state state)
2716 {
2717         struct skb_data *entry = (struct skb_data *)newsk->cb;
2718
2719         __skb_queue_tail(list, newsk);
2720         entry->state = state;
2721 }
2722
2723 static netdev_tx_t
2724 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2725 {
2726         struct lan78xx_net *dev = netdev_priv(net);
2727         struct sk_buff *skb2 = NULL;
2728
2729         if (skb) {
2730                 skb_tx_timestamp(skb);
2731                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2732         }
2733
2734         if (skb2) {
2735                 skb_queue_tail(&dev->txq_pend, skb2);
2736
2737                 /* throttle TX patch at slower than SUPER SPEED USB */
2738                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2739                     (skb_queue_len(&dev->txq_pend) > 10))
2740                         netif_stop_queue(net);
2741         } else {
2742                 netif_dbg(dev, tx_err, dev->net,
2743                           "lan78xx_tx_prep return NULL\n");
2744                 dev->net->stats.tx_errors++;
2745                 dev->net->stats.tx_dropped++;
2746         }
2747
2748         tasklet_schedule(&dev->bh);
2749
2750         return NETDEV_TX_OK;
2751 }
2752
/* Scan the interface's altsettings for the endpoints this driver needs:
 * one bulk-IN, one bulk-OUT and (optionally) an interrupt-IN status
 * endpoint.  Stores the resulting bulk pipes in dev->pipe_in/pipe_out
 * and the status endpoint in dev->ep_intr; returns -EINVAL when no
 * altsetting provides both bulk endpoints.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			/* only bulk and interrupt-IN endpoints matter */
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* remember the first endpoint of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* stop at the first altsetting with both bulk pipes */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2811
2812 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2813 {
2814         struct lan78xx_priv *pdata = NULL;
2815         int ret;
2816         int i;
2817
2818         ret = lan78xx_get_endpoints(dev, intf);
2819
2820         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2821
2822         pdata = (struct lan78xx_priv *)(dev->data[0]);
2823         if (!pdata) {
2824                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2825                 return -ENOMEM;
2826         }
2827
2828         pdata->dev = dev;
2829
2830         spin_lock_init(&pdata->rfe_ctl_lock);
2831         mutex_init(&pdata->dataport_mutex);
2832
2833         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2834
2835         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2836                 pdata->vlan_table[i] = 0;
2837
2838         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2839
2840         dev->net->features = 0;
2841
2842         if (DEFAULT_TX_CSUM_ENABLE)
2843                 dev->net->features |= NETIF_F_HW_CSUM;
2844
2845         if (DEFAULT_RX_CSUM_ENABLE)
2846                 dev->net->features |= NETIF_F_RXCSUM;
2847
2848         if (DEFAULT_TSO_CSUM_ENABLE)
2849                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2850
2851         dev->net->hw_features = dev->net->features;
2852
2853         ret = lan78xx_setup_irq_domain(dev);
2854         if (ret < 0) {
2855                 netdev_warn(dev->net,
2856                             "lan78xx_setup_irq_domain() failed : %d", ret);
2857                 goto out1;
2858         }
2859
2860         dev->net->hard_header_len += TX_OVERHEAD;
2861         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2862
2863         /* Init all registers */
2864         ret = lan78xx_reset(dev);
2865         if (ret) {
2866                 netdev_warn(dev->net, "Registers INIT FAILED....");
2867                 goto out2;
2868         }
2869
2870         ret = lan78xx_mdio_init(dev);
2871         if (ret) {
2872                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2873                 goto out2;
2874         }
2875
2876         dev->net->flags |= IFF_MULTICAST;
2877
2878         pdata->wol = WAKE_MAGIC;
2879
2880         return ret;
2881
2882 out2:
2883         lan78xx_remove_irq_domain(dev);
2884
2885 out1:
2886         netdev_warn(dev->net, "Bind routine FAILED");
2887         cancel_work_sync(&pdata->set_multicast);
2888         cancel_work_sync(&pdata->set_vlan);
2889         kfree(pdata);
2890         return ret;
2891 }
2892
2893 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2894 {
2895         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2896
2897         lan78xx_remove_irq_domain(dev);
2898
2899         lan78xx_remove_mdio(dev);
2900
2901         if (pdata) {
2902                 cancel_work_sync(&pdata->set_multicast);
2903                 cancel_work_sync(&pdata->set_vlan);
2904                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2905                 kfree(pdata);
2906                 pdata = NULL;
2907                 dev->data[0] = 0;
2908         }
2909 }
2910
2911 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2912                                     struct sk_buff *skb,
2913                                     u32 rx_cmd_a, u32 rx_cmd_b)
2914 {
2915         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2916             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2917                 skb->ip_summed = CHECKSUM_NONE;
2918         } else {
2919                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2920                 skb->ip_summed = CHECKSUM_COMPLETE;
2921         }
2922 }
2923
/* Deliver a received frame to the network stack, or park it on
 * rxq_pause while reception is paused.  Updates RX statistics and
 * clears the skb_data control block before handing the skb up.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int		status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* the URB bookkeeping in skb->cb is no longer meaningful */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* ownership passes to the timestamp machinery when deferred */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2950
/* Parse one bulk-in buffer, which may batch several frames.  Each frame
 * is preceded by three little-endian command words (RX_CMD_A/B/C) and
 * its payload is padded to a 4-byte boundary before the next frame.
 *
 * The final frame reuses @skb itself (trimmed in place and handed up
 * by the caller); earlier frames are delivered as clones pointing into
 * the same buffer.  Returns 1 on success, 0 on error (the caller
 * counts rx_errors and recycles the skb).
 *
 * NOTE(review): 'size' comes straight from the hardware descriptor and
 * is not validated against skb->len before the clone/trim below --
 * presumably the device never reports a bogus length, but worth
 * confirming against newer upstream versions that add bounds checks.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three command words off the front */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error; skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this frame's payload */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3022
3023 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3024 {
3025         if (!lan78xx_rx(dev, skb)) {
3026                 dev->net->stats.rx_errors++;
3027                 goto done;
3028         }
3029
3030         if (skb->len) {
3031                 lan78xx_skb_return(dev, skb);
3032                 return;
3033         }
3034
3035         netif_dbg(dev, rx_err, dev->net, "drop\n");
3036         dev->net->stats.rx_errors++;
3037 done:
3038         skb_queue_tail(&dev->done, skb);
3039 }
3040
static void rx_complete(struct urb *urb);

/* Allocate an rx skb, bind it to @urb and submit it on the bulk-in
 * pipe.  Only submits while the device is present, the interface is
 * running and neither RX-halt recovery nor autosuspend is in progress.
 *
 * On any failure the skb and urb are freed here.  Returns 0 on
 * success, -ENOLINK when the device is stopped/unreachable, or another
 * negative errno from allocation/submission.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock also serializes submission against unlink/teardown */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled; recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			/* transient failure: let the bottom half retry */
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3102
/* Bulk-in URB completion handler (runs in interrupt context).
 *
 * Classifies the URB status into an skb_state for the bottom half:
 * rx_done for good data, rx_cleanup for anything that must simply be
 * freed.  For fatal statuses the urb is parked in entry->urb (and the
 * local pointer NULLed) so the bottom half releases it instead of this
 * handler resubmitting it below.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success, but runt buffers are dropped */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* resubmit with a fresh skb unless the interface is going down
	 * or this skb was being unlinked (state reports its old value)
	 */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3172
3173 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3174 {
3175         int length;
3176         struct urb *urb = NULL;
3177         struct skb_data *entry;
3178         unsigned long flags;
3179         struct sk_buff_head *tqp = &dev->txq_pend;
3180         struct sk_buff *skb, *skb2;
3181         int ret;
3182         int count, pos;
3183         int skb_totallen, pkt_cnt;
3184
3185         skb_totallen = 0;
3186         pkt_cnt = 0;
3187         count = 0;
3188         length = 0;
3189         spin_lock_irqsave(&tqp->lock, flags);
3190         for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3191                 if (skb_is_gso(skb)) {
3192                         if (pkt_cnt) {
3193                                 /* handle previous packets first */
3194                                 break;
3195                         }
3196                         count = 1;
3197                         length = skb->len - TX_OVERHEAD;
3198                         __skb_unlink(skb, tqp);
3199                         spin_unlock_irqrestore(&tqp->lock, flags);
3200                         goto gso_skb;
3201                 }
3202
3203                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3204                         break;
3205                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3206                 pkt_cnt++;
3207         }
3208         spin_unlock_irqrestore(&tqp->lock, flags);
3209
3210         /* copy to a single skb */
3211         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3212         if (!skb)
3213                 goto drop;
3214
3215         skb_put(skb, skb_totallen);
3216
3217         for (count = pos = 0; count < pkt_cnt; count++) {
3218                 skb2 = skb_dequeue(tqp);
3219                 if (skb2) {
3220                         length += (skb2->len - TX_OVERHEAD);
3221                         memcpy(skb->data + pos, skb2->data, skb2->len);
3222                         pos += roundup(skb2->len, sizeof(u32));
3223                         dev_kfree_skb(skb2);
3224                 }
3225         }
3226
3227 gso_skb:
3228         urb = usb_alloc_urb(0, GFP_ATOMIC);
3229         if (!urb)
3230                 goto drop;
3231
3232         entry = (struct skb_data *)skb->cb;
3233         entry->urb = urb;
3234         entry->dev = dev;
3235         entry->length = length;
3236         entry->num_of_packet = count;
3237
3238         spin_lock_irqsave(&dev->txq.lock, flags);
3239         ret = usb_autopm_get_interface_async(dev->intf);
3240         if (ret < 0) {
3241                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3242                 goto drop;
3243         }
3244
3245         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3246                           skb->data, skb->len, tx_complete, skb);
3247
3248         if (length % dev->maxpacket == 0) {
3249                 /* send USB_ZERO_PACKET */
3250                 urb->transfer_flags |= URB_ZERO_PACKET;
3251         }
3252
3253 #ifdef CONFIG_PM
3254         /* if this triggers the device is still a sleep */
3255         if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3256                 /* transmission will be done in resume */
3257                 usb_anchor_urb(urb, &dev->deferred);
3258                 /* no use to process more packets */
3259                 netif_stop_queue(dev->net);
3260                 usb_put_urb(urb);
3261                 spin_unlock_irqrestore(&dev->txq.lock, flags);
3262                 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3263                 return;
3264         }
3265 #endif
3266
3267         ret = usb_submit_urb(urb, GFP_ATOMIC);
3268         switch (ret) {
3269         case 0:
3270                 netif_trans_update(dev->net);
3271                 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3272                 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3273                         netif_stop_queue(dev->net);
3274                 break;
3275         case -EPIPE:
3276                 netif_stop_queue(dev->net);
3277                 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3278                 usb_autopm_put_interface_async(dev->intf);
3279                 break;
3280         default:
3281                 usb_autopm_put_interface_async(dev->intf);
3282                 netif_dbg(dev, tx_err, dev->net,
3283                           "tx: submit urb err %d\n", ret);
3284                 break;
3285         }
3286
3287         spin_unlock_irqrestore(&dev->txq.lock, flags);
3288
3289         if (ret) {
3290                 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3291 drop:
3292                 dev->net->stats.tx_dropped++;
3293                 if (skb)
3294                         dev_kfree_skb_any(skb);
3295                 usb_free_urb(urb);
3296         } else
3297                 netif_dbg(dev, tx_queued, dev->net,
3298                           "> tx, len %d, type 0x%x\n", length, skb->protocol);
3299 }
3300
3301 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3302 {
3303         struct urb *urb;
3304         int i;
3305
3306         if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3307                 for (i = 0; i < 10; i++) {
3308                         if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3309                                 break;
3310                         urb = usb_alloc_urb(0, GFP_ATOMIC);
3311                         if (urb)
3312                                 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3313                                         return;
3314                 }
3315
3316                 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3317                         tasklet_schedule(&dev->bh);
3318         }
3319         if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3320                 netif_wake_queue(dev->net);
3321 }
3322
/* Bottom-half tasklet: drain the done queue (good rx buffers go to
 * rx_process(), completed/cancelled entries are freed), then keep the
 * tx and rx pipelines full while the interface is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* pre-mark cleanup in case rx_process re-queues it */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state; bail out rather than loop */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3366
3367 static void lan78xx_delayedwork(struct work_struct *work)
3368 {
3369         int status;
3370         struct lan78xx_net *dev;
3371
3372         dev = container_of(work, struct lan78xx_net, wq.work);
3373
3374         if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3375                 unlink_urbs(dev, &dev->txq);
3376                 status = usb_autopm_get_interface(dev->intf);
3377                 if (status < 0)
3378                         goto fail_pipe;
3379                 status = usb_clear_halt(dev->udev, dev->pipe_out);
3380                 usb_autopm_put_interface(dev->intf);
3381                 if (status < 0 &&
3382                     status != -EPIPE &&
3383                     status != -ESHUTDOWN) {
3384                         if (netif_msg_tx_err(dev))
3385 fail_pipe:
3386                                 netdev_err(dev->net,
3387                                            "can't clear tx halt, status %d\n",
3388                                            status);
3389                 } else {
3390                         clear_bit(EVENT_TX_HALT, &dev->flags);
3391                         if (status != -ESHUTDOWN)
3392                                 netif_wake_queue(dev->net);
3393                 }
3394         }
3395         if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3396                 unlink_urbs(dev, &dev->rxq);
3397                 status = usb_autopm_get_interface(dev->intf);
3398                 if (status < 0)
3399                                 goto fail_halt;
3400                 status = usb_clear_halt(dev->udev, dev->pipe_in);
3401                 usb_autopm_put_interface(dev->intf);
3402                 if (status < 0 &&
3403                     status != -EPIPE &&
3404                     status != -ESHUTDOWN) {
3405                         if (netif_msg_rx_err(dev))
3406 fail_halt:
3407                                 netdev_err(dev->net,
3408                                            "can't clear rx halt, status %d\n",
3409                                            status);
3410                 } else {
3411                         clear_bit(EVENT_RX_HALT, &dev->flags);
3412                         tasklet_schedule(&dev->bh);
3413                 }
3414         }
3415
3416         if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3417                 int ret = 0;
3418
3419                 clear_bit(EVENT_LINK_RESET, &dev->flags);
3420                 status = usb_autopm_get_interface(dev->intf);
3421                 if (status < 0)
3422                         goto skip_reset;
3423                 if (lan78xx_link_reset(dev) < 0) {
3424                         usb_autopm_put_interface(dev->intf);
3425 skip_reset:
3426                         netdev_info(dev->net, "link reset failed (%d)\n",
3427                                     ret);
3428                 } else {
3429                         usb_autopm_put_interface(dev->intf);
3430                 }
3431         }
3432
3433         if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3434                 lan78xx_update_stats(dev);
3435
3436                 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3437
3438                 mod_timer(&dev->stat_monitor,
3439                           jiffies + (STAT_UPDATE_TIMER * dev->delta));
3440
3441                 dev->delta = min((dev->delta * 2), 50);
3442         }
3443 }
3444
3445 static void intr_complete(struct urb *urb)
3446 {
3447         struct lan78xx_net *dev = urb->context;
3448         int status = urb->status;
3449
3450         switch (status) {
3451         /* success */
3452         case 0:
3453                 lan78xx_status(dev, urb);
3454                 break;
3455
3456         /* software-driven interface shutdown */
3457         case -ENOENT:                   /* urb killed */
3458         case -ESHUTDOWN:                /* hardware gone */
3459                 netif_dbg(dev, ifdown, dev->net,
3460                           "intr shutdown, code %d\n", status);
3461                 return;
3462
3463         /* NOTE:  not throttling like RX/TX, since this endpoint
3464          * already polls infrequently
3465          */
3466         default:
3467                 netdev_dbg(dev->net, "intr status %d\n", status);
3468                 break;
3469         }
3470
3471         if (!netif_running(dev->net))
3472                 return;
3473
3474         memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3475         status = usb_submit_urb(urb, GFP_ATOMIC);
3476         if (status != 0)
3477                 netif_err(dev, timer, dev->net,
3478                           "intr resubmit --> %d\n", status);
3479 }
3480
/* USB disconnect handler: tear down everything probe set up, roughly in
 * reverse order (PHY, netdev, deferred work, driver state, interrupt URB).
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
        struct lan78xx_net              *dev;
        struct usb_device               *udev;
        struct net_device               *net;

        dev = usb_get_intfdata(intf);
        /* clear intfdata first so late callers see the device going away */
        usb_set_intfdata(intf, NULL);
        if (!dev)
                return;

        udev = interface_to_usbdev(intf);
        net = dev->net;

        /* drop the PHY fixups for these two PHY IDs (presumably registered
         * elsewhere in this file during init — confirm) before detaching
         */
        phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
        phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

        phy_disconnect(net->phydev);

        unregister_netdev(net);

        /* NOTE(review): the delayed work is cancelled only after
         * unregister_netdev(); if anything can still queue it at this point
         * it would briefly run against a dead netdev — verify ordering.
         */
        cancel_delayed_work_sync(&dev->wq);

        /* drop TX URBs that were parked while the device was asleep */
        usb_scuttle_anchored_urbs(&dev->deferred);

        lan78xx_unbind(dev, intf);

        /* stop and release the interrupt-in URB last */
        usb_kill_urb(dev->urb_intr);
        usb_free_urb(dev->urb_intr);

        free_netdev(net);
        usb_put_dev(udev);
}
3514
3515 static void lan78xx_tx_timeout(struct net_device *net)
3516 {
3517         struct lan78xx_net *dev = netdev_priv(net);
3518
3519         unlink_urbs(dev, &dev->txq);
3520         tasklet_schedule(&dev->bh);
3521 }
3522
/* net_device callbacks for the LAN78xx adapters; installed on the netdev
 * during probe.
 */
static const struct net_device_ops lan78xx_netdev_ops = {
        .ndo_open               = lan78xx_open,
        .ndo_stop               = lan78xx_stop,
        .ndo_start_xmit         = lan78xx_start_xmit,
        .ndo_tx_timeout         = lan78xx_tx_timeout,
        .ndo_change_mtu         = lan78xx_change_mtu,
        .ndo_set_mac_address    = lan78xx_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = lan78xx_ioctl,
        .ndo_set_rx_mode        = lan78xx_set_multicast,
        .ndo_set_features       = lan78xx_set_features,
        .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
};
3537
3538 static void lan78xx_stat_monitor(unsigned long param)
3539 {
3540         struct lan78xx_net *dev;
3541
3542         dev = (struct lan78xx_net *)param;
3543
3544         lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3545 }
3546
3547 static int lan78xx_probe(struct usb_interface *intf,
3548                          const struct usb_device_id *id)
3549 {
3550         struct lan78xx_net *dev;
3551         struct net_device *netdev;
3552         struct usb_device *udev;
3553         int ret;
3554         unsigned maxp;
3555         unsigned period;
3556         u8 *buf = NULL;
3557
3558         udev = interface_to_usbdev(intf);
3559         udev = usb_get_dev(udev);
3560
3561         netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3562         if (!netdev) {
3563                 dev_err(&intf->dev, "Error: OOM\n");
3564                 ret = -ENOMEM;
3565                 goto out1;
3566         }
3567
3568         /* netdev_printk() needs this */
3569         SET_NETDEV_DEV(netdev, &intf->dev);
3570
3571         dev = netdev_priv(netdev);
3572         dev->udev = udev;
3573         dev->intf = intf;
3574         dev->net = netdev;
3575         dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3576                                         | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3577
3578         skb_queue_head_init(&dev->rxq);
3579         skb_queue_head_init(&dev->txq);
3580         skb_queue_head_init(&dev->done);
3581         skb_queue_head_init(&dev->rxq_pause);
3582         skb_queue_head_init(&dev->txq_pend);
3583         mutex_init(&dev->phy_mutex);
3584
3585         tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3586         INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3587         init_usb_anchor(&dev->deferred);
3588
3589         netdev->netdev_ops = &lan78xx_netdev_ops;
3590         netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3591         netdev->ethtool_ops = &lan78xx_ethtool_ops;
3592
3593         dev->stat_monitor.function = lan78xx_stat_monitor;
3594         dev->stat_monitor.data = (unsigned long)dev;
3595         dev->delta = 1;
3596         init_timer(&dev->stat_monitor);
3597
3598         mutex_init(&dev->stats.access_lock);
3599
3600         ret = lan78xx_bind(dev, intf);
3601         if (ret < 0)
3602                 goto out2;
3603         strcpy(netdev->name, "eth%d");
3604
3605         if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3606                 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3607
3608         /* MTU range: 68 - 9000 */
3609         netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3610
3611         dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3612         dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3613         dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3614
3615         dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3616         dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3617
3618         dev->pipe_intr = usb_rcvintpipe(dev->udev,
3619                                         dev->ep_intr->desc.bEndpointAddress &
3620                                         USB_ENDPOINT_NUMBER_MASK);
3621         period = dev->ep_intr->desc.bInterval;
3622
3623         maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3624         buf = kmalloc(maxp, GFP_KERNEL);
3625         if (buf) {
3626                 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3627                 if (!dev->urb_intr) {
3628                         ret = -ENOMEM;
3629                         kfree(buf);
3630                         goto out3;
3631                 } else {
3632                         usb_fill_int_urb(dev->urb_intr, dev->udev,
3633                                          dev->pipe_intr, buf, maxp,
3634                                          intr_complete, dev, period);
3635                 }
3636         }
3637
3638         dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3639
3640         /* driver requires remote-wakeup capability during autosuspend. */
3641         intf->needs_remote_wakeup = 1;
3642
3643         ret = register_netdev(netdev);
3644         if (ret != 0) {
3645                 netif_err(dev, probe, netdev, "couldn't register the device\n");
3646                 goto out3;
3647         }
3648
3649         usb_set_intfdata(intf, dev);
3650
3651         ret = device_set_wakeup_enable(&udev->dev, true);
3652
3653          /* Default delay of 2sec has more overhead than advantage.
3654           * Set to 10sec as default.
3655           */
3656         pm_runtime_set_autosuspend_delay(&udev->dev,
3657                                          DEFAULT_AUTOSUSPEND_DELAY);
3658
3659         ret = lan78xx_phy_init(dev);
3660         if (ret < 0)
3661                 goto out4;
3662
3663         return 0;
3664
3665 out4:
3666         unregister_netdev(netdev);
3667 out3:
3668         lan78xx_unbind(dev, intf);
3669 out2:
3670         free_netdev(netdev);
3671 out1:
3672         usb_put_dev(udev);
3673
3674         return ret;
3675 }
3676
3677 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3678 {
3679         const u16 crc16poly = 0x8005;
3680         int i;
3681         u16 bit, crc, msb;
3682         u8 data;
3683
3684         crc = 0xFFFF;
3685         for (i = 0; i < len; i++) {
3686                 data = *buf++;
3687                 for (bit = 0; bit < 8; bit++) {
3688                         msb = crc >> 15;
3689                         crc <<= 1;
3690
3691                         if (msb ^ (u16)(data & 1)) {
3692                                 crc ^= crc16poly;
3693                                 crc |= (u16)0x0001U;
3694                         }
3695                         data >>= 1;
3696                 }
3697         }
3698
3699         return crc;
3700 }
3701
/* Arm the chip's wake-up logic for system suspend according to the
 * Wake-on-LAN options in @wol (WAKE_PHY / WAKE_MAGIC / WAKE_BCAST /
 * WAKE_MCAST / WAKE_UCAST / WAKE_ARP), then re-enable the receiver so
 * wake packets can be detected.
 *
 * NOTE(review): every register access result is stored in 'ret' but never
 * checked, and the function always returns 0 — USB I/O failures while
 * arming wake-up go unreported; confirm this is intentional.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
        u32 buf;
        int ret;
        int mask_index;
        u16 crc;
        u32 temp_wucsr;
        u32 temp_pmt_ctl;
        /* leading header bytes matched by the wakeup-frame filters */
        const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
        const u8 ipv6_multicast[3] = { 0x33, 0x33 };
        const u8 arp_type[2] = { 0x08, 0x06 };

        /* disable TX and RX while the wake-up logic is reprogrammed */
        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
        buf &= ~MAC_TX_TXEN_;
        ret = lan78xx_write_reg(dev, MAC_TX, buf);
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf &= ~MAC_RX_RXEN_;
        ret = lan78xx_write_reg(dev, MAC_RX, buf);

        /* clear wake-up control/status and any latched wake sources */
        ret = lan78xx_write_reg(dev, WUCSR, 0);
        ret = lan78xx_write_reg(dev, WUCSR2, 0);
        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

        temp_wucsr = 0;

        temp_pmt_ctl = 0;
        ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
        temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
        temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

        /* start from a clean set of wakeup-frame filters */
        for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

        mask_index = 0;
        if (wol & WAKE_PHY) {
                temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_MAGIC) {
                temp_wucsr |= WUCSR_MPEN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                /* magic-packet-only wake uses suspend mode 3 */
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
        }
        if (wol & WAKE_BCAST) {
                temp_wucsr |= WUCSR_BCST_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_MCAST) {
                temp_wucsr |= WUCSR_WAKE_EN_;

                /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
                crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                        WUF_CFGX_EN_ |
                                        WUF_CFGX_TYPE_MCAST_ |
                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                        (crc & WUF_CFGX_CRC16_MASK_));

                /* mask 0x7: match the first three bytes (01:00:5E) */
                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                /* for IPv6 Multicast */
                crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                        WUF_CFGX_EN_ |
                                        WUF_CFGX_TYPE_MCAST_ |
                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                        (crc & WUF_CFGX_CRC16_MASK_));

                /* mask 0x3: match the first two bytes (33:33) */
                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_UCAST) {
                temp_wucsr |= WUCSR_PFDA_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_ARP) {
                temp_wucsr |= WUCSR_WAKE_EN_;

                /* set WUF_CFG & WUF_MASK
                 * for packettype (offset 12,13) = ARP (0x0806)
                 */
                crc = lan78xx_wakeframe_crc16(arp_type, 2);
                ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                        WUF_CFGX_EN_ |
                                        WUF_CFGX_TYPE_ALL_ |
                                        (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                        (crc & WUF_CFGX_CRC16_MASK_));

                /* mask bits 12-13: the EtherType field */
                ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
                ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }

        ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

        /* when multiple WOL bits are set */
        if (hweight_long((unsigned long)wol) > 1) {
                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

        /* clear WUPS */
        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
        buf |= PMT_CTL_WUPS_MASK_;
        ret = lan78xx_write_reg(dev, PMT_CTL, buf);

        /* re-enable the receiver so wake packets can be seen */
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf |= MAC_RX_RXEN_;
        ret = lan78xx_write_reg(dev, MAC_RX, buf);

        return 0;
}
3844
/* USB suspend handler (system sleep and runtime/auto-suspend).
 *
 * The first suspender (suspend_count 0 -> 1) quiesces I/O: refuses
 * autosuspend while TX is pending, stops the MAC, detaches the netdev and
 * kills all URBs.  Then, with the device marked asleep, wake-up is armed:
 * good-frame wake for autosuspend, or the user's WoL configuration
 * (pdata->wol) for system sleep.
 *
 * NOTE(review): 'event' is assigned but never read, and most register
 * read/write results in 'ret' are ignored — confirm intended.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
        struct lan78xx_net *dev = usb_get_intfdata(intf);
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        u32 buf;
        int ret;
        int event;

        event = message.event;

        /* only the first suspender performs the actual quiescing */
        if (!dev->suspend_count++) {
                spin_lock_irq(&dev->txq.lock);
                /* don't autosuspend while transmitting */
                if ((skb_queue_len(&dev->txq) ||
                     skb_queue_len(&dev->txq_pend)) &&
                        PMSG_IS_AUTO(message)) {
                        spin_unlock_irq(&dev->txq.lock);
                        ret = -EBUSY;
                        goto out;
                } else {
                        set_bit(EVENT_DEV_ASLEEP, &dev->flags);
                        spin_unlock_irq(&dev->txq.lock);
                }

                /* stop TX & RX */
                ret = lan78xx_read_reg(dev, MAC_TX, &buf);
                buf &= ~MAC_TX_TXEN_;
                ret = lan78xx_write_reg(dev, MAC_TX, buf);
                ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                buf &= ~MAC_RX_RXEN_;
                ret = lan78xx_write_reg(dev, MAC_RX, buf);

                /* empty out the rx and queues */
                netif_device_detach(dev->net);
                lan78xx_terminate_urbs(dev);
                usb_kill_urb(dev->urb_intr);

                /* reattach */
                netif_device_attach(dev->net);
        }

        if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
                /* no stats polling while asleep */
                del_timer(&dev->stat_monitor);

                if (PMSG_IS_AUTO(message)) {
                        /* auto suspend (selective suspend) */
                        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
                        buf &= ~MAC_TX_TXEN_;
                        ret = lan78xx_write_reg(dev, MAC_TX, buf);
                        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                        buf &= ~MAC_RX_RXEN_;
                        ret = lan78xx_write_reg(dev, MAC_RX, buf);

                        /* clear wake-up state and latched wake sources */
                        ret = lan78xx_write_reg(dev, WUCSR, 0);
                        ret = lan78xx_write_reg(dev, WUCSR2, 0);
                        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

                        /* set goodframe wakeup */
                        ret = lan78xx_read_reg(dev, WUCSR, &buf);

                        buf |= WUCSR_RFE_WAKE_EN_;
                        buf |= WUCSR_STORE_WAKE_;

                        ret = lan78xx_write_reg(dev, WUCSR, buf);

                        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

                        buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
                        buf |= PMT_CTL_RES_CLR_WKP_STS_;

                        buf |= PMT_CTL_PHY_WAKE_EN_;
                        buf |= PMT_CTL_WOL_EN_;
                        buf &= ~PMT_CTL_SUS_MODE_MASK_;
                        buf |= PMT_CTL_SUS_MODE_3_;

                        ret = lan78xx_write_reg(dev, PMT_CTL, buf);

                        /* clear any pending wake-up status bits */
                        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

                        buf |= PMT_CTL_WUPS_MASK_;

                        ret = lan78xx_write_reg(dev, PMT_CTL, buf);

                        /* re-enable RX so wake frames can be received */
                        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                        buf |= MAC_RX_RXEN_;
                        ret = lan78xx_write_reg(dev, MAC_RX, buf);
                } else {
                        /* system sleep: arm the user-configured WoL modes */
                        lan78xx_set_suspend(dev, pdata->wol);
                }
        }

        ret = 0;
out:
        return ret;
}
3940
/* USB resume handler: when the last suspender resumes (suspend_count hits
 * 0), restart the interrupt URB, re-submit the TX URBs that were anchored
 * in dev->deferred while asleep, and wake the queue.  Finally clear the
 * wake-up state, re-arm wake-source reporting and re-enable the
 * transmitter.
 *
 * NOTE(review): register write results and the interrupt-URB submission
 * result are ignored and the function always returns 0 — confirm that
 * failures here are acceptable.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
        struct lan78xx_net *dev = usb_get_intfdata(intf);
        struct sk_buff *skb;
        struct urb *res;
        int ret;
        u32 buf;

        /* restart the stats poll that suspend stopped */
        if (!timer_pending(&dev->stat_monitor)) {
                dev->delta = 1;
                mod_timer(&dev->stat_monitor,
                          jiffies + STAT_UPDATE_TIMER);
        }

        if (!--dev->suspend_count) {
                /* resume interrupt URBs */
                if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
                                usb_submit_urb(dev->urb_intr, GFP_NOIO);

                /* re-submit TX URBs deferred while the device was asleep */
                spin_lock_irq(&dev->txq.lock);
                while ((res = usb_get_from_anchor(&dev->deferred))) {
                        skb = (struct sk_buff *)res->context;
                        ret = usb_submit_urb(res, GFP_ATOMIC);
                        if (ret < 0) {
                                /* drop the packet; release the autopm
                                 * reference taken when it was deferred
                                 */
                                dev_kfree_skb_any(skb);
                                usb_free_urb(res);
                                usb_autopm_put_interface_async(dev->intf);
                        } else {
                                netif_trans_update(dev->net);
                                lan78xx_queue_skb(&dev->txq, skb, tx_start);
                        }
                }

                clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
                spin_unlock_irq(&dev->txq.lock);

                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
                        if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
                                netif_start_queue(dev->net);
                        tasklet_schedule(&dev->bh);
                }
        }

        /* clear wake-up state and latched wake sources */
        ret = lan78xx_write_reg(dev, WUCSR2, 0);
        ret = lan78xx_write_reg(dev, WUCSR, 0);
        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

        /* re-arm wake-source status reporting */
        ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
                                             WUCSR2_ARP_RCD_ |
                                             WUCSR2_IPV6_TCPSYN_RCD_ |
                                             WUCSR2_IPV4_TCPSYN_RCD_);

        ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
                                            WUCSR_EEE_RX_WAKE_ |
                                            WUCSR_PFDA_FR_ |
                                            WUCSR_RFE_WAKE_FR_ |
                                            WUCSR_WUFR_ |
                                            WUCSR_MPR_ |
                                            WUCSR_BCST_FR_);

        /* re-enable the transmitter */
        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
        buf |= MAC_TX_TXEN_;
        ret = lan78xx_write_reg(dev, MAC_TX, buf);

        return 0;
}
4007
4008 static int lan78xx_reset_resume(struct usb_interface *intf)
4009 {
4010         struct lan78xx_net *dev = usb_get_intfdata(intf);
4011
4012         lan78xx_reset(dev);
4013
4014         phy_start(dev->net->phydev);
4015
4016         return lan78xx_resume(intf);
4017 }
4018
/* USB IDs this driver binds to — the Microchip LAN78xx family. */
static const struct usb_device_id products[] = {
        {
        /* LAN7800 USB Gigabit Ethernet Device */
        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
        },
        {
        /* LAN7850 USB Gigabit Ethernet Device */
        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
        },
        {
        /* LAN7801 USB Gigabit Ethernet Device */
        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
        },
        {},
};
MODULE_DEVICE_TABLE(usb, products);
4035
/* USB driver glue: supports autosuspend (with the suspend/resume handlers
 * above) and disables hub-initiated LPM.
 */
static struct usb_driver lan78xx_driver = {
        .name                   = DRIVER_NAME,
        .id_table               = products,
        .probe                  = lan78xx_probe,
        .disconnect             = lan78xx_disconnect,
        .suspend                = lan78xx_suspend,
        .resume                 = lan78xx_resume,
        .reset_resume           = lan78xx_reset_resume,
        .supports_autosuspend   = 1,
        .disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");