/* SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"
#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)
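/* The register layout differs between the supported controller families, so
 * every register access goes through a per-family offset table. Each table
 * maps the generic register enum from sh_eth.h to the byte offset of that
 * register inside the family's I/O window. A minimal sketch of the lookup
 * (illustrative only; the real accessors live in sh_eth.h):
 *
 *	iowrite32(data, mdp->addr + mdp->reg_offset[reg]);	// sh_eth_write()
 *	ioread32(mdp->addr + mdp->reg_offset[reg]);		// sh_eth_read()
 */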
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,
	/* ... */
};
static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
	[TSU_CTRST]	= 0x0004,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,
	/* ... */
};
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
};
static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
};
static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	/* ... */
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	/* ... */
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}
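/* Note that the family checks above identify the controller by comparing the
 * reg_offset pointer against the known offset tables, so each check costs a
 * single pointer comparison rather than a per-device flag lookup.
 */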
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		/* ... */
		break;
	case PHY_INTERFACE_MODE_MII:
		/* ... */
		break;
	case PHY_INTERFACE_MODE_RMII:
		/* ... */
		break;
	default:
		pr_warn("PHY interface mode was not set up. Set to MII.\n");
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
/* CPU-dependent code follows */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	/* ... */
};
static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	/* ... */
};
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	/* ... */
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= RMCR_RNC,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
	/* ... */
	.rpadir_value	= 2 << 16,
};
#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
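/* Worked example of the macros above: the ports are spaced 0x800 bytes
 * apart, so for port 1, GIGA_MAHR(1) = 0xfee00000 + 0x800 + 0x05c0 =
 * 0xfee00dc0 and GIGA_MALR(1) = 0xfee00000 + 0x800 + 0x05c8 = 0xfee00dc8.
 */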
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}
static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}
/* SH7757 (GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= RMCR_RNC,

	.irq_flags	= IRQF_SHARED,
	/* ... */
	.rpadir_value	= 2 << 16,
	/* ... */
};
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}
static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	/* ... */
};
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	/* ... */
	.irq_flags	= IRQF_SHARED,
};
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,
	.rmcr_value	= RMCR_RNC,
	/* ... */
	.rpadir_value	= 2 << 16,
	/* ... */
};
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_FAST_RZ,

	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= 0xff7f009f,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,
	.rmcr_value	= RMCR_RNC,
	/* ... */
	.rpadir_value	= 2 << 16,
	/* ... */
};
static struct sh_eth_cpu_data sh7619_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	/* ... */
};
static struct sh_eth_cpu_data sh771x_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	/* ... */
};
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
}
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	/* wait for the self-clearing software reset bits to drop */
	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		pr_err("Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			return ret;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

	return ret;
}
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
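/* Example of the conversion above: with mdp->edmac_endian == EDMAC_BIG_ENDIAN
 * on a little-endian CPU, cpu_to_edmac(mdp, 0x12345678) returns 0x78563412,
 * i.e. the descriptor word is byte-swapped so that 0x12 is the first byte the
 * EDMAC sees in memory.
 */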
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
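/* Packing example: for dev_addr 02:01:02:03:04:05 the code above writes
 * MAHR = 0x02010203 (the first four octets) and MALR = 0x00000405 (the
 * last two octets).
 */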
/* Get MAC address from the SuperH MAC address registers
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver reads the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). To use this device, the MAC address must be
 * set in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};
/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}
/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
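/* The four callbacks above only move single bits through the PIR register;
 * the generic mdio-bitbang library (linux/mdio-bitbang.h) drives them to
 * clock out complete MDIO frames, so alloc_mdio_bitbang() turns this ops
 * struct into a fully functional struct mii_bus (see sh_mdio_init() below).
 */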
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
			       DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}
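/* Buffer-size example for the computation in sh_eth_ring_init() (an
 * illustration, not a value from the datasheet): with an MTU of 1500,
 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes per Rx buffer,
 * before the optional NET_IP_ALIGN padding when RPADIR is in use.
 */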
static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

	return ret;
}
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, txdesc->addr,
					 txdesc->buffer_length, DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += txdesc->buffer_length;
	}
	return free_num;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (*quota <= 0)
			break;

		(*quota)--;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740, R8A779x, and
		 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->shift_rd0)
			desc_status >>= 16;
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
						mdp->rx_buf_sz,
						DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}
	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				       DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	return *quota <= 0;
}
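/* A note on the RDFAR/RDLAR fix-up in sh_eth_rx(): both registers hold byte
 * addresses into the descriptor ring and each Rx descriptor occupies 16
 * bytes, so the difference shifted right by 4 is the index of the descriptor
 * the hardware will fill next; cur_rx and dirty_rx are resynchronized to it
 * after a Receive-Descriptor-Empty stop.
 */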
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		     ~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		     (ECMR_RE | ECMR_TE), ECMR);
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
						   ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					     ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
						   DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			intr_status, mdp->cur_tx, mdp->dirty_tx,
			(u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	unsigned long intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_error() in order to quench
	 * it as it doesn't get cleared by just writing 1 to the ECI bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | DMAC_M_ECI;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto other_irq;

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			dev_warn(&ndev->dev,
				 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
				 intr_status, intr_enable);
		}
	}

	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	unsigned long intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

out:
	return budget - quota;
}
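/* NAPI contract note: quota starts at budget and sh_eth_rx() decrements it
 * per received frame, so budget - quota is the number of packets processed.
 * Returning less than budget tells the core that the ring is drained; Rx
 * interrupts are re-enabled (via EESIPR) only on that completion path.
 */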
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_write(ndev,
				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
				     ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		 mdp->mii_bus->id, mdp->phy_id);

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try to connect to the PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
		 phydev->addr, phydev->irq, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	phy_start(mdp->phydev);

	return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static int sh_eth_set_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mdp->msg_enable = value;
}
static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}
static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}
static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}
static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);
		/* Disable interrupts by clearing the interrupt mask. */
		sh_eth_write(ndev, 0x0000, EESIPR);
		/* Stop the chip's Tx and Rx processes. */
		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);
		synchronize_irq(ndev->irq);
	}

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);
	/* Free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	ret = sh_eth_ring_init(ndev);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
		return ret;
	}
	ret = sh_eth_dev_init(ndev, false);
	if (ret < 0) {
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
		return ret;
	}

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
	}

	return 0;
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	napi_enable(&mdp->napi);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		goto out_napi_off;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&mdp->napi);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	if (netif_msg_timer(mdp)) {
		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
			ndev->name, (int)sh_eth_read(ndev, EESR));
	}

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	napi_disable(&mdp->napi);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (sh_eth_is_rz_fast_ether(mdp))
		return &ndev->stats;

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
/* For TSU_POSTn. Please refer to the manual for these (strange) bitfields. */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
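/* Bit layout example for the helpers above: TSU_POST1..TSU_POST4 each hold
 * eight 4-bit fields, one per CAM entry, with entry 0 in bits 31:28. Entry
 * 11, say, lives in TSU_POST2 (11 / 8 == 1) in bits 19:16, since the mask is
 * 0x0f << (28 - (11 % 8) * 4) == 0x0f << 16. Within each field, the bit for
 * this port is selected by 0x08 >> (mdp->port << 1): 0x8 for port 0, 0x2 for
 * port 1.
 */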
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If the other port still uses the entry, the function returns true */
	return tmp & ref_mask;
}
static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}
static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}
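/* CAM layout assumed by the entry helpers below: each of the
 * SH_ETH_TSU_CAM_ENTRIES (32) address entries occupies 8 bytes, ADRHn holding
 * the first four octets and ADRLn the last two, so entry n sits at
 * TSU_ADRH0 + n * 8 and entry 31 lands at 0x01f8/0x01fc, matching the offset
 * tables at the top of this file.
 */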
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (ether_addr_equal(addr, c_addr))
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	/* disable the entry */
	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}
static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0)
		goto done;

	if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
		goto done;

	/* Disable the entry if both ports are disabled */
	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
	if (ret < 0)
		return ret;

done:
	return 0;
}
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports are disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}
/* Set the multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
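/* Resulting ECMR mode bits, for reference (MCT: filter multicast through the
 * TSU CAM, PRM: promiscuous):
 *	IFF_PROMISC		PRM = 1, MCT = 0 (accept everything)
 *	IFF_ALLMULTI		PRM = 0, MCT = 0 (all multicast passes)
 *	multicast via TSU	PRM = 0, MCT = 1 (CAM entries filter)
 */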
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	return mdp->port ? TSU_VTAG1 : TSU_VTAG0;
}
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and accepts all VLAN-tagged
	 * frames, leaving the filtering to the upper layers.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	if (sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
		return;
	}

	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}
/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
			       GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
					 sizeof(int) * PHY_MAX_ADDR,
					 GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;
	if (pd->phy_irq > 0)
		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_bus;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out:
	return ret;
}
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RZ:
		reg_offset = sh_eth_offset_fast_rz;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		pr_err("Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	if (pd == NULL) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_napi_del:
	netif_napi_del(&mdp->napi);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		.name = CARDNAME,
		.pm = SH_ETH_PM_OPS,
	},
};
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");