1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2018 MediaTek Inc.
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
20 #include <asm/cache.h>
23 #include <dm/device_compat.h>
24 #include <linux/delay.h>
25 #include <linux/err.h>
26 #include <linux/ioport.h>
27 #include <linux/mdio.h>
28 #include <linux/mii.h>
32 #define NUM_TX_DESC 24
33 #define NUM_RX_DESC 24
34 #define TX_TOTAL_BUF_SIZE (NUM_TX_DESC * PKTSIZE_ALIGN)
35 #define RX_TOTAL_BUF_SIZE (NUM_RX_DESC * PKTSIZE_ALIGN)
36 #define TOTAL_PKT_BUF_SIZE (TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)
38 #define MT753X_NUM_PHYS 5
39 #define MT753X_NUM_PORTS 7
40 #define MT753X_DFL_SMI_ADDR 31
41 #define MT753X_SMI_ADDR_MASK 0x1f
43 #define MT753X_PHY_ADDR(base, addr) \
44 (((base) + (addr)) & 0x1f)
46 #define GDMA_FWD_TO_CPU \
52 (DP_PDMA << MYMAC_DP_S) | \
53 (DP_PDMA << BC_DP_S) | \
54 (DP_PDMA << MC_DP_S) | \
57 #define GDMA_FWD_DISCARD \
63 (DP_DISCARD << MYMAC_DP_S) | \
64 (DP_DISCARD << BC_DP_S) | \
65 (DP_DISCARD << MC_DP_S) | \
66 (DP_DISCARD << UN_DP_S))
74 /* struct mtk_soc_data - This is the structure holding all differences
75  * among various platforms
76  * @caps: Flags showing the extra capabilities of the SoC
77  * @ana_rgc3: The offset for register ANA_RGC3 related to
79  * @pdma_base: Register base of PDMA block
80  * @txd_size: Tx DMA descriptor size.
81  * @rxd_size: Rx DMA descriptor size.
92 char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);
97 int rx_dma_owner_idx0;
98 int tx_cpu_owner_idx0;
100 void __iomem *fe_base;
101 void __iomem *gmac_base;
102 void __iomem *sgmii_base;
104 struct regmap *ethsys_regmap;
106 struct mii_dev *mdio_bus;
107 int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
108 int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
109 int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
110 int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
113 const struct mtk_soc_data *soc;
119 struct phy_device *phydev;
124 int (*switch_init)(struct mtk_eth_priv *priv);
128 struct gpio_desc rst_gpio;
131 struct reset_ctl rst_fe;
132 struct reset_ctl rst_mcm;
135 static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
137 writel(val, priv->fe_base + priv->soc->pdma_base + reg);
140 static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
143 clrsetbits_le32(priv->fe_base + priv->soc->pdma_base + reg, clr, set);
146 static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
152 gdma_base = GDMA2_BASE;
154 gdma_base = GDMA1_BASE;
156 writel(val, priv->fe_base + gdma_base + reg);
159 static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
161 return readl(priv->gmac_base + reg);
164 static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
166 writel(val, priv->gmac_base + reg);
169 static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
171 clrsetbits_le32(priv->gmac_base + reg, clr, set);
174 static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
179 regmap_read(priv->ethsys_regmap, reg, &val);
182 regmap_write(priv->ethsys_regmap, reg, val);
185 /* Direct MDIO clause 22/45 access via SoC */
186 static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
192 val = (st << MDIO_ST_S) |
193 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
194 (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
195 (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
197 if (cmd == MDIO_CMD_WRITE)
198 val |= data & MDIO_RW_DATA_M;
200 mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);
202 ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
203 PHY_ACS_ST, 0, 5000, 0);
205 pr_warn("MDIO access timeout\n");
209 if (cmd == MDIO_CMD_READ) {
210 val = mtk_gmac_read(priv, GMAC_PIAC_REG);
211 return val & MDIO_RW_DATA_M;
217 /* Direct MDIO clause 22 read via SoC */
218 static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
220 return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
223 /* Direct MDIO clause 22 write via SoC */
224 static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
226 return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
229 /* Direct MDIO clause 45 read via SoC */
230 static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
234 ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
238 return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
242 /* Direct MDIO clause 45 write via SoC */
243 static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
248 ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
252 return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
256 /* Indirect MDIO clause 45 read via MII registers */
257 static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
262 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
263 (MMD_ADDR << MMD_CMD_S) |
264 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
268 ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
272 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
273 (MMD_DATA << MMD_CMD_S) |
274 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
278 return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
281 /* Indirect MDIO clause 45 write via MII registers */
282 static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
287 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
288 (MMD_ADDR << MMD_CMD_S) |
289 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
293 ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
297 ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
298 (MMD_DATA << MMD_CMD_S) |
299 ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
303 return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
307 * MT7530 Internal Register Address Bits
308 * -------------------------------------------------------------------
309 * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
310 * |----------------------------------------|---------------|--------|
311 * | Page Address | Reg Address | Unused |
312 * -------------------------------------------------------------------
315 static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
317 int ret, low_word, high_word;
319 /* Write page address */
320 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
325 low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
330 high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
335 *data = ((u32)high_word << 16) | (low_word & 0xffff);
340 static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
344 /* Write page address */
345 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
350 ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
355 /* Write high word */
356 return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
359 static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
364 mt753x_reg_read(priv, reg, &val);
367 mt753x_reg_write(priv, reg, val);
370 /* Indirect MDIO clause 22/45 access */
371 static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
378 val = (st << MDIO_ST_S) |
379 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
380 ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
381 ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
383 if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
384 val |= data & MDIO_RW_DATA_M;
386 mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);
389 timeout = get_timer(0);
391 mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
393 if ((val & PHY_ACS_ST) == 0)
396 if (get_timer(timeout) > timeout_ms)
400 if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
401 mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
402 ret = val & MDIO_RW_DATA_M;
408 static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
412 if (phy >= MT753X_NUM_PHYS)
415 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
417 return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
421 static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
426 if (phy >= MT753X_NUM_PHYS)
429 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
431 return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
435 int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
440 if (addr >= MT753X_NUM_PHYS)
443 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
445 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
450 return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
454 static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
460 if (addr >= MT753X_NUM_PHYS)
463 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
465 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
470 return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
474 static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
476 struct mtk_eth_priv *priv = bus->priv;
479 return priv->mii_read(priv, addr, reg);
481 return priv->mmd_read(priv, addr, devad, reg);
484 static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
487 struct mtk_eth_priv *priv = bus->priv;
490 return priv->mii_write(priv, addr, reg, val);
492 return priv->mmd_write(priv, addr, devad, reg, val);
495 static int mtk_mdio_register(struct udevice *dev)
497 struct mtk_eth_priv *priv = dev_get_priv(dev);
498 struct mii_dev *mdio_bus = mdio_alloc();
504 /* Assign MDIO access APIs according to the switch/phy */
507 priv->mii_read = mtk_mii_read;
508 priv->mii_write = mtk_mii_write;
509 priv->mmd_read = mtk_mmd_ind_read;
510 priv->mmd_write = mtk_mmd_ind_write;
513 priv->mii_read = mt7531_mii_ind_read;
514 priv->mii_write = mt7531_mii_ind_write;
515 priv->mmd_read = mt7531_mmd_ind_read;
516 priv->mmd_write = mt7531_mmd_ind_write;
519 priv->mii_read = mtk_mii_read;
520 priv->mii_write = mtk_mii_write;
521 priv->mmd_read = mtk_mmd_read;
522 priv->mmd_write = mtk_mmd_write;
525 mdio_bus->read = mtk_mdio_read;
526 mdio_bus->write = mtk_mdio_write;
527 snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);
529 mdio_bus->priv = (void *)priv;
531 ret = mdio_register(mdio_bus);
536 priv->mdio_bus = mdio_bus;
541 static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
543 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
545 return priv->mmd_read(priv, phy_addr, 0x1f, reg);
548 static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
550 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
552 priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
/*
 * Configure the MT7530 core/TRGMII PLLs for the given xMII @mode.
 * Only RGMII is accepted; any other mode is rejected with an error.
 *
 * NOTE(review): this chunk is a lossy extract — the switch-case bodies
 * that assign ncpo1/ssc_delta, the closing braces, intermediate delays
 * and several PLL flag lines are missing from view; compare against the
 * full driver before relying on the exact sequence.
 */
555 static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
557 u32 ncpo1, ssc_delta;
/* NCPO1/SSC values for RGMII are assigned in lines not visible here */
560 case PHY_INTERFACE_MODE_RGMII:
565 printf("error: xMII mode %d not supported\n", mode);
569 /* Disable MT7530 core clock */
570 mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);
572 /* Disable MT7530 PLL */
573 mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
574 (2 << RG_GSWPLL_POSDIV_200M_S) |
575 (32 << RG_GSWPLL_FBKDIV_200M_S));
577 /* For MT7530 core clock = 500Mhz */
578 mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
579 (1 << RG_GSWPLL_POSDIV_500M_S) |
580 (25 << RG_GSWPLL_FBKDIV_500M_S));
582 /* Enable MT7530 PLL */
583 mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
584 (2 << RG_GSWPLL_POSDIV_200M_S) |
585 (32 << RG_GSWPLL_FBKDIV_200M_S) |
/* presumably an RG_GSWPLL enable flag follows here — not visible */
590 mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
592 /* Setup the MT7530 TRGMII Tx Clock */
593 mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
594 mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
595 mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
596 mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
597 mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
598 RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
600 mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
601 RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
602 (1 << RG_SYSPLL_POSDIV_S));
604 mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
605 RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
606 RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
608 /* Enable MT7530 core clock */
609 mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
610 REG_GSWCK_EN | REG_TRGMIICK_EN);
/*
 * One-time MT7530 switch bring-up: clock select, PHY base discovery via
 * HWTRAP, internal-PHY power cycling around a full soft reset, port 5/6
 * MAC configuration and TRGMII pad tuning.
 *
 * NOTE(review): lossy extract — locals, braces, the mt753x_phy_base
 * derivation context and the txdrv assignment are partly missing; the
 * visible statements are kept byte-identical.
 */
615 static int mt7530_setup(struct mtk_eth_priv *priv)
617 u16 phy_addr, phy_val;
621 if (!MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
622 /* Select 250MHz clk for RGMII mode */
623 mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
624 ETHSYS_TRGMII_CLK_SEL362_5, 0);
631 /* Modify HWTRAP first to allow direct access to internal PHYs */
632 mt753x_reg_read(priv, HWTRAP_REG, &val);
635 mt753x_reg_write(priv, MHWTRAP_REG, val);
637 /* Calculate the phy base address */
638 val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
639 priv->mt753x_phy_base = (val | 0x7) + 1;
/* Power down internal PHYs so the link is quiet across the reset */
642 for (i = 0; i < MT753X_NUM_PHYS; i++) {
643 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
644 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
645 phy_val |= BMCR_PDOWN;
646 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
649 /* Force MAC link down before reset */
650 mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
651 mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);
/* Full switch soft reset (system + register) */
654 mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
657 val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
658 MAC_MODE | FORCE_MODE |
659 MAC_TX_EN | MAC_RX_EN |
660 BKOFF_EN | BACKPR_EN |
661 (SPEED_1000M << FORCE_SPD_S) |
662 FORCE_DPX | FORCE_LINK;
664 /* MT7530 Port6: Forced 1000M/FD, FC disabled */
665 mt753x_reg_write(priv, PMCR_REG(6), val);
667 /* MT7530 Port5: Forced link down */
668 mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
670 /* MT7530 Port6: Set to RGMII */
671 mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);
673 /* Hardware Trap: Enable Port6, Disable Port5 */
674 mt753x_reg_read(priv, HWTRAP_REG, &val);
675 val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
676 (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
677 (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
678 val &= ~(C_MDIO_BPS | P6_INTF_DIS);
679 mt753x_reg_write(priv, MHWTRAP_REG, val);
681 /* Setup switch core pll */
682 mt7530_pad_clk_setup(priv, priv->phy_interface);
684 /* Lower Tx Driving for TRGMII path */
/* NOTE(review): txdrv is assigned in lines not visible here */
685 for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
686 mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
687 (txdrv << TD_DM_DRVP_S) |
688 (txdrv << TD_DM_DRVN_S));
/* Set RX clock tap for each TRGMII control lane */
690 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
691 mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);
/* Power up the internal PHYs again after configuration */
694 for (i = 0; i < MT753X_NUM_PHYS; i++) {
695 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
696 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
697 phy_val &= ~BMCR_PDOWN;
698 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
/*
 * Program the MT7531 core PLL to 500 MHz (25 MHz XTAL only) following
 * the documented disable -> program -> latch -> enable sequence, and
 * enable the 325M (SGMII) and 250M-SSC (RGMII) output clocks.
 *
 * NOTE(review): lossy extract — the @mcm early path, udelay spacing
 * between steps and closing braces are not visible here.
 */
704 static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
706 /* Step 1 : Disable MT7531 COREPLL */
707 mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);
709 /* Step 2: switch to XTAL output */
710 mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);
712 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);
714 /* Step 3: disable PLLGP and enable program PLLGP */
715 mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);
717 /* Step 4: program COREPLL output frequency to 500MHz */
718 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
719 2 << RG_COREPLL_POSDIV_S);
722 /* Currently, support XTAL 25Mhz only */
723 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
724 0x140000 << RG_COREPLL_SDM_PCW_S);
726 /* Set feedback divide ratio update signal to high */
727 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
728 RG_COREPLL_SDM_PCW_CHG);
730 /* Wait for at least 16 XTAL clocks */
733 /* Step 5: set feedback divide ratio update signal to low */
734 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);
736 /* add enable 325M clock for SGMII */
737 mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
739 /* add enable 250SSC clock for RGMII */
740 mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
742 /*Step 6: Enable MT7531 PLL */
743 mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);
745 mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);
750 static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
753 if (port != 5 && port != 6) {
754 printf("mt7531: port %d is not a SGMII port\n", port);
758 /* Set SGMII GEN2 speed(2.5G) */
759 mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
760 SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);
762 /* Disable SGMII AN */
763 mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
766 /* SGMII force mode setting */
767 mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);
769 /* Release PHYA power down state */
770 mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
776 static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
781 printf("error: RGMII mode is not available for port %d\n",
786 mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
789 val |= GP_MODE_RGMII << GP_MODE_S;
790 val |= TXCLK_NO_REVERSE;
791 val |= RXCLK_NO_DELAY;
792 val &= ~CLK_SKEW_IN_M;
793 val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
794 val &= ~CLK_SKEW_OUT_M;
795 val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
796 mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);
801 static void mt7531_phy_setting(struct mtk_eth_priv *priv)
806 for (i = 0; i < MT753X_NUM_PHYS; i++) {
807 /* Enable HW auto downshift */
808 priv->mii_write(priv, i, 0x1f, 0x1);
809 val = priv->mii_read(priv, i, PHY_EXT_REG_14);
810 val |= PHY_EN_DOWN_SHFIT;
811 priv->mii_write(priv, i, PHY_EXT_REG_14, val);
813 /* PHY link down power saving enable */
814 val = priv->mii_read(priv, i, PHY_EXT_REG_17);
815 val |= PHY_LINKDOWN_POWER_SAVING_EN;
816 priv->mii_write(priv, i, PHY_EXT_REG_17, val);
818 val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
819 val &= ~PHY_POWER_SAVING_M;
820 val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
821 priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
/*
 * One-time MT7531 switch bring-up: PHY power cycling around a soft
 * reset, core PLL programming, per-port SGMII/RGMII interface setup,
 * forced-mode PMCR for ports 5/6 and internal-PHY enablement.
 *
 * NOTE(review): lossy extract — locals, braces, switch-case breaks and
 * several intermediate lines are missing; visible statements kept
 * byte-identical.
 */
825 static int mt7531_setup(struct mtk_eth_priv *priv)
827 u16 phy_addr, phy_val;
/* PHY base sits right after the switch's own SMI address, mod 0x1f */
833 priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
834 MT753X_SMI_ADDR_MASK;
/* Power down internal PHYs so links stay quiet across the reset */
837 for (i = 0; i < MT753X_NUM_PHYS; i++) {
838 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
839 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
840 phy_val |= BMCR_PDOWN;
841 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
844 /* Force MAC link down before reset */
845 mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
846 mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);
848 /* Switch soft reset */
849 mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
852 /* Enable MDC input Schmitt Trigger */
853 mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
854 SMT_IOLB_5_SMI_MDC_EN);
856 mt7531_core_pll_setup(priv, priv->mcm);
/* Check whether the bond pads select dual-SGMII for port 5 */
858 mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
859 port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
861 /* port5 support either RGMII or SGMII, port6 only support SGMII. */
862 switch (priv->phy_interface) {
863 case PHY_INTERFACE_MODE_RGMII:
865 mt7531_port_rgmii_init(priv, 5);
867 case PHY_INTERFACE_MODE_SGMII:
868 mt7531_port_sgmii_init(priv, 6);
870 mt7531_port_sgmii_init(priv, 5);
/* Forced 1000M/FD with flow control on both CPU-facing ports */
876 pmcr = MT7531_FORCE_MODE |
877 (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
878 MAC_MODE | MAC_TX_EN | MAC_RX_EN |
879 BKOFF_EN | BACKPR_EN |
880 FORCE_RX_FC | FORCE_TX_FC |
881 (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
884 mt753x_reg_write(priv, PMCR_REG(5), pmcr);
885 mt753x_reg_write(priv, PMCR_REG(6), pmcr);
/* Power the internal PHYs back up */
888 for (i = 0; i < MT753X_NUM_PHYS; i++) {
889 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
890 phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
891 phy_val &= ~BMCR_PDOWN;
892 priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
895 mt7531_phy_setting(priv);
897 /* Enable Internal PHYs */
898 val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
899 val |= MT7531_BYPASS_MODE;
900 val &= ~MT7531_POWER_ON_OFF;
901 mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);
906 int mt753x_switch_init(struct mtk_eth_priv *priv)
911 /* Global reset switch */
913 reset_assert(&priv->rst_mcm);
915 reset_deassert(&priv->rst_mcm);
917 } else if (dm_gpio_is_valid(&priv->rst_gpio)) {
918 dm_gpio_set_value(&priv->rst_gpio, 0);
920 dm_gpio_set_value(&priv->rst_gpio, 1);
924 ret = priv->switch_init(priv);
928 /* Set port isolation */
929 for (i = 0; i < MT753X_NUM_PORTS; i++) {
930 /* Set port matrix mode */
932 mt753x_reg_write(priv, PCR_REG(i),
933 (0x40 << PORT_MATRIX_S));
935 mt753x_reg_write(priv, PCR_REG(i),
936 (0x3f << PORT_MATRIX_S));
938 /* Set port mode to user port */
939 mt753x_reg_write(priv, PVC_REG(i),
940 (0x8100 << STAG_VPID_S) |
941 (VLAN_ATTR_USER << VLAN_ATTR_S));
947 static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
949 u16 lcl_adv = 0, rmt_adv = 0;
953 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
954 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
955 MAC_MODE | FORCE_MODE |
956 MAC_TX_EN | MAC_RX_EN |
957 BKOFF_EN | BACKPR_EN;
959 switch (priv->phydev->speed) {
961 mcr |= (SPEED_10M << FORCE_SPD_S);
964 mcr |= (SPEED_100M << FORCE_SPD_S);
967 mcr |= (SPEED_1000M << FORCE_SPD_S);
971 if (priv->phydev->link)
974 if (priv->phydev->duplex) {
977 if (priv->phydev->pause)
978 rmt_adv = LPA_PAUSE_CAP;
979 if (priv->phydev->asym_pause)
980 rmt_adv |= LPA_PAUSE_ASYM;
982 if (priv->phydev->advertising & ADVERTISED_Pause)
983 lcl_adv |= ADVERTISE_PAUSE_CAP;
984 if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
985 lcl_adv |= ADVERTISE_PAUSE_ASYM;
987 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
989 if (flowctrl & FLOW_CTRL_TX)
991 if (flowctrl & FLOW_CTRL_RX)
994 debug("rx pause %s, tx pause %s\n",
995 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
996 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
999 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1002 static int mtk_phy_start(struct mtk_eth_priv *priv)
1004 struct phy_device *phydev = priv->phydev;
1007 ret = phy_startup(phydev);
1010 debug("Could not initialize PHY %s\n", phydev->dev->name);
1014 if (!phydev->link) {
1015 debug("%s: link down.\n", phydev->dev->name);
1019 mtk_phy_link_adjust(priv);
1021 debug("Speed: %d, %s duplex%s\n", phydev->speed,
1022 (phydev->duplex) ? "full" : "half",
1023 (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
1028 static int mtk_phy_probe(struct udevice *dev)
1030 struct mtk_eth_priv *priv = dev_get_priv(dev);
1031 struct phy_device *phydev;
1033 phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
1034 priv->phy_interface);
1038 phydev->supported &= PHY_GBIT_FEATURES;
1039 phydev->advertising = phydev->supported;
1041 priv->phydev = phydev;
1047 static void mtk_sgmii_init(struct mtk_eth_priv *priv)
1049 /* Set SGMII GEN2 speed(2.5G) */
1050 setbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
1053 /* Disable SGMII AN */
1054 clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
1055 SGMII_AN_ENABLE, 0);
1057 /* SGMII force mode setting */
1058 writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
1060 /* Release PHYA power down state */
1061 clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
/*
 * Program the GE mode mux for this GMAC according to the configured PHY
 * interface (SGMII additionally selects the SGMII mux and initializes
 * the SGMII block), then optionally force the MAC speed/duplex and tune
 * the TRGMII pads on capable SoCs.
 *
 * NOTE(review): lossy extract — case breaks, the default case, the
 * forced-mode trailing flags and TRGMII branch internals are partially
 * missing; visible statements kept byte-identical.
 */
1065 static void mtk_mac_init(struct mtk_eth_priv *priv)
1070 switch (priv->phy_interface) {
1071 case PHY_INTERFACE_MODE_RGMII_RXID:
1072 case PHY_INTERFACE_MODE_RGMII:
1073 ge_mode = GE_MODE_RGMII;
1075 case PHY_INTERFACE_MODE_SGMII:
/* SGMII reuses the RGMII GE mode but routes through the SGMII mux */
1076 ge_mode = GE_MODE_RGMII;
1077 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
1078 SYSCFG0_SGMII_SEL(priv->gmac_id));
1079 mtk_sgmii_init(priv);
1081 case PHY_INTERFACE_MODE_MII:
1082 case PHY_INTERFACE_MODE_GMII:
1083 ge_mode = GE_MODE_MII;
1085 case PHY_INTERFACE_MODE_RMII:
1086 ge_mode = GE_MODE_RMII;
1092 /* set the gmac to the right mode */
1093 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
1094 SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
1095 ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));
1097 if (priv->force_mode) {
1098 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
1099 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1100 MAC_MODE | FORCE_MODE |
1101 MAC_TX_EN | MAC_RX_EN |
1102 BKOFF_EN | BACKPR_EN |
/* speed bits are ORed in below according to priv->speed */
1105 switch (priv->speed) {
1107 mcr |= SPEED_10M << FORCE_SPD_S;
1110 mcr |= SPEED_100M << FORCE_SPD_S;
1113 mcr |= SPEED_1000M << FORCE_SPD_S;
1120 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
/* TRGMII pad tuning only on SoCs with GMAC1 TRGMII (non-MT7621 clk) */
1123 if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC1_TRGMII) &&
1124 !MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
1125 /* Lower Tx Driving for TRGMII path */
1126 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
1127 mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
1128 (8 << TD_DM_DRVP_S) |
1129 (8 << TD_DM_DRVN_S));
/* Pulse RX reset around DQSI select */
1131 mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
1132 RX_RST | RXC_DQSISEL);
1133 mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
/*
 * Build the TX/RX descriptor rings in non-cached memory and point each
 * descriptor at a slot of the per-device packet pool, then program the
 * ring bases/sizes/indices into the PDMA and reset its index state.
 *
 * NOTE(review): lossy extract — braces and a few descriptor-field lines
 * are missing; visible statements kept byte-identical.
 */
1137 static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
1139 char *pkt_base = priv->pkt_pool;
1140 struct mtk_tx_dma_v2 *txd;
1141 struct mtk_rx_dma_v2 *rxd;
/* Stop PDMA before touching the rings */
1144 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
1147 memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
1148 memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
1149 memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
1151 flush_dcache_range((ulong)pkt_base,
1152 (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
1154 priv->rx_dma_owner_idx0 = 0;
1155 priv->tx_cpu_owner_idx0 = 0;
/* TX descriptors start owned by the CPU (DDONE set) */
1157 for (i = 0; i < NUM_TX_DESC; i++) {
1158 txd = priv->tx_ring_noc + i * priv->soc->txd_size;
1160 txd->txd1 = virt_to_phys(pkt_base);
1161 txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
/* Forward port field lives in txd5 on NETSYS v2, txd4 on v1 */
1163 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1164 txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id + 1);
1166 txd->txd4 = PDMA_V1_TXD4_FPORT_SET(priv->gmac_id + 1);
1168 pkt_base += PKTSIZE_ALIGN;
/* RX descriptors advertise a full PKTSIZE_ALIGN buffer each */
1171 for (i = 0; i < NUM_RX_DESC; i++) {
1172 rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
1174 rxd->rxd1 = virt_to_phys(pkt_base);
1176 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1177 rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1179 rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1181 pkt_base += PKTSIZE_ALIGN;
1184 mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
1185 virt_to_phys(priv->tx_ring_noc));
1186 mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
1187 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1189 mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
1190 virt_to_phys(priv->rx_ring_noc));
1191 mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
1192 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);
/* Reset DMA TX/RX ring 0 indices in one shot */
1194 mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
/*
 * DM eth start op: reset the frame engine, select the PDMA v2 layout on
 * NETSYS v2 SoCs, program GDMA forwarding (own GMAC -> PDMA, the other
 * GMAC -> discard), rebuild the FIFO rings, start the PHY if there is
 * no switch, and finally enable TX/RX DMA.
 *
 * NOTE(review): lossy extract — reset delays, error-check lines and
 * braces are missing; visible statements kept byte-identical.
 */
1197 static int mtk_eth_start(struct udevice *dev)
1199 struct mtk_eth_priv *priv = dev_get_priv(dev);
/* Pulse the frame-engine reset before reprogramming it */
1203 reset_assert(&priv->rst_fe);
1205 reset_deassert(&priv->rst_fe);
1208 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1209 setbits_le32(priv->fe_base + FE_GLO_MISC_REG, PDMA_VER_V2);
1211 /* Packets forward to PDMA */
1212 mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);
/* Discard traffic arriving on the unused GMAC */
1214 if (priv->gmac_id == 0)
1215 mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1217 mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1221 mtk_eth_fifo_init(priv);
/* Start up the PHY only when no switch handles the link */
1224 if (priv->sw == SW_NONE) {
1225 ret = mtk_phy_start(priv);
/* Enable TX write-back and both DMA directions */
1230 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
1231 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
1237 static void mtk_eth_stop(struct udevice *dev)
1239 struct mtk_eth_priv *priv = dev_get_priv(dev);
1241 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
1242 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
1245 wait_for_bit_le32(priv->fe_base + priv->soc->pdma_base + PDMA_GLO_CFG_REG,
1246 RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
1249 static int mtk_eth_write_hwaddr(struct udevice *dev)
1251 struct eth_pdata *pdata = dev_get_plat(dev);
1252 struct mtk_eth_priv *priv = dev_get_priv(dev);
1253 unsigned char *mac = pdata->enetaddr;
1254 u32 macaddr_lsb, macaddr_msb;
1256 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1257 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1258 ((u32)mac[4] << 8) | (u32)mac[5];
1260 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1261 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1266 static int mtk_eth_send(struct udevice *dev, void *packet, int length)
1268 struct mtk_eth_priv *priv = dev_get_priv(dev);
1269 u32 idx = priv->tx_cpu_owner_idx0;
1270 struct mtk_tx_dma_v2 *txd;
1273 txd = priv->tx_ring_noc + idx * priv->soc->txd_size;
1275 if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
1276 debug("mtk-eth: TX DMA descriptor ring is full\n");
1280 pkt_base = (void *)phys_to_virt(txd->txd1);
1281 memcpy(pkt_base, packet, length);
1282 flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
1283 roundup(length, ARCH_DMA_MINALIGN));
1285 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1286 txd->txd2 = PDMA_TXD2_LS0 | PDMA_V2_TXD2_SDL0_SET(length);
1288 txd->txd2 = PDMA_TXD2_LS0 | PDMA_V1_TXD2_SDL0_SET(length);
1290 priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
1291 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1296 static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1298 struct mtk_eth_priv *priv = dev_get_priv(dev);
1299 u32 idx = priv->rx_dma_owner_idx0;
1300 struct mtk_rx_dma_v2 *rxd;
1304 rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1306 if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
1307 debug("mtk-eth: RX DMA descriptor ring is empty\n");
1311 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1312 length = PDMA_V2_RXD2_PLEN0_GET(rxd->rxd2);
1314 length = PDMA_V1_RXD2_PLEN0_GET(rxd->rxd2);
1316 pkt_base = (void *)phys_to_virt(rxd->rxd1);
1317 invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
1318 roundup(length, ARCH_DMA_MINALIGN));
1321 *packetp = pkt_base;
1326 static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
1328 struct mtk_eth_priv *priv = dev_get_priv(dev);
1329 u32 idx = priv->rx_dma_owner_idx0;
1330 struct mtk_rx_dma_v2 *rxd;
1332 rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1334 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1335 rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1337 rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1339 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
1340 priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
1345 static int mtk_eth_probe(struct udevice *dev)
1347 struct eth_pdata *pdata = dev_get_plat(dev);
1348 struct mtk_eth_priv *priv = dev_get_priv(dev);
1349 ulong iobase = pdata->iobase;
1352 /* Frame Engine Register Base */
1353 priv->fe_base = (void *)iobase;
1355 /* GMAC Register Base */
1356 priv->gmac_base = (void *)(iobase + GMAC_BASE);
1359 ret = mtk_mdio_register(dev);
1363 /* Prepare for tx/rx rings */
1364 priv->tx_ring_noc = (void *)
1365 noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
1367 priv->rx_ring_noc = (void *)
1368 noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
1374 /* Probe phy if switch is not specified */
1375 if (priv->sw == SW_NONE)
1376 return mtk_phy_probe(dev);
1378 /* Initialize switch */
1379 return mt753x_switch_init(priv);
1382 static int mtk_eth_remove(struct udevice *dev)
1384 struct mtk_eth_priv *priv = dev_get_priv(dev);
1386 /* MDIO unregister */
1387 mdio_unregister(priv->mdio_bus);
1388 mdio_free(priv->mdio_bus);
1390 /* Stop possibly started DMA */
/*
 * mtk_eth_of_to_plat() - parse the device tree node into platform data and
 * driver private state.
 *
 * Reads, in order: per-SoC driver data, register base, the "mediatek,ethsys"
 * syscon regmap, the "fe" reset line, gmac id, the mandatory phy-mode, an
 * optional fixed-link subnode (force mode), the "mediatek,sgmiisys" regmap
 * when the interface is SGMII, an optional "mediatek,switch" string
 * (mt7530/mt7531), the optional MCM reset and reset-gpios, and finally the
 * phy-handle/reg PHY address when no switch is used.
 *
 * NOTE(review): many original lines are elided from this excerpt (braces,
 * 'int ret;', error-return statements after the failing checks, and some
 * dev_read_* argument tails); comments describe only the visible code.
 */
1396 static int mtk_eth_of_to_plat(struct udevice *dev)
1398 struct eth_pdata *pdata = dev_get_plat(dev);
1399 struct mtk_eth_priv *priv = dev_get_priv(dev);
1400 struct ofnode_phandle_args args;
1401 struct regmap *regmap;
/* Per-SoC capabilities/descriptor layout matched via the compatible table */
1406 priv->soc = (const struct mtk_soc_data *)dev_get_driver_data(dev);
1408 dev_err(dev, "missing soc compatible data\n");
1412 pdata->iobase = (phys_addr_t)dev_remap_addr(dev);
1414 /* get corresponding ethsys phandle */
1415 ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
1420 priv->ethsys_regmap = syscon_node_to_regmap(args.node);
1421 if (IS_ERR(priv->ethsys_regmap))
1422 return PTR_ERR(priv->ethsys_regmap);
1424 /* Reset controllers */
1425 ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
1427 printf("error: Unable to get reset ctrl for frame engine\n");
/* Which GMAC of the frame engine this node drives; defaults to GMAC 0 */
1431 priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);
1433 /* Interface mode is required */
1434 pdata->phy_interface = dev_read_phy_mode(dev);
1435 priv->phy_interface = pdata->phy_interface;
1436 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
1437 printf("error: phy-mode is not set\n");
1441 /* Force mode or autoneg */
/* A fixed-link subnode forces speed/duplex instead of autonegotiation */
1442 subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
1443 if (ofnode_valid(subnode)) {
1444 priv->force_mode = 1;
1445 priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
1446 priv->duplex = ofnode_read_bool(subnode, "full-duplex");
/* Only 10/100/1000 are accepted as forced speeds here */
1448 if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
1449 priv->speed != SPEED_1000) {
1450 printf("error: no valid speed set in fixed-link\n");
1455 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1456 /* get corresponding sgmii phandle */
1457 ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
1462 regmap = syscon_node_to_regmap(args.node);
1465 return PTR_ERR(regmap);
/* Keep a direct pointer to the SGMII register range for later tuning */
1467 priv->sgmii_base = regmap_get_range(regmap, 0);
1469 if (!priv->sgmii_base) {
1470 dev_err(dev, "Unable to find sgmii\n");
1475 /* check for switch first, otherwise phy will be used */
1477 priv->switch_init = NULL;
1478 str = dev_read_string(dev, "mediatek,switch");
1481 if (!strcmp(str, "mt7530")) {
1482 priv->sw = SW_MT7530;
1483 priv->switch_init = mt7530_setup;
1484 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
1485 } else if (!strcmp(str, "mt7531")) {
1486 priv->sw = SW_MT7531;
1487 priv->switch_init = mt7531_setup;
1488 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
1490 printf("error: unsupported switch\n");
/* MCM: switch is embedded in the SoC and needs its own reset line */
1494 priv->mcm = dev_read_bool(dev, "mediatek,mcm");
1496 ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
1498 printf("error: no reset ctrl for mcm\n");
/* Optional external reset GPIO for a discrete switch/PHY */
1502 gpio_request_by_name(dev, "reset-gpios", 0,
1503 &priv->rst_gpio, GPIOD_IS_OUT);
/* No switch: a phy-handle pointing at the PHY node is mandatory */
1506 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
1509 printf("error: phy-handle is not specified\n");
/* PHY MDIO address comes from the referenced node's "reg" property */
1513 priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
1514 if (priv->phy_addr < 0) {
1515 printf("error: phy address is not specified\n");
/*
 * Per-SoC match data: register block layout and DMA descriptor sizes.
 * All four SoCs below use the v1 PDMA register base and the v1 descriptor
 * layout; they differ (where visible) only in their capability flags.
 * NOTE(review): the closing '};' of each initializer and some '.caps'
 * lines are elided from this excerpt.
 */
1523 static const struct mtk_soc_data mt7629_data = {
1525 .pdma_base = PDMA_V1_BASE,
1526 .txd_size = sizeof(struct mtk_tx_dma),
1527 .rxd_size = sizeof(struct mtk_rx_dma),
1530 static const struct mtk_soc_data mt7623_data = {
1531 .caps = MT7623_CAPS,
1532 .pdma_base = PDMA_V1_BASE,
1533 .txd_size = sizeof(struct mtk_tx_dma),
1534 .rxd_size = sizeof(struct mtk_rx_dma),
1537 static const struct mtk_soc_data mt7622_data = {
1539 .pdma_base = PDMA_V1_BASE,
1540 .txd_size = sizeof(struct mtk_tx_dma),
1541 .rxd_size = sizeof(struct mtk_rx_dma),
1544 static const struct mtk_soc_data mt7621_data = {
1545 .caps = MT7621_CAPS,
1546 .pdma_base = PDMA_V1_BASE,
1547 .txd_size = sizeof(struct mtk_tx_dma),
1548 .rxd_size = sizeof(struct mtk_rx_dma),
/*
 * Compatible-string match table; .data carries the per-SoC mtk_soc_data
 * retrieved in of_to_plat via dev_get_driver_data().
 * NOTE(review): the terminating empty entry and closing '};' are elided
 * from this excerpt.
 */
1551 static const struct udevice_id mtk_eth_ids[] = {
1552 { .compatible = "mediatek,mt7629-eth", .data = (ulong)&mt7629_data },
1553 { .compatible = "mediatek,mt7623-eth", .data = (ulong)&mt7623_data },
1554 { .compatible = "mediatek,mt7622-eth", .data = (ulong)&mt7622_data },
1555 { .compatible = "mediatek,mt7621-eth", .data = (ulong)&mt7621_data },
/*
 * U-Boot network stack operations for this device.
 * NOTE(review): the closing '};' is elided from this excerpt.
 */
1559 static const struct eth_ops mtk_eth_ops = {
1560 .start = mtk_eth_start,
1561 .stop = mtk_eth_stop,
1562 .send = mtk_eth_send,
1563 .recv = mtk_eth_recv,
1564 .free_pkt = mtk_eth_free_pkt,
1565 .write_hwaddr = mtk_eth_write_hwaddr,
/*
 * Driver registration. DM_FLAG_ALLOC_PRIV_DMA requests DMA-capable
 * (cache-aligned) allocation of the private data, needed because
 * mtk_eth_priv embeds the packet buffer pool.
 * NOTE(review): the '.name'/'.id' members and closing '};' are elided
 * from this excerpt.
 */
1568 U_BOOT_DRIVER(mtk_eth) = {
1571 .of_match = mtk_eth_ids,
1572 .of_to_plat = mtk_eth_of_to_plat,
1573 .plat_auto = sizeof(struct eth_pdata),
1574 .probe = mtk_eth_probe,
1575 .remove = mtk_eth_remove,
1576 .ops = &mtk_eth_ops,
1577 .priv_auto = sizeof(struct mtk_eth_priv),
1578 .flags = DM_FLAG_ALLOC_PRIV_DMA,