1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
4  * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5  * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6  * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7  */
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
27 #include "mtk_eth_soc.h"
30 static int mtk_msg_level = -1;
31 module_param_named(msg_level, mtk_msg_level, int, 0);
32 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
34 #define MTK_ETHTOOL_STAT(x) { #x, \
35 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
37 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
38 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
39 sizeof(u64) }
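/* Each entry pairs an ethtool string with the u64-word offset of the
 * matching counter in struct mtk_hw_stats, e.g. MTK_ETHTOOL_STAT(tx_bytes)
 * expands to { "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) }.
 */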
41 static const struct mtk_reg_map mtk_reg_map = {
42 .tx_irq_mask = 0x1a1c,
43 .tx_irq_status = 0x1a18,
76 .gdma_to_ppe = 0x4444,
84 static const struct mtk_reg_map mt7628_reg_map = {
85 .tx_irq_mask = 0x0a28,
86 .tx_irq_status = 0x0a20,
100 static const struct mtk_reg_map mt7986_reg_map = {
101 .tx_irq_mask = 0x461c,
102 .tx_irq_status = 0x4618,
105 .rx_cnt_cfg = 0x6104,
110 .irq_status = 0x6220,
117 .rx_cnt_cfg = 0x4504,
135 .gdma_to_ppe = 0x3333,
143 /* strings used by ethtool */
144 static const struct mtk_ethtool_stats {
145 char str[ETH_GSTRING_LEN];
146 int offset;
147 } mtk_ethtool_stats[] = {
148 MTK_ETHTOOL_STAT(tx_bytes),
149 MTK_ETHTOOL_STAT(tx_packets),
150 MTK_ETHTOOL_STAT(tx_skip),
151 MTK_ETHTOOL_STAT(tx_collisions),
152 MTK_ETHTOOL_STAT(rx_bytes),
153 MTK_ETHTOOL_STAT(rx_packets),
154 MTK_ETHTOOL_STAT(rx_overflow),
155 MTK_ETHTOOL_STAT(rx_fcs_errors),
156 MTK_ETHTOOL_STAT(rx_short_errors),
157 MTK_ETHTOOL_STAT(rx_long_errors),
158 MTK_ETHTOOL_STAT(rx_checksum_errors),
159 MTK_ETHTOOL_STAT(rx_flow_control_packets),
160 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
161 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
162 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
163 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
164 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
165 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
166 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
169 static const char * const mtk_clks_source_name[] = {
170 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
171 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
172 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
173 "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
176 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
178 __raw_writel(val, eth->base + reg);
181 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
183 return __raw_readl(eth->base + reg);
186 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
190 val = mtk_r32(eth, reg);
193 mtk_w32(eth, val, reg);
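/* Poll the PHY indirect access control register until the pending
 * access completes, giving up after PHY_IAC_TIMEOUT.
 */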
197 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
199 unsigned long t_start = jiffies;
202 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
204 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
209 dev_err(eth->dev, "mdio: MDIO timeout\n");
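/* Write a PHY register via the indirect access (IAC) register. Clause 45
 * registers are written with an address cycle followed by a data cycle;
 * clause 22 registers are written directly.
 */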
213 static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
218 ret = mtk_mdio_busy_wait(eth);
222 if (phy_reg & MII_ADDR_C45) {
223 mtk_w32(eth, PHY_IAC_ACCESS |
225 PHY_IAC_CMD_C45_ADDR |
226 PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
227 PHY_IAC_ADDR(phy_addr) |
228 PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
231 ret = mtk_mdio_busy_wait(eth);
235 mtk_w32(eth, PHY_IAC_ACCESS |
238 PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
239 PHY_IAC_ADDR(phy_addr) |
240 PHY_IAC_DATA(write_data),
243 mtk_w32(eth, PHY_IAC_ACCESS |
246 PHY_IAC_REG(phy_reg) |
247 PHY_IAC_ADDR(phy_addr) |
248 PHY_IAC_DATA(write_data),
252 ret = mtk_mdio_busy_wait(eth);
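/* Read a PHY register via the IAC register (clause 22 or clause 45) and
 * return the data field once the access has completed.
 */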
259 static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
263 ret = mtk_mdio_busy_wait(eth);
267 if (phy_reg & MII_ADDR_C45) {
268 mtk_w32(eth, PHY_IAC_ACCESS |
270 PHY_IAC_CMD_C45_ADDR |
271 PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
272 PHY_IAC_ADDR(phy_addr) |
273 PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
276 ret = mtk_mdio_busy_wait(eth);
280 mtk_w32(eth, PHY_IAC_ACCESS |
282 PHY_IAC_CMD_C45_READ |
283 PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
284 PHY_IAC_ADDR(phy_addr),
287 mtk_w32(eth, PHY_IAC_ACCESS |
289 PHY_IAC_CMD_C22_READ |
290 PHY_IAC_REG(phy_reg) |
291 PHY_IAC_ADDR(phy_addr),
295 ret = mtk_mdio_busy_wait(eth);
299 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
302 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
303 int phy_reg, u16 val)
305 struct mtk_eth *eth = bus->priv;
307 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
310 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
312 struct mtk_eth *eth = bus->priv;
314 return _mtk_mdio_read(eth, phy_addr, phy_reg);
317 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
318 phy_interface_t interface)
322 /* Check DDR memory type.
323 * Currently TRGMII mode with DDR2 memory is not supported.
324 */
325 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
326 if (interface == PHY_INTERFACE_MODE_TRGMII &&
327 val & SYSCFG_DRAM_TYPE_DDR2) {
329 "TRGMII mode with DDR2 memory is not supported!\n");
333 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
334 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
336 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
337 ETHSYS_TRGMII_MT7621_MASK, val);
342 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
343 phy_interface_t interface, int speed)
348 if (interface == PHY_INTERFACE_MODE_TRGMII) {
349 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
351 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
353 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
357 val = (speed == SPEED_1000) ?
358 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
359 mtk_w32(eth, val, INTF_MODE);
361 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
362 ETHSYS_TRGMII_CLK_SEL362_5,
363 ETHSYS_TRGMII_CLK_SEL362_5);
365 val = (speed == SPEED_1000) ? 250000000 : 500000000;
366 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
368 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
370 val = (speed == SPEED_1000) ?
371 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
372 mtk_w32(eth, val, TRGMII_RCK_CTRL);
374 val = (speed == SPEED_1000) ?
375 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
376 mtk_w32(eth, val, TRGMII_TCK_CTRL);
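/* phylink .mac_select_pcs callback: SGMII and 802.3z interface modes are
 * handled by the SGMII PCS (shared between MACs on some SoCs); other
 * modes need no PCS.
 */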
379 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
380 phy_interface_t interface)
382 struct mtk_mac *mac = container_of(config, struct mtk_mac,
384 struct mtk_eth *eth = mac->hw;
387 if (interface == PHY_INTERFACE_MODE_SGMII ||
388 phy_interface_mode_is_8023z(interface)) {
389 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
392 return mtk_sgmii_select_pcs(eth->sgmii, sid);
398 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
399 const struct phylink_link_state *state)
401 struct mtk_mac *mac = container_of(config, struct mtk_mac,
403 struct mtk_eth *eth = mac->hw;
404 int val, ge_mode, err = 0;
407 /* MT76x8 has no hardware settings for the MAC */
408 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
409 mac->interface != state->interface) {
410 /* Setup soc pin functions */
411 switch (state->interface) {
412 case PHY_INTERFACE_MODE_TRGMII:
415 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
419 case PHY_INTERFACE_MODE_RGMII_TXID:
420 case PHY_INTERFACE_MODE_RGMII_RXID:
421 case PHY_INTERFACE_MODE_RGMII_ID:
422 case PHY_INTERFACE_MODE_RGMII:
423 case PHY_INTERFACE_MODE_MII:
424 case PHY_INTERFACE_MODE_REVMII:
425 case PHY_INTERFACE_MODE_RMII:
426 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
427 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
432 case PHY_INTERFACE_MODE_1000BASEX:
433 case PHY_INTERFACE_MODE_2500BASEX:
434 case PHY_INTERFACE_MODE_SGMII:
435 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
436 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
441 case PHY_INTERFACE_MODE_GMII:
442 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
443 err = mtk_gmac_gephy_path_setup(eth, mac->id);
452 /* Setup clock for 1st gmac */
453 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
454 !phy_interface_mode_is_8023z(state->interface) &&
455 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
456 if (MTK_HAS_CAPS(mac->hw->soc->caps,
457 MTK_TRGMII_MT7621_CLK)) {
458 if (mt7621_gmac0_rgmii_adjust(mac->hw,
462 /* FIXME: this is incorrect. Not only does it
463 * use state->speed (which is not guaranteed
464 * to be correct) but it also makes use of it
465 * in a code path that will only be reachable
466 * when the PHY interface mode changes, not
467 * when the speed changes. Consequently, RGMII
468 * is probably broken.
469 */
470 mtk_gmac0_rgmii_adjust(mac->hw,
474 /* mt7623_pad_clk_setup */
475 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
477 TD_DM_DRVP(8) | TD_DM_DRVN(8),
480 /* Assert/release MT7623 RXC reset */
481 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
483 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
488 switch (state->interface) {
489 case PHY_INTERFACE_MODE_MII:
490 case PHY_INTERFACE_MODE_GMII:
493 case PHY_INTERFACE_MODE_REVMII:
496 case PHY_INTERFACE_MODE_RMII:
505 /* put the gmac into the right mode */
506 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
507 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
508 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
509 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
511 mac->interface = state->interface;
515 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
516 phy_interface_mode_is_8023z(state->interface)) {
517 /* The path GMAC to SGMII will be enabled once the SGMIISYS is
518 * set up.
519 */
520 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
522 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
524 ~(u32)SYSCFG0_SGMII_MASK);
526 /* Save the syscfg0 value for mac_finish */
528 } else if (phylink_autoneg_inband(mode)) {
530 "In-band mode not supported in non SGMII mode!\n");
537 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
538 mac->id, phy_modes(state->interface));
542 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
543 mac->id, phy_modes(state->interface), err);
546 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
547 phy_interface_t interface)
549 struct mtk_mac *mac = container_of(config, struct mtk_mac,
551 struct mtk_eth *eth = mac->hw;
552 u32 mcr_cur, mcr_new;
555 if (interface == PHY_INTERFACE_MODE_SGMII ||
556 phy_interface_mode_is_8023z(interface))
557 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
558 SYSCFG0_SGMII_MASK, mac->syscfg0);
561 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
563 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
564 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
566 /* Only update control register when needed! */
567 if (mcr_new != mcr_cur)
568 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
573 static void mtk_mac_pcs_get_state(struct phylink_config *config,
574 struct phylink_link_state *state)
576 struct mtk_mac *mac = container_of(config, struct mtk_mac,
578 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
580 state->link = (pmsr & MAC_MSR_LINK);
581 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
583 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
585 state->speed = SPEED_10;
587 case MAC_MSR_SPEED_100:
588 state->speed = SPEED_100;
590 case MAC_MSR_SPEED_1000:
591 state->speed = SPEED_1000;
594 state->speed = SPEED_UNKNOWN;
598 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
599 if (pmsr & MAC_MSR_RX_FC)
600 state->pause |= MLO_PAUSE_RX;
601 if (pmsr & MAC_MSR_TX_FC)
602 state->pause |= MLO_PAUSE_TX;
605 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
606 phy_interface_t interface)
608 struct mtk_mac *mac = container_of(config, struct mtk_mac,
610 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
612 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
613 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
616 static void mtk_mac_link_up(struct phylink_config *config,
617 struct phy_device *phy,
618 unsigned int mode, phy_interface_t interface,
619 int speed, int duplex, bool tx_pause, bool rx_pause)
621 struct mtk_mac *mac = container_of(config, struct mtk_mac,
625 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
626 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
627 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
628 MAC_MCR_FORCE_RX_FC);
630 /* Configure speed */
634 mcr |= MAC_MCR_SPEED_1000;
637 mcr |= MAC_MCR_SPEED_100;
641 /* Configure duplex */
642 if (duplex == DUPLEX_FULL)
643 mcr |= MAC_MCR_FORCE_DPX;
645 /* Configure pause modes - phylink will avoid these for half duplex */
647 mcr |= MAC_MCR_FORCE_TX_FC;
649 mcr |= MAC_MCR_FORCE_RX_FC;
651 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
652 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
655 static const struct phylink_mac_ops mtk_phylink_ops = {
656 .validate = phylink_generic_validate,
657 .mac_select_pcs = mtk_mac_select_pcs,
658 .mac_pcs_get_state = mtk_mac_pcs_get_state,
659 .mac_config = mtk_mac_config,
660 .mac_finish = mtk_mac_finish,
661 .mac_link_down = mtk_mac_link_down,
662 .mac_link_up = mtk_mac_link_up,
665 static int mtk_mdio_init(struct mtk_eth *eth)
667 struct device_node *mii_np;
670 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
672 dev_err(eth->dev, "no %s child node found", "mdio-bus");
676 if (!of_device_is_available(mii_np)) {
681 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
687 eth->mii_bus->name = "mdio";
688 eth->mii_bus->read = mtk_mdio_read;
689 eth->mii_bus->write = mtk_mdio_write;
690 eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
691 eth->mii_bus->priv = eth;
692 eth->mii_bus->parent = eth->dev;
694 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
695 ret = of_mdiobus_register(eth->mii_bus, mii_np);
702 static void mtk_mdio_cleanup(struct mtk_eth *eth)
707 mdiobus_unregister(eth->mii_bus);
710 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
715 spin_lock_irqsave(&eth->tx_irq_lock, flags);
716 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
717 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
718 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
721 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
726 spin_lock_irqsave(&eth->tx_irq_lock, flags);
727 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
728 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
729 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
732 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
737 spin_lock_irqsave(&eth->rx_irq_lock, flags);
738 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
739 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
740 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
743 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
748 spin_lock_irqsave(&eth->rx_irq_lock, flags);
749 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
750 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
751 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
754 static int mtk_set_mac_address(struct net_device *dev, void *p)
756 int ret = eth_mac_addr(dev, p);
757 struct mtk_mac *mac = netdev_priv(dev);
758 struct mtk_eth *eth = mac->hw;
759 const char *macaddr = dev->dev_addr;
764 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
767 spin_lock_bh(&mac->hw->page_lock);
768 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
769 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
770 MT7628_SDM_MAC_ADRH);
771 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
772 (macaddr[4] << 8) | macaddr[5],
773 MT7628_SDM_MAC_ADRL);
775 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
776 MTK_GDMA_MAC_ADRH(mac->id));
777 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
778 (macaddr[4] << 8) | macaddr[5],
779 MTK_GDMA_MAC_ADRL(mac->id));
781 spin_unlock_bh(&mac->hw->page_lock);
786 void mtk_stats_update_mac(struct mtk_mac *mac)
788 struct mtk_hw_stats *hw_stats = mac->hw_stats;
789 struct mtk_eth *eth = mac->hw;
791 u64_stats_update_begin(&hw_stats->syncp);
793 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
794 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
795 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
796 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
797 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
798 hw_stats->rx_checksum_errors +=
799 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
801 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
802 unsigned int offs = hw_stats->reg_offset;
805 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
806 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
808 hw_stats->rx_bytes += (stats << 32);
809 hw_stats->rx_packets +=
810 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
811 hw_stats->rx_overflow +=
812 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
813 hw_stats->rx_fcs_errors +=
814 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
815 hw_stats->rx_short_errors +=
816 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
817 hw_stats->rx_long_errors +=
818 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
819 hw_stats->rx_checksum_errors +=
820 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
821 hw_stats->rx_flow_control_packets +=
822 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
824 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
825 hw_stats->tx_collisions +=
826 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
827 hw_stats->tx_bytes +=
828 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
829 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
831 hw_stats->tx_bytes += (stats << 32);
832 hw_stats->tx_packets +=
833 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
836 u64_stats_update_end(&hw_stats->syncp);
839 static void mtk_stats_update(struct mtk_eth *eth)
843 for (i = 0; i < MTK_MAC_COUNT; i++) {
844 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
846 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
847 mtk_stats_update_mac(eth->mac[i]);
848 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
853 static void mtk_get_stats64(struct net_device *dev,
854 struct rtnl_link_stats64 *storage)
856 struct mtk_mac *mac = netdev_priv(dev);
857 struct mtk_hw_stats *hw_stats = mac->hw_stats;
860 if (netif_running(dev) && netif_device_present(dev)) {
861 if (spin_trylock_bh(&hw_stats->stats_lock)) {
862 mtk_stats_update_mac(mac);
863 spin_unlock_bh(&hw_stats->stats_lock);
868 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
869 storage->rx_packets = hw_stats->rx_packets;
870 storage->tx_packets = hw_stats->tx_packets;
871 storage->rx_bytes = hw_stats->rx_bytes;
872 storage->tx_bytes = hw_stats->tx_bytes;
873 storage->collisions = hw_stats->tx_collisions;
874 storage->rx_length_errors = hw_stats->rx_short_errors +
875 hw_stats->rx_long_errors;
876 storage->rx_over_errors = hw_stats->rx_overflow;
877 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
878 storage->rx_errors = hw_stats->rx_checksum_errors;
879 storage->tx_aborted_errors = hw_stats->tx_skip;
880 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
882 storage->tx_errors = dev->stats.tx_errors;
883 storage->rx_dropped = dev->stats.rx_dropped;
884 storage->tx_dropped = dev->stats.tx_dropped;
887 static inline int mtk_max_frag_size(int mtu)
889 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH_2K */
890 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
891 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
893 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
894 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
897 static inline int mtk_max_buf_size(int frag_size)
899 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
900 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
902 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
907 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
908 struct mtk_rx_dma_v2 *dma_rxd)
910 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
911 if (!(rxd->rxd2 & RX_DMA_DONE))
914 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
915 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
916 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
917 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
918 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
919 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
925 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
927 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
930 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
936 /* the qdma core needs scratch memory to be set up */
937 static int mtk_init_fq_dma(struct mtk_eth *eth)
939 const struct mtk_soc_data *soc = eth->soc;
940 dma_addr_t phy_ring_tail;
941 int cnt = MTK_DMA_SIZE;
945 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
946 cnt * soc->txrx.txd_size,
947 &eth->phy_scratch_ring,
949 if (unlikely(!eth->scratch_ring))
952 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
953 if (unlikely(!eth->scratch_head))
956 dma_addr = dma_map_single(eth->dma_dev,
957 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
959 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
962 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
964 for (i = 0; i < cnt; i++) {
965 struct mtk_tx_dma_v2 *txd;
967 txd = eth->scratch_ring + i * soc->txrx.txd_size;
968 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
970 txd->txd2 = eth->phy_scratch_ring +
971 (i + 1) * soc->txrx.txd_size;
973 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
975 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
983 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
984 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
985 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
986 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
991 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
993 return ring->dma + (desc - ring->phys);
996 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
997 void *txd, u32 txd_size)
999 int idx = (txd - ring->dma) / txd_size;
1001 return &ring->buf[idx];
1004 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1005 struct mtk_tx_dma *dma)
1007 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1010 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1012 return (dma - ring->dma) / txd_size;
1015 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1016 struct xdp_frame_bulk *bq, bool napi)
1018 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1019 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1020 dma_unmap_single(eth->dma_dev,
1021 dma_unmap_addr(tx_buf, dma_addr0),
1022 dma_unmap_len(tx_buf, dma_len0),
1024 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1025 dma_unmap_page(eth->dma_dev,
1026 dma_unmap_addr(tx_buf, dma_addr0),
1027 dma_unmap_len(tx_buf, dma_len0),
1031 if (dma_unmap_len(tx_buf, dma_len0)) {
1032 dma_unmap_page(eth->dma_dev,
1033 dma_unmap_addr(tx_buf, dma_addr0),
1034 dma_unmap_len(tx_buf, dma_len0),
1038 if (dma_unmap_len(tx_buf, dma_len1)) {
1039 dma_unmap_page(eth->dma_dev,
1040 dma_unmap_addr(tx_buf, dma_addr1),
1041 dma_unmap_len(tx_buf, dma_len1),
1046 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1047 if (tx_buf->type == MTK_TYPE_SKB) {
1048 struct sk_buff *skb = tx_buf->data;
1051 napi_consume_skb(skb, napi);
1053 dev_kfree_skb_any(skb);
1055 struct xdp_frame *xdpf = tx_buf->data;
1057 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1058 xdp_return_frame_rx_napi(xdpf);
1060 xdp_return_frame_bulk(xdpf, bq);
1062 xdp_return_frame(xdpf);
1066 tx_buf->data = NULL;
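/* Record the DMA mapping in tx_buf and, for PDMA, mirror it into the
 * descriptor: even-numbered buffers use txd1/PLEN0, odd ones txd3/PLEN1.
 */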
1069 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1070 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1071 size_t size, int idx)
1073 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1074 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1075 dma_unmap_len_set(tx_buf, dma_len0, size);
1078 txd->txd3 = mapped_addr;
1079 txd->txd2 |= TX_DMA_PLEN1(size);
1080 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1081 dma_unmap_len_set(tx_buf, dma_len1, size);
1083 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1084 txd->txd1 = mapped_addr;
1085 txd->txd2 = TX_DMA_PLEN0(size);
1086 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1087 dma_unmap_len_set(tx_buf, dma_len0, size);
1092 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1093 struct mtk_tx_dma_desc_info *info)
1095 struct mtk_mac *mac = netdev_priv(dev);
1096 struct mtk_eth *eth = mac->hw;
1097 struct mtk_tx_dma *desc = txd;
1100 WRITE_ONCE(desc->txd1, info->addr);
1102 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
1105 WRITE_ONCE(desc->txd3, data);
1107 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1111 /* tx checksum offload */
1113 data |= TX_DMA_CHKSUM;
1114 /* vlan header offload */
1116 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1118 WRITE_ONCE(desc->txd4, data);
1121 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1122 struct mtk_tx_dma_desc_info *info)
1124 struct mtk_mac *mac = netdev_priv(dev);
1125 struct mtk_tx_dma_v2 *desc = txd;
1126 struct mtk_eth *eth = mac->hw;
1129 WRITE_ONCE(desc->txd1, info->addr);
1131 data = TX_DMA_PLEN0(info->size);
1134 WRITE_ONCE(desc->txd3, data);
1136 if (!info->qid && mac->id)
1137 info->qid = MTK_QDMA_GMAC2_QID;
1139 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1140 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1141 WRITE_ONCE(desc->txd4, data);
1146 data |= TX_DMA_TSO_V2;
1147 /* tx checksum offload */
1149 data |= TX_DMA_CHKSUM_V2;
1151 WRITE_ONCE(desc->txd5, data);
1154 if (info->first && info->vlan)
1155 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1156 WRITE_ONCE(desc->txd6, data);
1158 WRITE_ONCE(desc->txd7, 0);
1159 WRITE_ONCE(desc->txd8, 0);
1162 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1163 struct mtk_tx_dma_desc_info *info)
1165 struct mtk_mac *mac = netdev_priv(dev);
1166 struct mtk_eth *eth = mac->hw;
1168 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1169 mtk_tx_set_dma_desc_v2(dev, txd, info);
1171 mtk_tx_set_dma_desc_v1(dev, txd, info);
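/* Map the skb head and all fragments onto consecutive TX descriptors and
 * kick the DMA engine; on a mapping error the descriptors set up so far
 * are unwound again.
 */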
1174 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1175 int tx_num, struct mtk_tx_ring *ring, bool gso)
1177 struct mtk_tx_dma_desc_info txd_info = {
1178 .size = skb_headlen(skb),
1180 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1181 .vlan = skb_vlan_tag_present(skb),
1182 .qid = skb->mark & MTK_QDMA_TX_MASK,
1183 .vlan_tci = skb_vlan_tag_get(skb),
1185 .last = !skb_is_nonlinear(skb),
1187 struct mtk_mac *mac = netdev_priv(dev);
1188 struct mtk_eth *eth = mac->hw;
1189 const struct mtk_soc_data *soc = eth->soc;
1190 struct mtk_tx_dma *itxd, *txd;
1191 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1192 struct mtk_tx_buf *itx_buf, *tx_buf;
1196 itxd = ring->next_free;
1197 itxd_pdma = qdma_to_pdma(ring, itxd);
1198 if (itxd == ring->last_free)
1201 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1202 memset(itx_buf, 0, sizeof(*itx_buf));
1204 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1206 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1209 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1211 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1212 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1213 MTK_TX_FLAGS_FPORT1;
1214 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1219 txd_pdma = qdma_to_pdma(ring, txd);
1221 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1222 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1223 unsigned int offset = 0;
1224 int frag_size = skb_frag_size(frag);
1227 bool new_desc = true;
1229 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1231 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1232 txd_pdma = qdma_to_pdma(ring, txd);
1233 if (txd == ring->last_free)
1241 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1242 txd_info.size = min_t(unsigned int, frag_size,
1243 soc->txrx.dma_max_len);
1244 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1245 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1246 !(frag_size - txd_info.size);
1247 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1248 offset, txd_info.size,
1250 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1253 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1255 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1256 soc->txrx.txd_size);
1258 memset(tx_buf, 0, sizeof(*tx_buf));
1259 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1260 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1261 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1262 MTK_TX_FLAGS_FPORT1;
1264 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1265 txd_info.size, k++);
1267 frag_size -= txd_info.size;
1268 offset += txd_info.size;
1272 /* store skb to cleanup */
1273 itx_buf->type = MTK_TYPE_SKB;
1274 itx_buf->data = skb;
1276 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1278 txd_pdma->txd2 |= TX_DMA_LS0;
1280 txd_pdma->txd2 |= TX_DMA_LS1;
1283 netdev_sent_queue(dev, skb->len);
1284 skb_tx_timestamp(skb);
1286 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1287 atomic_sub(n_desc, &ring->free_count);
1289 /* make sure that all changes to the dma ring are flushed before we
1290 * continue
1291 */
1294 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1295 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1296 !netdev_xmit_more())
1297 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1301 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1303 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1310 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1313 mtk_tx_unmap(eth, tx_buf, NULL, false);
1315 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1316 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1317 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1319 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1320 itxd_pdma = qdma_to_pdma(ring, itxd);
1321 } while (itxd != txd);
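/* Worst-case number of TX descriptors needed for an skb: GSO fragments
 * are split at dma_max_len, otherwise one descriptor per fragment plus
 * one for the linear head.
 */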
1326 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1331 if (skb_is_gso(skb)) {
1332 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1333 frag = &skb_shinfo(skb)->frags[i];
1334 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1335 eth->soc->txrx.dma_max_len);
1338 nfrags += skb_shinfo(skb)->nr_frags;
1344 static int mtk_queue_stopped(struct mtk_eth *eth)
1348 for (i = 0; i < MTK_MAC_COUNT; i++) {
1349 if (!eth->netdev[i])
1351 if (netif_queue_stopped(eth->netdev[i]))
1358 static void mtk_wake_queue(struct mtk_eth *eth)
1362 for (i = 0; i < MTK_MAC_COUNT; i++) {
1363 if (!eth->netdev[i])
1365 netif_wake_queue(eth->netdev[i]);
1369 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1371 struct mtk_mac *mac = netdev_priv(dev);
1372 struct mtk_eth *eth = mac->hw;
1373 struct mtk_tx_ring *ring = &eth->tx_ring;
1374 struct net_device_stats *stats = &dev->stats;
1378 /* normally we can rely on the stack not calling this more than once,
1379 * however we have 2 queues running on the same ring so we need to lock
1380 * the ring access
1381 */
1382 spin_lock(&eth->page_lock);
1384 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1387 tx_num = mtk_cal_txd_req(eth, skb);
1388 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1389 netif_stop_queue(dev);
1390 netif_err(eth, tx_queued, dev,
1391 "Tx Ring full when queue awake!\n");
1392 spin_unlock(&eth->page_lock);
1393 return NETDEV_TX_BUSY;
1396 /* TSO: fill MSS info in tcp checksum field */
1397 if (skb_is_gso(skb)) {
1398 if (skb_cow_head(skb, 0)) {
1399 netif_warn(eth, tx_err, dev,
1400 "GSO expand head fail.\n");
1404 if (skb_shinfo(skb)->gso_type &
1405 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1407 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1411 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1414 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1415 netif_stop_queue(dev);
1417 spin_unlock(&eth->page_lock);
1419 return NETDEV_TX_OK;
1422 spin_unlock(&eth->page_lock);
1423 stats->tx_dropped++;
1424 dev_kfree_skb_any(skb);
1425 return NETDEV_TX_OK;
1428 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1431 struct mtk_rx_ring *ring;
1435 return &eth->rx_ring[0];
1437 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1438 struct mtk_rx_dma *rxd;
1440 ring = &eth->rx_ring[i];
1441 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1442 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1443 if (rxd->rxd2 & RX_DMA_DONE) {
1444 ring->calc_idx_update = true;
1452 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1454 struct mtk_rx_ring *ring;
1458 ring = &eth->rx_ring[0];
1459 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1461 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1462 ring = &eth->rx_ring[i];
1463 if (ring->calc_idx_update) {
1464 ring->calc_idx_update = false;
1465 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1471 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1473 return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
1476 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1477 struct xdp_rxq_info *xdp_q,
1480 struct page_pool_params pp_params = {
1482 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1484 .nid = NUMA_NO_NODE,
1485 .dev = eth->dma_dev,
1486 .offset = MTK_PP_HEADROOM,
1487 .max_len = MTK_PP_MAX_BUF_SIZE,
1489 struct page_pool *pp;
1492 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1494 pp = page_pool_create(&pp_params);
1498 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
1503 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1505 goto err_unregister_rxq;
1510 xdp_rxq_info_unreg(xdp_q);
1512 page_pool_destroy(pp);
1514 return ERR_PTR(err);
1517 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1522 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1526 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1527 return page_address(page);
1530 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1532 if (ring->page_pool)
1533 page_pool_put_full_page(ring->page_pool,
1534 virt_to_head_page(data), napi);
1536 skb_free_frag(data);
1539 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1540 struct mtk_tx_dma_desc_info *txd_info,
1541 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1542 void *data, u16 headroom, int index, bool dma_map)
1544 struct mtk_tx_ring *ring = &eth->tx_ring;
1545 struct mtk_mac *mac = netdev_priv(dev);
1546 struct mtk_tx_dma *txd_pdma;
1548 if (dma_map) { /* ndo_xdp_xmit */
1549 txd_info->addr = dma_map_single(eth->dma_dev, data,
1550 txd_info->size, DMA_TO_DEVICE);
1551 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1554 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1556 struct page *page = virt_to_head_page(data);
1558 txd_info->addr = page_pool_get_dma_addr(page) +
1559 sizeof(struct xdp_frame) + headroom;
1560 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1561 txd_info->size, DMA_BIDIRECTIONAL);
1563 mtk_tx_set_dma_desc(dev, txd, txd_info);
1565 tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1566 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1567 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1569 txd_pdma = qdma_to_pdma(ring, txd);
1570 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1576 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1577 struct net_device *dev, bool dma_map)
1579 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1580 const struct mtk_soc_data *soc = eth->soc;
1581 struct mtk_tx_ring *ring = &eth->tx_ring;
1582 struct mtk_tx_dma_desc_info txd_info = {
1585 .last = !xdp_frame_has_frags(xdpf),
1587 int err, index = 0, n_desc = 1, nr_frags;
1588 struct mtk_tx_buf *htx_buf, *tx_buf;
1589 struct mtk_tx_dma *htxd, *txd;
1590 void *data = xdpf->data;
1592 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1595 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1596 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1599 spin_lock(&eth->page_lock);
1601 txd = ring->next_free;
1602 if (txd == ring->last_free) {
1603 spin_unlock(&eth->page_lock);
1608 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1609 memset(tx_buf, 0, sizeof(*tx_buf));
1613 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1614 data, xdpf->headroom, index, dma_map);
1621 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1622 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1623 if (txd == ring->last_free)
1626 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1627 soc->txrx.txd_size);
1628 memset(tx_buf, 0, sizeof(*tx_buf));
1632 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1633 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1634 txd_info.last = index + 1 == nr_frags;
1635 data = skb_frag_address(&sinfo->frags[index]);
1639 /* store xdpf for cleanup */
1640 htx_buf->data = xdpf;
1642 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1643 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1646 txd_pdma->txd2 |= TX_DMA_LS0;
1648 txd_pdma->txd2 |= TX_DMA_LS1;
1651 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1652 atomic_sub(n_desc, &ring->free_count);
1654 /* make sure that all changes to the dma ring are flushed before we
1655 * continue
1656 */
1659 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1660 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1664 idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1665 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1666 MT7628_TX_CTX_IDX0);
1669 spin_unlock(&eth->page_lock);
1674 while (htxd != txd) {
1675 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1676 mtk_tx_unmap(eth, tx_buf, NULL, false);
1678 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1679 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1680 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1682 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1685 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1688 spin_unlock(&eth->page_lock);
1693 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1694 struct xdp_frame **frames, u32 flags)
1696 struct mtk_mac *mac = netdev_priv(dev);
1697 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1698 struct mtk_eth *eth = mac->hw;
1701 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1704 for (i = 0; i < num_frame; i++) {
1705 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1710 u64_stats_update_begin(&hw_stats->syncp);
1711 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1712 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1713 u64_stats_update_end(&hw_stats->syncp);
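/* Run the attached XDP program on a received buffer and act on the
 * verdict (PASS/TX/REDIRECT/DROP), updating the matching xdp_stats
 * counter.
 */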
1718 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1719 struct xdp_buff *xdp, struct net_device *dev)
1721 struct mtk_mac *mac = netdev_priv(dev);
1722 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1723 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1724 struct bpf_prog *prog;
1729 prog = rcu_dereference(eth->prog);
1733 act = bpf_prog_run_xdp(prog, xdp);
1736 count = &hw_stats->xdp_stats.rx_xdp_pass;
1739 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1744 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1747 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1749 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1750 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1755 count = &hw_stats->xdp_stats.rx_xdp_tx;
1759 bpf_warn_invalid_xdp_action(dev, prog, act);
1762 trace_xdp_exception(dev, prog, act);
1768 page_pool_put_full_page(ring->page_pool,
1769 virt_to_head_page(xdp->data), true);
1772 u64_stats_update_begin(&hw_stats->syncp);
1773 *count = *count + 1;
1774 u64_stats_update_end(&hw_stats->syncp);
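/* NAPI RX poll: reap up to @budget completed RX descriptors, refill each
 * ring slot with a fresh buffer and hand packets (or XDP buffers) up the
 * stack.
 */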
1781 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1782 struct mtk_eth *eth)
1784 struct dim_sample dim_sample = {};
1785 struct mtk_rx_ring *ring;
1786 bool xdp_flush = false;
1788 struct sk_buff *skb;
1789 u8 *data, *new_data;
1790 struct mtk_rx_dma_v2 *rxd, trxd;
1791 int done = 0, bytes = 0;
1793 while (done < budget) {
1794 unsigned int pktlen, *rxdcsum;
1795 struct net_device *netdev;
1796 dma_addr_t dma_addr;
1800 ring = mtk_get_rx_ring(eth);
1801 if (unlikely(!ring))
1804 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1805 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1806 data = ring->data[idx];
1808 if (!mtk_rx_get_desc(eth, &trxd, rxd))
1811 /* find out which mac the packet comes from. values start at 1 */
1812 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1813 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1814 else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1815 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1816 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1818 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1822 netdev = eth->netdev[mac];
1824 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1827 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1829 /* alloc new buffer */
1830 if (ring->page_pool) {
1831 struct page *page = virt_to_head_page(data);
1832 struct xdp_buff xdp;
1835 new_data = mtk_page_pool_get_buff(ring->page_pool,
1838 if (unlikely(!new_data)) {
1839 netdev->stats.rx_dropped++;
1843 dma_sync_single_for_cpu(eth->dma_dev,
1844 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1845 pktlen, page_pool_get_dma_dir(ring->page_pool));
1847 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1848 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1850 xdp_buff_clear_frags_flag(&xdp);
1852 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1853 if (ret == XDP_REDIRECT)
1856 if (ret != XDP_PASS)
1859 skb = build_skb(data, PAGE_SIZE);
1860 if (unlikely(!skb)) {
1861 page_pool_put_full_page(ring->page_pool,
1863 netdev->stats.rx_dropped++;
1867 skb_reserve(skb, xdp.data - xdp.data_hard_start);
1868 skb_put(skb, xdp.data_end - xdp.data);
1869 skb_mark_for_recycle(skb);
1871 if (ring->frag_size <= PAGE_SIZE)
1872 new_data = napi_alloc_frag(ring->frag_size);
1874 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
1876 if (unlikely(!new_data)) {
1877 netdev->stats.rx_dropped++;
1881 dma_addr = dma_map_single(eth->dma_dev,
1882 new_data + NET_SKB_PAD + eth->ip_align,
1883 ring->buf_size, DMA_FROM_DEVICE);
1884 if (unlikely(dma_mapping_error(eth->dma_dev,
1886 skb_free_frag(new_data);
1887 netdev->stats.rx_dropped++;
1891 dma_unmap_single(eth->dma_dev, trxd.rxd1,
1892 ring->buf_size, DMA_FROM_DEVICE);
1894 skb = build_skb(data, ring->frag_size);
1895 if (unlikely(!skb)) {
1896 netdev->stats.rx_dropped++;
1897 skb_free_frag(data);
1901 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1902 skb_put(skb, pktlen);
1908 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1909 reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
1910 hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
1911 if (hash != MTK_RXD5_FOE_ENTRY)
1912 skb_set_hash(skb, jhash_1word(hash, 0),
1914 rxdcsum = &trxd.rxd3;
1916 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
1917 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
1918 if (hash != MTK_RXD4_FOE_ENTRY)
1919 skb_set_hash(skb, jhash_1word(hash, 0),
1921 rxdcsum = &trxd.rxd4;
1924 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
1925 skb->ip_summed = CHECKSUM_UNNECESSARY;
1927 skb_checksum_none_assert(skb);
1928 skb->protocol = eth_type_trans(skb, netdev);
1930 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
1931 mtk_ppe_check_skb(eth->ppe[0], skb, hash);
1933 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
1934 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1935 if (trxd.rxd3 & RX_DMA_VTAG_V2)
1936 __vlan_hwaccel_put_tag(skb,
1937 htons(RX_DMA_VPID(trxd.rxd4)),
1938 RX_DMA_VID(trxd.rxd4));
1939 } else if (trxd.rxd2 & RX_DMA_VTAG) {
1940 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1941 RX_DMA_VID(trxd.rxd3));
1944 /* If the device is attached to a dsa switch, the special
1945 * tag inserted in VLAN field by hw switch can be offloaded
1946 * by RX HW VLAN offload. Clear vlan info.
1947 */
1948 if (netdev_uses_dsa(netdev))
1949 __vlan_hwaccel_clear_tag(skb);
1952 skb_record_rx_queue(skb, 0);
1953 napi_gro_receive(napi, skb);
1956 ring->data[idx] = new_data;
1957 rxd->rxd1 = (unsigned int)dma_addr;
1959 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1960 rxd->rxd2 = RX_DMA_LSO;
1962 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
1964 ring->calc_idx = idx;
1970 /* make sure that all changes to the dma ring are flushed before
1971 * we continue
1972 */
1974 mtk_update_rx_cpu_idx(eth);
1977 eth->rx_packets += done;
1978 eth->rx_bytes += bytes;
1979 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
1981 net_dim(&eth->rx_dim, dim_sample);
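/* Reclaim completed QDMA TX descriptors between the software release
 * pointer and the hardware DRX pointer, unmapping buffers and crediting
 * the per-MAC packet/byte counts.
 */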
1989 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1990 unsigned int *done, unsigned int *bytes)
1992 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1993 struct mtk_tx_ring *ring = &eth->tx_ring;
1994 struct mtk_tx_buf *tx_buf;
1995 struct xdp_frame_bulk bq;
1996 struct mtk_tx_dma *desc;
1999 cpu = ring->last_free_ptr;
2000 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2002 desc = mtk_qdma_phys_to_virt(ring, cpu);
2003 xdp_frame_bulk_init(&bq);
2005 while ((cpu != dma) && budget) {
2006 u32 next_cpu = desc->txd2;
2009 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2010 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2013 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2014 eth->soc->txrx.txd_size);
2015 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2021 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2022 if (tx_buf->type == MTK_TYPE_SKB) {
2023 struct sk_buff *skb = tx_buf->data;
2025 bytes[mac] += skb->len;
2030 mtk_tx_unmap(eth, tx_buf, &bq, true);
2032 ring->last_free = desc;
2033 atomic_inc(&ring->free_count);
2037 xdp_flush_frame_bulk(&bq);
2039 ring->last_free_ptr = cpu;
2040 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2045 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2046 unsigned int *done, unsigned int *bytes)
2048 struct mtk_tx_ring *ring = &eth->tx_ring;
2049 struct mtk_tx_buf *tx_buf;
2050 struct xdp_frame_bulk bq;
2051 struct mtk_tx_dma *desc;
2054 cpu = ring->cpu_idx;
2055 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2056 xdp_frame_bulk_init(&bq);
2058 while ((cpu != dma) && budget) {
2059 tx_buf = &ring->buf[cpu];
2063 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2064 if (tx_buf->type == MTK_TYPE_SKB) {
2065 struct sk_buff *skb = tx_buf->data;
2067 bytes[0] += skb->len;
2072 mtk_tx_unmap(eth, tx_buf, &bq, true);
2074 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2075 ring->last_free = desc;
2076 atomic_inc(&ring->free_count);
2078 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2080 xdp_flush_frame_bulk(&bq);
2082 ring->cpu_idx = cpu;
2087 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2089 struct mtk_tx_ring *ring = &eth->tx_ring;
2090 struct dim_sample dim_sample = {};
2091 unsigned int done[MTK_MAX_DEVS];
2092 unsigned int bytes[MTK_MAX_DEVS];
2095 memset(done, 0, sizeof(done));
2096 memset(bytes, 0, sizeof(bytes));
2098 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2099 budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
2101 budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
2103 for (i = 0; i < MTK_MAC_COUNT; i++) {
2104 if (!eth->netdev[i] || !done[i])
2106 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
2108 eth->tx_packets += done[i];
2109 eth->tx_bytes += bytes[i];
2112 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2114 net_dim(&eth->tx_dim, dim_sample);
2116 if (mtk_queue_stopped(eth) &&
2117 (atomic_read(&ring->free_count) > ring->thresh))
2118 mtk_wake_queue(eth);
2123 static void mtk_handle_status_irq(struct mtk_eth *eth)
2125 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2127 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2128 mtk_stats_update(eth);
2129 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2134 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2136 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2137 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2140 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2141 mtk_handle_status_irq(eth);
2142 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2143 tx_done = mtk_poll_tx(eth, budget);
2145 if (unlikely(netif_msg_intr(eth))) {
2147 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2148 mtk_r32(eth, reg_map->tx_irq_status),
2149 mtk_r32(eth, reg_map->tx_irq_mask));
2152 if (tx_done == budget)
2155 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2158 if (napi_complete_done(napi, tx_done))
2159 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2164 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2166 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2167 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2168 int rx_done_total = 0;
2170 mtk_handle_status_irq(eth);
2175 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2176 reg_map->pdma.irq_status);
2177 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2178 rx_done_total += rx_done;
2180 if (unlikely(netif_msg_intr(eth))) {
2182 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2183 mtk_r32(eth, reg_map->pdma.irq_status),
2184 mtk_r32(eth, reg_map->pdma.irq_mask));
2187 if (rx_done_total == budget)
2190 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2191 eth->soc->txrx.rx_irq_done_mask);
2193 if (napi_complete_done(napi, rx_done_total))
2194 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2196 return rx_done_total;
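/* Allocate the TX descriptor ring (plus the shadow PDMA ring on SoCs
 * without QDMA), link the descriptors together and program the ring
 * base/count registers.
 */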
2199 static int mtk_tx_alloc(struct mtk_eth *eth)
2201 const struct mtk_soc_data *soc = eth->soc;
2202 struct mtk_tx_ring *ring = &eth->tx_ring;
2203 int i, sz = soc->txrx.txd_size;
2204 struct mtk_tx_dma_v2 *txd;
2206 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
2211 ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
2212 &ring->phys, GFP_KERNEL);
2216 for (i = 0; i < MTK_DMA_SIZE; i++) {
2217 int next = (i + 1) % MTK_DMA_SIZE;
2218 u32 next_ptr = ring->phys + next * sz;
2220 txd = ring->dma + i * sz;
2221 txd->txd2 = next_ptr;
2222 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2224 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2232 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2233 * only as the framework. The real HW descriptors are the PDMA
2234 * descriptors in ring->dma_pdma.
2235 */
2236 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2237 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
2238 &ring->phys_pdma, GFP_KERNEL);
2239 if (!ring->dma_pdma)
2242 for (i = 0; i < MTK_DMA_SIZE; i++) {
2243 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2244 ring->dma_pdma[i].txd4 = 0;
2248 ring->dma_size = MTK_DMA_SIZE;
2249 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
2250 ring->next_free = ring->dma;
2251 ring->last_free = (void *)txd;
2252 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
2253 ring->thresh = MAX_SKB_FRAGS;
2255 /* make sure that all changes to the dma ring are flushed before we
2256 * continue
2257 */
2260 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2261 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2262 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2264 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
2265 soc->reg_map->qdma.crx_ptr);
2266 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2267 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
2268 soc->reg_map->qdma.qtx_cfg);
2270 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2271 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
2272 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2273 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2282 static void mtk_tx_clean(struct mtk_eth *eth)
2284 const struct mtk_soc_data *soc = eth->soc;
2285 struct mtk_tx_ring *ring = &eth->tx_ring;
2289 for (i = 0; i < MTK_DMA_SIZE; i++)
2290 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2296 dma_free_coherent(eth->dma_dev,
2297 MTK_DMA_SIZE * soc->txrx.txd_size,
2298 ring->dma, ring->phys);
2302 if (ring->dma_pdma) {
2303 dma_free_coherent(eth->dma_dev,
2304 MTK_DMA_SIZE * soc->txrx.txd_size,
2305 ring->dma_pdma, ring->phys_pdma);
2306 ring->dma_pdma = NULL;
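/* Allocate one RX descriptor ring and pre-fill it with buffers, taken
 * from a page_pool when the SoC supports it or from netdev frags
 * otherwise, then program the ring registers.
 */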
2310 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2312 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2313 struct mtk_rx_ring *ring;
2314 int rx_data_len, rx_dma_size;
2317 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2320 ring = &eth->rx_ring_qdma;
2322 ring = &eth->rx_ring[ring_no];
2325 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2326 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2327 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2329 rx_data_len = ETH_DATA_LEN;
2330 rx_dma_size = MTK_DMA_SIZE;
2333 ring->frag_size = mtk_max_frag_size(rx_data_len);
2334 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2335 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2340 if (mtk_page_pool_enabled(eth)) {
2341 struct page_pool *pp;
2343 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2348 ring->page_pool = pp;
2351 ring->dma = dma_alloc_coherent(eth->dma_dev,
2352 rx_dma_size * eth->soc->txrx.rxd_size,
2353 &ring->phys, GFP_KERNEL);
2357 for (i = 0; i < rx_dma_size; i++) {
2358 struct mtk_rx_dma_v2 *rxd;
2359 dma_addr_t dma_addr;
2362 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2363 if (ring->page_pool) {
2364 data = mtk_page_pool_get_buff(ring->page_pool,
2365 &dma_addr, GFP_KERNEL);
2369 if (ring->frag_size <= PAGE_SIZE)
2370 data = netdev_alloc_frag(ring->frag_size);
2372 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2377 dma_addr = dma_map_single(eth->dma_dev,
2378 data + NET_SKB_PAD + eth->ip_align,
2379 ring->buf_size, DMA_FROM_DEVICE);
2380 if (unlikely(dma_mapping_error(eth->dma_dev,
2384 rxd->rxd1 = (unsigned int)dma_addr;
2385 ring->data[i] = data;
2387 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2388 rxd->rxd2 = RX_DMA_LSO;
2390 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2394 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2402 ring->dma_size = rx_dma_size;
2403 ring->calc_idx_update = false;
2404 ring->calc_idx = rx_dma_size - 1;
2405 if (rx_flag == MTK_RX_FLAGS_QDMA)
2406 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2407 ring_no * MTK_QRX_OFFSET;
2409 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2410 ring_no * MTK_QRX_OFFSET;
2411 /* make sure that all changes to the dma ring are flushed before we
2412 * continue
2413 */
2416 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2417 mtk_w32(eth, ring->phys,
2418 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2419 mtk_w32(eth, rx_dma_size,
2420 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2421 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2422 reg_map->qdma.rst_idx);
2424 mtk_w32(eth, ring->phys,
2425 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2426 mtk_w32(eth, rx_dma_size,
2427 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2428 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2429 reg_map->pdma.rst_idx);
2431 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2436 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2440 if (ring->data && ring->dma) {
2441 for (i = 0; i < ring->dma_size; i++) {
2442 struct mtk_rx_dma *rxd;
2447 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2451 dma_unmap_single(eth->dma_dev, rxd->rxd1,
2452 ring->buf_size, DMA_FROM_DEVICE);
2453 mtk_rx_put_buff(ring, ring->data[i], false);
2460 dma_free_coherent(eth->dma_dev,
2461 ring->dma_size * eth->soc->txrx.rxd_size,
2462 ring->dma, ring->phys);
2466 if (ring->page_pool) {
2467 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2468 xdp_rxq_info_unreg(&ring->xdp_q);
2469 page_pool_destroy(ring->page_pool);
2470 ring->page_pool = NULL;
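/* Configure the hardware LRO engine: put the non-default RX rings into
 * auto-learn mode, set the aggregation limits and timers, then enable
 * LRO.
 */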
2474 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2477 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2478 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2480 /* set LRO rings to auto-learn modes */
2481 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2483 /* validate LRO ring */
2484 ring_ctrl_dw2 |= MTK_RING_VLD;
2486 /* set AGE timer (unit: 20us) */
2487 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2488 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2490 /* set max AGG timer (unit: 20us) */
2491 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2493 /* set max LRO AGG count */
2494 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2495 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2497 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2498 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2499 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2500 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2503 /* IPv4 checksum update enable */
2504 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2506 /* switch priority comparison to packet count mode */
2507 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2509 /* bandwidth threshold setting */
2510 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2512 /* auto-learn score delta setting */
2513 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2515 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2516 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2517 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2519 /* set HW LRO mode & the max aggregation count for rx packets */
2520 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2522 /* minimum SDL0 room that must remain in the RXD for LRO aggregation */
2523 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2526 lro_ctrl_dw0 |= MTK_LRO_EN;
2528 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2529 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
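/* Tear down hardware LRO: ask the rings to relinquish and flush their
 * aggregated packets, poll for completion, then invalidate the rings and
 * disable the engine.
 */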
2534 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2539 /* relinquish lro rings, flush aggregated packets */
2540 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2542 /* wait for the relinquish requests to complete */
2543 for (i = 0; i < 10; i++) {
2544 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2545 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2552 /* invalidate lro rings */
2553 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2554 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2556 /* disable HW LRO */
2557 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
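/* Program a destination IPv4 address into an LRO ring. MYIP_VLD is cleared
 * while the address is rewritten and set again afterwards, presumably so the
 * ring never matches against a half-written address.
 */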
2560 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2564 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2566 /* invalidate the IP setting */
2567 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2569 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2571 /* validate the IP setting */
2572 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2575 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2579 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2581 /* invalidate the IP setting */
2582 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2584 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
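/* The LRO destination-IP filters below are driven through the ethtool rxnfc
 * API. Illustrative usage (interface name and address are only examples):
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 loc 0   # add rule
 *   ethtool -N eth0 delete 0                                    # remove rule
 * Only TCP/IPv4 destination addresses and rule locations 0-1 are accepted.
 */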
2587 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2592 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2593 if (mac->hwlro_ip[i])
2600 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2601 struct ethtool_rxnfc *cmd)
2603 struct ethtool_rx_flow_spec *fsp =
2604 (struct ethtool_rx_flow_spec *)&cmd->fs;
2605 struct mtk_mac *mac = netdev_priv(dev);
2606 struct mtk_eth *eth = mac->hw;
2609 if ((fsp->flow_type != TCP_V4_FLOW) ||
2610 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2611 (fsp->location > 1))
2614 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2615 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2617 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2619 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2624 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2625 struct ethtool_rxnfc *cmd)
2627 struct ethtool_rx_flow_spec *fsp =
2628 (struct ethtool_rx_flow_spec *)&cmd->fs;
2629 struct mtk_mac *mac = netdev_priv(dev);
2630 struct mtk_eth *eth = mac->hw;
2633 if (fsp->location > 1)
2636 mac->hwlro_ip[fsp->location] = 0;
2637 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2639 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2641 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2646 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2648 struct mtk_mac *mac = netdev_priv(dev);
2649 struct mtk_eth *eth = mac->hw;
2652 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2653 mac->hwlro_ip[i] = 0;
2654 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2656 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2659 mac->hwlro_ip_cnt = 0;
2662 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2663 struct ethtool_rxnfc *cmd)
2665 struct mtk_mac *mac = netdev_priv(dev);
2666 struct ethtool_rx_flow_spec *fsp =
2667 (struct ethtool_rx_flow_spec *)&cmd->fs;
2669 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2672 /* only the TCP/IPv4 destination address is meaningful; other fields are unused */
2673 fsp->flow_type = TCP_V4_FLOW;
2674 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2675 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2677 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2678 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2679 fsp->h_u.tcp_ip4_spec.psrc = 0;
2680 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2681 fsp->h_u.tcp_ip4_spec.pdst = 0;
2682 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2683 fsp->h_u.tcp_ip4_spec.tos = 0;
2684 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2689 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2690 struct ethtool_rxnfc *cmd,
2693 struct mtk_mac *mac = netdev_priv(dev);
2697 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2698 if (mac->hwlro_ip[i]) {
2704 cmd->rule_cnt = cnt;
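/* LRO cannot be turned off through the feature flags while destination-IP
 * rules are still programmed; ndo_fix_features below forces NETIF_F_LRO back
 * on in that case, and ndo_set_features clears the filters once LRO really
 * goes away.
 */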
2709 static netdev_features_t mtk_fix_features(struct net_device *dev,
2710 netdev_features_t features)
2712 if (!(features & NETIF_F_LRO)) {
2713 struct mtk_mac *mac = netdev_priv(dev);
2714 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2717 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2719 features |= NETIF_F_LRO;
2726 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2730 if (!((dev->features ^ features) & NETIF_F_LRO))
2733 if (!(features & NETIF_F_LRO))
2734 mtk_hwlro_netdev_disable(dev);
2739 /* wait for DMA to finish whatever it is doing before we start using it again */
2740 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2746 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2747 reg = eth->soc->reg_map->qdma.glo_cfg;
2749 reg = eth->soc->reg_map->pdma.glo_cfg;
2751 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2752 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2753 5, MTK_DMA_BUSY_TIMEOUT_US);
2755 dev_err(eth->dev, "DMA init timeout\n");
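/* Allocate and program all DMA resources: the QDMA scratch/free queue, the
 * TX ring, RX ring 0 (plus a dedicated QDMA RX ring where supported), the
 * optional HW LRO rings, and finally the QDMA random-early-drop thresholds.
 */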
2760 static int mtk_dma_init(struct mtk_eth *eth)
2765 if (mtk_dma_busy_wait(eth))
2768 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2769 /* QDMA needs scratch memory for internal reordering of the fragments */
2772 err = mtk_init_fq_dma(eth);
2777 err = mtk_tx_alloc(eth);
2781 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2782 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2787 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2792 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2793 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2797 err = mtk_hwlro_rx_init(eth);
2802 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2803 /* Enable random early drop and set drop threshold automatically */
2806 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2807 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
2808 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
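/* Undo mtk_dma_init(): reset the netdev queues, free the scratch ring and
 * every RX ring, and release the HW LRO rings when they were set up.
 */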
2814 static void mtk_dma_free(struct mtk_eth *eth)
2816 const struct mtk_soc_data *soc = eth->soc;
2819 for (i = 0; i < MTK_MAC_COUNT; i++)
2821 netdev_reset_queue(eth->netdev[i]);
2822 if (eth->scratch_ring) {
2823 dma_free_coherent(eth->dma_dev,
2824 MTK_DMA_SIZE * soc->txrx.txd_size,
2825 eth->scratch_ring, eth->phy_scratch_ring);
2826 eth->scratch_ring = NULL;
2827 eth->phy_scratch_ring = 0;
2830 mtk_rx_clean(eth, ð->rx_ring[0]);
2831 mtk_rx_clean(eth, ð->rx_ring_qdma);
2834 mtk_hwlro_rx_uninit(eth);
2835 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2836 mtk_rx_clean(eth, ð->rx_ring[i]);
2839 kfree(eth->scratch_head);
2842 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
2844 struct mtk_mac *mac = netdev_priv(dev);
2845 struct mtk_eth *eth = mac->hw;
2847 eth->netdev[mac->id]->stats.tx_errors++;
2848 netif_err(eth, tx_err, dev,
2849 "transmit timed out\n");
2850 schedule_work(ð->pending_work);
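/* Interrupt handlers: each one masks its own completion interrupt and kicks
 * the corresponding NAPI context; the poll routines re-arm the interrupt once
 * the ring has been serviced. mtk_handle_irq() is the demultiplexer used on
 * SoCs where TX and RX share a single interrupt line.
 */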
2853 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
2855 struct mtk_eth *eth = _eth;
2858 if (likely(napi_schedule_prep(ð->rx_napi))) {
2859 __napi_schedule(ð->rx_napi);
2860 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
2866 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2868 struct mtk_eth *eth = _eth;
2871 if (likely(napi_schedule_prep(ð->tx_napi))) {
2872 __napi_schedule(ð->tx_napi);
2873 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2879 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2881 struct mtk_eth *eth = _eth;
2882 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2884 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
2885 eth->soc->txrx.rx_irq_done_mask) {
2886 if (mtk_r32(eth, reg_map->pdma.irq_status) &
2887 eth->soc->txrx.rx_irq_done_mask)
2888 mtk_handle_irq_rx(irq, _eth);
2890 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
2891 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2892 mtk_handle_irq_tx(irq, _eth);
2898 #ifdef CONFIG_NET_POLL_CONTROLLER
2899 static void mtk_poll_controller(struct net_device *dev)
2901 struct mtk_mac *mac = netdev_priv(dev);
2902 struct mtk_eth *eth = mac->hw;
2904 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2905 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
2906 mtk_handle_irq_rx(eth->irq[2], dev);
2907 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2908 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2912 static int mtk_start_dma(struct mtk_eth *eth)
2914 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2915 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2918 err = mtk_dma_init(eth);
2924 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2925 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
2926 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2927 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
2928 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
2930 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2931 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
2932 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
2935 val |= MTK_RX_BT_32DWORDS;
2936 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
2939 MTK_RX_DMA_EN | rx_2b_offset |
2940 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2941 reg_map->pdma.glo_cfg);
2943 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2944 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2945 reg_map->pdma.glo_cfg);
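/* Point every GDMA port at the destination selected by @config (the PDMA, or
 * the PPE when flow offload is active), enable RX checksum offload, tag
 * traffic from a DSA master with the MTK special tag, and pulse the PSE
 * reset. Not applicable to MT7628.
 */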
2951 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
2955 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2958 for (i = 0; i < MTK_MAC_COUNT; i++) {
2959 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2961 /* by default, set up the forward port to send frames to the PDMA */
2964 /* Enable RX checksum */
2965 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2969 if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
2970 val |= MTK_GDMA_SPECIAL_TAG;
2972 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2974 /* Reset and enable PSE */
2975 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2976 mtk_w32(eth, 0, MTK_RST_GL);
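/* Both MACs share one set of DMA rings, so the rings, NAPI and interrupts
 * are only brought up by the first opener (tracked via dma_refcnt). When the
 * SoC provides flow offload, the PPE units are started and the GDM ports are
 * pointed at them through the gdma_to_ppe value.
 */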
2979 static int mtk_open(struct net_device *dev)
2981 struct mtk_mac *mac = netdev_priv(dev);
2982 struct mtk_eth *eth = mac->hw;
2985 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2987 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2992 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2993 if (!refcount_read(ð->dma_refcnt)) {
2994 const struct mtk_soc_data *soc = eth->soc;
2998 err = mtk_start_dma(eth);
3002 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3003 mtk_ppe_start(eth->ppe[i]);
3005 gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3007 mtk_gdm_config(eth, gdm_config);
3009 napi_enable(ð->tx_napi);
3010 napi_enable(ð->rx_napi);
3011 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3012 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3013 refcount_set(ð->dma_refcnt, 1);
3016 refcount_inc(ð->dma_refcnt);
3018 phylink_start(mac->phylink);
3019 netif_start_queue(dev);
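/* Clear the DMA enable bits under page_lock and poll a few times for the
 * engine to report idle before the rings are torn down.
 */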
3023 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3028 /* stop the dma engine */
3029 spin_lock_bh(ð->page_lock);
3030 val = mtk_r32(eth, glo_cfg);
3031 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3033 spin_unlock_bh(ð->page_lock);
3035 /* wait for dma stop */
3036 for (i = 0; i < 10; i++) {
3037 val = mtk_r32(eth, glo_cfg);
3038 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3046 static int mtk_stop(struct net_device *dev)
3048 struct mtk_mac *mac = netdev_priv(dev);
3049 struct mtk_eth *eth = mac->hw;
3052 phylink_stop(mac->phylink);
3054 netif_tx_disable(dev);
3056 phylink_disconnect_phy(mac->phylink);
3058 /* only shut down the DMA if this is the last user */
3059 if (!refcount_dec_and_test(ð->dma_refcnt))
3062 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3064 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3065 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3066 napi_disable(ð->tx_napi);
3067 napi_disable(ð->rx_napi);
3069 cancel_work_sync(ð->rx_dim.work);
3070 cancel_work_sync(ð->tx_dim.work);
3072 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3073 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3074 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3078 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3079 mtk_ppe_stop(eth->ppe[i]);
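/* Install or remove an XDP program. XDP is mutually exclusive with HW LRO
 * and with MTUs larger than MTK_PP_MAX_BUF_SIZE, and a running interface is
 * cycled down/up around the program swap. Illustrative usage from userspace
 * (device and object file names are only examples):
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *   ip link set dev eth0 xdp off
 */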
3084 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3085 struct netlink_ext_ack *extack)
3087 struct mtk_mac *mac = netdev_priv(dev);
3088 struct mtk_eth *eth = mac->hw;
3089 struct bpf_prog *old_prog;
3093 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3097 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3098 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3102 need_update = !!eth->prog != !!prog;
3103 if (netif_running(dev) && need_update)
3106 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3108 bpf_prog_put(old_prog);
3110 if (netif_running(dev) && need_update)
3111 return mtk_open(dev);
3116 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3118 switch (xdp->command) {
3119 case XDP_SETUP_PROG:
3120 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3126 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3128 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3132 usleep_range(1000, 1100);
3133 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3139 static void mtk_clk_disable(struct mtk_eth *eth)
3143 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3144 clk_disable_unprepare(eth->clks[clk]);
3147 static int mtk_clk_enable(struct mtk_eth *eth)
3151 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3152 ret = clk_prepare_enable(eth->clks[clk]);
3154 goto err_disable_clks;
3161 clk_disable_unprepare(eth->clks[clk]);
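/* Dynamic interrupt moderation: net_dim picks a usec/packet profile from the
 * observed completion rate and these workers translate it into the PDMA (and,
 * with QDMA, the QDMA) delay-interrupt fields. Times are programmed in 20us
 * units and both fields are clamped to their register widths.
 */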
3166 static void mtk_dim_rx(struct work_struct *work)
3168 struct dim *dim = container_of(work, struct dim, work);
3169 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3170 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3171 struct dim_cq_moder cur_profile;
3174 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3176 spin_lock_bh(ð->dim_lock);
3178 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3179 val &= MTK_PDMA_DELAY_TX_MASK;
3180 val |= MTK_PDMA_DELAY_RX_EN;
3182 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3183 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3185 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3186 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3188 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3189 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3190 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3192 spin_unlock_bh(ð->dim_lock);
3194 dim->state = DIM_START_MEASURE;
3197 static void mtk_dim_tx(struct work_struct *work)
3199 struct dim *dim = container_of(work, struct dim, work);
3200 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3201 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3202 struct dim_cq_moder cur_profile;
3205 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3207 spin_lock_bh(ð->dim_lock);
3209 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3210 val &= MTK_PDMA_DELAY_RX_MASK;
3211 val |= MTK_PDMA_DELAY_TX_EN;
3213 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3214 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3216 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3217 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3219 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3220 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3221 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3223 spin_unlock_bh(ð->dim_lock);
3225 dim->state = DIM_START_MEASURE;
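/* One-time hardware bring-up: enable runtime PM and the clocks, reset the
 * frame engine (device_reset() on MT7628, the ethsys RSTCTRL bits elsewhere),
 * force every GMAC link down until phylink configures it, set up interrupt
 * grouping and delays and, on NETSYS v2 parts, the PSE queue thresholds.
 */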
3228 static int mtk_hw_init(struct mtk_eth *eth)
3230 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3231 ETHSYS_DMA_AG_MAP_PPE;
3232 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3235 if (test_and_set_bit(MTK_HW_INIT, ð->state))
3238 pm_runtime_enable(eth->dev);
3239 pm_runtime_get_sync(eth->dev);
3241 ret = mtk_clk_enable(eth);
3243 goto err_disable_pm;
3246 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3247 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3249 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3250 ret = device_reset(eth->dev);
3252 dev_err(eth->dev, "MAC reset failed!\n");
3253 goto err_disable_pm;
3256 /* set interrupt delays based on current Net DIM sample */
3257 mtk_dim_rx(ð->rx_dim.work);
3258 mtk_dim_tx(ð->tx_dim.work);
3260 /* disable delay and normal interrupt */
3261 mtk_tx_irq_disable(eth, ~0);
3262 mtk_rx_irq_disable(eth, ~0);
3267 val = RSTCTRL_FE | RSTCTRL_PPE;
3268 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3269 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3272 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3273 val |= RSTCTRL_PPE1;
3276 ethsys_reset(eth, val);
3278 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3279 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3282 /* Set FE to PDMAv2 if necessary */
3283 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3284 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3288 /* Set GE2 driving and slew rate */
3289 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3292 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3295 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3298 /* Set link down as the default for each GMAC; each GMAC's MCR is
3299 * programmed with a more appropriate value once mtk_mac_config() is called. */
3302 for (i = 0; i < MTK_MAC_COUNT; i++)
3303 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3305 /* Tell the CDM to parse the MTK special tag on frames coming from the CPU;
3306 * this also works for untagged packets. */
3308 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3309 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3311 /* Enable RX VLAN offloading */
3312 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3314 /* set interrupt delays based on current Net DIM sample */
3315 mtk_dim_rx(ð->rx_dim.work);
3316 mtk_dim_tx(ð->tx_dim.work);
3318 /* disable delay and normal interrupt */
3319 mtk_tx_irq_disable(eth, ~0);
3320 mtk_rx_irq_disable(eth, ~0);
3322 /* FE int grouping */
3323 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3324 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3325 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3326 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3327 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3329 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3330 /* PSE should not drop port8 and port9 packets */
3331 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3333 /* PSE Free Queue Flow Control */
3334 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3336 /* PSE config input queue threshold */
3337 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3338 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3339 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3340 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3341 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3342 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3343 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3344 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3346 /* PSE config output queue threshold */
3347 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3348 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3349 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3350 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3351 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3352 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3353 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3354 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3356 /* GDM and CDM Threshold */
3357 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3358 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3359 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3360 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3361 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3362 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3368 pm_runtime_put_sync(eth->dev);
3369 pm_runtime_disable(eth->dev);
3374 static int mtk_hw_deinit(struct mtk_eth *eth)
3376 if (!test_and_clear_bit(MTK_HW_INIT, ð->state))
3379 mtk_clk_disable(eth);
3381 pm_runtime_put_sync(eth->dev);
3382 pm_runtime_disable(eth->dev);
3387 static int __init mtk_init(struct net_device *dev)
3389 struct mtk_mac *mac = netdev_priv(dev);
3390 struct mtk_eth *eth = mac->hw;
3393 ret = of_get_ethdev_address(mac->of_node, dev);
3395 /* If the mac address is invalid, use random mac address */
3396 eth_hw_addr_random(dev);
3397 dev_err(eth->dev, "generated random MAC address %pM\n",
3404 static void mtk_uninit(struct net_device *dev)
3406 struct mtk_mac *mac = netdev_priv(dev);
3407 struct mtk_eth *eth = mac->hw;
3409 phylink_disconnect_phy(mac->phylink);
3410 mtk_tx_irq_disable(eth, ~0);
3411 mtk_rx_irq_disable(eth, ~0);
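/* The MAC only understands a handful of maximum-RX-length buckets, so the
 * smallest of 1518/1536/1552/2048 bytes that still fits new_mtu plus
 * MTK_RX_ETH_HLEN is programmed into MAC_MCR. XDP additionally caps the MTU
 * at MTK_PP_MAX_BUF_SIZE.
 */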
3414 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3416 int length = new_mtu + MTK_RX_ETH_HLEN;
3417 struct mtk_mac *mac = netdev_priv(dev);
3418 struct mtk_eth *eth = mac->hw;
3419 u32 mcr_cur, mcr_new;
3421 if (rcu_access_pointer(eth->prog) &&
3422 length > MTK_PP_MAX_BUF_SIZE) {
3423 netdev_err(dev, "Invalid MTU for XDP mode\n");
3427 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3428 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3429 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3432 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3433 else if (length <= 1536)
3434 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3435 else if (length <= 1552)
3436 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3438 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3440 if (mcr_new != mcr_cur)
3441 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3449 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3451 struct mtk_mac *mac = netdev_priv(dev);
3457 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
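/* Reset worker scheduled from the TX timeout handler: every running netdev
 * is stopped so the DMA shuts down cleanly, the default pinctrl state is
 * re-selected, and the devices that were up are opened again (or closed if
 * that fails).
 */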
3465 static void mtk_pending_work(struct work_struct *work)
3467 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3469 unsigned long restart = 0;
3473 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
3475 while (test_and_set_bit_lock(MTK_RESETTING, ð->state))
3478 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
3479 /* stop all devices to make sure that dma is properly shut down */
3480 for (i = 0; i < MTK_MAC_COUNT; i++) {
3481 if (!eth->netdev[i])
3483 mtk_stop(eth->netdev[i]);
3484 __set_bit(i, &restart);
3486 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
3488 /* restart underlying hardware such as power, clock, pin mux
3489 * and the connected phy */
3494 pinctrl_select_state(eth->dev->pins->p,
3495 eth->dev->pins->default_state);
3498 /* restart DMA and enable IRQs */
3499 for (i = 0; i < MTK_MAC_COUNT; i++) {
3500 if (!test_bit(i, &restart))
3502 err = mtk_open(eth->netdev[i]);
3504 netif_alert(eth, ifup, eth->netdev[i],
3505 "Driver up/down cycle failed, closing device.\n");
3506 dev_close(eth->netdev[i]);
3510 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
3512 clear_bit_unlock(MTK_RESETTING, ð->state);
3517 static int mtk_free_dev(struct mtk_eth *eth)
3521 for (i = 0; i < MTK_MAC_COUNT; i++) {
3522 if (!eth->netdev[i])
3524 free_netdev(eth->netdev[i]);
3530 static int mtk_unreg_dev(struct mtk_eth *eth)
3534 for (i = 0; i < MTK_MAC_COUNT; i++) {
3535 if (!eth->netdev[i])
3537 unregister_netdev(eth->netdev[i]);
3543 static int mtk_cleanup(struct mtk_eth *eth)
3547 cancel_work_sync(ð->pending_work);
3552 static int mtk_get_link_ksettings(struct net_device *ndev,
3553 struct ethtool_link_ksettings *cmd)
3555 struct mtk_mac *mac = netdev_priv(ndev);
3557 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3560 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3563 static int mtk_set_link_ksettings(struct net_device *ndev,
3564 const struct ethtool_link_ksettings *cmd)
3566 struct mtk_mac *mac = netdev_priv(ndev);
3568 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3571 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3574 static void mtk_get_drvinfo(struct net_device *dev,
3575 struct ethtool_drvinfo *info)
3577 struct mtk_mac *mac = netdev_priv(dev);
3579 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3580 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3581 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3584 static u32 mtk_get_msglevel(struct net_device *dev)
3586 struct mtk_mac *mac = netdev_priv(dev);
3588 return mac->hw->msg_enable;
3591 static void mtk_set_msglevel(struct net_device *dev, u32 value)
3593 struct mtk_mac *mac = netdev_priv(dev);
3595 mac->hw->msg_enable = value;
3598 static int mtk_nway_reset(struct net_device *dev)
3600 struct mtk_mac *mac = netdev_priv(dev);
3602 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3608 return phylink_ethtool_nway_reset(mac->phylink);
3611 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3615 switch (stringset) {
3616 case ETH_SS_STATS: {
3617 struct mtk_mac *mac = netdev_priv(dev);
3619 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3620 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3621 data += ETH_GSTRING_LEN;
3623 if (mtk_page_pool_enabled(mac->hw))
3624 page_pool_ethtool_stats_get_strings(data);
3632 static int mtk_get_sset_count(struct net_device *dev, int sset)
3635 case ETH_SS_STATS: {
3636 int count = ARRAY_SIZE(mtk_ethtool_stats);
3637 struct mtk_mac *mac = netdev_priv(dev);
3639 if (mtk_page_pool_enabled(mac->hw))
3640 count += page_pool_ethtool_stats_get_count();
3648 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
3650 struct page_pool_stats stats = {};
3653 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
3654 struct mtk_rx_ring *ring = ð->rx_ring[i];
3656 if (!ring->page_pool)
3659 page_pool_get_stats(ring->page_pool, &stats);
3661 page_pool_ethtool_stats_get(data, &stats);
3664 static void mtk_get_ethtool_stats(struct net_device *dev,
3665 struct ethtool_stats *stats, u64 *data)
3667 struct mtk_mac *mac = netdev_priv(dev);
3668 struct mtk_hw_stats *hwstats = mac->hw_stats;
3669 u64 *data_src, *data_dst;
3673 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3676 if (netif_running(dev) && netif_device_present(dev)) {
3677 if (spin_trylock_bh(&hwstats->stats_lock)) {
3678 mtk_stats_update_mac(mac);
3679 spin_unlock_bh(&hwstats->stats_lock);
3683 data_src = (u64 *)hwstats;
3687 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3689 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3690 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3691 if (mtk_page_pool_enabled(mac->hw))
3692 mtk_ethtool_pp_stats(mac->hw, data_dst);
3693 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3696 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3699 int ret = -EOPNOTSUPP;
3702 case ETHTOOL_GRXRINGS:
3703 if (dev->hw_features & NETIF_F_LRO) {
3704 cmd->data = MTK_MAX_RX_RING_NUM;
3708 case ETHTOOL_GRXCLSRLCNT:
3709 if (dev->hw_features & NETIF_F_LRO) {
3710 struct mtk_mac *mac = netdev_priv(dev);
3712 cmd->rule_cnt = mac->hwlro_ip_cnt;
3716 case ETHTOOL_GRXCLSRULE:
3717 if (dev->hw_features & NETIF_F_LRO)
3718 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3720 case ETHTOOL_GRXCLSRLALL:
3721 if (dev->hw_features & NETIF_F_LRO)
3722 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3732 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3734 int ret = -EOPNOTSUPP;
3737 case ETHTOOL_SRXCLSRLINS:
3738 if (dev->hw_features & NETIF_F_LRO)
3739 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3741 case ETHTOOL_SRXCLSRLDEL:
3742 if (dev->hw_features & NETIF_F_LRO)
3743 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3752 static const struct ethtool_ops mtk_ethtool_ops = {
3753 .get_link_ksettings = mtk_get_link_ksettings,
3754 .set_link_ksettings = mtk_set_link_ksettings,
3755 .get_drvinfo = mtk_get_drvinfo,
3756 .get_msglevel = mtk_get_msglevel,
3757 .set_msglevel = mtk_set_msglevel,
3758 .nway_reset = mtk_nway_reset,
3759 .get_link = ethtool_op_get_link,
3760 .get_strings = mtk_get_strings,
3761 .get_sset_count = mtk_get_sset_count,
3762 .get_ethtool_stats = mtk_get_ethtool_stats,
3763 .get_rxnfc = mtk_get_rxnfc,
3764 .set_rxnfc = mtk_set_rxnfc,
3767 static const struct net_device_ops mtk_netdev_ops = {
3768 .ndo_init = mtk_init,
3769 .ndo_uninit = mtk_uninit,
3770 .ndo_open = mtk_open,
3771 .ndo_stop = mtk_stop,
3772 .ndo_start_xmit = mtk_start_xmit,
3773 .ndo_set_mac_address = mtk_set_mac_address,
3774 .ndo_validate_addr = eth_validate_addr,
3775 .ndo_eth_ioctl = mtk_do_ioctl,
3776 .ndo_change_mtu = mtk_change_mtu,
3777 .ndo_tx_timeout = mtk_tx_timeout,
3778 .ndo_get_stats64 = mtk_get_stats64,
3779 .ndo_fix_features = mtk_fix_features,
3780 .ndo_set_features = mtk_set_features,
3781 #ifdef CONFIG_NET_POLL_CONTROLLER
3782 .ndo_poll_controller = mtk_poll_controller,
3784 .ndo_setup_tc = mtk_eth_setup_tc,
3786 .ndo_xdp_xmit = mtk_xdp_xmit,
3789 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3791 const __be32 *_id = of_get_property(np, "reg", NULL);
3792 phy_interface_t phy_mode;
3793 struct phylink *phylink;
3794 struct mtk_mac *mac;
3798 dev_err(eth->dev, "missing mac id\n");
3802 id = be32_to_cpup(_id);
3803 if (id >= MTK_MAC_COUNT) {
3804 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3808 if (eth->netdev[id]) {
3809 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3813 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3814 if (!eth->netdev[id]) {
3815 dev_err(eth->dev, "alloc_etherdev failed\n");
3818 mac = netdev_priv(eth->netdev[id]);
3824 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3825 mac->hwlro_ip_cnt = 0;
3827 mac->hw_stats = devm_kzalloc(eth->dev,
3828 sizeof(*mac->hw_stats),
3830 if (!mac->hw_stats) {
3831 dev_err(eth->dev, "failed to allocate counter memory\n");
3835 spin_lock_init(&mac->hw_stats->stats_lock);
3836 u64_stats_init(&mac->hw_stats->syncp);
3837 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3839 /* phylink create */
3840 err = of_get_phy_mode(np, &phy_mode);
3842 dev_err(eth->dev, "incorrect phy-mode\n");
3846 /* mac config is not set */
3847 mac->interface = PHY_INTERFACE_MODE_NA;
3848 mac->speed = SPEED_UNKNOWN;
3850 mac->phylink_config.dev = ð->netdev[id]->dev;
3851 mac->phylink_config.type = PHYLINK_NETDEV;
3852 /* This driver makes use of state->speed in mac_config */
3853 mac->phylink_config.legacy_pre_march2020 = true;
3854 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
3855 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
3857 __set_bit(PHY_INTERFACE_MODE_MII,
3858 mac->phylink_config.supported_interfaces);
3859 __set_bit(PHY_INTERFACE_MODE_GMII,
3860 mac->phylink_config.supported_interfaces);
3862 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
3863 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
3865 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
3866 __set_bit(PHY_INTERFACE_MODE_TRGMII,
3867 mac->phylink_config.supported_interfaces);
3869 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
3870 __set_bit(PHY_INTERFACE_MODE_SGMII,
3871 mac->phylink_config.supported_interfaces);
3872 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
3873 mac->phylink_config.supported_interfaces);
3874 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
3875 mac->phylink_config.supported_interfaces);
3878 phylink = phylink_create(&mac->phylink_config,
3879 of_fwnode_handle(mac->of_node),
3880 phy_mode, &mtk_phylink_ops);
3881 if (IS_ERR(phylink)) {
3882 err = PTR_ERR(phylink);
3886 mac->phylink = phylink;
3888 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3889 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3890 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3891 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3893 eth->netdev[id]->hw_features = eth->soc->hw_features;
3895 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3897 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3898 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3899 eth->netdev[id]->features |= eth->soc->hw_features;
3900 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3902 eth->netdev[id]->irq = eth->irq[0];
3903 eth->netdev[id]->dev.of_node = np;
3905 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3906 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
3908 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
3913 free_netdev(eth->netdev[id]);
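/* Swap the struct device used for DMA mappings (e.g. when the WED offload
 * block takes ownership of the rings): interfaces that are up are closed,
 * eth->dma_dev is replaced, and they are re-opened against the new device.
 */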
3917 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
3919 struct net_device *dev, *tmp;
3920 LIST_HEAD(dev_list);
3925 for (i = 0; i < MTK_MAC_COUNT; i++) {
3926 dev = eth->netdev[i];
3928 if (!dev || !(dev->flags & IFF_UP))
3931 list_add_tail(&dev->close_list, &dev_list);
3934 dev_close_many(&dev_list, false);
3936 eth->dma_dev = dma_dev;
3938 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
3939 list_del_init(&dev->close_list);
3940 dev_open(dev, NULL);
3946 static int mtk_probe(struct platform_device *pdev)
3948 struct resource *res = NULL;
3949 struct device_node *mac_np;
3950 struct mtk_eth *eth;
3953 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3957 eth->soc = of_device_get_match_data(&pdev->dev);
3959 eth->dev = &pdev->dev;
3960 eth->dma_dev = &pdev->dev;
3961 eth->base = devm_platform_ioremap_resource(pdev, 0);
3962 if (IS_ERR(eth->base))
3963 return PTR_ERR(eth->base);
3965 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3966 eth->ip_align = NET_IP_ALIGN;
3968 spin_lock_init(ð->page_lock);
3969 spin_lock_init(ð->tx_irq_lock);
3970 spin_lock_init(ð->rx_irq_lock);
3971 spin_lock_init(ð->dim_lock);
3973 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3974 INIT_WORK(ð->rx_dim.work, mtk_dim_rx);
3976 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3977 INIT_WORK(ð->tx_dim.work, mtk_dim_tx);
3979 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3980 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3982 if (IS_ERR(eth->ethsys)) {
3983 dev_err(&pdev->dev, "no ethsys regmap found\n");
3984 return PTR_ERR(eth->ethsys);
3988 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3989 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3990 "mediatek,infracfg");
3991 if (IS_ERR(eth->infra)) {
3992 dev_err(&pdev->dev, "no infracfg regmap found\n");
3993 return PTR_ERR(eth->infra);
3997 if (of_dma_is_coherent(pdev->dev.of_node)) {
4000 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4001 "cci-control-port");
4002 /* enable CPU/bus coherency */
4004 regmap_write(cci, 0, 3);
4007 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4008 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
4013 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
4014 eth->soc->ana_rgc3);
4020 if (eth->soc->required_pctl) {
4021 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4023 if (IS_ERR(eth->pctl)) {
4024 dev_err(&pdev->dev, "no pctl regmap found\n");
4025 return PTR_ERR(eth->pctl);
4029 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
4030 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4035 if (eth->soc->offload_version) {
4037 struct device_node *np;
4038 phys_addr_t wdma_phy;
4041 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4044 np = of_parse_phandle(pdev->dev.of_node,
4049 wdma_base = eth->soc->reg_map->wdma_base[i];
4050 wdma_phy = res ? res->start + wdma_base : 0;
4051 mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4056 for (i = 0; i < 3; i++) {
4057 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4058 eth->irq[i] = eth->irq[0];
4060 eth->irq[i] = platform_get_irq(pdev, i);
4061 if (eth->irq[i] < 0) {
4062 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4067 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4068 eth->clks[i] = devm_clk_get(eth->dev,
4069 mtk_clks_source_name[i]);
4070 if (IS_ERR(eth->clks[i])) {
4071 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4072 err = -EPROBE_DEFER;
4075 if (eth->soc->required_clks & BIT(i)) {
4076 dev_err(&pdev->dev, "clock %s not found\n",
4077 mtk_clks_source_name[i]);
4081 eth->clks[i] = NULL;
4085 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4086 INIT_WORK(ð->pending_work, mtk_pending_work);
4088 err = mtk_hw_init(eth);
4092 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4094 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4095 if (!of_device_is_compatible(mac_np,
4096 "mediatek,eth-mac"))
4099 if (!of_device_is_available(mac_np))
4102 err = mtk_add_mac(eth, mac_np);
4104 of_node_put(mac_np);
4109 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4110 err = devm_request_irq(eth->dev, eth->irq[0],
4112 dev_name(eth->dev), eth);
4114 err = devm_request_irq(eth->dev, eth->irq[1],
4115 mtk_handle_irq_tx, 0,
4116 dev_name(eth->dev), eth);
4120 err = devm_request_irq(eth->dev, eth->irq[2],
4121 mtk_handle_irq_rx, 0,
4122 dev_name(eth->dev), eth);
4127 /* No MT7628/88 support yet */
4128 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4129 err = mtk_mdio_init(eth);
4134 if (eth->soc->offload_version) {
4137 num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
4138 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4139 for (i = 0; i < num_ppe; i++) {
4140 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4142 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
4143 eth->soc->offload_version, i);
4150 err = mtk_eth_offload_init(eth);
4155 for (i = 0; i < MTK_MAX_DEVS; i++) {
4156 if (!eth->netdev[i])
4159 err = register_netdev(eth->netdev[i]);
4161 dev_err(eth->dev, "error bringing up device\n");
4162 goto err_deinit_mdio;
4164 netif_info(eth, probe, eth->netdev[i],
4165 "mediatek frame engine at 0x%08lx, irq %d\n",
4166 eth->netdev[i]->base_addr, eth->irq[0]);
4169 /* we run 2 devices on the same DMA ring so we need a dummy device for NAPI to work */
4172 init_dummy_netdev(ð->dummy_dev);
4173 netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx);
4174 netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx);
4176 platform_set_drvdata(pdev, eth);
4181 mtk_mdio_cleanup(eth);
4192 static int mtk_remove(struct platform_device *pdev)
4194 struct mtk_eth *eth = platform_get_drvdata(pdev);
4195 struct mtk_mac *mac;
4198 /* stop all devices to make sure that dma is properly shut down */
4199 for (i = 0; i < MTK_MAC_COUNT; i++) {
4200 if (!eth->netdev[i])
4202 mtk_stop(eth->netdev[i]);
4203 mac = netdev_priv(eth->netdev[i]);
4204 phylink_disconnect_phy(mac->phylink);
4210 netif_napi_del(ð->tx_napi);
4211 netif_napi_del(ð->rx_napi);
4213 mtk_mdio_cleanup(eth);
4218 static const struct mtk_soc_data mt2701_data = {
4219 .reg_map = &mtk_reg_map,
4220 .caps = MT7623_CAPS | MTK_HWLRO,
4221 .hw_features = MTK_HW_FEATURES,
4222 .required_clks = MT7623_CLKS_BITMAP,
4223 .required_pctl = true,
4225 .txd_size = sizeof(struct mtk_tx_dma),
4226 .rxd_size = sizeof(struct mtk_rx_dma),
4227 .rx_irq_done_mask = MTK_RX_DONE_INT,
4228 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4229 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4230 .dma_len_offset = 16,
4234 static const struct mtk_soc_data mt7621_data = {
4235 .reg_map = &mtk_reg_map,
4236 .caps = MT7621_CAPS,
4237 .hw_features = MTK_HW_FEATURES,
4238 .required_clks = MT7621_CLKS_BITMAP,
4239 .required_pctl = false,
4240 .offload_version = 2,
4242 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4244 .txd_size = sizeof(struct mtk_tx_dma),
4245 .rxd_size = sizeof(struct mtk_rx_dma),
4246 .rx_irq_done_mask = MTK_RX_DONE_INT,
4247 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4248 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4249 .dma_len_offset = 16,
4253 static const struct mtk_soc_data mt7622_data = {
4254 .reg_map = &mtk_reg_map,
4256 .caps = MT7622_CAPS | MTK_HWLRO,
4257 .hw_features = MTK_HW_FEATURES,
4258 .required_clks = MT7622_CLKS_BITMAP,
4259 .required_pctl = false,
4260 .offload_version = 2,
4262 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4264 .txd_size = sizeof(struct mtk_tx_dma),
4265 .rxd_size = sizeof(struct mtk_rx_dma),
4266 .rx_irq_done_mask = MTK_RX_DONE_INT,
4267 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4268 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4269 .dma_len_offset = 16,
4273 static const struct mtk_soc_data mt7623_data = {
4274 .reg_map = &mtk_reg_map,
4275 .caps = MT7623_CAPS | MTK_HWLRO,
4276 .hw_features = MTK_HW_FEATURES,
4277 .required_clks = MT7623_CLKS_BITMAP,
4278 .required_pctl = true,
4279 .offload_version = 2,
4281 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4283 .txd_size = sizeof(struct mtk_tx_dma),
4284 .rxd_size = sizeof(struct mtk_rx_dma),
4285 .rx_irq_done_mask = MTK_RX_DONE_INT,
4286 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4287 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4288 .dma_len_offset = 16,
4292 static const struct mtk_soc_data mt7629_data = {
4293 .reg_map = &mtk_reg_map,
4295 .caps = MT7629_CAPS | MTK_HWLRO,
4296 .hw_features = MTK_HW_FEATURES,
4297 .required_clks = MT7629_CLKS_BITMAP,
4298 .required_pctl = false,
4300 .txd_size = sizeof(struct mtk_tx_dma),
4301 .rxd_size = sizeof(struct mtk_rx_dma),
4302 .rx_irq_done_mask = MTK_RX_DONE_INT,
4303 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4304 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4305 .dma_len_offset = 16,
4309 static const struct mtk_soc_data mt7986_data = {
4310 .reg_map = &mt7986_reg_map,
4312 .caps = MT7986_CAPS,
4313 .hw_features = MTK_HW_FEATURES,
4314 .required_clks = MT7986_CLKS_BITMAP,
4315 .required_pctl = false,
4317 .foe_entry_size = sizeof(struct mtk_foe_entry),
4319 .txd_size = sizeof(struct mtk_tx_dma_v2),
4320 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4321 .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4322 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4323 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4324 .dma_len_offset = 8,
4328 static const struct mtk_soc_data rt5350_data = {
4329 .reg_map = &mt7628_reg_map,
4330 .caps = MT7628_CAPS,
4331 .hw_features = MTK_HW_FEATURES_MT7628,
4332 .required_clks = MT7628_CLKS_BITMAP,
4333 .required_pctl = false,
4335 .txd_size = sizeof(struct mtk_tx_dma),
4336 .rxd_size = sizeof(struct mtk_rx_dma),
4337 .rx_irq_done_mask = MTK_RX_DONE_INT,
4338 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4339 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4340 .dma_len_offset = 16,
4344 const struct of_device_id of_mtk_match[] = {
4345 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4346 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4347 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4348 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4349 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4350 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4351 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4354 MODULE_DEVICE_TABLE(of, of_mtk_match);
4356 static struct platform_driver mtk_driver = {
4358 .remove = mtk_remove,
4360 .name = "mtk_soc_eth",
4361 .of_match_table = of_mtk_match,
4365 module_platform_driver(mtk_driver);
4367 MODULE_LICENSE("GPL");
4368 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4369 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");