1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/jhash.h>
24 #include <linux/bitfield.h>
27 #include "mtk_eth_soc.h"
30 static int mtk_msg_level = -1;
31 module_param_named(msg_level, mtk_msg_level, int, 0);
32 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
34 #define MTK_ETHTOOL_STAT(x) { #x, \
35 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
37 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
38 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
41 static const struct mtk_reg_map mtk_reg_map = {
42 .tx_irq_mask = 0x1a1c,
43 .tx_irq_status = 0x1a18,
78 static const struct mtk_reg_map mt7628_reg_map = {
79 .tx_irq_mask = 0x0a28,
80 .tx_irq_status = 0x0a20,
94 static const struct mtk_reg_map mt7986_reg_map = {
95 .tx_irq_mask = 0x461c,
96 .tx_irq_status = 0x4618,
104 .irq_status = 0x6220,
111 .rx_cnt_cfg = 0x4504,
131 /* strings used by ethtool */
132 static const struct mtk_ethtool_stats {
133 char str[ETH_GSTRING_LEN];
135 } mtk_ethtool_stats[] = {
136 MTK_ETHTOOL_STAT(tx_bytes),
137 MTK_ETHTOOL_STAT(tx_packets),
138 MTK_ETHTOOL_STAT(tx_skip),
139 MTK_ETHTOOL_STAT(tx_collisions),
140 MTK_ETHTOOL_STAT(rx_bytes),
141 MTK_ETHTOOL_STAT(rx_packets),
142 MTK_ETHTOOL_STAT(rx_overflow),
143 MTK_ETHTOOL_STAT(rx_fcs_errors),
144 MTK_ETHTOOL_STAT(rx_short_errors),
145 MTK_ETHTOOL_STAT(rx_long_errors),
146 MTK_ETHTOOL_STAT(rx_checksum_errors),
147 MTK_ETHTOOL_STAT(rx_flow_control_packets),
148 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
149 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
150 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
151 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
152 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
153 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
154 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
157 static const char * const mtk_clks_source_name[] = {
158 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
159 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
160 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
161 "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
164 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
166 __raw_writel(val, eth->base + reg);
169 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
171 return __raw_readl(eth->base + reg);
174 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
178 val = mtk_r32(eth, reg);
181 mtk_w32(eth, val, reg);
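/* Poll the PHY indirect access control register until the ACCESS bit
 * clears, or give up after PHY_IAC_TIMEOUT and log an error.
 */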
185 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
187 unsigned long t_start = jiffies;
190 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
192 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
197 dev_err(eth->dev, "mdio: MDIO timeout\n");
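/* Perform a Clause 22 or Clause 45 MDIO write through the PHY_IAC
 * register, waiting for the bus to go idle before each access cycle.
 */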
201 static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
206 ret = mtk_mdio_busy_wait(eth);
210 if (phy_reg & MII_ADDR_C45) {
211 mtk_w32(eth, PHY_IAC_ACCESS |
213 PHY_IAC_CMD_C45_ADDR |
214 PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
215 PHY_IAC_ADDR(phy_addr) |
216 PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
219 ret = mtk_mdio_busy_wait(eth);
223 mtk_w32(eth, PHY_IAC_ACCESS |
226 PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
227 PHY_IAC_ADDR(phy_addr) |
228 PHY_IAC_DATA(write_data),
231 mtk_w32(eth, PHY_IAC_ACCESS |
234 PHY_IAC_REG(phy_reg) |
235 PHY_IAC_ADDR(phy_addr) |
236 PHY_IAC_DATA(write_data),
240 ret = mtk_mdio_busy_wait(eth);
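/* Perform a Clause 22 or Clause 45 MDIO read through the PHY_IAC
 * register and return the 16-bit data field on success.
 */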
247 static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
251 ret = mtk_mdio_busy_wait(eth);
255 if (phy_reg & MII_ADDR_C45) {
256 mtk_w32(eth, PHY_IAC_ACCESS |
258 PHY_IAC_CMD_C45_ADDR |
259 PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
260 PHY_IAC_ADDR(phy_addr) |
261 PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
264 ret = mtk_mdio_busy_wait(eth);
268 mtk_w32(eth, PHY_IAC_ACCESS |
270 PHY_IAC_CMD_C45_READ |
271 PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
272 PHY_IAC_ADDR(phy_addr),
275 mtk_w32(eth, PHY_IAC_ACCESS |
277 PHY_IAC_CMD_C22_READ |
278 PHY_IAC_REG(phy_reg) |
279 PHY_IAC_ADDR(phy_addr),
283 ret = mtk_mdio_busy_wait(eth);
287 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
290 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
291 int phy_reg, u16 val)
293 struct mtk_eth *eth = bus->priv;
295 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
298 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
300 struct mtk_eth *eth = bus->priv;
302 return _mtk_mdio_read(eth, phy_addr, phy_reg);
305 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
306 phy_interface_t interface)
310 /* Check DDR memory type.
311 * Currently TRGMII mode with DDR2 memory is not supported.
313 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
314 if (interface == PHY_INTERFACE_MODE_TRGMII &&
315 val & SYSCFG_DRAM_TYPE_DDR2) {
317 "TRGMII mode with DDR2 memory is not supported!\n");
321 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
322 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
324 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
325 ETHSYS_TRGMII_MT7621_MASK, val);
330 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
331 phy_interface_t interface, int speed)
336 if (interface == PHY_INTERFACE_MODE_TRGMII) {
337 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
339 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
341 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
345 val = (speed == SPEED_1000) ?
346 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
347 mtk_w32(eth, val, INTF_MODE);
349 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
350 ETHSYS_TRGMII_CLK_SEL362_5,
351 ETHSYS_TRGMII_CLK_SEL362_5);
353 val = (speed == SPEED_1000) ? 250000000 : 500000000;
354 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
356 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
358 val = (speed == SPEED_1000) ?
359 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
360 mtk_w32(eth, val, TRGMII_RCK_CTRL);
362 val = (speed == SPEED_1000) ?
363 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
364 mtk_w32(eth, val, TRGMII_TCK_CTRL);
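/* phylink .mac_select_pcs callback: SGMII and 802.3z modes are handled
 * by the SGMII PCS; pick the instance for this GMAC (a single PCS may
 * be shared when the SoC has MTK_SHARED_SGMII).
 */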
367 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
368 phy_interface_t interface)
370 struct mtk_mac *mac = container_of(config, struct mtk_mac,
372 struct mtk_eth *eth = mac->hw;
375 if (interface == PHY_INTERFACE_MODE_SGMII ||
376 phy_interface_mode_is_8023z(interface)) {
377 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
380 return mtk_sgmii_select_pcs(eth->sgmii, sid);
386 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
387 const struct phylink_link_state *state)
389 struct mtk_mac *mac = container_of(config, struct mtk_mac,
391 struct mtk_eth *eth = mac->hw;
392 int val, ge_mode, err = 0;
395 /* MT76x8 has no hardware settings for the MAC */
396 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
397 mac->interface != state->interface) {
398 /* Setup soc pin functions */
399 switch (state->interface) {
400 case PHY_INTERFACE_MODE_TRGMII:
403 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
407 case PHY_INTERFACE_MODE_RGMII_TXID:
408 case PHY_INTERFACE_MODE_RGMII_RXID:
409 case PHY_INTERFACE_MODE_RGMII_ID:
410 case PHY_INTERFACE_MODE_RGMII:
411 case PHY_INTERFACE_MODE_MII:
412 case PHY_INTERFACE_MODE_REVMII:
413 case PHY_INTERFACE_MODE_RMII:
414 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
415 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
420 case PHY_INTERFACE_MODE_1000BASEX:
421 case PHY_INTERFACE_MODE_2500BASEX:
422 case PHY_INTERFACE_MODE_SGMII:
423 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
424 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
429 case PHY_INTERFACE_MODE_GMII:
430 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
431 err = mtk_gmac_gephy_path_setup(eth, mac->id);
440 /* Setup clock for 1st gmac */
441 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
442 !phy_interface_mode_is_8023z(state->interface) &&
443 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
444 if (MTK_HAS_CAPS(mac->hw->soc->caps,
445 MTK_TRGMII_MT7621_CLK)) {
446 if (mt7621_gmac0_rgmii_adjust(mac->hw,
450 /* FIXME: this is incorrect. Not only does it
451 * use state->speed (which is not guaranteed
452 * to be correct) but it also makes use of it
453 * in a code path that will only be reachable
454 * when the PHY interface mode changes, not
455 * when the speed changes. Consequently, RGMII
456 * is probably broken.
458 mtk_gmac0_rgmii_adjust(mac->hw,
462 /* mt7623_pad_clk_setup */
463 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
465 TD_DM_DRVP(8) | TD_DM_DRVN(8),
468 /* Assert/release MT7623 RXC reset */
469 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
471 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
476 switch (state->interface) {
477 case PHY_INTERFACE_MODE_MII:
478 case PHY_INTERFACE_MODE_GMII:
481 case PHY_INTERFACE_MODE_REVMII:
484 case PHY_INTERFACE_MODE_RMII:
493 /* put the gmac into the right mode */
494 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
495 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
496 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
497 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
499 mac->interface = state->interface;
503 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
504 phy_interface_mode_is_8023z(state->interface)) {
505 /* The path GMAC to SGMII will be enabled once the SGMIISYS is
508 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
510 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
512 ~(u32)SYSCFG0_SGMII_MASK);
514 /* Save the syscfg0 value for mac_finish */
516 } else if (phylink_autoneg_inband(mode)) {
518 "In-band mode not supported in non SGMII mode!\n");
525 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
526 mac->id, phy_modes(state->interface));
530 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
531 mac->id, phy_modes(state->interface), err);
534 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
535 phy_interface_t interface)
537 struct mtk_mac *mac = container_of(config, struct mtk_mac,
539 struct mtk_eth *eth = mac->hw;
540 u32 mcr_cur, mcr_new;
543 if (interface == PHY_INTERFACE_MODE_SGMII ||
544 phy_interface_mode_is_8023z(interface))
545 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
546 SYSCFG0_SGMII_MASK, mac->syscfg0);
549 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
551 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
552 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
554 /* Only update control register when needed! */
555 if (mcr_new != mcr_cur)
556 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
561 static void mtk_mac_pcs_get_state(struct phylink_config *config,
562 struct phylink_link_state *state)
564 struct mtk_mac *mac = container_of(config, struct mtk_mac,
566 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
568 state->link = (pmsr & MAC_MSR_LINK);
569 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
571 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
573 state->speed = SPEED_10;
575 case MAC_MSR_SPEED_100:
576 state->speed = SPEED_100;
578 case MAC_MSR_SPEED_1000:
579 state->speed = SPEED_1000;
582 state->speed = SPEED_UNKNOWN;
586 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
587 if (pmsr & MAC_MSR_RX_FC)
588 state->pause |= MLO_PAUSE_RX;
589 if (pmsr & MAC_MSR_TX_FC)
590 state->pause |= MLO_PAUSE_TX;
593 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
594 phy_interface_t interface)
596 struct mtk_mac *mac = container_of(config, struct mtk_mac,
598 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
600 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
601 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
604 static void mtk_mac_link_up(struct phylink_config *config,
605 struct phy_device *phy,
606 unsigned int mode, phy_interface_t interface,
607 int speed, int duplex, bool tx_pause, bool rx_pause)
609 struct mtk_mac *mac = container_of(config, struct mtk_mac,
613 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
614 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
615 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
616 MAC_MCR_FORCE_RX_FC);
618 /* Configure speed */
622 mcr |= MAC_MCR_SPEED_1000;
625 mcr |= MAC_MCR_SPEED_100;
629 /* Configure duplex */
630 if (duplex == DUPLEX_FULL)
631 mcr |= MAC_MCR_FORCE_DPX;
633 /* Configure pause modes - phylink will avoid these for half duplex */
635 mcr |= MAC_MCR_FORCE_TX_FC;
637 mcr |= MAC_MCR_FORCE_RX_FC;
639 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
640 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
643 static const struct phylink_mac_ops mtk_phylink_ops = {
644 .validate = phylink_generic_validate,
645 .mac_select_pcs = mtk_mac_select_pcs,
646 .mac_pcs_get_state = mtk_mac_pcs_get_state,
647 .mac_config = mtk_mac_config,
648 .mac_finish = mtk_mac_finish,
649 .mac_link_down = mtk_mac_link_down,
650 .mac_link_up = mtk_mac_link_up,
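/* Register an MDIO bus backed by the "mdio-bus" child node of the
 * ethernet device tree node; accesses go through the shared PHY_IAC
 * indirect access register.
 */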
653 static int mtk_mdio_init(struct mtk_eth *eth)
655 struct device_node *mii_np;
658 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
660 dev_err(eth->dev, "no %s child node found", "mdio-bus");
664 if (!of_device_is_available(mii_np)) {
669 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
675 eth->mii_bus->name = "mdio";
676 eth->mii_bus->read = mtk_mdio_read;
677 eth->mii_bus->write = mtk_mdio_write;
678 eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
679 eth->mii_bus->priv = eth;
680 eth->mii_bus->parent = eth->dev;
682 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
683 ret = of_mdiobus_register(eth->mii_bus, mii_np);
690 static void mtk_mdio_cleanup(struct mtk_eth *eth)
695 mdiobus_unregister(eth->mii_bus);
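/* The TX/RX interrupt mask registers are updated read-modify-write,
 * so serialize mask changes with the corresponding irq spinlock.
 */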
698 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
703 spin_lock_irqsave(&eth->tx_irq_lock, flags);
704 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
705 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
706 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
709 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
714 spin_lock_irqsave(&eth->tx_irq_lock, flags);
715 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
716 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
717 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
720 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
725 spin_lock_irqsave(&eth->rx_irq_lock, flags);
726 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
727 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
728 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
731 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
736 spin_lock_irqsave(&eth->rx_irq_lock, flags);
737 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
738 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
739 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
742 static int mtk_set_mac_address(struct net_device *dev, void *p)
744 int ret = eth_mac_addr(dev, p);
745 struct mtk_mac *mac = netdev_priv(dev);
746 struct mtk_eth *eth = mac->hw;
747 const char *macaddr = dev->dev_addr;
752 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
755 spin_lock_bh(&mac->hw->page_lock);
756 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
757 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
758 MT7628_SDM_MAC_ADRH);
759 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
760 (macaddr[4] << 8) | macaddr[5],
761 MT7628_SDM_MAC_ADRL);
763 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
764 MTK_GDMA_MAC_ADRH(mac->id));
765 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
766 (macaddr[4] << 8) | macaddr[5],
767 MTK_GDMA_MAC_ADRL(mac->id));
769 spin_unlock_bh(&mac->hw->page_lock);
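/* Fold the hardware MIB counters into the 64-bit software counters.
 * MT7628 exposes a small set of SDM counters; other SoCs expose a
 * per-MAC counter window at reg_map->gdm1_cnt + reg_offset.
 */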
774 void mtk_stats_update_mac(struct mtk_mac *mac)
776 struct mtk_hw_stats *hw_stats = mac->hw_stats;
777 struct mtk_eth *eth = mac->hw;
779 u64_stats_update_begin(&hw_stats->syncp);
781 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
782 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
783 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
784 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
785 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
786 hw_stats->rx_checksum_errors +=
787 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
789 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
790 unsigned int offs = hw_stats->reg_offset;
793 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
794 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
796 hw_stats->rx_bytes += (stats << 32);
797 hw_stats->rx_packets +=
798 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
799 hw_stats->rx_overflow +=
800 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
801 hw_stats->rx_fcs_errors +=
802 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
803 hw_stats->rx_short_errors +=
804 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
805 hw_stats->rx_long_errors +=
806 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
807 hw_stats->rx_checksum_errors +=
808 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
809 hw_stats->rx_flow_control_packets +=
810 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
812 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
813 hw_stats->tx_collisions +=
814 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
815 hw_stats->tx_bytes +=
816 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
817 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
819 hw_stats->tx_bytes += (stats << 32);
820 hw_stats->tx_packets +=
821 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
824 u64_stats_update_end(&hw_stats->syncp);
827 static void mtk_stats_update(struct mtk_eth *eth)
831 for (i = 0; i < MTK_MAC_COUNT; i++) {
832 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
834 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
835 mtk_stats_update_mac(eth->mac[i]);
836 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
841 static void mtk_get_stats64(struct net_device *dev,
842 struct rtnl_link_stats64 *storage)
844 struct mtk_mac *mac = netdev_priv(dev);
845 struct mtk_hw_stats *hw_stats = mac->hw_stats;
848 if (netif_running(dev) && netif_device_present(dev)) {
849 if (spin_trylock_bh(&hw_stats->stats_lock)) {
850 mtk_stats_update_mac(mac);
851 spin_unlock_bh(&hw_stats->stats_lock);
856 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
857 storage->rx_packets = hw_stats->rx_packets;
858 storage->tx_packets = hw_stats->tx_packets;
859 storage->rx_bytes = hw_stats->rx_bytes;
860 storage->tx_bytes = hw_stats->tx_bytes;
861 storage->collisions = hw_stats->tx_collisions;
862 storage->rx_length_errors = hw_stats->rx_short_errors +
863 hw_stats->rx_long_errors;
864 storage->rx_over_errors = hw_stats->rx_overflow;
865 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
866 storage->rx_errors = hw_stats->rx_checksum_errors;
867 storage->tx_aborted_errors = hw_stats->tx_skip;
868 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
870 storage->tx_errors = dev->stats.tx_errors;
871 storage->rx_dropped = dev->stats.rx_dropped;
872 storage->tx_dropped = dev->stats.tx_dropped;
875 static inline int mtk_max_frag_size(int mtu)
877 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH_2K */
878 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
879 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
881 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
882 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
885 static inline int mtk_max_buf_size(int frag_size)
887 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
888 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
890 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
895 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
896 struct mtk_rx_dma_v2 *dma_rxd)
898 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
899 if (!(rxd->rxd2 & RX_DMA_DONE))
902 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
903 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
904 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
905 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
906 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
907 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
913 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
915 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
918 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
924 /* the qdma core needs scratch memory to be set up */
925 static int mtk_init_fq_dma(struct mtk_eth *eth)
927 const struct mtk_soc_data *soc = eth->soc;
928 dma_addr_t phy_ring_tail;
929 int cnt = MTK_DMA_SIZE;
933 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
934 cnt * soc->txrx.txd_size,
935 &eth->phy_scratch_ring,
937 if (unlikely(!eth->scratch_ring))
940 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
941 if (unlikely(!eth->scratch_head))
944 dma_addr = dma_map_single(eth->dma_dev,
945 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
947 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
950 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
952 for (i = 0; i < cnt; i++) {
953 struct mtk_tx_dma_v2 *txd;
955 txd = eth->scratch_ring + i * soc->txrx.txd_size;
956 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
958 txd->txd2 = eth->phy_scratch_ring +
959 (i + 1) * soc->txrx.txd_size;
961 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
963 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
971 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
972 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
973 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
974 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
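/* Helpers to translate between QDMA descriptor bus addresses, ring
 * indices and the matching tx_buf / shadow PDMA descriptor.
 */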
979 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
981 return ring->dma + (desc - ring->phys);
984 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
985 void *txd, u32 txd_size)
987 int idx = (txd - ring->dma) / txd_size;
989 return &ring->buf[idx];
992 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
993 struct mtk_tx_dma *dma)
995 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
998 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1000 return (dma - ring->dma) / txd_size;
1003 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1004 struct xdp_frame_bulk *bq, bool napi)
1006 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1007 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1008 dma_unmap_single(eth->dma_dev,
1009 dma_unmap_addr(tx_buf, dma_addr0),
1010 dma_unmap_len(tx_buf, dma_len0),
1012 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1013 dma_unmap_page(eth->dma_dev,
1014 dma_unmap_addr(tx_buf, dma_addr0),
1015 dma_unmap_len(tx_buf, dma_len0),
1019 if (dma_unmap_len(tx_buf, dma_len0)) {
1020 dma_unmap_page(eth->dma_dev,
1021 dma_unmap_addr(tx_buf, dma_addr0),
1022 dma_unmap_len(tx_buf, dma_len0),
1026 if (dma_unmap_len(tx_buf, dma_len1)) {
1027 dma_unmap_page(eth->dma_dev,
1028 dma_unmap_addr(tx_buf, dma_addr1),
1029 dma_unmap_len(tx_buf, dma_len1),
1034 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1035 if (tx_buf->type == MTK_TYPE_SKB) {
1036 struct sk_buff *skb = tx_buf->data;
1039 napi_consume_skb(skb, napi);
1041 dev_kfree_skb_any(skb);
1043 struct xdp_frame *xdpf = tx_buf->data;
1045 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1046 xdp_return_frame_rx_napi(xdpf);
1048 xdp_return_frame_bulk(xdpf, bq);
1050 xdp_return_frame(xdpf);
1054 tx_buf->data = NULL;
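/* Record the DMA mapping in tx_buf so it can be unmapped later. On
 * SoCs without QDMA, also fill the shadow PDMA descriptor, where two
 * consecutive buffers share one descriptor (PLEN0/PLEN1 slots).
 */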
1057 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1058 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1059 size_t size, int idx)
1061 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1062 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1063 dma_unmap_len_set(tx_buf, dma_len0, size);
1066 txd->txd3 = mapped_addr;
1067 txd->txd2 |= TX_DMA_PLEN1(size);
1068 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1069 dma_unmap_len_set(tx_buf, dma_len1, size);
1071 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1072 txd->txd1 = mapped_addr;
1073 txd->txd2 = TX_DMA_PLEN0(size);
1074 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1075 dma_unmap_len_set(tx_buf, dma_len0, size);
1080 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1081 struct mtk_tx_dma_desc_info *info)
1083 struct mtk_mac *mac = netdev_priv(dev);
1084 struct mtk_eth *eth = mac->hw;
1085 struct mtk_tx_dma *desc = txd;
1088 WRITE_ONCE(desc->txd1, info->addr);
1090 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
1093 WRITE_ONCE(desc->txd3, data);
1095 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1099 /* tx checksum offload */
1101 data |= TX_DMA_CHKSUM;
1102 /* vlan header offload */
1104 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1106 WRITE_ONCE(desc->txd4, data);
1109 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1110 struct mtk_tx_dma_desc_info *info)
1112 struct mtk_mac *mac = netdev_priv(dev);
1113 struct mtk_tx_dma_v2 *desc = txd;
1114 struct mtk_eth *eth = mac->hw;
1117 WRITE_ONCE(desc->txd1, info->addr);
1119 data = TX_DMA_PLEN0(info->size);
1122 WRITE_ONCE(desc->txd3, data);
1124 if (!info->qid && mac->id)
1125 info->qid = MTK_QDMA_GMAC2_QID;
1127 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1128 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1129 WRITE_ONCE(desc->txd4, data);
1134 data |= TX_DMA_TSO_V2;
1135 /* tx checksum offload */
1137 data |= TX_DMA_CHKSUM_V2;
1139 WRITE_ONCE(desc->txd5, data);
1142 if (info->first && info->vlan)
1143 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1144 WRITE_ONCE(desc->txd6, data);
1146 WRITE_ONCE(desc->txd7, 0);
1147 WRITE_ONCE(desc->txd8, 0);
1150 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1151 struct mtk_tx_dma_desc_info *info)
1153 struct mtk_mac *mac = netdev_priv(dev);
1154 struct mtk_eth *eth = mac->hw;
1156 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1157 mtk_tx_set_dma_desc_v2(dev, txd, info);
1159 mtk_tx_set_dma_desc_v1(dev, txd, info);
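/* Map the skb head and all fragments and chain the required TX
 * descriptors. On a mapping error, unwind every descriptor set up so
 * far so the caller can drop the skb.
 */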
1162 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1163 int tx_num, struct mtk_tx_ring *ring, bool gso)
1165 struct mtk_tx_dma_desc_info txd_info = {
1166 .size = skb_headlen(skb),
1168 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1169 .vlan = skb_vlan_tag_present(skb),
1170 .qid = skb->mark & MTK_QDMA_TX_MASK,
1171 .vlan_tci = skb_vlan_tag_get(skb),
1173 .last = !skb_is_nonlinear(skb),
1175 struct mtk_mac *mac = netdev_priv(dev);
1176 struct mtk_eth *eth = mac->hw;
1177 const struct mtk_soc_data *soc = eth->soc;
1178 struct mtk_tx_dma *itxd, *txd;
1179 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1180 struct mtk_tx_buf *itx_buf, *tx_buf;
1184 itxd = ring->next_free;
1185 itxd_pdma = qdma_to_pdma(ring, itxd);
1186 if (itxd == ring->last_free)
1189 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1190 memset(itx_buf, 0, sizeof(*itx_buf));
1192 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1194 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1197 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1199 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1200 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1201 MTK_TX_FLAGS_FPORT1;
1202 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1207 txd_pdma = qdma_to_pdma(ring, txd);
1209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1210 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1211 unsigned int offset = 0;
1212 int frag_size = skb_frag_size(frag);
1215 bool new_desc = true;
1217 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1219 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1220 txd_pdma = qdma_to_pdma(ring, txd);
1221 if (txd == ring->last_free)
1229 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1230 txd_info.size = min_t(unsigned int, frag_size,
1231 soc->txrx.dma_max_len);
1232 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
1233 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1234 !(frag_size - txd_info.size);
1235 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1236 offset, txd_info.size,
1238 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1241 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1243 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1244 soc->txrx.txd_size);
1246 memset(tx_buf, 0, sizeof(*tx_buf));
1247 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1248 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1249 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1250 MTK_TX_FLAGS_FPORT1;
1252 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1253 txd_info.size, k++);
1255 frag_size -= txd_info.size;
1256 offset += txd_info.size;
1260 /* store the skb for later cleanup */
1261 itx_buf->type = MTK_TYPE_SKB;
1262 itx_buf->data = skb;
1264 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1266 txd_pdma->txd2 |= TX_DMA_LS0;
1268 txd_pdma->txd2 |= TX_DMA_LS1;
1271 netdev_sent_queue(dev, skb->len);
1272 skb_tx_timestamp(skb);
1274 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1275 atomic_sub(n_desc, &ring->free_count);
1277 /* make sure that all changes to the dma ring are flushed before we
1282 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1283 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1284 !netdev_xmit_more())
1285 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1289 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1291 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1298 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1301 mtk_tx_unmap(eth, tx_buf, NULL, false);
1303 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1304 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1305 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1307 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1308 itxd_pdma = qdma_to_pdma(ring, itxd);
1309 } while (itxd != txd);
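/* Worst-case number of TX descriptors needed for this skb: for GSO,
 * each fragment may need several descriptors because a single
 * descriptor carries at most dma_max_len bytes.
 */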
1314 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1319 if (skb_is_gso(skb)) {
1320 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1321 frag = &skb_shinfo(skb)->frags[i];
1322 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1323 eth->soc->txrx.dma_max_len);
1326 nfrags += skb_shinfo(skb)->nr_frags;
1332 static int mtk_queue_stopped(struct mtk_eth *eth)
1336 for (i = 0; i < MTK_MAC_COUNT; i++) {
1337 if (!eth->netdev[i])
1339 if (netif_queue_stopped(eth->netdev[i]))
1346 static void mtk_wake_queue(struct mtk_eth *eth)
1350 for (i = 0; i < MTK_MAC_COUNT; i++) {
1351 if (!eth->netdev[i])
1353 netif_wake_queue(eth->netdev[i]);
1357 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1359 struct mtk_mac *mac = netdev_priv(dev);
1360 struct mtk_eth *eth = mac->hw;
1361 struct mtk_tx_ring *ring = &eth->tx_ring;
1362 struct net_device_stats *stats = &dev->stats;
1366 /* normally we can rely on the stack not calling this more than once,
1367 * however we have 2 queues running on the same ring so we need to lock
1370 spin_lock(&eth->page_lock);
1372 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1375 tx_num = mtk_cal_txd_req(eth, skb);
1376 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1377 netif_stop_queue(dev);
1378 netif_err(eth, tx_queued, dev,
1379 "Tx Ring full when queue awake!\n");
1380 spin_unlock(&eth->page_lock);
1381 return NETDEV_TX_BUSY;
1384 /* TSO: fill MSS info in tcp checksum field */
1385 if (skb_is_gso(skb)) {
1386 if (skb_cow_head(skb, 0)) {
1387 netif_warn(eth, tx_err, dev,
1388 "GSO expand head fail.\n");
1392 if (skb_shinfo(skb)->gso_type &
1393 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1395 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1399 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1402 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1403 netif_stop_queue(dev);
1405 spin_unlock(&eth->page_lock);
1407 return NETDEV_TX_OK;
1410 spin_unlock(&eth->page_lock);
1411 stats->tx_dropped++;
1412 dev_kfree_skb_any(skb);
1413 return NETDEV_TX_OK;
1416 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1419 struct mtk_rx_ring *ring;
1423 return &eth->rx_ring[0];
1425 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1426 struct mtk_rx_dma *rxd;
1428 ring = &eth->rx_ring[i];
1429 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1430 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1431 if (rxd->rxd2 & RX_DMA_DONE) {
1432 ring->calc_idx_update = true;
1440 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1442 struct mtk_rx_ring *ring;
1446 ring = &eth->rx_ring[0];
1447 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1449 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1450 ring = &eth->rx_ring[i];
1451 if (ring->calc_idx_update) {
1452 ring->calc_idx_update = false;
1453 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1459 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1464 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1465 struct xdp_rxq_info *xdp_q,
1468 struct page_pool_params pp_params = {
1470 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1472 .nid = NUMA_NO_NODE,
1473 .dev = eth->dma_dev,
1474 .offset = MTK_PP_HEADROOM,
1475 .max_len = MTK_PP_MAX_BUF_SIZE,
1477 struct page_pool *pp;
1480 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1482 pp = page_pool_create(&pp_params);
1486 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
1491 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1493 goto err_unregister_rxq;
1498 xdp_rxq_info_unreg(xdp_q);
1500 page_pool_destroy(pp);
1502 return ERR_PTR(err);
1505 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1510 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1514 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1515 return page_address(page);
1518 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1520 if (ring->page_pool)
1521 page_pool_put_full_page(ring->page_pool,
1522 virt_to_head_page(data), napi);
1524 skb_free_frag(data);
1527 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1528 struct mtk_tx_dma_desc_info *txd_info,
1529 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1530 void *data, u16 headroom, int index, bool dma_map)
1532 struct mtk_tx_ring *ring = &eth->tx_ring;
1533 struct mtk_mac *mac = netdev_priv(dev);
1534 struct mtk_tx_dma *txd_pdma;
1536 if (dma_map) { /* ndo_xdp_xmit */
1537 txd_info->addr = dma_map_single(eth->dma_dev, data,
1538 txd_info->size, DMA_TO_DEVICE);
1539 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1542 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1544 struct page *page = virt_to_head_page(data);
1546 txd_info->addr = page_pool_get_dma_addr(page) +
1547 sizeof(struct xdp_frame) + headroom;
1548 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1549 txd_info->size, DMA_BIDIRECTIONAL);
1551 mtk_tx_set_dma_desc(dev, txd, txd_info);
1553 tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1554 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1555 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1557 txd_pdma = qdma_to_pdma(ring, txd);
1558 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
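/* Queue an xdp_frame (and its fragments) on the TX ring. dma_map is
 * true for ndo_xdp_xmit frames, which need a fresh DMA mapping; XDP_TX
 * frames reuse the page_pool mapping and only need a cache sync.
 */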
1564 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1565 struct net_device *dev, bool dma_map)
1567 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1568 const struct mtk_soc_data *soc = eth->soc;
1569 struct mtk_tx_ring *ring = &eth->tx_ring;
1570 struct mtk_tx_dma_desc_info txd_info = {
1573 .last = !xdp_frame_has_frags(xdpf),
1575 int err, index = 0, n_desc = 1, nr_frags;
1576 struct mtk_tx_dma *htxd, *txd, *txd_pdma;
1577 struct mtk_tx_buf *htx_buf, *tx_buf;
1578 void *data = xdpf->data;
1580 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1583 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1584 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1587 spin_lock(&eth->page_lock);
1589 txd = ring->next_free;
1590 if (txd == ring->last_free) {
1591 spin_unlock(&eth->page_lock);
1596 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1597 memset(tx_buf, 0, sizeof(*tx_buf));
1601 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1602 data, xdpf->headroom, index, dma_map);
1609 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1610 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1611 txd_pdma = qdma_to_pdma(ring, txd);
1612 if (txd == ring->last_free)
1615 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1616 soc->txrx.txd_size);
1617 memset(tx_buf, 0, sizeof(*tx_buf));
1621 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1622 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1623 txd_info.last = index + 1 == nr_frags;
1624 data = skb_frag_address(&sinfo->frags[index]);
1628 /* store xdpf for cleanup */
1629 htx_buf->data = xdpf;
1631 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1632 txd_pdma = qdma_to_pdma(ring, txd);
1634 txd_pdma->txd2 |= TX_DMA_LS0;
1636 txd_pdma->txd2 |= TX_DMA_LS1;
1639 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1640 atomic_sub(n_desc, &ring->free_count);
1642 /* make sure that all changes to the dma ring are flushed before we
1647 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1648 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1652 idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1653 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1654 MT7628_TX_CTX_IDX0);
1657 spin_unlock(&eth->page_lock);
1662 while (htxd != txd) {
1663 txd_pdma = qdma_to_pdma(ring, htxd);
1664 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1665 mtk_tx_unmap(eth, tx_buf, NULL, false);
1667 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1668 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1669 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1671 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1674 spin_unlock(&eth->page_lock);
1679 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1680 struct xdp_frame **frames, u32 flags)
1682 struct mtk_mac *mac = netdev_priv(dev);
1683 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1684 struct mtk_eth *eth = mac->hw;
1687 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1690 for (i = 0; i < num_frame; i++) {
1691 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1696 u64_stats_update_begin(&hw_stats->syncp);
1697 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1698 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1699 u64_stats_update_end(&hw_stats->syncp);
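/* Run the attached XDP program on a received buffer and carry out the
 * verdict (pass, redirect, TX or drop), updating the per-action XDP
 * statistics.
 */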
1704 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1705 struct xdp_buff *xdp, struct net_device *dev)
1707 struct mtk_mac *mac = netdev_priv(dev);
1708 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1709 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1710 struct bpf_prog *prog;
1715 prog = rcu_dereference(eth->prog);
1719 act = bpf_prog_run_xdp(prog, xdp);
1722 count = &hw_stats->xdp_stats.rx_xdp_pass;
1725 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1730 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1733 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1735 if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1736 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1741 count = &hw_stats->xdp_stats.rx_xdp_tx;
1745 bpf_warn_invalid_xdp_action(dev, prog, act);
1748 trace_xdp_exception(dev, prog, act);
1754 page_pool_put_full_page(ring->page_pool,
1755 virt_to_head_page(xdp->data), true);
1758 u64_stats_update_begin(&hw_stats->syncp);
1759 *count = *count + 1;
1760 u64_stats_update_end(&hw_stats->syncp);
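/* NAPI RX poll: reap completed RX descriptors, run XDP if a program is
 * attached, otherwise build an skb, then refill the ring with a fresh
 * buffer and advance the CPU index register.
 */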
1767 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1768 struct mtk_eth *eth)
1770 struct dim_sample dim_sample = {};
1771 struct mtk_rx_ring *ring;
1772 bool xdp_flush = false;
1774 struct sk_buff *skb;
1775 u8 *data, *new_data;
1776 struct mtk_rx_dma_v2 *rxd, trxd;
1777 int done = 0, bytes = 0;
1779 while (done < budget) {
1780 unsigned int pktlen, *rxdcsum;
1781 struct net_device *netdev;
1782 dma_addr_t dma_addr;
1786 ring = mtk_get_rx_ring(eth);
1787 if (unlikely(!ring))
1790 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1791 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1792 data = ring->data[idx];
1794 if (!mtk_rx_get_desc(eth, &trxd, rxd))
1797 /* find out which mac the packet comes from. values start at 1 */
1798 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1799 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1800 else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1801 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1802 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1804 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1808 netdev = eth->netdev[mac];
1810 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1813 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1815 /* alloc new buffer */
1816 if (ring->page_pool) {
1817 struct page *page = virt_to_head_page(data);
1818 struct xdp_buff xdp;
1821 new_data = mtk_page_pool_get_buff(ring->page_pool,
1824 if (unlikely(!new_data)) {
1825 netdev->stats.rx_dropped++;
1829 dma_sync_single_for_cpu(eth->dma_dev,
1830 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1831 pktlen, page_pool_get_dma_dir(ring->page_pool));
1833 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1834 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1836 xdp_buff_clear_frags_flag(&xdp);
1838 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1839 if (ret == XDP_REDIRECT)
1842 if (ret != XDP_PASS)
1845 skb = build_skb(data, PAGE_SIZE);
1846 if (unlikely(!skb)) {
1847 page_pool_put_full_page(ring->page_pool,
1849 netdev->stats.rx_dropped++;
1853 skb_reserve(skb, xdp.data - xdp.data_hard_start);
1854 skb_put(skb, xdp.data_end - xdp.data);
1855 skb_mark_for_recycle(skb);
1857 if (ring->frag_size <= PAGE_SIZE)
1858 new_data = napi_alloc_frag(ring->frag_size);
1860 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
1862 if (unlikely(!new_data)) {
1863 netdev->stats.rx_dropped++;
1867 dma_addr = dma_map_single(eth->dma_dev,
1868 new_data + NET_SKB_PAD + eth->ip_align,
1869 ring->buf_size, DMA_FROM_DEVICE);
1870 if (unlikely(dma_mapping_error(eth->dma_dev,
1872 skb_free_frag(new_data);
1873 netdev->stats.rx_dropped++;
1877 dma_unmap_single(eth->dma_dev, trxd.rxd1,
1878 ring->buf_size, DMA_FROM_DEVICE);
1880 skb = build_skb(data, ring->frag_size);
1881 if (unlikely(!skb)) {
1882 netdev->stats.rx_dropped++;
1883 skb_free_frag(data);
1887 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1888 skb_put(skb, pktlen);
1894 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1895 rxdcsum = &trxd.rxd3;
1897 rxdcsum = &trxd.rxd4;
1899 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
1900 skb->ip_summed = CHECKSUM_UNNECESSARY;
1902 skb_checksum_none_assert(skb);
1903 skb->protocol = eth_type_trans(skb, netdev);
1905 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
1906 if (hash != MTK_RXD4_FOE_ENTRY) {
1907 hash = jhash_1word(hash, 0);
1908 skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
1911 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
1912 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
1913 mtk_ppe_check_skb(eth->ppe, skb,
1914 trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
1916 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
1917 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1918 if (trxd.rxd3 & RX_DMA_VTAG_V2)
1919 __vlan_hwaccel_put_tag(skb,
1920 htons(RX_DMA_VPID(trxd.rxd4)),
1921 RX_DMA_VID(trxd.rxd4));
1922 } else if (trxd.rxd2 & RX_DMA_VTAG) {
1923 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1924 RX_DMA_VID(trxd.rxd3));
1927 /* If the device is attached to a DSA switch, the special
1928 * tag inserted into the VLAN field by the hardware switch can be
1929 * offloaded by RX HW VLAN offload. Clear the VLAN info.
1931 if (netdev_uses_dsa(netdev))
1932 __vlan_hwaccel_clear_tag(skb);
1935 skb_record_rx_queue(skb, 0);
1936 napi_gro_receive(napi, skb);
1939 ring->data[idx] = new_data;
1940 rxd->rxd1 = (unsigned int)dma_addr;
1942 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1943 rxd->rxd2 = RX_DMA_LSO;
1945 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
1947 ring->calc_idx = idx;
1953 /* make sure that all changes to the dma ring are flushed before
1957 mtk_update_rx_cpu_idx(eth);
1960 eth->rx_packets += done;
1961 eth->rx_bytes += bytes;
1962 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
1964 net_dim(&eth->rx_dim, dim_sample);
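/* Reclaim completed QDMA TX descriptors between the software
 * last_free_ptr and the hardware release pointer, unmapping buffers
 * and accumulating per-MAC packet/byte counts for queue completion.
 */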
1972 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1973 unsigned int *done, unsigned int *bytes)
1975 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
1976 struct mtk_tx_ring *ring = &eth->tx_ring;
1977 struct mtk_tx_buf *tx_buf;
1978 struct xdp_frame_bulk bq;
1979 struct mtk_tx_dma *desc;
1982 cpu = ring->last_free_ptr;
1983 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
1985 desc = mtk_qdma_phys_to_virt(ring, cpu);
1986 xdp_frame_bulk_init(&bq);
1988 while ((cpu != dma) && budget) {
1989 u32 next_cpu = desc->txd2;
1992 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1993 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1996 tx_buf = mtk_desc_to_tx_buf(ring, desc,
1997 eth->soc->txrx.txd_size);
1998 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2004 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2005 if (tx_buf->type == MTK_TYPE_SKB) {
2006 struct sk_buff *skb = tx_buf->data;
2008 bytes[mac] += skb->len;
2013 mtk_tx_unmap(eth, tx_buf, &bq, true);
2015 ring->last_free = desc;
2016 atomic_inc(&ring->free_count);
2020 xdp_flush_frame_bulk(&bq);
2022 ring->last_free_ptr = cpu;
2023 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2028 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2029 unsigned int *done, unsigned int *bytes)
2031 struct mtk_tx_ring *ring = &eth->tx_ring;
2032 struct mtk_tx_buf *tx_buf;
2033 struct xdp_frame_bulk bq;
2034 struct mtk_tx_dma *desc;
2037 cpu = ring->cpu_idx;
2038 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2039 xdp_frame_bulk_init(&bq);
2041 while ((cpu != dma) && budget) {
2042 tx_buf = &ring->buf[cpu];
2046 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2047 if (tx_buf->type == MTK_TYPE_SKB) {
2048 struct sk_buff *skb = tx_buf->data;
2050 bytes[0] += skb->len;
2055 mtk_tx_unmap(eth, tx_buf, &bq, true);
2057 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2058 ring->last_free = desc;
2059 atomic_inc(&ring->free_count);
2061 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2063 xdp_flush_frame_bulk(&bq);
2065 ring->cpu_idx = cpu;
2070 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2072 struct mtk_tx_ring *ring = &eth->tx_ring;
2073 struct dim_sample dim_sample = {};
2074 unsigned int done[MTK_MAX_DEVS];
2075 unsigned int bytes[MTK_MAX_DEVS];
2078 memset(done, 0, sizeof(done));
2079 memset(bytes, 0, sizeof(bytes));
2081 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2082 budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
2084 budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
2086 for (i = 0; i < MTK_MAC_COUNT; i++) {
2087 if (!eth->netdev[i] || !done[i])
2089 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
2091 eth->tx_packets += done[i];
2092 eth->tx_bytes += bytes[i];
2095 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2097 net_dim(&eth->tx_dim, dim_sample);
2099 if (mtk_queue_stopped(eth) &&
2100 (atomic_read(&ring->free_count) > ring->thresh))
2101 mtk_wake_queue(eth);
2106 static void mtk_handle_status_irq(struct mtk_eth *eth)
2108 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2110 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2111 mtk_stats_update(eth);
2112 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2117 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2119 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2120 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2123 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2124 mtk_handle_status_irq(eth);
2125 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2126 tx_done = mtk_poll_tx(eth, budget);
2128 if (unlikely(netif_msg_intr(eth))) {
2130 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2131 mtk_r32(eth, reg_map->tx_irq_status),
2132 mtk_r32(eth, reg_map->tx_irq_mask));
2135 if (tx_done == budget)
2138 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2141 if (napi_complete_done(napi, tx_done))
2142 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
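/* RX NAPI handler: ack the RX done interrupt, poll until the budget is
 * exhausted or no work is left, then re-enable RX interrupts.
 */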
2147 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2149 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2150 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2151 int rx_done_total = 0;
2153 mtk_handle_status_irq(eth);
2158 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2159 reg_map->pdma.irq_status);
2160 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2161 rx_done_total += rx_done;
2163 if (unlikely(netif_msg_intr(eth))) {
2165 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2166 mtk_r32(eth, reg_map->pdma.irq_status),
2167 mtk_r32(eth, reg_map->pdma.irq_mask));
2170 if (rx_done_total == budget)
2173 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2174 eth->soc->txrx.rx_irq_done_mask);
2176 if (napi_complete_done(napi, rx_done_total))
2177 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2179 return rx_done_total;
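/* Allocate and initialise the TX descriptor ring (plus the shadow PDMA
 * ring on SoCs without QDMA) and program the ring base, size and
 * pointer registers.
 */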
2182 static int mtk_tx_alloc(struct mtk_eth *eth)
2184 const struct mtk_soc_data *soc = eth->soc;
2185 struct mtk_tx_ring *ring = &eth->tx_ring;
2186 int i, sz = soc->txrx.txd_size;
2187 struct mtk_tx_dma_v2 *txd;
2189 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
2194 ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
2195 &ring->phys, GFP_KERNEL);
2199 for (i = 0; i < MTK_DMA_SIZE; i++) {
2200 int next = (i + 1) % MTK_DMA_SIZE;
2201 u32 next_ptr = ring->phys + next * sz;
2203 txd = ring->dma + i * sz;
2204 txd->txd2 = next_ptr;
2205 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2207 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2215 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2216 * only as the framework. The real HW descriptors are the PDMA
2217 * descriptors in ring->dma_pdma.
2219 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2220 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
2221 &ring->phys_pdma, GFP_KERNEL);
2222 if (!ring->dma_pdma)
2225 for (i = 0; i < MTK_DMA_SIZE; i++) {
2226 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2227 ring->dma_pdma[i].txd4 = 0;
2231 ring->dma_size = MTK_DMA_SIZE;
2232 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
2233 ring->next_free = ring->dma;
2234 ring->last_free = (void *)txd;
2235 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
2236 ring->thresh = MAX_SKB_FRAGS;
2238 /* make sure that all changes to the dma ring are flushed before we
2243 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2244 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2245 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2247 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
2248 soc->reg_map->qdma.crx_ptr);
2249 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2250 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
2251 soc->reg_map->qdma.qtx_cfg);
2253 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2254 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
2255 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2256 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2265 static void mtk_tx_clean(struct mtk_eth *eth)
2267 const struct mtk_soc_data *soc = eth->soc;
2268 struct mtk_tx_ring *ring = &eth->tx_ring;
2272 for (i = 0; i < MTK_DMA_SIZE; i++)
2273 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2279 dma_free_coherent(eth->dma_dev,
2280 MTK_DMA_SIZE * soc->txrx.txd_size,
2281 ring->dma, ring->phys);
2285 if (ring->dma_pdma) {
2286 dma_free_coherent(eth->dma_dev,
2287 MTK_DMA_SIZE * soc->txrx.txd_size,
2288 ring->dma_pdma, ring->phys_pdma);
2289 ring->dma_pdma = NULL;
2293 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2295 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2296 struct mtk_rx_ring *ring;
2297 int rx_data_len, rx_dma_size;
2300 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2303 ring = &eth->rx_ring_qdma;
2305 ring = &eth->rx_ring[ring_no];
2308 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2309 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2310 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2312 rx_data_len = ETH_DATA_LEN;
2313 rx_dma_size = MTK_DMA_SIZE;
2316 ring->frag_size = mtk_max_frag_size(rx_data_len);
2317 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2318 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2323 if (mtk_page_pool_enabled(eth)) {
2324 struct page_pool *pp;
2326 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2331 ring->page_pool = pp;
2334 ring->dma = dma_alloc_coherent(eth->dma_dev,
2335 rx_dma_size * eth->soc->txrx.rxd_size,
2336 &ring->phys, GFP_KERNEL);
2340 for (i = 0; i < rx_dma_size; i++) {
2341 struct mtk_rx_dma_v2 *rxd;
2342 dma_addr_t dma_addr;
2345 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2346 if (ring->page_pool) {
2347 data = mtk_page_pool_get_buff(ring->page_pool,
2348 &dma_addr, GFP_KERNEL);
2352 if (ring->frag_size <= PAGE_SIZE)
2353 data = netdev_alloc_frag(ring->frag_size);
2355 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2360 dma_addr = dma_map_single(eth->dma_dev,
2361 data + NET_SKB_PAD + eth->ip_align,
2362 ring->buf_size, DMA_FROM_DEVICE);
2363 if (unlikely(dma_mapping_error(eth->dma_dev,
2367 rxd->rxd1 = (unsigned int)dma_addr;
2368 ring->data[i] = data;
2370 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2371 rxd->rxd2 = RX_DMA_LSO;
2373 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2377 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2385 ring->dma_size = rx_dma_size;
2386 ring->calc_idx_update = false;
2387 ring->calc_idx = rx_dma_size - 1;
2388 if (rx_flag == MTK_RX_FLAGS_QDMA)
2389 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2390 ring_no * MTK_QRX_OFFSET;
2392 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2393 ring_no * MTK_QRX_OFFSET;
2394 /* make sure that all changes to the dma ring are flushed before we
2399 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2400 mtk_w32(eth, ring->phys,
2401 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2402 mtk_w32(eth, rx_dma_size,
2403 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2404 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2405 reg_map->qdma.rst_idx);
2407 mtk_w32(eth, ring->phys,
2408 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2409 mtk_w32(eth, rx_dma_size,
2410 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2411 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2412 reg_map->pdma.rst_idx);
2414 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2419 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2423 if (ring->data && ring->dma) {
2424 for (i = 0; i < ring->dma_size; i++) {
2425 struct mtk_rx_dma *rxd;
2430 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2434 dma_unmap_single(eth->dma_dev, rxd->rxd1,
2435 ring->buf_size, DMA_FROM_DEVICE);
2436 mtk_rx_put_buff(ring, ring->data[i], false);
2443 dma_free_coherent(eth->dma_dev,
2444 ring->dma_size * eth->soc->txrx.rxd_size,
2445 ring->dma, ring->phys);
2449 if (ring->page_pool) {
2450 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2451 xdp_rxq_info_unreg(&ring->xdp_q);
2452 page_pool_destroy(ring->page_pool);
2453 ring->page_pool = NULL;
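/* Program the hardware LRO engine: put the LRO RX rings into
 * auto-learn mode, set the ageing/aggregation limits and enable LRO in
 * the global PDMA LRO control registers.
 */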
2457 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2460 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2461 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2463 /* set LRO rings to auto-learn modes */
2464 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2466 /* validate LRO ring */
2467 ring_ctrl_dw2 |= MTK_RING_VLD;
2469 /* set AGE timer (unit: 20us) */
2470 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2471 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2473 /* set max AGG timer (unit: 20us) */
2474 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2476 /* set max LRO AGG count */
2477 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2478 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2480 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2481 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2482 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2483 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2486 /* IPv4 checksum update enable */
2487 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2489 /* switch priority comparison to packet count mode */
2490 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2492 /* bandwidth threshold setting */
2493 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2495 /* auto-learn score delta setting */
2496 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2498 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2499 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2500 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2502 /* set HW LRO mode & the max aggregation count for rx packets */
2503 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2505 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2506 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2509 lro_ctrl_dw0 |= MTK_LRO_EN;
2511 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2512 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2517 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2522 /* relinquish lro rings, flush aggregated packets */
2523 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2525 /* wait for the relinquish requests to complete */
2526 for (i = 0; i < 10; i++) {
2527 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2528 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2535 /* invalidate lro rings */
2536 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2537 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2539 /* disable HW LRO */
2540 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2543 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2547 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2549 /* invalidate the IP setting */
2550 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2552 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2554 /* validate the IP setting */
2555 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2558 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2562 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2564 /* invalidate the IP setting */
2565 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2567 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2570 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2575 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2576 if (mac->hwlro_ip[i])
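/* ethtool ETHTOOL_SRXCLSRLINS helper: program one IPv4 destination address
 * into a HW LRO auto-learn slot. Only TCP/IPv4 rules with a non-zero
 * destination address and location 0 or 1 are accepted.
 *
 * Illustrative userspace invocation (address made up):
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 loc 0
 */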
2583 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2584 struct ethtool_rxnfc *cmd)
2586 struct ethtool_rx_flow_spec *fsp =
2587 (struct ethtool_rx_flow_spec *)&cmd->fs;
2588 struct mtk_mac *mac = netdev_priv(dev);
2589 struct mtk_eth *eth = mac->hw;
2592 if ((fsp->flow_type != TCP_V4_FLOW) ||
2593 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2594 (fsp->location > 1))
2597 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2598 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2600 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2602 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2607 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2608 struct ethtool_rxnfc *cmd)
2610 struct ethtool_rx_flow_spec *fsp =
2611 (struct ethtool_rx_flow_spec *)&cmd->fs;
2612 struct mtk_mac *mac = netdev_priv(dev);
2613 struct mtk_eth *eth = mac->hw;
2616 if (fsp->location > 1)
2619 mac->hwlro_ip[fsp->location] = 0;
2620 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2622 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2624 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2629 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2631 struct mtk_mac *mac = netdev_priv(dev);
2632 struct mtk_eth *eth = mac->hw;
2635 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2636 mac->hwlro_ip[i] = 0;
2637 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2639 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2642 mac->hwlro_ip_cnt = 0;
2645 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2646 struct ethtool_rxnfc *cmd)
2648 struct mtk_mac *mac = netdev_priv(dev);
2649 struct ethtool_rx_flow_spec *fsp =
2650 (struct ethtool_rx_flow_spec *)&cmd->fs;
2652 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2655 /* only the TCP/IPv4 destination address is meaningful; all other fields are don't-care */
2656 fsp->flow_type = TCP_V4_FLOW;
2657 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2658 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2660 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2661 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2662 fsp->h_u.tcp_ip4_spec.psrc = 0;
2663 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2664 fsp->h_u.tcp_ip4_spec.pdst = 0;
2665 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2666 fsp->h_u.tcp_ip4_spec.tos = 0;
2667 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2672 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2673 struct ethtool_rxnfc *cmd,
2676 struct mtk_mac *mac = netdev_priv(dev);
2680 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2681 if (mac->hwlro_ip[i]) {
2687 cmd->rule_cnt = cnt;
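/* ndo_fix_features: refuse to clear NETIF_F_LRO while HW LRO flow entries
 * are still programmed; the rules have to be deleted first.
 */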
2692 static netdev_features_t mtk_fix_features(struct net_device *dev,
2693 netdev_features_t features)
2695 if (!(features & NETIF_F_LRO)) {
2696 struct mtk_mac *mac = netdev_priv(dev);
2697 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2700 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2702 features |= NETIF_F_LRO;
2709 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2713 if (!((dev->features ^ features) & NETIF_F_LRO))
2716 if (!(features & NETIF_F_LRO))
2717 mtk_hwlro_netdev_disable(dev);
2722 /* wait for DMA to finish whatever it is doing before we start using it again */
2723 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2729 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2730 reg = eth->soc->reg_map->qdma.glo_cfg;
2732 reg = eth->soc->reg_map->pdma.glo_cfg;
2734 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2735 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2736 5, MTK_DMA_BUSY_TIMEOUT_US);
2738 dev_err(eth->dev, "DMA init timeout\n");
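/* Allocate and program all DMA rings in dependency order: wait for the
 * engine to go idle, set up the QDMA scratch/free queue when QDMA is
 * present, then the TX ring, the RX rings (including the QDMA RX ring and
 * the HW LRO rings where supported) and finally the QDMA drop thresholds.
 */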
2743 static int mtk_dma_init(struct mtk_eth *eth)
2748 if (mtk_dma_busy_wait(eth))
2751 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2752 /* QDMA needs scratch memory for internal reordering of the
2755 err = mtk_init_fq_dma(eth);
2760 err = mtk_tx_alloc(eth);
2764 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2765 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2770 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2775 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2776 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2780 err = mtk_hwlro_rx_init(eth);
2785 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2786 /* Enable random early drop and set drop threshold
2789 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2790 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
2791 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
2797 static void mtk_dma_free(struct mtk_eth *eth)
2799 const struct mtk_soc_data *soc = eth->soc;
2802 for (i = 0; i < MTK_MAC_COUNT; i++)
2804 netdev_reset_queue(eth->netdev[i]);
2805 if (eth->scratch_ring) {
2806 dma_free_coherent(eth->dma_dev,
2807 MTK_DMA_SIZE * soc->txrx.txd_size,
2808 eth->scratch_ring, eth->phy_scratch_ring);
2809 eth->scratch_ring = NULL;
2810 eth->phy_scratch_ring = 0;
2813 mtk_rx_clean(eth, &eth->rx_ring[0]);
2814 mtk_rx_clean(eth, &eth->rx_ring_qdma);
2817 mtk_hwlro_rx_uninit(eth);
2818 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2819 mtk_rx_clean(eth, &eth->rx_ring[i]);
2822 kfree(eth->scratch_head);
2825 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
2827 struct mtk_mac *mac = netdev_priv(dev);
2828 struct mtk_eth *eth = mac->hw;
2830 eth->netdev[mac->id]->stats.tx_errors++;
2831 netif_err(eth, tx_err, dev,
2832 "transmit timed out\n");
2833 schedule_work(&eth->pending_work);
2836 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
2838 struct mtk_eth *eth = _eth;
2841 if (likely(napi_schedule_prep(&eth->rx_napi))) {
2842 __napi_schedule(&eth->rx_napi);
2843 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
2849 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2851 struct mtk_eth *eth = _eth;
2854 if (likely(napi_schedule_prep(&eth->tx_napi))) {
2855 __napi_schedule(&eth->tx_napi);
2856 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2862 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2864 struct mtk_eth *eth = _eth;
2865 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2867 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
2868 eth->soc->txrx.rx_irq_done_mask) {
2869 if (mtk_r32(eth, reg_map->pdma.irq_status) &
2870 eth->soc->txrx.rx_irq_done_mask)
2871 mtk_handle_irq_rx(irq, _eth);
2873 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
2874 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2875 mtk_handle_irq_tx(irq, _eth);
2881 #ifdef CONFIG_NET_POLL_CONTROLLER
2882 static void mtk_poll_controller(struct net_device *dev)
2884 struct mtk_mac *mac = netdev_priv(dev);
2885 struct mtk_eth *eth = mac->hw;
2887 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2888 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
2889 mtk_handle_irq_rx(eth->irq[2], dev);
2890 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2891 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
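/* Bring up the DMA engines: initialise the rings via mtk_dma_init() and then
 * enable TX/RX DMA in the QDMA and/or PDMA global configuration, including
 * the burst sizes and the extra NETSYS v2 bits.
 */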
2895 static int mtk_start_dma(struct mtk_eth *eth)
2897 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2898 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2901 err = mtk_dma_init(eth);
2907 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2908 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
2909 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2910 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
2911 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
2913 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2914 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
2915 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
2918 val |= MTK_RX_BT_32DWORDS;
2919 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
2922 MTK_RX_DMA_EN | rx_2b_offset |
2923 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2924 reg_map->pdma.glo_cfg);
2926 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2927 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2928 reg_map->pdma.glo_cfg);
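/* Program every GDMA port: forward received frames to the PDMA (or to the
 * PPE when flow offload is active), enable the RX checksum offload bits,
 * keep the DSA special tag on GMAC0 when a DSA switch is attached, and
 * finally pulse the PSE reset.
 */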
2934 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
2938 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2941 for (i = 0; i < MTK_MAC_COUNT; i++) {
2942 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2944 /* by default, set the forward port so that frames are sent to the PDMA */
2947 /* Enable RX checksum */
2948 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2952 if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
2953 val |= MTK_GDMA_SPECIAL_TAG;
2955 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2957 /* Reset and enable PSE */
2958 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2959 mtk_w32(eth, 0, MTK_RST_GL);
2962 static int mtk_open(struct net_device *dev)
2964 struct mtk_mac *mac = netdev_priv(dev);
2965 struct mtk_eth *eth = mac->hw;
2968 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2970 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2975 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2976 if (!refcount_read(&eth->dma_refcnt)) {
2977 u32 gdm_config = MTK_GDMA_TO_PDMA;
2979 err = mtk_start_dma(eth);
2983 if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
2984 gdm_config = MTK_GDMA_TO_PPE;
2986 mtk_gdm_config(eth, gdm_config);
2988 napi_enable(&eth->tx_napi);
2989 napi_enable(&eth->rx_napi);
2990 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2991 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2992 refcount_set(&eth->dma_refcnt, 1);
2995 refcount_inc(&eth->dma_refcnt);
2997 phylink_start(mac->phylink);
2998 netif_start_queue(dev);
3002 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3007 /* stop the dma engine */
3008 spin_lock_bh(&eth->page_lock);
3009 val = mtk_r32(eth, glo_cfg);
3010 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3012 spin_unlock_bh(&eth->page_lock);
3014 /* wait for dma stop */
3015 for (i = 0; i < 10; i++) {
3016 val = mtk_r32(eth, glo_cfg);
3017 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3025 static int mtk_stop(struct net_device *dev)
3027 struct mtk_mac *mac = netdev_priv(dev);
3028 struct mtk_eth *eth = mac->hw;
3030 phylink_stop(mac->phylink);
3032 netif_tx_disable(dev);
3034 phylink_disconnect_phy(mac->phylink);
3036 /* only shut down DMA if this is the last user */
3037 if (!refcount_dec_and_test(&eth->dma_refcnt))
3040 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3042 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3043 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3044 napi_disable(&eth->tx_napi);
3045 napi_disable(&eth->rx_napi);
3047 cancel_work_sync(&eth->rx_dim.work);
3048 cancel_work_sync(&eth->tx_dim.work);
3050 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3051 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3052 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3056 if (eth->soc->offload_version)
3057 mtk_ppe_stop(eth->ppe);
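/* Install or remove an XDP program. HW LRO and MTUs larger than
 * MTK_PP_MAX_BUF_SIZE are rejected; a running interface is stopped and
 * re-opened around the program swap.
 *
 * Illustrative userspace invocation (object file name made up):
 *   ip link set dev eth0 xdp obj xdp_prog.o
 */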
3062 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3063 struct netlink_ext_ack *extack)
3065 struct mtk_mac *mac = netdev_priv(dev);
3066 struct mtk_eth *eth = mac->hw;
3067 struct bpf_prog *old_prog;
3071 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3075 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3076 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3080 need_update = !!eth->prog != !!prog;
3081 if (netif_running(dev) && need_update)
3084 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3086 bpf_prog_put(old_prog);
3088 if (netif_running(dev) && need_update)
3089 return mtk_open(dev);
3094 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3096 switch (xdp->command) {
3097 case XDP_SETUP_PROG:
3098 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3104 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3106 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3110 usleep_range(1000, 1100);
3111 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3117 static void mtk_clk_disable(struct mtk_eth *eth)
3121 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3122 clk_disable_unprepare(eth->clks[clk]);
3125 static int mtk_clk_enable(struct mtk_eth *eth)
3129 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3130 ret = clk_prepare_enable(eth->clks[clk]);
3132 goto err_disable_clks;
3139 clk_disable_unprepare(eth->clks[clk]);
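/* Net DIM worker for the RX direction: convert the moderation profile chosen
 * by the DIM algorithm into the PDMA (and, when QDMA is present, the QDMA)
 * delay-interrupt fields. Times are programmed in 20us units.
 */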
3144 static void mtk_dim_rx(struct work_struct *work)
3146 struct dim *dim = container_of(work, struct dim, work);
3147 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3148 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3149 struct dim_cq_moder cur_profile;
3152 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3154 spin_lock_bh(&eth->dim_lock);
3156 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3157 val &= MTK_PDMA_DELAY_TX_MASK;
3158 val |= MTK_PDMA_DELAY_RX_EN;
3160 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3161 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3163 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3164 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3166 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3167 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3168 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3170 spin_unlock_bh(&eth->dim_lock);
3172 dim->state = DIM_START_MEASURE;
3175 static void mtk_dim_tx(struct work_struct *work)
3177 struct dim *dim = container_of(work, struct dim, work);
3178 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3179 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3180 struct dim_cq_moder cur_profile;
3183 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3185 spin_lock_bh(&eth->dim_lock);
3187 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3188 val &= MTK_PDMA_DELAY_RX_MASK;
3189 val |= MTK_PDMA_DELAY_TX_EN;
3191 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3192 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3194 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3195 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3197 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3198 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3199 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3201 spin_unlock_bh(&eth->dim_lock);
3203 dim->state = DIM_START_MEASURE;
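/* One-time hardware bring-up: enable runtime PM and the clocks, reset the
 * frame engine (and PPE) through the ethsys reset controller, force every
 * GMAC link down, group the TX/RX interrupts and, on NETSYS v2, program the
 * PSE input/output queue thresholds.
 */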
3206 static int mtk_hw_init(struct mtk_eth *eth)
3208 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3209 ETHSYS_DMA_AG_MAP_PPE;
3210 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3213 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
3216 pm_runtime_enable(eth->dev);
3217 pm_runtime_get_sync(eth->dev);
3219 ret = mtk_clk_enable(eth);
3221 goto err_disable_pm;
3224 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3225 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3227 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3228 ret = device_reset(eth->dev);
3230 dev_err(eth->dev, "MAC reset failed!\n");
3231 goto err_disable_pm;
3234 /* set interrupt delays based on current Net DIM sample */
3235 mtk_dim_rx(&eth->rx_dim.work);
3236 mtk_dim_tx(&eth->tx_dim.work);
3238 /* disable delayed and normal interrupts */
3239 mtk_tx_irq_disable(eth, ~0);
3240 mtk_rx_irq_disable(eth, ~0);
3245 val = RSTCTRL_FE | RSTCTRL_PPE;
3246 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3247 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3250 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3251 val |= RSTCTRL_PPE1;
3254 ethsys_reset(eth, val);
3256 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3257 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3260 /* Set FE to PDMAv2 if necessary */
3261 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3262 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3266 /* Set GE2 driving and slew rate */
3267 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3270 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3273 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3276 /* Set link down as the default for each GMAC. Each GMAC's MCR is
3277 * programmed with a more appropriate value when mtk_mac_config() is called. */
3280 for (i = 0; i < MTK_MAC_COUNT; i++)
3281 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3283 /* Tell the CDM to parse the MTK special tag on packets coming from the
3284 * CPU; this also works for untagged packets. */
3286 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3287 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3289 /* Enable RX VLAN offloading */
3290 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3292 /* set interrupt delays based on current Net DIM sample */
3293 mtk_dim_rx(&eth->rx_dim.work);
3294 mtk_dim_tx(&eth->tx_dim.work);
3296 /* disable delayed and normal interrupts */
3297 mtk_tx_irq_disable(eth, ~0);
3298 mtk_rx_irq_disable(eth, ~0);
3300 /* FE int grouping */
3301 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3302 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3303 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3304 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3305 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3307 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3308 /* PSE should not drop port8 and port9 packets */
3309 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3311 /* PSE Free Queue Flow Control */
3312 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3314 /* configure the PSE input queue thresholds */
3315 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3316 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3317 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3318 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3319 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3320 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3321 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3322 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3324 /* configure the PSE output queue thresholds */
3325 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3326 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3327 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3328 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3329 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3330 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3331 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3332 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3334 /* GDM and CDM Threshold */
3335 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3336 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3337 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3338 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3339 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3340 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3346 pm_runtime_put_sync(eth->dev);
3347 pm_runtime_disable(eth->dev);
3352 static int mtk_hw_deinit(struct mtk_eth *eth)
3354 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3357 mtk_clk_disable(eth);
3359 pm_runtime_put_sync(eth->dev);
3360 pm_runtime_disable(eth->dev);
3365 static int __init mtk_init(struct net_device *dev)
3367 struct mtk_mac *mac = netdev_priv(dev);
3368 struct mtk_eth *eth = mac->hw;
3371 ret = of_get_ethdev_address(mac->of_node, dev);
3373 /* If the MAC address is invalid, fall back to a random one */
3374 eth_hw_addr_random(dev);
3375 dev_err(eth->dev, "generated random MAC address %pM\n",
3382 static void mtk_uninit(struct net_device *dev)
3384 struct mtk_mac *mac = netdev_priv(dev);
3385 struct mtk_eth *eth = mac->hw;
3387 phylink_disconnect_phy(mac->phylink);
3388 mtk_tx_irq_disable(eth, ~0);
3389 mtk_rx_irq_disable(eth, ~0);
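/* The MAC supports only a few discrete maximum RX lengths, so the new MTU
 * (plus MTK_RX_ETH_HLEN) is rounded up to the next supported bucket:
 * 1518, 1536, 1552 or 2048 bytes. With an XDP program attached the MTU is
 * further limited to MTK_PP_MAX_BUF_SIZE.
 */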
3392 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3394 int length = new_mtu + MTK_RX_ETH_HLEN;
3395 struct mtk_mac *mac = netdev_priv(dev);
3396 struct mtk_eth *eth = mac->hw;
3397 u32 mcr_cur, mcr_new;
3399 if (rcu_access_pointer(eth->prog) &&
3400 length > MTK_PP_MAX_BUF_SIZE) {
3401 netdev_err(dev, "Invalid MTU for XDP mode\n");
3405 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3406 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3407 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3410 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3411 else if (length <= 1536)
3412 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3413 else if (length <= 1552)
3414 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3416 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3418 if (mcr_new != mcr_cur)
3419 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3427 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3429 struct mtk_mac *mac = netdev_priv(dev);
3435 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3443 static void mtk_pending_work(struct work_struct *work)
3445 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3447 unsigned long restart = 0;
3451 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
3453 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
3456 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
3457 /* stop all devices to make sure that dma is properly shut down */
3458 for (i = 0; i < MTK_MAC_COUNT; i++) {
3459 if (!eth->netdev[i])
3461 mtk_stop(eth->netdev[i]);
3462 __set_bit(i, &restart);
3464 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
3466 /* restart underlying hardware such as power, clock, pin mux
3467 * and the connected phy
3472 pinctrl_select_state(eth->dev->pins->p,
3473 eth->dev->pins->default_state);
3476 /* restart DMA and enable IRQs */
3477 for (i = 0; i < MTK_MAC_COUNT; i++) {
3478 if (!test_bit(i, &restart))
3480 err = mtk_open(eth->netdev[i]);
3482 netif_alert(eth, ifup, eth->netdev[i],
3483 "Driver up/down cycle failed, closing device.\n");
3484 dev_close(eth->netdev[i]);
3488 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
3490 clear_bit_unlock(MTK_RESETTING, &eth->state);
3495 static int mtk_free_dev(struct mtk_eth *eth)
3499 for (i = 0; i < MTK_MAC_COUNT; i++) {
3500 if (!eth->netdev[i])
3502 free_netdev(eth->netdev[i]);
3508 static int mtk_unreg_dev(struct mtk_eth *eth)
3512 for (i = 0; i < MTK_MAC_COUNT; i++) {
3513 if (!eth->netdev[i])
3515 unregister_netdev(eth->netdev[i]);
3521 static int mtk_cleanup(struct mtk_eth *eth)
3525 cancel_work_sync(&eth->pending_work);
3530 static int mtk_get_link_ksettings(struct net_device *ndev,
3531 struct ethtool_link_ksettings *cmd)
3533 struct mtk_mac *mac = netdev_priv(ndev);
3535 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3538 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
3541 static int mtk_set_link_ksettings(struct net_device *ndev,
3542 const struct ethtool_link_ksettings *cmd)
3544 struct mtk_mac *mac = netdev_priv(ndev);
3546 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3549 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
3552 static void mtk_get_drvinfo(struct net_device *dev,
3553 struct ethtool_drvinfo *info)
3555 struct mtk_mac *mac = netdev_priv(dev);
3557 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
3558 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
3559 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
3562 static u32 mtk_get_msglevel(struct net_device *dev)
3564 struct mtk_mac *mac = netdev_priv(dev);
3566 return mac->hw->msg_enable;
3569 static void mtk_set_msglevel(struct net_device *dev, u32 value)
3571 struct mtk_mac *mac = netdev_priv(dev);
3573 mac->hw->msg_enable = value;
3576 static int mtk_nway_reset(struct net_device *dev)
3578 struct mtk_mac *mac = netdev_priv(dev);
3580 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3586 return phylink_ethtool_nway_reset(mac->phylink);
3589 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3593 switch (stringset) {
3594 case ETH_SS_STATS: {
3595 struct mtk_mac *mac = netdev_priv(dev);
3597 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
3598 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
3599 data += ETH_GSTRING_LEN;
3601 if (mtk_page_pool_enabled(mac->hw))
3602 page_pool_ethtool_stats_get_strings(data);
3610 static int mtk_get_sset_count(struct net_device *dev, int sset)
3613 case ETH_SS_STATS: {
3614 int count = ARRAY_SIZE(mtk_ethtool_stats);
3615 struct mtk_mac *mac = netdev_priv(dev);
3617 if (mtk_page_pool_enabled(mac->hw))
3618 count += page_pool_ethtool_stats_get_count();
3626 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
3628 struct page_pool_stats stats = {};
3631 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
3632 struct mtk_rx_ring *ring = &eth->rx_ring[i];
3634 if (!ring->page_pool)
3637 page_pool_get_stats(ring->page_pool, &stats);
3639 page_pool_ethtool_stats_get(data, &stats);
3642 static void mtk_get_ethtool_stats(struct net_device *dev,
3643 struct ethtool_stats *stats, u64 *data)
3645 struct mtk_mac *mac = netdev_priv(dev);
3646 struct mtk_hw_stats *hwstats = mac->hw_stats;
3647 u64 *data_src, *data_dst;
3651 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
3654 if (netif_running(dev) && netif_device_present(dev)) {
3655 if (spin_trylock_bh(&hwstats->stats_lock)) {
3656 mtk_stats_update_mac(mac);
3657 spin_unlock_bh(&hwstats->stats_lock);
3661 data_src = (u64 *)hwstats;
3665 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
3667 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
3668 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
3669 if (mtk_page_pool_enabled(mac->hw))
3670 mtk_ethtool_pp_stats(mac->hw, data_dst);
3671 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
3674 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3677 int ret = -EOPNOTSUPP;
3680 case ETHTOOL_GRXRINGS:
3681 if (dev->hw_features & NETIF_F_LRO) {
3682 cmd->data = MTK_MAX_RX_RING_NUM;
3686 case ETHTOOL_GRXCLSRLCNT:
3687 if (dev->hw_features & NETIF_F_LRO) {
3688 struct mtk_mac *mac = netdev_priv(dev);
3690 cmd->rule_cnt = mac->hwlro_ip_cnt;
3694 case ETHTOOL_GRXCLSRULE:
3695 if (dev->hw_features & NETIF_F_LRO)
3696 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
3698 case ETHTOOL_GRXCLSRLALL:
3699 if (dev->hw_features & NETIF_F_LRO)
3700 ret = mtk_hwlro_get_fdir_all(dev, cmd,
3710 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3712 int ret = -EOPNOTSUPP;
3715 case ETHTOOL_SRXCLSRLINS:
3716 if (dev->hw_features & NETIF_F_LRO)
3717 ret = mtk_hwlro_add_ipaddr(dev, cmd);
3719 case ETHTOOL_SRXCLSRLDEL:
3720 if (dev->hw_features & NETIF_F_LRO)
3721 ret = mtk_hwlro_del_ipaddr(dev, cmd);
3730 static const struct ethtool_ops mtk_ethtool_ops = {
3731 .get_link_ksettings = mtk_get_link_ksettings,
3732 .set_link_ksettings = mtk_set_link_ksettings,
3733 .get_drvinfo = mtk_get_drvinfo,
3734 .get_msglevel = mtk_get_msglevel,
3735 .set_msglevel = mtk_set_msglevel,
3736 .nway_reset = mtk_nway_reset,
3737 .get_link = ethtool_op_get_link,
3738 .get_strings = mtk_get_strings,
3739 .get_sset_count = mtk_get_sset_count,
3740 .get_ethtool_stats = mtk_get_ethtool_stats,
3741 .get_rxnfc = mtk_get_rxnfc,
3742 .set_rxnfc = mtk_set_rxnfc,
3745 static const struct net_device_ops mtk_netdev_ops = {
3746 .ndo_init = mtk_init,
3747 .ndo_uninit = mtk_uninit,
3748 .ndo_open = mtk_open,
3749 .ndo_stop = mtk_stop,
3750 .ndo_start_xmit = mtk_start_xmit,
3751 .ndo_set_mac_address = mtk_set_mac_address,
3752 .ndo_validate_addr = eth_validate_addr,
3753 .ndo_eth_ioctl = mtk_do_ioctl,
3754 .ndo_change_mtu = mtk_change_mtu,
3755 .ndo_tx_timeout = mtk_tx_timeout,
3756 .ndo_get_stats64 = mtk_get_stats64,
3757 .ndo_fix_features = mtk_fix_features,
3758 .ndo_set_features = mtk_set_features,
3759 #ifdef CONFIG_NET_POLL_CONTROLLER
3760 .ndo_poll_controller = mtk_poll_controller,
3762 .ndo_setup_tc = mtk_eth_setup_tc,
3764 .ndo_xdp_xmit = mtk_xdp_xmit,
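/* Create one MAC from its device tree node: allocate the netdev and the
 * per-MAC hardware counters, create the phylink instance with the interface
 * modes the SoC supports, and advertise the feature set (including LRO where
 * available) before the caller registers the netdev.
 */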
3767 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
3769 const __be32 *_id = of_get_property(np, "reg", NULL);
3770 phy_interface_t phy_mode;
3771 struct phylink *phylink;
3772 struct mtk_mac *mac;
3776 dev_err(eth->dev, "missing mac id\n");
3780 id = be32_to_cpup(_id);
3781 if (id >= MTK_MAC_COUNT) {
3782 dev_err(eth->dev, "%d is not a valid mac id\n", id);
3786 if (eth->netdev[id]) {
3787 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
3791 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
3792 if (!eth->netdev[id]) {
3793 dev_err(eth->dev, "alloc_etherdev failed\n");
3796 mac = netdev_priv(eth->netdev[id]);
3802 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
3803 mac->hwlro_ip_cnt = 0;
3805 mac->hw_stats = devm_kzalloc(eth->dev,
3806 sizeof(*mac->hw_stats),
3808 if (!mac->hw_stats) {
3809 dev_err(eth->dev, "failed to allocate counter memory\n");
3813 spin_lock_init(&mac->hw_stats->stats_lock);
3814 u64_stats_init(&mac->hw_stats->syncp);
3815 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
3817 /* phylink create */
3818 err = of_get_phy_mode(np, &phy_mode);
3820 dev_err(eth->dev, "incorrect phy-mode\n");
3824 /* mac config is not set */
3825 mac->interface = PHY_INTERFACE_MODE_NA;
3826 mac->speed = SPEED_UNKNOWN;
3828 mac->phylink_config.dev = &eth->netdev[id]->dev;
3829 mac->phylink_config.type = PHYLINK_NETDEV;
3830 /* This driver makes use of state->speed in mac_config */
3831 mac->phylink_config.legacy_pre_march2020 = true;
3832 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
3833 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
3835 __set_bit(PHY_INTERFACE_MODE_MII,
3836 mac->phylink_config.supported_interfaces);
3837 __set_bit(PHY_INTERFACE_MODE_GMII,
3838 mac->phylink_config.supported_interfaces);
3840 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
3841 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
3843 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
3844 __set_bit(PHY_INTERFACE_MODE_TRGMII,
3845 mac->phylink_config.supported_interfaces);
3847 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
3848 __set_bit(PHY_INTERFACE_MODE_SGMII,
3849 mac->phylink_config.supported_interfaces);
3850 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
3851 mac->phylink_config.supported_interfaces);
3852 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
3853 mac->phylink_config.supported_interfaces);
3856 phylink = phylink_create(&mac->phylink_config,
3857 of_fwnode_handle(mac->of_node),
3858 phy_mode, &mtk_phylink_ops);
3859 if (IS_ERR(phylink)) {
3860 err = PTR_ERR(phylink);
3864 mac->phylink = phylink;
3866 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
3867 eth->netdev[id]->watchdog_timeo = 5 * HZ;
3868 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
3869 eth->netdev[id]->base_addr = (unsigned long)eth->base;
3871 eth->netdev[id]->hw_features = eth->soc->hw_features;
3873 eth->netdev[id]->hw_features |= NETIF_F_LRO;
3875 eth->netdev[id]->vlan_features = eth->soc->hw_features &
3876 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
3877 eth->netdev[id]->features |= eth->soc->hw_features;
3878 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
3880 eth->netdev[id]->irq = eth->irq[0];
3881 eth->netdev[id]->dev.of_node = np;
3883 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3884 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
3886 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
3891 free_netdev(eth->netdev[id]);
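/* Switch the struct device used for DMA mappings: close every netdev that is
 * currently UP, swap eth->dma_dev, then re-open them so all rings are
 * re-allocated against the new device.
 */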
3895 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
3897 struct net_device *dev, *tmp;
3898 LIST_HEAD(dev_list);
3903 for (i = 0; i < MTK_MAC_COUNT; i++) {
3904 dev = eth->netdev[i];
3906 if (!dev || !(dev->flags & IFF_UP))
3909 list_add_tail(&dev->close_list, &dev_list);
3912 dev_close_many(&dev_list, false);
3914 eth->dma_dev = dma_dev;
3916 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
3917 list_del_init(&dev->close_list);
3918 dev_open(dev, NULL);
3924 static int mtk_probe(struct platform_device *pdev)
3926 struct device_node *mac_np;
3927 struct mtk_eth *eth;
3930 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
3934 eth->soc = of_device_get_match_data(&pdev->dev);
3936 eth->dev = &pdev->dev;
3937 eth->dma_dev = &pdev->dev;
3938 eth->base = devm_platform_ioremap_resource(pdev, 0);
3939 if (IS_ERR(eth->base))
3940 return PTR_ERR(eth->base);
3942 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3943 eth->ip_align = NET_IP_ALIGN;
3945 spin_lock_init(&eth->page_lock);
3946 spin_lock_init(&eth->tx_irq_lock);
3947 spin_lock_init(&eth->rx_irq_lock);
3948 spin_lock_init(&eth->dim_lock);
3950 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3951 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
3953 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3954 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
3956 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3957 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3959 if (IS_ERR(eth->ethsys)) {
3960 dev_err(&pdev->dev, "no ethsys regmap found\n");
3961 return PTR_ERR(eth->ethsys);
3965 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
3966 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3967 "mediatek,infracfg");
3968 if (IS_ERR(eth->infra)) {
3969 dev_err(&pdev->dev, "no infracfg regmap found\n");
3970 return PTR_ERR(eth->infra);
3974 if (of_dma_is_coherent(pdev->dev.of_node)) {
3977 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
3978 "cci-control-port");
3979 /* enable CPU/bus coherency */
3981 regmap_write(cci, 0, 3);
3984 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
3985 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
3990 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
3991 eth->soc->ana_rgc3);
3997 if (eth->soc->required_pctl) {
3998 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4000 if (IS_ERR(eth->pctl)) {
4001 dev_err(&pdev->dev, "no pctl regmap found\n");
4002 return PTR_ERR(eth->pctl);
4007 struct device_node *np = of_parse_phandle(pdev->dev.of_node,
4009 static const u32 wdma_regs[] = {
4015 if (!np || i >= ARRAY_SIZE(wdma_regs))
4018 wdma = eth->base + wdma_regs[i];
4019 mtk_wed_add_hw(np, eth, wdma, i);
4022 for (i = 0; i < 3; i++) {
4023 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4024 eth->irq[i] = eth->irq[0];
4026 eth->irq[i] = platform_get_irq(pdev, i);
4027 if (eth->irq[i] < 0) {
4028 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4032 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4033 eth->clks[i] = devm_clk_get(eth->dev,
4034 mtk_clks_source_name[i]);
4035 if (IS_ERR(eth->clks[i])) {
4036 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
4037 return -EPROBE_DEFER;
4038 if (eth->soc->required_clks & BIT(i)) {
4039 dev_err(&pdev->dev, "clock %s not found\n",
4040 mtk_clks_source_name[i]);
4043 eth->clks[i] = NULL;
4047 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4048 INIT_WORK(&eth->pending_work, mtk_pending_work);
4050 err = mtk_hw_init(eth);
4054 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4056 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4057 if (!of_device_is_compatible(mac_np,
4058 "mediatek,eth-mac"))
4061 if (!of_device_is_available(mac_np))
4064 err = mtk_add_mac(eth, mac_np);
4066 of_node_put(mac_np);
4071 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4072 err = devm_request_irq(eth->dev, eth->irq[0],
4074 dev_name(eth->dev), eth);
4076 err = devm_request_irq(eth->dev, eth->irq[1],
4077 mtk_handle_irq_tx, 0,
4078 dev_name(eth->dev), eth);
4082 err = devm_request_irq(eth->dev, eth->irq[2],
4083 mtk_handle_irq_rx, 0,
4084 dev_name(eth->dev), eth);
4089 /* No MT7628/88 support yet */
4090 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4091 err = mtk_mdio_init(eth);
4096 if (eth->soc->offload_version) {
4097 eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
4103 err = mtk_eth_offload_init(eth);
4108 for (i = 0; i < MTK_MAX_DEVS; i++) {
4109 if (!eth->netdev[i])
4112 err = register_netdev(eth->netdev[i]);
4114 dev_err(eth->dev, "error bringing up device\n");
4115 goto err_deinit_mdio;
4117 netif_info(eth, probe, eth->netdev[i],
4118 "mediatek frame engine at 0x%08lx, irq %d\n",
4119 eth->netdev[i]->base_addr, eth->irq[0]);
4122 /* we run 2 devices on the same DMA ring so we need a dummy device
4125 init_dummy_netdev(&eth->dummy_dev);
4126 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
4128 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
4131 platform_set_drvdata(pdev, eth);
4136 mtk_mdio_cleanup(eth);
4145 static int mtk_remove(struct platform_device *pdev)
4147 struct mtk_eth *eth = platform_get_drvdata(pdev);
4148 struct mtk_mac *mac;
4151 /* stop all devices to make sure that dma is properly shut down */
4152 for (i = 0; i < MTK_MAC_COUNT; i++) {
4153 if (!eth->netdev[i])
4155 mtk_stop(eth->netdev[i]);
4156 mac = netdev_priv(eth->netdev[i]);
4157 phylink_disconnect_phy(mac->phylink);
4162 netif_napi_del(&eth->tx_napi);
4163 netif_napi_del(&eth->rx_napi);
4165 mtk_mdio_cleanup(eth);
4170 static const struct mtk_soc_data mt2701_data = {
4171 .reg_map = &mtk_reg_map,
4172 .caps = MT7623_CAPS | MTK_HWLRO,
4173 .hw_features = MTK_HW_FEATURES,
4174 .required_clks = MT7623_CLKS_BITMAP,
4175 .required_pctl = true,
4177 .txd_size = sizeof(struct mtk_tx_dma),
4178 .rxd_size = sizeof(struct mtk_rx_dma),
4179 .rx_irq_done_mask = MTK_RX_DONE_INT,
4180 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4181 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4182 .dma_len_offset = 16,
4186 static const struct mtk_soc_data mt7621_data = {
4187 .reg_map = &mtk_reg_map,
4188 .caps = MT7621_CAPS,
4189 .hw_features = MTK_HW_FEATURES,
4190 .required_clks = MT7621_CLKS_BITMAP,
4191 .required_pctl = false,
4192 .offload_version = 2,
4194 .txd_size = sizeof(struct mtk_tx_dma),
4195 .rxd_size = sizeof(struct mtk_rx_dma),
4196 .rx_irq_done_mask = MTK_RX_DONE_INT,
4197 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4198 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4199 .dma_len_offset = 16,
4203 static const struct mtk_soc_data mt7622_data = {
4204 .reg_map = &mtk_reg_map,
4206 .caps = MT7622_CAPS | MTK_HWLRO,
4207 .hw_features = MTK_HW_FEATURES,
4208 .required_clks = MT7622_CLKS_BITMAP,
4209 .required_pctl = false,
4210 .offload_version = 2,
4212 .txd_size = sizeof(struct mtk_tx_dma),
4213 .rxd_size = sizeof(struct mtk_rx_dma),
4214 .rx_irq_done_mask = MTK_RX_DONE_INT,
4215 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4216 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4217 .dma_len_offset = 16,
4221 static const struct mtk_soc_data mt7623_data = {
4222 .reg_map = &mtk_reg_map,
4223 .caps = MT7623_CAPS | MTK_HWLRO,
4224 .hw_features = MTK_HW_FEATURES,
4225 .required_clks = MT7623_CLKS_BITMAP,
4226 .required_pctl = true,
4227 .offload_version = 2,
4229 .txd_size = sizeof(struct mtk_tx_dma),
4230 .rxd_size = sizeof(struct mtk_rx_dma),
4231 .rx_irq_done_mask = MTK_RX_DONE_INT,
4232 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4233 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4234 .dma_len_offset = 16,
4238 static const struct mtk_soc_data mt7629_data = {
4239 .reg_map = &mtk_reg_map,
4241 .caps = MT7629_CAPS | MTK_HWLRO,
4242 .hw_features = MTK_HW_FEATURES,
4243 .required_clks = MT7629_CLKS_BITMAP,
4244 .required_pctl = false,
4246 .txd_size = sizeof(struct mtk_tx_dma),
4247 .rxd_size = sizeof(struct mtk_rx_dma),
4248 .rx_irq_done_mask = MTK_RX_DONE_INT,
4249 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4250 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4251 .dma_len_offset = 16,
4255 static const struct mtk_soc_data mt7986_data = {
4256 .reg_map = &mt7986_reg_map,
4258 .caps = MT7986_CAPS,
4259 .required_clks = MT7986_CLKS_BITMAP,
4260 .required_pctl = false,
4262 .txd_size = sizeof(struct mtk_tx_dma_v2),
4263 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4264 .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4265 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4266 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4267 .dma_len_offset = 8,
4271 static const struct mtk_soc_data rt5350_data = {
4272 .reg_map = &mt7628_reg_map,
4273 .caps = MT7628_CAPS,
4274 .hw_features = MTK_HW_FEATURES_MT7628,
4275 .required_clks = MT7628_CLKS_BITMAP,
4276 .required_pctl = false,
4278 .txd_size = sizeof(struct mtk_tx_dma),
4279 .rxd_size = sizeof(struct mtk_rx_dma),
4280 .rx_irq_done_mask = MTK_RX_DONE_INT,
4281 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4282 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4283 .dma_len_offset = 16,
4287 const struct of_device_id of_mtk_match[] = {
4288 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4289 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4290 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4291 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4292 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4293 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4294 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4297 MODULE_DEVICE_TABLE(of, of_mtk_match);
4299 static struct platform_driver mtk_driver = {
4301 .remove = mtk_remove,
4303 .name = "mtk_soc_eth",
4304 .of_match_table = of_mtk_match,
4308 module_platform_driver(mtk_driver);
4310 MODULE_LICENSE("GPL");
4311 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4312 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");