1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/regmap.h>
15 #include <linux/clk.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/if_vlan.h>
18 #include <linux/reset.h>
19 #include <linux/tcp.h>
20 #include <linux/interrupt.h>
21 #include <linux/pinctrl/devinfo.h>
22 #include <linux/phylink.h>
23 #include <linux/pcs/pcs-mtk-lynxi.h>
24 #include <linux/jhash.h>
25 #include <linux/bitfield.h>
27 #include <net/dst_metadata.h>
29 #include "mtk_eth_soc.h"
32 static int mtk_msg_level = -1;
33 module_param_named(msg_level, mtk_msg_level, int, 0);
34 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
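/* Presumably consumed via netif_msg_init(), so msg_level is a NETIF_MSG_*
 * bitmask: -1 keeps the driver default, while e.g. msg_level=0x7 would enable
 * NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK.
 */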
36 #define MTK_ETHTOOL_STAT(x) { #x, \
37 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
39 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
40 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
43 static const struct mtk_reg_map mtk_reg_map = {
44 .tx_irq_mask = 0x1a1c,
45 .tx_irq_status = 0x1a18,
55 .adma_rx_dbg0 = 0x0a38,
68 .tx_sch_rate = 0x1a14,
81 .gdma_to_ppe = 0x4444,
91 static const struct mtk_reg_map mt7628_reg_map = {
92 .tx_irq_mask = 0x0a28,
93 .tx_irq_status = 0x0a20,
101 .irq_status = 0x0a20,
107 static const struct mtk_reg_map mt7986_reg_map = {
108 .tx_irq_mask = 0x461c,
109 .tx_irq_status = 0x4618,
112 .rx_cnt_cfg = 0x6104,
117 .irq_status = 0x6220,
119 .adma_rx_dbg0 = 0x6238,
126 .rx_cnt_cfg = 0x4504,
142 .tx_sch_rate = 0x4798,
145 .gdma_to_ppe = 0x3333,
151 .pse_iq_sta = 0x0180,
152 .pse_oq_sta = 0x01a0,
155 /* strings used by ethtool */
156 static const struct mtk_ethtool_stats {
157 char str[ETH_GSTRING_LEN];
159 } mtk_ethtool_stats[] = {
160 MTK_ETHTOOL_STAT(tx_bytes),
161 MTK_ETHTOOL_STAT(tx_packets),
162 MTK_ETHTOOL_STAT(tx_skip),
163 MTK_ETHTOOL_STAT(tx_collisions),
164 MTK_ETHTOOL_STAT(rx_bytes),
165 MTK_ETHTOOL_STAT(rx_packets),
166 MTK_ETHTOOL_STAT(rx_overflow),
167 MTK_ETHTOOL_STAT(rx_fcs_errors),
168 MTK_ETHTOOL_STAT(rx_short_errors),
169 MTK_ETHTOOL_STAT(rx_long_errors),
170 MTK_ETHTOOL_STAT(rx_checksum_errors),
171 MTK_ETHTOOL_STAT(rx_flow_control_packets),
172 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
173 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
174 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
175 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
176 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
177 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
178 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
181 static const char * const mtk_clks_source_name[] = {
182 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
183 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
184 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
185 "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
188 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
190 __raw_writel(val, eth->base + reg);
193 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
195 return __raw_readl(eth->base + reg);
198 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
202 val = mtk_r32(eth, reg);
205 mtk_w32(eth, val, reg);
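/* mtk_m32() is a masked read-modify-write helper: the bits in @mask are
 * cleared and the bits in @set are ORed in before the value is written back.
 * A minimal usage sketch, mirroring the TRGMII RXC reset sequence further
 * down in this file:
 *
 *	mtk_m32(eth, 0, RXC_RST | RXC_DQSISEL, TRGMII_RCK_CTRL);	// assert
 *	mtk_m32(eth, RXC_RST, 0, TRGMII_RCK_CTRL);			// release
 */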
209 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
211 unsigned long t_start = jiffies;
214 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
216 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
221 dev_err(eth->dev, "mdio: MDIO timeout\n");
225 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
230 ret = mtk_mdio_busy_wait(eth);
234 mtk_w32(eth, PHY_IAC_ACCESS |
237 PHY_IAC_REG(phy_reg) |
238 PHY_IAC_ADDR(phy_addr) |
239 PHY_IAC_DATA(write_data),
242 ret = mtk_mdio_busy_wait(eth);
249 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
250 u32 devad, u32 phy_reg, u32 write_data)
254 ret = mtk_mdio_busy_wait(eth);
258 mtk_w32(eth, PHY_IAC_ACCESS |
260 PHY_IAC_CMD_C45_ADDR |
262 PHY_IAC_ADDR(phy_addr) |
263 PHY_IAC_DATA(phy_reg),
266 ret = mtk_mdio_busy_wait(eth);
270 mtk_w32(eth, PHY_IAC_ACCESS |
274 PHY_IAC_ADDR(phy_addr) |
275 PHY_IAC_DATA(write_data),
278 ret = mtk_mdio_busy_wait(eth);
285 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
289 ret = mtk_mdio_busy_wait(eth);
293 mtk_w32(eth, PHY_IAC_ACCESS |
295 PHY_IAC_CMD_C22_READ |
296 PHY_IAC_REG(phy_reg) |
297 PHY_IAC_ADDR(phy_addr),
300 ret = mtk_mdio_busy_wait(eth);
304 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
307 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
308 u32 devad, u32 phy_reg)
312 ret = mtk_mdio_busy_wait(eth);
316 mtk_w32(eth, PHY_IAC_ACCESS |
318 PHY_IAC_CMD_C45_ADDR |
320 PHY_IAC_ADDR(phy_addr) |
321 PHY_IAC_DATA(phy_reg),
324 ret = mtk_mdio_busy_wait(eth);
328 mtk_w32(eth, PHY_IAC_ACCESS |
330 PHY_IAC_CMD_C45_READ |
332 PHY_IAC_ADDR(phy_addr),
335 ret = mtk_mdio_busy_wait(eth);
339 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
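/* Clause 45 accesses take two IAC transactions: the first
 * (PHY_IAC_CMD_C45_ADDR) latches the 16-bit register address for the selected
 * MMD, the second performs the actual read or write. Purely illustrative
 * call, using generic constants from <linux/mdio.h>:
 *
 *	val = _mtk_mdio_read_c45(eth, phy_addr, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
 */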
342 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
343 int phy_reg, u16 val)
345 struct mtk_eth *eth = bus->priv;
347 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
350 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
351 int devad, int phy_reg, u16 val)
353 struct mtk_eth *eth = bus->priv;
355 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
358 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
360 struct mtk_eth *eth = bus->priv;
362 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
365 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
368 struct mtk_eth *eth = bus->priv;
370 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
373 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
374 phy_interface_t interface)
378 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
379 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
381 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
382 ETHSYS_TRGMII_MT7621_MASK, val);
387 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
388 phy_interface_t interface, int speed)
394 if (interface == PHY_INTERFACE_MODE_TRGMII) {
395 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
396 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
398 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
402 if (speed == SPEED_1000) {
403 intf = INTF_MODE_RGMII_1000;
405 rck = RCK_CTRL_RGMII_1000;
406 tck = TCK_CTRL_RGMII_1000;
408 intf = INTF_MODE_RGMII_10_100;
410 rck = RCK_CTRL_RGMII_10_100;
411 tck = TCK_CTRL_RGMII_10_100;
414 mtk_w32(eth, intf, INTF_MODE);
416 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
417 ETHSYS_TRGMII_CLK_SEL362_5,
418 ETHSYS_TRGMII_CLK_SEL362_5);
420 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], rate);
422 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
424 mtk_w32(eth, rck, TRGMII_RCK_CTRL);
425 mtk_w32(eth, tck, TRGMII_TCK_CTRL);
428 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
429 phy_interface_t interface)
431 struct mtk_mac *mac = container_of(config, struct mtk_mac,
433 struct mtk_eth *eth = mac->hw;
436 if (interface == PHY_INTERFACE_MODE_SGMII ||
437 phy_interface_mode_is_8023z(interface)) {
438 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
441 return eth->sgmii_pcs[sid];
447 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
448 const struct phylink_link_state *state)
450 struct mtk_mac *mac = container_of(config, struct mtk_mac,
452 struct mtk_eth *eth = mac->hw;
453 int val, ge_mode, err = 0;
456 /* MT76x8 has no hardware settings for the MAC */
457 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
458 mac->interface != state->interface) {
459 /* Setup soc pin functions */
460 switch (state->interface) {
461 case PHY_INTERFACE_MODE_TRGMII:
462 case PHY_INTERFACE_MODE_RGMII_TXID:
463 case PHY_INTERFACE_MODE_RGMII_RXID:
464 case PHY_INTERFACE_MODE_RGMII_ID:
465 case PHY_INTERFACE_MODE_RGMII:
466 case PHY_INTERFACE_MODE_MII:
467 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
468 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
473 case PHY_INTERFACE_MODE_1000BASEX:
474 case PHY_INTERFACE_MODE_2500BASEX:
475 case PHY_INTERFACE_MODE_SGMII:
476 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
480 case PHY_INTERFACE_MODE_GMII:
481 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
482 err = mtk_gmac_gephy_path_setup(eth, mac->id);
491 /* Setup clock for 1st gmac */
492 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
493 !phy_interface_mode_is_8023z(state->interface) &&
494 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
495 if (MTK_HAS_CAPS(mac->hw->soc->caps,
496 MTK_TRGMII_MT7621_CLK)) {
497 if (mt7621_gmac0_rgmii_adjust(mac->hw,
501 /* FIXME: this is incorrect. Not only does it
502 * use state->speed (which is not guaranteed
503 * to be correct) but it also makes use of it
504 * in a code path that will only be reachable
505 * when the PHY interface mode changes, not
506 * when the speed changes. Consequently, RGMII
507 * is probably broken.
509 mtk_gmac0_rgmii_adjust(mac->hw,
513 /* mt7623_pad_clk_setup */
514 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
516 TD_DM_DRVP(8) | TD_DM_DRVN(8),
519 /* Assert/release MT7623 RXC reset */
520 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
522 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
526 switch (state->interface) {
527 case PHY_INTERFACE_MODE_MII:
528 case PHY_INTERFACE_MODE_GMII:
536 /* put the gmac into the right mode */
537 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
538 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
539 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
540 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
542 mac->interface = state->interface;
546 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
547 phy_interface_mode_is_8023z(state->interface)) {
548 /* The path GMAC to SGMII will be enabled once the SGMIISYS is
551 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
553 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
555 ~(u32)SYSCFG0_SGMII_MASK);
557 /* Save the syscfg0 value for mac_finish */
559 } else if (phylink_autoneg_inband(mode)) {
561 "In-band mode not supported in non SGMII mode!\n");
568 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
569 mac->id, phy_modes(state->interface));
573 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
574 mac->id, phy_modes(state->interface), err);
577 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
578 phy_interface_t interface)
580 struct mtk_mac *mac = container_of(config, struct mtk_mac,
582 struct mtk_eth *eth = mac->hw;
583 u32 mcr_cur, mcr_new;
586 if (interface == PHY_INTERFACE_MODE_SGMII ||
587 phy_interface_mode_is_8023z(interface))
588 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
589 SYSCFG0_SGMII_MASK, mac->syscfg0);
592 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
594 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
595 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
596 MAC_MCR_RX_FIFO_CLR_DIS;
598 /* Only update control register when needed! */
599 if (mcr_new != mcr_cur)
600 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
605 static void mtk_mac_pcs_get_state(struct phylink_config *config,
606 struct phylink_link_state *state)
608 struct mtk_mac *mac = container_of(config, struct mtk_mac,
610 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
612 state->link = (pmsr & MAC_MSR_LINK);
613 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
615 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
617 state->speed = SPEED_10;
619 case MAC_MSR_SPEED_100:
620 state->speed = SPEED_100;
622 case MAC_MSR_SPEED_1000:
623 state->speed = SPEED_1000;
626 state->speed = SPEED_UNKNOWN;
630 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
631 if (pmsr & MAC_MSR_RX_FC)
632 state->pause |= MLO_PAUSE_RX;
633 if (pmsr & MAC_MSR_TX_FC)
634 state->pause |= MLO_PAUSE_TX;
637 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
638 phy_interface_t interface)
640 struct mtk_mac *mac = container_of(config, struct mtk_mac,
642 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
644 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
645 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
648 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
651 const struct mtk_soc_data *soc = eth->soc;
654 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
657 val = MTK_QTX_SCH_MIN_RATE_EN |
658 /* minimum: 10 Mbps */
659 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
660 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
661 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
662 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
663 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
665 if (IS_ENABLED(CONFIG_SOC_MT7621)) {
668 val |= MTK_QTX_SCH_MAX_RATE_EN |
669 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
670 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
671 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
674 val |= MTK_QTX_SCH_MAX_RATE_EN |
675 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
676 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
677 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
680 val |= MTK_QTX_SCH_MAX_RATE_EN |
681 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
682 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
683 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
691 val |= MTK_QTX_SCH_MAX_RATE_EN |
692 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
693 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
694 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
697 val |= MTK_QTX_SCH_MAX_RATE_EN |
698 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
699 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
700 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
703 val |= MTK_QTX_SCH_MAX_RATE_EN |
704 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
705 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
706 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
713 ofs = MTK_QTX_OFFSET * idx;
714 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
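/* The MIN/MAX rate fields appear to encode the shaper rate as
 * mantissa * 10^exponent in kbit/s, consistent with the comments above:
 *
 *	MAN = 1,   EXP = 4  ->     10,000 kbit/s =  10 Mbit/s (minimum)
 *	MAN = 103, EXP = 3  ->    103,000 kbit/s ~ 100 Mbit/s plus headroom
 *	MAN = 105, EXP = 4  ->  1,050,000 kbit/s ~   1 Gbit/s plus headroom
 */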
717 static void mtk_mac_link_up(struct phylink_config *config,
718 struct phy_device *phy,
719 unsigned int mode, phy_interface_t interface,
720 int speed, int duplex, bool tx_pause, bool rx_pause)
722 struct mtk_mac *mac = container_of(config, struct mtk_mac,
726 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
727 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
728 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
729 MAC_MCR_FORCE_RX_FC);
731 /* Configure speed */
736 mcr |= MAC_MCR_SPEED_1000;
739 mcr |= MAC_MCR_SPEED_100;
743 /* Configure duplex */
744 if (duplex == DUPLEX_FULL)
745 mcr |= MAC_MCR_FORCE_DPX;
747 /* Configure pause modes - phylink will avoid these for half duplex */
749 mcr |= MAC_MCR_FORCE_TX_FC;
751 mcr |= MAC_MCR_FORCE_RX_FC;
753 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
754 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
757 static const struct phylink_mac_ops mtk_phylink_ops = {
758 .mac_select_pcs = mtk_mac_select_pcs,
759 .mac_pcs_get_state = mtk_mac_pcs_get_state,
760 .mac_config = mtk_mac_config,
761 .mac_finish = mtk_mac_finish,
762 .mac_link_down = mtk_mac_link_down,
763 .mac_link_up = mtk_mac_link_up,
766 static int mtk_mdio_init(struct mtk_eth *eth)
768 unsigned int max_clk = 2500000, divider;
769 struct device_node *mii_np;
773 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
775 dev_err(eth->dev, "no %s child node found", "mdio-bus");
779 if (!of_device_is_available(mii_np)) {
784 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
790 eth->mii_bus->name = "mdio";
791 eth->mii_bus->read = mtk_mdio_read_c22;
792 eth->mii_bus->write = mtk_mdio_write_c22;
793 eth->mii_bus->read_c45 = mtk_mdio_read_c45;
794 eth->mii_bus->write_c45 = mtk_mdio_write_c45;
795 eth->mii_bus->priv = eth;
796 eth->mii_bus->parent = eth->dev;
798 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
800 if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
801 if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
802 dev_err(eth->dev, "MDIO clock frequency out of range");
808 divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
810 /* Configure MDC Divider */
811 val = mtk_r32(eth, MTK_PPSC);
812 val &= ~PPSC_MDC_CFG;
813 val |= FIELD_PREP(PPSC_MDC_CFG, divider) | PPSC_MDC_TURBO;
814 mtk_w32(eth, val, MTK_PPSC);
816 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
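/* Worked example of the divider maths above: with the default max_clk of
 * 2.5 MHz, divider = DIV_ROUND_UP(MDC_MAX_FREQ, 2500000) (capped at 63), so
 * the resulting MDC clock MDC_MAX_FREQ / divider is the fastest rate that
 * does not exceed the requested "clock-frequency" property.
 */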
818 ret = of_mdiobus_register(eth->mii_bus, mii_np);
825 static void mtk_mdio_cleanup(struct mtk_eth *eth)
830 mdiobus_unregister(eth->mii_bus);
833 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
838 spin_lock_irqsave(&eth->tx_irq_lock, flags);
839 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
840 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
841 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
844 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
849 spin_lock_irqsave(&eth->tx_irq_lock, flags);
850 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
851 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
852 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
855 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
860 spin_lock_irqsave(&eth->rx_irq_lock, flags);
861 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
862 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
863 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
866 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
871 spin_lock_irqsave(&eth->rx_irq_lock, flags);
872 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
873 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
874 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
877 static int mtk_set_mac_address(struct net_device *dev, void *p)
879 int ret = eth_mac_addr(dev, p);
880 struct mtk_mac *mac = netdev_priv(dev);
881 struct mtk_eth *eth = mac->hw;
882 const char *macaddr = dev->dev_addr;
887 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
890 spin_lock_bh(&mac->hw->page_lock);
891 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
892 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
893 MT7628_SDM_MAC_ADRH);
894 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
895 (macaddr[4] << 8) | macaddr[5],
896 MT7628_SDM_MAC_ADRL);
898 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
899 MTK_GDMA_MAC_ADRH(mac->id));
900 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
901 (macaddr[4] << 8) | macaddr[5],
902 MTK_GDMA_MAC_ADRL(mac->id));
904 spin_unlock_bh(&mac->hw->page_lock);
909 void mtk_stats_update_mac(struct mtk_mac *mac)
911 struct mtk_hw_stats *hw_stats = mac->hw_stats;
912 struct mtk_eth *eth = mac->hw;
914 u64_stats_update_begin(&hw_stats->syncp);
916 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
917 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
918 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
919 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
920 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
921 hw_stats->rx_checksum_errors +=
922 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
924 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
925 unsigned int offs = hw_stats->reg_offset;
928 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
929 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
931 hw_stats->rx_bytes += (stats << 32);
932 hw_stats->rx_packets +=
933 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
934 hw_stats->rx_overflow +=
935 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
936 hw_stats->rx_fcs_errors +=
937 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
938 hw_stats->rx_short_errors +=
939 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
940 hw_stats->rx_long_errors +=
941 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
942 hw_stats->rx_checksum_errors +=
943 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
944 hw_stats->rx_flow_control_packets +=
945 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
947 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
948 hw_stats->tx_collisions +=
949 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
950 hw_stats->tx_bytes +=
951 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
952 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
954 hw_stats->tx_bytes += (stats << 32);
955 hw_stats->tx_packets +=
956 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
959 u64_stats_update_end(&hw_stats->syncp);
962 static void mtk_stats_update(struct mtk_eth *eth)
966 for (i = 0; i < MTK_MAC_COUNT; i++) {
967 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
969 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
970 mtk_stats_update_mac(eth->mac[i]);
971 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
976 static void mtk_get_stats64(struct net_device *dev,
977 struct rtnl_link_stats64 *storage)
979 struct mtk_mac *mac = netdev_priv(dev);
980 struct mtk_hw_stats *hw_stats = mac->hw_stats;
983 if (netif_running(dev) && netif_device_present(dev)) {
984 if (spin_trylock_bh(&hw_stats->stats_lock)) {
985 mtk_stats_update_mac(mac);
986 spin_unlock_bh(&hw_stats->stats_lock);
991 start = u64_stats_fetch_begin(&hw_stats->syncp);
992 storage->rx_packets = hw_stats->rx_packets;
993 storage->tx_packets = hw_stats->tx_packets;
994 storage->rx_bytes = hw_stats->rx_bytes;
995 storage->tx_bytes = hw_stats->tx_bytes;
996 storage->collisions = hw_stats->tx_collisions;
997 storage->rx_length_errors = hw_stats->rx_short_errors +
998 hw_stats->rx_long_errors;
999 storage->rx_over_errors = hw_stats->rx_overflow;
1000 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1001 storage->rx_errors = hw_stats->rx_checksum_errors;
1002 storage->tx_aborted_errors = hw_stats->tx_skip;
1003 } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
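/* Standard u64_stats seqcount pattern: the snapshot above is re-read whenever
 * a writer (the stats refresh in mtk_stats_update_mac()) raced with the
 * reader, keeping the 64-bit counters coherent on 32-bit SoCs.
 */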
1005 storage->tx_errors = dev->stats.tx_errors;
1006 storage->rx_dropped = dev->stats.rx_dropped;
1007 storage->tx_dropped = dev->stats.tx_dropped;
1010 static inline int mtk_max_frag_size(int mtu)
1012 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1013 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1014 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1016 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1017 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1020 static inline int mtk_max_buf_size(int frag_size)
1022 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1023 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1025 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
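/* Rough sketch of the RX buffer budget implied by the two helpers above:
 *
 *	frag_size = SKB_DATA_ALIGN(MTK_RX_HLEN + mtu)
 *		  + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	buf_size  = frag_size - NET_SKB_PAD - NET_IP_ALIGN
 *		  - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *
 * i.e. buf_size is the payload room left once headroom and the shared_info
 * tail are carved out of the fragment.
 */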
1030 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1031 struct mtk_rx_dma_v2 *dma_rxd)
1033 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1034 if (!(rxd->rxd2 & RX_DMA_DONE))
1037 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1038 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1039 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1040 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
1041 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1042 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1048 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1050 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1053 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1056 return (void *)data;
1059 /* the qdma core needs scratch memory to be set up */
1060 static int mtk_init_fq_dma(struct mtk_eth *eth)
1062 const struct mtk_soc_data *soc = eth->soc;
1063 dma_addr_t phy_ring_tail;
1064 int cnt = MTK_QDMA_RING_SIZE;
1065 dma_addr_t dma_addr;
1068 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1069 cnt * soc->txrx.txd_size,
1070 &eth->phy_scratch_ring,
1072 if (unlikely(!eth->scratch_ring))
1075 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1076 if (unlikely(!eth->scratch_head))
1079 dma_addr = dma_map_single(eth->dma_dev,
1080 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
1082 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1085 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
1087 for (i = 0; i < cnt; i++) {
1088 struct mtk_tx_dma_v2 *txd;
1090 txd = eth->scratch_ring + i * soc->txrx.txd_size;
1091 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1093 txd->txd2 = eth->phy_scratch_ring +
1094 (i + 1) * soc->txrx.txd_size;
1096 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1098 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
1106 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1107 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1108 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1109 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
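/* Sketch of the free-queue chain built above (fields as used in this
 * function): each scratch descriptor i carries
 *
 *	txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE	(its buffer chunk)
 *	txd2 = phy_scratch_ring + (i + 1) * txd_size	(next descriptor)
 *	txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE)
 *
 * and fq_head/fq_tail/fq_count/fq_blen hand the whole chain to the QDMA
 * engine as its scratch buffer pool.
 */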
1114 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1116 return ring->dma + (desc - ring->phys);
1119 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1120 void *txd, u32 txd_size)
1122 int idx = (txd - ring->dma) / txd_size;
1124 return &ring->buf[idx];
1127 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1128 struct mtk_tx_dma *dma)
1130 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1133 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1135 return (dma - ring->dma) / txd_size;
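/* Descriptor addressing: QDMA descriptors are referenced by DMA address, so
 * virt <-> index translation is a plain offset from the ring base. On the
 * PDMA-only SoCs where qdma_to_pdma() is actually used the descriptor sizes
 * match, so it effectively computes:
 *
 *	idx      = (txd - ring->dma) / txd_size;
 *	txd_pdma = &ring->dma_pdma[idx];
 */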
1138 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1139 struct xdp_frame_bulk *bq, bool napi)
1141 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1142 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1143 dma_unmap_single(eth->dma_dev,
1144 dma_unmap_addr(tx_buf, dma_addr0),
1145 dma_unmap_len(tx_buf, dma_len0),
1147 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1148 dma_unmap_page(eth->dma_dev,
1149 dma_unmap_addr(tx_buf, dma_addr0),
1150 dma_unmap_len(tx_buf, dma_len0),
1154 if (dma_unmap_len(tx_buf, dma_len0)) {
1155 dma_unmap_page(eth->dma_dev,
1156 dma_unmap_addr(tx_buf, dma_addr0),
1157 dma_unmap_len(tx_buf, dma_len0),
1161 if (dma_unmap_len(tx_buf, dma_len1)) {
1162 dma_unmap_page(eth->dma_dev,
1163 dma_unmap_addr(tx_buf, dma_addr1),
1164 dma_unmap_len(tx_buf, dma_len1),
1169 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1170 if (tx_buf->type == MTK_TYPE_SKB) {
1171 struct sk_buff *skb = tx_buf->data;
1174 napi_consume_skb(skb, napi);
1176 dev_kfree_skb_any(skb);
1178 struct xdp_frame *xdpf = tx_buf->data;
1180 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1181 xdp_return_frame_rx_napi(xdpf);
1183 xdp_return_frame_bulk(xdpf, bq);
1185 xdp_return_frame(xdpf);
1189 tx_buf->data = NULL;
1192 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1193 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1194 size_t size, int idx)
1196 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1197 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1198 dma_unmap_len_set(tx_buf, dma_len0, size);
1201 txd->txd3 = mapped_addr;
1202 txd->txd2 |= TX_DMA_PLEN1(size);
1203 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1204 dma_unmap_len_set(tx_buf, dma_len1, size);
1206 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1207 txd->txd1 = mapped_addr;
1208 txd->txd2 = TX_DMA_PLEN0(size);
1209 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1210 dma_unmap_len_set(tx_buf, dma_len0, size);
1215 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1216 struct mtk_tx_dma_desc_info *info)
1218 struct mtk_mac *mac = netdev_priv(dev);
1219 struct mtk_eth *eth = mac->hw;
1220 struct mtk_tx_dma *desc = txd;
1223 WRITE_ONCE(desc->txd1, info->addr);
1225 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1226 FIELD_PREP(TX_DMA_PQID, info->qid);
1229 WRITE_ONCE(desc->txd3, data);
1231 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1235 /* tx checksum offload */
1237 data |= TX_DMA_CHKSUM;
1238 /* vlan header offload */
1240 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1242 WRITE_ONCE(desc->txd4, data);
1245 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1246 struct mtk_tx_dma_desc_info *info)
1248 struct mtk_mac *mac = netdev_priv(dev);
1249 struct mtk_tx_dma_v2 *desc = txd;
1250 struct mtk_eth *eth = mac->hw;
1253 WRITE_ONCE(desc->txd1, info->addr);
1255 data = TX_DMA_PLEN0(info->size);
1258 WRITE_ONCE(desc->txd3, data);
1260 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
1261 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1262 WRITE_ONCE(desc->txd4, data);
1267 data |= TX_DMA_TSO_V2;
1268 /* tx checksum offload */
1270 data |= TX_DMA_CHKSUM_V2;
1272 WRITE_ONCE(desc->txd5, data);
1275 if (info->first && info->vlan)
1276 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1277 WRITE_ONCE(desc->txd6, data);
1279 WRITE_ONCE(desc->txd7, 0);
1280 WRITE_ONCE(desc->txd8, 0);
1283 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1284 struct mtk_tx_dma_desc_info *info)
1286 struct mtk_mac *mac = netdev_priv(dev);
1287 struct mtk_eth *eth = mac->hw;
1289 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1290 mtk_tx_set_dma_desc_v2(dev, txd, info);
1292 mtk_tx_set_dma_desc_v1(dev, txd, info);
1295 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1296 int tx_num, struct mtk_tx_ring *ring, bool gso)
1298 struct mtk_tx_dma_desc_info txd_info = {
1299 .size = skb_headlen(skb),
1301 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1302 .vlan = skb_vlan_tag_present(skb),
1303 .qid = skb_get_queue_mapping(skb),
1304 .vlan_tci = skb_vlan_tag_get(skb),
1306 .last = !skb_is_nonlinear(skb),
1308 struct netdev_queue *txq;
1309 struct mtk_mac *mac = netdev_priv(dev);
1310 struct mtk_eth *eth = mac->hw;
1311 const struct mtk_soc_data *soc = eth->soc;
1312 struct mtk_tx_dma *itxd, *txd;
1313 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1314 struct mtk_tx_buf *itx_buf, *tx_buf;
1316 int queue = skb_get_queue_mapping(skb);
1319 txq = netdev_get_tx_queue(dev, queue);
1320 itxd = ring->next_free;
1321 itxd_pdma = qdma_to_pdma(ring, itxd);
1322 if (itxd == ring->last_free)
1325 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1326 memset(itx_buf, 0, sizeof(*itx_buf));
1328 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1330 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1333 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1335 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1336 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1337 MTK_TX_FLAGS_FPORT1;
1338 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1343 txd_pdma = qdma_to_pdma(ring, txd);
1345 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1346 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1347 unsigned int offset = 0;
1348 int frag_size = skb_frag_size(frag);
1351 bool new_desc = true;
1353 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1355 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1356 txd_pdma = qdma_to_pdma(ring, txd);
1357 if (txd == ring->last_free)
1365 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1366 txd_info.size = min_t(unsigned int, frag_size,
1367 soc->txrx.dma_max_len);
1368 txd_info.qid = queue;
1369 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1370 !(frag_size - txd_info.size);
1371 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1372 offset, txd_info.size,
1374 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1377 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1379 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1380 soc->txrx.txd_size);
1382 memset(tx_buf, 0, sizeof(*tx_buf));
1383 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1384 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1385 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1386 MTK_TX_FLAGS_FPORT1;
1388 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1389 txd_info.size, k++);
1391 frag_size -= txd_info.size;
1392 offset += txd_info.size;
1396 /* store skb for cleanup */
1397 itx_buf->type = MTK_TYPE_SKB;
1398 itx_buf->data = skb;
1400 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1402 txd_pdma->txd2 |= TX_DMA_LS0;
1404 txd_pdma->txd2 |= TX_DMA_LS1;
1407 netdev_tx_sent_queue(txq, skb->len);
1408 skb_tx_timestamp(skb);
1410 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1411 atomic_sub(n_desc, &ring->free_count);
1413 /* make sure that all changes to the dma ring are flushed before we
1418 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1419 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1420 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1424 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
1426 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1433 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
1436 mtk_tx_unmap(eth, tx_buf, NULL, false);
1438 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1439 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1440 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1442 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1443 itxd_pdma = qdma_to_pdma(ring, itxd);
1444 } while (itxd != txd);
1449 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1454 if (skb_is_gso(skb)) {
1455 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1456 frag = &skb_shinfo(skb)->frags[i];
1457 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1458 eth->soc->txrx.dma_max_len);
1461 nfrags += skb_shinfo(skb)->nr_frags;
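/* Example of the descriptor budget: for GSO each fragment may need several
 * descriptors because one descriptor carries at most dma_max_len bytes, so a
 * 32 KiB fragment with an (assumed) 16 KiB dma_max_len counts as
 * DIV_ROUND_UP(32768, 16384) = 2 descriptors; otherwise it is simply one
 * descriptor per fragment plus one for the linear head.
 */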
1467 static int mtk_queue_stopped(struct mtk_eth *eth)
1471 for (i = 0; i < MTK_MAC_COUNT; i++) {
1472 if (!eth->netdev[i])
1474 if (netif_queue_stopped(eth->netdev[i]))
1481 static void mtk_wake_queue(struct mtk_eth *eth)
1485 for (i = 0; i < MTK_MAC_COUNT; i++) {
1486 if (!eth->netdev[i])
1488 netif_tx_wake_all_queues(eth->netdev[i]);
1492 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1494 struct mtk_mac *mac = netdev_priv(dev);
1495 struct mtk_eth *eth = mac->hw;
1496 struct mtk_tx_ring *ring = &eth->tx_ring;
1497 struct net_device_stats *stats = &dev->stats;
1501 /* normally we can rely on the stack not calling this more than once,
1502 * however we have 2 queues running on the same ring so we need to lock
1505 spin_lock(&eth->page_lock);
1507 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1510 tx_num = mtk_cal_txd_req(eth, skb);
1511 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1512 netif_tx_stop_all_queues(dev);
1513 netif_err(eth, tx_queued, dev,
1514 "Tx Ring full when queue awake!\n");
1515 spin_unlock(&eth->page_lock);
1516 return NETDEV_TX_BUSY;
1519 /* TSO: fill MSS info in tcp checksum field */
1520 if (skb_is_gso(skb)) {
1521 if (skb_cow_head(skb, 0)) {
1522 netif_warn(eth, tx_err, dev,
1523 "GSO expand head fail.\n");
1527 if (skb_shinfo(skb)->gso_type &
1528 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1530 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1534 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1537 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1538 netif_tx_stop_all_queues(dev);
1540 spin_unlock(&eth->page_lock);
1542 return NETDEV_TX_OK;
1545 spin_unlock(&eth->page_lock);
1546 stats->tx_dropped++;
1547 dev_kfree_skb_any(skb);
1548 return NETDEV_TX_OK;
1551 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1554 struct mtk_rx_ring *ring;
1558 return &eth->rx_ring[0];
1560 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1561 struct mtk_rx_dma *rxd;
1563 ring = &eth->rx_ring[i];
1564 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1565 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1566 if (rxd->rxd2 & RX_DMA_DONE) {
1567 ring->calc_idx_update = true;
1575 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1577 struct mtk_rx_ring *ring;
1581 ring = &eth->rx_ring[0];
1582 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1584 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1585 ring = &eth->rx_ring[i];
1586 if (ring->calc_idx_update) {
1587 ring->calc_idx_update = false;
1588 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1594 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1596 return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
1599 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1600 struct xdp_rxq_info *xdp_q,
1603 struct page_pool_params pp_params = {
1605 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1607 .nid = NUMA_NO_NODE,
1608 .dev = eth->dma_dev,
1609 .offset = MTK_PP_HEADROOM,
1610 .max_len = MTK_PP_MAX_BUF_SIZE,
1612 struct page_pool *pp;
1615 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1617 pp = page_pool_create(&pp_params);
1621 err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
1622 eth->rx_napi.napi_id, PAGE_SIZE);
1626 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1628 goto err_unregister_rxq;
1633 xdp_rxq_info_unreg(xdp_q);
1635 page_pool_destroy(pp);
1637 return ERR_PTR(err);
1640 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1645 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1649 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1650 return page_address(page);
1653 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1655 if (ring->page_pool)
1656 page_pool_put_full_page(ring->page_pool,
1657 virt_to_head_page(data), napi);
1659 skb_free_frag(data);
1662 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1663 struct mtk_tx_dma_desc_info *txd_info,
1664 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1665 void *data, u16 headroom, int index, bool dma_map)
1667 struct mtk_tx_ring *ring = &eth->tx_ring;
1668 struct mtk_mac *mac = netdev_priv(dev);
1669 struct mtk_tx_dma *txd_pdma;
1671 if (dma_map) { /* ndo_xdp_xmit */
1672 txd_info->addr = dma_map_single(eth->dma_dev, data,
1673 txd_info->size, DMA_TO_DEVICE);
1674 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1677 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1679 struct page *page = virt_to_head_page(data);
1681 txd_info->addr = page_pool_get_dma_addr(page) +
1682 sizeof(struct xdp_frame) + headroom;
1683 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1684 txd_info->size, DMA_BIDIRECTIONAL);
1686 mtk_tx_set_dma_desc(dev, txd, txd_info);
1688 tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
1689 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1690 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1692 txd_pdma = qdma_to_pdma(ring, txd);
1693 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1699 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1700 struct net_device *dev, bool dma_map)
1702 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1703 const struct mtk_soc_data *soc = eth->soc;
1704 struct mtk_tx_ring *ring = &eth->tx_ring;
1705 struct mtk_mac *mac = netdev_priv(dev);
1706 struct mtk_tx_dma_desc_info txd_info = {
1709 .last = !xdp_frame_has_frags(xdpf),
1712 int err, index = 0, n_desc = 1, nr_frags;
1713 struct mtk_tx_buf *htx_buf, *tx_buf;
1714 struct mtk_tx_dma *htxd, *txd;
1715 void *data = xdpf->data;
1717 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1720 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1721 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1724 spin_lock(&eth->page_lock);
1726 txd = ring->next_free;
1727 if (txd == ring->last_free) {
1728 spin_unlock(&eth->page_lock);
1733 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
1734 memset(tx_buf, 0, sizeof(*tx_buf));
1738 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1739 data, xdpf->headroom, index, dma_map);
1746 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1747 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1748 if (txd == ring->last_free)
1751 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1752 soc->txrx.txd_size);
1753 memset(tx_buf, 0, sizeof(*tx_buf));
1757 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1758 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1759 txd_info.last = index + 1 == nr_frags;
1760 txd_info.qid = mac->id;
1761 data = skb_frag_address(&sinfo->frags[index]);
1765 /* store xdpf for cleanup */
1766 htx_buf->data = xdpf;
1768 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1769 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1772 txd_pdma->txd2 |= TX_DMA_LS0;
1774 txd_pdma->txd2 |= TX_DMA_LS1;
1777 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1778 atomic_sub(n_desc, &ring->free_count);
1780 /* make sure that all changes to the dma ring are flushed before we
1785 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1786 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1790 idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
1791 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1792 MT7628_TX_CTX_IDX0);
1795 spin_unlock(&eth->page_lock);
1800 while (htxd != txd) {
1801 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
1802 mtk_tx_unmap(eth, tx_buf, NULL, false);
1804 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1805 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1806 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1808 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1811 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1814 spin_unlock(&eth->page_lock);
1819 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1820 struct xdp_frame **frames, u32 flags)
1822 struct mtk_mac *mac = netdev_priv(dev);
1823 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1824 struct mtk_eth *eth = mac->hw;
1827 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1830 for (i = 0; i < num_frame; i++) {
1831 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1836 u64_stats_update_begin(&hw_stats->syncp);
1837 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1838 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1839 u64_stats_update_end(&hw_stats->syncp);
1844 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1845 struct xdp_buff *xdp, struct net_device *dev)
1847 struct mtk_mac *mac = netdev_priv(dev);
1848 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1849 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1850 struct bpf_prog *prog;
1855 prog = rcu_dereference(eth->prog);
1859 act = bpf_prog_run_xdp(prog, xdp);
1862 count = &hw_stats->xdp_stats.rx_xdp_pass;
1865 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1870 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1873 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1875 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1876 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1881 count = &hw_stats->xdp_stats.rx_xdp_tx;
1885 bpf_warn_invalid_xdp_action(dev, prog, act);
1888 trace_xdp_exception(dev, prog, act);
1894 page_pool_put_full_page(ring->page_pool,
1895 virt_to_head_page(xdp->data), true);
1898 u64_stats_update_begin(&hw_stats->syncp);
1899 *count = *count + 1;
1900 u64_stats_update_end(&hw_stats->syncp);
1907 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1908 struct mtk_eth *eth)
1910 struct dim_sample dim_sample = {};
1911 struct mtk_rx_ring *ring;
1912 bool xdp_flush = false;
1914 struct sk_buff *skb;
1915 u8 *data, *new_data;
1916 struct mtk_rx_dma_v2 *rxd, trxd;
1917 int done = 0, bytes = 0;
1919 while (done < budget) {
1920 unsigned int pktlen, *rxdcsum;
1921 bool has_hwaccel_tag = false;
1922 struct net_device *netdev;
1923 u16 vlan_proto, vlan_tci;
1924 dma_addr_t dma_addr;
1928 ring = mtk_get_rx_ring(eth);
1929 if (unlikely(!ring))
1932 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1933 rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
1934 data = ring->data[idx];
1936 if (!mtk_rx_get_desc(eth, &trxd, rxd))
1939 /* find out which mac the packet came from. values start at 1 */
1940 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
1941 mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
1942 else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
1943 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1944 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
1946 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1950 netdev = eth->netdev[mac];
1952 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1955 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1957 /* alloc new buffer */
1958 if (ring->page_pool) {
1959 struct page *page = virt_to_head_page(data);
1960 struct xdp_buff xdp;
1963 new_data = mtk_page_pool_get_buff(ring->page_pool,
1966 if (unlikely(!new_data)) {
1967 netdev->stats.rx_dropped++;
1971 dma_sync_single_for_cpu(eth->dma_dev,
1972 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
1973 pktlen, page_pool_get_dma_dir(ring->page_pool));
1975 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
1976 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
1978 xdp_buff_clear_frags_flag(&xdp);
1980 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
1981 if (ret == XDP_REDIRECT)
1984 if (ret != XDP_PASS)
1987 skb = build_skb(data, PAGE_SIZE);
1988 if (unlikely(!skb)) {
1989 page_pool_put_full_page(ring->page_pool,
1991 netdev->stats.rx_dropped++;
1995 skb_reserve(skb, xdp.data - xdp.data_hard_start);
1996 skb_put(skb, xdp.data_end - xdp.data);
1997 skb_mark_for_recycle(skb);
1999 if (ring->frag_size <= PAGE_SIZE)
2000 new_data = napi_alloc_frag(ring->frag_size);
2002 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2004 if (unlikely(!new_data)) {
2005 netdev->stats.rx_dropped++;
2009 dma_addr = dma_map_single(eth->dma_dev,
2010 new_data + NET_SKB_PAD + eth->ip_align,
2011 ring->buf_size, DMA_FROM_DEVICE);
2012 if (unlikely(dma_mapping_error(eth->dma_dev,
2014 skb_free_frag(new_data);
2015 netdev->stats.rx_dropped++;
2019 dma_unmap_single(eth->dma_dev, trxd.rxd1,
2020 ring->buf_size, DMA_FROM_DEVICE);
2022 skb = build_skb(data, ring->frag_size);
2023 if (unlikely(!skb)) {
2024 netdev->stats.rx_dropped++;
2025 skb_free_frag(data);
2029 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2030 skb_put(skb, pktlen);
2036 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2037 reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2038 hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2039 if (hash != MTK_RXD5_FOE_ENTRY)
2040 skb_set_hash(skb, jhash_1word(hash, 0),
2042 rxdcsum = &trxd.rxd3;
2044 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2045 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2046 if (hash != MTK_RXD4_FOE_ENTRY)
2047 skb_set_hash(skb, jhash_1word(hash, 0),
2049 rxdcsum = &trxd.rxd4;
2052 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
2053 skb->ip_summed = CHECKSUM_UNNECESSARY;
2055 skb_checksum_none_assert(skb);
2056 skb->protocol = eth_type_trans(skb, netdev);
2058 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2059 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2060 if (trxd.rxd3 & RX_DMA_VTAG_V2) {
2061 vlan_proto = RX_DMA_VPID(trxd.rxd4);
2062 vlan_tci = RX_DMA_VID(trxd.rxd4);
2063 has_hwaccel_tag = true;
2065 } else if (trxd.rxd2 & RX_DMA_VTAG) {
2066 vlan_proto = RX_DMA_VPID(trxd.rxd3);
2067 vlan_tci = RX_DMA_VID(trxd.rxd3);
2068 has_hwaccel_tag = true;
2072 /* When using VLAN untagging in combination with DSA, the
2073 * hardware treats the MTK special tag as a VLAN and untags it.
2075 if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
2076 unsigned int port = vlan_proto & GENMASK(2, 0);
2078 if (port < ARRAY_SIZE(eth->dsa_meta) &&
2079 eth->dsa_meta[port])
2080 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2081 } else if (has_hwaccel_tag) {
2082 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
2085 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2086 mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2088 skb_record_rx_queue(skb, 0);
2089 napi_gro_receive(napi, skb);
2092 ring->data[idx] = new_data;
2093 rxd->rxd1 = (unsigned int)dma_addr;
2095 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2096 rxd->rxd2 = RX_DMA_LSO;
2098 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2100 ring->calc_idx = idx;
2106 /* make sure that all changes to the dma ring are flushed before
2110 mtk_update_rx_cpu_idx(eth);
2113 eth->rx_packets += done;
2114 eth->rx_bytes += bytes;
2115 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2117 net_dim(&eth->rx_dim, dim_sample);
2125 struct mtk_poll_state {
2126 struct netdev_queue *txq;
2133 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2134 struct sk_buff *skb)
2136 struct netdev_queue *txq;
2137 struct net_device *dev;
2138 unsigned int bytes = skb->len;
2142 eth->tx_bytes += bytes;
2144 dev = eth->netdev[mac];
2148 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2149 if (state->txq == txq) {
2151 state->bytes += bytes;
2156 netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2160 state->bytes = bytes;
2163 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2164 struct mtk_poll_state *state)
2166 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2167 struct mtk_tx_ring *ring = &eth->tx_ring;
2168 struct mtk_tx_buf *tx_buf;
2169 struct xdp_frame_bulk bq;
2170 struct mtk_tx_dma *desc;
2173 cpu = ring->last_free_ptr;
2174 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2176 desc = mtk_qdma_phys_to_virt(ring, cpu);
2177 xdp_frame_bulk_init(&bq);
2179 while ((cpu != dma) && budget) {
2180 u32 next_cpu = desc->txd2;
2183 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2184 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2187 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2188 eth->soc->txrx.txd_size);
2189 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
2195 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2196 if (tx_buf->type == MTK_TYPE_SKB)
2197 mtk_poll_tx_done(eth, state, mac, tx_buf->data);
2201 mtk_tx_unmap(eth, tx_buf, &bq, true);
2203 ring->last_free = desc;
2204 atomic_inc(&ring->free_count);
2208 xdp_flush_frame_bulk(&bq);
2210 ring->last_free_ptr = cpu;
2211 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2216 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2217 struct mtk_poll_state *state)
2219 struct mtk_tx_ring *ring = &eth->tx_ring;
2220 struct mtk_tx_buf *tx_buf;
2221 struct xdp_frame_bulk bq;
2222 struct mtk_tx_dma *desc;
2225 cpu = ring->cpu_idx;
2226 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2227 xdp_frame_bulk_init(&bq);
2229 while ((cpu != dma) && budget) {
2230 tx_buf = &ring->buf[cpu];
2234 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2235 if (tx_buf->type == MTK_TYPE_SKB)
2236 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2239 mtk_tx_unmap(eth, tx_buf, &bq, true);
2241 desc = ring->dma + cpu * eth->soc->txrx.txd_size;
2242 ring->last_free = desc;
2243 atomic_inc(&ring->free_count);
2245 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2247 xdp_flush_frame_bulk(&bq);
2249 ring->cpu_idx = cpu;
2254 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2256 struct mtk_tx_ring *ring = &eth->tx_ring;
2257 struct dim_sample dim_sample = {};
2258 struct mtk_poll_state state = {};
2260 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2261 budget = mtk_poll_tx_qdma(eth, budget, &state);
2263 budget = mtk_poll_tx_pdma(eth, budget, &state);
2266 netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2268 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2270 net_dim(&eth->tx_dim, dim_sample);
2272 if (mtk_queue_stopped(eth) &&
2273 (atomic_read(&ring->free_count) > ring->thresh))
2274 mtk_wake_queue(eth);
2279 static void mtk_handle_status_irq(struct mtk_eth *eth)
2281 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2283 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2284 mtk_stats_update(eth);
2285 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2290 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2292 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2293 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2296 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2297 mtk_handle_status_irq(eth);
2298 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2299 tx_done = mtk_poll_tx(eth, budget);
2301 if (unlikely(netif_msg_intr(eth))) {
2303 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2304 mtk_r32(eth, reg_map->tx_irq_status),
2305 mtk_r32(eth, reg_map->tx_irq_mask));
2308 if (tx_done == budget)
2311 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2314 if (napi_complete_done(napi, tx_done))
2315 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2320 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2322 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2323 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2324 int rx_done_total = 0;
2326 mtk_handle_status_irq(eth);
2331 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
2332 reg_map->pdma.irq_status);
2333 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2334 rx_done_total += rx_done;
2336 if (unlikely(netif_msg_intr(eth))) {
2338 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2339 mtk_r32(eth, reg_map->pdma.irq_status),
2340 mtk_r32(eth, reg_map->pdma.irq_mask));
2343 if (rx_done_total == budget)
2346 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2347 eth->soc->txrx.rx_irq_done_mask);
2349 if (napi_complete_done(napi, rx_done_total))
2350 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
2352 return rx_done_total;
2355 static int mtk_tx_alloc(struct mtk_eth *eth)
2357 const struct mtk_soc_data *soc = eth->soc;
2358 struct mtk_tx_ring *ring = &eth->tx_ring;
2359 int i, sz = soc->txrx.txd_size;
2360 struct mtk_tx_dma_v2 *txd;
2364 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2365 ring_size = MTK_QDMA_RING_SIZE;
2367 ring_size = MTK_DMA_SIZE;
2369 ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2374 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2375 &ring->phys, GFP_KERNEL);
2379 for (i = 0; i < ring_size; i++) {
2380 int next = (i + 1) % ring_size;
2381 u32 next_ptr = ring->phys + next * sz;
2383 txd = ring->dma + i * sz;
2384 txd->txd2 = next_ptr;
2385 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2387 if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
2395 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2396 * only as the framework. The real HW descriptors are the PDMA
2397 * descriptors in ring->dma_pdma.
2399 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2400 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2401 &ring->phys_pdma, GFP_KERNEL);
2402 if (!ring->dma_pdma)
2405 for (i = 0; i < ring_size; i++) {
2406 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2407 ring->dma_pdma[i].txd4 = 0;
2411 ring->dma_size = ring_size;
2412 atomic_set(&ring->free_count, ring_size - 2);
2413 ring->next_free = ring->dma;
2414 ring->last_free = (void *)txd;
2415 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2416 ring->thresh = MAX_SKB_FRAGS;
2418 /* make sure that all changes to the dma ring are flushed before we
2423 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2424 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2425 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2427 ring->phys + ((ring_size - 1) * sz),
2428 soc->reg_map->qdma.crx_ptr);
2429 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2431 for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2432 val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2433 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2435 val = MTK_QTX_SCH_MIN_RATE_EN |
2436 /* minimum: 10 Mbps */
2437 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2438 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2439 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2440 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2441 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2442 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2443 ofs += MTK_QTX_OFFSET;
2445 val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2446 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2447 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
2448 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2450 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2451 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2452 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2453 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
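/* Summary of the ring wiring above: txd2 of every descriptor points at the
 * physical address of the next one, forming a circular list; for QDMA the
 * ctx/dtx pointers start at the ring base and crx/drx at the last descriptor,
 * and two descriptors are held back (free_count = ring_size - 2), presumably
 * so next_free can never collide with last_free.
 */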
2462 static void mtk_tx_clean(struct mtk_eth *eth)
2464 const struct mtk_soc_data *soc = eth->soc;
2465 struct mtk_tx_ring *ring = &eth->tx_ring;
2469 for (i = 0; i < ring->dma_size; i++)
2470 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2476 dma_free_coherent(eth->dma_dev,
2477 ring->dma_size * soc->txrx.txd_size,
2478 ring->dma, ring->phys);
2482 if (ring->dma_pdma) {
2483 dma_free_coherent(eth->dma_dev,
2484 ring->dma_size * soc->txrx.txd_size,
2485 ring->dma_pdma, ring->phys_pdma);
2486 ring->dma_pdma = NULL;
2490 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2492 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2493 struct mtk_rx_ring *ring;
2494 int rx_data_len, rx_dma_size;
2497 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2500 ring = &eth->rx_ring_qdma;
2502 ring = &eth->rx_ring[ring_no];
2505 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2506 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2507 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2509 rx_data_len = ETH_DATA_LEN;
2510 rx_dma_size = MTK_DMA_SIZE;
2513 ring->frag_size = mtk_max_frag_size(rx_data_len);
2514 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2515 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2520 if (mtk_page_pool_enabled(eth)) {
2521 struct page_pool *pp;
2523 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2528 ring->page_pool = pp;
2531 ring->dma = dma_alloc_coherent(eth->dma_dev,
2532 rx_dma_size * eth->soc->txrx.rxd_size,
2533 &ring->phys, GFP_KERNEL);
2537 for (i = 0; i < rx_dma_size; i++) {
2538 struct mtk_rx_dma_v2 *rxd;
2539 dma_addr_t dma_addr;
2542 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2543 if (ring->page_pool) {
2544 data = mtk_page_pool_get_buff(ring->page_pool,
2545 &dma_addr, GFP_KERNEL);
2549 if (ring->frag_size <= PAGE_SIZE)
2550 data = netdev_alloc_frag(ring->frag_size);
2552 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2557 dma_addr = dma_map_single(eth->dma_dev,
2558 data + NET_SKB_PAD + eth->ip_align,
2559 ring->buf_size, DMA_FROM_DEVICE);
2560 if (unlikely(dma_mapping_error(eth->dma_dev,
2562 skb_free_frag(data);
2566 rxd->rxd1 = (unsigned int)dma_addr;
2567 ring->data[i] = data;
2569 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2570 rxd->rxd2 = RX_DMA_LSO;
2572 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2576 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
2584 ring->dma_size = rx_dma_size;
2585 ring->calc_idx_update = false;
2586 ring->calc_idx = rx_dma_size - 1;
2587 if (rx_flag == MTK_RX_FLAGS_QDMA)
2588 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2589 ring_no * MTK_QRX_OFFSET;
2591 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2592 ring_no * MTK_QRX_OFFSET;
2593 /* make sure that all changes to the dma ring are flushed before we
2594 * continue
2595 */
2596 wmb();
2598 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2599 mtk_w32(eth, ring->phys,
2600 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2601 mtk_w32(eth, rx_dma_size,
2602 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2603 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2604 reg_map->qdma.rst_idx);
2606 mtk_w32(eth, ring->phys,
2607 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2608 mtk_w32(eth, rx_dma_size,
2609 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2610 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2611 reg_map->pdma.rst_idx);
2613 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2618 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
2622 if (ring->data && ring->dma) {
2623 for (i = 0; i < ring->dma_size; i++) {
2624 struct mtk_rx_dma *rxd;
2629 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
2633 dma_unmap_single(eth->dma_dev, rxd->rxd1,
2634 ring->buf_size, DMA_FROM_DEVICE);
2635 mtk_rx_put_buff(ring, ring->data[i], false);
2642 dma_free_coherent(eth->dma_dev,
2643 ring->dma_size * eth->soc->txrx.rxd_size,
2644 ring->dma, ring->phys);
2648 if (ring->page_pool) {
2649 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2650 xdp_rxq_info_unreg(&ring->xdp_q);
2651 page_pool_destroy(ring->page_pool);
2652 ring->page_pool = NULL;
2656 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2659 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2660 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2662 /* set LRO rings to auto-learn modes */
2663 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2665 /* validate LRO ring */
2666 ring_ctrl_dw2 |= MTK_RING_VLD;
2668 /* set AGE timer (unit: 20us) */
2669 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2670 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2672 /* set max AGG timer (unit: 20us) */
2673 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2675 /* set max LRO AGG count */
2676 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2677 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2679 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2680 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2681 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2682 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2685 /* IPv4 checksum update enable */
2686 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2688 /* switch priority comparison to packet count mode */
2689 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2691 /* bandwidth threshold setting */
2692 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2694 /* auto-learn score delta setting */
2695 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2697 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2698 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2699 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
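/* Worked figure (hedged): with the 20 us base unit noted above, a 1 second
 * refresh interval corresponds to 1,000,000 us / 20 us = 50,000 ticks, which
 * is presumably what MTK_HW_LRO_REFRESH_TIME encodes, scaled by
 * MTK_HW_LRO_TIMER_UNIT in the upper half-word.
 */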
2701 /* set HW LRO mode & the max aggregation count for rx packets */
2702 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2704 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2705 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2708 lro_ctrl_dw0 |= MTK_LRO_EN;
2710 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2711 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2716 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2721 /* relinquish lro rings, flush aggregated packets */
2722 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2724 /* wait for relinquishment to complete */
2725 for (i = 0; i < 10; i++) {
2726 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2727 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2734 /* invalidate lro rings */
2735 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2736 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2738 /* disable HW LRO */
2739 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2742 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2746 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2748 /* invalidate the IP setting */
2749 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2751 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2753 /* validate the IP setting */
2754 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2757 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2761 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2763 /* invalidate the IP setting */
2764 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2766 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2769 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2774 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2775 if (mac->hwlro_ip[i])
2782 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2783 struct ethtool_rxnfc *cmd)
2785 struct ethtool_rx_flow_spec *fsp =
2786 (struct ethtool_rx_flow_spec *)&cmd->fs;
2787 struct mtk_mac *mac = netdev_priv(dev);
2788 struct mtk_eth *eth = mac->hw;
2791 if ((fsp->flow_type != TCP_V4_FLOW) ||
2792 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2793 (fsp->location > 1))
2796 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2797 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2799 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2801 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2806 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2807 struct ethtool_rxnfc *cmd)
2809 struct ethtool_rx_flow_spec *fsp =
2810 (struct ethtool_rx_flow_spec *)&cmd->fs;
2811 struct mtk_mac *mac = netdev_priv(dev);
2812 struct mtk_eth *eth = mac->hw;
2815 if (fsp->location > 1)
2818 mac->hwlro_ip[fsp->location] = 0;
2819 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2821 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2823 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
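/* Usage note (the exact command line is an assumption, not taken from this
 * file): the add/del handlers above back the ETHTOOL_SRXCLSRLINS and
 * ETHTOOL_SRXCLSRLDEL ioctls, so an LRO destination IP would typically be
 * programmed from userspace with something like
 *
 *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 loc 0
 *
 * where only locations 0 and 1 are accepted per MAC, as checked above.
 */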
2828 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2830 struct mtk_mac *mac = netdev_priv(dev);
2831 struct mtk_eth *eth = mac->hw;
2834 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2835 mac->hwlro_ip[i] = 0;
2836 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2838 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2841 mac->hwlro_ip_cnt = 0;
2844 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2845 struct ethtool_rxnfc *cmd)
2847 struct mtk_mac *mac = netdev_priv(dev);
2848 struct ethtool_rx_flow_spec *fsp =
2849 (struct ethtool_rx_flow_spec *)&cmd->fs;
2851 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2854 /* only the TCP IPv4 destination address is meaningful; other fields are ignored */
2855 fsp->flow_type = TCP_V4_FLOW;
2856 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2857 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2859 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2860 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2861 fsp->h_u.tcp_ip4_spec.psrc = 0;
2862 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2863 fsp->h_u.tcp_ip4_spec.pdst = 0;
2864 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2865 fsp->h_u.tcp_ip4_spec.tos = 0;
2866 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2871 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
2872 struct ethtool_rxnfc *cmd,
2875 struct mtk_mac *mac = netdev_priv(dev);
2879 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2880 if (mac->hwlro_ip[i]) {
2886 cmd->rule_cnt = cnt;
2891 static netdev_features_t mtk_fix_features(struct net_device *dev,
2892 netdev_features_t features)
2894 if (!(features & NETIF_F_LRO)) {
2895 struct mtk_mac *mac = netdev_priv(dev);
2896 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2899 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2901 features |= NETIF_F_LRO;
2908 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2910 struct mtk_mac *mac = netdev_priv(dev);
2911 struct mtk_eth *eth = mac->hw;
2912 netdev_features_t diff = dev->features ^ features;
2915 if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
2916 mtk_hwlro_netdev_disable(dev);
2918 /* Set RX VLAN offloading */
2919 if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
2922 mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
2925 /* sync features with other MAC */
2926 for (i = 0; i < MTK_MAC_COUNT; i++) {
2927 if (!eth->netdev[i] || eth->netdev[i] == dev)
2929 eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2930 eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
2936 /* wait for DMA to finish whatever it is doing before we start using it again */
2937 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2943 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2944 reg = eth->soc->reg_map->qdma.glo_cfg;
2946 reg = eth->soc->reg_map->pdma.glo_cfg;
2948 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
2949 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
2950 5, MTK_DMA_BUSY_TIMEOUT_US);
2952 dev_err(eth->dev, "DMA init timeout\n");
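/* Descriptive note: readx_poll_timeout_atomic() above re-reads the glo_cfg
 * register via __raw_readl() every 5 us until both MTK_RX_DMA_BUSY and
 * MTK_TX_DMA_BUSY are clear, and returns -ETIMEDOUT once
 * MTK_DMA_BUSY_TIMEOUT_US has elapsed, which triggers the error message.
 */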
2957 static int mtk_dma_init(struct mtk_eth *eth)
2962 if (mtk_dma_busy_wait(eth))
2965 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2966 /* QDMA needs scratch memory for internal reordering of the
2967 * fragmented packets
2968 */
2969 err = mtk_init_fq_dma(eth);
2974 err = mtk_tx_alloc(eth);
2978 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2979 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2984 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2989 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2990 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2994 err = mtk_hwlro_rx_init(eth);
2999 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3000 /* Enable random early drop and set drop threshold
3001 * automatically
3002 */
3003 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3004 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3005 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3011 static void mtk_dma_free(struct mtk_eth *eth)
3013 const struct mtk_soc_data *soc = eth->soc;
3016 for (i = 0; i < MTK_MAC_COUNT; i++)
3018 netdev_reset_queue(eth->netdev[i]);
3019 if (eth->scratch_ring) {
3020 dma_free_coherent(eth->dma_dev,
3021 MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
3022 eth->scratch_ring, eth->phy_scratch_ring);
3023 eth->scratch_ring = NULL;
3024 eth->phy_scratch_ring = 0;
3027 mtk_rx_clean(eth, &eth->rx_ring[0]);
3028 mtk_rx_clean(eth, &eth->rx_ring_qdma);
3031 mtk_hwlro_rx_uninit(eth);
3032 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3033 mtk_rx_clean(eth, &eth->rx_ring[i]);
3036 kfree(eth->scratch_head);
3039 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3041 u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3043 return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3044 (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3045 (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3048 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3050 struct mtk_mac *mac = netdev_priv(dev);
3051 struct mtk_eth *eth = mac->hw;
3053 if (test_bit(MTK_RESETTING, &eth->state))
3056 if (!mtk_hw_reset_check(eth))
3059 eth->netdev[mac->id]->stats.tx_errors++;
3060 netif_err(eth, tx_err, dev, "transmit timed out\n");
3062 schedule_work(&eth->pending_work);
3065 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3067 struct mtk_eth *eth = _eth;
3070 if (likely(napi_schedule_prep(&eth->rx_napi))) {
3071 __napi_schedule(&eth->rx_napi);
3072 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3078 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3080 struct mtk_eth *eth = _eth;
3083 if (likely(napi_schedule_prep(&eth->tx_napi))) {
3084 __napi_schedule(&eth->tx_napi);
3085 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3091 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3093 struct mtk_eth *eth = _eth;
3094 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3096 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3097 eth->soc->txrx.rx_irq_done_mask) {
3098 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3099 eth->soc->txrx.rx_irq_done_mask)
3100 mtk_handle_irq_rx(irq, _eth);
3102 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3103 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3104 mtk_handle_irq_tx(irq, _eth);
3110 #ifdef CONFIG_NET_POLL_CONTROLLER
3111 static void mtk_poll_controller(struct net_device *dev)
3113 struct mtk_mac *mac = netdev_priv(dev);
3114 struct mtk_eth *eth = mac->hw;
3116 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3117 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3118 mtk_handle_irq_rx(eth->irq[2], dev);
3119 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3120 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
3124 static int mtk_start_dma(struct mtk_eth *eth)
3126 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3127 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3130 err = mtk_dma_init(eth);
3136 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3137 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3138 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3139 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3140 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3142 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3143 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3144 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3145 MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3147 val |= MTK_RX_BT_32DWORDS;
3148 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3151 MTK_RX_DMA_EN | rx_2b_offset |
3152 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3153 reg_map->pdma.glo_cfg);
3155 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3156 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3157 reg_map->pdma.glo_cfg);
3163 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3167 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3170 for (i = 0; i < MTK_MAC_COUNT; i++) {
3171 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3173 /* by default, set up the forward port to send frames to the PDMA */
3176 /* Enable RX checksum */
3177 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3181 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
3182 val |= MTK_GDMA_SPECIAL_TAG;
3184 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3186 /* Reset and enable PSE */
3187 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3188 mtk_w32(eth, 0, MTK_RST_GL);
3192 static bool mtk_uses_dsa(struct net_device *dev)
3194 #if IS_ENABLED(CONFIG_NET_DSA)
3195 return netdev_uses_dsa(dev) &&
3196 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3202 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3204 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3205 struct mtk_eth *eth = mac->hw;
3206 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3207 struct ethtool_link_ksettings s;
3208 struct net_device *ldev;
3209 struct list_head *iter;
3210 struct dsa_port *dp;
3212 if (event != NETDEV_CHANGE)
3215 netdev_for_each_lower_dev(dev, ldev, iter) {
3216 if (netdev_priv(ldev) == mac)
3223 if (!dsa_slave_dev_check(dev))
3226 if (__ethtool_get_link_ksettings(dev, &s))
3229 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3232 dp = dsa_port_from_netdev(dev);
3233 if (dp->index >= MTK_QDMA_NUM_QUEUES)
3236 if (mac->speed > 0 && mac->speed <= s.base.speed)
3239 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3244 static int mtk_open(struct net_device *dev)
3246 struct mtk_mac *mac = netdev_priv(dev);
3247 struct mtk_eth *eth = mac->hw;
3250 if (mtk_uses_dsa(dev) && !eth->prog) {
3251 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3252 struct metadata_dst *md_dst = eth->dsa_meta[i];
3257 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3262 md_dst->u.port_info.port_id = i;
3263 eth->dsa_meta[i] = md_dst;
3266 /* Hardware special tag parsing needs to be disabled if at least
3267 * one MAC does not use DSA.
3268 */
3269 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3270 val &= ~MTK_CDMP_STAG_EN;
3271 mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3274 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3276 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3281 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3282 if (!refcount_read(&eth->dma_refcnt)) {
3283 const struct mtk_soc_data *soc = eth->soc;
3287 err = mtk_start_dma(eth);
3289 phylink_disconnect_phy(mac->phylink);
3293 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3294 mtk_ppe_start(eth->ppe[i]);
3296 gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3298 mtk_gdm_config(eth, gdm_config);
3300 napi_enable(&eth->tx_napi);
3301 napi_enable(&eth->rx_napi);
3302 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3303 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
3304 refcount_set(&eth->dma_refcnt, 1);
3307 refcount_inc(&eth->dma_refcnt);
3309 phylink_start(mac->phylink);
3310 netif_tx_start_all_queues(dev);
3315 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3320 /* stop the dma engine */
3321 spin_lock_bh(&eth->page_lock);
3322 val = mtk_r32(eth, glo_cfg);
3323 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3325 spin_unlock_bh(&eth->page_lock);
3327 /* wait for dma stop */
3328 for (i = 0; i < 10; i++) {
3329 val = mtk_r32(eth, glo_cfg);
3330 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3338 static int mtk_stop(struct net_device *dev)
3340 struct mtk_mac *mac = netdev_priv(dev);
3341 struct mtk_eth *eth = mac->hw;
3344 phylink_stop(mac->phylink);
3346 netif_tx_disable(dev);
3348 phylink_disconnect_phy(mac->phylink);
3350 /* only shut down DMA if this is the last user */
3351 if (!refcount_dec_and_test(&eth->dma_refcnt))
3354 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3356 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3357 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
3358 napi_disable(&eth->tx_napi);
3359 napi_disable(&eth->rx_napi);
3361 cancel_work_sync(&eth->rx_dim.work);
3362 cancel_work_sync(&eth->tx_dim.work);
3364 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3365 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3366 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3370 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3371 mtk_ppe_stop(eth->ppe[i]);
3376 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3377 struct netlink_ext_ack *extack)
3379 struct mtk_mac *mac = netdev_priv(dev);
3380 struct mtk_eth *eth = mac->hw;
3381 struct bpf_prog *old_prog;
3385 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3389 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3390 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3394 need_update = !!eth->prog != !!prog;
3395 if (netif_running(dev) && need_update)
3398 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3400 bpf_prog_put(old_prog);
3402 if (netif_running(dev) && need_update)
3403 return mtk_open(dev);
3408 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3410 switch (xdp->command) {
3411 case XDP_SETUP_PROG:
3412 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3418 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3420 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3424 usleep_range(1000, 1100);
3425 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3431 static void mtk_clk_disable(struct mtk_eth *eth)
3435 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3436 clk_disable_unprepare(eth->clks[clk]);
3439 static int mtk_clk_enable(struct mtk_eth *eth)
3443 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3444 ret = clk_prepare_enable(eth->clks[clk]);
3446 goto err_disable_clks;
3453 clk_disable_unprepare(eth->clks[clk]);
3458 static void mtk_dim_rx(struct work_struct *work)
3460 struct dim *dim = container_of(work, struct dim, work);
3461 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3462 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3463 struct dim_cq_moder cur_profile;
3466 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3468 spin_lock_bh(&eth->dim_lock);
3470 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3471 val &= MTK_PDMA_DELAY_TX_MASK;
3472 val |= MTK_PDMA_DELAY_RX_EN;
3474 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3475 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3477 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3478 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3480 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3481 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3482 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3484 spin_unlock_bh(&eth->dim_lock);
3486 dim->state = DIM_START_MEASURE;
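/* Illustrative sketch only (helper name is hypothetical): mtk_dim_rx() above
 * and mtk_dim_tx() below convert the DIM suggestion from microseconds into
 * the hardware's 20 us ticks, rounding up and clamping to the field width,
 * e.g. 50 us -> DIV_ROUND_UP(50, 20) = 3 ticks, i.e. 60 us programmed.
 */
static inline u32 mtk_dim_usec_to_ticks_sketch(u32 usec)
{
	return min_t(u32, DIV_ROUND_UP(usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
}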
3489 static void mtk_dim_tx(struct work_struct *work)
3491 struct dim *dim = container_of(work, struct dim, work);
3492 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3493 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3494 struct dim_cq_moder cur_profile;
3497 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3499 spin_lock_bh(&eth->dim_lock);
3501 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3502 val &= MTK_PDMA_DELAY_RX_MASK;
3503 val |= MTK_PDMA_DELAY_TX_EN;
3505 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3506 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3508 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3509 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3511 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3512 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3513 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3515 spin_unlock_bh(&eth->dim_lock);
3517 dim->state = DIM_START_MEASURE;
3520 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3522 struct mtk_eth *eth = mac->hw;
3523 u32 mcr_cur, mcr_new;
3525 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3528 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3529 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3532 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3533 else if (val <= 1536)
3534 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3535 else if (val <= 1552)
3536 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3538 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3540 if (mcr_new != mcr_cur)
3541 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3544 static void mtk_hw_reset(struct mtk_eth *eth)
3548 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3549 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3550 val = RSTCTRL_PPE0_V2;
3555 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3556 val |= RSTCTRL_PPE1;
3558 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3560 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3561 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3565 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3569 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3573 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3577 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3579 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3580 val & RSTCTRL_FE, 1, 1000)) {
3581 dev_err(eth->dev, "warm reset failed\n");
3586 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
3587 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3589 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3591 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3592 rst_mask |= RSTCTRL_PPE1;
3594 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3597 val = mtk_hw_reset_read(eth);
3598 if (!(val & rst_mask))
3599 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3602 rst_mask |= RSTCTRL_FE;
3603 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3606 val = mtk_hw_reset_read(eth);
3608 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3612 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3614 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3615 bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3616 bool oq_hang, cdm1_busy, adma_busy;
3617 bool wtx_busy, cdm_full, oq_free;
3618 u32 wdidx, val, gdm1_fc, gdm2_fc;
3619 bool qfsm_hang, qfwd_hang;
3622 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3625 /* WDMA sanity checks */
3626 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3628 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3629 wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3631 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3632 cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3634 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3635 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3636 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3638 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3639 if (++eth->reset.wdma_hang_count > 2) {
3640 eth->reset.wdma_hang_count = 0;
3646 /* QDMA sanity checks */
3647 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3648 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3650 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3651 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3652 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3653 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3654 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3655 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3657 if (qfsm_hang && qfwd_hang &&
3658 ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3659 (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3660 if (++eth->reset.qdma_hang_count > 2) {
3661 eth->reset.qdma_hang_count = 0;
3667 /* ADMA sanity checks */
3668 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3669 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3670 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3671 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3673 if (oq_hang && cdm1_busy && adma_busy) {
3674 if (++eth->reset.adma_hang_count > 2) {
3675 eth->reset.adma_hang_count = 0;
3681 eth->reset.wdma_hang_count = 0;
3682 eth->reset.qdma_hang_count = 0;
3683 eth->reset.adma_hang_count = 0;
3685 eth->reset.wdidx = wdidx;
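/* Descriptive note: each hang counter above follows a "three strikes" policy:
 * the (++count > 2) checks only report a hang after the same condition has
 * been seen on three consecutive runs of the monitor work, so transient
 * congestion does not trigger a full FE reset.
 */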
3690 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3692 struct delayed_work *del_work = to_delayed_work(work);
3693 struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3694 reset.monitor_work);
3696 if (test_bit(MTK_RESETTING, &eth->state))
3699 /* DMA stuck checks */
3700 if (mtk_hw_check_dma_hang(eth))
3701 schedule_work(&eth->pending_work);
3704 schedule_delayed_work(&eth->reset.monitor_work,
3705 MTK_DMA_MONITOR_TIMEOUT);
3708 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3710 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3711 ETHSYS_DMA_AG_MAP_PPE;
3712 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3715 if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
3719 pm_runtime_enable(eth->dev);
3720 pm_runtime_get_sync(eth->dev);
3722 ret = mtk_clk_enable(eth);
3724 goto err_disable_pm;
3728 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3729 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3731 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3732 ret = device_reset(eth->dev);
3734 dev_err(eth->dev, "MAC reset failed!\n");
3735 goto err_disable_pm;
3738 /* set interrupt delays based on current Net DIM sample */
3739 mtk_dim_rx(&eth->rx_dim.work);
3740 mtk_dim_tx(&eth->tx_dim.work);
3742 /* disable delay and normal interrupt */
3743 mtk_tx_irq_disable(eth, ~0);
3744 mtk_rx_irq_disable(eth, ~0);
3752 mtk_hw_warm_reset(eth);
3756 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3757 /* Set FE to PDMAv2 if necessary */
3758 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3759 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3763 /* Set GE2 driving and slew rate */
3764 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3767 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3770 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3773 /* Set linkdown as the default for each GMAC. Its own MCR would be set
3774 * up with the more appropriate value when the mtk_mac_config call is
3775 * invoked.
3776 */
3777 for (i = 0; i < MTK_MAC_COUNT; i++) {
3778 struct net_device *dev = eth->netdev[i];
3780 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3782 struct mtk_mac *mac = netdev_priv(dev);
3784 mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
3788 /* Tell the CDM to parse the MTK special tag from CPU-originated
3789 * frames, which also works for untagged packets.
3790 */
3791 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3792 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3793 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3794 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3795 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3798 /* Enable RX VLAN offloading */
3799 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3801 /* set interrupt delays based on current Net DIM sample */
3802 mtk_dim_rx(&eth->rx_dim.work);
3803 mtk_dim_tx(&eth->tx_dim.work);
3805 /* disable delay and normal interrupt */
3806 mtk_tx_irq_disable(eth, ~0);
3807 mtk_rx_irq_disable(eth, ~0);
3809 /* FE int grouping */
3810 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3811 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
3812 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3813 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
3814 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3816 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
3817 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3818 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3820 /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3821 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3823 /* PSE Free Queue Flow Control */
3824 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3826 /* PSE config input queue threshold */
3827 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3828 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3829 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3830 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3831 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3832 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3833 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
3834 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
3836 /* PSE config output queue threshold */
3837 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
3838 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
3839 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
3840 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
3841 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
3842 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
3843 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
3844 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
3846 /* GDM and CDM Threshold */
3847 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
3848 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
3849 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
3850 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
3851 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
3852 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
3859 pm_runtime_put_sync(eth->dev);
3860 pm_runtime_disable(eth->dev);
3866 static int mtk_hw_deinit(struct mtk_eth *eth)
3868 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
3871 mtk_clk_disable(eth);
3873 pm_runtime_put_sync(eth->dev);
3874 pm_runtime_disable(eth->dev);
3879 static int __init mtk_init(struct net_device *dev)
3881 struct mtk_mac *mac = netdev_priv(dev);
3882 struct mtk_eth *eth = mac->hw;
3885 ret = of_get_ethdev_address(mac->of_node, dev);
3887 /* If the mac address is invalid, use random mac address */
3888 eth_hw_addr_random(dev);
3889 dev_err(eth->dev, "generated random MAC address %pM\n",
3896 static void mtk_uninit(struct net_device *dev)
3898 struct mtk_mac *mac = netdev_priv(dev);
3899 struct mtk_eth *eth = mac->hw;
3901 phylink_disconnect_phy(mac->phylink);
3902 mtk_tx_irq_disable(eth, ~0);
3903 mtk_rx_irq_disable(eth, ~0);
3906 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
3908 int length = new_mtu + MTK_RX_ETH_HLEN;
3909 struct mtk_mac *mac = netdev_priv(dev);
3910 struct mtk_eth *eth = mac->hw;
3912 if (rcu_access_pointer(eth->prog) &&
3913 length > MTK_PP_MAX_BUF_SIZE) {
3914 netdev_err(dev, "Invalid MTU for XDP mode\n");
3918 mtk_set_mcr_max_rx(mac, length);
3924 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3926 struct mtk_mac *mac = netdev_priv(dev);
3932 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
3940 static void mtk_prepare_for_reset(struct mtk_eth *eth)
3945 /* disable FE P3 and P4 */
3946 val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
3947 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3948 val |= MTK_FE_LINK_DOWN_P4;
3949 mtk_w32(eth, val, MTK_FE_GLO_CFG);
3951 /* adjust PPE configurations to prepare for reset */
3952 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3953 mtk_ppe_prepare_reset(eth->ppe[i]);
3955 /* disable NETSYS interrupts */
3956 mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
3958 /* force link down GMAC */
3959 for (i = 0; i < 2; i++) {
3960 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
3961 mtk_w32(eth, val, MTK_MAC_MCR(i));
3965 static void mtk_pending_work(struct work_struct *work)
3967 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
3968 unsigned long restart = 0;
3973 set_bit(MTK_RESETTING, &eth->state);
3975 mtk_prepare_for_reset(eth);
3977 /* Run the preliminary reset configuration again to avoid any possible
3978 * race during the FE reset, since it can run with the RTNL lock released.
3979 */
3980 mtk_prepare_for_reset(eth);
3982 /* stop all devices to make sure that dma is properly shut down */
3983 for (i = 0; i < MTK_MAC_COUNT; i++) {
3984 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
3987 mtk_stop(eth->netdev[i]);
3988 __set_bit(i, &restart);
3991 usleep_range(15000, 16000);
3994 pinctrl_select_state(eth->dev->pins->p,
3995 eth->dev->pins->default_state);
3996 mtk_hw_init(eth, true);
3998 /* restart DMA and enable IRQs */
3999 for (i = 0; i < MTK_MAC_COUNT; i++) {
4000 if (!test_bit(i, &restart))
4003 if (mtk_open(eth->netdev[i])) {
4004 netif_alert(eth, ifup, eth->netdev[i],
4005 "Driver up/down cycle failed\n");
4006 dev_close(eth->netdev[i]);
4010 /* enable FE P3 and P4 */
4011 val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
4012 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4013 val &= ~MTK_FE_LINK_DOWN_P4;
4014 mtk_w32(eth, val, MTK_FE_GLO_CFG);
4016 clear_bit(MTK_RESETTING, &eth->state);
4018 mtk_wed_fe_reset_complete();
4023 static int mtk_free_dev(struct mtk_eth *eth)
4027 for (i = 0; i < MTK_MAC_COUNT; i++) {
4028 if (!eth->netdev[i])
4030 free_netdev(eth->netdev[i]);
4033 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4034 if (!eth->dsa_meta[i])
4036 metadata_dst_free(eth->dsa_meta[i]);
4042 static int mtk_unreg_dev(struct mtk_eth *eth)
4046 for (i = 0; i < MTK_MAC_COUNT; i++) {
4047 struct mtk_mac *mac;
4048 if (!eth->netdev[i])
4050 mac = netdev_priv(eth->netdev[i]);
4051 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4052 unregister_netdevice_notifier(&mac->device_notifier);
4053 unregister_netdev(eth->netdev[i]);
4059 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4063 for (i = 0; i < MTK_MAX_DEVS; i++)
4064 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4067 static int mtk_cleanup(struct mtk_eth *eth)
4069 mtk_sgmii_destroy(eth);
4072 cancel_work_sync(&eth->pending_work);
4073 cancel_delayed_work_sync(&eth->reset.monitor_work);
4078 static int mtk_get_link_ksettings(struct net_device *ndev,
4079 struct ethtool_link_ksettings *cmd)
4081 struct mtk_mac *mac = netdev_priv(ndev);
4083 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4086 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4089 static int mtk_set_link_ksettings(struct net_device *ndev,
4090 const struct ethtool_link_ksettings *cmd)
4092 struct mtk_mac *mac = netdev_priv(ndev);
4094 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4097 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4100 static void mtk_get_drvinfo(struct net_device *dev,
4101 struct ethtool_drvinfo *info)
4103 struct mtk_mac *mac = netdev_priv(dev);
4105 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4106 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4107 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4110 static u32 mtk_get_msglevel(struct net_device *dev)
4112 struct mtk_mac *mac = netdev_priv(dev);
4114 return mac->hw->msg_enable;
4117 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4119 struct mtk_mac *mac = netdev_priv(dev);
4121 mac->hw->msg_enable = value;
4124 static int mtk_nway_reset(struct net_device *dev)
4126 struct mtk_mac *mac = netdev_priv(dev);
4128 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4134 return phylink_ethtool_nway_reset(mac->phylink);
4137 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4141 switch (stringset) {
4142 case ETH_SS_STATS: {
4143 struct mtk_mac *mac = netdev_priv(dev);
4145 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4146 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4147 data += ETH_GSTRING_LEN;
4149 if (mtk_page_pool_enabled(mac->hw))
4150 page_pool_ethtool_stats_get_strings(data);
4158 static int mtk_get_sset_count(struct net_device *dev, int sset)
4161 case ETH_SS_STATS: {
4162 int count = ARRAY_SIZE(mtk_ethtool_stats);
4163 struct mtk_mac *mac = netdev_priv(dev);
4165 if (mtk_page_pool_enabled(mac->hw))
4166 count += page_pool_ethtool_stats_get_count();
4174 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4176 struct page_pool_stats stats = {};
4179 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4180 struct mtk_rx_ring *ring = ð->rx_ring[i];
4182 if (!ring->page_pool)
4185 page_pool_get_stats(ring->page_pool, &stats);
4187 page_pool_ethtool_stats_get(data, &stats);
4190 static void mtk_get_ethtool_stats(struct net_device *dev,
4191 struct ethtool_stats *stats, u64 *data)
4193 struct mtk_mac *mac = netdev_priv(dev);
4194 struct mtk_hw_stats *hwstats = mac->hw_stats;
4195 u64 *data_src, *data_dst;
4199 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4202 if (netif_running(dev) && netif_device_present(dev)) {
4203 if (spin_trylock_bh(&hwstats->stats_lock)) {
4204 mtk_stats_update_mac(mac);
4205 spin_unlock_bh(&hwstats->stats_lock);
4209 data_src = (u64 *)hwstats;
4213 start = u64_stats_fetch_begin(&hwstats->syncp);
4215 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4216 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4217 if (mtk_page_pool_enabled(mac->hw))
4218 mtk_ethtool_pp_stats(mac->hw, data_dst);
4219 } while (u64_stats_fetch_retry(&hwstats->syncp, start));
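/* Descriptive note: the u64_stats_fetch_begin()/u64_stats_fetch_retry() pair
 * above is the usual lockless seqcount read loop; if the counters were
 * updated while being copied, the loop simply re-reads them, so this reader
 * never blocks the per-MAC statistics writers.
 */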
4222 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4225 int ret = -EOPNOTSUPP;
4228 case ETHTOOL_GRXRINGS:
4229 if (dev->hw_features & NETIF_F_LRO) {
4230 cmd->data = MTK_MAX_RX_RING_NUM;
4234 case ETHTOOL_GRXCLSRLCNT:
4235 if (dev->hw_features & NETIF_F_LRO) {
4236 struct mtk_mac *mac = netdev_priv(dev);
4238 cmd->rule_cnt = mac->hwlro_ip_cnt;
4242 case ETHTOOL_GRXCLSRULE:
4243 if (dev->hw_features & NETIF_F_LRO)
4244 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4246 case ETHTOOL_GRXCLSRLALL:
4247 if (dev->hw_features & NETIF_F_LRO)
4248 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4258 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4260 int ret = -EOPNOTSUPP;
4263 case ETHTOOL_SRXCLSRLINS:
4264 if (dev->hw_features & NETIF_F_LRO)
4265 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4267 case ETHTOOL_SRXCLSRLDEL:
4268 if (dev->hw_features & NETIF_F_LRO)
4269 ret = mtk_hwlro_del_ipaddr(dev, cmd);
4278 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4279 struct net_device *sb_dev)
4281 struct mtk_mac *mac = netdev_priv(dev);
4282 unsigned int queue = 0;
4284 if (netdev_uses_dsa(dev))
4285 queue = skb_get_queue_mapping(skb) + 3;
4289 if (queue >= dev->num_tx_queues)
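/* Descriptive note: for DSA-tagged transmits the queue chosen by the tagger
 * (one per switch user port) is shifted by 3 here, matching the dp->index + 3
 * mapping in mtk_device_event(); the assumption is that the lowest QDMA
 * queues stay reserved for non-DSA traffic.
 */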
4295 static const struct ethtool_ops mtk_ethtool_ops = {
4296 .get_link_ksettings = mtk_get_link_ksettings,
4297 .set_link_ksettings = mtk_set_link_ksettings,
4298 .get_drvinfo = mtk_get_drvinfo,
4299 .get_msglevel = mtk_get_msglevel,
4300 .set_msglevel = mtk_set_msglevel,
4301 .nway_reset = mtk_nway_reset,
4302 .get_link = ethtool_op_get_link,
4303 .get_strings = mtk_get_strings,
4304 .get_sset_count = mtk_get_sset_count,
4305 .get_ethtool_stats = mtk_get_ethtool_stats,
4306 .get_rxnfc = mtk_get_rxnfc,
4307 .set_rxnfc = mtk_set_rxnfc,
4310 static const struct net_device_ops mtk_netdev_ops = {
4311 .ndo_init = mtk_init,
4312 .ndo_uninit = mtk_uninit,
4313 .ndo_open = mtk_open,
4314 .ndo_stop = mtk_stop,
4315 .ndo_start_xmit = mtk_start_xmit,
4316 .ndo_set_mac_address = mtk_set_mac_address,
4317 .ndo_validate_addr = eth_validate_addr,
4318 .ndo_eth_ioctl = mtk_do_ioctl,
4319 .ndo_change_mtu = mtk_change_mtu,
4320 .ndo_tx_timeout = mtk_tx_timeout,
4321 .ndo_get_stats64 = mtk_get_stats64,
4322 .ndo_fix_features = mtk_fix_features,
4323 .ndo_set_features = mtk_set_features,
4324 #ifdef CONFIG_NET_POLL_CONTROLLER
4325 .ndo_poll_controller = mtk_poll_controller,
4327 .ndo_setup_tc = mtk_eth_setup_tc,
4329 .ndo_xdp_xmit = mtk_xdp_xmit,
4330 .ndo_select_queue = mtk_select_queue,
4333 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4335 const __be32 *_id = of_get_property(np, "reg", NULL);
4336 phy_interface_t phy_mode;
4337 struct phylink *phylink;
4338 struct mtk_mac *mac;
4344 dev_err(eth->dev, "missing mac id\n");
4348 id = be32_to_cpup(_id);
4349 if (id >= MTK_MAC_COUNT) {
4350 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4354 if (eth->netdev[id]) {
4355 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4359 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4360 txqs = MTK_QDMA_NUM_QUEUES;
4362 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4363 if (!eth->netdev[id]) {
4364 dev_err(eth->dev, "alloc_etherdev failed\n");
4367 mac = netdev_priv(eth->netdev[id]);
4373 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4374 mac->hwlro_ip_cnt = 0;
4376 mac->hw_stats = devm_kzalloc(eth->dev,
4377 sizeof(*mac->hw_stats),
4379 if (!mac->hw_stats) {
4380 dev_err(eth->dev, "failed to allocate counter memory\n");
4384 spin_lock_init(&mac->hw_stats->stats_lock);
4385 u64_stats_init(&mac->hw_stats->syncp);
4386 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
4388 /* phylink create */
4389 err = of_get_phy_mode(np, &phy_mode);
4391 dev_err(eth->dev, "incorrect phy-mode\n");
4395 /* mac config is not set */
4396 mac->interface = PHY_INTERFACE_MODE_NA;
4397 mac->speed = SPEED_UNKNOWN;
4399 mac->phylink_config.dev = &eth->netdev[id]->dev;
4400 mac->phylink_config.type = PHYLINK_NETDEV;
4401 /* This driver makes use of state->speed in mac_config */
4402 mac->phylink_config.legacy_pre_march2020 = true;
4403 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4404 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4406 __set_bit(PHY_INTERFACE_MODE_MII,
4407 mac->phylink_config.supported_interfaces);
4408 __set_bit(PHY_INTERFACE_MODE_GMII,
4409 mac->phylink_config.supported_interfaces);
4411 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4412 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4414 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4415 __set_bit(PHY_INTERFACE_MODE_TRGMII,
4416 mac->phylink_config.supported_interfaces);
4418 /* TRGMII is not permitted on MT7621 if using DDR2 */
4419 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4420 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4421 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4422 if (val & SYSCFG_DRAM_TYPE_DDR2)
4423 __clear_bit(PHY_INTERFACE_MODE_TRGMII,
4424 mac->phylink_config.supported_interfaces);
4427 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4428 __set_bit(PHY_INTERFACE_MODE_SGMII,
4429 mac->phylink_config.supported_interfaces);
4430 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
4431 mac->phylink_config.supported_interfaces);
4432 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
4433 mac->phylink_config.supported_interfaces);
4436 phylink = phylink_create(&mac->phylink_config,
4437 of_fwnode_handle(mac->of_node),
4438 phy_mode, &mtk_phylink_ops);
4439 if (IS_ERR(phylink)) {
4440 err = PTR_ERR(phylink);
4444 mac->phylink = phylink;
4446 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4447 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4448 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4449 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4451 eth->netdev[id]->hw_features = eth->soc->hw_features;
4453 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4455 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4456 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
4457 eth->netdev[id]->features |= eth->soc->hw_features;
4458 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4460 eth->netdev[id]->irq = eth->irq[0];
4461 eth->netdev[id]->dev.of_node = np;
4463 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4464 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4466 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4468 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4469 mac->device_notifier.notifier_call = mtk_device_event;
4470 register_netdevice_notifier(&mac->device_notifier);
4473 if (mtk_page_pool_enabled(eth))
4474 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4475 NETDEV_XDP_ACT_REDIRECT |
4476 NETDEV_XDP_ACT_NDO_XMIT |
4477 NETDEV_XDP_ACT_NDO_XMIT_SG;
4482 free_netdev(eth->netdev[id]);
4486 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4488 struct net_device *dev, *tmp;
4489 LIST_HEAD(dev_list);
4494 for (i = 0; i < MTK_MAC_COUNT; i++) {
4495 dev = eth->netdev[i];
4497 if (!dev || !(dev->flags & IFF_UP))
4500 list_add_tail(&dev->close_list, &dev_list);
4503 dev_close_many(&dev_list, false);
4505 eth->dma_dev = dma_dev;
4507 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4508 list_del_init(&dev->close_list);
4509 dev_open(dev, NULL);
4515 static int mtk_sgmii_init(struct mtk_eth *eth)
4517 struct device_node *np;
4518 struct regmap *regmap;
4522 for (i = 0; i < MTK_MAX_DEVS; i++) {
4523 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4527 regmap = syscon_node_to_regmap(np);
4529 if (of_property_read_bool(np, "mediatek,pnswap"))
4530 flags |= MTK_SGMII_FLAG_PN_SWAP;
4535 return PTR_ERR(regmap);
4537 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4545 static int mtk_probe(struct platform_device *pdev)
4547 struct resource *res = NULL;
4548 struct device_node *mac_np;
4549 struct mtk_eth *eth;
4552 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4556 eth->soc = of_device_get_match_data(&pdev->dev);
4558 eth->dev = &pdev->dev;
4559 eth->dma_dev = &pdev->dev;
4560 eth->base = devm_platform_ioremap_resource(pdev, 0);
4561 if (IS_ERR(eth->base))
4562 return PTR_ERR(eth->base);
4564 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4565 eth->ip_align = NET_IP_ALIGN;
4567 spin_lock_init(&eth->page_lock);
4568 spin_lock_init(&eth->tx_irq_lock);
4569 spin_lock_init(&eth->rx_irq_lock);
4570 spin_lock_init(&eth->dim_lock);
4572 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4573 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4574 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4576 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4577 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4579 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4580 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4582 if (IS_ERR(eth->ethsys)) {
4583 dev_err(&pdev->dev, "no ethsys regmap found\n");
4584 return PTR_ERR(eth->ethsys);
4588 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4589 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4590 "mediatek,infracfg");
4591 if (IS_ERR(eth->infra)) {
4592 dev_err(&pdev->dev, "no infracfg regmap found\n");
4593 return PTR_ERR(eth->infra);
4597 if (of_dma_is_coherent(pdev->dev.of_node)) {
4600 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4601 "cci-control-port");
4602 /* enable CPU/bus coherency */
4604 regmap_write(cci, 0, 3);
4607 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4608 err = mtk_sgmii_init(eth);
4614 if (eth->soc->required_pctl) {
4615 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4617 if (IS_ERR(eth->pctl)) {
4618 dev_err(&pdev->dev, "no pctl regmap found\n");
4619 err = PTR_ERR(eth->pctl);
4620 goto err_destroy_sgmii;
4624 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
4625 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4628 goto err_destroy_sgmii;
4632 if (eth->soc->offload_version) {
4634 struct device_node *np;
4635 phys_addr_t wdma_phy;
4638 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4641 np = of_parse_phandle(pdev->dev.of_node,
4646 wdma_base = eth->soc->reg_map->wdma_base[i];
4647 wdma_phy = res ? res->start + wdma_base : 0;
4648 mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4653 for (i = 0; i < 3; i++) {
4654 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4655 eth->irq[i] = eth->irq[0];
4657 eth->irq[i] = platform_get_irq(pdev, i);
4658 if (eth->irq[i] < 0) {
4659 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4664 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4665 eth->clks[i] = devm_clk_get(eth->dev,
4666 mtk_clks_source_name[i]);
4667 if (IS_ERR(eth->clks[i])) {
4668 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4669 err = -EPROBE_DEFER;
4672 if (eth->soc->required_clks & BIT(i)) {
4673 dev_err(&pdev->dev, "clock %s not found\n",
4674 mtk_clks_source_name[i]);
4678 eth->clks[i] = NULL;
4682 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4683 INIT_WORK(&eth->pending_work, mtk_pending_work);
4685 err = mtk_hw_init(eth, false);
4689 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4691 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4692 if (!of_device_is_compatible(mac_np,
4693 "mediatek,eth-mac"))
4696 if (!of_device_is_available(mac_np))
4699 err = mtk_add_mac(eth, mac_np);
4701 of_node_put(mac_np);
4706 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4707 err = devm_request_irq(eth->dev, eth->irq[0],
4709 dev_name(eth->dev), eth);
4711 err = devm_request_irq(eth->dev, eth->irq[1],
4712 mtk_handle_irq_tx, 0,
4713 dev_name(eth->dev), eth);
4717 err = devm_request_irq(eth->dev, eth->irq[2],
4718 mtk_handle_irq_rx, 0,
4719 dev_name(eth->dev), eth);
4724 /* No MT7628/88 support yet */
4725 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4726 err = mtk_mdio_init(eth);
4731 if (eth->soc->offload_version) {
4734 num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
4735 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4736 for (i = 0; i < num_ppe; i++) {
4737 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4739 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
4743 goto err_deinit_ppe;
4747 err = mtk_eth_offload_init(eth);
4749 goto err_deinit_ppe;
4752 for (i = 0; i < MTK_MAX_DEVS; i++) {
4753 if (!eth->netdev[i])
4756 err = register_netdev(eth->netdev[i]);
4758 dev_err(eth->dev, "error bringing up device\n");
4759 goto err_deinit_ppe;
4761 netif_info(eth, probe, eth->netdev[i],
4762 "mediatek frame engine at 0x%08lx, irq %d\n",
4763 eth->netdev[i]->base_addr, eth->irq[0]);
4766 /* we run 2 devices on the same DMA ring so we need a dummy device
4767 * for NAPI to work
4768 */
4769 init_dummy_netdev(&eth->dummy_dev);
4770 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
4771 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
4773 platform_set_drvdata(pdev, eth);
4774 schedule_delayed_work(&eth->reset.monitor_work,
4775 MTK_DMA_MONITOR_TIMEOUT);
4780 mtk_ppe_deinit(eth);
4781 mtk_mdio_cleanup(eth);
4789 mtk_sgmii_destroy(eth);
4794 static int mtk_remove(struct platform_device *pdev)
4796 struct mtk_eth *eth = platform_get_drvdata(pdev);
4797 struct mtk_mac *mac;
4800 /* stop all devices to make sure that dma is properly shut down */
4801 for (i = 0; i < MTK_MAC_COUNT; i++) {
4802 if (!eth->netdev[i])
4804 mtk_stop(eth->netdev[i]);
4805 mac = netdev_priv(eth->netdev[i]);
4806 phylink_disconnect_phy(mac->phylink);
4812 netif_napi_del(&eth->tx_napi);
4813 netif_napi_del(&eth->rx_napi);
4815 mtk_mdio_cleanup(eth);
4820 static const struct mtk_soc_data mt2701_data = {
4821 .reg_map = &mtk_reg_map,
4822 .caps = MT7623_CAPS | MTK_HWLRO,
4823 .hw_features = MTK_HW_FEATURES,
4824 .required_clks = MT7623_CLKS_BITMAP,
4825 .required_pctl = true,
4827 .txd_size = sizeof(struct mtk_tx_dma),
4828 .rxd_size = sizeof(struct mtk_rx_dma),
4829 .rx_irq_done_mask = MTK_RX_DONE_INT,
4830 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4831 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4832 .dma_len_offset = 16,
4836 static const struct mtk_soc_data mt7621_data = {
4837 .reg_map = &mtk_reg_map,
4838 .caps = MT7621_CAPS,
4839 .hw_features = MTK_HW_FEATURES,
4840 .required_clks = MT7621_CLKS_BITMAP,
4841 .required_pctl = false,
4842 .offload_version = 1,
4844 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4846 .txd_size = sizeof(struct mtk_tx_dma),
4847 .rxd_size = sizeof(struct mtk_rx_dma),
4848 .rx_irq_done_mask = MTK_RX_DONE_INT,
4849 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4850 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4851 .dma_len_offset = 16,
4855 static const struct mtk_soc_data mt7622_data = {
4856 .reg_map = &mtk_reg_map,
4858 .caps = MT7622_CAPS | MTK_HWLRO,
4859 .hw_features = MTK_HW_FEATURES,
4860 .required_clks = MT7622_CLKS_BITMAP,
4861 .required_pctl = false,
4862 .offload_version = 2,
4864 .has_accounting = true,
4865 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4867 .txd_size = sizeof(struct mtk_tx_dma),
4868 .rxd_size = sizeof(struct mtk_rx_dma),
4869 .rx_irq_done_mask = MTK_RX_DONE_INT,
4870 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4871 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4872 .dma_len_offset = 16,
4876 static const struct mtk_soc_data mt7623_data = {
4877 .reg_map = &mtk_reg_map,
4878 .caps = MT7623_CAPS | MTK_HWLRO,
4879 .hw_features = MTK_HW_FEATURES,
4880 .required_clks = MT7623_CLKS_BITMAP,
4881 .required_pctl = true,
4882 .offload_version = 1,
4884 .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
4886 .txd_size = sizeof(struct mtk_tx_dma),
4887 .rxd_size = sizeof(struct mtk_rx_dma),
4888 .rx_irq_done_mask = MTK_RX_DONE_INT,
4889 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4890 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4891 .dma_len_offset = 16,
4895 static const struct mtk_soc_data mt7629_data = {
4896 .reg_map = &mtk_reg_map,
4898 .caps = MT7629_CAPS | MTK_HWLRO,
4899 .hw_features = MTK_HW_FEATURES,
4900 .required_clks = MT7629_CLKS_BITMAP,
4901 .required_pctl = false,
4902 .has_accounting = true,
4904 .txd_size = sizeof(struct mtk_tx_dma),
4905 .rxd_size = sizeof(struct mtk_rx_dma),
4906 .rx_irq_done_mask = MTK_RX_DONE_INT,
4907 .rx_dma_l4_valid = RX_DMA_L4_VALID,
4908 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4909 .dma_len_offset = 16,
4913 static const struct mtk_soc_data mt7981_data = {
4914 .reg_map = &mt7986_reg_map,
4916 .caps = MT7981_CAPS,
4917 .hw_features = MTK_HW_FEATURES,
4918 .required_clks = MT7981_CLKS_BITMAP,
4919 .required_pctl = false,
4920 .offload_version = 2,
4922 .foe_entry_size = sizeof(struct mtk_foe_entry),
4923 .has_accounting = true,
4925 .txd_size = sizeof(struct mtk_tx_dma_v2),
4926 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4927 .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4928 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4929 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4930 .dma_len_offset = 8,
4934 static const struct mtk_soc_data mt7986_data = {
4935 .reg_map = &mt7986_reg_map,
4937 .caps = MT7986_CAPS,
4938 .hw_features = MTK_HW_FEATURES,
4939 .required_clks = MT7986_CLKS_BITMAP,
4940 .required_pctl = false,
4941 .offload_version = 2,
4943 .foe_entry_size = sizeof(struct mtk_foe_entry),
4944 .has_accounting = true,
4946 .txd_size = sizeof(struct mtk_tx_dma_v2),
4947 .rxd_size = sizeof(struct mtk_rx_dma_v2),
4948 .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
4949 .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
4950 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
4951 .dma_len_offset = 8,
4955 static const struct mtk_soc_data rt5350_data = {
4956 .reg_map = &mt7628_reg_map,
4957 .caps = MT7628_CAPS,
4958 .hw_features = MTK_HW_FEATURES_MT7628,
4959 .required_clks = MT7628_CLKS_BITMAP,
4960 .required_pctl = false,
4962 .txd_size = sizeof(struct mtk_tx_dma),
4963 .rxd_size = sizeof(struct mtk_rx_dma),
4964 .rx_irq_done_mask = MTK_RX_DONE_INT,
4965 .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
4966 .dma_max_len = MTK_TX_DMA_BUF_LEN,
4967 .dma_len_offset = 16,
4971 const struct of_device_id of_mtk_match[] = {
4972 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
4973 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
4974 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
4975 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
4976 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
4977 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
4978 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
4979 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
4982 MODULE_DEVICE_TABLE(of, of_mtk_match);
4984 static struct platform_driver mtk_driver = {
4986 .remove = mtk_remove,
4988 .name = "mtk_soc_eth",
4989 .of_match_table = of_mtk_match,
4993 module_platform_driver(mtk_driver);
4995 MODULE_LICENSE("GPL");
4996 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
4997 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");