#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
+/* FPE defines */
+#define FPE_EVENT_UNKNOWN 0
+#define FPE_EVENT_TRSP BIT(0)
+#define FPE_EVENT_TVER BIT(1)
+#define FPE_EVENT_RRSP BIT(2)
+#define FPE_EVENT_RVER BIT(3)
+
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
/* Physical Coding Sublayer */
if (hw->pcs)
value |= GMAC_PCS_IRQ_DEFAULT;
+ /* Enable FPE interrupt */
+ if (readl(ioaddr + GMAC_HW_FEATURE3) & GMAC_HW_FEAT_FPESEL)
+ value |= GMAC_INT_FPE_EN;
+
writel(value, ioaddr + GMAC_INT_EN);
}
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
.fpe_configure = dwmac5_fpe_configure,
+ .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
+ .fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
.fpe_configure = dwmac5_fpe_configure,
+ .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
+ .fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
value |= EFPE;
writel(value, ioaddr + MAC_FPE_CTRL_STS);
}
+
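+/* Translate the mPacket transmit/receive events latched in MAC_FPE_CTRL_STS
+ * into FPE_EVENT_* flags for the hand-shake state machine in the core driver.
+ */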
+int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+{
+ u32 value;
+ int status;
+
+ status = FPE_EVENT_UNKNOWN;
+
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ if (value & TRSP) {
+ status |= FPE_EVENT_TRSP;
+ netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
+ }
+
+ if (value & TVER) {
+ status |= FPE_EVENT_TVER;
+ netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
+ }
+
+ if (value & RRSP) {
+ status |= FPE_EVENT_RRSP;
+ netdev_info(dev, "FPE: Respond mPacket is received\n");
+ }
+
+ if (value & RVER) {
+ status |= FPE_EVENT_RVER;
+ netdev_info(dev, "FPE: Verify mPacket is received\n");
+ }
+
+ return status;
+}
+
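+/* Request transmission of a verify or respond mPacket by setting SVER or
+ * SRSP in MAC_FPE_CTRL_STS, clearing the opposite request bit first.
+ */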
+void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type)
+{
+ u32 value;
+
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ if (type == MPACKET_VERIFY) {
+ value &= ~SRSP;
+ value |= SVER;
+ } else {
+ value &= ~SVER;
+ value |= SRSP;
+ }
+
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+}
#define TMOUTEN BIT(0)
#define MAC_FPE_CTRL_STS 0x00000234
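+/* TRSP/TVER/RRSP/RVER latch transmitted/received respond/verify mPackets;
+ * SRSP/SVER request transmission of a respond/verify mPacket.
+ */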
+#define TRSP BIT(19)
+#define TVER BIT(18)
+#define RRSP BIT(17)
+#define RVER BIT(16)
+#define SRSP BIT(2)
+#define SVER BIT(1)
#define EFPE BIT(0)
#define MAC_PPS_CONTROL 0x00000b70
#define GMAC_RXQCTRL_VFFQ_SHIFT 17
#define GMAC_RXQCTRL_VFFQE BIT(16)
+#define GMAC_INT_FPE_EN BIT(17)
+
int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp);
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
struct stmmac_extra_stats *x, u32 txqcnt);
void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
bool enable);
+void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
+ enum stmmac_mpacket_type type);
+int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
#endif /* __DWMAC5_H__ */
struct stmmac_extra_stats *x, u32 txqcnt);
void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
bool enable);
+ void (*fpe_send_mpacket)(void __iomem *ioaddr,
+ enum stmmac_mpacket_type type);
+ int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
};
#define stmmac_core_init(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, est_irq_status, __args)
#define stmmac_fpe_configure(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
+#define stmmac_fpe_send_mpacket(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
+#define stmmac_fpe_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
struct workqueue_struct *wq;
struct work_struct service_task;
+ /* Workqueue for handling FPE hand-shaking */
+ unsigned long fpe_task_state;
+ struct workqueue_struct *fpe_wq;
+ struct work_struct fpe_task;
+ char wq_name[IFNAMSIZ + 4];
+
/* TC Handling */
unsigned int tc_entries_max;
unsigned int tc_off_max;
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
+void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
/* Not Supported */
}
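+/* On link up with hand-shaking enabled, (re)start the hand-shake by sending a
+ * verify mPacket; otherwise reset both stations' FPE states.
+ */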
+static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (is_up && *hs_enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
+ } else {
+ *lo_state = FPE_STATE_OFF;
+ *lp_state = FPE_STATE_OFF;
+ }
+}
+
static void stmmac_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
priv->tx_lpi_enabled = false;
stmmac_eee_init(priv);
stmmac_set_eee_pls(priv, priv->hw, false);
+
+ stmmac_fpe_link_state_handle(priv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
}
+
+ stmmac_fpe_link_state_handle(priv, true);
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
}
}
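+/* Create the single-threaded "<ifname>-fpe" workqueue that runs the FPE
+ * link-partner hand-shake task outside interrupt context.
+ */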
+static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
+{
+ char *name;
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+
+ name = priv->wq_name;
+ sprintf(name, "%s-fpe", priv->dev->name);
+
+ priv->fpe_wq = create_singlethread_workqueue(name);
+ if (!priv->fpe_wq) {
+ netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
+
+ return -ENOMEM;
+ }
+ netdev_info(priv->dev, "FPE workqueue start");
+
+ return 0;
+}
+
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ if (priv->dma_cap.fpesel) {
+ stmmac_fpe_start_wq(priv);
+
+ if (priv->plat->fpe_cfg->enable)
+ stmmac_fpe_handshake(priv, true);
+ }
+
return 0;
}
return ret;
}
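+/* Tear down the FPE workqueue; __FPE_REMOVING prevents the hand-shake task
+ * from being rescheduled while the workqueue is destroyed.
+ */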
+static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
+{
+ set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+ if (priv->fpe_wq)
+ destroy_workqueue(priv->fpe_wq);
+
+ netdev_info(priv->dev, "FPE workqueue stop");
+}
+
/**
* stmmac_release - close entry point of the driver
* @dev : device pointer.
pm_runtime_put(priv->device);
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_stop_wq(priv);
+
return 0;
}
return 0;
}
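+/* Update the local and link-partner FPE states from the interrupt status,
+ * answer a received verify mPacket with a respond mPacket, and schedule the
+ * hand-shake task to complete the negotiation.
+ */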
+static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
+ return;
+
+ /* If LP has sent verify mPacket, LP is FPE capable */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
+ if (*lp_state < FPE_STATE_CAPABLE)
+ *lp_state = FPE_STATE_CAPABLE;
+
+ /* If the user has requested FPE enable, respond immediately */
+ if (*hs_enable)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_RESPONSE);
+ }
+
+ /* If Local has sent verify mPacket, Local is FPE capable */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
+ if (*lo_state < FPE_STATE_CAPABLE)
+ *lo_state = FPE_STATE_CAPABLE;
+ }
+
+ /* If LP has sent response mPacket, LP is entering FPE ON */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ *lp_state = FPE_STATE_ENTERING_ON;
+
+ /* If Local has sent response mPacket, Local is entering FPE ON */
+ if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
+ *lo_state = FPE_STATE_ENTERING_ON;
+
+ if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
+ !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
+ priv->fpe_wq) {
+ queue_work(priv->fpe_wq, &priv->fpe_task);
+ }
+}
+
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
&priv->xstats, tx_cnt);
+ if (priv->dma_cap.fpesel) {
+ int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
+ priv->dev);
+
+ stmmac_fpe_event_status(priv, status);
+ }
+
/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
return ret;
}
+#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
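+/* Hand-shake work item: keep sending verify mPackets until both stations
+ * reach ENTERING_ON, then program the FPE hardware and mark both states ON.
+ */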
+static void stmmac_fpe_lp_task(struct work_struct *work)
+{
+ struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
+ fpe_task);
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+ bool *enable = &fpe_cfg->enable;
+ int retries = 20;
+
+ while (retries-- > 0) {
+ /* Bail out immediately if FPE handshake is OFF */
+ if (*lo_state == FPE_STATE_OFF || !*hs_enable)
+ break;
+
+ if (*lo_state == FPE_STATE_ENTERING_ON &&
+ *lp_state == FPE_STATE_ENTERING_ON) {
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ *enable);
+
+ netdev_info(priv->dev, "configured FPE\n");
+
+ *lo_state = FPE_STATE_ON;
+ *lp_state = FPE_STATE_ON;
+ netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
+ break;
+ }
+
+ if ((*lo_state == FPE_STATE_CAPABLE ||
+ *lo_state == FPE_STATE_ENTERING_ON) &&
+ *lp_state != FPE_STATE_ON) {
+ netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
+ *lo_state, *lp_state);
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_VERIFY);
+ }
+ /* Sleep then retry */
+ msleep(500);
+ }
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+}
+
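+/* Enable or disable the FPE hand-shake: send an initial verify mPacket when
+ * enabling, force both FPE states back to OFF when disabling.
+ */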
+void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+{
+ if (priv->plat->fpe_cfg->hs_enable != enable) {
+ if (enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_VERIFY);
+ } else {
+ priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
+ priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
+ }
+
+ priv->plat->fpe_cfg->hs_enable = enable;
+ }
+}
+
/**
* stmmac_dvr_probe
* @device: device pointer
INIT_WORK(&priv->service_task, stmmac_service_task);
+ /* Initialize the work item used for the FPE link-partner hand-shake */
+ INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
+
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
if (ret)
return ret;
}
+
mutex_unlock(&priv->lock);
+ if (priv->dma_cap.fpesel) {
+ /* Disable FPE */
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, false);
+
+ stmmac_fpe_handshake(priv, false);
+ }
+
priv->speed = SPEED_UNKNOWN;
return 0;
}
dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
priv->tc_entries_max, priv->tc_off_max);
+
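+ /* fpe_cfg may already be supplied through platform data; otherwise
+ * allocate it here. Either way, start from a zeroed state.
+ */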
+ if (!priv->plat->fpe_cfg) {
+ priv->plat->fpe_cfg = devm_kzalloc(priv->device,
+ sizeof(*priv->plat->fpe_cfg),
+ GFP_KERNEL);
+ if (!priv->plat->fpe_cfg)
+ return -ENOMEM;
+ } else {
+ memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
+ }
+
return 0;
}
if (fpe && !priv->dma_cap.fpesel)
return -EOPNOTSUPP;
- ret = stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use, fpe);
- if (ret && fpe) {
- netdev_err(priv->dev, "failed to enable Frame Preemption\n");
- return ret;
- }
+ /* The actual FPE register configuration is done once the FPE
+ * handshake has completed successfully.
+ */
+ priv->plat->fpe_cfg->enable = fpe;
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
}
netdev_info(priv->dev, "configured EST\n");
+
+ if (fpe) {
+ stmmac_fpe_handshake(priv, true);
+ netdev_info(priv->dev, "start FPE handshake\n");
+ }
+
return 0;
disable:
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+
+ priv->plat->fpe_cfg->enable = false;
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false);
+ netdev_info(priv->dev, "disabled FPE\n");
+
+ stmmac_fpe_handshake(priv, false);
+ netdev_info(priv->dev, "stop FPE handshake\n");
+
return ret;
}
int tbs_en;
};
+/* FPE link state */
+enum stmmac_fpe_state {
+ FPE_STATE_OFF = 0,
+ FPE_STATE_CAPABLE = 1,
+ FPE_STATE_ENTERING_ON = 2,
+ FPE_STATE_ON = 3,
+};
+
+/* FPE link-partner hand-shaking mPacket type */
+enum stmmac_mpacket_type {
+ MPACKET_VERIFY = 0,
+ MPACKET_RESPONSE = 1,
+};
+
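+/* FPE hand-shake work state bits */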
+enum stmmac_fpe_task_state_t {
+ __FPE_REMOVING,
+ __FPE_TASK_SCHED,
+};
+
+struct stmmac_fpe_cfg {
+ bool enable; /* FPE enable */
+ bool hs_enable; /* FPE handshake enable */
+ enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
+ enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
+};
+
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
struct stmmac_est *est;
+ struct stmmac_fpe_cfg *fpe_cfg;
int clk_csr;
int has_gmac;
int enh_desc;