net: stmmac: only enable DMA interrupts when ready
author Vincent Whitchurch <vincent.whitchurch@axis.com>
Thu, 24 Feb 2022 11:38:29 +0000 (12:38 +0100)
committer David S. Miller <davem@davemloft.net>
Fri, 25 Feb 2022 10:34:22 +0000 (10:34 +0000)
This driver's ->ndo_open() callback enables DMA interrupts, starts the
DMA channels, then requests interrupts with request_irq(), and only
then enables napi.
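
A rough call-order outline of that sequence (a simplified sketch of the
paths described above, not the literal code; error handling and most
intermediate steps are elided):

    stmmac_open()
        stmmac_hw_setup()
            stmmac_init_dma_engine()   /* channel init also unmasks DMA interrupts */
            stmmac_start_all_dma()     /* RX/TX DMA running, hardware can raise IRQs */
        stmmac_request_irq()           /* request_irq(): handlers can now run */
        stmmac_enable_all_queues()     /* napi_enable() happens only here */
        netif_tx_start_all_queues()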

If RX DMA interrupts are received before napi is enabled, no processing
is done because napi_schedule_prep() will return false.  If the network
has a lot of broadcast/multicast traffic, then the RX ring could fill up
completely before napi is enabled.  When this happens, no further RX
interrupts will be delivered, and the driver will fail to receive any
packets.
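
A minimal illustrative sketch of the lost-interrupt case (hypothetical
handler, not the stmmac code; netif_napi_add() leaves NAPI_STATE_SCHED
set in napi->state, and only napi_enable() clears it):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    static irqreturn_t example_rx_irq(int irq, void *dev_id)
    {
            struct napi_struct *napi = dev_id;

            /* Before napi_enable() has run, NAPI_STATE_SCHED is still
             * set, so napi_schedule_prep() returns false and the poll
             * function is never scheduled for this interrupt.
             */
            if (napi_schedule_prep(napi))
                    __napi_schedule(napi);

            return IRQ_HANDLED;
    }

Once the RX ring has filled up while napi was still disabled, the
hardware stops raising RX interrupts, so no later interrupt is left to
schedule napi and drain the ring.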

Fix this by only enabling DMA interrupts after all other initialization
is complete.
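
The resulting open path keeps the per-channel DMA interrupts masked
until everything else is up (again a simplified call-order sketch; the
full change is in the diff below):

    stmmac_open()
        stmmac_hw_setup()
            stmmac_init_dma_engine()    /* now masks DMA interrupts per channel */
            stmmac_start_all_dma()
        stmmac_request_irq()
        stmmac_enable_all_queues()      /* napi_enable() */
        netif_tx_start_all_queues()
        stmmac_enable_all_dma_irq()     /* DMA interrupts unmasked only now */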

Fixes: 523f11b5d4fd72efb ("net: stmmac: move hardware setup for stmmac_open to new function")
Reported-by: Lars Persson <larper@axis.com>
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

index bde76ea2deecf0ac927a1577a34a048efe2214a9..cb9b6e08780cdff96fae6c6b6d7696806fe4911e 100644
@@ -2262,6 +2262,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
        stmmac_stop_tx(priv, priv->ioaddr, chan);
 }
 
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+       u32 rx_channels_count = priv->plat->rx_queues_to_use;
+       u32 tx_channels_count = priv->plat->tx_queues_to_use;
+       u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+       u32 chan;
+
+       for (chan = 0; chan < dma_csr_ch; chan++) {
+               struct stmmac_channel *ch = &priv->channel[chan];
+               unsigned long flags;
+
+               spin_lock_irqsave(&ch->lock, flags);
+               stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+               spin_unlock_irqrestore(&ch->lock, flags);
+       }
+}
+
 /**
  * stmmac_start_all_dma - start all RX and TX DMA channels
  * @priv: driver private structure
@@ -2904,8 +2921,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
                stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
 
        /* DMA CSR Channel configuration */
-       for (chan = 0; chan < dma_csr_ch; chan++)
+       for (chan = 0; chan < dma_csr_ch; chan++) {
                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+               stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+       }
 
        /* DMA RX Channel Configuration */
        for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3761,6 +3780,7 @@ static int stmmac_open(struct net_device *dev)
 
        stmmac_enable_all_queues(priv);
        netif_tx_start_all_queues(priv->dev);
+       stmmac_enable_all_dma_irq(priv);
 
        return 0;
 
@@ -6510,8 +6530,10 @@ int stmmac_xdp_open(struct net_device *dev)
        }
 
        /* DMA CSR Channel configuration */
-       for (chan = 0; chan < dma_csr_ch; chan++)
+       for (chan = 0; chan < dma_csr_ch; chan++) {
                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+               stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+       }
 
        /* Adjust Split header */
        sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6572,6 +6594,7 @@ int stmmac_xdp_open(struct net_device *dev)
        stmmac_enable_all_queues(priv);
        netif_carrier_on(dev);
        netif_tx_start_all_queues(dev);
+       stmmac_enable_all_dma_irq(priv);
 
        return 0;
 
@@ -7451,6 +7474,7 @@ int stmmac_resume(struct device *dev)
        stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
 
        stmmac_enable_all_queues(priv);
+       stmmac_enable_all_dma_irq(priv);
 
        mutex_unlock(&priv->lock);
        rtnl_unlock();