 	stmmac_stop_tx(priv, priv->ioaddr, chan);
 }
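+/* Unmask the RX and TX DMA interrupts of every channel, taking each
+ * channel's lock to serialize against the NAPI poll handlers.
+ */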
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+	u32 chan;
+
+	for (chan = 0; chan < dma_csr_ch; chan++) {
+		struct stmmac_channel *ch = &priv->channel[chan];
+		unsigned long flags;
+
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+}
+
 /**
  * stmmac_start_all_dma - start all RX and TX DMA channels
  * @priv: driver private structure
 	stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
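+		/* Leave the per-channel DMA interrupts masked here; they are
+		 * unmasked by stmmac_enable_all_dma_irq() once the queues and
+		 * NAPI instances have been started.
+		 */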
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 	/* DMA RX Channel Configuration */
 	for (chan = 0; chan < rx_channels_count; chan++) {
 	stmmac_enable_all_queues(priv);
 	netif_tx_start_all_queues(priv->dev);
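+	/* Queues and NAPI are up; the DMA interrupts can be unmasked */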
+	stmmac_enable_all_dma_irq(priv);
 	return 0;
 }
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 	/* Adjust Split header */
 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
 	stmmac_enable_all_queues(priv);
 	netif_carrier_on(dev);
 	netif_tx_start_all_queues(dev);
+	stmmac_enable_all_dma_irq(priv);
 	return 0;
 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
 	stmmac_enable_all_queues(priv);
+	stmmac_enable_all_dma_irq(priv);
 	mutex_unlock(&priv->lock);
 	rtnl_unlock();