#define cpsw_slave_index(priv) \
((priv->data.dual_emac) ? priv->emac_port : \
priv->data.active_slave)
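+/* number of IRQ lines requested by the driver (RX and TX) */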
+#define IRQ_NUM 2
static int debug_level;
module_param(debug_level, int, 0);
struct cpsw_host_regs __iomem *host_port_regs;
struct cpdma_ctlr *dma;
struct cpdma_chan *txch, *rxch;
+ bool quirk_irq;
+ bool rx_irq_disabled;
+ bool tx_irq_disabled;
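+ /* snapshot of IRQ numbers: RX at [0], TX at [1] */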
+ u32 irqs_table[IRQ_NUM];
};
struct cpsw_priv {
struct cpsw_ale *ale;
bool rx_pause;
bool tx_pause;
- bool quirk_irq;
- bool rx_irq_disabled;
- bool tx_irq_disabled;
- /* snapshot of IRQ numbers */
- u32 irqs_table[4];
- u32 num_irqs;
struct cpts *cpts;
u32 emac_port;
struct cpsw_common *cpsw;
writel(0, &cpsw->wr_regs->tx_en);
cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
- if (priv->quirk_irq) {
- disable_irq_nosync(priv->irqs_table[1]);
- priv->tx_irq_disabled = true;
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[1]);
+ cpsw->tx_irq_disabled = true;
}
napi_schedule(&priv->napi_tx);
cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
writel(0, &cpsw->wr_regs->rx_en);
- if (priv->quirk_irq) {
- disable_irq_nosync(priv->irqs_table[0]);
- priv->rx_irq_disabled = true;
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[0]);
+ cpsw->rx_irq_disabled = true;
}
napi_schedule(&priv->napi_rx);
if (num_tx < budget) {
napi_complete(napi_tx);
writel(0xff, &cpsw->wr_regs->tx_en);
- if (priv->quirk_irq && priv->tx_irq_disabled) {
- priv->tx_irq_disabled = false;
- enable_irq(priv->irqs_table[1]);
+ if (cpsw->quirk_irq && cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
}
}
if (num_rx < budget) {
napi_complete(napi_rx);
writel(0xff, &cpsw->wr_regs->rx_en);
- if (priv->quirk_irq && priv->rx_irq_disabled) {
- priv->rx_irq_disabled = false;
- enable_irq(priv->irqs_table[0]);
+ if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
}
}
napi_enable(&priv_sl0->napi_rx);
napi_enable(&priv_sl0->napi_tx);
- if (priv_sl0->tx_irq_disabled) {
- priv_sl0->tx_irq_disabled = false;
- enable_irq(priv->irqs_table[1]);
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
}
- if (priv_sl0->rx_irq_disabled) {
- priv_sl0->rx_irq_disabled = false;
- enable_irq(priv->irqs_table[0]);
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
}
buf_num = cpdma_chan_get_rx_buf_num(cpsw->dma);
struct cpsw_common *cpsw = priv->cpsw;
cpsw_intr_disable(priv->cpsw);
- cpsw_rx_interrupt(priv->irqs_table[0], priv);
- cpsw_tx_interrupt(priv->irqs_table[1], priv);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], priv);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], priv);
cpsw_intr_enable(priv->cpsw);
}
#endif
struct cpsw_platform_data *data = &priv->data;
struct net_device *ndev;
struct cpsw_priv *priv_sl2;
- int ret = 0, i;
+ int ret = 0;
struct cpsw_common *cpsw = priv->cpsw;
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
priv->slaves[1].ndev = ndev;
priv_sl2->cpts = priv->cpts;
priv_sl2->version = priv->version;
-
- for (i = 0; i < priv->num_irqs; i++) {
- priv_sl2->irqs_table[i] = priv->irqs_table[i];
- priv_sl2->num_irqs = priv->num_irqs;
- }
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
if (of_id) {
pdev->id_entry = of_id->data;
if (pdev->id_entry->driver_data)
- priv->quirk_irq = true;
+ cpsw->quirk_irq = true;
}
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
 * MISC IRQs which are always kept disabled with this driver so
 * we will not request them.
 */
goto clean_ale_ret;
}
- priv->irqs_table[0] = irq;
+ cpsw->irqs_table[0] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
0, dev_name(&pdev->dev), priv);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
goto clean_ale_ret;
}
- priv->irqs_table[1] = irq;
+ cpsw->irqs_table[1] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
0, dev_name(&pdev->dev), priv);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
goto clean_ale_ret;
}
- priv->num_irqs = 2;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;