return received;
}
-/* non NAPI receive function */
-static int fs_enet_rx_non_napi(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
- cbd_t __iomem *bdp;
- struct sk_buff *skb, *skbn, *skbt;
- int received = 0;
- u16 pkt_len, sc;
- int curidx;
- /*
- * First, grab all of the stats for the incoming packet.
- * These get messed up if we get called due to a busy condition.
- */
- bdp = fep->cur_rx;
-
- while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
-
- curidx = bdp - fep->rx_bd_base;
-
- /*
- * Since we have allocated space to hold a complete frame,
- * the last indicator should be set.
- */
- if ((sc & BD_ENET_RX_LAST) == 0)
- dev_warn(fep->dev, "rcv is not +last\n");
-
- /*
- * Check for errors.
- */
- if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
- BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
- fep->stats.rx_errors++;
- /* Frame too long or too short. */
- if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
- fep->stats.rx_length_errors++;
- /* Frame alignment */
- if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
- fep->stats.rx_frame_errors++;
- /* CRC Error */
- if (sc & BD_ENET_RX_CR)
- fep->stats.rx_crc_errors++;
- /* FIFO overrun */
- if (sc & BD_ENET_RX_OV)
- fep->stats.rx_crc_errors++;
-
- skb = fep->rx_skbuff[curidx];
-
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
- skbn = skb;
-
- } else {
-
- skb = fep->rx_skbuff[curidx];
-
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
- /*
- * Process the incoming frame.
- */
- fep->stats.rx_packets++;
- pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
- fep->stats.rx_bytes += pkt_len + 4;
-
- if (pkt_len <= fpi->rx_copybreak) {
- /* +2 to make IP header L1 cache aligned */
- skbn = netdev_alloc_skb(dev, pkt_len + 2);
- if (skbn != NULL) {
- skb_reserve(skbn, 2); /* align IP header */
- skb_copy_from_linear_data(skb,
- skbn->data, pkt_len);
- /* swap */
- skbt = skb;
- skb = skbn;
- skbn = skbt;
- }
- } else {
- skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
-
- if (skbn)
- skb_align(skbn, ENET_RX_ALIGN);
- }
-
- if (skbn != NULL) {
- skb_put(skb, pkt_len); /* Make room */
- skb->protocol = eth_type_trans(skb, dev);
- received++;
- netif_rx(skb);
- } else {
- fep->stats.rx_dropped++;
- skbn = skb;
- }
- }
-
- fep->rx_skbuff[curidx] = skbn;
- CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE));
- CBDW_DATLEN(bdp, 0);
- CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
-
- /*
- * Update BD pointer to next entry.
- */
- if ((sc & BD_ENET_RX_WRAP) == 0)
- bdp++;
- else
- bdp = fep->rx_bd_base;
-
- (*fep->ops->rx_bd_done)(dev);
- }
-
- fep->cur_rx = bdp;
-
- return 0;
-}
-
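
With the non-NAPI path removed, all receive work funnels through the driver's poll callback. For orientation, here is a minimal sketch of the NAPI contract that fs_enet_rx_napi() (not shown in this hunk) must follow; the body is illustrative, not the in-tree implementation:

    static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
    {
            struct fs_enet_private *fep =
                    container_of(napi, struct fs_enet_private, napi);
            int received = 0;

            /* Walk the rx BD ring exactly as the deleted loop did,
             * but stop once 'received' reaches 'budget'. */

            if (received < budget) {
                    /* Ring drained within budget: leave polled mode
                     * and let the MAC raise rx interrupts again. */
                    napi_complete(napi);
                    (*fep->ops->napi_enable_rx)(fep->ndev);
            }
            return received;
    }
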
static void fs_enet_tx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
nr++;
int_clr_events = int_events;
- if (fpi->use_napi)
- int_clr_events &= ~fep->ev_napi_rx;
+ int_clr_events &= ~fep->ev_napi_rx;
(*fep->ops->clear_int_events)(dev, int_clr_events);
(*fep->ops->ev_error)(dev, int_events);
if (int_events & fep->ev_rx) {
- if (!fpi->use_napi)
- fs_enet_rx_non_napi(dev);
- else {
- napi_ok = napi_schedule_prep(&fep->napi);
-
- (*fep->ops->napi_disable_rx)(dev);
- (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
-
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
- if (napi_ok)
- __napi_schedule(&fep->napi);
- }
+ napi_ok = napi_schedule_prep(&fep->napi);
+
+ (*fep->ops->napi_disable_rx)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+
+ /* NOTE: it is possible for FCCs in NAPI mode */
+ /* to submit a spurious interrupt while in poll */
+ if (napi_ok)
+ __napi_schedule(&fep->napi);
}
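
The added path above is the standard masked-interrupt NAPI scheduling sequence. Restated with the reasoning as comments (a sketch of the same calls, not a further change):

    napi_ok = napi_schedule_prep(&fep->napi);

    /* Mask and ack rx events even when prep fails: an FCC can raise a
     * spurious rx interrupt while the poll loop is already running,
     * and it must not be left asserted. */
    (*fep->ops->napi_disable_rx)(dev);
    (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

    /* Only the context that won napi_schedule_prep() may schedule;
     * the poll callback re-enables rx events once it completes. */
    if (napi_ok)
            __napi_schedule(&fep->napi);
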
if (int_events & fep->ev_tx)
/* not doing this will cause a crash in fs_enet_rx_napi */
fs_init_bds(fep->ndev);
- if (fep->fpi->use_napi)
- napi_enable(&fep->napi);
+ napi_enable(&fep->napi);
/* Install our interrupt handler. */
r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
"fs_enet-mac", dev);
if (r != 0) {
dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
- if (fep->fpi->use_napi)
- napi_disable(&fep->napi);
+ napi_disable(&fep->napi);
return -EINVAL;
}
err = fs_init_phy(dev);
if (err) {
free_irq(fep->interrupt, dev);
- if (fep->fpi->use_napi)
- napi_disable(&fep->napi);
+ napi_disable(&fep->napi);
return err;
}
phy_start(fep->phydev);
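
One ordering detail worth noting in the open path above: napi_enable() must precede request_irq(), because the IRQ line is shared and the handler may call __napi_schedule() as soon as it is installed, and scheduling a disabled NAPI instance is invalid. Both error legs therefore unwind with napi_disable(), as sketched here (same calls as the patched code, comments added):

    napi_enable(&fep->napi);        /* poll may be scheduled from now on */

    r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
                    "fs_enet-mac", dev);
    if (r != 0) {
            napi_disable(&fep->napi);       /* balance the enable above */
            return -EINVAL;
    }

    err = fs_init_phy(dev);
    if (err) {
            free_irq(fep->interrupt, dev);
            napi_disable(&fep->napi);       /* balance the enable above */
            return err;
    }
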
netif_stop_queue(dev);
netif_carrier_off(dev);
- if (fep->fpi->use_napi)
- napi_disable(&fep->napi);
+ napi_disable(&fep->napi);
phy_stop(fep->phydev);
spin_lock_irqsave(&fep->lock, flags);
fpi->rx_ring = 32;
fpi->tx_ring = 32;
fpi->rx_copybreak = 240;
- fpi->use_napi = 1;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
- if (fpi->use_napi)
- netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
- fpi->napi_weight);
+ netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
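
For reference, the rx_copybreak default of 240 set above drives the copybreak optimization in the receive path: frames at or below that size are copied into a small, freshly allocated skb so the full-size ring buffer can stay in place and be reused. A minimal sketch of the technique (variable names follow the deleted non-NAPI loop; illustrative only):

    if (pkt_len <= fpi->rx_copybreak) {
            /* Small frame: copy it out and recycle the ring skb. */
            skbn = netdev_alloc_skb(dev, pkt_len + 2);
            if (skbn) {
                    skb_reserve(skbn, 2);   /* align the IP header */
                    skb_copy_from_linear_data(skb, skbn->data, pkt_len);
                    swap(skb, skbn);        /* hand 'skb' up, keep 'skbn' on the ring */
            }
    } else {
            /* Large frame: give the stack the ring skb, map a new one. */
            skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
            if (skbn)
                    skb_align(skbn, ENET_RX_ALIGN);
    }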