static inline void tg3_netif_stop(struct tg3 *tp)
{
tp->dev->trans_start = jiffies; /* prevent tx timeout */
- napi_disable(&tp->napi);
+ napi_disable(&tp->napi[0].napi);
netif_tx_disable(tp->dev);
}
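
For context: this conversion presupposes a small per-vector wrapper that embeds the napi_struct and carries a back-pointer to the owning device. A minimal sketch of the assumed layout (the field set shown is only what these hunks actually use; the real struct presumably grows as more state moves per-vector):

struct tg3_napi {
	struct napi_struct	napi;	/* embedded, so container_of() can recover us */
	struct tg3		*tp;	/* back-pointer to the owning device */
};

struct tg3 {
	/* ... */
	struct tg3_napi		napi[1];	/* one vector for now; an array so
						 * MSI-X support can extend it later */
	/* ... */
};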
* so long as all callers are assured to have free tx slots
* (such as after tg3_init_hw)
*/
- napi_enable(&tp->napi);
+ napi_enable(&tp->napi[0].napi);
tp->hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
}
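
The start path undoes the stop path in reverse order: NAPI must be live again before device interrupts are re-enabled, otherwise a status-block update could arrive with no poller armed to consume it, and setting SD_STATUS_UPDATED by hand forces the next poll to rescan the status block. A condensed sketch of the assumed pairing:

/* stop: quiesce NAPI first, then the tx queue */
napi_disable(&tp->napi[0].napi);	/* blocks until any in-flight poll finishes */
netif_tx_disable(tp->dev);

/* start: reverse order, NAPI live before interrupts */
napi_enable(&tp->napi[0].napi);
tp->hw_status->status |= SD_STATUS_UPDATED;	/* make the next poll rescan the status block */
tg3_enable_ints(tp);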
src_map->skb = NULL;
}
-#if TG3_VLAN_TAG_USED
-static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
-{
- return vlan_gro_receive(&tp->napi, tp->vlgrp, vlan_tag, skb);
-}
-#endif
-
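
Dropping the one-line wrapper is a pure simplification: tg3_vlan_rx() only forwarded its arguments. For reference, the GRO helper it wrapped has this signature in kernels of this era (as assumed here):

int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		     unsigned int vlan_tci, struct sk_buff *skb);

With the napi_struct no longer directly inside struct tg3, keeping the wrapper would have meant threading the tg3_napi through it anyway, so inlining the call at its single use site is the smaller change.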
/* The RX ring scheme is composed of multiple rings which post fresh
* buffers to the chip, and one special ring the chip uses to report
* status back to the host.
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL &&
desc->type_flags & RXD_FLAG_VLAN) {
- tg3_vlan_rx(tp, skb,
- desc->err_vlan & RXD_VLAN_MASK);
+ vlan_gro_receive(&tp->napi[0].napi, tp->vlgrp,
+ desc->err_vlan & RXD_VLAN_MASK, skb);
} else
#endif
- napi_gro_receive(&tp->napi, skb);
+ napi_gro_receive(&tp->napi[0].napi, skb);
received++;
budget--;
static int tg3_poll(struct napi_struct *napi, int budget)
{
- struct tg3 *tp = container_of(napi, struct tg3, napi);
+ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+ struct tg3 *tp = tnapi->tp;
int work_done = 0;
struct tg3_hw_status *sblk = tp->hw_status;
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
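
The poll routine can no longer cast straight back to struct tg3, because the napi_struct it is handed now lives inside the wrapper: container_of() subtracts the member offset to recover the tg3_napi, and the tp back-pointer (filled in at probe time, below) finishes the trip. The recovery is free at runtime; the kernel macro expands to roughly this (simplified, the real one adds type checking):

/* given a pointer to a member, recover its enclosing struct:
 * a compile-time-constant pointer adjustment, no runtime cost
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))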
if (likely(!tg3_irq_sync(tp)))
- napi_schedule(&tp->napi);
+ napi_schedule(&tp->napi[0].napi);
return IRQ_HANDLED;
}
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (likely(!tg3_irq_sync(tp)))
- napi_schedule(&tp->napi);
+ napi_schedule(&tp->napi[0].napi);
return IRQ_RETVAL(1);
}
sblk->status &= ~SD_STATUS_UPDATED;
if (likely(tg3_has_work(tp))) {
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
- napi_schedule(&tp->napi);
+ napi_schedule(&tp->napi[0].napi);
} else {
/* No work, shared interrupt perhaps? re-enable
* interrupts, and flush that PCI write
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
- napi_schedule(&tp->napi);
+ napi_schedule(&tp->napi[0].napi);
out:
return IRQ_RETVAL(handled);
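
All three interrupt paths make the same mechanical substitution inside the standard NAPI handshake: the hardirq half only acknowledges the device and schedules the poller, and the softirq half completes and re-arms once the budget is not exhausted. A sketch of the shape (not the driver's exact code):

/* hardirq: hand off to softirq context */
if (likely(!tg3_irq_sync(tp)))
	napi_schedule(&tp->napi[0].napi);	/* queues tg3_poll() */

/* softirq, in tg3_poll(): after processing up to 'budget' packets */
if (work_done < budget) {
	napi_complete(napi);	/* leave polling mode */
	/* re-enable device interrupts via the driver's restart helper */
}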
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
tp->irq_sync = 0;
- napi_enable(&tp->napi);
+ napi_enable(&tp->napi[0].napi);
dev_close(tp->dev);
tg3_full_lock(tp, 0);
}
tg3_ints_init(tp);
- napi_enable(&tp->napi);
+ napi_enable(&tp->napi[0].napi);
err = tg3_request_irq(tp);
free_irq(tp->pdev->irq, dev);
err_out1:
- napi_disable(&tp->napi);
+ napi_disable(&tp->napi[0].napi);
tg3_ints_fini(tp);
tg3_free_consistent(tp);
return err;
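
The ordering in the open path is deliberate: NAPI is enabled before the IRQ is requested, so a vector that fires the moment it is hooked up finds a poller to schedule, and the error path unwinds in the reverse order of setup. A condensed sketch of the assumed sequence:

	tg3_ints_init(tp);
	napi_enable(&tp->napi[0].napi);	/* live before the IRQ can fire */
	err = tg3_request_irq(tp);
	if (err)
		goto err_out1;
	/* ... */
err_out1:
	napi_disable(&tp->napi[0].napi);	/* reverse of setup */
	tg3_ints_fini(tp);
	tg3_free_consistent(tp);
	return err;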
{
struct tg3 *tp = netdev_priv(dev);
- napi_disable(&tp->napi);
+ napi_disable(&tp->napi[0].napi);
cancel_work_sync(&tp->reset_task);
netif_stop_queue(dev);
tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
tp->tx_pending = TG3_DEF_TX_RING_PENDING;
- netif_napi_add(dev, &tp->napi, tg3_poll, 64);
+ tp->napi[0].tp = tp;
+ netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
dev->ethtool_ops = &tg3_ethtool_ops;
dev->watchdog_timeo = TG3_TX_TIMEOUT;
dev->irq = pdev->irq;
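
Probe-time registration keeps the same call; the one new requirement is that the tp back-pointer be populated before netif_napi_add(), since tg3_poll() can run as soon as the interface is opened and immediately dereferences it. For reference, the registration helper's signature in kernels of this vintage:

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

The weight of 64 is the conventional default poll budget for drivers of this class.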