struct ioc3_etxd *txr;
struct sk_buff *rx_skbs[512];
struct sk_buff *tx_skbs[128];
- struct net_device_stats stats;
int rx_ci; /* RX consumer index */
int rx_pi; /* RX producer index */
int tx_ci; /* TX consumer index */
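/* Note: the private net_device_stats copy is dropped by this patch;
 * the counters now live in the stats member embedded in struct
 * net_device, so no per-driver mirror is needed.
 */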
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
- ip->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
- return &ip->stats;
+ dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
+ return &dev->stats;
}
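/* A minimal sketch (illustrative only, not part of the patch) of the
 * resulting accessor, assuming the usual get_stats shape for drivers
 * of this era; ioc3_r_etcdc() and ETCDC_COLLCNT_MASK are the driver's
 * own register accessor and collision-count mask:
 */
#if 0
static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	/* Fold the hardware collision counter into the counters that
	 * now live in the net_device itself, then return them. */
	dev->stats.collisions += ioc3_r_etcdc() & ETCDC_COLLCNT_MASK;
	return &dev->stats;
}
#endif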
static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
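/* CHECKSUM_UNNECESSARY tells the stack that the hardware has already
 * verified the TCP/UDP checksum, so no software verification is done
 * on this skb.
 */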
-static inline void ioc3_rx(struct ioc3_private *ip)
+static inline void ioc3_rx(struct net_device *dev)
{
+ struct ioc3_private *ip = netdev_priv(dev);
struct sk_buff *skb, *new_skb;
struct ioc3 *ioc3 = ip->regs;
int rx_entry, n_entry, len;
if (err & ERXBUF_GOODPKT) {
len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
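/* The hardware byte count includes the 4-byte FCS, which is not
 * handed up the stack, hence the subtraction above. */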
skb_trim(skb, len);
- skb->protocol = eth_type_trans(skb, priv_netdev(ip));
+ skb->protocol = eth_type_trans(skb, dev);
new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
if (!new_skb) {
/* Ouch, drop the packet and just recycle it
   to keep the ring filled. */
- ip->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
new_skb = skb;
goto next;
}
rxb = (struct ioc3_erxbuf *) new_skb->data;
skb_reserve(new_skb, RX_OFFSET);
- ip->stats.rx_packets++; /* Statistics */
- ip->stats.rx_bytes += len;
+ dev->stats.rx_packets++; /* Statistics */
+ dev->stats.rx_bytes += len;
} else {
/* The frame is invalid and the skb never
   reached the network layer so we can just
   recycle it. */
new_skb = skb;
- ip->stats.rx_errors++;
+ dev->stats.rx_errors++;
}
if (err & ERXBUF_CRCERR) /* Statistics */
- ip->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
if (err & ERXBUF_FRAMERR)
- ip->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
next:
ip->rx_skbs[n_entry] = new_skb;
rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
ip->rx_ci = rx_entry;
}
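/* ioc3_rx() as a whole follows the classic descriptor-ring pattern:
 * consume the filled entry, refill the slot with either a fresh skb
 * or the recycled one, remap it with ioc3_map() and advance rx_ci so
 * the ring never runs dry.
 */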
-static inline void ioc3_tx(struct ioc3_private *ip)
+static inline void ioc3_tx(struct net_device *dev)
{
+ struct ioc3_private *ip = netdev_priv(dev);
unsigned long packets, bytes;
struct ioc3 *ioc3 = ip->regs;
int tx_entry, o_entry;
tx_entry = (etcir >> 7) & 127;
}
- ip->stats.tx_packets += packets;
- ip->stats.tx_bytes += bytes;
+ dev->stats.tx_packets += packets;
+ dev->stats.tx_bytes += bytes;
ip->txqlen -= packets;
if (ip->txqlen < 128)
- netif_wake_queue(priv_netdev(ip));
+ netif_wake_queue(dev);
ip->tx_ci = o_entry;
spin_unlock(&ip->ioc3_lock);
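/* The TX completion path batches its accounting: packets and bytes
 * are summed while walking the ring and added to dev->stats once, and
 * the queue is woken only when the number of outstanding descriptors
 * drops below the 128-entry ring size.
 */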
* with such error interrupts if something really goes wrong, so we might
* also consider taking the interface down.
*/
-static void ioc3_error(struct ioc3_private *ip, u32 eisr)
+static void ioc3_error(struct net_device *dev, u32 eisr)
{
- struct net_device *dev = priv_netdev(ip);
+ struct ioc3_private *ip = netdev_priv(dev);
const char *iface = dev->name;
spin_lock(&ip->ioc3_lock);
if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
- ioc3_error(ip, eisr);
+ ioc3_error(dev, eisr);
if (eisr & EISR_RXTIMERINT)
- ioc3_rx(ip);
+ ioc3_rx(dev);
if (eisr & EISR_TXEXPLICIT)
- ioc3_tx(ip);
+ ioc3_tx(dev);
return IRQ_HANDLED;
}
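/* With this change every helper takes the net_device and derives the
 * private data via netdev_priv(), instead of mapping back through
 * priv_netdev(); dev is what the stats and queue operations want in
 * the first place.
 */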