Replaced deprecated dev_alloc_skb with netdev_alloc_skb in drivers/net/ethernet
- Removed the now-redundant skb->dev = dev assignment after netdev_alloc_skb
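For illustration, the conversion follows this pattern (a generic sketch, not
code taken from any single driver below); netdev_alloc_skb() associates the
skb with the device itself, which is why the manual assignment goes away:

	/* before: allocate, then mark the owning device by hand */
	skb = dev_alloc_skb(PKT_BUF_SZ);
	if (skb == NULL)
		break;
	skb->dev = dev;

	/* after: netdev_alloc_skb() sets skb->dev = dev internally */
	skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
	if (skb == NULL)
		break;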
Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
vp->rx_ring[i].next = 0;
vp->rx_ring[i].status = 0; /* Clear complete bit. */
vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000;
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
vp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* Bad news! */
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
}
short pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 5 + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 5 + 2);
if (corkscrew_debug > 4)
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
/* Check if the packet is long enough to just accept without
copying to a properly sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + 4)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
memcpy(skb_put(skb, pkt_len),
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(PKT_BUF_SZ);
+ skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
if (skb == NULL)
break; /* Bad news! */
- skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
vp->rx_skbuff[entry] = skb;
}
}
-static int desc_list_init(void)
+static int desc_list_init(struct net_device *dev)
{
int i;
struct sk_buff *new_skb;
struct dma_descriptor *b = &(r->desc_b);
/* allocate a new skb for next time receive */
- new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
+ new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
pr_notice("init: low on mem - packet dropped\n");
goto init_error;
/* allocate a new skb for next time receive */
skb = current_rx_ptr->skb;
- new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
+ new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
}
/* initial rx and tx list */
- ret = desc_list_init();
+ ret = desc_list_init(dev);
if (ret)
return ret;
}
static int
-bmac_init_rx_ring(struct bmac_data *bp)
+bmac_init_rx_ring(struct net_device *dev)
{
+ struct bmac_data *bp = netdev_priv(dev);
volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
int i;
struct sk_buff *skb;
(N_RX_RING + 1) * sizeof(struct dbdma_cmd));
for (i = 0; i < N_RX_RING; i++) {
if ((skb = bp->rx_bufs[i]) == NULL) {
- bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
if (skb != NULL)
skb_reserve(skb, 2);
}
++dev->stats.rx_dropped;
}
if ((skb = bp->rx_bufs[i]) == NULL) {
- bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
if (skb != NULL)
skb_reserve(bp->rx_bufs[i], 2);
}
spin_lock_irqsave(&bp->lock, flags);
bmac_enable_and_reset_chip(dev);
bmac_init_tx_ring(bp);
- bmac_init_rx_ring(bp);
+ bmac_init_rx_ring(dev);
bmac_init_chip(dev);
bmac_start_chip(dev);
bmwrite(dev, INTDISABLE, EnableNormal);
* It seems that the bmac can't receive until it's transmitted
* a packet. So we give it a dummy packet to transmit.
*/
- skb = dev_alloc_skb(ETHERMINPACKET);
+ skb = netdev_alloc_skb(dev, ETHERMINPACKET);
if (skb != NULL) {
data = skb_put(skb, ETHERMINPACKET);
memset(data, 0, ETHERMINPACKET);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe (struct net_device *dev);
#endif
-static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
-static void allocate_rx_buffer(struct dmfe_board_info *);
+static void dmfe_descriptor_init(struct net_device *, unsigned long);
+static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct DEVICE *);
static void dm9132_id_table(struct DEVICE *);
db->op_mode = db->media_mode; /* Force Mode */
/* Initialize Transmit/Receive descriptor and CR3/4 */
- dmfe_descriptor_init(db, ioaddr);
+ dmfe_descriptor_init(dev, ioaddr);
/* Init CR6 to program DM910x operation */
update_cr6(db->cr6_data, ioaddr);
/* reallocate rx descriptor buffer */
if (db->rx_avail_cnt<RX_DESC_CNT)
- allocate_rx_buffer(db);
+ allocate_rx_buffer(dev);
/* Free the transmitted descriptor */
if ( db->cr5_data & 0x01)
/* Good packet, send to upper layer */
/* Short packet uses a new SKB */
if ((rxlen < RX_COPY_SIZE) &&
- ((newskb = dev_alloc_skb(rxlen + 2))
+ ((newskb = netdev_alloc_skb(dev, rxlen + 2))
!= NULL)) {
skb = newskb;
* Using Chain structure, and allocate Tx/Rx buffer
*/
-static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
+static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
{
+ struct dmfe_board_info *db = netdev_priv(dev);
struct tx_desc *tmp_tx;
struct rx_desc *tmp_rx;
unsigned char *tmp_buf;
tmp_rx->next_rx_desc = db->first_rx_desc;
/* pre-allocate Rx buffer */
- allocate_rx_buffer(db);
+ allocate_rx_buffer(dev);
}
* Allocate as many Rx buffers as possible
*/
-static void allocate_rx_buffer(struct dmfe_board_info *db)
+static void allocate_rx_buffer(struct net_device *dev)
{
+ struct dmfe_board_info *db = netdev_priv(dev);
struct rx_desc *rxptr;
struct sk_buff *skb;
rxptr = db->rx_insert_ptr;
while(db->rx_avail_cnt < RX_DESC_CNT) {
- if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+ skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
+ if (skb == NULL)
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev);
#endif
-static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
-static void allocate_rx_buffer(struct uli526x_board_info *);
+static void uli526x_descriptor_init(struct net_device *, unsigned long);
+static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct net_device *, int);
static u16 phy_read(unsigned long, u8, u8, u32);
db->op_mode = db->media_mode; /* Force Mode */
/* Initialize Transmit/Receive descriptor and CR3/4 */
- uli526x_descriptor_init(db, ioaddr);
+ uli526x_descriptor_init(dev, ioaddr);
/* Init CR6 to program M526X operation */
update_cr6(db->cr6_data, ioaddr);
/* reallocate rx descriptor buffer */
if (db->rx_avail_cnt<RX_DESC_CNT)
- allocate_rx_buffer(db);
+ allocate_rx_buffer(dev);
/* Free the transmitted descriptor */
if ( db->cr5_data & 0x01)
/* Good packet, send to upper layer */
/* Short packet uses a new SKB */
if ((rxlen < RX_COPY_SIZE) &&
- (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+ (((new_skb = netdev_alloc_skb(dev, rxlen + 2)) != NULL))) {
skb = new_skb;
/* size less than COPY_SIZE, allocate a rxlen SKB */
skb_reserve(skb, 2); /* 16byte align */
* Using Chain structure, and allocate Tx/Rx buffer
*/
-static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
+static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr)
{
+ struct uli526x_board_info *db = netdev_priv(dev);
struct tx_desc *tmp_tx;
struct rx_desc *tmp_rx;
unsigned char *tmp_buf;
tmp_rx->next_rx_desc = db->first_rx_desc;
/* pre-allocate Rx buffer */
- allocate_rx_buffer(db);
+ allocate_rx_buffer(dev);
}
* Allocate as many Rx buffers as possible
*/
-static void allocate_rx_buffer(struct uli526x_board_info *db)
+static void allocate_rx_buffer(struct net_device *dev)
{
+ struct uli526x_board_info *db = netdev_priv(dev);
struct rx_desc *rxptr;
struct sk_buff *skb;
rxptr = db->rx_insert_ptr;
while(db->rx_avail_cnt < RX_DESC_CNT) {
- if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+ skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE);
+ if (skb == NULL)
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
} link;
};
-static void s6gmac_rx_fillfifo(struct s6gmac *pd)
+static void s6gmac_rx_fillfifo(struct net_device *dev)
{
+ struct s6gmac *pd = netdev_priv(dev);
struct sk_buff *skb;
while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
(!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
- (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+ (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
pd->io, (u32)skb->data, S6_MAX_FRLEN);
spin_lock(&pd->lock);
if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
s6gmac_rx_interrupt(dev);
- s6gmac_rx_fillfifo(pd);
+ s6gmac_rx_fillfifo(dev);
if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
s6gmac_tx_interrupt(dev);
s6gmac_stats_interrupt(pd, 0);
s6gmac_init_device(dev);
s6gmac_init_stats(dev);
s6gmac_init_dmac(dev);
- s6gmac_rx_fillfifo(pd);
+ s6gmac_rx_fillfifo(dev);
s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,