Replaced deprecated dev_alloc_skb with netdev_alloc_skb in drivers/net/ethernet
- Removed the now-redundant skb->dev = dev assignments after netdev_alloc_skb
Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
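For reference, netdev_alloc_skb() differs from dev_alloc_skb() only in
that it takes the owning net_device and records it in skb->dev itself.
A minimal sketch of the pattern applied throughout this patch (names
such as "dev" and "pkt_len" are placeholders, not from any one driver):

	/* Before: allocate, then associate the skb with the device by hand. */
	skb = dev_alloc_skb(pkt_len + 2);
	if (skb) {
		skb->dev = dev;		/* explicit association */
		skb_reserve(skb, 2);	/* align the IP header */
	}

	/* After: netdev_alloc_skb() sets skb->dev internally, so the
	 * manual assignment is redundant and is dropped.
	 */
	skb = netdev_alloc_skb(dev, pkt_len + 2);
	if (skb)
		skb_reserve(skb, 2);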
else { /* Ok so now we should have a good packet */
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 3);
+ skb = netdev_alloc_skb(dev, pkt_len + 3);
if( skb == NULL ) {
printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n",
dev->name, pkt_len);
dev->stats.rx_errors++;
break;
}
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
pkt_len);
/* Note: This depends on the alloc_skb functions allocating more
* space than requested, i.e. aligning to 16 bytes */
- ringptr->skb = dev_alloc_skb(roundup(MAX_ETHER_SIZE + 2, 4));
+ ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4));
if (NULL != ringptr->skb) {
/*
*/
skb_reserve(ringptr->skb, 2);
- ringptr->skb->dev = dev;
ringptr->skb->data = (u_char *) skb_put(ringptr->skb, MAX_ETHER_SIZE);
/* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
#endif
/* Now we allocate the skb and transfer the data into it. */
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) { /* Not enough memory->drop packet */
#ifdef HP100_DEBUG
printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
unsigned long flags;
rlen = (len + 1) & ~1;
- skb = dev_alloc_skb(rlen + 2);
+ skb = netdev_alloc_skb(dev, rlen + 2);
if (!skb) {
pr_warning("%s: memory squeeze, dropping packet\n", dev->name);
struct sk_buff *skb;
pkt_len &= 0x3fff;
- skb = dev_alloc_skb(pkt_len+2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
pr_err("%s: Memory squeeze, dropping packet.\n",
dev->name);
if ((totlen = rbd->status) & RBD_LAST) { /* the first and the last buffer? */
totlen &= RBD_MASK; /* length of this frame */
rbd->status = 0;
- skb = (struct sk_buff *) dev_alloc_skb(totlen + 2);
+ skb = netdev_alloc_skb(dev, totlen + 2);
if (skb != NULL) {
skb_reserve(skb, 2); /* 16 byte alignment */
skb_put(skb,totlen);
/* Try to save time by avoiding a copy on big frames */
if ((length > RX_COPYBREAK) &&
- ((newskb=dev_alloc_skb(1532)) != NULL))
+ ((newskb = netdev_alloc_skb(dev, 1532)) != NULL))
{
skb=lp->rx_ring[rx_ring_tail].skb;
skb_put(skb, length);
}
else
{
- skb=dev_alloc_skb(length+2);
+ skb = netdev_alloc_skb(dev, length + 2);
if(skb==NULL) {
dev->stats.rx_dropped++;
/* First build the Receive Buffer Descriptor List */
for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
- struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+ struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
if (skb == NULL) {
remove_rx_bufs(dev);
return -ENOMEM;
}
- skb->dev = dev;
rbd->v_next = rbd+1;
rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
struct sk_buff *newskb;
/* Get fresh skbuff to replace filled one. */
- newskb = dev_alloc_skb(PKT_BUF_SZ);
+ newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
if (newskb == NULL) {
skb = NULL; /* drop pkt */
goto memory_squeeze;
skb_put(skb, pkt_len);
rx_in_place = 1;
rbd->skb = newskb;
- newskb->dev = dev;
rbd->v_data = newskb->data;
rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
#ifdef __mc68000__
#endif
}
else
- skb = dev_alloc_skb(pkt_len + 2);
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
dev->stats.rx_bytes+=rcv_size;
rcv_size &= 0x3fff;
- skb = dev_alloc_skb(rcv_size+5);
+ skb = netdev_alloc_skb(dev, rcv_size + 5);
if (skb == NULL) {
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
{
struct sk_buff *skb;
pkt_len &= 0x3fff;
- skb = dev_alloc_skb(pkt_len+16);
+ skb = netdev_alloc_skb(dev, pkt_len + 16);
if (skb == NULL)
{
printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
struct sk_buff *skb;
length = (length + 1) & ~1;
- skb = dev_alloc_skb (length + 2);
+ skb = netdev_alloc_skb(dev, length + 2);
if (skb) {
skb_reserve (skb, 2);
if (rfd->stat & RFD_STAT_OK) {
/* a good frame */
int pkt_len = (rfd->count & 0x3fff);
- struct sk_buff *skb = dev_alloc_skb(pkt_len);
+ struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
(*frames)++;
/* the first and the last buffer? */
totlen &= RBD_MASK; /* length of this frame */
writew(0x00, &rbd->status);
- skb = (struct sk_buff *)dev_alloc_skb(totlen+2);
+ skb = netdev_alloc_skb(dev, totlen + 2);
if (skb != NULL) {
skb_reserve(skb, 2);
skb_put(skb, totlen);
{
totlen &= RBD_MASK; /* length of this frame */
rbd->status = 0;
- skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
+ skb = netdev_alloc_skb(dev, totlen + 2);
if(skb != NULL)
{
skb_reserve(skb,2);
/* Malloc up new buffer. */
struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len);
+ skb = netdev_alloc_skb(dev, pkt_len);
if (skb == NULL) {
if (znet_debug)
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
return -ENOMEM;
}
- /* Associate the receive buffer with the IPG NIC. */
- skb->dev = dev;
-
/* Save the address of the sk_buff structure. */
sp->rx_buff[entry] = skb;
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
- ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
+ ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc])
return -ENOMEM;
ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
- skb = dev_alloc_skb(mp->skb_size);
+ skb = netdev_alloc_skb(mp->dev, mp->skb_size);
if (skb == NULL) {
mp->oom = 1;
while (pep->rx_desc_count < pep->rx_ring_size) {
int size;
- skb = dev_alloc_skb(pep->skb_size);
+ skb = netdev_alloc_skb(dev, pep->skb_size);
if (!skb)
break;
if (SKB_DMA_REALIGN)
int used_frags;
dma_addr_t dma;
- skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
if (!skb) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
- skb->dev = priv->dev;
skb_reserve(skb, NET_IP_ALIGN);
skb->len = length;
for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
if (!ksp->rx_buffers[buff_n].skb) {
- struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
+ struct sk_buff *skb =
+ netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
dma_addr_t mapping;
ksp->rx_buffers[buff_n].skb = skb;
break;
}
ksp->rx_buffers[buff_n].dma_ptr = mapping;
- skb->dev = ksp->ndev;
ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
/* Record this into the DMA ring */
frame_hdr = ks->frame_head_info;
while (ks->frame_cnt--) {
- skb = dev_alloc_skb(frame_hdr->len + 16);
+ skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
skb_reserve(skb, 2);
memset(&skb->data[skb->len], 0, 50 - skb->len);
skb->len = 50;
} else {
- skb = dev_alloc_skb(50);
+ skb = netdev_alloc_skb(dev, 50);
if (!skb)
return NETDEV_TX_BUSY;
memcpy(skb->data, org_skb->data, org_skb->len);
(ETH_P_IPV6 == htons(skb->protocol)))) {
struct sk_buff *org_skb = skb;
- skb = dev_alloc_skb(org_skb->len);
+ skb = netdev_alloc_skb(dev, org_skb->len);
if (!skb) {
rc = NETDEV_TX_BUSY;
goto unlock;
do {
/* skb->data != skb->head */
- skb = dev_alloc_skb(packet_len + 2);
+ skb = netdev_alloc_skb(dev, packet_len + 2);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
if (len > MAX_FRAMELEN)
ndev->stats.rx_over_errors++;
} else {
- skb = dev_alloc_skb(len + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
if (!skb) {
if (netif_msg_rx_err(priv))
dev_err(&ndev->dev,
"out of memory for Rx'd frame\n");
ndev->stats.rx_dropped++;
} else {
- skb->dev = ndev;
skb_reserve(skb, NET_IP_ALIGN);
/* copy the packet from the receive buffer */
enc28j60_mem_read(priv,
if (!len)
return len;
- skb = dev_alloc_skb(len + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
/* fetch buffer */
- skb = dev_alloc_skb(rda.length + 2);
+ skb = netdev_alloc_skb(dev, rda.length + 2);
if (skb == NULL)
dev->stats.rx_dropped++;
else {
int entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
- skb = dev_alloc_skb(buflen);
+ skb = netdev_alloc_skb(dev, buflen);
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- skb->dev = dev; /* Mark as being used by this device. */
np->rx_dma[entry] = pci_map_single(np->pci_dev,
skb->data, buflen, PCI_DMA_FROMDEVICE);
np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
/* Check if the packet is long enough to accept
* without copying to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak &&
- (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
+ (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
/* 16 byte align the IP header */
skb_reserve(skb, RX_OFFSET);
pci_dma_sync_single_for_cpu(np->pci_dev,
printk("sonic_open: initializing sonic driver.\n");
for (i = 0; i < SONIC_NUM_RRS; i++) {
- struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+ struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {
while(i > 0) { /* free any that were allocated successfully */
i--;
status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
if (status & SONIC_RCR_PRX) {
/* Malloc up new buffer. */
- new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+ new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (new_skb == NULL) {
printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
/* allocate skb */
- skb = dev_alloc_skb(size);
+ skb = netdev_alloc_skb(nic->dev, size);
if (!skb) {
DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
ring->dev->name);
*/
rxdp1->Buffer0_ptr = *temp0;
} else {
- *skb = dev_alloc_skb(size);
+ *skb = netdev_alloc_skb(dev, size);
if (!(*skb)) {
DBG_PRINT(INFO_DBG,
"%s: Out of memory to allocate %s\n",
rxdp3->Buffer0_ptr = *temp0;
rxdp3->Buffer1_ptr = *temp1;
} else {
- *skb = dev_alloc_skb(size);
+ *skb = netdev_alloc_skb(dev, size);
if (!(*skb)) {
DBG_PRINT(INFO_DBG,
"%s: Out of memory to allocate %s\n",