 }
 
 static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
 	dma_addr_t mapping;
 	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
 	struct rx_bd *rxbd =
 		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
-	struct page *page = alloc_page(GFP_ATOMIC);
+	struct page *page = alloc_page(gfp);
 
 	if (!page)
 		return -ENOMEM;
 }
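
The point of the new parameter: the allocator no longer hard-codes GFP_ATOMIC but takes a gfp_t from its caller, so the sleepable init path can use GFP_KERNEL while the receive fast path keeps GFP_ATOMIC. A minimal sketch of the pattern, with hypothetical demo_* names (not the driver's code):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical ring type, for illustration only. */
struct demo_ring {
	struct page *pages[512];
};

/*
 * The caller chooses the allocation class: GFP_KERNEL from process
 * context (may sleep and reclaim), GFP_ATOMIC from softirq context
 * (must not sleep; fails fast under memory pressure).
 */
static int demo_alloc_rx_buf(struct demo_ring *ring, u16 index, gfp_t gfp)
{
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	ring->pages[index] = page;
	return 0;
}
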
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
 	unsigned long align;
 
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
 	if (skb == NULL) {
 		return -ENOMEM;
 	}
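
The skb allocator needs the same treatment via __netdev_alloc_skb(), because plain netdev_alloc_skb() has GFP_ATOMIC baked in; in the kernel it is essentially this wrapper:

/* Paraphrase of the include/linux/skbuff.h helper: the no-underscore
 * variant is just the flags-taking variant pinned to GFP_ATOMIC. */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
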
 	int err;
 	u16 prod = ring_idx & 0xffff;
 
-	err = bnx2_alloc_rx_skb(bp, rxr, prod);
+	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
 	if (unlikely(err)) {
 		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
 		if (hdr_len) {
 			rx_pg->page = NULL;
 
 			err = bnx2_alloc_rx_page(bp, rxr,
-						RX_PG_RING_IDX(pg_prod));
+						 RX_PG_RING_IDX(pg_prod),
+						 GFP_ATOMIC);
 			if (unlikely(err)) {
 				rxr->rx_pg_cons = pg_cons;
 				rxr->rx_pg_prod = pg_prod;
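
Both fast-path call sites above keep GFP_ATOMIC: they sit in the NAPI receive path, i.e. softirq context, where sleeping is forbidden, so the allocation must succeed from the free lists or fail immediately. The unlikely(err) branches recover by reusing the old skb and rewinding the page-ring producer/consumer indices rather than blocking.
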
 	ring_prod = prod = rxr->rx_pg_prod;
 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
-		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
 				    ring_num, i, bp->rx_pg_ring_size);
 			break;
 	ring_prod = prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
 				    ring_num, i, bp->rx_ring_size);
 			break;
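
The two fill loops run during ring setup in process context, where the driver may sleep, which is what makes GFP_KERNEL safe and worthwhile: the allocator can block and reclaim instead of failing under transient pressure, so the netdev_warn() fallbacks should now only fire when memory is genuinely exhausted. Reusing the hypothetical demo_alloc_rx_buf() from the first sketch, the resulting split looks like:

/* Process context (e.g. ndo_open): sleeping allowed, use GFP_KERNEL. */
static int demo_ring_fill(struct demo_ring *ring, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (demo_alloc_rx_buf(ring, i, GFP_KERNEL) < 0)
			return -ENOMEM;	/* memory truly exhausted */
	return 0;
}

/* Softirq context (NAPI poll): must not sleep, use GFP_ATOMIC. */
static int demo_ring_refill_one(struct demo_ring *ring, u16 index)
{
	/* On failure the caller recycles the old buffer instead. */
	return demo_alloc_rx_buf(ring, index, GFP_ATOMIC);
}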