			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
		}
		ring->queue_id = bp->q_info[j].queue_id;
+		if (i < bp->tx_nr_rings_xdp)
+			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
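
The hunk above is the TX ring setup loop in bnxt_alloc_tx_rings(): XDP rings occupy the first tx_nr_rings_xdp slots of the tx_ring array, get stamped with the current queue profile like any other ring, and then bail out before the per-TC advance of j, so only the regular rings step through the traffic-class queue profiles. The following is a minimal standalone model of that index walk, not driver code; the ring counts are made-up example values.

/*
 * Standalone model of the queue-profile walk above; nr_xdp, nr_per_tc and
 * nr_tc are illustrative values, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	int nr_xdp = 4, nr_per_tc = 4, nr_tc = 2;
	int nr_tx = nr_xdp + nr_per_tc * nr_tc;
	int i, j = 0;

	for (i = 0; i < nr_tx; i++) {
		/* every ring is stamped with the current queue profile j */
		printf("tx_ring[%2d]: q_info[%d]%s\n", i, j,
		       i < nr_xdp ? " (XDP ring, j not advanced)" : "");
		if (i < nr_xdp)
			continue;	/* XDP rings never advance j */
		if (i % nr_per_tc == (nr_per_tc - 1))
			j++;		/* last ring of a TC moves to the next profile */
	}
	return 0;
}
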
		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
			bp->tx_ring[i].bnapi = bp->bnapi[j];
			bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
-			bp->tx_ring_map[i] = i;
-			bp->tx_ring[i].txq_index = i;
+			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
+			if (i >= bp->tx_nr_rings_xdp)
+				bp->tx_ring[i].txq_index = i -
+					bp->tx_nr_rings_xdp;
		}
		rc = bnxt_alloc_stats(bp);
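
Here the bnapi/tx_ring wiring in bnxt_alloc_mem() puts the XDP rings at the front of tx_ring[], so netdev TX queue q is backed by ring tx_nr_rings_xdp + q and only rings at or above tx_nr_rings_xdp receive a txq_index. Below is a minimal sketch of that two-way indexing with assumed counts; it models the arithmetic only, not the driver structures.

/* Standalone model of the tx_ring <-> netdev queue indexing above. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	int nr_xdp = 4, nr_tx = 12;	/* assumed: 4 XDP rings + 8 stack rings */
	int nr_stack = nr_tx - nr_xdp;	/* rings visible to the stack */
	int q, i;

	/* netdev queue -> ring: skip over the XDP block at the front */
	for (q = 0; q < nr_stack; q++)
		printf("netdev txq %d -> tx_ring[%d]\n", q, nr_xdp + q);

	/* ring -> netdev queue: only non-XDP rings have a txq_index */
	for (i = 0; i < nr_tx; i++) {
		if (i < nr_xdp) {
			printf("tx_ring[%2d]: XDP, no txq_index\n", i);
			continue;
		}
		printf("tx_ring[%2d]: txq_index %d\n", i, i - nr_xdp);
		assert(nr_xdp + (i - nr_xdp) == i);	/* round trip of the mapping */
	}
	return 0;
}
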
	int rc;
	struct net_device *dev = bp->dev;
-	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
+					  bp->tx_nr_rings_xdp);
	if (rc)
		return rc;
}
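
Because the XDP rings are internal to the driver, the TX queue count reported to the stack excludes them: netif_set_real_num_tx_queues() now gets tx_nr_rings - tx_nr_rings_xdp. A quick arithmetic check with assumed ring counts:

/* Assumed example: 4 rings per TC, 2 TCs, 4 XDP rings. */
#include <stdio.h>

int main(void)
{
	int per_tc = 4, tcs = 2, xdp = 4;
	int tx_nr_rings = per_tc * tcs + xdp;	/* 12 rings allocated */
	int stack_queues = tx_nr_rings - xdp;	/* 8 queues visible to the stack */

	printf("TX rings: %d, netdev TX queues: %d\n", tx_nr_rings, stack_queues);
	return 0;
}
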
/* Under rtnl_lock */
-int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs)
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
{
	int max_rx, max_tx, tx_sets = 1;
	int tx_rings_needed;
	if (max_rx < rx)
		return -ENOMEM;
-	tx_rings_needed = tx * tx_sets;
+	tx_rings_needed = tx * tx_sets + tx_xdp;
	if (max_tx < tx_rings_needed)
		return -ENOMEM;
	if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
-	    tx_rings_needed < (tx * tx_sets))
+	    tx_rings_needed < (tx * tx_sets + tx_xdp))
		return -ENOMEM;
	return 0;
}
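
bnxt_reserve_rings() now folds the XDP rings into the demand it validates and reserves: the need is tx * tx_sets + tx_xdp, and the firmware must grant at least that many. The sketch below models only that bookkeeping; tx_sets is assumed to be the TC count (1 when no TCs are configured), and the "granted" parameter stands in for whatever bnxt_hwrm_reserve_tx_rings() would report back, so treat it as an illustration of the check rather than the firmware interface.

#include <errno.h>
#include <stdio.h>

/* Mirror of the demand/grant check, with the firmware grant passed in. */
static int check_tx_reservation(int tx, int tcs, int tx_xdp, int max_tx, int granted)
{
	int tx_sets = tcs ? tcs : 1;
	int needed = tx * tx_sets + tx_xdp;

	if (max_tx < needed)
		return -ENOMEM;	/* hardware cannot supply that many rings */
	if (granted < needed)
		return -ENOMEM;	/* firmware reserved fewer than requested */
	return 0;
}

int main(void)
{
	/* Assumed numbers: 4 TX rings per TC, 2 TCs, 4 XDP rings, 16 ring limit. */
	printf("need %d, full grant ok: %d\n", 4 * 2 + 4,
	       check_tx_reservation(4, 2, 4, 16, 12) == 0);
	printf("short grant returns %d\n", check_tx_reservation(4, 2, 4, 16, 10));
	return 0;
}
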
	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
-	rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc,
-				bp->rx_nr_rings, tc);
+	rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+				tc, bp->tx_nr_rings_xdp);
	if (rc)
		return rc;
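
The mqprio path (bnxt_setup_mq_tc) now passes bp->tx_nr_rings_xdp through, so a TC change is validated against the full demand. As an assumed example with 4 rings per TC and 4 XDP rings, going from 1 TC to 4 TCs raises the request from 4 * 1 + 4 = 8 to 4 * 4 + 4 = 20 TX rings.
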
	int			tx_nr_pages;
	int			tx_nr_rings;
	int			tx_nr_rings_per_tc;
+	int			tx_nr_rings_xdp;
	int			tx_wake_thresh;
	int			tx_push_thresh;
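
tx_nr_rings_xdp is the new count of driver-internal XDP TX rings. The existing counters keep their meaning: tx_nr_rings_per_tc is the stack-visible rings per traffic class and tx_nr_rings stays the grand total, so after the ethtool changes below tx_nr_rings = tx_nr_rings_per_tc * tcs + tx_nr_rings_xdp (with tcs treated as 1 when at most one TC is configured).
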
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
-int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs);
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp);
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
+	int tx_xdp = 0;
	int rc = 0;
	if (channel->other_count)
	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
-	rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs);
+	if (bp->tx_nr_rings_xdp) {
+		if (!sh) {
+			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
+			return -EINVAL;
+		}
+		tx_xdp = req_rx_rings;
+	}
+	rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
-
-	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+	bp->tx_nr_rings_xdp = tx_xdp;
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
-		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
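
Taken together, the ethtool set_channels path rejects non-combined channels while XDP is active, sizes one XDP TX ring per RX ring (tx_xdp = req_rx_rings), and then recomputes the totals. The sketch below reproduces just that arithmetic for an assumed channel request; it is a standalone model, not the ethtool op itself.

/* Model of the ring totals computed at the end of the set_channels path. */
#include <stdio.h>

int main(void)
{
	int combined = 4, tcs = 2, xdp_enabled = 1;	/* assumed request */
	int sh = 1;			/* combined channels => shared rings */
	int rx = combined;
	int per_tc = combined;		/* combined mode: per-TC TX count follows combined (assumed) */
	int tx_xdp = xdp_enabled ? rx : 0;	/* one XDP TX ring per RX ring */
	int tx, cp;

	tx = per_tc + tx_xdp;
	if (tcs > 1)
		tx = per_tc * tcs + tx_xdp;
	/* shared mode: completion rings cover the larger of the two sides */
	cp = sh ? (tx > rx ? tx : rx) : tx + rx;

	printf("rx=%d tx=%d (xdp %d) cp=%d\n", rx, tx, tx_xdp, cp);
	return 0;
}
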