[ Upstream commit 508c58f76ca510956625c945f9b8eb104f2c8208 ]
The current implementation is such that tot_tx_queues contains both
XDP queues and normal TX queues, which are allocated on interface
open and deallocated on interface down respectively.

With the addition of QoS, where send queues are allocated/deallocated
upon user request, QoS send queues won't be part of tot_tx_queues. So
this patch renames tot_tx_queues to non_qos_queues.
Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: 3423ca23e08b ("octeontx2-pf: Free pending and dropped SQEs")
Signed-off-by: Sasha Levin <sashal@kernel.org>
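
For context, the hunks below keep the same accounting under the new
name: non_qos_queues still counts the regular TX queues plus the XDP
queues, and the CQ count remains rx_queues + non_qos_queues, while the
upcoming QoS send queues will be tracked separately. A minimal,
self-contained C sketch of that bookkeeping follows; the struct and
helper here are illustrative stand-ins, not the driver's actual
otx2_hw layout or functions.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-in for the driver's queue counters; the
	 * field names mirror the patch but the struct is hypothetical.
	 */
	struct hw_queues {
		uint16_t rx_queues;
		uint16_t tx_queues;
		uint16_t xdp_queues;
		uint16_t non_qos_queues;	/* tx queues plus xdp queues */
	};

	/* Send queues set up at interface open: regular TX plus XDP.
	 * QoS SQs are not counted here because they are created and
	 * destroyed on user request, independent of open/close.
	 */
	static void setup_queue_counts(struct hw_queues *hw, uint16_t qcount,
				       uint16_t xdp_qcount)
	{
		hw->rx_queues = qcount;
		hw->tx_queues = qcount;
		hw->xdp_queues = xdp_qcount;
		hw->non_qos_queues = hw->tx_queues + hw->xdp_queues;
	}

	int main(void)
	{
		struct hw_queues hw;

		setup_queue_counts(&hw, 8, 2);
		/* CQ count in the patch is rx_queues + non_qos_queues */
		printf("non_qos_queues=%u cq_cnt=%u\n",
		       (unsigned int)hw.non_qos_queues,
		       (unsigned int)(hw.rx_queues + hw.non_qos_queues));
		return 0;
	}
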
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
- for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
}
/* Initialize TX queues */
- for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
err = otx2_sq_init(pfvf, qidx, sqb_aura);
/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
- nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
+ nixlf->sq_cnt = pfvf->hw.non_qos_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
int sqb, qidx;
u64 iova, pa;
- for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
- for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
goto fail;
/* Allocate pointers and free them to aura/pool */
- for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
u16 rx_queues;
u16 tx_queues;
u16 xdp_queues;
- u16 tot_tx_queues;
+ u16 non_qos_queues; /* tx queues plus xdp queues */
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
}
/* SQ */
- for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.non_qos_queues; qidx++) {
u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
u8 sq_op_err_code, mnq_err_code, snd_err_code;
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
- for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.non_qos_queues; qidx++) {
sq = &qset->sq[qidx];
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->tso_hdrs);
* so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
- hw->sqpool_cnt = hw->tot_tx_queues;
+ hw->sqpool_cnt = hw->non_qos_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
/* Maximum hardware supported transmit length */
netif_carrier_off(netdev);
- pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.non_qos_queues;
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
if (!qset->cq)
goto err_free_mem;
- qset->sq = kcalloc(pf->hw.tot_tx_queues,
+ qset->sq = kcalloc(pf->hw.non_qos_queues,
sizeof(struct otx2_snd_queue), GFP_KERNEL);
if (!qset->sq)
goto err_free_mem;
else
pf->hw.xdp_queues = 0;
- pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+ pf->hw.non_qos_queues += pf->hw.xdp_queues;
if (if_up)
otx2_open(pf->netdev);
hw->pdev = pdev;
hw->rx_queues = qcount;
hw->tx_queues = qcount;
- hw->tot_tx_queues = qcount;
+ hw->non_qos_queues = qcount;
hw->max_queues = qcount;
hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
/* Use CQE of 128 byte descriptor size by default */
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
- hw->tot_tx_queues = qcount;
+ hw->non_qos_queues = qcount;
hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
/* Use CQE of 128 byte descriptor size by default */
hw->xqe_size = 128;