octeontx2-af: Allow freeing single TLx Tx schedule queue
author     Nithin Dabilpuram <ndabilpuram@marvell.com>
           Sun, 2 Dec 2018 12:47:41 +0000 (18:17 +0530)
committer  David S. Miller <davem@davemloft.net>
           Tue, 4 Dec 2018 00:23:08 +0000 (16:23 -0800)
The default behavior was to free all of the requester's TLx Tx
schedule queues. This patch adds support for freeing a single Tx
schedule queue when the TXSCHQ_FREE_ALL flag is not set in the
request.

Signed-off-by: Krzysztof Kanas <kkanas@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0d4929b..741728c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1252,11 +1252,81 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
        return 0;
 }
 
+static int nix_txschq_free_one(struct rvu *rvu,
+                              struct nix_txsch_free_req *req)
+{
+       int lvl, schq, nixlf, blkaddr, rc;
+       struct rvu_hwinfo *hw = rvu->hw;
+       u16 pcifunc = req->hdr.pcifunc;
+       struct nix_txsch *txsch;
+       struct nix_hw *nix_hw;
+       u32 *pfvf_map;
+       u64 cfg;
+
+       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+       if (blkaddr < 0)
+               return NIX_AF_ERR_AF_LF_INVALID;
+
+       nix_hw = get_nix_hw(rvu->hw, blkaddr);
+       if (!nix_hw)
+               return -EINVAL;
+
+       nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+       if (nixlf < 0)
+               return NIX_AF_ERR_AF_LF_INVALID;
+
+       lvl = req->schq_lvl;
+       schq = req->schq;
+       txsch = &nix_hw->txsch[lvl];
+
+       /* Don't allow freeing TL1 */
+       if (lvl > NIX_TXSCH_LVL_TL2 ||
+           schq >= txsch->schq.max)
+               goto err;
+
+       pfvf_map = txsch->pfvf_map;
+       mutex_lock(&rvu->rsrc_lock);
+
+       if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+               mutex_unlock(&rvu->rsrc_lock);
+               goto err;
+       }
+
+       /* Flush if it is a SMQ. Onus of disabling
+        * TL2/3 queue links before SMQ flush is on user
+        */
+       if (lvl == NIX_TXSCH_LVL_SMQ) {
+               cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+               /* Do SMQ flush and set enqueue xoff */
+               cfg |= BIT_ULL(50) | BIT_ULL(49);
+               rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+               /* Wait for flush to complete */
+               rc = rvu_poll_reg(rvu, blkaddr,
+                                 NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+               if (rc) {
+                       dev_err(rvu->dev,
+                               "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+               }
+       }
+
+       /* Free the resource */
+       rvu_free_rsrc(&txsch->schq, schq);
+       txsch->pfvf_map[schq] = 0;
+       mutex_unlock(&rvu->rsrc_lock);
+       return 0;
+err:
+       return NIX_AF_ERR_TLX_INVALID;
+}
+
 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
                                    struct nix_txsch_free_req *req,
                                    struct msg_rsp *rsp)
 {
-       return nix_txschq_free(rvu, req->hdr.pcifunc);
+       if (req->flags & TXSCHQ_FREE_ALL)
+               return nix_txschq_free(rvu, req->hdr.pcifunc);
+       else
+               return nix_txschq_free_one(rvu, req);
 }
 
 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
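
For context, a requester hits the new single-queue path by leaving
TXSCHQ_FREE_ALL clear and filling in schq_lvl/schq. The sketch below is
illustrative only: the struct otx2_nic handle and the mbox helpers
(otx2_mbox_alloc_msg_nix_txsch_free() and otx2_sync_mbox_msg()) are
assumed from the octeontx2 PF driver, which is not part of this patch;
only req->flags, req->schq_lvl and req->schq come from the code above.

	/* Hedged sketch of a PF/VF-side request that frees one SMQ via the
	 * single-queue path added above. The otx2_nic/mbox plumbing is an
	 * assumption borrowed from the octeontx2 PF driver, not this patch.
	 */
	static int otx2_free_single_smq(struct otx2_nic *pfvf, u16 smq)
	{
		struct nix_txsch_free_req *req;
		int err;

		mutex_lock(&pfvf->mbox.lock);
		req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
		if (!req) {
			mutex_unlock(&pfvf->mbox.lock);
			return -ENOMEM;
		}

		/* Leave TXSCHQ_FREE_ALL clear so the AF takes the
		 * nix_txschq_free_one() path instead of freeing everything.
		 */
		req->flags = 0;
		req->schq_lvl = NIX_TXSCH_LVL_SMQ;
		req->schq = smq;

		err = otx2_sync_mbox_msg(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

Note that for an SMQ the AF flushes the queue (enqueue xoff plus SMQ
flush in NIX_AF_SMQX_CFG) before releasing it, but, per the comment in
the patch, disabling the TL2/TL3 queue links beforehand remains the
caller's responsibility.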