RDMA/bnxt_re: Protect the PD table bitmap
Author:     Selvin Xavier <selvin.xavier@broadcom.com>
AuthorDate: Mon, 14 Aug 2023 17:00:19 +0000 (10:00 -0700)
Commit:     Leon Romanovsky <leon@kernel.org>
CommitDate: Tue, 15 Aug 2023 06:06:06 +0000 (09:06 +0300)
Synchronization is required to avoid simultaneous allocation
of the same PD. Add a new mutex lock to serialize allocation
and deallocation from the PD table bitmap.
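
For illustration only, a minimal userspace sketch of the pattern this
patch applies: a mutex guards the find-first-free/claim step so two
threads cannot both pick the same PD id. A pthread mutex and a plain
byte array stand in for the kernel mutex and bitmap; all names below
are illustrative, not taken from the driver.

/*
 * Sketch (assumption): pthread mutex + byte array stand in for the
 * kernel mutex and bitmap; names are illustrative, not the driver's.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define PD_MAX 64

static unsigned char pd_free[PD_MAX];   /* 1 = id free, 0 = id in use */
static pthread_mutex_t pd_tbl_lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_pd(unsigned int *id)
{
	unsigned int i;
	int rc = -ENOMEM;

	pthread_mutex_lock(&pd_tbl_lock);
	for (i = 0; i < PD_MAX; i++) {
		if (pd_free[i]) {
			pd_free[i] = 0; /* claim while still holding the lock */
			*id = i;
			rc = 0;
			break;
		}
	}
	pthread_mutex_unlock(&pd_tbl_lock);
	return rc;
}

static int dealloc_pd(unsigned int id)
{
	int rc = 0;

	pthread_mutex_lock(&pd_tbl_lock);
	if (pd_free[id]) {
		fprintf(stderr, "Freeing an unused PD? pdn = %u\n", id);
		rc = -EINVAL;
	} else {
		pd_free[id] = 1;
	}
	pthread_mutex_unlock(&pd_tbl_lock);
	return rc;
}

int main(void)
{
	unsigned int id = 0;

	/* mark every id free, as the driver's 0xFF memset does */
	for (id = 0; id < PD_MAX; id++)
		pd_free[id] = 1;

	if (!alloc_pd(&id))
		printf("allocated pd %u\n", id);
	return dealloc_pd(id);
}

A mutex rather than a spinlock works here since PD allocation and
deallocation happen in verbs call paths that are allowed to sleep.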

Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Link: https://lore.kernel.org/r/1692032419-21680-2-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_res.h

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c0a7181..b19334c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -619,7 +619,7 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
        int rc = 0;
 
        pd->rdev = rdev;
-       if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
+       if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
                ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
                rc = -ENOMEM;
                goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 6f1e8b7..79c43c2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -642,31 +642,44 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 }
 
 /* PDs */
-int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res  *res, struct bnxt_qplib_pd *pd)
 {
+       struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
        u32 bit_num;
+       int rc = 0;
 
+       mutex_lock(&res->pd_tbl_lock);
        bit_num = find_first_bit(pdt->tbl, pdt->max);
-       if (bit_num == pdt->max)
-               return -ENOMEM;
+       if (bit_num == pdt->max) {
+               rc = -ENOMEM;
+               goto exit;
+       }
 
        /* Found unused PD */
        clear_bit(bit_num, pdt->tbl);
        pd->id = bit_num;
-       return 0;
+exit:
+       mutex_unlock(&res->pd_tbl_lock);
+       return rc;
 }
 
 int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_pd_tbl *pdt,
                          struct bnxt_qplib_pd *pd)
 {
+       int rc = 0;
+
+       mutex_lock(&res->pd_tbl_lock);
        if (test_and_set_bit(pd->id, pdt->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
                         pd->id);
-               return -EINVAL;
+               rc = -EINVAL;
+               goto exit;
        }
        pd->id = 0;
-       return 0;
+exit:
+       mutex_unlock(&res->pd_tbl_lock);
+       return rc;
 }
 
 static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
@@ -691,6 +704,7 @@ static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
 
        pdt->max = max;
        memset((u8 *)pdt->tbl, 0xFF, bytes);
+       mutex_init(&res->pd_tbl_lock);
 
        return 0;
 }
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 57161d3..5949f00 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -277,6 +277,8 @@ struct bnxt_qplib_res {
        struct net_device               *netdev;
        struct bnxt_qplib_rcfw          *rcfw;
        struct bnxt_qplib_pd_tbl        pd_tbl;
+       /* To protect the pd table bit map */
+       struct mutex                    pd_tbl_lock;
        struct bnxt_qplib_sgid_tbl      sgid_tbl;
        struct bnxt_qplib_dpi_tbl       dpi_tbl;
        /* To protect the dpi table bit map */
@@ -368,7 +370,7 @@ void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_hwq *hwq);
 int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
                              struct bnxt_qplib_hwq_attr *hwq_attr);
-int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
                        struct bnxt_qplib_pd *pd);
 int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_pd_tbl *pd_tbl,