RDMA/hns: Use IDA interface to manage pd index
author Yangyang Li <liyangyang20@huawei.com>
Thu, 10 Jun 2021 11:50:13 +0000 (19:50 +0800)
committer Jason Gunthorpe <jgg@nvidia.com>
Mon, 21 Jun 2021 18:42:54 +0000 (15:42 -0300)
Switch PD index allocation and release from the hns driver's own bitmap
interface to the IDA interface.

Link: https://lore.kernel.org/r/1623325814-55737-6-git-send-email-liweihang@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/hns/hns_roce_alloc.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_pd.c
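
For context, the kernel IDA API adopted here follows a simple
init/alloc/free/destroy lifecycle. The snippet below is a minimal standalone
sketch of that pattern; the names example_ida and example_ida_usage are
illustrative only and do not appear in this patch:

	#include <linux/idr.h>

	static struct ida example_ida;	/* hypothetical, for illustration */

	static int example_ida_usage(void)
	{
		int id;

		ida_init(&example_ida);

		/* Allocate the lowest free ID in [2, 127], e.g. skipping a
		 * reserved range at the bottom, as the PD table does.
		 */
		id = ida_alloc_range(&example_ida, 2, 127, GFP_KERNEL);
		if (id < 0)
			return id;	/* -ENOMEM or -ENOSPC */

		/* ... use the ID ... */

		ida_free(&example_ida, id);	/* return the ID to the pool */
		ida_destroy(&example_ida);	/* release all internal memory */
		return 0;
	}

Unlike the old hns bitmap helpers, the IDA needs no preallocated backing
storage, so its initialization cannot fail.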

index dc1f28a..dcdfcc7 100644 (file)
@@ -252,6 +252,6 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
        hns_roce_cleanup_qp_table(hr_dev);
        hns_roce_cleanup_cq_table(hr_dev);
        ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
-       hns_roce_cleanup_pd_table(hr_dev);
+       ida_destroy(&hr_dev->pd_ida.ida);
        hns_roce_cleanup_uar_table(hr_dev);
 }
index aa23041..f9c5e4d 100644 (file)
@@ -961,7 +961,7 @@ struct hns_roce_dev {
        void __iomem            *priv_addr;
 
        struct hns_roce_cmdq    cmd;
-       struct hns_roce_bitmap    pd_bitmap;
+       struct hns_roce_ida pd_ida;
        struct hns_roce_bitmap xrcd_bitmap;
        struct hns_roce_uar_table uar_table;
        struct hns_roce_mr_table  mr_table;
@@ -1143,14 +1143,13 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                     dma_addr_t *pages, unsigned int page_cnt);
 
-int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
 
-void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
index 1faadd3..0e558b5 100644 (file)
@@ -748,11 +748,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
                goto err_uar_table_free;
        }
 
-       ret = hns_roce_init_pd_table(hr_dev);
-       if (ret) {
-               dev_err(dev, "Failed to init protected domain table.\n");
-               goto err_uar_alloc_free;
-       }
+       hns_roce_init_pd_table(hr_dev);
 
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
                ret = hns_roce_init_xrcd_table(hr_dev);
@@ -795,9 +791,7 @@ err_cq_table_free:
                hns_roce_cleanup_xrcd_table(hr_dev);
 
 err_pd_table_free:
-       hns_roce_cleanup_pd_table(hr_dev);
-
-err_uar_alloc_free:
+       ida_destroy(&hr_dev->pd_ida.ida);
        hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
 
 err_uar_table_free:
index 25e52cd..c2f67a7 100644 (file)
 #include <linux/pci.h>
 #include "hns_roce_device.h"
 
-static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
 {
-       return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
-}
-
-static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
-{
-       hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
-}
-
-int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
-{
-       return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
-                                   hr_dev->caps.num_pds - 1,
-                                   hr_dev->caps.reserved_pds, 0);
-}
+       struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
 
-void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
-{
-       hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
+       ida_init(&pd_ida->ida);
+       pd_ida->max = hr_dev->caps.num_pds - 1;
+       pd_ida->min = hr_dev->caps.reserved_pds;
 }
 
 int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
        struct ib_device *ib_dev = ibpd->device;
+       struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+       struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
        struct hns_roce_pd *pd = to_hr_pd(ibpd);
-       int ret;
+       int ret = 0;
+       int id;
 
-       ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
-       if (ret) {
-               ibdev_err(ib_dev, "failed to alloc pd, ret = %d.\n", ret);
-               return ret;
+       id = ida_alloc_range(&pd_ida->ida, pd_ida->min, pd_ida->max,
+                            GFP_KERNEL);
+       if (id < 0) {
+               ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);
+               return -ENOMEM;
        }
+       pd->pdn = (unsigned long)id;
 
        if (udata) {
                struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};
@@ -74,7 +66,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
                ret = ib_copy_to_udata(udata, &resp,
                                       min(udata->outlen, sizeof(resp)));
                if (ret) {
-                       hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
+                       ida_free(&pd_ida->ida, id);
                        ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
                }
        }
@@ -84,7 +76,10 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 
 int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
-       hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+       struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+
+       ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);
+
        return 0;
 }
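
Note: this patch relies on the struct hns_roce_ida wrapper introduced earlier
in this series. Judging from how pd_ida.ida, pd_ida.min and pd_ida.max are
used above, its shape is presumably:

	struct hns_roce_ida {
		struct ida ida;
		u32 min; /* lowest ID that may be allocated */
		u32 max; /* highest ID that may be allocated */
	};

hns_roce_init_pd_table() now only seeds these bounds, and since ida_init()
cannot fail the function returns void; that in turn lets hns_roce_setup_hca()
drop the separate err_uar_alloc_free unwind label. The pdn field remains
unsigned long, hence the casts to and from int around ida_alloc_range() and
ida_free().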