RDMA/hns: Use IDA interface to manage xrcd index
Author: Yangyang Li <liyangyang20@huawei.com>
Thu, 10 Jun 2021 11:50:14 +0000 (19:50 +0800)
Committer: Jason Gunthorpe <jgg@nvidia.com>
Mon, 21 Jun 2021 18:42:54 +0000 (15:42 -0300)
Switch xrcd index allocation and release from the hns driver's own bitmap
interface to the kernel's IDA interface.

Link: https://lore.kernel.org/r/1623325814-55737-7-git-send-email-liweihang@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/hns/hns_roce_alloc.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_pd.c

index dcdfcc78588ec4394889b07194edee1f56510c28..1b02d3bc9bae2d218b92e88eb0f573f7ad7799c4 100644 (file)
@@ -245,7 +245,7 @@ done:
 void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
 {
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
-               hns_roce_cleanup_xrcd_table(hr_dev);
+               ida_destroy(&hr_dev->xrcd_ida.ida);
 
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
                hns_roce_cleanup_srq_table(hr_dev);
index f9c5e4d3459b7372873bfd57532f14a25085cd31..905c2e20e35a934193a19636819b15fb74fe6837 100644 (file)
@@ -962,7 +962,7 @@ struct hns_roce_dev {
 
        struct hns_roce_cmdq    cmd;
        struct hns_roce_ida pd_ida;
-       struct hns_roce_bitmap xrcd_bitmap;
+       struct hns_roce_ida xrcd_ida;
        struct hns_roce_uar_table uar_table;
        struct hns_roce_mr_table  mr_table;
        struct hns_roce_cq_table  cq_table;
@@ -1148,13 +1148,12 @@ void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
 
 void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
-void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);
 
 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
 void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
index 0e558b53756997170f8dac14a796b9a47a68b5b3..2d79cf64008baa37a0a88764c961b19b1df561a3 100644 (file)
@@ -750,14 +750,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 
        hns_roce_init_pd_table(hr_dev);
 
-       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
-               ret = hns_roce_init_xrcd_table(hr_dev);
-               if (ret) {
-                       dev_err(dev, "failed to init xrcd table, ret = %d.\n",
-                               ret);
-                       goto err_pd_table_free;
-               }
-       }
+       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+               hns_roce_init_xrcd_table(hr_dev);
 
        hns_roce_init_mr_table(hr_dev);
 
@@ -788,9 +782,8 @@ err_cq_table_free:
        ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
 
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
-               hns_roce_cleanup_xrcd_table(hr_dev);
+               ida_destroy(&hr_dev->xrcd_ida.ida);
 
-err_pd_table_free:
        ida_destroy(&hr_dev->pd_ida.ida);
        hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
 
index c2f67a7fbe022bcb16e87d0437a77b7dfc8eb142..ea566363098564fa4bc3ec9412cfd5ea6d980f4d 100644 (file)
@@ -134,35 +134,27 @@ void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
 
 static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
 {
-       unsigned long obj;
-       int ret;
-
-       ret = hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, &obj);
-       if (ret)
-               return ret;
+       struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
+       int id;
 
-       *xrcdn = obj;
+       id = ida_alloc_range(&xrcd_ida->ida, xrcd_ida->min, xrcd_ida->max,
+                            GFP_KERNEL);
+       if (id < 0) {
+               ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);
+               return -ENOMEM;
+       }
+       *xrcdn = (u32)id;
 
        return 0;
 }
 
-static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev,
-                              u32 xrcdn)
+void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
 {
-       hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn);
-}
+       struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
 
-int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
-{
-       return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap,
-                                   hr_dev->caps.num_xrcds,
-                                   hr_dev->caps.num_xrcds - 1,
-                                   hr_dev->caps.reserved_xrcds, 0);
-}
-
-void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev)
-{
-       hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap);
+       ida_init(&xrcd_ida->ida);
+       xrcd_ida->max = hr_dev->caps.num_xrcds - 1;
+       xrcd_ida->min = hr_dev->caps.reserved_xrcds;
 }
 
 int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
@@ -175,18 +167,18 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
                return -EINVAL;
 
        ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
-       if (ret) {
-               dev_err(hr_dev->dev, "failed to alloc xrcdn, ret = %d.\n", ret);
+       if (ret)
                return ret;
-       }
 
        return 0;
 }
 
 int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
 {
-       hns_roce_xrcd_free(to_hr_dev(ib_xrcd->device),
-                          to_hr_xrcd(ib_xrcd)->xrcdn);
+       struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
+       u32 xrcdn = to_hr_xrcd(ib_xrcd)->xrcdn;
+
+       ida_free(&hr_dev->xrcd_ida.ida, (int)xrcdn);
 
        return 0;
 }