RDMA: Add a dedicated CQ resource tracker function
author: Maor Gottlieb <maorg@mellanox.com>
Tue, 23 Jun 2020 11:30:37 +0000 (14:30 +0300)
committer: Jason Gunthorpe <jgg@nvidia.com>
Tue, 23 Jun 2020 14:46:27 +0000 (11:46 -0300)
In order to avoid double multiplexing of the resource when it is a CQ, add
a dedicated callback function.

Link: https://lore.kernel.org/r/20200623113043.1228482-6-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/core/device.c
drivers/infiniband/core/nldev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/restrack.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_restrack.c
include/rdma/ib_verbs.h

index ffdf978..9eeac8c 100644 (file)
@@ -2617,6 +2617,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
        SET_DEVICE_OP(dev_ops, drain_rq);
        SET_DEVICE_OP(dev_ops, drain_sq);
        SET_DEVICE_OP(dev_ops, enable_driver);
+       SET_DEVICE_OP(dev_ops, fill_res_cq_entry);
        SET_DEVICE_OP(dev_ops, fill_res_entry);
        SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
        SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
index a4f3f83..707f724 100644 (file)
@@ -598,9 +598,8 @@ static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
        if (fill_res_name_pid(msg, res))
                goto err;
 
-       if (fill_res_entry(dev, msg, res))
-               goto err;
-
+       if (dev->ops.fill_res_cq_entry)
+               return dev->ops.fill_res_cq_entry(msg, cq);
        return 0;
 
 err:   return -EMSGSIZE;
index 5b9884c..18a2c1a 100644 (file)
@@ -1056,6 +1056,7 @@ struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
 typedef int c4iw_restrack_func(struct sk_buff *msg,
                               struct rdma_restrack_entry *res);
 int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr);
+int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);
 extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
 
 #endif
index 36eeb59..d6b20aa 100644 (file)
@@ -485,6 +485,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
        .destroy_cq = c4iw_destroy_cq,
        .destroy_qp = c4iw_destroy_qp,
        .destroy_srq = c4iw_destroy_srq,
+       .fill_res_cq_entry = c4iw_fill_res_cq_entry,
        .fill_res_entry = fill_res_entry,
        .fill_res_mr_entry = c4iw_fill_res_mr_entry,
        .get_dev_fw_str = get_dev_fw_str,
index 9a5ca91..ead2cd0 100644 (file)
@@ -372,10 +372,8 @@ err:
        return -EMSGSIZE;
 }
 
-static int fill_res_cq_entry(struct sk_buff *msg,
-                            struct rdma_restrack_entry *res)
+int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq)
 {
-       struct ib_cq *ibcq = container_of(res, struct ib_cq, res);
        struct c4iw_cq *chp = to_c4iw_cq(ibcq);
        struct nlattr *table_attr;
        struct t4_cqe hwcqes[2];
@@ -494,5 +492,4 @@ err:
 c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
        [RDMA_RESTRACK_QP]      = fill_res_qp_entry,
        [RDMA_RESTRACK_CM_ID]   = fill_res_ep_entry,
-       [RDMA_RESTRACK_CQ]      = fill_res_cq_entry,
 };
index a77fa67..a61f0c4 100644 (file)
@@ -1266,6 +1266,6 @@ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
 
-int hns_roce_fill_res_entry(struct sk_buff *msg,
-                           struct rdma_restrack_entry *res);
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
+                              struct ib_cq *ib_cq);
 #endif /* _HNS_ROCE_DEVICE_H */
index 50763cf..5907cfd 100644 (file)
@@ -428,7 +428,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
        .destroy_ah = hns_roce_destroy_ah,
        .destroy_cq = hns_roce_destroy_cq,
        .disassociate_ucontext = hns_roce_disassociate_ucontext,
-       .fill_res_entry = hns_roce_fill_res_entry,
+       .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
        .get_dma_mr = hns_roce_get_dma_mr,
        .get_link_layer = hns_roce_get_link_layer,
        .get_port_immutable = hns_roce_port_immutable,
index 0687173..259444c 100644 (file)
@@ -76,10 +76,9 @@ err:
        return -EMSGSIZE;
 }
 
-static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
-                                     struct rdma_restrack_entry *res)
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
+                              struct ib_cq *ib_cq)
 {
-       struct ib_cq *ib_cq = container_of(res, struct ib_cq, res);
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
        struct hns_roce_v2_cq_context *context;
@@ -119,12 +118,3 @@ err:
        kfree(context);
        return ret;
 }
-
-int hns_roce_fill_res_entry(struct sk_buff *msg,
-                           struct rdma_restrack_entry *res)
-{
-       if (res->type == RDMA_RESTRACK_CQ)
-               return hns_roce_fill_res_cq_entry(msg, res);
-
-       return 0;
-}
index 117a0e8..097b1d4 100644 (file)
@@ -2584,6 +2584,7 @@ struct ib_device_ops {
        int (*fill_res_entry)(struct sk_buff *msg,
                              struct rdma_restrack_entry *entry);
        int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
+       int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
 
        /* Device lifecycle callbacks */
        /*