RDMA/rxe: Collect mca init code in a subroutine
Author:    Bob Pearson <rpearsonhpe@gmail.com>
Date:      Wed, 23 Feb 2022 23:07:04 +0000 (17:07 -0600)
Committer: Jason Gunthorpe <jgg@nvidia.com>
Date:      Thu, 24 Feb 2022 00:29:15 +0000 (20:29 -0400)
Collect the initialization code for struct rxe_mca into a subroutine,
__rxe_init_mca(), to clean up rxe_attach_mcg() in rxe_mcast.c. Also check
the limit on the total number of attached QPs.

Link: https://lore.kernel.org/r/20220223230706.50332-3-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/sw/rxe/rxe_mcast.c
drivers/infiniband/sw/rxe/rxe_verbs.h

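The new limit checks follow the optimistic increment-then-check pattern:
bump the atomic counter first, and roll the increment back if the result
exceeds the cap. A minimal standalone sketch of the same pattern in C11
(hypothetical names, not the rxe code itself):

	#include <stdatomic.h>
	#include <errno.h>

	/* Hypothetical example: reserve one slot in a capped counter,
	 * or fail. Mirrors the inc-then-check-then-rollback pattern
	 * __rxe_init_mca() applies to rxe->mcg_attach and mcg->qp_num.
	 */
	static int reserve_slot(atomic_int *count, int max)
	{
		/* optimistic increment; the result includes our slot */
		int n = atomic_fetch_add(count, 1) + 1;

		if (n > max) {
			/* over the limit: undo the reservation */
			atomic_fetch_sub(count, 1);
			return -ENOMEM;
		}
		return 0;
	}

Because the counter is incremented before the comparison, two racing
attachers cannot both slip under the limit; the loser sees the combined
count and backs out.
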
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 4935fe5..a0a7f87 100644
@@ -259,6 +259,46 @@ static void rxe_destroy_mcg(struct rxe_mcg *mcg)
        spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
 }
 
+/**
+ * __rxe_init_mca - initialize a new mca holding lock
+ * @qp: qp object
+ * @mcg: mcg object
+ * @mca: empty space for new mca
+ *
+ * Context: caller must hold references on qp and mcg, rxe->mcg_lock
+ * and pass memory for new mca
+ *
+ * Returns: 0 on success else an error
+ */
+static int __rxe_init_mca(struct rxe_qp *qp, struct rxe_mcg *mcg,
+                         struct rxe_mca *mca)
+{
+       struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+       int n;
+
+       n = atomic_inc_return(&rxe->mcg_attach);
+       if (n > rxe->attr.max_total_mcast_qp_attach) {
+               atomic_dec(&rxe->mcg_attach);
+               return -ENOMEM;
+       }
+
+       n = atomic_inc_return(&mcg->qp_num);
+       if (n > rxe->attr.max_mcast_qp_attach) {
+               atomic_dec(&mcg->qp_num);
+               atomic_dec(&rxe->mcg_attach);
+               return -ENOMEM;
+       }
+
+       atomic_inc(&qp->mcg_num);
+
+       rxe_add_ref(qp);
+       mca->qp = qp;
+
+       list_add_tail(&mca->qp_list, &mcg->qp_list);
+
+       return 0;
+}
+
 static int rxe_attach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
                                  struct rxe_mcg *mcg)
 {
@@ -291,22 +331,9 @@ static int rxe_attach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
                }
        }
 
-       /* check limits after checking if already attached */
-       if (atomic_inc_return(&mcg->qp_num) > rxe->attr.max_mcast_qp_attach) {
-               atomic_dec(&mcg->qp_num);
+       err = __rxe_init_mca(qp, mcg, mca);
+       if (err)
                kfree(mca);
-               err = -ENOMEM;
-               goto out;
-       }
-
-       /* protect pointer to qp in mca */
-       rxe_add_ref(qp);
-       mca->qp = qp;
-
-       atomic_inc(&qp->mcg_num);
-       list_add(&mca->qp_list, &mcg->qp_list);
-
-       err = 0;
 out:
        spin_unlock_irqrestore(&rxe->mcg_lock, flags);
        return err;
@@ -329,6 +356,7 @@ static int rxe_detach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
                if (mca->qp == qp) {
                        list_del(&mca->qp_list);
                        atomic_dec(&qp->mcg_num);
+                       atomic_dec(&rxe->mcg_attach);
                        rxe_drop_ref(qp);
 
                        /* if the number of qp's attached to the
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 20fe3ee..6b15251 100644
@@ -401,6 +401,7 @@ struct rxe_dev {
        spinlock_t              mcg_lock;
        struct rb_root          mcg_tree;
        atomic_t                mcg_num;
+       atomic_t                mcg_attach;
 
        spinlock_t              pending_lock; /* guard pending_mmaps */
        struct list_head        pending_mmaps;
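
From user space, an attach that trips either limit surfaces as a failure
of the attach verb. A hedged sketch using the standard libibverbs call
(the qp, mgid, and mlid setup is assumed to exist elsewhere; try_attach
is a hypothetical helper):

	#include <infiniband/verbs.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical caller: qp is an existing UD QP, mgid a
	 * multicast GID. When rxe's mcg_attach counter would exceed
	 * attr.max_total_mcast_qp_attach, __rxe_init_mca() returns
	 * -ENOMEM and the verb fails.
	 */
	static int try_attach(struct ibv_qp *qp, const union ibv_gid *mgid,
			      uint16_t mlid)
	{
		int err = ibv_attach_mcast(qp, mgid, mlid);

		if (err)
			fprintf(stderr, "attach failed: %s\n",
				strerror(err));
		return err;
	}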