RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
author Zhu Yanjun <yanjun.zhu@linux.dev>
Mon, 16 Jan 2023 19:35:01 +0000 (14:35 -0500)
committer Leon Romanovsky <leon@kernel.org>
Thu, 26 Jan 2023 10:58:46 +0000 (12:58 +0200)
Split the source code related to QP handling out of irdma_reg_user_mr() into a new function, irdma_reg_user_mr_type_qp().
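
In irdma_reg_user_mr(), the IRDMA_MEMREG_TYPE_QP case now collapses to a
single call into the new helper (condensed from the hunks below as a
sketch, not a standalone compilable unit):

	case IRDMA_MEMREG_TYPE_QP:
		/* Page-count validation, irdma_handle_q_mem() and the
		 * qp_reg_mem_list bookkeeping now live in the helper.
		 */
		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
		if (err)
			goto error;
		break;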

Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Link: https://lore.kernel.org/r/20230116193502.66540-4-yanjun.zhu@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/irdma/verbs.c

index 1fc9761..93a8997 100644
@@ -2835,6 +2835,38 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
        kfree(iwmr);
 }
 
+static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+                                    struct ib_udata *udata,
+                                    struct irdma_mr *iwmr)
+{
+       struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+       struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+       struct irdma_ucontext *ucontext = NULL;
+       unsigned long flags;
+       bool use_pbles;
+       u32 total;
+       int err;
+
+       total = req.sq_pages + req.rq_pages + 1;
+       if (total > iwmr->page_cnt)
+               return -EINVAL;
+
+       total = req.sq_pages + req.rq_pages;
+       use_pbles = total > 2;
+       err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+       if (err)
+               return err;
+
+       ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+                                            ibucontext);
+       spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+       list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+       iwpbl->on_list = true;
+       spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+       return 0;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2890,23 +2922,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
        switch (req.reg_type) {
        case IRDMA_MEMREG_TYPE_QP:
-               total = req.sq_pages + req.rq_pages + shadow_pgcnt;
-               if (total > iwmr->page_cnt) {
-                       err = -EINVAL;
-                       goto error;
-               }
-               total = req.sq_pages + req.rq_pages;
-               use_pbles = (total > 2);
-               err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+               err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
                if (err)
                        goto error;
 
-               ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-                                                    ibucontext);
-               spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-               list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
-               iwpbl->on_list = true;
-               spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
                break;
        case IRDMA_MEMREG_TYPE_CQ:
                if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)