RDMA/rtrs: Check device max_qp_wr limit when creating QP
author Jack Wang <jinpu.wang@ionos.com>
Mon, 14 Jun 2021 09:03:37 +0000 (11:03 +0200)
committer Jason Gunthorpe <jgg@nvidia.com>
Fri, 18 Jun 2021 16:47:13 +0000 (13:47 -0300)
Currently we only check the device's max_qp_wr limit for IO connections,
but not for service connections. We should check it for both.

So save the device's max_qp_wr limit in wr_limit, and use it for both IO
connections and service connections.

While at it, also remove an outdated comment.
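
For illustration, a minimal sketch of why the clamp matters, assuming a
generic verbs consumer rather than the rtrs helpers (create_clamped_qp
and its parameters are hypothetical): ib_create_qp() fails when the
requested capabilities exceed what the device reports, so the WR depth
has to be capped at attrs.max_qp_wr before the QP is created.

#include <linux/minmax.h>
#include <rdma/ib_verbs.h>

/*
 * A minimal sketch, not the rtrs code itself: the helper name and
 * parameters are hypothetical.
 */
static struct ib_qp *create_clamped_qp(struct ib_pd *pd, struct ib_cq *cq,
                                       u32 wanted_wr)
{
        u32 wr_limit = pd->device->attrs.max_qp_wr;
        struct ib_qp_init_attr init_attr = {
                .send_cq = cq,
                .recv_cq = cq,
                .qp_type = IB_QPT_RC,
                .sq_sig_type = IB_SIGNAL_REQ_WR,
                /* without the clamp, wanted_wr > max_qp_wr fails QP creation */
                .cap.max_send_wr = min_t(u32, wr_limit, wanted_wr),
                .cap.max_recv_wr = min_t(u32, wr_limit, wanted_wr),
                .cap.max_send_sge = 1,
                .cap.max_recv_sge = 1,
        };

        return ib_create_qp(pd, &init_attr);
}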

Link: https://lore.kernel.org/r/20210614090337.29557-6-jinpu.wang@ionos.com
Suggested-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
Signed-off-by: Gioh Kim <gi-oh.kim@ionos.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/infiniband/ulp/rtrs/rtrs-srv.c

drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 67ff5bf..125e0be 100644
@@ -1572,21 +1572,12 @@ static void destroy_con(struct rtrs_clt_con *con)
 static int create_con_cq_qp(struct rtrs_clt_con *con)
 {
        struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
-       u32 max_send_wr, max_recv_wr, cq_num, max_send_sge;
+       u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
        int err, cq_vector;
        struct rtrs_msg_rkey_rsp *rsp;
 
        lockdep_assert_held(&con->con_mutex);
        if (con->c.cid == 0) {
-               /*
-                * Two (request + registration) completion for send
-                * Two for recv if always_invalidate is set on server
-                * or one for recv.
-                * + 2 for drain and heartbeat
-                * in case qp gets into error state.
-                */
-               max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
-               max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
                max_send_sge = 1;
                /* We must be the first here */
                if (WARN_ON(sess->s.dev))
@@ -1606,6 +1597,17 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
                }
                sess->s.dev_ref = 1;
                query_fast_reg_mode(sess);
+               wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+               /*
+                * Two (request + registration) completion for send
+                * Two for recv if always_invalidate is set on server
+                * or one for recv.
+                * + 2 for drain and heartbeat
+                * in case qp gets into error state.
+                */
+               max_send_wr =
+                       min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+               max_recv_wr = max_send_wr;
        } else {
                /*
                 * Here we assume that session members are correctly set.
@@ -1617,14 +1619,13 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
                if (WARN_ON(!sess->queue_depth))
                        return -EINVAL;
 
+               wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
                /* Shared between connections */
                sess->s.dev_ref++;
-               max_send_wr =
-                       min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+               max_send_wr = min_t(int, wr_limit,
                              /* QD * (REQ + RSP + FR REGS or INVS) + drain */
                              sess->queue_depth * 3 + 1);
-               max_recv_wr =
-                       min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+               max_recv_wr = min_t(int, wr_limit,
                              sess->queue_depth * 3 + 1);
                max_send_sge = sess->clt->max_segments + 1;
        }
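
Both branches now hand wr_limit-clamped values to the common QP creation
path. A rough sketch of where they end up, assuming a generic rdma_cm
consumer (create_io_qp is illustrative, not the driver's own helper):

#include <rdma/rdma_cm.h>

/*
 * Hypothetical sketch of the downstream use: the clamped
 * max_send_wr/max_recv_wr values become the QP capability request
 * passed to rdma_create_qp().
 */
static int create_io_qp(struct rdma_cm_id *cm_id, struct ib_pd *pd,
                        struct ib_cq *cq, u32 max_send_wr, u32 max_recv_wr,
                        u32 max_send_sge)
{
        struct ib_qp_init_attr init_attr = {
                .send_cq = cq,
                .recv_cq = cq,
                .qp_type = IB_QPT_RC,
                .cap.max_send_wr = max_send_wr, /* already <= attrs.max_qp_wr */
                .cap.max_recv_wr = max_recv_wr,
                .cap.max_send_sge = max_send_sge,
                .cap.max_recv_sge = 1,
        };

        return rdma_create_qp(cm_id, pd, &init_attr);
}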
drivers/infiniband/ulp/rtrs/rtrs-srv.c
index c10dfc2..1a30fd8 100644
@@ -1649,22 +1649,17 @@ static int create_con(struct rtrs_srv_sess *sess,
        con->c.sess = &sess->s;
        con->c.cid = cid;
        atomic_set(&con->wr_cnt, 1);
+       wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
 
        if (con->c.cid == 0) {
                /*
                 * All receive and all send (each requiring invalidate)
                 * + 2 for drain and heartbeat
                 */
-               max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
-               max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+               max_send_wr = min_t(int, wr_limit,
+                                   SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+               max_recv_wr = max_send_wr;
        } else {
-               /*
-                * In theory we might have queue_depth * 32
-                * outstanding requests if an unsafe global key is used
-                * and we have queue_depth read requests each consisting
-                * of 32 different addresses. div 3 for mlx5.
-                */
-               wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
                /* when always_invalidate is enabled, we need linv+rinv+mr+imm */
                if (always_invalidate)
                        max_send_wr =
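
On the server side the IO branch keeps its per-request accounting but
now shares the same wr_limit clamp. A minimal sketch of that budget,
with a hypothetical helper name and multipliers assumed from the
linv+rinv+mr+imm comment above:

#include <linux/minmax.h>
#include <linux/types.h>

/*
 * Hypothetical sketch of the server send-WR budget: with
 * always_invalidate, each request needs linv + rinv + mr + imm on top
 * of the response send (the 5/3 multipliers are an assumption for
 * illustration); either way the result is clamped to the device's
 * max_qp_wr.
 */
static u32 srv_io_send_wr_budget(u32 wr_limit, u32 queue_depth,
                                 bool always_invalidate)
{
        u32 per_req = always_invalidate ? 5 : 3; /* assumed accounting */

        return min_t(u32, wr_limit, queue_depth * per_req + 1);
}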