RDMA/rtrs-srv: Pass the correct number of entries for dma mapped SGL
author  Jack Wang <jinpu.wang@ionos.com>
Thu, 18 Aug 2022 10:53:55 +0000 (12:53 +0200)
committer  Leon Romanovsky <leonro@nvidia.com>
Sun, 21 Aug 2022 09:25:13 +0000 (12:25 +0300)
ib_dma_map_sg() maps the SGL into a 'dma mapped SGL'. This process
may coalesce entries, changing both the number of entries and the
length of each entry.

Code that touches dma_address iterates over the 'dma mapped SGL'
and must therefore use the dma_nents count returned from ib_dma_map_sg().

Use the count returned from ib_dma_map_sg() for all further processing
of the mapped list.
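
In general terms, the pattern enforced here looks like the sketch
below. The helper map_and_register() and its parameters are
hypothetical; only the ib_* verbs, for_each_sg() and the scatterlist
accessors are real kernel API:

#include <rdma/ib_verbs.h>
#include <linux/scatterlist.h>

static int map_and_register(struct ib_device *ibdev, struct ib_pd *pd,
                            struct sg_table *sgt, unsigned int page_size,
                            struct ib_mr **out_mr)
{
        struct scatterlist *s;
        struct ib_mr *mr;
        int nr_sgt, nr, i;

        /* Returns the number of dma mapped entries, 0 on failure. */
        nr_sgt = ib_dma_map_sg(ibdev, sgt->sgl, sgt->nents,
                               DMA_BIDIRECTIONAL);
        if (!nr_sgt)
                return -EINVAL;

        /* Size the MR by the mapped count, not the original nents. */
        mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nr_sgt);
        if (IS_ERR(mr)) {
                ib_dma_unmap_sg(ibdev, sgt->sgl, sgt->nents,
                                DMA_BIDIRECTIONAL);
                return PTR_ERR(mr);
        }

        /* The MR page list is built from the mapped entries only. */
        nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt, NULL, page_size);
        if (nr != nr_sgt) {
                ib_dereg_mr(mr);
                ib_dma_unmap_sg(ibdev, sgt->sgl, sgt->nents,
                                DMA_BIDIRECTIONAL);
                return nr < 0 ? nr : -EINVAL;
        }

        /* dma_address is only valid for the first nr_sgt entries. */
        for_each_sg(sgt->sgl, s, nr_sgt, i)
                pr_debug("entry %d dma addr %pad\n", i, &sg_dma_address(s));

        *out_mr = mr;
        return 0;
}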

Fixes: 9cb837480424e ("RDMA/rtrs: server: main functionality")
Link: https://lore.kernel.org/r/20220818105355.110344-4-haris.iqbal@ionos.com
Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
Reviewed-by: Aleksei Marov <aleksei.marov@ionos.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/ulp/rtrs/rtrs-srv.c

index 34c03bd..4894e73 100644
@@ -595,7 +595,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                struct sg_table *sgt = &srv_mr->sgt;
                struct scatterlist *s;
                struct ib_mr *mr;
-               int nr, chunks;
+               int nr, nr_sgt, chunks;
 
                chunks = chunks_per_mr * mri;
                if (!always_invalidate)
@@ -610,19 +610,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                        sg_set_page(s, srv->chunks[chunks + i],
                                    max_chunk_size, 0);
 
-               nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
-                                  sgt->nents, DMA_BIDIRECTIONAL);
+               nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+                                      sgt->nents, DMA_BIDIRECTIONAL);
-               if (nr < sgt->nents) {
-                       err = nr < 0 ? nr : -EINVAL;
+               if (!nr_sgt) {
+                       err = -EINVAL;
                        goto free_sg;
                }
                mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
-                                sgt->nents);
+                                nr_sgt);
                if (IS_ERR(mr)) {
                        err = PTR_ERR(mr);
                        goto unmap_sg;
                }
-               nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+               nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
                                  NULL, max_chunk_size);
-               if (nr < 0 || nr < sgt->nents) {
+               if (nr != nr_sgt) {
                        err = nr < 0 ? nr : -EINVAL;
@@ -641,7 +641,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
                        }
                }
                /* Eventually dma addr for each chunk can be cached */
-               for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+               for_each_sg(sgt->sgl, s, nr_sgt, i)
                        srv_path->dma_addr[chunks + i] = sg_dma_address(s);
 
                ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
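
Note that the unmap side stays as-is: per the DMA-API contract,
ib_dma_unmap_sg() must still be given the same nents that was
originally passed to ib_dma_map_sg(). Only consumers of
dma_address/dma_length move to the returned count, which is why no
unmap path appears in this diff.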