RDMA/core: Fix umem iterator when PAGE_SIZE is greater than HCA pgsz
[platform/kernel/linux-starfive.git] / include / rdma / ib_umem.h
index 9589647..565a850 100644 (file)
@@ -77,6 +77,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
 {
        __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
                                umem->sgt_append.sgt.nents, pgsz);
+       biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
+       biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
+}
+
+static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
+{
+       return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
 }
 
 /**
@@ -92,7 +99,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
  */
 #define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
        for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
-            __rdma_block_iter_next(biter);)
+            __rdma_umem_block_iter_next(biter);)
 
 #ifdef CONFIG_INFINIBAND_USER_MEM