RDMA/umem: Add rdma_umem_for_each_dma_block()
author		Jason Gunthorpe <jgg@nvidia.com>
		Fri, 4 Sep 2020 22:41:45 +0000 (19:41 -0300)
committer	Jason Gunthorpe <jgg@nvidia.com>
		Wed, 9 Sep 2020 18:33:17 +0000 (15:33 -0300)
This helper does the same as rdma_for_each_block(), except it works on a
umem. This simplifies most of the call sites.
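As a rough illustration (not part of the patch itself), each call site conversion
follows the pattern below; dma_list, i and pgsz stand in for whatever names the
driver already uses:

	/* before: the caller passes the umem's scatterlist and nmap by hand */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pgsz)
		dma_list[i++] = rdma_block_iter_dma_address(&biter);

	/* after: the umem variant derives sg_head.sgl and nmap itself */
	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		dma_list[i++] = rdma_block_iter_dma_address(&biter);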

Link: https://lore.kernel.org/r/4-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
.clang-format
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/efa/efa_verbs.c
drivers/infiniband/hw/hns/hns_roce_alloc.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
include/rdma/ib_umem.h

index a0a96088c74f49a961a80bc0851a84214b0a9f83..311ef2c61a1bdf55a205e8cce3811e5641a33450 100644
--- a/.clang-format
+++ b/.clang-format
@@ -415,6 +415,7 @@ ForEachMacros:
   - 'rbtree_postorder_for_each_entry_safe'
   - 'rdma_for_each_block'
   - 'rdma_for_each_port'
+  - 'rdma_umem_for_each_dma_block'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'
index 4f07011e04eb5981098afc71c2d34d6b26df67fd..7f63f28ec210ab8b1513fc0b057f0098d487411f 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3787,7 +3787,7 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
        u64 page_size =  BIT_ULL(page_shift);
        struct ib_block_iter biter;
 
-       rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+       rdma_umem_for_each_dma_block(umem, &biter, page_size)
                *pbl_tbl++ = rdma_block_iter_dma_address(&biter);
 
        return pbl_tbl - pbl_tbl_orig;
index 57910bcfc5723f69cd402126f736ad897798d114..81db565c098aab6e4011ffdf25c0b9e18cef1991 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1144,8 +1144,7 @@ static int umem_to_page_list(struct efa_dev *dev,
        ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
                  hp_cnt, pages_in_hp);
 
-       rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-                           BIT(hp_shift))
+       rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
                page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
 
        return 0;
index a522cb2d29eabca8a1f7fdf98bd7e0d76ec30ddd..a6b23dec1adcf677437bc9ef4b03a9c1689cee16 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -268,8 +268,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
        }
 
        /* convert system page cnt to hw page cnt */
-       rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-                           1 << page_shift) {
+       rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
                addr = rdma_block_iter_dma_address(&biter);
                if (idx >= start) {
                        bufs[total++] = addr;
index 6f40d1d82a25d0047c6e604fd858c569e4d1b40c..a9278ef10ace7ad563e823bc2b9f77571ef82d6c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1322,8 +1322,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
        if (iwmr->type == IW_MEMREG_TYPE_QP)
                iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
 
-       rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
-                           iwmr->page_size) {
+       rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
                *pbl = rdma_block_iter_dma_address(&biter);
                pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
        }
index 07a764eb692eede9de8053f63484f4f6b6e3f564..b880512ba95f166ab011683c857d1e2b8a864dad 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -40,6 +40,26 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
               PAGE_SHIFT;
 }
 
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+                                               struct ib_umem *umem,
+                                               unsigned long pgsz)
+{
+       __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
+       for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
+            __rdma_block_iter_next(biter);)
+
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
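
Putting the pieces together, a driver-side page-list fill built on the new
iterator reduces to the sketch below; fill_dma_list() and its parameters are
hypothetical names used only for illustration, and the pattern mirrors the
converted call sites above:

	#include <rdma/ib_umem.h>

	/*
	 * Illustration only: record the DMA address of every pgsz-aligned
	 * block covered by @umem.  Because the iterator spans the range
	 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address +
	 * umem->length, pgsz), @dma_list must have one entry per block in
	 * that aligned span.
	 */
	static unsigned int fill_dma_list(struct ib_umem *umem,
					  dma_addr_t *dma_list,
					  unsigned long pgsz)
	{
		struct ib_block_iter biter;
		unsigned int n = 0;

		rdma_umem_for_each_dma_block(umem, &biter, pgsz)
			dma_list[n++] = rdma_block_iter_dma_address(&biter);

		return n;
	}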