/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	struct work_struct work;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
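
/*
 * Worked example (hypothetical values): for a umem with iova = 0x201000 and
 * length = 0x3000, ib_umem_num_dma_blocks(umem, SZ_4K) covers the rounded
 * range 0x201000..0x204000 and returns 3, while ib_umem_num_dma_blocks(umem,
 * SZ_2M) covers 0x200000..0x400000 and returns 1, since the whole range fits
 * in a single 2M block.
 */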

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
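
/*
 * Usage sketch (illustrative only; "dev_supported_pgsz", "iova" and
 * "my_write_pte" are hypothetical driver-side names): a driver typically
 * picks the best HW page size with ib_umem_find_best_pgsz() and then
 * programs one device PTE per DMA block:
 *
 *	struct ib_block_iter biter;
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, dev_supported_pgsz, iova);
 *	if (!pgsz)
 *		return -EINVAL;
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		my_write_pte(rdma_block_iter_dma_address(&biter), pgsz);
 */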

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
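
/*
 * Worked example (hypothetical values; "dev_supported_pgsz" is a made-up
 * driver bitmap): a device that requires 64 byte alignment but can fold any
 * remaining offset below 4096 into its page offset field would pass
 * pgoff_bitmask = 0xfc0 (binary 111111000000, i.e. offsets up to 4032 in
 * 64 byte steps):
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, dev_supported_pgsz, 0xfc0);
 */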

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
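
/*
 * Usage sketch of the pinned dma-buf path (illustrative only; "ibdev", "fd",
 * "offset" and "size" come from a hypothetical caller):
 *
 *	struct ib_umem_dmabuf *umem_dmabuf;
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, size, fd,
 *						IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 *
 * The unpinned ib_umem_dmabuf_get() variant instead takes dma_buf_attach_ops
 * with a move_notify callback, and the driver maps and unmaps the pages with
 * ib_umem_dmabuf_map_pages()/ib_umem_dmabuf_unmap_pages() while holding the
 * dma-buf reservation lock.
 */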

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */