/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
        struct ib_device       *ibdev;
        struct mm_struct       *owning_mm;
        u64 iova;
        size_t                  length;
        unsigned long           address;
        u32 writable : 1;
        u32 is_odp : 1;
        u32 is_dmabuf : 1;
        struct work_struct      work;
        struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
        struct ib_umem umem;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *first_sg;
        struct scatterlist *last_sg;
        unsigned long first_sg_offset;
        unsigned long last_sg_trim;
        void *private;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
        return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
        return umem->address & ~PAGE_MASK;
}
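
/*
 * Worked example (assuming 4 KiB pages): for umem->address == 0x12345678,
 * ib_umem_offset() returns 0x678, the start's byte offset within its page.
 */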

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
                                               unsigned long pgsz)
{
        return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
               (pgsz - 1);
}
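
/*
 * Worked example (hypothetical addresses): if the first SGE maps at DMA
 * address 0xa0001000 and ib_umem_offset() is 0x678, then for
 * pgsz == 0x10000 (64 KiB) this returns 0x1678, the start's offset
 * within its 64 KiB DMA block.
 */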

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
                                            unsigned long pgsz)
{
        return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
                         ALIGN_DOWN(umem->iova, pgsz))) /
               pgsz;
}
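
/*
 * Worked example (made-up values): with umem->iova == 0x1234,
 * umem->length == 0x2000 and pgsz == 0x1000, the aligned span is
 * ALIGN(0x3234, 0x1000) - ALIGN_DOWN(0x1234, 0x1000) = 0x4000 - 0x1000,
 * so ib_umem_num_dma_blocks() returns 3.
 */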

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
        return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
                                                struct ib_umem *umem,
                                                unsigned long pgsz)
{
        __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
                                umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current DMA block
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
        for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
             __rdma_block_iter_next(biter);)
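
/*
 * Usage sketch (not part of this API; the helper name is made up): a
 * hypothetical driver walks the umem in pgsz-aligned DMA blocks and
 * records each block's aligned DMA address, e.g. while building a HW
 * page table. @pas must have room for ib_umem_num_dma_blocks(umem, pgsz)
 * entries.
 */
static inline void example_umem_fill_pas(struct ib_umem *umem,
                                         unsigned long pgsz, u64 *pas)
{
        struct ib_block_iter biter;
        size_t i = 0;

        rdma_umem_for_each_dma_block(umem, &biter, pgsz)
                pas[i++] = rdma_block_iter_dma_address(&biter);
}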

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                            size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                                     unsigned long pgsz_bitmap,
                                     unsigned long virt);
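
/*
 * Registration-flow sketch (illustrative only; the helper name and the
 * page-size mask are made up): pin the user range, then pick the largest
 * HW-supported page size, releasing the pin if none fits.
 */
static inline unsigned long example_umem_pin_and_size(struct ib_device *dev,
                                                      unsigned long start,
                                                      size_t len,
                                                      struct ib_umem **out)
{
        struct ib_umem *umem;
        unsigned long pgsz;

        umem = ib_umem_get(dev, start, len, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(umem))
                return 0;

        /* e.g. HW supporting 4K, 64K and 2M pages */
        pgsz = ib_umem_find_best_pgsz(umem, 0x1000 | 0x10000 | 0x200000,
                                      start);
        if (!pgsz) {
                ib_umem_release(umem);
                return 0;
        }

        *out = umem;
        return pgsz;
}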

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance, if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032, then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                                                    unsigned long pgsz_bitmap,
                                                    u64 pgoff_bitmask)
{
        struct scatterlist *sg = umem->sgt_append.sgt.sgl;
        dma_addr_t dma_addr;

        dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
        return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
                                      dma_addr & pgoff_bitmask);
}
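
/*
 * Worked example (hypothetical HW): a device whose base-pointer field has
 * a 12-bit byte offset but requires 64-byte alignment can represent
 * 64-byte-aligned offsets up to 4032, i.e. bits 6..11, matching the
 * "111111000000" mask in the kernel-doc above:
 *
 *      pgsz = ib_umem_find_best_pgoff(umem, hw_page_sizes,
 *                                     GENMASK(11, 6));
 */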

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                                          unsigned long offset, size_t size,
                                          int fd, int access,
                                          const struct dma_buf_attach_ops *ops);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
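
/*
 * Lifecycle sketch (illustrative; my_attach_ops is a placeholder for a
 * driver's dma-buf move-notify callbacks): import a dma-buf by fd, map it
 * with the attachment's dma_resv lock held, and release in reverse order.
 *
 *      umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
 *                                       &my_attach_ops);
 *      if (IS_ERR(umem_dmabuf))
 *              return PTR_ERR(umem_dmabuf);
 *
 *      dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
 *      err = ib_umem_dmabuf_map_pages(umem_dmabuf);
 *      dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
 *      ...
 *      ib_umem_dmabuf_release(umem_dmabuf);
 */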

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
                                          unsigned long addr, size_t size,
                                          int access)
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                                    size_t length)
{
        return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                                                   unsigned long pgsz_bitmap,
                                                   unsigned long virt)
{
        return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                                                    unsigned long pgsz_bitmap,
                                                    u64 pgoff_bitmask)
{
        return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                                          unsigned long offset,
                                          size_t size, int fd,
                                          int access,
                                          const struct dma_buf_attach_ops *ops)
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */