/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	bool make_dirty = umem->writable && dirty;
	struct scatterlist *sg;
	unsigned int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that support multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes not supported by
 * the driver to be mapped. Drivers always supporting PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	if (umem->is_odp) {
		unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

		/* ODP must always be self consistent. */
		if (!(pgsz_bitmap & page_size))
			return 0;
		return page_size;
	}

	/* rdma_for_each_block() has a bug if the page size is smaller than the
	 * page size used to build the umem. For now prevent smaller page sizes
	 * from being returned.
	 */
	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);

	umem->iova = va = virt;
	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
	 * work based on VA address bits that don't change.
	 */
	mask = pgsz_bitmap &
	       GENMASK(BITS_PER_LONG - 1,
		       bits_per((umem->length - 1 + virt) ^ virt));
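
	/*
	 * Worked example (illustrative, not from the original source):
	 * with virt == 0x1000 and length == 0x2000 the first and last
	 * bytes differ only below bit 14, so any supported page size of
	 * 16KB or more already maps the whole range as a single block.
	 * Seeding mask with those sizes caps the final result at the
	 * smallest of them, since larger pages cannot do better.
	 */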
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		va += sg_dma_len(sg) - pgoff;
		/* Except for the last entry, the ending iova alignment sets
		 * the maximum possible page size as the low bits of the iova
		 * must be zero when starting the next chunk.
		 */
		if (i != (umem->nmap - 1))
			mask |= va;
		pgoff = 0;
	}

	/* The mask accumulates 1's in each position where the VA and physical
	 * address differ, thus the length of trailing 0 is the largest page
	 * size that can pass the VA through to the physical.
	 */
	if (mask)
		pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
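
	/*
	 * Worked example (illustrative, not from the original source):
	 * if mask == 0x21000 its lowest set bit is bit 12, so
	 * count_trailing_zeros(mask) == 12 and every supported page
	 * size above 4KB is filtered out of pgsz_bitmap here.
	 */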
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
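
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver whose MRs support 4KB, 64KB and 2MB pages might do:
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M, virt);
 *	if (!pgsz)
 *		return -EINVAL;
 *	nblocks = ib_umem_num_dma_blocks(umem, pgsz);
 *
 * ib_umem_num_dma_blocks() then gives the number of HW page-table
 * entries needed at the chosen page size.
 */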

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int ret;
	struct scatterlist *sg = NULL;
	unsigned int gup_flags = FOLL_WRITE;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);
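
	/*
	 * Example (illustrative): addr == ULONG_MAX - 0xFFF with
	 * size == 0x2000 wraps addr + size around zero, and an
	 * addr + size landing in the last page makes PAGE_ALIGN()
	 * wrap to 0. Both cases are rejected above.
	 */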

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev      = device;
	umem->length     = size;
	umem->address    = addr;
	/*
	 * Drivers should call ib_umem_find_best_pgsz() to set the iova
	 * correctly.
	 */
	umem->iova = addr;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	while (npages) {
		cond_resched();
		ret = pin_user_pages_fast(cur_base,
					  min_t(unsigned long, npages,
						PAGE_SIZE /
						sizeof(struct page *)),
					  gup_flags | FOLL_LONGTERM, page_list);
		if (ret < 0)
			goto umem_release;

		cur_base += ret * PAGE_SIZE;
		npages -= ret;
		sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
				0, ret << PAGE_SHIFT,
				ib_dma_max_seg_size(device), sg, npages,
				GFP_KERNEL);
		umem->sg_nents = umem->sg_head.nents;
		if (IS_ERR(sg)) {
			unpin_user_pages_dirty_lock(page_list, ret, 0);
			ret = PTR_ERR(sg);
			goto umem_release;
		}
	}

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	umem->nmap =
		ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
				    DMA_BIDIRECTIONAL, dma_attr);

	if (!umem->nmap) {
		ret = -ENOMEM;
		goto umem_release;
	}

	ret = 0;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
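
/*
 * Usage sketch (hypothetical driver code, not part of this file), e.g.
 * from a driver's reg_user_mr handler:
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(pd->device, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *	... program HW translation tables from the umem SGL ...
 *
 * and ib_umem_release(umem) on teardown or on a later failure path.
 */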

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_dmabuf)
		return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
		       __func__, offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
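
/*
 * Usage sketch (hypothetical caller, not part of this file): copy the
 * first 64 bytes of a pinned region into a kernel buffer:
 *
 *	u8 buf[64];
 *	int err;
 *
 *	err = ib_umem_copy_from(buf, umem, 0, sizeof(buf));
 *	if (err)
 *		return err;
 */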