/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	bool make_dirty = umem->writable && dirty;
	struct scatterlist *sg;
	unsigned int i;

	if (dirty)
		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
					   DMA_BIDIRECTIONAL, 0);

	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

	sg_free_append_table(&umem->sgt_append);
}
/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that support multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes not supported by
 * the driver to be mapped. Drivers always supporting PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	umem->iova = va = virt;

	if (umem->is_odp) {
		unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

		/* ODP must always be self consistent. */
		if (!(pgsz_bitmap & page_size))
			return 0;
		return page_size;
	}
	/* rdma_for_each_block() has a bug if the page size is smaller than the
	 * page size used to build the umem. For now prevent smaller page sizes
	 * from being returned.
	 */
	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
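	/*
	 * For example, with a 4K PAGE_SIZE (PAGE_SHIFT == 12) this keeps
	 * bits 12 and up, so a driver bitmap of SZ_1K | SZ_4K | SZ_2M is
	 * trimmed to SZ_4K | SZ_2M before the search below.
	 */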
	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
	 * work based on VA address bits that don't change.
	 */
	mask = pgsz_bitmap &
	       GENMASK(BITS_PER_LONG - 1,
		       bits_per((umem->length - 1 + virt) ^ virt));
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;
	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		va += sg_dma_len(sg) - pgoff;
		/* Except for the last entry, the ending iova alignment sets
		 * the maximum possible page size as the low bits of the iova
		 * must be zero when starting the next chunk.
		 */
		if (i != (umem->sgt_append.sgt.nents - 1))
			mask |= va;
		pgoff = 0;
	}
	/* The mask accumulates 1's in each position where the VA and physical
	 * address differ, thus the length of trailing 0 is the largest page
	 * size that can pass the VA through to the physical.
	 */
	if (mask)
		pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
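/*
 * Usage sketch (not part of this file; mr and hw_set_pte() are
 * hypothetical driver names): a device supporting 4K, 64K and 2M pages
 * could program one HW page size per MR like this:
 *
 *	struct ib_block_iter biter;
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M,
 *				      virt_addr);
 *	if (!pgsz)
 *		return -EINVAL;
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		hw_set_pte(mr, rdma_block_iter_dma_address(&biter));
 *
 * Each address the iterator yields is pgsz-aligned, because the mask
 * above only keeps page sizes whose low bits agree between the IOVA
 * and every DMA address in the SG table.
 */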
/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int pinned, ret;
	unsigned int gup_flags = FOLL_LONGTERM;
	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev      = device;
	umem->length     = size;
	umem->address    = addr;
	/*
	 * Drivers should call ib_umem_find_best_pgsz() to set the iova
	 * correctly.
	 */
	umem->iova = addr;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}
	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}
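	/*
	 * For example, with RLIMIT_MEMLOCK at 64 MiB and 4K pages,
	 * lock_limit is 16384 pages: a task without CAP_IPC_LOCK may
	 * hold at most that many pinned pages across all of its umems.
	 */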
	cur_base = addr & PAGE_MASK;

	if (umem->writable)
		gup_flags |= FOLL_WRITE;

	while (npages) {
		cond_resched();
		pinned = pin_user_pages_fast(cur_base,
					  min_t(unsigned long, npages,
						PAGE_SIZE /
						sizeof(struct page *)),
					  gup_flags, page_list);
		if (pinned < 0) {
			ret = pinned;
			goto umem_release;
		}

		cur_base += pinned * PAGE_SIZE;
		npages -= pinned;
		ret = sg_alloc_append_table_from_pages(
			&umem->sgt_append, page_list, pinned, 0,
			pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
			npages, GFP_KERNEL);
		if (ret) {
			unpin_user_pages_dirty_lock(page_list, pinned, 0);
			goto umem_release;
		}
	}
	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
				       DMA_BIDIRECTIONAL, dma_attr);
	if (ret)
		goto umem_release;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
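/*
 * Usage sketch (not part of this file; err_release and the surrounding
 * names are hypothetical): a driver's reg_user_mr path typically pins
 * the range, picks a page size, and releases the umem on any failure:
 *
 *	umem = ib_umem_get(ibdev, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *	page_size = ib_umem_find_best_pgsz(umem, dev_pgsz_bitmap, iova);
 *	if (!page_size) {
 *		ret = -EINVAL;
 *		goto err_release;
 *	}
 *	...
 * err_release:
 *	ib_umem_release(umem);
 *	return ERR_PTR(ret);
 */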
/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_dmabuf)
		return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
		       __func__, offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
				 umem->sgt_append.sgt.orig_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
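/*
 * Usage sketch (not part of this file; hdr is a hypothetical buffer):
 * copying the first 64 bytes of a pinned region into kernel memory.
 * Note the offset is relative to the addr originally passed to
 * ib_umem_get(), not to the start of the first page:
 *
 *	u8 hdr[64];
 *	int err;
 *
 *	err = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */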