// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>
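
/*
 * udmabuf: export memfd-backed (shmem or hugetlbfs) pages as a dma-buf.
 * Userspace opens /dev/udmabuf and issues UDMABUF_CREATE /
 * UDMABUF_CREATE_LIST ioctls to turn ranges of a sealed memfd into a
 * dma-buf file descriptor that devices can map.
 */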
static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");
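
/*
 * Per-buffer bookkeeping: the pinned page array, a lazily created
 * sg_table cached for CPU-access syncing, and the misc device used as
 * the DMA device for those syncs.
 */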
struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};
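
/*
 * All pages are pinned up front in udmabuf_create(), so the fault
 * handler only has to bounds-check the offset, look up the page, and
 * take an extra reference on it.
 */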
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}
static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};
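
/* The buffer is shared by design, so only shared mappings are allowed. */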
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}
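
/*
 * vmap builds a virtually contiguous kernel mapping of the (possibly
 * scattered) page array via vm_map_ram(); vunmap tears it down again.
 */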
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	void *vaddr;

	dma_resv_assert_held(buf->resv);

	vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}
static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	vm_unmap_ram(map->vaddr, ubuf->pagecount);
}
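
/*
 * Build an sg_table covering the whole page array and map it for DMA
 * to @dev; put_sg_table() is the matching unmap-and-free path.
 */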
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}
static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}
static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}
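
/*
 * Final teardown: undo the cached CPU-access mapping (if any) and drop
 * the page references taken at create time.
 */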
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}
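
/*
 * CPU-access bracketing: the first begin_cpu_access lazily creates and
 * caches a DMA mapping against the misc device; subsequent calls just
 * sync the cached sg_table for CPU or device access.
 */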
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}
static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}
static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf	   = map_udmabuf,
	.unmap_dma_buf	   = unmap_udmabuf,
	.release	   = release_udmabuf,
	.mmap		   = mmap_udmabuf,
	.vmap		   = vmap_udmabuf,
	.vunmap		   = vunmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access	   = end_cpu_udmabuf,
};
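
/*
 * The backing memfd must be sealed against shrinking (so the pinned
 * pages cannot be truncated away underneath the dma-buf) and must not
 * be write-sealed (the exported buffer is mapped writable).
 */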
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
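
/*
 * Core create path: validate each memfd range (page alignment, overall
 * size limit, mapping type, seals), pin the pages (taking sub-page
 * references on huge pages for hugetlbfs memfds), then export the page
 * array as a dma-buf and return its fd.
 */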
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page, *hpage = NULL;
	pgoff_t subpgoff, maxsubpgs;
	struct hstate *hpstate;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		if (is_file_hugepages(memfd)) {
			hpstate = hstate_file(memfd);
			pgoff = list[i].offset >> huge_page_shift(hpstate);
			subpgoff = (list[i].offset &
				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
		}
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			if (is_file_hugepages(memfd)) {
				if (!hpage) {
					hpage = find_get_page_flags(mapping, pgoff,
								    FGP_ACCESSED);
					if (!hpage) {
						ret = -EINVAL;
						goto err;
					}
				}
				page = hpage + subpgoff;
				get_page(page);
				subpgoff++;
				if (subpgoff == maxsubpgs) {
					put_page(hpage);
					hpage = NULL;
					subpgoff = 0;
					pgoff++;
				}
			} else {
				page = shmem_read_mapping_page(mapping,
							       pgoff + pgidx);
				if (IS_ERR(page)) {
					ret = PTR_ERR(page);
					goto err;
				}
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
		if (hpage) {
			put_page(hpage);
			hpage = NULL;
		}
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}
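
/*
 * Single-segment ioctl entry point. A minimal userspace sketch of its
 * use (error handling omitted; UDMABUF_CREATE, UDMABUF_FLAGS_CLOEXEC
 * and struct udmabuf_create come from the uapi <linux/udmabuf.h>):
 *
 *	int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(memfd, size);
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = size,
 *	};
 *	int dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
 */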
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}
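
/*
 * The list variant copies up to list_limit items from userspace and
 * feeds them to udmabuf_create() as one multi-segment buffer.
 */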
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}
static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}
static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= udmabuf_ioctl,
#endif
};
static struct miscdevice udmabuf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "udmabuf",
	.fops		= &udmabuf_fops,
};
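
/*
 * Register the misc device and give it a 64-bit DMA mask, so the
 * CPU-access sg mappings made against this device are not restricted
 * to the low 32 bits of the address space.
 */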
static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}
static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}
module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");