// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>
static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");
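/*
 * Note: both limits are plain module parameters with mode 0644, so root
 * can tune them at runtime via
 * /sys/module/udmabuf/parameters/{list_limit,size_limit_mb}.
 */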
struct udmabuf {
        pgoff_t pagecount;
        struct page **pages;
        struct sg_table *sg;
        struct miscdevice *device;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;
        pgoff_t pgoff = vmf->pgoff;

        if (pgoff >= ubuf->pagecount)
                return VM_FAULT_SIGBUS;
        vmf->page = ubuf->pages[pgoff];
        get_page(vmf->page);
        return 0;
}
static const struct vm_operations_struct udmabuf_vm_ops = {
        .fault = udmabuf_vm_fault,
};
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct udmabuf *ubuf = buf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &udmabuf_vm_ops;
        vma->vm_private_data = ubuf;
        return 0;
}
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
        struct udmabuf *ubuf = buf->priv;
        void *vaddr;

        dma_resv_assert_held(buf->resv);

        vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
        if (!vaddr)
                return -EINVAL;

        iosys_map_set_vaddr(map, vaddr);
        return 0;
}
static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
        struct udmabuf *ubuf = buf->priv;

        dma_resv_assert_held(buf->resv);

        vm_unmap_ram(map->vaddr, ubuf->pagecount);
}
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
                                     enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct sg_table *sg;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
                                        0, ubuf->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret < 0)
                goto err;
        ret = dma_map_sgtable(dev, sg, direction, 0);
        if (ret < 0)
                goto err;
        return sg;

err:
        sg_free_table(sg);
        kfree(sg);
        return ERR_PTR(ret);
}
static void put_sg_table(struct device *dev, struct sg_table *sg,
                         enum dma_data_direction direction)
{
        dma_unmap_sgtable(dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
}
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
                                    enum dma_data_direction direction)
{
        return get_sg_table(at->dev, at->dmabuf, direction);
}
static void unmap_udmabuf(struct dma_buf_attachment *at,
                          struct sg_table *sg,
                          enum dma_data_direction direction)
{
        return put_sg_table(at->dev, sg, direction);
}
static void release_udmabuf(struct dma_buf *buf)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        pgoff_t pg;

        if (ubuf->sg)
                put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

        for (pg = 0; pg < ubuf->pagecount; pg++)
                put_page(ubuf->pages[pg]);
        kfree(ubuf->pages);
        kfree(ubuf);
}
static int begin_cpu_udmabuf(struct dma_buf *buf,
                             enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        int ret = 0;

        if (!ubuf->sg) {
                ubuf->sg = get_sg_table(dev, buf, direction);
                if (IS_ERR(ubuf->sg)) {
                        ret = PTR_ERR(ubuf->sg);
                        ubuf->sg = NULL;
                }
        } else {
                dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
                                    direction);
        }

        return ret;
}
static int end_cpu_udmabuf(struct dma_buf *buf,
                           enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (!ubuf->sg)
                return -EINVAL;

        dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
        return 0;
}
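/*
 * With cache_sgt_mapping set below, the dma-buf core caches the
 * sg_table produced by the first map_dma_buf call on an attachment
 * and reuses it for subsequent maps of that attachment.
 */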
static const struct dma_buf_ops udmabuf_ops = {
        .cache_sgt_mapping = true,
        .map_dma_buf       = map_udmabuf,
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
        .mmap              = mmap_udmabuf,
        .vmap              = vmap_udmabuf,
        .vunmap            = vunmap_udmabuf,
        .begin_cpu_access  = begin_cpu_udmabuf,
        .end_cpu_access    = end_cpu_udmabuf,
};
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
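/*
 * Minimal sketch of the expected userspace flow (illustrative only;
 * error handling elided, buffer size an arbitrary example):
 *
 *   int memfd = memfd_create("buffer", MFD_ALLOW_SEALING);
 *   ftruncate(memfd, getpagesize() * 16);
 *   fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *   struct udmabuf_create create = {
 *           .memfd  = memfd,
 *           .flags  = UDMABUF_FLAGS_CLOEXEC,
 *           .offset = 0,
 *           .size   = getpagesize() * 16,
 *   };
 *   int devfd = open("/dev/udmabuf", O_RDWR);
 *   int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 */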
static long udmabuf_create(struct miscdevice *device,
                           struct udmabuf_create_list *head,
                           struct udmabuf_create_item *list)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct file *memfd = NULL;
        struct address_space *mapping = NULL;
        struct udmabuf *ubuf;
        struct dma_buf *buf;
        pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
        struct page *page;
        int seals, ret = -EINVAL;
        u32 i, flags;

        ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
        if (!ubuf)
                return -ENOMEM;

        pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
        for (i = 0; i < head->count; i++) {
                if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
                        goto err;
                if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
                        goto err;
                ubuf->pagecount += list[i].size >> PAGE_SHIFT;
                if (ubuf->pagecount > pglimit)
                        goto err;
        }

        if (!ubuf->pagecount)
                goto err;

        ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
                                    GFP_KERNEL);
        if (!ubuf->pages) {
                ret = -ENOMEM;
                goto err;
        }

        for (i = 0; i < head->count; i++) {
                ret = -EBADFD;
                memfd = fget(list[i].memfd);
                if (!memfd)
                        goto err;
                mapping = memfd->f_mapping;
                if (!shmem_mapping(mapping))
                        goto err;
                seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
                if (seals == -EINVAL)
                        goto err;
                ret = -EINVAL;
                if ((seals & SEALS_WANTED) != SEALS_WANTED ||
                    (seals & SEALS_DENIED) != 0)
                        goto err;
                pgoff = list[i].offset >> PAGE_SHIFT;
                pgcnt = list[i].size >> PAGE_SHIFT;
                for (pgidx = 0; pgidx < pgcnt; pgidx++) {
                        page = shmem_read_mapping_page(mapping, pgoff + pgidx);
                        if (IS_ERR(page)) {
                                ret = PTR_ERR(page);
                                goto err;
                        }
                        ubuf->pages[pgbuf++] = page;
                }
                fput(memfd);
                memfd = NULL;
        }

        exp_info.ops  = &udmabuf_ops;
        exp_info.size = ubuf->pagecount << PAGE_SHIFT;
        exp_info.priv = ubuf;
        exp_info.flags = O_RDWR;

        ubuf->device = device;
        buf = dma_buf_export(&exp_info);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto err;
        }

        flags = 0;
        if (head->flags & UDMABUF_FLAGS_CLOEXEC)
                flags |= O_CLOEXEC;
        return dma_buf_fd(buf, flags);

err:
        while (pgbuf > 0)
                put_page(ubuf->pages[--pgbuf]);
        if (memfd)
                fput(memfd);
        kfree(ubuf->pages);
        kfree(ubuf);
        return ret;
}
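/*
 * UDMABUF_CREATE is a thin wrapper around the list path: it packs the
 * single request into a one-entry list and calls udmabuf_create().
 */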
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
        struct udmabuf_create create;
        struct udmabuf_create_list head;
        struct udmabuf_create_item list;

        if (copy_from_user(&create, (void __user *)arg,
                           sizeof(create)))
                return -EFAULT;

        head.flags  = create.flags;
        head.count  = 1;
        list.memfd  = create.memfd;
        list.offset = create.offset;
        list.size   = create.size;

        return udmabuf_create(filp->private_data, &head, &list);
}
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
        struct udmabuf_create_list head;
        struct udmabuf_create_item *list;
        int ret = -EINVAL;
        u32 lsize;

        if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
                return -EFAULT;
        if (head.count > list_limit)
                return -EINVAL;
        lsize = sizeof(struct udmabuf_create_item) * head.count;
        list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
        if (IS_ERR(list))
                return PTR_ERR(list);

        ret = udmabuf_create(filp->private_data, &head, list);
        kfree(list);
        return ret;
}
static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
                          unsigned long arg)
{
        long ret;

        switch (ioctl) {
        case UDMABUF_CREATE:
                ret = udmabuf_ioctl_create(filp, arg);
                break;
        case UDMABUF_CREATE_LIST:
                ret = udmabuf_ioctl_create_list(filp, arg);
                break;
        default:
                ret = -ENOTTY;
                break;
        }
        return ret;
}
static const struct file_operations udmabuf_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = udmabuf_ioctl,
#endif
};
static struct miscdevice udmabuf_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "udmabuf",
        .fops  = &udmabuf_fops,
};
static int __init udmabuf_dev_init(void)
{
        int ret;

        ret = misc_register(&udmabuf_misc);
        if (ret < 0) {
                pr_err("Could not initialize udmabuf device\n");
                return ret;
        }

        ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
                                           DMA_BIT_MASK(64));
        if (ret < 0) {
                pr_err("Could not setup DMA mask for udmabuf device\n");
                misc_deregister(&udmabuf_misc);
                return ret;
        }

        return 0;
}
static void __exit udmabuf_dev_exit(void)
{
        misc_deregister(&udmabuf_misc);
}
module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");