// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS(DMA_BUF);

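/*
 * A gntdev_dmabuf describes one buffer for the lifetime of its file
 * descriptor: a buffer is either exported (backed by a grant map and
 * reference counted) or imported (backed by a foreign dma-buf whose
 * pages we grant to another domain), hence the exp/imp union below.
 */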
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

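/*
 * All three lists above are only traversed or modified under priv->lock;
 * dmabuf_exp_release() relies on its callers holding that lock when it
 * unlinks a buffer and signals waiters. The file reference taken at
 * export time keeps the gntdev context alive for as long as any exported
 * dma-buf is held by an external user.
 */
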
/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, either the buffer
	 * has already been released or the file descriptor provided is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate a wait object and add it to the wait
	 * list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

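/*
 * The wait protocol above works in three steps: take a temporary
 * reference so the buffer cannot vanish while the wait object is being
 * queued, drop that reference once the object is on exp_wait_list, and
 * sleep until dmabuf_exp_release() fires the completion (or the timeout
 * expires).
 */
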
/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

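/*
 * Design note: the mapping made above is cached in the attachment and
 * handed back on subsequent map calls with the same direction; it is
 * only torn down in dmabuf_exp_ops_detach(), which is why the unmap
 * callback below is intentionally a no-op.
 */
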
static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

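/*
 * On success the new gntdev_dmabuf is owned by the exported dma-buf: it
 * is only freed from dmabuf_exp_release(), triggered when the dma-buf
 * core calls dmabuf_exp_ops_release() as the last handle to the buffer
 * goes away.
 */
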
static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

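/*
 * Export flow in short: allocate DMA-able backing storage, fill in the
 * grant references provided by user-space, map the foreign grants into
 * the backing pages, and finally wrap those pages in a dma-buf whose
 * file descriptor is handed back to the caller.
 */
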
/* DMA buffer import support. */

static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);

	return ret;
}

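/*
 * Each imported page is granted to the foreign domain with full access
 * (the trailing 0 means the grant is not read-only); the references
 * collected in refs[] are reported back to user-space and are what
 * dmabuf_imp_end_foreign_access() revokes on release.
 */
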
static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != INVALID_GRANT_REF)
			gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if page is valid: this can happen if we are given
		 * a page from VRAM or other resources which are not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

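/*
 * Import flow in short: take a reference to the foreign dma-buf, attach
 * and map it to obtain an sg_table, sanity-check the layout (zero
 * offset, expected size, struct-page-backed memory), then grant every
 * page to the remote domain and report the grant references back to
 * user-space.
 */
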
/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
						  DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

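/*
 * Illustrative user-space usage of the ioctl above (a sketch, assuming
 * the uapi layout in include/uapi/xen/gntdev.h; error handling omitted):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *
 *	op = calloc(1, sizeof(*op) + (count - 1) * sizeof(op->refs[0]));
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = count;
 *	op->domid = remote_domid;
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
 *
 * On return op->fd holds the exported dma-buf file descriptor.
 */
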
long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}