dma-buf: Add ioctls to allow userspace to flush 61/115761/5
author	Daniel Vetter <daniel.vetter@intel.com>
	Tue, 21 Feb 2017 07:37:38 +0000 (16:37 +0900)
committer	Seung-Woo Kim <sw0312.kim@samsung.com>
	Wed, 8 Mar 2017 10:07:11 +0000 (02:07 -0800)
Userspace might need some sort of cache coherency management, e.g. when CPU
and GPU domains access the same dma-buf at the same time. To circumvent this
problem there are begin/end coherency markers that forward directly to the
existing dma-buf device drivers' vfunc hooks. Userspace can make use of those
markers through the DMA_BUF_IOCTL_SYNC ioctl. The sequence is used as
follows:
     - mmap the dma-buf fd
     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl,
       2. read/write to the mmap area, 3. SYNC_END ioctl (see the sketch
       after this list). This can be repeated as often as you want (with
       the new data being consumed by the GPU or, say, a scanout device)
     - munmap once you don't need the buffer any more
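
A minimal userspace sketch of one such cycle, written against the uapi header
added below; the dma-buf fd itself comes from elsewhere (e.g. a DRM PRIME
export), cpu_write_cycle() is just a hypothetical helper name, and error
handling of the ioctl calls is trimmed:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/dma-buf.h>

    static int cpu_write_cycle(int dmabuf_fd, size_t size, const void *src)
    {
            struct dma_buf_sync sync = { 0 };
            void *map;

            map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       dmabuf_fd, 0);
            if (map == MAP_FAILED)
                    return -1;

            /* 1. open the coherency bracket before touching the buffer */
            sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
            ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

            /* 2. plain CPU access through the mapping */
            memcpy(map, src, size);

            /* 3. close the bracket so devices see coherent data */
            sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
            ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

            munmap(map, size);
            return 0;
    }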

v2 (Tiago): Fix header file type names (u64 -> __u64)
v3 (Tiago): Add documentation. Pass enum dma_buf_sync_flags to the begin/end
dma-buf functions. Check for overflows in start/length.
v4 (Tiago): use 2d regions for sync.
v5 (Tiago): forget about 2d regions (v4); use _IOW in DMA_BUF_IOCTL_SYNC and
remove range information from struct dma_buf_sync.
v6 (Tiago): use a padded __u64 flags field instead of an enum. Adjust the
documentation's recommendation on using the sync ioctls.
v7 (Tiago): address Alex's nit on the flags definition and add even more
wording to the doc about sync usage.
v9 (Tiago): remove the useless is_dma_buf_file check. Fix the sync.flags
conditionals and their mask order check. Add the <linux/types.h> include in
dma-buf.h.

Change-Id: Id20b5354ba481e2d896b08500e1323eedcb16e5b
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1455228291-29640-1-git-send-email-tiago.vignatti@intel.com
The following changes are included on top:
- backport of mainline commit 831e9da7dc5c22fd2a5fb64e999f6e077a4338c3
- fix build errors caused by argument mismatches in dma_buf_begin/end_cpu_access() calls in the v4l2, mali and ion drivers.
Signed-off-by: Inki Dae <inki.dae@samsung.com>
drivers/dma-buf/dma-buf.c
drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c
drivers/media/v4l2-core/videobuf2-ion.c
drivers/staging/android/ion/ion.c
include/linux/dma-buf.h
include/uapi/linux/dma-buf.h [new file with mode: 0644]

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 891d5b2e6a0f9439293783e216ea9eb348df1055..34cef83b769a261df1e8a30d60a62ef3b70a1363 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
 #include <linux/poll.h>
 #include <linux/reservation.h>
 
+#include <asm/uaccess.h>
+
+#include <uapi/linux/dma-buf.h>
+
 static inline int is_dma_buf_file(struct file *);
 
 struct dma_buf_list {
@@ -249,11 +253,55 @@ out:
        return events;
 }
 
+static long dma_buf_ioctl(struct file *file,
+                         unsigned int cmd, unsigned long arg)
+{
+       struct dma_buf *dmabuf;
+       struct dma_buf_sync sync;
+       enum dma_data_direction direction;
+
+       dmabuf = file->private_data;
+
+       switch (cmd) {
+       case DMA_BUF_IOCTL_SYNC:
+               if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
+                       return -EFAULT;
+
+               if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
+                       return -EINVAL;
+
+               switch (sync.flags & DMA_BUF_SYNC_RW) {
+               case DMA_BUF_SYNC_READ:
+                       direction = DMA_FROM_DEVICE;
+                       break;
+               case DMA_BUF_SYNC_WRITE:
+                       direction = DMA_TO_DEVICE;
+                       break;
+               case DMA_BUF_SYNC_RW:
+                       direction = DMA_BIDIRECTIONAL;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               if (sync.flags & DMA_BUF_SYNC_END)
+                       dma_buf_end_cpu_access(dmabuf, direction);
+               else
+                       dma_buf_begin_cpu_access(dmabuf, direction);
+
+               return 0;
+       default:
+               return -ENOTTY;
+       }
+}
+
+
 static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
+       .unlocked_ioctl = dma_buf_ioctl,
 };
 
 /*
@@ -555,13 +603,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  * preparations. Coherency is only guaranteed in the specified range for the
  * specified access direction.
  * @dmabuf:    [in]    buffer to prepare cpu access for.
- * @start:     [in]    start of range for cpu access.
- * @len:       [in]    length of range for cpu access.
 * @direction: [in]    direction of cpu access.
  *
  * Can return negative error values, returns 0 on success.
  */
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                             enum dma_data_direction direction)
 {
        int ret = 0;
@@ -570,7 +616,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                return -EINVAL;
 
        if (dmabuf->ops->begin_cpu_access)
-               ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+               ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
        return ret;
 }
@@ -582,19 +628,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
  * actions. Coherency is only guaranteed in the specified range for the
  * specified access direction.
  * @dmabuf:    [in]    buffer to complete cpu access for.
- * @start:     [in]    start of range for cpu access.
- * @len:       [in]    length of range for cpu access.
 * @direction: [in]    direction of cpu access.
  *
  * This call must always succeed.
  */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                            enum dma_data_direction direction)
 {
        WARN_ON(!dmabuf);
 
        if (dmabuf->ops->end_cpu_access)
-               dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+               dmabuf->ops->end_cpu_access(dmabuf, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
diff --git a/drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c b/drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c
index 3b4d3750cbf56a1d6250c105c4d8349a5d78fcf7..e9104e78786c07db14116dd4d137be53171df3e5 100644
--- a/drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c
+++ b/drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c
@@ -820,9 +820,7 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
 
                KBASE_DEBUG_ASSERT(dma_buf != NULL);
 
-               ret = dma_buf_begin_cpu_access(dma_buf, 0,
-                               buf_data->nr_extres_pages*PAGE_SIZE,
-                               DMA_FROM_DEVICE);
+               ret = dma_buf_begin_cpu_access(dma_buf, DMA_FROM_DEVICE);
                if (ret)
                        goto out_unlock;
 
@@ -841,9 +839,7 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
                        if (target_page_nr >= buf_data->nr_pages)
                                break;
                }
-               dma_buf_end_cpu_access(dma_buf, 0,
-                               buf_data->nr_extres_pages*PAGE_SIZE,
-                               DMA_FROM_DEVICE);
+               dma_buf_end_cpu_access(dma_buf, DMA_FROM_DEVICE);
                break;
        }
 #endif
diff --git a/drivers/media/v4l2-core/videobuf2-ion.c b/drivers/media/v4l2-core/videobuf2-ion.c
index 1f52c66191085eb17f144bf9645cf0c016d5997b..301ec0e60ad7dd870105d702ab397c4e5d6720f4 100644
--- a/drivers/media/v4l2-core/videobuf2-ion.c
+++ b/drivers/media/v4l2-core/videobuf2-ion.c
@@ -317,15 +317,13 @@ static void *vb2_ion_vaddr(void *buf_priv)
        if (buf->handle)
                return vb2_ion_private_vaddr(&buf->cookie);
 
-       if (dma_buf_begin_cpu_access(buf->dma_buf,
-               0, buf->size, buf->direction))
+       if (dma_buf_begin_cpu_access(buf->dma_buf, buf->direction))
                return NULL;
 
        buf->kva = dma_buf_kmap(buf->dma_buf, buf->cookie.offset / PAGE_SIZE);
 
        if (buf->kva == NULL)
-               dma_buf_end_cpu_access(buf->dma_buf, 0,
-                       buf->size, buf->direction);
+               dma_buf_end_cpu_access(buf->dma_buf, buf->direction);
        else
                buf->kva += buf->cookie.offset & ~PAGE_MASK;
 
@@ -463,7 +461,7 @@ static void vb2_ion_detach_dmabuf(void *mem_priv)
 
        if (buf->kva != NULL) {
                dma_buf_kunmap(buf->dma_buf, 0, buf->kva);
-               dma_buf_end_cpu_access(buf->dma_buf, 0, buf->size, 0);
+               dma_buf_end_cpu_access(buf->dma_buf, 0);
        }
 
        /* detach this attachment */
@@ -723,8 +721,7 @@ static void vb2_ion_put_userptr(void *mem_priv)
        if (buf->kva) {
                dma_buf_kunmap(buf->dma_buf, buf->cookie.offset / PAGE_SIZE,
                                buf->kva - (buf->cookie.offset & ~PAGE_SIZE));
-               dma_buf_end_cpu_access(buf->dma_buf, buf->cookie.offset,
-                                       buf->size, DMA_FROM_DEVICE);
+               dma_buf_end_cpu_access(buf->dma_buf, DMA_FROM_DEVICE);
        }
 
        if (buf->dma_buf)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 6eab5e0c797b0fde38466bf96f96a85b87e08d1d..77eede4d849834f8e66d590d33e1457360859f5e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1430,8 +1430,7 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
 {
 }
 
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                       size_t len,
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
 {
        struct ion_buffer *buffer = dmabuf->priv;
@@ -1449,8 +1448,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
        return PTR_ERR_OR_ZERO(vaddr);
 }
 
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                      size_t len,
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                       enum dma_data_direction direction)
 {
        struct ion_buffer *buffer = dmabuf->priv;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index cb710072cd676ca6dcc408855b2b4a09cdd39868..86f7a92ba5652aa92e0815b978f36fb1bf5b2e67 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -93,10 +93,8 @@ struct dma_buf_ops {
        /* after final dma_buf_put() */
        void (*release)(struct dma_buf *);
 
-       int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
-                               enum dma_data_direction);
-       void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
-                              enum dma_data_direction);
+       int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+       void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
        void *(*kmap_atomic)(struct dma_buf *, unsigned long);
        void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
        void *(*kmap)(struct dma_buf *, unsigned long);
@@ -196,9 +194,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
                                        enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
                                enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                             enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
                            enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
new file mode 100644
index 0000000..fb0dedb
--- /dev/null
+++ b/include/uapi/linux/dma-buf.h
@@ -0,0 +1,40 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2015 Intel Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DMA_BUF_UAPI_H_
+#define _DMA_BUF_UAPI_H_
+
+#include <linux/types.h>
+
+/* begin/end dma-buf functions used for userspace mmap. */
+struct dma_buf_sync {
+       __u64 flags;
+};
+
+#define DMA_BUF_SYNC_READ      (1 << 0)
+#define DMA_BUF_SYNC_WRITE     (2 << 0)
+#define DMA_BUF_SYNC_RW        (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
+#define DMA_BUF_SYNC_START     (0 << 2)
+#define DMA_BUF_SYNC_END       (1 << 2)
+#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
+       (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
+
+#define DMA_BUF_BASE           'b'
+#define DMA_BUF_IOCTL_SYNC     _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+#endif