From: Daniel Vetter Date: Tue, 21 Feb 2017 07:37:38 +0000 (+0900) Subject: dma-buf: Add ioctls to allow userspace to flush X-Git-Tag: submit/tizen/20170309.044430~11 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=cec7f0c531f2d2aff0504983c858463cc975da36;p=profile%2Fwearable%2Fplatform%2Fkernel%2Flinux-3.18-exynos7270.git dma-buf: Add ioctls to allow userspace to flush The userspace might need some sort of cache coherency management e.g. when CPU and GPU domains are being accessed through dma-buf at the same time. To circumvent this problem there are begin/end coherency markers, that forward directly to existing dma-buf device drivers vfunc hooks. Userspace can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The sequence would be used like following: - mmap dma-buf fd - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write to mmap area 3. SYNC_END ioctl. This can be repeated as often as you want (with the new data being consumed by the GPU or say scanout device) - munmap once you don't need the buffer any more v2 (Tiago): Fix header file type names (u64 -> __u64) v3 (Tiago): Add documentation. Use enum dma_buf_sync_flags to the begin/end dma-buf functions. Check for overflows in start/length. v4 (Tiago): use 2d regions for sync. v5 (Tiago): forget about 2d regions (v4); use _IOW in DMA_BUF_IOCTL_SYNC and remove range information from struct dma_buf_sync. v6 (Tiago): use __u64 structured padded flags instead enum. Adjust documentation about the recommendation on using sync ioctls. v7 (Tiago): Alex' nit on flags definition and being even more wording in the doc about sync usage. v9 (Tiago): remove useless is_dma_buf_file check. Fix sync.flags conditionals and its mask order check. Add include in dma-buf.h. 
Change-Id: Id20b5354ba481e2d896b08500e1323eedcb16e5b Cc: Ville Syrjälä Cc: David Herrmann Cc: Sumit Semwal Reviewed-by: Stéphane Marchesin Signed-off-by: Daniel Vetter Signed-off-by: Tiago Vignatti Reviewed-by: David Herrmann Signed-off-by: Daniel Vetter Signed-off-by: Marek Szyprowski Link: http://patchwork.freedesktop.org/patch/msgid/1455228291-29640-1-git-send-email-tiago.vignatti@intel.com [backport of mainline commit 18b862dcd57a3e23e34c8cd1e939f68548c1209a] And below things are included, - backport of mainline commit 831e9da7dc5c22fd2a5fb64e999f6e077a4338c3 - fix build errors incurred by argument mismatches of dma_buf_begin/end_cpu_access function calls in v4l2/mali/ion drivers. Signed-off-by: Inki Dae --- diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 891d5b2e6a0..34cef83b769 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -33,6 +33,10 @@ #include #include +#include + +#include + static inline int is_dma_buf_file(struct file *); struct dma_buf_list { @@ -249,11 +253,55 @@ out: return events; } +static long dma_buf_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct dma_buf *dmabuf; + struct dma_buf_sync sync; + enum dma_data_direction direction; + + dmabuf = file->private_data; + + switch (cmd) { + case DMA_BUF_IOCTL_SYNC: + if (copy_from_user(&sync, (void __user *) arg, sizeof(sync))) + return -EFAULT; + + if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) + return -EINVAL; + + switch (sync.flags & DMA_BUF_SYNC_RW) { + case DMA_BUF_SYNC_READ: + direction = DMA_FROM_DEVICE; + break; + case DMA_BUF_SYNC_WRITE: + direction = DMA_TO_DEVICE; + break; + case DMA_BUF_SYNC_RW: + direction = DMA_BIDIRECTIONAL; + break; + default: + return -EINVAL; + } + + if (sync.flags & DMA_BUF_SYNC_END) + dma_buf_end_cpu_access(dmabuf, direction); + else + dma_buf_begin_cpu_access(dmabuf, direction); + + return 0; + default: + return -ENOTTY; + } +} + + static const struct file_operations dma_buf_fops = { 
.release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, + .unlocked_ioctl = dma_buf_ioctl, }; /* @@ -555,13 +603,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); * preparations. Coherency is only guaranteed in the specified range for the * specified access direction. * @dmabuf: [in] buffer to prepare cpu access for. - * @start: [in] start of range for cpu access. - * @len: [in] length of range for cpu access. * @direction: [in] length of range for cpu access. * * Can return negative error values, returns 0 on success. */ -int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, +int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { int ret = 0; @@ -570,7 +616,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, return -EINVAL; if (dmabuf->ops->begin_cpu_access) - ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction); + ret = dmabuf->ops->begin_cpu_access(dmabuf, direction); return ret; } @@ -582,19 +628,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); * actions. Coherency is only guaranteed in the specified range for the * specified access direction. * @dmabuf: [in] buffer to complete cpu access for. - * @start: [in] start of range for cpu access. - * @len: [in] length of range for cpu access. * @direction: [in] length of range for cpu access. * * This call must always succeed. 
*/ -void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, +void dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { WARN_ON(!dmabuf); if (dmabuf->ops->end_cpu_access) - dmabuf->ops->end_cpu_access(dmabuf, start, len, direction); + dmabuf->ops->end_cpu_access(dmabuf, direction); } EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); diff --git a/drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c b/drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c index 3b4d3750cbf..e9104e78786 100644 --- a/drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c +++ b/drivers/gpu/arm_tizen/t72x/r12p0/mali_kbase_softjobs.c @@ -820,9 +820,7 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx, KBASE_DEBUG_ASSERT(dma_buf != NULL); - ret = dma_buf_begin_cpu_access(dma_buf, 0, - buf_data->nr_extres_pages*PAGE_SIZE, - DMA_FROM_DEVICE); + ret = dma_buf_begin_cpu_access(dma_buf, DMA_FROM_DEVICE); if (ret) goto out_unlock; @@ -841,9 +839,7 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx, if (target_page_nr >= buf_data->nr_pages) break; } - dma_buf_end_cpu_access(dma_buf, 0, - buf_data->nr_extres_pages*PAGE_SIZE, - DMA_FROM_DEVICE); + dma_buf_end_cpu_access(dma_buf, DMA_FROM_DEVICE); break; } #endif diff --git a/drivers/media/v4l2-core/videobuf2-ion.c b/drivers/media/v4l2-core/videobuf2-ion.c index 1f52c661910..301ec0e60ad 100644 --- a/drivers/media/v4l2-core/videobuf2-ion.c +++ b/drivers/media/v4l2-core/videobuf2-ion.c @@ -317,15 +317,13 @@ static void *vb2_ion_vaddr(void *buf_priv) if (buf->handle) return vb2_ion_private_vaddr(&buf->cookie); - if (dma_buf_begin_cpu_access(buf->dma_buf, - 0, buf->size, buf->direction)) + if (dma_buf_begin_cpu_access(buf->dma_buf, buf->direction)) return NULL; buf->kva = dma_buf_kmap(buf->dma_buf, buf->cookie.offset / PAGE_SIZE); if (buf->kva == NULL) - dma_buf_end_cpu_access(buf->dma_buf, 0, - buf->size, buf->direction); + dma_buf_end_cpu_access(buf->dma_buf, 
buf->direction); else buf->kva += buf->cookie.offset & ~PAGE_MASK; @@ -463,7 +461,7 @@ static void vb2_ion_detach_dmabuf(void *mem_priv) if (buf->kva != NULL) { dma_buf_kunmap(buf->dma_buf, 0, buf->kva); - dma_buf_end_cpu_access(buf->dma_buf, 0, buf->size, 0); + dma_buf_end_cpu_access(buf->dma_buf, 0); } /* detach this attachment */ @@ -723,8 +721,7 @@ static void vb2_ion_put_userptr(void *mem_priv) if (buf->kva) { dma_buf_kunmap(buf->dma_buf, buf->cookie.offset / PAGE_SIZE, buf->kva - (buf->cookie.offset & ~PAGE_SIZE)); - dma_buf_end_cpu_access(buf->dma_buf, buf->cookie.offset, - buf->size, DMA_FROM_DEVICE); + dma_buf_end_cpu_access(buf->dma_buf, DMA_FROM_DEVICE); } if (buf->dma_buf) diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 6eab5e0c797..77eede4d849 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1430,8 +1430,7 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, { } -static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, - size_t len, +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; @@ -1449,8 +1448,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, return PTR_ERR_OR_ZERO(vaddr); } -static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, - size_t len, +static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index cb710072cd6..86f7a92ba56 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -93,10 +93,8 @@ struct dma_buf_ops { /* after final dma_buf_put() */ void (*release)(struct dma_buf *); - int (*begin_cpu_access)(struct dma_buf *, size_t, size_t, - enum dma_data_direction); - void (*end_cpu_access)(struct dma_buf 
*, size_t, size_t, - enum dma_data_direction); + int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); void *(*kmap_atomic)(struct dma_buf *, unsigned long); void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); void *(*kmap)(struct dma_buf *, unsigned long); @@ -196,9 +194,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, enum dma_data_direction); void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction); -int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len, +int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); -void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len, +void dma_buf_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir); void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h new file mode 100644 index 00000000000..fb0dedb7c12 --- /dev/null +++ b/include/uapi/linux/dma-buf.h @@ -0,0 +1,40 @@ +/* + * Framework for buffer objects that can be shared across devices/subsystems. + * + * Copyright(C) 2015 Intel Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _DMA_BUF_UAPI_H_ +#define _DMA_BUF_UAPI_H_ + +#include <linux/types.h> + +/* begin/end dma-buf functions used for userspace mmap. */ +struct dma_buf_sync { + __u64 flags; +}; + +#define DMA_BUF_SYNC_READ (1 << 0) +#define DMA_BUF_SYNC_WRITE (2 << 0) +#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE) +#define DMA_BUF_SYNC_START (0 << 2) +#define DMA_BUF_SYNC_END (1 << 2) +#define DMA_BUF_SYNC_VALID_FLAGS_MASK \ + (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END) + +#define DMA_BUF_BASE 'b' +#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) + +#endif