From: SooChan Lim Date: Tue, 2 Feb 2021 23:55:47 +0000 (+0900) Subject: move source files to src/libtbm-exynos X-Git-Tag: submit/tizen/20210604.025052~2 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2a2962baf3143748ec938483ee2a32e4104879d1;p=platform%2Fadaptation%2Fsamsung_exynos%2Flibtbm-exynos.git move source files to src/libtbm-exynos Change-Id: I17b23b61ffb8145758d164de8b0173083815c5e9 --- diff --git a/configure.ac b/configure.ac index e2d0733..9204d9c 100644 --- a/configure.ac +++ b/configure.ac @@ -62,6 +62,7 @@ AC_SUBST(bufmgr_dir) AC_OUTPUT([ Makefile + src/libtbm-exynos/Makefile src/Makefile]) echo "" diff --git a/src/Makefile.am b/src/Makefile.am index 8cf9236..2f29455 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -1,11 +1 @@ -AM_CFLAGS = \ - @LIBTBM_EXYNOS_CFLAGS@ \ - -I$(top_srcdir) \ - -I$(top_srcdir)/src - -libtbm_exynos_la_LTLIBRARIES = libtbm_exynos.la -libtbm_exynos_ladir = /${bufmgr_dir} -libtbm_exynos_la_LIBADD = @LIBTBM_EXYNOS_LIBS@ - -libtbm_exynos_la_SOURCES = \ - tbm_bufmgr_exynos.c +SUBDIRS = libtbm-exynos \ No newline at end of file diff --git a/src/libtbm-exynos/Makefile.am b/src/libtbm-exynos/Makefile.am new file mode 100644 index 0000000..8cf9236 --- /dev/null +++ b/src/libtbm-exynos/Makefile.am @@ -0,0 +1,11 @@ +AM_CFLAGS = \ + @LIBTBM_EXYNOS_CFLAGS@ \ + -I$(top_srcdir) \ + -I$(top_srcdir)/src + +libtbm_exynos_la_LTLIBRARIES = libtbm_exynos.la +libtbm_exynos_ladir = /${bufmgr_dir} +libtbm_exynos_la_LIBADD = @LIBTBM_EXYNOS_LIBS@ + +libtbm_exynos_la_SOURCES = \ + tbm_bufmgr_exynos.c diff --git a/src/libtbm-exynos/tbm_bufmgr_exynos.c b/src/libtbm-exynos/tbm_bufmgr_exynos.c new file mode 100644 index 0000000..0e1e3ca --- /dev/null +++ b/src/libtbm-exynos/tbm_bufmgr_exynos.c @@ -0,0 +1,2205 @@ +/************************************************************************** + +libtbm_exynos + +Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved. 
+ +Contact: SooChan Lim , Sangjin Lee + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sub license, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. +IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +**************************************************************************/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tbm_bufmgr_tgl.h" + +#define TBM_COLOR_FORMAT_COUNT 4 + +#define EXYNOS_DRM_NAME "exynos" + +#define STRERR_BUFSIZE 128 + +#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1)) +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) + +static unsigned int g_tbm_surface_alignment_plane; +static unsigned int g_tbm_surface_alignment_pitch_rgb; + +#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096) +#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16) + +#define SZ_1M 0x00100000 +#define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M) +#define S5P_FIMV_D_ALIGN_PLANE_SIZE 64 +#define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16 +#define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16 +#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024) +#define S5P_FIMV_NV12MT_HALIGN 128 +#define S5P_FIMV_NV12MT_VALIGN 64 + +/* cache control at backend */ +static unsigned int g_enable_cache_ctrl = 0; + +struct dma_buf_info { + unsigned long size; + unsigned int fence_supported; + unsigned int padding; +}; + +#define DMA_BUF_ACCESS_READ 0x1 +#define DMA_BUF_ACCESS_WRITE 0x2 +#define DMA_BUF_ACCESS_DMA 0x4 +#define DMA_BUF_ACCESS_MAX 0x8 + +#define DMA_FENCE_LIST_MAX 5 + +struct dma_buf_fence { + unsigned long ctx; + unsigned int type; +}; + +#define DMABUF_IOCTL_BASE 'F' +#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type) + +#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info) +#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence) +#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence) + +/* tgl key values */ +#define GLOBAL_KEY ((unsigned int)(-1)) +/* TBM_CACHE */ +#define TBM_EXYNOS_CACHE_INV 0x01 /**< cache invalidate */ +#define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */ +#define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */ +#define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */ +#define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */ + +enum { + DEVICE_NONE = 0, + DEVICE_CA, /* cache aware device */ + DEVICE_CO /* cache oblivious device */ +}; + +typedef union _tbm_bo_cache_state tbm_bo_cache_state; + +union _tbm_bo_cache_state { + unsigned int val; + struct { + unsigned int cntFlush:16; /*Flush all index 
for sync */ + unsigned int isCached:1; + unsigned int isDirtied:2; + } data; +}; + +typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos; +typedef struct _tbm_bo_exynos *tbm_bo_exynos; + +/* tbm buffor object for exynos */ +struct _tbm_bo_exynos { + int fd; + + unsigned int name; /* FLINK ID */ + + unsigned int gem; /* GEM Handle */ + + unsigned int dmabuf; /* fd for dmabuf */ + + void *pBase; /* virtual address */ + + unsigned int size; + + unsigned int flags_exynos; + unsigned int flags_tbm; + + pthread_mutex_t mutex; + struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX]; + int device; + int opt; + + tbm_bo_cache_state cache_state; + unsigned int map_cnt; + int last_map_device; + + tbm_bufmgr_exynos bufmgr_exynos; +}; + +/* tbm bufmgr private for exynos */ +struct _tbm_bufmgr_exynos { + int fd; + int isLocal; + void *hashBos; + + int use_dma_fence; + + int tgl_fd; + + char *device_name; + void *bind_display; + + tbm_backend_bufmgr_func *bufmgr_func; + tbm_backend_bo_func *bo_func; + + tbm_bufmgr bufmgr; +}; + +const static char *STR_DEVICE[] = { + "DEF", + "CPU", + "2D", + "3D", + "MM" +}; + +const static char *STR_OPT[] = { + "NONE", + "RD", + "WR", + "RDWR" +}; + +static int _get_render_node(int is_master); + +static inline int +_tgl_init(int fd, unsigned int key) +{ + struct tgl_reg_data data; + int err; + char buf[STRERR_BUFSIZE]; + + data.key = key; + data.timeout_ms = 1000; + + err = ioctl(fd, TGL_IOCTL_REGISTER, &data); + if (err) { + TBM_ERR("error(%s) key:%d\n", + strerror_r(errno, buf, STRERR_BUFSIZE), key); + return 0; + } + + return 1; +} + +static inline int +_tgl_destroy(int fd, unsigned int key) +{ + struct tgl_reg_data data; + int err; + char buf[STRERR_BUFSIZE]; + + data.key = key; + err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data); + if (err) { + TBM_ERR("error(%s) key:%d\n", + strerror_r(errno, buf, STRERR_BUFSIZE), key); + return 0; + } + + return 1; +} + +static inline int +_tgl_lock(int fd, unsigned int key, int opt) +{ + struct tgl_lock_data 
data; + enum tgl_type_data tgl_type; + int err; + char buf[STRERR_BUFSIZE]; + + switch (opt) { + case TBM_OPTION_READ: + tgl_type = TGL_TYPE_READ; + break; + case TBM_OPTION_WRITE: + tgl_type = TGL_TYPE_WRITE; + break; + default: + tgl_type = TGL_TYPE_NONE; + break; + } + + data.key = key; + data.type = tgl_type; + + err = ioctl(fd, TGL_IOCTL_LOCK, &data); + if (err) { + TBM_ERR("error(%s) key:%d opt:%d\n", + strerror_r(errno, buf, STRERR_BUFSIZE), key, opt); + return 0; + } + + return 1; +} + +static inline int +_tgl_unlock(int fd, unsigned int key) +{ + struct tgl_lock_data data; + int err; + char buf[STRERR_BUFSIZE]; + + data.key = key; + data.type = TGL_TYPE_NONE; + + err = ioctl(fd, TGL_IOCTL_UNLOCK, &data); + if (err) { + TBM_ERR("error(%s) key:%d\n", + strerror_r(errno, buf, STRERR_BUFSIZE), key); + return 0; + } + + return 1; +} + +static inline int +_tgl_set_data(int fd, unsigned int key, unsigned int val) +{ + struct tgl_usr_data data; + int err; + char buf[STRERR_BUFSIZE]; + + data.key = key; + data.data1 = val; + + err = ioctl(fd, TGL_IOCTL_SET_DATA, &data); + if (err) { + TBM_ERR("error(%s) key:%d\n", + strerror_r(errno, buf, STRERR_BUFSIZE), key); + return 0; + } + + return 1; +} + +static inline unsigned int +_tgl_get_data(int fd, unsigned int key) +{ + struct tgl_usr_data data = { 0, }; + int err; + char buf[STRERR_BUFSIZE]; + + data.key = key; + + err = ioctl(fd, TGL_IOCTL_GET_DATA, &data); + if (err) { + TBM_ERR("error(%s) key:%d\n", + strerror_r(errno, buf, STRERR_BUFSIZE), key); + return 0; + } + + return data.data1; +} + +static int +_exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags) +{ + TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); + + /* cache flush is managed by kernel side when using dma-fence. 
*/ + if (bufmgr_exynos->use_dma_fence) + return 1; + + struct drm_exynos_gem_cache_op cache_op = {0, }; + int ret; + + /* if bo_exynos is null, do cache_flush_all */ + if (bo_exynos) { + cache_op.flags = 0; + cache_op.usr_addr = (uint64_t)((uintptr_t)bo_exynos->pBase); + cache_op.size = bo_exynos->size; + } else { + flags = TBM_EXYNOS_CACHE_FLUSH_ALL; + cache_op.flags = 0; + cache_op.usr_addr = 0; + cache_op.size = 0; + } + + if (flags & TBM_EXYNOS_CACHE_INV) { + if (flags & TBM_EXYNOS_CACHE_ALL) + cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL; + else + cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE; + } + + if (flags & TBM_EXYNOS_CACHE_CLN) { + if (flags & TBM_EXYNOS_CACHE_ALL) + cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL; + else + cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE; + } + + if (flags & TBM_EXYNOS_CACHE_ALL) + cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES; + + ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op, + sizeof(cache_op)); + if (ret) { + TBM_ERR("fail to flush the cache.\n"); + return 0; + } + + return 1; +} + +static int +_bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import) +{ + /* check whether cache control do or not */ + if (!g_enable_cache_ctrl) + return 1; + + TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); + TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0); + + if (bufmgr_exynos->use_dma_fence) + return 1; + + _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name); + + tbm_bo_cache_state cache_state; + + if (import == 0) { + cache_state.data.isDirtied = DEVICE_NONE; + cache_state.data.isCached = 0; + cache_state.data.cntFlush = 0; + + _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val); + } + + return 1; +} + +static int +_bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt) +{ + /* check whether cache control do or not */ + if (!g_enable_cache_ctrl) + return 1; + + TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); + 
TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0); + + if (bufmgr_exynos->use_dma_fence) + return 1; + + char need_flush = 0; + unsigned short cntFlush = 0; + + if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE)) + return 1; + + /* get cache state of a bo_exynos */ + bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd, + bo_exynos->name); + + /* get global cache flush count */ + cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY); + + if (device == TBM_DEVICE_CPU) { + if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO && + bo_exynos->cache_state.data.isCached) + need_flush = TBM_EXYNOS_CACHE_INV; + + bo_exynos->cache_state.data.isCached = 1; + if (opt & TBM_OPTION_WRITE) + bo_exynos->cache_state.data.isDirtied = DEVICE_CA; + else { + if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA) + bo_exynos->cache_state.data.isDirtied = DEVICE_NONE; + } + } else { + if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA && + bo_exynos->cache_state.data.isCached && + bo_exynos->cache_state.data.cntFlush == cntFlush) + need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL; + + if (opt & TBM_OPTION_WRITE) + bo_exynos->cache_state.data.isDirtied = DEVICE_CO; + else { + if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO) + bo_exynos->cache_state.data.isDirtied = DEVICE_NONE; + } + } + + if (need_flush) { + if (need_flush & TBM_EXYNOS_CACHE_ALL) + _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush)); + + /* call cache flush */ + _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush); + + TBM_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n", + bo_exynos->cache_state.data.isCached, + bo_exynos->cache_state.data.isDirtied, + need_flush, + cntFlush); + } + + return 1; +} + +static int +_bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos) +{ + /* check whether cache control do or not */ + if (!g_enable_cache_ctrl) + return 1; + + TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); + 
TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0); + + if (bufmgr_exynos->use_dma_fence) + return 1; + + unsigned short cntFlush = 0; + + /* get global cache flush count */ + cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY); + + /* save global cache flush count */ + bo_exynos->cache_state.data.cntFlush = cntFlush; + _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, + bo_exynos->cache_state.val); + + return 1; +} + +static void +_bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos) +{ + /* check whether cache control do or not */ + if (!g_enable_cache_ctrl) + return; + + TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL); + TBM_RETURN_IF_FAIL(bo_exynos != NULL); + + if (bufmgr_exynos->use_dma_fence) + return ; + + _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name); +} + +static int +_bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos) +{ + /* check whether cache control do or not */ + if (!g_enable_cache_ctrl) + return 1; + + TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); + + if (bufmgr_exynos->use_dma_fence) + return 1; + + /* open tgl fd for saving cache flush data */ + bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR); + + if (bufmgr_exynos->tgl_fd < 0) { + bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR); + if (bufmgr_exynos->tgl_fd < 0) { + TBM_ERR("fail to open global_lock:%s\n", + tgl_devfile1); + return 0; + } + } + + if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) { + TBM_ERR("fail to initialize the tgl\n"); + close(bufmgr_exynos->tgl_fd); + return 0; + } + + return 1; +} + +static void +_bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos) +{ + /* check whether cache control do or not */ + if (!g_enable_cache_ctrl) + return; + + TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL); + + if (bufmgr_exynos->use_dma_fence) + return; + + if (bufmgr_exynos->tgl_fd >= 0) + close(bufmgr_exynos->tgl_fd); +} + +static int +_tbm_exynos_open_drm() +{ + int fd = -1; + + fd = drmOpen(EXYNOS_DRM_NAME, NULL); + if (fd 
< 0) { + TBM_ERR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME); + } + + if (fd < 0) { + fd = _get_render_node(1); + if (fd < 0) { + TBM_ERR("cannot find render_node\n"); + } + } + + return fd; +} + +static int +_get_render_node(int is_master) +{ + struct udev *udev = NULL; + struct udev_enumerate *e = NULL; + struct udev_list_entry *entry = NULL; + struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL; + const char *filepath; + struct stat s; + int fd = -1; + int ret; + + TBM_DBG("search drm-device by udev(is_master:%d)\n", is_master); + + udev = udev_new(); + if (!udev) { + TBM_ERR("udev_new() failed.\n"); + return -1; + } + + e = udev_enumerate_new(udev); + udev_enumerate_add_match_subsystem(e, "drm"); + if (is_master) + udev_enumerate_add_match_sysname(e, "card[0-9]*"); + else + udev_enumerate_add_match_sysname(e, "renderD[0-9]*"); + udev_enumerate_scan_devices(e); + + udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) { + device = udev_device_new_from_syspath(udev_enumerate_get_udev(e), + udev_list_entry_get_name(entry)); + device_parent = udev_device_get_parent(device); + /* Not need unref device_parent. device_parent and device have same refcnt */ + if (device_parent) { + if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) { + drm_device = device; + TBM_DBG("Found render device: '%s' (%s)\n", + udev_device_get_syspath(drm_device), + udev_device_get_sysname(device_parent)); + break; + } + } + udev_device_unref(device); + } + + udev_enumerate_unref(e); + + if (!drm_device) { + TBM_ERR("failed to find device\n"); + udev_unref(udev); + return -1; + } + + /* Get device file path. */ + filepath = udev_device_get_devnode(drm_device); + if (!filepath) { + TBM_ERR("udev_device_get_devnode() failed.\n"); + udev_device_unref(drm_device); + udev_unref(udev); + return -1; + } + + /* Open DRM device file and check validity. 
*/ + fd = open(filepath, O_RDWR | O_CLOEXEC); + if (fd < 0) { + TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath); + udev_device_unref(drm_device); + udev_unref(udev); + return -1; + } + + ret = fstat(fd, &s); + if (ret) { + TBM_ERR("fstat() failed.\n"); + udev_device_unref(drm_device); + udev_unref(udev); + close(fd); + return -1; + } + + udev_device_unref(drm_device); + udev_unref(udev); + + return fd; +} + +static unsigned int +_get_exynos_flag_from_tbm(unsigned int ftbm) +{ + unsigned int flags = 0; + + if (ftbm & TBM_BO_SCANOUT) + flags |= EXYNOS_BO_CONTIG; + else + flags |= EXYNOS_BO_NONCONTIG; + + if (ftbm & TBM_BO_WC) + flags |= EXYNOS_BO_WC; + else if (ftbm & TBM_BO_NONCACHABLE) + flags |= EXYNOS_BO_NONCACHABLE; + else + flags |= EXYNOS_BO_CACHABLE; + + return flags; +} + +static unsigned int +_get_tbm_flag_from_exynos(unsigned int fexynos) +{ + unsigned int flags = 0; + + if (fexynos & EXYNOS_BO_NONCONTIG) + flags |= TBM_BO_DEFAULT; + else + flags |= TBM_BO_SCANOUT; + + if (fexynos & EXYNOS_BO_WC) + flags |= TBM_BO_WC; + else if (fexynos & EXYNOS_BO_CACHABLE) + flags |= TBM_BO_DEFAULT; + else + flags |= TBM_BO_NONCACHABLE; + + return flags; +} + +static unsigned int +_get_name(int fd, unsigned int gem) +{ + struct drm_gem_flink arg = {0,}; + + arg.handle = gem; + if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) { + TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem); + return 0; + } + + return (unsigned int)arg.name; +} + +static tbm_bo_handle +_exynos_bo_handle(tbm_bo_exynos bo_exynos, int device) +{ + tbm_bo_handle bo_handle; + + memset(&bo_handle, 0x0, sizeof(uint64_t)); + + switch (device) { + case TBM_DEVICE_DEFAULT: + case TBM_DEVICE_2D: + bo_handle.u32 = (uint32_t)bo_exynos->gem; + break; + case TBM_DEVICE_CPU: + if (!bo_exynos->pBase) { + struct drm_exynos_gem_map arg = {0,}; + void *map = NULL; + + arg.handle = bo_exynos->gem; + if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg, + sizeof(arg))) { + TBM_ERR("Cannot map_exynos 
gem=%d\n", bo_exynos->gem); + return (tbm_bo_handle) NULL; + } + + map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED, + bo_exynos->fd, arg.offset); + if (map == MAP_FAILED) { + TBM_ERR("Cannot usrptr gem=%d\n", bo_exynos->gem); + return (tbm_bo_handle) NULL; + } + bo_exynos->pBase = map; + } + bo_handle.ptr = (void *)bo_exynos->pBase; + break; + case TBM_DEVICE_3D: + case TBM_DEVICE_MM: + if (!bo_exynos->dmabuf) { + struct drm_prime_handle arg = {0, }; + + arg.handle = bo_exynos->gem; + if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) { + TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem); + return (tbm_bo_handle) NULL; + } + bo_exynos->dmabuf = arg.fd; + } + + bo_handle.u32 = (uint32_t)bo_exynos->dmabuf; + break; + default: + TBM_ERR("Not supported device:%d\n", device); + bo_handle.ptr = (void *) NULL; + break; + } + + return bo_handle; +} + +static int +_new_calc_plane_nv12(int width, int height) +{ + int mbX, mbY; + + mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW); + mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL); + + if (width * height < S5P_FIMV_MAX_FRAME_SIZE) + mbY = (mbY + 1) / 2 * 2; + + return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY * + S5P_FIMV_NUM_PIXELS_IN_MB_ROW)); +} + +static int +_calc_yplane_nv12(int width, int height) +{ + int mbX, mbY; + + mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN); + mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN); + + return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN); +} + +static int +_calc_uvplane_nv12(int width, int height) +{ + int mbX, mbY; + + mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN); + mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN); + + return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN); +} + +static int +_new_calc_yplane_nv12(int width, int height) +{ + return SIZE_ALIGN(_new_calc_plane_nv12(width, + height) + S5P_FIMV_D_ALIGN_PLANE_SIZE, + TBM_SURFACE_ALIGNMENT_PLANE_NV12); +} + +static int 
+_new_calc_uvplane_nv12(int width, int height) +{ + return SIZE_ALIGN((_new_calc_plane_nv12(width, + height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE, + TBM_SURFACE_ALIGNMENT_PLANE_NV12); +} + +static tbm_bufmgr_capability +tbm_exynos_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error) +{ + tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE; + + capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD; + + if (error) + *error = TBM_ERROR_NONE; + + return capabilities; +} + +static tbm_error_e +tbm_exynos_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display) +{ + tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; + TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER); + + if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd, + bufmgr_exynos->device_name, 0)) { + TBM_ERR("fail to tbm_drm_helper_wl_server_init\n"); + return TBM_ERROR_INVALID_OPERATION; + } + + bufmgr_exynos->bind_display = native_display; + + return TBM_ERROR_NONE; +} + +static tbm_error_e +tbm_exynos_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data, + uint32_t **formats, uint32_t *num) +{ + const static uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = { + TBM_FORMAT_ARGB8888, + TBM_FORMAT_XRGB8888, + TBM_FORMAT_NV12, + TBM_FORMAT_YUV420 + }; + + tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; + uint32_t *color_formats; + + TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER); + + color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT); + if (color_formats == NULL) + return TBM_ERROR_OUT_OF_MEMORY; + + memcpy(color_formats, tbm_exynos_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT); + + *formats = color_formats; + *num = TBM_COLOR_FORMAT_COUNT; + + TBM_DBG("supported format count = %d\n", *num); + + return TBM_ERROR_NONE; +} + 
+static tbm_error_e +tbm_exynos_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data, + tbm_format format, int plane_idx, int width, + int height, uint32_t *size, uint32_t *offset, + uint32_t *pitch, int *bo_idx) +{ + tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; + int bpp; + int _offset = 0; + int _pitch = 0; + int _size = 0; + int _bo_idx = 0; + + TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER); + + switch (format) { + /* 16 bpp RGB */ + case TBM_FORMAT_XRGB4444: + case TBM_FORMAT_XBGR4444: + case TBM_FORMAT_RGBX4444: + case TBM_FORMAT_BGRX4444: + case TBM_FORMAT_ARGB4444: + case TBM_FORMAT_ABGR4444: + case TBM_FORMAT_RGBA4444: + case TBM_FORMAT_BGRA4444: + case TBM_FORMAT_XRGB1555: + case TBM_FORMAT_XBGR1555: + case TBM_FORMAT_RGBX5551: + case TBM_FORMAT_BGRX5551: + case TBM_FORMAT_ARGB1555: + case TBM_FORMAT_ABGR1555: + case TBM_FORMAT_RGBA5551: + case TBM_FORMAT_BGRA5551: + case TBM_FORMAT_RGB565: + bpp = 16; + _offset = 0; + _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + break; + /* 24 bpp RGB */ + case TBM_FORMAT_RGB888: + case TBM_FORMAT_BGR888: + bpp = 24; + _offset = 0; + _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + break; + /* 32 bpp RGB */ + case TBM_FORMAT_XRGB8888: + case TBM_FORMAT_XBGR8888: + case TBM_FORMAT_RGBX8888: + case TBM_FORMAT_BGRX8888: + case TBM_FORMAT_ARGB8888: + case TBM_FORMAT_ABGR8888: + case TBM_FORMAT_RGBA8888: + case TBM_FORMAT_BGRA8888: + bpp = 32; + _offset = 0; + _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + break; + + /* packed YCbCr */ + case TBM_FORMAT_YUYV: + case TBM_FORMAT_YVYU: + case TBM_FORMAT_UYVY: + case TBM_FORMAT_VYUY: 
+ case TBM_FORMAT_AYUV: + bpp = 32; + _offset = 0; + _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + break; + + /* + * 2 plane YCbCr + * index 0 = Y plane, [7:0] Y + * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian + * or + * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian + */ + case TBM_FORMAT_NV12: + case TBM_FORMAT_NV21: + bpp = 12; + if (plane_idx == 0) { + _offset = 0; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = MAX(_calc_yplane_nv12(width, height), + _new_calc_yplane_nv12(width, height)); + _bo_idx = 0; + } else if (plane_idx == 1) { + _offset = 0; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = MAX(_calc_uvplane_nv12(width, height), + _new_calc_uvplane_nv12(width, height)); + _bo_idx = 1; + } + break; + case TBM_FORMAT_NV16: + case TBM_FORMAT_NV61: + bpp = 16; + /*if(plane_idx == 0)*/ + { + _offset = 0; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 0) + break; + } + /*else if( plane_idx ==1 )*/ + { + _offset += _size; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + } + break; + + /* + * 3 plane YCbCr + * index 0: Y plane, [7:0] Y + * index 1: Cb plane, [7:0] Cb + * index 2: Cr plane, [7:0] Cr + * or + * index 1: Cr plane, [7:0] Cr + * index 2: Cb plane, [7:0] Cb + */ + + /* + * NATIVE_BUFFER_FORMAT_YV12 + * NATIVE_BUFFER_FORMAT_I420 + */ + case TBM_FORMAT_YUV410: + case TBM_FORMAT_YVU410: + bpp = 9; + /*if(plane_idx == 0)*/ + { + _offset = 0; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 0) + break; + } + /*else if(plane_idx == 1)*/ + { + _offset += 
_size; + _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4); + _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 1) + break; + } + /*else if (plane_idx == 2)*/ + { + _offset += _size; + _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4); + _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane); + _bo_idx = 0; + } + break; + case TBM_FORMAT_YUV411: + case TBM_FORMAT_YVU411: + case TBM_FORMAT_YUV420: + case TBM_FORMAT_YVU420: + bpp = 12; + /*if(plane_idx == 0)*/ + { + _offset = 0; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 0) + break; + } + /*else if(plane_idx == 1)*/ + { + _offset += _size; + _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2); + _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 1) + break; + } + /*else if (plane_idx == 2)*/ + { + _offset += _size; + _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2); + _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane); + _bo_idx = 0; + } + break; + case TBM_FORMAT_YUV422: + case TBM_FORMAT_YVU422: + bpp = 16; + /*if(plane_idx == 0)*/ + { + _offset = 0; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 0) + break; + } + /*else if(plane_idx == 1)*/ + { + _offset += _size; + _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2); + _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 1) + break; + } + /*else if (plane_idx == 2)*/ + { + _offset += _size; + _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2); + _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane); + 
_bo_idx = 0; + } + break; + case TBM_FORMAT_YUV444: + case TBM_FORMAT_YVU444: + bpp = 24; + /*if(plane_idx == 0)*/ + { + _offset = 0; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 0) + break; + } + /*else if(plane_idx == 1)*/ + { + _offset += _size; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + if (plane_idx == 1) + break; + } + /*else if (plane_idx == 2)*/ + { + _offset += _size; + _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); + _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); + _bo_idx = 0; + } + break; + default: + bpp = 0; + break; + } + + *size = _size; + *offset = _offset; + *pitch = _pitch; + *bo_idx = _bo_idx; + + return TBM_ERROR_NONE; +} + +static tbm_backend_bo_data * +tbm_exynos_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size, + tbm_bo_memory_type flags, tbm_error_e *error) +{ + tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; + tbm_bo_exynos bo_exynos; + unsigned int exynos_flags; + + if (bufmgr_exynos == NULL) { + TBM_ERR("bufmgr_data is null\n"); + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return NULL; + } + + bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos)); + if (!bo_exynos) { + TBM_ERR("fail to allocate the bo_exynos private\n"); + if (error) + *error = TBM_ERROR_OUT_OF_MEMORY; + return NULL; + } + bo_exynos->bufmgr_exynos = bufmgr_exynos; + + exynos_flags = _get_exynos_flag_from_tbm(flags); + if ((flags & TBM_BO_SCANOUT) && + size <= 4 * 1024) { + exynos_flags |= EXYNOS_BO_NONCONTIG; + } + + struct drm_exynos_gem_create arg = {0, }; + + arg.size = (uint64_t)size; + arg.flags = exynos_flags; + if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg, + sizeof(arg))) { + TBM_ERR("Cannot create bo_exynos(flag:%x, size:%d)\n", 
arg.flags, + (unsigned int)arg.size); + free(bo_exynos); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + + bo_exynos->fd = bufmgr_exynos->fd; + bo_exynos->gem = arg.handle; + bo_exynos->size = size; + bo_exynos->flags_tbm = flags; + bo_exynos->flags_exynos = exynos_flags; + bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem); + + if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) { + TBM_ERR("fail init cache state(%d)\n", bo_exynos->name); + free(bo_exynos); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + + pthread_mutex_init(&bo_exynos->mutex, NULL); + + if (bufmgr_exynos->use_dma_fence && !bo_exynos->dmabuf) { + struct drm_prime_handle arg = {0, }; + + arg.handle = bo_exynos->gem; + if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) { + TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem); + free(bo_exynos); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + bo_exynos->dmabuf = arg.fd; + } + + /* add bo_exynos to hash */ + if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0) + TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name); + + TBM_DBG(" bo_exynos:%p, gem:%d(%d), flags:%d(%d), size:%d\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + flags, exynos_flags, + bo_exynos->size); + + if (error) + *error = TBM_ERROR_NONE; + + return (tbm_backend_bo_data *)bo_exynos; +} + +static tbm_backend_bo_data * +tbm_exynos_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error) +{ + tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; + tbm_bo_exynos bo_exynos; + unsigned int gem = 0; + unsigned int name; + int ret; + char buf[STRERR_BUFSIZE]; + + if (bufmgr_exynos == NULL) { + TBM_ERR("bufmgr_data is null\n"); + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return NULL; + } + + /*getting handle from fd*/ + struct drm_prime_handle arg = {0, }; + + arg.fd = key; + arg.flags = 0; + if 
(drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) { + TBM_ERR("Cannot get gem handle from fd:%d (%s)\n", + arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE)); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + gem = arg.handle; + + name = _get_name(bufmgr_exynos->fd, gem); + if (!name) { + TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n", + gem, key, strerror_r(errno, buf, STRERR_BUFSIZE)); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + + ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&bo_exynos); + if (ret == 0) { + if (gem == bo_exynos->gem) { + if (error) + *error = TBM_ERROR_NONE; + return bo_exynos; + } + } + + /* Determine size of bo_exynos. The fd-to-handle ioctl really should + * return the size, but it doesn't. If we have kernel 3.12 or + * later, we can lseek on the prime fd to get the size. Older + * kernels will just fail, in which case we fall back to the + * provided (estimated or guess size). + */ + unsigned int real_size = -1; + struct drm_exynos_gem_info info = {0, }; + + real_size = lseek(key, 0, SEEK_END); + + info.handle = gem; + if (drmCommandWriteRead(bufmgr_exynos->fd, + DRM_EXYNOS_GEM_GET, + &info, + sizeof(struct drm_exynos_gem_info))) { + TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n", + gem, key, strerror_r(errno, buf, STRERR_BUFSIZE)); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + + if (real_size == -1) + real_size = info.size; + + bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos)); + if (!bo_exynos) { + TBM_ERR("bo_exynos:%p fail to allocate the bo_exynos\n", bo_exynos); + if (error) + *error = TBM_ERROR_OUT_OF_MEMORY; + return NULL; + } + bo_exynos->bufmgr_exynos = bufmgr_exynos; + + bo_exynos->fd = bufmgr_exynos->fd; + bo_exynos->gem = gem; + bo_exynos->size = real_size; + bo_exynos->flags_exynos = info.flags; + bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos); + bo_exynos->name = name; + + if 
(!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) { + TBM_ERR("fail init cache state(%d)\n", bo_exynos->name); + free(bo_exynos); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + + /* add bo_exynos to hash */ + if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0) + TBM_ERR("bo_exynos:%p Cannot insert bo_exynos to Hash(%d) from gem:%d, fd:%d\n", + bo_exynos, bo_exynos->name, gem, key); + + TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf, + key, + bo_exynos->flags_tbm, bo_exynos->flags_exynos, + bo_exynos->size); + + if (error) + *error = TBM_ERROR_NONE; + + return (tbm_backend_bo_data *)bo_exynos; +} + +static tbm_backend_bo_data * +tbm_exynos_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error) +{ + tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; + tbm_bo_exynos bo_exynos; + int ret; + + if (bufmgr_exynos == NULL) { + TBM_ERR("bufmgr_data is null\n"); + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return NULL; + } + + ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&bo_exynos); + if (ret == 0) { + if (error) + *error = TBM_ERROR_NONE; + return (tbm_backend_bo_data *)bo_exynos; + } + + struct drm_gem_open arg = {0, }; + struct drm_exynos_gem_info info = {0, }; + + arg.name = key; + if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) { + TBM_ERR("Cannot open gem name=%d\n", key); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + + info.handle = arg.handle; + if (drmCommandWriteRead(bufmgr_exynos->fd, + DRM_EXYNOS_GEM_GET, + &info, + sizeof(struct drm_exynos_gem_info))) { + TBM_ERR("Cannot get gem info=%d\n", key); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + + bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos)); + if (!bo_exynos) { + TBM_ERR("fail to allocate the bo_exynos 
private\n"); + if (error) + *error = TBM_ERROR_OUT_OF_MEMORY; + return NULL; + } + bo_exynos->bufmgr_exynos = bufmgr_exynos; + + bo_exynos->fd = bufmgr_exynos->fd; + bo_exynos->gem = arg.handle; + bo_exynos->size = arg.size; + bo_exynos->flags_exynos = info.flags; + bo_exynos->name = key; + bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos); + + if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) { + TBM_ERR("fail init cache state(%d)\n", bo_exynos->name); + free(bo_exynos); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return NULL; + } + + if (!bo_exynos->dmabuf) { + struct drm_prime_handle arg = {0, }; + + arg.handle = bo_exynos->gem; + if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) { + TBM_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_exynos->gem); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + free(bo_exynos); + return NULL; + } + bo_exynos->dmabuf = arg.fd; + } + + /* add bo_exynos to hash */ + if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0) + TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name); + + TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf, + bo_exynos->flags_tbm, bo_exynos->flags_exynos, + bo_exynos->size); + + if (error) + *error = TBM_ERROR_NONE; + + return (tbm_backend_bo_data *)bo_exynos; +} + +static void +tbm_exynos_bo_free(tbm_backend_bo_data *bo_data) +{ + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + tbm_bo_exynos temp; + tbm_bufmgr_exynos bufmgr_exynos; + char buf[STRERR_BUFSIZE]; + int ret; + + if (!bo_data) + return; + + bufmgr_exynos = bo_exynos->bufmgr_exynos; + if (!bufmgr_exynos) + return; + + TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, size:%d\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf, + bo_exynos->size); + + if (bo_exynos->pBase) { + if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) { + 
TBM_ERR("bo_exynos:%p fail to munmap(%s)\n", + bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE)); + } + } + + /* close dmabuf */ + if (bo_exynos->dmabuf) { + close(bo_exynos->dmabuf); + bo_exynos->dmabuf = 0; + } + + /* delete bo_exynos from hash */ + ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name, (void **)&temp); + if (ret == 0) + drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name); + else + TBM_ERR("Cannot find bo_exynos to Hash(%d), ret=%d\n", bo_exynos->name, ret); + + if (temp != bo_exynos) + TBM_ERR("hashBos probably has several BOs with same name!!!\n"); + + _bo_destroy_cache_state(bufmgr_exynos, bo_exynos); + + /* Free gem handle */ + struct drm_gem_close arg = {0, }; + + memset(&arg, 0, sizeof(arg)); + arg.handle = bo_exynos->gem; + if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) + TBM_ERR("bo_exynos:%p fail to gem close.(%s)\n", + bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE)); + + free(bo_exynos); +} + +static int +tbm_exynos_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error) +{ + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + + if (!bo_exynos) { + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return 0; + } + + if (error) + *error = TBM_ERROR_NONE; + + return bo_exynos->size; +} + +static tbm_bo_memory_type +tbm_exynos_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error) +{ + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + + if (!bo_exynos) { + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return TBM_BO_DEFAULT; + } + + if (error) + *error = TBM_ERROR_NONE; + + return bo_exynos->flags_tbm; +} + +static tbm_bo_handle +tbm_exynos_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error) +{ + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + tbm_bo_handle bo_handle; + + if (!bo_exynos) { + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return (tbm_bo_handle) NULL; + } + + if (!bo_exynos->gem) { + TBM_ERR("Cannot map gem=%d\n", 
bo_exynos->gem); + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return (tbm_bo_handle) NULL; + } + + TBM_DBG("bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf, + bo_exynos->flags_tbm, bo_exynos->flags_exynos, + bo_exynos->size, + STR_DEVICE[device]); + + /*Get mapped bo_handle*/ + bo_handle = _exynos_bo_handle(bo_exynos, device); + if (bo_handle.ptr == NULL) { + TBM_ERR("Cannot get handle: gem:%d, device:%d\n", + bo_exynos->gem, device); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return (tbm_bo_handle) NULL; + } + + if (error) + *error = TBM_ERROR_NONE; + + return bo_handle; +} + +static tbm_bo_handle +tbm_exynos_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, + tbm_bo_access_option opt, tbm_error_e *error) +{ + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + tbm_bo_handle bo_handle; + tbm_bufmgr_exynos bufmgr_exynos; + + if (!bo_exynos) { + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return (tbm_bo_handle) NULL; + } + + bufmgr_exynos = bo_exynos->bufmgr_exynos; + if (!bufmgr_exynos) { + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return (tbm_bo_handle) NULL; + } + + if (!bo_exynos->gem) { + TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem); + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return (tbm_bo_handle) NULL; + } + + TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, %s, %s\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf, + STR_DEVICE[device], + STR_OPT[opt]); + + /*Get mapped bo_handle*/ + bo_handle = _exynos_bo_handle(bo_exynos, device); + if (bo_handle.ptr == NULL) { + TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n", + bo_exynos->gem, device, opt); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return (tbm_bo_handle) NULL; + } + + if (bo_exynos->map_cnt == 0) + _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt); + + bo_exynos->last_map_device = device; + + 
bo_exynos->map_cnt++; + + if (error) + *error = TBM_ERROR_NONE; + + return bo_handle; +} + +static tbm_error_e +tbm_exynos_bo_unmap(tbm_backend_bo_data *bo_data) +{ + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + tbm_bufmgr_exynos bufmgr_exynos; + + if (!bo_exynos) + return TBM_ERROR_INVALID_PARAMETER; + + bufmgr_exynos = bo_exynos->bufmgr_exynos; + if (!bufmgr_exynos) + return TBM_ERROR_INVALID_PARAMETER; + + if (!bo_exynos->gem) + return TBM_ERROR_INVALID_PARAMETER; + + bo_exynos->map_cnt--; + + if (bo_exynos->map_cnt == 0) + _bo_save_cache_state(bufmgr_exynos, bo_exynos); + + /* check whether cache control do or not */ + if (g_enable_cache_ctrl && bo_exynos->last_map_device == TBM_DEVICE_CPU) + _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL); + + bo_exynos->last_map_device = -1; + + TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf); + + return TBM_ERROR_NONE; +} + +static tbm_error_e +tbm_exynos_bo_lock(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, + tbm_bo_access_option opt) +{ +#ifndef ALWAYS_BACKEND_CTRL + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + tbm_bufmgr_exynos bufmgr_exynos; + struct dma_buf_fence fence; + struct flock filelock; + int ret = 0; + char buf[STRERR_BUFSIZE]; + + if (!bo_exynos) + return TBM_ERROR_INVALID_PARAMETER; + + bufmgr_exynos = bo_exynos->bufmgr_exynos; + if (!bufmgr_exynos) + return TBM_ERROR_INVALID_PARAMETER; + + if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) { + TBM_DBG("Not support device type,\n"); + return TBM_ERROR_INVALID_OPERATION; + } + + memset(&fence, 0, sizeof(struct dma_buf_fence)); + + /* Check if the given type is valid or not. 
*/ + if (opt & TBM_OPTION_WRITE) { + if (device == TBM_DEVICE_3D) + fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA; + } else if (opt & TBM_OPTION_READ) { + if (device == TBM_DEVICE_3D) + fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA; + } else { + TBM_ERR("Invalid argument\n"); + return TBM_ERROR_INVALID_PARAMETER; + } + + /* Check if the tbm manager supports dma fence or not. */ + if (!bufmgr_exynos->use_dma_fence) { + TBM_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE)); + return TBM_ERROR_INVALID_OPERATION; + + } + + if (device == TBM_DEVICE_3D) { + ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence); + if (ret < 0) { + TBM_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE)); + return TBM_ERROR_INVALID_OPERATION; + } + } else { + if (opt & TBM_OPTION_WRITE) + filelock.l_type = F_WRLCK; + else + filelock.l_type = F_RDLCK; + + filelock.l_whence = SEEK_CUR; + filelock.l_start = 0; + filelock.l_len = 0; + + if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock)) + return TBM_ERROR_INVALID_OPERATION; + } + + pthread_mutex_lock(&bo_exynos->mutex); + + if (device == TBM_DEVICE_3D) { + int i; + + for (i = 0; i < DMA_FENCE_LIST_MAX; i++) { + if (bo_exynos->dma_fence[i].ctx == 0) { + bo_exynos->dma_fence[i].type = fence.type; + bo_exynos->dma_fence[i].ctx = fence.ctx; + break; + } + } + + if (i == DMA_FENCE_LIST_MAX) { + /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/ + TBM_ERR("fence list is full\n"); + } + } + + pthread_mutex_unlock(&bo_exynos->mutex); + + TBM_DBG("DMABUF_IOCTL_GET_FENCE! 
bo_exynos:%p, gem:%d(%d), fd:%ds\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf); +#endif /* ALWAYS_BACKEND_CTRL */ + + return TBM_ERROR_NONE; +} + +static tbm_error_e +tbm_exynos_bo_unlock(tbm_backend_bo_data *bo_data) +{ +#ifndef ALWAYS_BACKEND_CTRL + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + struct dma_buf_fence fence; + struct flock filelock; + unsigned int dma_type = 0; + int ret = 0; + char buf[STRERR_BUFSIZE]; + + bufmgr_exynos = bo_exynos->bufmgr_exynos; + if (!bufmgr_exynos) + return TBM_ERROR_INVALID_PARAMETER; + + if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA) + dma_type = 1; + + if (!bo_exynos->dma_fence[0].ctx && dma_type) { + TBM_DBG("FENCE not support or ignored,\n"); + return TBM_ERROR_INVALID_OPERATION; + } + + if (!bo_exynos->dma_fence[0].ctx && dma_type) { + TBM_DBG("device type is not 3D/CPU,\n"); + return TBM_ERROR_INVALID_OPERATION; + } + + pthread_mutex_lock(&bo_exynos->mutex); + + if (dma_type) { + fence.type = bo_exynos->dma_fence[0].type; + fence.ctx = bo_exynos->dma_fence[0].ctx; + int i; + + for (i = 1; i < DMA_FENCE_LIST_MAX; i++) { + bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type; + bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx; + } + bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0; + bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0; + } + pthread_mutex_unlock(&bo_exynos->mutex); + + if (dma_type) { + ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence); + if (ret < 0) { + TBM_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE)); + return TBM_ERROR_INVALID_OPERATION; + } + } else { + filelock.l_type = F_UNLCK; + filelock.l_whence = SEEK_CUR; + filelock.l_start = 0; + filelock.l_len = 0; + + if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock)) + return TBM_ERROR_INVALID_OPERATION; + } + + TBM_DBG("DMABUF_IOCTL_PUT_FENCE! 
bo_exynos:%p, gem:%d(%d), fd:%ds\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf); +#endif /* ALWAYS_BACKEND_CTRL */ + + return TBM_ERROR_NONE; +} +static tbm_fd +tbm_exynos_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error) +{ + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + int ret; + char buf[STRERR_BUFSIZE]; + + if (!bo_exynos) { + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return -1; + } + + struct drm_prime_handle arg = {0, }; + + arg.handle = bo_exynos->gem; + ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg); + if (ret) { + TBM_ERR("bo_exynos:%p Cannot dmabuf=%d (%s)\n", + bo_exynos, bo_exynos->gem, strerror_r(errno, buf, STRERR_BUFSIZE)); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + return (tbm_fd) ret; + } + + TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf, + arg.fd, + bo_exynos->flags_tbm, bo_exynos->flags_exynos, + bo_exynos->size); + + if (error) + *error = TBM_ERROR_NONE; + + return (tbm_fd)arg.fd; +} + +static tbm_key +tbm_exynos_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error) +{ + tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; + + if (!bo_exynos) { + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return 0; + } + + if (!bo_exynos->name) { + bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem); + if (!bo_exynos->name) { + TBM_ERR("error Cannot get name\n"); + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return 0; + } + } + + TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n", + bo_exynos, + bo_exynos->gem, bo_exynos->name, + bo_exynos->dmabuf, + bo_exynos->flags_tbm, bo_exynos->flags_exynos, + bo_exynos->size); + + if (error) + *error = TBM_ERROR_NONE; + + return (tbm_key)bo_exynos->name; +} + +static void +tbm_exynos_deinit(tbm_backend_bufmgr_data *bufmgr_data) +{ + tbm_bufmgr_exynos bufmgr_exynos = 
(tbm_bufmgr_exynos)bufmgr_data; + tbm_bufmgr bufmgr; + tbm_error_e error; + unsigned long key; + void *value; + + TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL); + + bufmgr = bufmgr_exynos->bufmgr; + + tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_exynos->bufmgr_func); + tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_exynos->bo_func); + + if (bufmgr_exynos->hashBos) { + while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) { + free(value); + drmHashDelete(bufmgr_exynos->hashBos, key); + } + + drmHashDestroy(bufmgr_exynos->hashBos); + bufmgr_exynos->hashBos = NULL; + } + + _bufmgr_deinit_cache_state(bufmgr_exynos); + + if (bufmgr_exynos->bind_display) + tbm_drm_helper_wl_auth_server_deinit(); + + if (bufmgr_exynos->device_name) + free(bufmgr_exynos->device_name); + + if (tbm_backend_bufmgr_query_display_server(bufmgr, &error)) + tbm_drm_helper_unset_tbm_master_fd(); + else + tbm_drm_helper_unset_fd(); + + close(bufmgr_exynos->fd); + + free(bufmgr_exynos); +} + +static tbm_backend_bufmgr_data * +tbm_exynos_init(tbm_bufmgr bufmgr, tbm_error_e *error) +{ + tbm_bufmgr_exynos bufmgr_exynos = NULL; + tbm_backend_bufmgr_func *bufmgr_func = NULL; + tbm_backend_bo_func *bo_func = NULL; + int fp; + tbm_error_e err; + int set_master = 0; + char *value = NULL; + + if (!bufmgr) { + TBM_ERR("bufmgr is null.\n"); + if (error) + *error = TBM_ERROR_INVALID_PARAMETER; + return NULL; + } + + bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos)); + if (!bufmgr_exynos) { + TBM_ERR("fail to alloc bufmgr_exynos!\n"); + if (error) + *error = TBM_ERROR_OUT_OF_MEMORY; + return NULL; + } + + /* check the master_fd which already had opened */ + bufmgr_exynos->fd = tbm_drm_helper_get_master_fd(); + if (bufmgr_exynos->fd < 0) { + bufmgr_exynos->fd = _tbm_exynos_open_drm(); + if (bufmgr_exynos->fd < 0) { + TBM_ERR("fail to open drm!\n"); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + goto fail_open_drm; + } + + if (drmIsMaster(bufmgr_exynos->fd)) { + 
tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd); + set_master = 1; + + bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd); + if (!bufmgr_exynos->device_name) { + TBM_ERR("fail to get device name!\n"); + tbm_drm_helper_unset_tbm_master_fd(); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + goto fail_get_device_name; + } + TBM_INFO("This is Master FD(%d) from open_drm.", bufmgr_exynos->fd); + } else { + /* close the fd and get the authenticated fd from the master fd */ + close(bufmgr_exynos->fd); +#ifdef USE_RENDER_NODE + bufmgr_exynos->fd = _get_render_node(0); +#else + bufmgr_exynos->fd = -1; +#endif + /* get the authenticated drm fd from the master fd */ + if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) { + TBM_ERR("fail to get auth drm info!\n"); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + goto fail_get_auth_info; + } + TBM_INFO("This is Authenticated FD(%d)", bufmgr_exynos->fd); + } + } else { + bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd); + if (!bufmgr_exynos->device_name) { + TBM_ERR("fail to get device name!\n"); + tbm_drm_helper_unset_tbm_master_fd(); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + goto fail_get_device_name; + } + TBM_INFO("This is Master FD from tbm_drm_helper_get_master_fd(%d)", bufmgr_exynos->fd); + } + tbm_drm_helper_set_fd(bufmgr_exynos->fd); + + //Check if the tbm manager supports dma fence or not. + fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY); + if (fp != -1) { + char buf[1]; + int length = read(fp, buf, 1); + + if (length == 1 && buf[0] == '1') + bufmgr_exynos->use_dma_fence = 1; + + close(fp); + } + + /* get the model name from the capi-system-info. + * The alignment_plane and alignment_pitch_rgb is different accoring to the target. + * There will be the stride issue when the right alignment_plane and alignment_pitch_rgb + * is not set to the backend. 
+ */ + if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) { + TBM_ERR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n"); + TBM_ERR("May not set the right value on libtbm-exynos backend.\n"); + } else { + if (!strncmp(value, "TW1", 4)) { + g_tbm_surface_alignment_plane = 8; + g_tbm_surface_alignment_pitch_rgb = 8; + g_enable_cache_ctrl = 1; + } else { + g_tbm_surface_alignment_plane = 64; + g_tbm_surface_alignment_pitch_rgb = 64; + } + } + + free(value); + + if (!_bufmgr_init_cache_state(bufmgr_exynos)) { + TBM_ERR("fail to init bufmgr cache state\n"); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + goto fail_init_cache_state; + } + + /*Create Hash Table*/ + bufmgr_exynos->hashBos = drmHashCreate(); + + /* alloc and register bufmgr_funcs */ + bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err); + if (!bufmgr_func) { + TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err); + if (error) + *error = TBM_ERROR_OUT_OF_MEMORY; + goto fail_alloc_bufmgr_func; + } + + bufmgr_func->bufmgr_get_capabilities = tbm_exynos_bufmgr_get_capabilities; + //if (tbm_backend_bufmgr_query_display_server(bufmgr, &err) && !_check_render_node()) + bufmgr_func->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display; + bufmgr_func->bufmgr_get_supported_formats = tbm_exynos_bufmgr_get_supported_formats; + bufmgr_func->bufmgr_get_plane_data = tbm_exynos_bufmgr_get_plane_data; + bufmgr_func->bufmgr_alloc_bo = tbm_exynos_bufmgr_alloc_bo; + bufmgr_func->bufmgr_alloc_bo_with_format = NULL; + bufmgr_func->bufmgr_import_fd = tbm_exynos_bufmgr_import_fd; + bufmgr_func->bufmgr_import_key = tbm_exynos_bufmgr_import_key; + + err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func); + if (err != TBM_ERROR_NONE) { + TBM_ERR("fail to register bufmgr_func! 
err(%d)\n", err); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + goto fail_register_bufmgr_func; + } + bufmgr_exynos->bufmgr_func = bufmgr_func; + + /* alloc and register bo_funcs */ + bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err); + if (!bo_func) { + TBM_ERR("fail to alloc bo_func! err(%d)\n", err); + if (error) + *error = TBM_ERROR_OUT_OF_MEMORY; + goto fail_alloc_bo_func; + } + + bo_func->bo_free = tbm_exynos_bo_free; + bo_func->bo_get_size = tbm_exynos_bo_get_size; + bo_func->bo_get_memory_types = tbm_exynos_bo_get_memory_type; + bo_func->bo_get_handle = tbm_exynos_bo_get_handle; + bo_func->bo_map = tbm_exynos_bo_map; + bo_func->bo_unmap = tbm_exynos_bo_unmap; + bo_func->bo_lock = tbm_exynos_bo_lock; + bo_func->bo_unlock = tbm_exynos_bo_unlock; + bo_func->bo_export_fd = tbm_exynos_bo_export_fd; + bo_func->bo_export_key = tbm_exynos_bo_export_key; + + err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func); + if (err != TBM_ERROR_NONE) { + TBM_ERR("fail to register bo_func! 
err(%d)\n", err); + if (error) + *error = TBM_ERROR_INVALID_OPERATION; + goto fail_register_bo_func; + } + bufmgr_exynos->bo_func = bo_func; + + TBM_DBG("drm_fd:%d\n", bufmgr_exynos->fd); + + if (error) + *error = TBM_ERROR_NONE; + + bufmgr_exynos->bufmgr = bufmgr; + + return (tbm_backend_bufmgr_data *)bufmgr_exynos; + +fail_register_bo_func: + tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func); +fail_alloc_bo_func: +fail_register_bufmgr_func: + tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func); +fail_alloc_bufmgr_func: + _bufmgr_deinit_cache_state(bufmgr_exynos); + if (bufmgr_exynos->hashBos) + drmHashDestroy(bufmgr_exynos->hashBos); +fail_init_cache_state: + if (set_master) + tbm_drm_helper_unset_tbm_master_fd(); + tbm_drm_helper_unset_fd(); +fail_get_device_name: + if (bufmgr_exynos->fd >= 0) + close(bufmgr_exynos->fd); +fail_get_auth_info: +fail_open_drm: + free(bufmgr_exynos); + return NULL; +} + +tbm_backend_module tbm_backend_module_data = { + "exynos", + "Samsung", + TBM_BACKEND_ABI_VERSION_3_0, + tbm_exynos_init, + tbm_exynos_deinit +}; diff --git a/src/libtbm-exynos/tbm_bufmgr_tgl.h b/src/libtbm-exynos/tbm_bufmgr_tgl.h new file mode 100644 index 0000000..b442f73 --- /dev/null +++ b/src/libtbm-exynos/tbm_bufmgr_tgl.h @@ -0,0 +1,175 @@ +/************************************************************************** + * + * libtbm + * + * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved. 
+ * + * Contact: SooChan Lim , Sangjin Lee + * Boram Park , Changyeon Lee + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * **************************************************************************/ + +#ifndef __TBM_BUFMGR_TGL_H__ +#define __TBM_BUFMGR_TGL_H__ + +#include + +static char tgl_devfile[] = "/dev/slp_global_lock"; +static char tgl_devfile1[] = "/dev/tgl"; + +#define TGL_IOCTL_BASE 0x32 +#define TGL_IO(nr) _IO(TGL_IOCTL_BASE, nr) +#define TGL_IOR(nr, type) _IOR(TGL_IOCTL_BASE, nr, type) +#define TGL_IOW(nr, type) _IOW(TGL_IOCTL_BASE, nr, type) +#define TGL_IOWR(nr, type) _IOWR(TGL_IOCTL_BASE, nr, type) + +/** + * struct tgl_ver_data - tgl version data structure + * @major: major version + * @minor: minor version + */ +struct tgl_ver_data { + unsigned int major; + unsigned int minor; +}; + +/** + * struct tgl_reg_data - tgl data structure + * @key: lookup key + * @timeout_ms: timeout value for waiting event + */ +struct tgl_reg_data { + unsigned int key; + unsigned int timeout_ms; +}; + +enum tgl_type_data { + TGL_TYPE_NONE = 0, + TGL_TYPE_READ = (1 << 0), + TGL_TYPE_WRITE = (1 << 1), +}; + +/** + * struct tgl_lock_data - tgl lock data structure + * @key: lookup key + * @type: lock type that is in tgl_type_data + */ +struct tgl_lock_data { + unsigned int key; + enum tgl_type_data type; +}; + +enum tgl_status_data { + TGL_STATUS_UNLOCKED, + TGL_STATUS_LOCKED, +}; + +/** + * struct tgl_usr_data - tgl user data structure + * @key: lookup key + * @data1: user data 1 + * @data2: user data 2 + * @status: lock status that is in tgl_status_data + */ +struct tgl_usr_data { + unsigned int key; + unsigned int data1; + unsigned int data2; + enum tgl_status_data status; +}; + +enum { + _TGL_GET_VERSION, + _TGL_REGISTER, + _TGL_UNREGISTER, + _TGL_LOCK, + _TGL_UNLOCK, + _TGL_SET_DATA, + _TGL_GET_DATA, +}; + +/* get version information */ +#define TGL_IOCTL_GET_VERSION TGL_IOR(_TGL_GET_VERSION, struct tgl_ver_data) +/* register key */ +#define TGL_IOCTL_REGISTER TGL_IOW(_TGL_REGISTER, struct tgl_reg_data) +/* unregister key */ +#define TGL_IOCTL_UNREGISTER TGL_IOW(_TGL_UNREGISTER, 
struct tgl_reg_data) +/* lock with key */ +#define TGL_IOCTL_LOCK TGL_IOW(_TGL_LOCK, struct tgl_lock_data) +/* unlock with key */ +#define TGL_IOCTL_UNLOCK TGL_IOW(_TGL_UNLOCK, struct tgl_lock_data) +/* set user data with key */ +#define TGL_IOCTL_SET_DATA TGL_IOW(_TGL_SET_DATA, struct tgl_usr_data) +/* get user data with key */ +#define TGL_IOCTL_GET_DATA TGL_IOR(_TGL_GET_DATA, struct tgl_usr_data) + +/* indicate cache units. */ +enum e_drm_exynos_gem_cache_sel { + EXYNOS_DRM_L1_CACHE = 1 << 0, + EXYNOS_DRM_L2_CACHE = 1 << 1, + EXYNOS_DRM_ALL_CORES = 1 << 2, + EXYNOS_DRM_ALL_CACHES = EXYNOS_DRM_L1_CACHE | + EXYNOS_DRM_L2_CACHE, + EXYNOS_DRM_ALL_CACHES_CORES = EXYNOS_DRM_L1_CACHE | + EXYNOS_DRM_L2_CACHE | + EXYNOS_DRM_ALL_CORES, + EXYNOS_DRM_CACHE_SEL_MASK = EXYNOS_DRM_ALL_CACHES_CORES +}; + +/* indicate cache operation types. */ +enum e_drm_exynos_gem_cache_op { + EXYNOS_DRM_CACHE_INV_ALL = 1 << 3, + EXYNOS_DRM_CACHE_INV_RANGE = 1 << 4, + EXYNOS_DRM_CACHE_CLN_ALL = 1 << 5, + EXYNOS_DRM_CACHE_CLN_RANGE = 1 << 6, + EXYNOS_DRM_CACHE_FSH_ALL = EXYNOS_DRM_CACHE_INV_ALL | + EXYNOS_DRM_CACHE_CLN_ALL, + EXYNOS_DRM_CACHE_FSH_RANGE = EXYNOS_DRM_CACHE_INV_RANGE | + EXYNOS_DRM_CACHE_CLN_RANGE, + EXYNOS_DRM_CACHE_OP_MASK = EXYNOS_DRM_CACHE_FSH_ALL | + EXYNOS_DRM_CACHE_FSH_RANGE +}; + +/** + * A structure for cache operation. + * + * @usr_addr: user space address. + * P.S. it SHOULD BE user space. + * @size: buffer size for cache operation. + * @flags: select cache unit and cache operation. + * @gem_handle: a handle to a gem object. + * this gem handle is needed for cache range operation to L2 cache. 
+ */ +struct drm_exynos_gem_cache_op { + uint64_t usr_addr; + unsigned int size; + unsigned int flags; + unsigned int gem_handle; +}; + +#define DRM_EXYNOS_GEM_CACHE_OP 0x12 + +#define DRM_IOCTL_EXYNOS_GEM_CACHE_OP DRM_IOWR(DRM_COMMAND_BASE + \ + DRM_EXYNOS_GEM_CACHE_OP, struct drm_exynos_gem_cache_op) + +#endif /* __TBM_BUFMGR_TGL_H__ */ diff --git a/src/tbm_bufmgr_exynos.c b/src/tbm_bufmgr_exynos.c deleted file mode 100644 index 0e1e3ca..0000000 --- a/src/tbm_bufmgr_exynos.c +++ /dev/null @@ -1,2205 +0,0 @@ -/************************************************************************** - -libtbm_exynos - -Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved. - -Contact: SooChan Lim , Sangjin Lee - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sub license, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. -IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -**************************************************************************/ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "tbm_bufmgr_tgl.h" - -#define TBM_COLOR_FORMAT_COUNT 4 - -#define EXYNOS_DRM_NAME "exynos" - -#define STRERR_BUFSIZE 128 - -#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1)) -#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) -#define MAX(a, b) ((a) > (b) ? (a) : (b)) - -static unsigned int g_tbm_surface_alignment_plane; -static unsigned int g_tbm_surface_alignment_pitch_rgb; - -#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096) -#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16) - -#define SZ_1M 0x00100000 -#define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M) -#define S5P_FIMV_D_ALIGN_PLANE_SIZE 64 -#define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16 -#define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16 -#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024) -#define S5P_FIMV_NV12MT_HALIGN 128 -#define S5P_FIMV_NV12MT_VALIGN 64 - -/* cache control at backend */ -static unsigned int g_enable_cache_ctrl = 0; - -struct dma_buf_info { - unsigned long size; - unsigned int fence_supported; - unsigned int padding; -}; - -#define DMA_BUF_ACCESS_READ 0x1 -#define DMA_BUF_ACCESS_WRITE 0x2 -#define DMA_BUF_ACCESS_DMA 0x4 -#define DMA_BUF_ACCESS_MAX 0x8 - -#define DMA_FENCE_LIST_MAX 5 - -struct dma_buf_fence { - unsigned long ctx; - unsigned int type; -}; - -#define DMABUF_IOCTL_BASE 'F' -#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type) - -#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info) -#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence) -#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence) - -/* tgl key values */ -#define GLOBAL_KEY ((unsigned int)(-1)) -/* TBM_CACHE */ -#define TBM_EXYNOS_CACHE_INV 
0x01 /**< cache invalidate */ -#define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */ -#define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */ -#define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */ -#define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */ - -enum { - DEVICE_NONE = 0, - DEVICE_CA, /* cache aware device */ - DEVICE_CO /* cache oblivious device */ -}; - -typedef union _tbm_bo_cache_state tbm_bo_cache_state; - -union _tbm_bo_cache_state { - unsigned int val; - struct { - unsigned int cntFlush:16; /*Flush all index for sync */ - unsigned int isCached:1; - unsigned int isDirtied:2; - } data; -}; - -typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos; -typedef struct _tbm_bo_exynos *tbm_bo_exynos; - -/* tbm buffor object for exynos */ -struct _tbm_bo_exynos { - int fd; - - unsigned int name; /* FLINK ID */ - - unsigned int gem; /* GEM Handle */ - - unsigned int dmabuf; /* fd for dmabuf */ - - void *pBase; /* virtual address */ - - unsigned int size; - - unsigned int flags_exynos; - unsigned int flags_tbm; - - pthread_mutex_t mutex; - struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX]; - int device; - int opt; - - tbm_bo_cache_state cache_state; - unsigned int map_cnt; - int last_map_device; - - tbm_bufmgr_exynos bufmgr_exynos; -}; - -/* tbm bufmgr private for exynos */ -struct _tbm_bufmgr_exynos { - int fd; - int isLocal; - void *hashBos; - - int use_dma_fence; - - int tgl_fd; - - char *device_name; - void *bind_display; - - tbm_backend_bufmgr_func *bufmgr_func; - tbm_backend_bo_func *bo_func; - - tbm_bufmgr bufmgr; -}; - -const static char *STR_DEVICE[] = { - "DEF", - "CPU", - "2D", - "3D", - "MM" -}; - -const static char *STR_OPT[] = { - "NONE", - "RD", - "WR", - "RDWR" -}; - -static int _get_render_node(int is_master); - -static inline int -_tgl_init(int fd, unsigned int key) -{ - struct tgl_reg_data data; - int err; - char buf[STRERR_BUFSIZE]; - - data.key = key; - 
data.timeout_ms = 1000; - - err = ioctl(fd, TGL_IOCTL_REGISTER, &data); - if (err) { - TBM_ERR("error(%s) key:%d\n", - strerror_r(errno, buf, STRERR_BUFSIZE), key); - return 0; - } - - return 1; -} - -static inline int -_tgl_destroy(int fd, unsigned int key) -{ - struct tgl_reg_data data; - int err; - char buf[STRERR_BUFSIZE]; - - data.key = key; - err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data); - if (err) { - TBM_ERR("error(%s) key:%d\n", - strerror_r(errno, buf, STRERR_BUFSIZE), key); - return 0; - } - - return 1; -} - -static inline int -_tgl_lock(int fd, unsigned int key, int opt) -{ - struct tgl_lock_data data; - enum tgl_type_data tgl_type; - int err; - char buf[STRERR_BUFSIZE]; - - switch (opt) { - case TBM_OPTION_READ: - tgl_type = TGL_TYPE_READ; - break; - case TBM_OPTION_WRITE: - tgl_type = TGL_TYPE_WRITE; - break; - default: - tgl_type = TGL_TYPE_NONE; - break; - } - - data.key = key; - data.type = tgl_type; - - err = ioctl(fd, TGL_IOCTL_LOCK, &data); - if (err) { - TBM_ERR("error(%s) key:%d opt:%d\n", - strerror_r(errno, buf, STRERR_BUFSIZE), key, opt); - return 0; - } - - return 1; -} - -static inline int -_tgl_unlock(int fd, unsigned int key) -{ - struct tgl_lock_data data; - int err; - char buf[STRERR_BUFSIZE]; - - data.key = key; - data.type = TGL_TYPE_NONE; - - err = ioctl(fd, TGL_IOCTL_UNLOCK, &data); - if (err) { - TBM_ERR("error(%s) key:%d\n", - strerror_r(errno, buf, STRERR_BUFSIZE), key); - return 0; - } - - return 1; -} - -static inline int -_tgl_set_data(int fd, unsigned int key, unsigned int val) -{ - struct tgl_usr_data data; - int err; - char buf[STRERR_BUFSIZE]; - - data.key = key; - data.data1 = val; - - err = ioctl(fd, TGL_IOCTL_SET_DATA, &data); - if (err) { - TBM_ERR("error(%s) key:%d\n", - strerror_r(errno, buf, STRERR_BUFSIZE), key); - return 0; - } - - return 1; -} - -static inline unsigned int -_tgl_get_data(int fd, unsigned int key) -{ - struct tgl_usr_data data = { 0, }; - int err; - char buf[STRERR_BUFSIZE]; - - data.key = key; 
- - err = ioctl(fd, TGL_IOCTL_GET_DATA, &data); - if (err) { - TBM_ERR("error(%s) key:%d\n", - strerror_r(errno, buf, STRERR_BUFSIZE), key); - return 0; - } - - return data.data1; -} - -static int -_exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags) -{ - TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); - - /* cache flush is managed by kernel side when using dma-fence. */ - if (bufmgr_exynos->use_dma_fence) - return 1; - - struct drm_exynos_gem_cache_op cache_op = {0, }; - int ret; - - /* if bo_exynos is null, do cache_flush_all */ - if (bo_exynos) { - cache_op.flags = 0; - cache_op.usr_addr = (uint64_t)((uintptr_t)bo_exynos->pBase); - cache_op.size = bo_exynos->size; - } else { - flags = TBM_EXYNOS_CACHE_FLUSH_ALL; - cache_op.flags = 0; - cache_op.usr_addr = 0; - cache_op.size = 0; - } - - if (flags & TBM_EXYNOS_CACHE_INV) { - if (flags & TBM_EXYNOS_CACHE_ALL) - cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL; - else - cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE; - } - - if (flags & TBM_EXYNOS_CACHE_CLN) { - if (flags & TBM_EXYNOS_CACHE_ALL) - cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL; - else - cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE; - } - - if (flags & TBM_EXYNOS_CACHE_ALL) - cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES; - - ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op, - sizeof(cache_op)); - if (ret) { - TBM_ERR("fail to flush the cache.\n"); - return 0; - } - - return 1; -} - -static int -_bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import) -{ - /* check whether cache control do or not */ - if (!g_enable_cache_ctrl) - return 1; - - TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); - TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0); - - if (bufmgr_exynos->use_dma_fence) - return 1; - - _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name); - - tbm_bo_cache_state cache_state; - - if (import == 0) { - cache_state.data.isDirtied = DEVICE_NONE; - 
cache_state.data.isCached = 0; - cache_state.data.cntFlush = 0; - - _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val); - } - - return 1; -} - -static int -_bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt) -{ - /* check whether cache control do or not */ - if (!g_enable_cache_ctrl) - return 1; - - TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); - TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0); - - if (bufmgr_exynos->use_dma_fence) - return 1; - - char need_flush = 0; - unsigned short cntFlush = 0; - - if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE)) - return 1; - - /* get cache state of a bo_exynos */ - bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd, - bo_exynos->name); - - /* get global cache flush count */ - cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY); - - if (device == TBM_DEVICE_CPU) { - if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO && - bo_exynos->cache_state.data.isCached) - need_flush = TBM_EXYNOS_CACHE_INV; - - bo_exynos->cache_state.data.isCached = 1; - if (opt & TBM_OPTION_WRITE) - bo_exynos->cache_state.data.isDirtied = DEVICE_CA; - else { - if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA) - bo_exynos->cache_state.data.isDirtied = DEVICE_NONE; - } - } else { - if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA && - bo_exynos->cache_state.data.isCached && - bo_exynos->cache_state.data.cntFlush == cntFlush) - need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL; - - if (opt & TBM_OPTION_WRITE) - bo_exynos->cache_state.data.isDirtied = DEVICE_CO; - else { - if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO) - bo_exynos->cache_state.data.isDirtied = DEVICE_NONE; - } - } - - if (need_flush) { - if (need_flush & TBM_EXYNOS_CACHE_ALL) - _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush)); - - /* call cache flush */ - _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush); - - 
TBM_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n", - bo_exynos->cache_state.data.isCached, - bo_exynos->cache_state.data.isDirtied, - need_flush, - cntFlush); - } - - return 1; -} - -static int -_bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos) -{ - /* check whether cache control do or not */ - if (!g_enable_cache_ctrl) - return 1; - - TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); - TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0); - - if (bufmgr_exynos->use_dma_fence) - return 1; - - unsigned short cntFlush = 0; - - /* get global cache flush count */ - cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY); - - /* save global cache flush count */ - bo_exynos->cache_state.data.cntFlush = cntFlush; - _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, - bo_exynos->cache_state.val); - - return 1; -} - -static void -_bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos) -{ - /* check whether cache control do or not */ - if (!g_enable_cache_ctrl) - return; - - TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL); - TBM_RETURN_IF_FAIL(bo_exynos != NULL); - - if (bufmgr_exynos->use_dma_fence) - return ; - - _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name); -} - -static int -_bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos) -{ - /* check whether cache control do or not */ - if (!g_enable_cache_ctrl) - return 1; - - TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0); - - if (bufmgr_exynos->use_dma_fence) - return 1; - - /* open tgl fd for saving cache flush data */ - bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR); - - if (bufmgr_exynos->tgl_fd < 0) { - bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR); - if (bufmgr_exynos->tgl_fd < 0) { - TBM_ERR("fail to open global_lock:%s\n", - tgl_devfile1); - return 0; - } - } - - if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) { - TBM_ERR("fail to initialize the tgl\n"); - close(bufmgr_exynos->tgl_fd); - return 0; - } - - return 1; -} - -static 
void -_bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos) -{ - /* check whether cache control do or not */ - if (!g_enable_cache_ctrl) - return; - - TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL); - - if (bufmgr_exynos->use_dma_fence) - return; - - if (bufmgr_exynos->tgl_fd >= 0) - close(bufmgr_exynos->tgl_fd); -} - -static int -_tbm_exynos_open_drm() -{ - int fd = -1; - - fd = drmOpen(EXYNOS_DRM_NAME, NULL); - if (fd < 0) { - TBM_ERR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME); - } - - if (fd < 0) { - fd = _get_render_node(1); - if (fd < 0) { - TBM_ERR("cannot find render_node\n"); - } - } - - return fd; -} - -static int -_get_render_node(int is_master) -{ - struct udev *udev = NULL; - struct udev_enumerate *e = NULL; - struct udev_list_entry *entry = NULL; - struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL; - const char *filepath; - struct stat s; - int fd = -1; - int ret; - - TBM_DBG("search drm-device by udev(is_master:%d)\n", is_master); - - udev = udev_new(); - if (!udev) { - TBM_ERR("udev_new() failed.\n"); - return -1; - } - - e = udev_enumerate_new(udev); - udev_enumerate_add_match_subsystem(e, "drm"); - if (is_master) - udev_enumerate_add_match_sysname(e, "card[0-9]*"); - else - udev_enumerate_add_match_sysname(e, "renderD[0-9]*"); - udev_enumerate_scan_devices(e); - - udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) { - device = udev_device_new_from_syspath(udev_enumerate_get_udev(e), - udev_list_entry_get_name(entry)); - device_parent = udev_device_get_parent(device); - /* Not need unref device_parent. 
device_parent and device have same refcnt */ - if (device_parent) { - if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) { - drm_device = device; - TBM_DBG("Found render device: '%s' (%s)\n", - udev_device_get_syspath(drm_device), - udev_device_get_sysname(device_parent)); - break; - } - } - udev_device_unref(device); - } - - udev_enumerate_unref(e); - - if (!drm_device) { - TBM_ERR("failed to find device\n"); - udev_unref(udev); - return -1; - } - - /* Get device file path. */ - filepath = udev_device_get_devnode(drm_device); - if (!filepath) { - TBM_ERR("udev_device_get_devnode() failed.\n"); - udev_device_unref(drm_device); - udev_unref(udev); - return -1; - } - - /* Open DRM device file and check validity. */ - fd = open(filepath, O_RDWR | O_CLOEXEC); - if (fd < 0) { - TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n"); - udev_device_unref(drm_device); - udev_unref(udev); - return -1; - } - - ret = fstat(fd, &s); - if (ret) { - TBM_ERR("fstat() failed %s.\n"); - udev_device_unref(drm_device); - udev_unref(udev); - close(fd); - return -1; - } - - udev_device_unref(drm_device); - udev_unref(udev); - - return fd; -} - -static unsigned int -_get_exynos_flag_from_tbm(unsigned int ftbm) -{ - unsigned int flags = 0; - - if (ftbm & TBM_BO_SCANOUT) - flags |= EXYNOS_BO_CONTIG; - else - flags |= EXYNOS_BO_NONCONTIG; - - if (ftbm & TBM_BO_WC) - flags |= EXYNOS_BO_WC; - else if (ftbm & TBM_BO_NONCACHABLE) - flags |= EXYNOS_BO_NONCACHABLE; - else - flags |= EXYNOS_BO_CACHABLE; - - return flags; -} - -static unsigned int -_get_tbm_flag_from_exynos(unsigned int fexynos) -{ - unsigned int flags = 0; - - if (fexynos & EXYNOS_BO_NONCONTIG) - flags |= TBM_BO_DEFAULT; - else - flags |= TBM_BO_SCANOUT; - - if (fexynos & EXYNOS_BO_WC) - flags |= TBM_BO_WC; - else if (fexynos & EXYNOS_BO_CACHABLE) - flags |= TBM_BO_DEFAULT; - else - flags |= TBM_BO_NONCACHABLE; - - return flags; -} - -static unsigned int -_get_name(int fd, unsigned int gem) -{ - struct 
drm_gem_flink arg = {0,}; - - arg.handle = gem; - if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) { - TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem); - return 0; - } - - return (unsigned int)arg.name; -} - -static tbm_bo_handle -_exynos_bo_handle(tbm_bo_exynos bo_exynos, int device) -{ - tbm_bo_handle bo_handle; - - memset(&bo_handle, 0x0, sizeof(uint64_t)); - - switch (device) { - case TBM_DEVICE_DEFAULT: - case TBM_DEVICE_2D: - bo_handle.u32 = (uint32_t)bo_exynos->gem; - break; - case TBM_DEVICE_CPU: - if (!bo_exynos->pBase) { - struct drm_exynos_gem_map arg = {0,}; - void *map = NULL; - - arg.handle = bo_exynos->gem; - if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg, - sizeof(arg))) { - TBM_ERR("Cannot map_exynos gem=%d\n", bo_exynos->gem); - return (tbm_bo_handle) NULL; - } - - map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED, - bo_exynos->fd, arg.offset); - if (map == MAP_FAILED) { - TBM_ERR("Cannot usrptr gem=%d\n", bo_exynos->gem); - return (tbm_bo_handle) NULL; - } - bo_exynos->pBase = map; - } - bo_handle.ptr = (void *)bo_exynos->pBase; - break; - case TBM_DEVICE_3D: - case TBM_DEVICE_MM: - if (!bo_exynos->dmabuf) { - struct drm_prime_handle arg = {0, }; - - arg.handle = bo_exynos->gem; - if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) { - TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem); - return (tbm_bo_handle) NULL; - } - bo_exynos->dmabuf = arg.fd; - } - - bo_handle.u32 = (uint32_t)bo_exynos->dmabuf; - break; - default: - TBM_ERR("Not supported device:%d\n", device); - bo_handle.ptr = (void *) NULL; - break; - } - - return bo_handle; -} - -static int -_new_calc_plane_nv12(int width, int height) -{ - int mbX, mbY; - - mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW); - mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL); - - if (width * height < S5P_FIMV_MAX_FRAME_SIZE) - mbY = (mbY + 1) / 2 * 2; - - return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY * - S5P_FIMV_NUM_PIXELS_IN_MB_ROW)); 
-} - -static int -_calc_yplane_nv12(int width, int height) -{ - int mbX, mbY; - - mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN); - mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN); - - return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN); -} - -static int -_calc_uvplane_nv12(int width, int height) -{ - int mbX, mbY; - - mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN); - mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN); - - return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN); -} - -static int -_new_calc_yplane_nv12(int width, int height) -{ - return SIZE_ALIGN(_new_calc_plane_nv12(width, - height) + S5P_FIMV_D_ALIGN_PLANE_SIZE, - TBM_SURFACE_ALIGNMENT_PLANE_NV12); -} - -static int -_new_calc_uvplane_nv12(int width, int height) -{ - return SIZE_ALIGN((_new_calc_plane_nv12(width, - height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE, - TBM_SURFACE_ALIGNMENT_PLANE_NV12); -} - -static tbm_bufmgr_capability -tbm_exynos_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error) -{ - tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE; - - capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD; - - if (error) - *error = TBM_ERROR_NONE; - - return capabilities; -} - -static tbm_error_e -tbm_exynos_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display) -{ - tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; - TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER); - - if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd, - bufmgr_exynos->device_name, 0)) { - TBM_ERR("fail to tbm_drm_helper_wl_server_init\n"); - return TBM_ERROR_INVALID_OPERATION; - } - - bufmgr_exynos->bind_display = native_display; - - return TBM_ERROR_NONE; -} - -static tbm_error_e -tbm_exynos_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data, - uint32_t **formats, uint32_t *num) -{ - const static uint32_t 
tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = { - TBM_FORMAT_ARGB8888, - TBM_FORMAT_XRGB8888, - TBM_FORMAT_NV12, - TBM_FORMAT_YUV420 - }; - - tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; - uint32_t *color_formats; - - TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER); - - color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT); - if (color_formats == NULL) - return TBM_ERROR_OUT_OF_MEMORY; - - memcpy(color_formats, tbm_exynos_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT); - - *formats = color_formats; - *num = TBM_COLOR_FORMAT_COUNT; - - TBM_DBG("supported format count = %d\n", *num); - - return TBM_ERROR_NONE; -} - -static tbm_error_e -tbm_exynos_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data, - tbm_format format, int plane_idx, int width, - int height, uint32_t *size, uint32_t *offset, - uint32_t *pitch, int *bo_idx) -{ - tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; - int bpp; - int _offset = 0; - int _pitch = 0; - int _size = 0; - int _bo_idx = 0; - - TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER); - - switch (format) { - /* 16 bpp RGB */ - case TBM_FORMAT_XRGB4444: - case TBM_FORMAT_XBGR4444: - case TBM_FORMAT_RGBX4444: - case TBM_FORMAT_BGRX4444: - case TBM_FORMAT_ARGB4444: - case TBM_FORMAT_ABGR4444: - case TBM_FORMAT_RGBA4444: - case TBM_FORMAT_BGRA4444: - case TBM_FORMAT_XRGB1555: - case TBM_FORMAT_XBGR1555: - case TBM_FORMAT_RGBX5551: - case TBM_FORMAT_BGRX5551: - case TBM_FORMAT_ARGB1555: - case TBM_FORMAT_ABGR1555: - case TBM_FORMAT_RGBA5551: - case TBM_FORMAT_BGRA5551: - case TBM_FORMAT_RGB565: - bpp = 16; - _offset = 0; - _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - break; - /* 24 bpp RGB */ - case TBM_FORMAT_RGB888: - case TBM_FORMAT_BGR888: - bpp = 24; - _offset = 0; - _pitch = 
SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - break; - /* 32 bpp RGB */ - case TBM_FORMAT_XRGB8888: - case TBM_FORMAT_XBGR8888: - case TBM_FORMAT_RGBX8888: - case TBM_FORMAT_BGRX8888: - case TBM_FORMAT_ARGB8888: - case TBM_FORMAT_ABGR8888: - case TBM_FORMAT_RGBA8888: - case TBM_FORMAT_BGRA8888: - bpp = 32; - _offset = 0; - _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - break; - - /* packed YCbCr */ - case TBM_FORMAT_YUYV: - case TBM_FORMAT_YVYU: - case TBM_FORMAT_UYVY: - case TBM_FORMAT_VYUY: - case TBM_FORMAT_AYUV: - bpp = 32; - _offset = 0; - _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - break; - - /* - * 2 plane YCbCr - * index 0 = Y plane, [7:0] Y - * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian - * or - * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian - */ - case TBM_FORMAT_NV12: - case TBM_FORMAT_NV21: - bpp = 12; - if (plane_idx == 0) { - _offset = 0; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = MAX(_calc_yplane_nv12(width, height), - _new_calc_yplane_nv12(width, height)); - _bo_idx = 0; - } else if (plane_idx == 1) { - _offset = 0; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = MAX(_calc_uvplane_nv12(width, height), - _new_calc_uvplane_nv12(width, height)); - _bo_idx = 1; - } - break; - case TBM_FORMAT_NV16: - case TBM_FORMAT_NV61: - bpp = 16; - /*if(plane_idx == 0)*/ - { - _offset = 0; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 0) - break; - } - /*else if( plane_idx ==1 )*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width, 
TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - } - break; - - /* - * 3 plane YCbCr - * index 0: Y plane, [7:0] Y - * index 1: Cb plane, [7:0] Cb - * index 2: Cr plane, [7:0] Cr - * or - * index 1: Cr plane, [7:0] Cr - * index 2: Cb plane, [7:0] Cb - */ - - /* - * NATIVE_BUFFER_FORMAT_YV12 - * NATIVE_BUFFER_FORMAT_I420 - */ - case TBM_FORMAT_YUV410: - case TBM_FORMAT_YVU410: - bpp = 9; - /*if(plane_idx == 0)*/ - { - _offset = 0; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 0) - break; - } - /*else if(plane_idx == 1)*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4); - _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 1) - break; - } - /*else if (plane_idx == 2)*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4); - _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane); - _bo_idx = 0; - } - break; - case TBM_FORMAT_YUV411: - case TBM_FORMAT_YVU411: - case TBM_FORMAT_YUV420: - case TBM_FORMAT_YVU420: - bpp = 12; - /*if(plane_idx == 0)*/ - { - _offset = 0; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 0) - break; - } - /*else if(plane_idx == 1)*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2); - _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 1) - break; - } - /*else if (plane_idx == 2)*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2); - _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane); - _bo_idx = 0; - } - break; - 
case TBM_FORMAT_YUV422: - case TBM_FORMAT_YVU422: - bpp = 16; - /*if(plane_idx == 0)*/ - { - _offset = 0; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 0) - break; - } - /*else if(plane_idx == 1)*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2); - _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 1) - break; - } - /*else if (plane_idx == 2)*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2); - _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane); - _bo_idx = 0; - } - break; - case TBM_FORMAT_YUV444: - case TBM_FORMAT_YVU444: - bpp = 24; - /*if(plane_idx == 0)*/ - { - _offset = 0; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 0) - break; - } - /*else if(plane_idx == 1)*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - if (plane_idx == 1) - break; - } - /*else if (plane_idx == 2)*/ - { - _offset += _size; - _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV); - _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane); - _bo_idx = 0; - } - break; - default: - bpp = 0; - break; - } - - *size = _size; - *offset = _offset; - *pitch = _pitch; - *bo_idx = _bo_idx; - - return TBM_ERROR_NONE; -} - -static tbm_backend_bo_data * -tbm_exynos_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size, - tbm_bo_memory_type flags, tbm_error_e *error) -{ - tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; - tbm_bo_exynos bo_exynos; - unsigned int exynos_flags; - - if (bufmgr_exynos == NULL) { - TBM_ERR("bufmgr_data 
is null\n"); - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return NULL; - } - - bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos)); - if (!bo_exynos) { - TBM_ERR("fail to allocate the bo_exynos private\n"); - if (error) - *error = TBM_ERROR_OUT_OF_MEMORY; - return NULL; - } - bo_exynos->bufmgr_exynos = bufmgr_exynos; - - exynos_flags = _get_exynos_flag_from_tbm(flags); - if ((flags & TBM_BO_SCANOUT) && - size <= 4 * 1024) { - exynos_flags |= EXYNOS_BO_NONCONTIG; - } - - struct drm_exynos_gem_create arg = {0, }; - - arg.size = (uint64_t)size; - arg.flags = exynos_flags; - if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg, - sizeof(arg))) { - TBM_ERR("Cannot create bo_exynos(flag:%x, size:%d)\n", arg.flags, - (unsigned int)arg.size); - free(bo_exynos); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - - bo_exynos->fd = bufmgr_exynos->fd; - bo_exynos->gem = arg.handle; - bo_exynos->size = size; - bo_exynos->flags_tbm = flags; - bo_exynos->flags_exynos = exynos_flags; - bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem); - - if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) { - TBM_ERR("fail init cache state(%d)\n", bo_exynos->name); - free(bo_exynos); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - - pthread_mutex_init(&bo_exynos->mutex, NULL); - - if (bufmgr_exynos->use_dma_fence && !bo_exynos->dmabuf) { - struct drm_prime_handle arg = {0, }; - - arg.handle = bo_exynos->gem; - if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) { - TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem); - free(bo_exynos); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - bo_exynos->dmabuf = arg.fd; - } - - /* add bo_exynos to hash */ - if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0) - TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name); - - TBM_DBG(" bo_exynos:%p, gem:%d(%d), flags:%d(%d), size:%d\n", - bo_exynos, 
- bo_exynos->gem, bo_exynos->name, - flags, exynos_flags, - bo_exynos->size); - - if (error) - *error = TBM_ERROR_NONE; - - return (tbm_backend_bo_data *)bo_exynos; -} - -static tbm_backend_bo_data * -tbm_exynos_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error) -{ - tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; - tbm_bo_exynos bo_exynos; - unsigned int gem = 0; - unsigned int name; - int ret; - char buf[STRERR_BUFSIZE]; - - if (bufmgr_exynos == NULL) { - TBM_ERR("bufmgr_data is null\n"); - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return NULL; - } - - /*getting handle from fd*/ - struct drm_prime_handle arg = {0, }; - - arg.fd = key; - arg.flags = 0; - if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) { - TBM_ERR("Cannot get gem handle from fd:%d (%s)\n", - arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE)); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - gem = arg.handle; - - name = _get_name(bufmgr_exynos->fd, gem); - if (!name) { - TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n", - gem, key, strerror_r(errno, buf, STRERR_BUFSIZE)); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - - ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&bo_exynos); - if (ret == 0) { - if (gem == bo_exynos->gem) { - if (error) - *error = TBM_ERROR_NONE; - return bo_exynos; - } - } - - /* Determine size of bo_exynos. The fd-to-handle ioctl really should - * return the size, but it doesn't. If we have kernel 3.12 or - * later, we can lseek on the prime fd to get the size. Older - * kernels will just fail, in which case we fall back to the - * provided (estimated or guess size). 
- */ - unsigned int real_size = -1; - struct drm_exynos_gem_info info = {0, }; - - real_size = lseek(key, 0, SEEK_END); - - info.handle = gem; - if (drmCommandWriteRead(bufmgr_exynos->fd, - DRM_EXYNOS_GEM_GET, - &info, - sizeof(struct drm_exynos_gem_info))) { - TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n", - gem, key, strerror_r(errno, buf, STRERR_BUFSIZE)); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - - if (real_size == -1) - real_size = info.size; - - bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos)); - if (!bo_exynos) { - TBM_ERR("bo_exynos:%p fail to allocate the bo_exynos\n", bo_exynos); - if (error) - *error = TBM_ERROR_OUT_OF_MEMORY; - return NULL; - } - bo_exynos->bufmgr_exynos = bufmgr_exynos; - - bo_exynos->fd = bufmgr_exynos->fd; - bo_exynos->gem = gem; - bo_exynos->size = real_size; - bo_exynos->flags_exynos = info.flags; - bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos); - bo_exynos->name = name; - - if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) { - TBM_ERR("fail init cache state(%d)\n", bo_exynos->name); - free(bo_exynos); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - - /* add bo_exynos to hash */ - if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0) - TBM_ERR("bo_exynos:%p Cannot insert bo_exynos to Hash(%d) from gem:%d, fd:%d\n", - bo_exynos, bo_exynos->name, gem, key); - - TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf, - key, - bo_exynos->flags_tbm, bo_exynos->flags_exynos, - bo_exynos->size); - - if (error) - *error = TBM_ERROR_NONE; - - return (tbm_backend_bo_data *)bo_exynos; -} - -static tbm_backend_bo_data * -tbm_exynos_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error) -{ - tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data; - tbm_bo_exynos bo_exynos; - 
int ret; - - if (bufmgr_exynos == NULL) { - TBM_ERR("bufmgr_data is null\n"); - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return NULL; - } - - ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&bo_exynos); - if (ret == 0) { - if (error) - *error = TBM_ERROR_NONE; - return (tbm_backend_bo_data *)bo_exynos; - } - - struct drm_gem_open arg = {0, }; - struct drm_exynos_gem_info info = {0, }; - - arg.name = key; - if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) { - TBM_ERR("Cannot open gem name=%d\n", key); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - - info.handle = arg.handle; - if (drmCommandWriteRead(bufmgr_exynos->fd, - DRM_EXYNOS_GEM_GET, - &info, - sizeof(struct drm_exynos_gem_info))) { - TBM_ERR("Cannot get gem info=%d\n", key); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - - bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos)); - if (!bo_exynos) { - TBM_ERR("fail to allocate the bo_exynos private\n"); - if (error) - *error = TBM_ERROR_OUT_OF_MEMORY; - return NULL; - } - bo_exynos->bufmgr_exynos = bufmgr_exynos; - - bo_exynos->fd = bufmgr_exynos->fd; - bo_exynos->gem = arg.handle; - bo_exynos->size = arg.size; - bo_exynos->flags_exynos = info.flags; - bo_exynos->name = key; - bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos); - - if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) { - TBM_ERR("fail init cache state(%d)\n", bo_exynos->name); - free(bo_exynos); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return NULL; - } - - if (!bo_exynos->dmabuf) { - struct drm_prime_handle arg = {0, }; - - arg.handle = bo_exynos->gem; - if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) { - TBM_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_exynos->gem); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - free(bo_exynos); - return NULL; - } - bo_exynos->dmabuf = arg.fd; - } - - /* add bo_exynos to hash */ - if 
(drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0) - TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name); - - TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf, - bo_exynos->flags_tbm, bo_exynos->flags_exynos, - bo_exynos->size); - - if (error) - *error = TBM_ERROR_NONE; - - return (tbm_backend_bo_data *)bo_exynos; -} - -static void -tbm_exynos_bo_free(tbm_backend_bo_data *bo_data) -{ - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - tbm_bo_exynos temp; - tbm_bufmgr_exynos bufmgr_exynos; - char buf[STRERR_BUFSIZE]; - int ret; - - if (!bo_data) - return; - - bufmgr_exynos = bo_exynos->bufmgr_exynos; - if (!bufmgr_exynos) - return; - - TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, size:%d\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf, - bo_exynos->size); - - if (bo_exynos->pBase) { - if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) { - TBM_ERR("bo_exynos:%p fail to munmap(%s)\n", - bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE)); - } - } - - /* close dmabuf */ - if (bo_exynos->dmabuf) { - close(bo_exynos->dmabuf); - bo_exynos->dmabuf = 0; - } - - /* delete bo_exynos from hash */ - ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name, (void **)&temp); - if (ret == 0) - drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name); - else - TBM_ERR("Cannot find bo_exynos to Hash(%d), ret=%d\n", bo_exynos->name, ret); - - if (temp != bo_exynos) - TBM_ERR("hashBos probably has several BOs with same name!!!\n"); - - _bo_destroy_cache_state(bufmgr_exynos, bo_exynos); - - /* Free gem handle */ - struct drm_gem_close arg = {0, }; - - memset(&arg, 0, sizeof(arg)); - arg.handle = bo_exynos->gem; - if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) - TBM_ERR("bo_exynos:%p fail to gem close.(%s)\n", - bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE)); - - free(bo_exynos); -} - -static int 
-tbm_exynos_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error) -{ - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - - if (!bo_exynos) { - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return 0; - } - - if (error) - *error = TBM_ERROR_NONE; - - return bo_exynos->size; -} - -static tbm_bo_memory_type -tbm_exynos_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error) -{ - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - - if (!bo_exynos) { - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return TBM_BO_DEFAULT; - } - - if (error) - *error = TBM_ERROR_NONE; - - return bo_exynos->flags_tbm; -} - -static tbm_bo_handle -tbm_exynos_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error) -{ - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - tbm_bo_handle bo_handle; - - if (!bo_exynos) { - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return (tbm_bo_handle) NULL; - } - - if (!bo_exynos->gem) { - TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem); - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return (tbm_bo_handle) NULL; - } - - TBM_DBG("bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf, - bo_exynos->flags_tbm, bo_exynos->flags_exynos, - bo_exynos->size, - STR_DEVICE[device]); - - /*Get mapped bo_handle*/ - bo_handle = _exynos_bo_handle(bo_exynos, device); - if (bo_handle.ptr == NULL) { - TBM_ERR("Cannot get handle: gem:%d, device:%d\n", - bo_exynos->gem, device); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return (tbm_bo_handle) NULL; - } - - if (error) - *error = TBM_ERROR_NONE; - - return bo_handle; -} - -static tbm_bo_handle -tbm_exynos_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, - tbm_bo_access_option opt, tbm_error_e *error) -{ - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - tbm_bo_handle bo_handle; - tbm_bufmgr_exynos bufmgr_exynos; - - if (!bo_exynos) { - if 
(error) - *error = TBM_ERROR_INVALID_PARAMETER; - return (tbm_bo_handle) NULL; - } - - bufmgr_exynos = bo_exynos->bufmgr_exynos; - if (!bufmgr_exynos) { - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return (tbm_bo_handle) NULL; - } - - if (!bo_exynos->gem) { - TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem); - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return (tbm_bo_handle) NULL; - } - - TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, %s, %s\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf, - STR_DEVICE[device], - STR_OPT[opt]); - - /*Get mapped bo_handle*/ - bo_handle = _exynos_bo_handle(bo_exynos, device); - if (bo_handle.ptr == NULL) { - TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n", - bo_exynos->gem, device, opt); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return (tbm_bo_handle) NULL; - } - - if (bo_exynos->map_cnt == 0) - _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt); - - bo_exynos->last_map_device = device; - - bo_exynos->map_cnt++; - - if (error) - *error = TBM_ERROR_NONE; - - return bo_handle; -} - -static tbm_error_e -tbm_exynos_bo_unmap(tbm_backend_bo_data *bo_data) -{ - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - tbm_bufmgr_exynos bufmgr_exynos; - - if (!bo_exynos) - return TBM_ERROR_INVALID_PARAMETER; - - bufmgr_exynos = bo_exynos->bufmgr_exynos; - if (!bufmgr_exynos) - return TBM_ERROR_INVALID_PARAMETER; - - if (!bo_exynos->gem) - return TBM_ERROR_INVALID_PARAMETER; - - bo_exynos->map_cnt--; - - if (bo_exynos->map_cnt == 0) - _bo_save_cache_state(bufmgr_exynos, bo_exynos); - - /* check whether cache control do or not */ - if (g_enable_cache_ctrl && bo_exynos->last_map_device == TBM_DEVICE_CPU) - _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL); - - bo_exynos->last_map_device = -1; - - TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf); - - return TBM_ERROR_NONE; -} - -static 
tbm_error_e -tbm_exynos_bo_lock(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, - tbm_bo_access_option opt) -{ -#ifndef ALWAYS_BACKEND_CTRL - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - tbm_bufmgr_exynos bufmgr_exynos; - struct dma_buf_fence fence; - struct flock filelock; - int ret = 0; - char buf[STRERR_BUFSIZE]; - - if (!bo_exynos) - return TBM_ERROR_INVALID_PARAMETER; - - bufmgr_exynos = bo_exynos->bufmgr_exynos; - if (!bufmgr_exynos) - return TBM_ERROR_INVALID_PARAMETER; - - if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) { - TBM_DBG("Not support device type,\n"); - return TBM_ERROR_INVALID_OPERATION; - } - - memset(&fence, 0, sizeof(struct dma_buf_fence)); - - /* Check if the given type is valid or not. */ - if (opt & TBM_OPTION_WRITE) { - if (device == TBM_DEVICE_3D) - fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA; - } else if (opt & TBM_OPTION_READ) { - if (device == TBM_DEVICE_3D) - fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA; - } else { - TBM_ERR("Invalid argument\n"); - return TBM_ERROR_INVALID_PARAMETER; - } - - /* Check if the tbm manager supports dma fence or not. 
*/ - if (!bufmgr_exynos->use_dma_fence) { - TBM_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE)); - return TBM_ERROR_INVALID_OPERATION; - - } - - if (device == TBM_DEVICE_3D) { - ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence); - if (ret < 0) { - TBM_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE)); - return TBM_ERROR_INVALID_OPERATION; - } - } else { - if (opt & TBM_OPTION_WRITE) - filelock.l_type = F_WRLCK; - else - filelock.l_type = F_RDLCK; - - filelock.l_whence = SEEK_CUR; - filelock.l_start = 0; - filelock.l_len = 0; - - if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock)) - return TBM_ERROR_INVALID_OPERATION; - } - - pthread_mutex_lock(&bo_exynos->mutex); - - if (device == TBM_DEVICE_3D) { - int i; - - for (i = 0; i < DMA_FENCE_LIST_MAX; i++) { - if (bo_exynos->dma_fence[i].ctx == 0) { - bo_exynos->dma_fence[i].type = fence.type; - bo_exynos->dma_fence[i].ctx = fence.ctx; - break; - } - } - - if (i == DMA_FENCE_LIST_MAX) { - /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/ - TBM_ERR("fence list is full\n"); - } - } - - pthread_mutex_unlock(&bo_exynos->mutex); - - TBM_DBG("DMABUF_IOCTL_GET_FENCE! 
bo_exynos:%p, gem:%d(%d), fd:%ds\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf); -#endif /* ALWAYS_BACKEND_CTRL */ - - return TBM_ERROR_NONE; -} - -static tbm_error_e -tbm_exynos_bo_unlock(tbm_backend_bo_data *bo_data) -{ -#ifndef ALWAYS_BACKEND_CTRL - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - struct dma_buf_fence fence; - struct flock filelock; - unsigned int dma_type = 0; - int ret = 0; - char buf[STRERR_BUFSIZE]; - - bufmgr_exynos = bo_exynos->bufmgr_exynos; - if (!bufmgr_exynos) - return TBM_ERROR_INVALID_PARAMETER; - - if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA) - dma_type = 1; - - if (!bo_exynos->dma_fence[0].ctx && dma_type) { - TBM_DBG("FENCE not support or ignored,\n"); - return TBM_ERROR_INVALID_OPERATION; - } - - if (!bo_exynos->dma_fence[0].ctx && dma_type) { - TBM_DBG("device type is not 3D/CPU,\n"); - return TBM_ERROR_INVALID_OPERATION; - } - - pthread_mutex_lock(&bo_exynos->mutex); - - if (dma_type) { - fence.type = bo_exynos->dma_fence[0].type; - fence.ctx = bo_exynos->dma_fence[0].ctx; - int i; - - for (i = 1; i < DMA_FENCE_LIST_MAX; i++) { - bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type; - bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx; - } - bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0; - bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0; - } - pthread_mutex_unlock(&bo_exynos->mutex); - - if (dma_type) { - ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence); - if (ret < 0) { - TBM_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE)); - return TBM_ERROR_INVALID_OPERATION; - } - } else { - filelock.l_type = F_UNLCK; - filelock.l_whence = SEEK_CUR; - filelock.l_start = 0; - filelock.l_len = 0; - - if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock)) - return TBM_ERROR_INVALID_OPERATION; - } - - TBM_DBG("DMABUF_IOCTL_PUT_FENCE! 
bo_exynos:%p, gem:%d(%d), fd:%ds\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf); -#endif /* ALWAYS_BACKEND_CTRL */ - - return TBM_ERROR_NONE; -} -static tbm_fd -tbm_exynos_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error) -{ - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - int ret; - char buf[STRERR_BUFSIZE]; - - if (!bo_exynos) { - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return -1; - } - - struct drm_prime_handle arg = {0, }; - - arg.handle = bo_exynos->gem; - ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg); - if (ret) { - TBM_ERR("bo_exynos:%p Cannot dmabuf=%d (%s)\n", - bo_exynos, bo_exynos->gem, strerror_r(errno, buf, STRERR_BUFSIZE)); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - return (tbm_fd) ret; - } - - TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf, - arg.fd, - bo_exynos->flags_tbm, bo_exynos->flags_exynos, - bo_exynos->size); - - if (error) - *error = TBM_ERROR_NONE; - - return (tbm_fd)arg.fd; -} - -static tbm_key -tbm_exynos_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error) -{ - tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data; - - if (!bo_exynos) { - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return 0; - } - - if (!bo_exynos->name) { - bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem); - if (!bo_exynos->name) { - TBM_ERR("error Cannot get name\n"); - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return 0; - } - } - - TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n", - bo_exynos, - bo_exynos->gem, bo_exynos->name, - bo_exynos->dmabuf, - bo_exynos->flags_tbm, bo_exynos->flags_exynos, - bo_exynos->size); - - if (error) - *error = TBM_ERROR_NONE; - - return (tbm_key)bo_exynos->name; -} - -static void -tbm_exynos_deinit(tbm_backend_bufmgr_data *bufmgr_data) -{ - tbm_bufmgr_exynos bufmgr_exynos = 
(tbm_bufmgr_exynos)bufmgr_data; - tbm_bufmgr bufmgr; - tbm_error_e error; - unsigned long key; - void *value; - - TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL); - - bufmgr = bufmgr_exynos->bufmgr; - - tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_exynos->bufmgr_func); - tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_exynos->bo_func); - - if (bufmgr_exynos->hashBos) { - while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) { - free(value); - drmHashDelete(bufmgr_exynos->hashBos, key); - } - - drmHashDestroy(bufmgr_exynos->hashBos); - bufmgr_exynos->hashBos = NULL; - } - - _bufmgr_deinit_cache_state(bufmgr_exynos); - - if (bufmgr_exynos->bind_display) - tbm_drm_helper_wl_auth_server_deinit(); - - if (bufmgr_exynos->device_name) - free(bufmgr_exynos->device_name); - - if (tbm_backend_bufmgr_query_display_server(bufmgr, &error)) - tbm_drm_helper_unset_tbm_master_fd(); - else - tbm_drm_helper_unset_fd(); - - close(bufmgr_exynos->fd); - - free(bufmgr_exynos); -} - -static tbm_backend_bufmgr_data * -tbm_exynos_init(tbm_bufmgr bufmgr, tbm_error_e *error) -{ - tbm_bufmgr_exynos bufmgr_exynos = NULL; - tbm_backend_bufmgr_func *bufmgr_func = NULL; - tbm_backend_bo_func *bo_func = NULL; - int fp; - tbm_error_e err; - int set_master = 0; - char *value = NULL; - - if (!bufmgr) { - TBM_ERR("bufmgr is null.\n"); - if (error) - *error = TBM_ERROR_INVALID_PARAMETER; - return NULL; - } - - bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos)); - if (!bufmgr_exynos) { - TBM_ERR("fail to alloc bufmgr_exynos!\n"); - if (error) - *error = TBM_ERROR_OUT_OF_MEMORY; - return NULL; - } - - /* check the master_fd which already had opened */ - bufmgr_exynos->fd = tbm_drm_helper_get_master_fd(); - if (bufmgr_exynos->fd < 0) { - bufmgr_exynos->fd = _tbm_exynos_open_drm(); - if (bufmgr_exynos->fd < 0) { - TBM_ERR("fail to open drm!\n"); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - goto fail_open_drm; - } - - if (drmIsMaster(bufmgr_exynos->fd)) { - 
tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd); - set_master = 1; - - bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd); - if (!bufmgr_exynos->device_name) { - TBM_ERR("fail to get device name!\n"); - tbm_drm_helper_unset_tbm_master_fd(); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - goto fail_get_device_name; - } - TBM_INFO("This is Master FD(%d) from open_drm.", bufmgr_exynos->fd); - } else { - /* close the fd and get the authenticated fd from the master fd */ - close(bufmgr_exynos->fd); -#ifdef USE_RENDER_NODE - bufmgr_exynos->fd = _get_render_node(0); -#else - bufmgr_exynos->fd = -1; -#endif - /* get the authenticated drm fd from the master fd */ - if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) { - TBM_ERR("fail to get auth drm info!\n"); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - goto fail_get_auth_info; - } - TBM_INFO("This is Authenticated FD(%d)", bufmgr_exynos->fd); - } - } else { - bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd); - if (!bufmgr_exynos->device_name) { - TBM_ERR("fail to get device name!\n"); - tbm_drm_helper_unset_tbm_master_fd(); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - goto fail_get_device_name; - } - TBM_INFO("This is Master FD from tbm_drm_helper_get_master_fd(%d)", bufmgr_exynos->fd); - } - tbm_drm_helper_set_fd(bufmgr_exynos->fd); - - //Check if the tbm manager supports dma fence or not. - fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY); - if (fp != -1) { - char buf[1]; - int length = read(fp, buf, 1); - - if (length == 1 && buf[0] == '1') - bufmgr_exynos->use_dma_fence = 1; - - close(fp); - } - - /* get the model name from the capi-system-info. - * The alignment_plane and alignment_pitch_rgb is different accoring to the target. - * There will be the stride issue when the right alignment_plane and alignment_pitch_rgb - * is not set to the backend. 
- */ - if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) { - TBM_ERR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n"); - TBM_ERR("May not set the right value on libtbm-exynos backend.\n"); - } else { - if (!strncmp(value, "TW1", 4)) { - g_tbm_surface_alignment_plane = 8; - g_tbm_surface_alignment_pitch_rgb = 8; - g_enable_cache_ctrl = 1; - } else { - g_tbm_surface_alignment_plane = 64; - g_tbm_surface_alignment_pitch_rgb = 64; - } - } - - free(value); - - if (!_bufmgr_init_cache_state(bufmgr_exynos)) { - TBM_ERR("fail to init bufmgr cache state\n"); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - goto fail_init_cache_state; - } - - /*Create Hash Table*/ - bufmgr_exynos->hashBos = drmHashCreate(); - - /* alloc and register bufmgr_funcs */ - bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err); - if (!bufmgr_func) { - TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err); - if (error) - *error = TBM_ERROR_OUT_OF_MEMORY; - goto fail_alloc_bufmgr_func; - } - - bufmgr_func->bufmgr_get_capabilities = tbm_exynos_bufmgr_get_capabilities; - //if (tbm_backend_bufmgr_query_display_server(bufmgr, &err) && !_check_render_node()) - bufmgr_func->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display; - bufmgr_func->bufmgr_get_supported_formats = tbm_exynos_bufmgr_get_supported_formats; - bufmgr_func->bufmgr_get_plane_data = tbm_exynos_bufmgr_get_plane_data; - bufmgr_func->bufmgr_alloc_bo = tbm_exynos_bufmgr_alloc_bo; - bufmgr_func->bufmgr_alloc_bo_with_format = NULL; - bufmgr_func->bufmgr_import_fd = tbm_exynos_bufmgr_import_fd; - bufmgr_func->bufmgr_import_key = tbm_exynos_bufmgr_import_key; - - err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func); - if (err != TBM_ERROR_NONE) { - TBM_ERR("fail to register bufmgr_func! 
err(%d)\n", err); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - goto fail_register_bufmgr_func; - } - bufmgr_exynos->bufmgr_func = bufmgr_func; - - /* alloc and register bo_funcs */ - bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err); - if (!bo_func) { - TBM_ERR("fail to alloc bo_func! err(%d)\n", err); - if (error) - *error = TBM_ERROR_OUT_OF_MEMORY; - goto fail_alloc_bo_func; - } - - bo_func->bo_free = tbm_exynos_bo_free; - bo_func->bo_get_size = tbm_exynos_bo_get_size; - bo_func->bo_get_memory_types = tbm_exynos_bo_get_memory_type; - bo_func->bo_get_handle = tbm_exynos_bo_get_handle; - bo_func->bo_map = tbm_exynos_bo_map; - bo_func->bo_unmap = tbm_exynos_bo_unmap; - bo_func->bo_lock = tbm_exynos_bo_lock; - bo_func->bo_unlock = tbm_exynos_bo_unlock; - bo_func->bo_export_fd = tbm_exynos_bo_export_fd; - bo_func->bo_export_key = tbm_exynos_bo_export_key; - - err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func); - if (err != TBM_ERROR_NONE) { - TBM_ERR("fail to register bo_func! 
err(%d)\n", err); - if (error) - *error = TBM_ERROR_INVALID_OPERATION; - goto fail_register_bo_func; - } - bufmgr_exynos->bo_func = bo_func; - - TBM_DBG("drm_fd:%d\n", bufmgr_exynos->fd); - - if (error) - *error = TBM_ERROR_NONE; - - bufmgr_exynos->bufmgr = bufmgr; - - return (tbm_backend_bufmgr_data *)bufmgr_exynos; - -fail_register_bo_func: - tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func); -fail_alloc_bo_func: -fail_register_bufmgr_func: - tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func); -fail_alloc_bufmgr_func: - _bufmgr_deinit_cache_state(bufmgr_exynos); - if (bufmgr_exynos->hashBos) - drmHashDestroy(bufmgr_exynos->hashBos); -fail_init_cache_state: - if (set_master) - tbm_drm_helper_unset_tbm_master_fd(); - tbm_drm_helper_unset_fd(); -fail_get_device_name: - if (bufmgr_exynos->fd >= 0) - close(bufmgr_exynos->fd); -fail_get_auth_info: -fail_open_drm: - free(bufmgr_exynos); - return NULL; -} - -tbm_backend_module tbm_backend_module_data = { - "exynos", - "Samsung", - TBM_BACKEND_ABI_VERSION_3_0, - tbm_exynos_init, - tbm_exynos_deinit -}; diff --git a/src/tbm_bufmgr_tgl.h b/src/tbm_bufmgr_tgl.h deleted file mode 100644 index b442f73..0000000 --- a/src/tbm_bufmgr_tgl.h +++ /dev/null @@ -1,175 +0,0 @@ -/************************************************************************** - * - * libtbm - * - * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved. 
- * - * Contact: SooChan Lim , Sangjin Lee - * Boram Park , Changyeon Lee - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. - * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - * **************************************************************************/ - -#ifndef __TBM_BUFMGR_TGL_H__ -#define __TBM_BUFMGR_TGL_H__ - -#include - -static char tgl_devfile[] = "/dev/slp_global_lock"; -static char tgl_devfile1[] = "/dev/tgl"; - -#define TGL_IOCTL_BASE 0x32 -#define TGL_IO(nr) _IO(TGL_IOCTL_BASE, nr) -#define TGL_IOR(nr, type) _IOR(TGL_IOCTL_BASE, nr, type) -#define TGL_IOW(nr, type) _IOW(TGL_IOCTL_BASE, nr, type) -#define TGL_IOWR(nr, type) _IOWR(TGL_IOCTL_BASE, nr, type) - -/** - * struct tgl_ver_data - tgl version data structure - * @major: major version - * @minor: minor version - */ -struct tgl_ver_data { - unsigned int major; - unsigned int minor; -}; - -/** - * struct tgl_reg_data - tgl data structure - * @key: lookup key - * @timeout_ms: timeout value for waiting event - */ -struct tgl_reg_data { - unsigned int key; - unsigned int timeout_ms; -}; - -enum tgl_type_data { - TGL_TYPE_NONE = 0, - TGL_TYPE_READ = (1 << 0), - TGL_TYPE_WRITE = (1 << 1), -}; - -/** - * struct tgl_lock_data - tgl lock data structure - * @key: lookup key - * @type: lock type that is in tgl_type_data - */ -struct tgl_lock_data { - unsigned int key; - enum tgl_type_data type; -}; - -enum tgl_status_data { - TGL_STATUS_UNLOCKED, - TGL_STATUS_LOCKED, -}; - -/** - * struct tgl_usr_data - tgl user data structure - * @key: lookup key - * @data1: user data 1 - * @data2: user data 2 - * @status: lock status that is in tgl_status_data - */ -struct tgl_usr_data { - unsigned int key; - unsigned int data1; - unsigned int data2; - enum tgl_status_data status; -}; - -enum { - _TGL_GET_VERSION, - _TGL_REGISTER, - _TGL_UNREGISTER, - _TGL_LOCK, - _TGL_UNLOCK, - _TGL_SET_DATA, - _TGL_GET_DATA, -}; - -/* get version information */ -#define TGL_IOCTL_GET_VERSION TGL_IOR(_TGL_GET_VERSION, struct tgl_ver_data) -/* register key */ -#define TGL_IOCTL_REGISTER TGL_IOW(_TGL_REGISTER, struct tgl_reg_data) -/* unregister key */ -#define TGL_IOCTL_UNREGISTER TGL_IOW(_TGL_UNREGISTER, 
struct tgl_reg_data) -/* lock with key */ -#define TGL_IOCTL_LOCK TGL_IOW(_TGL_LOCK, struct tgl_lock_data) -/* unlock with key */ -#define TGL_IOCTL_UNLOCK TGL_IOW(_TGL_UNLOCK, struct tgl_lock_data) -/* set user data with key */ -#define TGL_IOCTL_SET_DATA TGL_IOW(_TGL_SET_DATA, struct tgl_usr_data) -/* get user data with key */ -#define TGL_IOCTL_GET_DATA TGL_IOR(_TGL_GET_DATA, struct tgl_usr_data) - -/* indicate cache units. */ -enum e_drm_exynos_gem_cache_sel { - EXYNOS_DRM_L1_CACHE = 1 << 0, - EXYNOS_DRM_L2_CACHE = 1 << 1, - EXYNOS_DRM_ALL_CORES = 1 << 2, - EXYNOS_DRM_ALL_CACHES = EXYNOS_DRM_L1_CACHE | - EXYNOS_DRM_L2_CACHE, - EXYNOS_DRM_ALL_CACHES_CORES = EXYNOS_DRM_L1_CACHE | - EXYNOS_DRM_L2_CACHE | - EXYNOS_DRM_ALL_CORES, - EXYNOS_DRM_CACHE_SEL_MASK = EXYNOS_DRM_ALL_CACHES_CORES -}; - -/* indicate cache operation types. */ -enum e_drm_exynos_gem_cache_op { - EXYNOS_DRM_CACHE_INV_ALL = 1 << 3, - EXYNOS_DRM_CACHE_INV_RANGE = 1 << 4, - EXYNOS_DRM_CACHE_CLN_ALL = 1 << 5, - EXYNOS_DRM_CACHE_CLN_RANGE = 1 << 6, - EXYNOS_DRM_CACHE_FSH_ALL = EXYNOS_DRM_CACHE_INV_ALL | - EXYNOS_DRM_CACHE_CLN_ALL, - EXYNOS_DRM_CACHE_FSH_RANGE = EXYNOS_DRM_CACHE_INV_RANGE | - EXYNOS_DRM_CACHE_CLN_RANGE, - EXYNOS_DRM_CACHE_OP_MASK = EXYNOS_DRM_CACHE_FSH_ALL | - EXYNOS_DRM_CACHE_FSH_RANGE -}; - -/** - * A structure for cache operation. - * - * @usr_addr: user space address. - * P.S. it SHOULD BE user space. - * @size: buffer size for cache operation. - * @flags: select cache unit and cache operation. - * @gem_handle: a handle to a gem object. - * this gem handle is needed for cache range operation to L2 cache. - */ -struct drm_exynos_gem_cache_op { - uint64_t usr_addr; - unsigned int size; - unsigned int flags; - unsigned int gem_handle; -}; - -#define DRM_EXYNOS_GEM_CACHE_OP 0x12 - -#define DRM_IOCTL_EXYNOS_GEM_CACHE_OP DRM_IOWR(DRM_COMMAND_BASE + \ - DRM_EXYNOS_GEM_CACHE_OP, struct drm_exynos_gem_cache_op) - -#endif /* __TBM_BUFMGR_TGL_H__ */