# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.60)
-AC_INIT(libtbm-exynos, 1.0.5)
+AC_INIT(hal-backend-tbm-exynos, 1.0.5)
AC_USE_SYSTEM_EXTENSIONS
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])
PKG_CHECK_MODULES(LIBDRM, libdrm)
PKG_CHECK_MODULES(LIBDRM_EXYNOS, libdrm_exynos)
-PKG_CHECK_MODULES(LIBTBM, libtbm)
PKG_CHECK_MODULES(HAL_API_COMMON, hal-api-common)
PKG_CHECK_MODULES(HAL_API_TBM, hal-api-tbm)
PKG_CHECK_MODULES(DLOG, dlog)
AC_DEFINE(ALWAYS_BACKEND_CTRL, 1, [Enable always backend ctrl])
fi
-LIBTBM_EXYNOS_CFLAGS="$LIBDRM_CFLAGS $LIBDRM_EXYNOS_CFLAGS $LIBTBM_CFLAGS $DLOG_CFLAGS $LIBUDEV_CFLAGS $SYSTEM_INFO_CFLAGS"
-LIBTBM_EXYNOS_LIBS="$LIBDRM_LIBS $LIBDRM_EXYNOS_LIBS $LIBTBM_LIBS $DLOG_LIBS $LIBUDEV_LIBS $SYSTEM_INFO_LIBS"
-AC_SUBST(LIBTBM_EXYNOS_CFLAGS)
-AC_SUBST(LIBTBM_EXYNOS_LIBS)
-
-bufmgr_dir=${libdir#*/}
-AC_SUBST(bufmgr_dir)
-
-# for libhal-backend-tbm-exynos
LIBHAL_BACKEND_TBM_EXYNOS_CFLAGS="$HAL_API_COMMON_CFLAGS $HAL_API_TBM_CFLAGS $LIBDRM_CFLAGS $LIBDRM_EXYNOS_CFLAGS $DLOG_CFLAGS $LIBUDEV_CFLAGS $SYSTEM_INFO_CFLAGS"
LIBHAL_BACKEND_TBM_EXYNOS_LIBS="$HAL_API_COMMON_LIBS $HAL_API_TBM_LIBS $LIBDRM_LIBS $LIBDRM_EXYNOS_LIBS $DLOG_LIBS $LIBUDEV_LIBS $SYSTEM_INFO_LIBS"
+
AC_SUBST(LIBHAL_BACKEND_TBM_EXYNOS_CFLAGS)
AC_SUBST(LIBHAL_BACKEND_TBM_EXYNOS_LIBS)
AC_OUTPUT([
Makefile
- src/libtbm-exynos/Makefile
- src/libhal-backend-tbm-exynos/Makefile
src/Makefile])
echo ""
echo "CFLAGS : $CFLAGS"
echo "LDFLAGS : $LDFLAGS"
-echo "LIBTBM_EXYNOS_CFLAGS : $LIBTBM_EXYNOS_CFLAGS"
-echo "LIBTBM_EXYNOS_LIBS : $LIBTBM_EXYNOS_LIBS"
-echo "bufmgr_dir : $bufmgr_dir"
echo "LIBHAL_BACKEND_TBM_EXYNOS_CFLAGS : $LIBHAL_BACKEND_TBM_EXYNOS_CFLAGS"
echo "LIBHAL_BACKEND_TBM_EXYNOS_LIBS : $LIBHAL_BACKEND_TBM_EXYNOS_LIBS"
-echo "hal-libdir : $HAL_LIBDIR"
+echo "HAL_LIBDIR : $HAL_LIBDIR"
echo ""
--- /dev/null
+<manifest>
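+ <!-- assign the package files to the default "_" Smack domain -->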
+ <request>
+ <domain name="_"/>
+ </request>
+</manifest>
--- /dev/null
+Name: hal-backend-tbm-exynos
+Version: 3.0.1
+Release: 1
+License: MIT
+Summary: hal-backend-tbm module for exynos
+Group: System/Libraries
+Source0: %{name}-%{version}.tar.gz
+Source1001: %{name}.manifest
+
+BuildRequires: pkgconfig(libdrm)
+BuildRequires: pkgconfig(libdrm_exynos)
+BuildRequires: pkgconfig(hal-api-common)
+BuildRequires: pkgconfig(hal-api-tbm)
+BuildRequires: pkgconfig(dlog)
+BuildRequires: pkgconfig(libudev)
+BuildRequires: pkgconfig(capi-system-info)
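+# the exynos DRM backend is only built for ARM targets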
+ExclusiveArch: %{arm} aarch64
+
+%description
+description: hal tbm backend module for exynos
+
+%prep
+%setup -q
+cp %{SOURCE1001} .
+
+%build
+
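+# configure with --with-hal-libdir so the backend library installs under %{_hal_libdir}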
+%reconfigure --prefix=%{_prefix} --libdir=%{_libdir} \
+ --with-hal-libdir=%{_hal_libdir} \
+ CFLAGS="${CFLAGS} -Wall -Werror" LDFLAGS="${LDFLAGS} -Wl,--hash-style=both -Wl,--as-needed"
+
+make %{?_smp_mflags}
+
+%install
+rm -rf %{buildroot}
+%make_install
+
+# install the udev rule and license files
+mkdir -p %{buildroot}%{_hal_libdir}/udev/rules.d/
+cp -af rules/99-libhal-backend-tbm-exynos.rules %{buildroot}%{_hal_libdir}/udev/rules.d/
+mkdir -p %{buildroot}%{_hal_licensedir}/libhal-backend-tbm-exynos
+cp -af COPYING %{buildroot}%{_hal_licensedir}/libhal-backend-tbm-exynos
+
+%post
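+# point the generic libhal-backend-tbm.so symlink at the exynos backend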
+if [ -f %{_hal_libdir}/libhal-backend-tbm.so ]; then
+ rm -rf %{_hal_libdir}/libhal-backend-tbm.so
+fi
+ln -s libhal-backend-tbm-exynos.so %{_hal_libdir}/libhal-backend-tbm.so
+
+%postun -p /sbin/ldconfig
+
+%files
+%manifest %{name}.manifest
+%{_hal_licensedir}/libhal-backend-tbm-exynos/COPYING
+%{_hal_libdir}/libhal-backend-*.so*
+%{_hal_libdir}/udev/rules.d/99-libhal-backend-tbm-exynos.rules
+++ /dev/null
-<manifest>
- <request>
- <domain name="_"/>
- </request>
-</manifest>
+++ /dev/null
-<manifest>
- <request>
- <domain name="_"/>
- </request>
-</manifest>
+++ /dev/null
-Name: libtbm-exynos
-Version: 3.0.1
-Release: 1
-License: MIT
-Summary: Tizen Buffer Manager - exynos backend
-Group: System/Libraries
-Source0: %{name}-%{version}.tar.gz
-Source1001: %{name}.manifest
-Source1002: libhal-backend-tbm-exynos.manifest
-
-BuildRequires: pkgconfig(libdrm)
-BuildRequires: pkgconfig(libdrm_exynos)
-BuildRequires: pkgconfig(libtbm)
-BuildRequires: pkgconfig(hal-api-common)
-BuildRequires: pkgconfig(hal-api-tbm)
-BuildRequires: pkgconfig(dlog)
-BuildRequires: pkgconfig(libudev)
-BuildRequires: pkgconfig(capi-system-info)
-ExclusiveArch: %{arm} aarch64
-
-%description
-descriptionion: Tizen Buffer manager backend module for exynos
-
-%package -n hal-backend-tbm-exynos
-Summary: hal-backend-tbm module for exynos
-Group: System/Libraries
-Requires: hal-api-tbm
-Requires: hal-api-common
-
-%description -n hal-backend-tbm-exynos
-descriptionion: hal tbm backend module for exynos
-
-%prep
-%setup -q
-cp %{SOURCE1001} .
-cp %{SOURCE1002} .
-
-%build
-
-%reconfigure --prefix=%{_prefix} --libdir=%{_libdir}/bufmgr \
- --with-hal-libdir=%{_hal_libdir} \
- CFLAGS="${CFLAGS} -Wall -Werror" LDFLAGS="${LDFLAGS} -Wl,--hash-style=both -Wl,--as-needed"
-
-make %{?_smp_mflags}
-
-%install
-rm -rf %{buildroot}
-%make_install
-
-# make rule for tgl
-mkdir -p %{buildroot}%{_libdir}/udev/rules.d/
-cp -af rules/99-libtbm_exynos.rules %{buildroot}%{_libdir}/udev/rules.d/
-
-# make rule and license files
-mkdir -p %{buildroot}%{_hal_libdir}/udev/rules.d/
-cp -af rules/99-libhal-backend-tbm-exynos.rules %{buildroot}%{_hal_libdir}/udev/rules.d/
-mkdir -p %{buildroot}%{_hal_licensedir}/libhal-backend-tbm-exynos
-cp -af COPYING %{buildroot}%{_hal_licensedir}/libhal-backend-tbm-exynos
-
-%post
-if [ -f %{_libdir}/bufmgr/libtbm_default.so ]; then
- rm -rf %{_libdir}/bufmgr/libtbm_default.so
-fi
-ln -s libtbm_exynos.so %{_libdir}/bufmgr/libtbm_default.so
-
-%postun -p /sbin/ldconfig
-
-%post -n hal-backend-tbm-exynos
-if [ -f %{_hal_libdir}/libhal-backend-tbm.so ]; then
- rm -rf %{_hal_libdir}/libhal-backend-tbm.so
-fi
-ln -s libhal-backend-tbm-exynos.so %{_hal_libdir}/libhal-backend-tbm.so
-
-%postun -n hal-backend-tbm-exynos -p /sbin/ldconfig
-
-%files
-%manifest %{name}.manifest
-%license COPYING
-%{_libdir}/bufmgr/libtbm_*.so*
-%{_libdir}/udev/rules.d/99-libtbm_exynos.rules
-
-%files -n hal-backend-tbm-exynos
-%manifest libhal-backend-tbm-exynos.manifest
-%{_hal_licensedir}/libhal-backend-tbm-exynos/COPYING
-%{_hal_libdir}/libhal-backend-*.so*
-%{_hal_libdir}/udev/rules.d/99-libhal-backend-tbm-exynos.rules
+++ /dev/null
-KERNEL=="tgl", MODE="0666", GROUP="display", SECLABEL{smack}="*"
-KERNEL=="slp_global_lock*", MODE="0666", GROUP="display", SECLABEL{smack}="*"
-SUBDIRS = libtbm-exynos libhal-backend-tbm-exynos
\ No newline at end of file
+AM_CFLAGS = \
+ @LIBHAL_BACKEND_TBM_EXYNOS_CFLAGS@ \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/src
+
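+# install the backend library into the HAL library directory (@HAL_LIBDIR@)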
+libhal_backend_tbm_exynos_la_LTLIBRARIES = libhal-backend-tbm-exynos.la
+libhal_backend_tbm_exynos_ladir = @HAL_LIBDIR@
+libhal_backend_tbm_exynos_la_LIBADD = @LIBHAL_BACKEND_TBM_EXYNOS_LIBS@
+
+libhal_backend_tbm_exynos_la_SOURCES = \
+ tbm_backend_log.c \
+ tbm_backend_exynos.c
+++ /dev/null
-AM_CFLAGS = \
- @LIBHAL_BACKEND_TBM_EXYNOS_CFLAGS@ \
- -I$(top_srcdir) \
- -I$(top_srcdir)/src/libhal-backend-tbm-exynos
-
-libhal_backend_tbm_exynos_la_LTLIBRARIES = libhal-backend-tbm-exynos.la
-libhal_backend_tbm_exynos_ladir = @HAL_LIBDIR@
-libhal_backend_tbm_exynos_la_LIBADD = @LIBHAL_BACKEND_TBM_EXYNOS_LIBS@
-
-libhal_backend_tbm_exynos_la_SOURCES = \
- tbm_backend_log.c \
- tbm_backend_exynos.c
+++ /dev/null
-/**************************************************************************
-
-libtbm_exynos
-
-Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
-
-Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <libudev.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <xf86drm.h>
-#include <exynos_drm.h>
-#include <pthread.h>
-#include <hal-common.h>
-#include <hal-tbm-types.h>
-#include <hal-tbm-interface.h>
-#include <system_info.h>
-#include "tbm_bufmgr_tgl.h"
-#include "tbm_backend_log.h"
-
-#define EXYNOS_DRM_NAME "exynos"
-
-#define TBM_COLOR_FORMAT_COUNT 4
-#define STRERR_BUFSIZE 128
-#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
-static unsigned int g_tbm_surface_alignment_plane;
-static unsigned int g_tbm_surface_alignment_pitch_rgb;
-
-#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
-#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
-
-#define SZ_1M 0x00100000
-#define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
-#define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
-#define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
-#define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
-#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
-#define S5P_FIMV_NV12MT_HALIGN 128
-#define S5P_FIMV_NV12MT_VALIGN 64
-
-/* cache control at backend */
-static unsigned int g_enable_cache_ctrl = 0;
-
-struct dma_buf_info {
- unsigned long size;
- unsigned int fence_supported;
- unsigned int padding;
-};
-
-#define DMA_BUF_ACCESS_READ 0x1
-#define DMA_BUF_ACCESS_WRITE 0x2
-#define DMA_BUF_ACCESS_DMA 0x4
-#define DMA_BUF_ACCESS_MAX 0x8
-
-#define DMA_FENCE_LIST_MAX 5
-
-struct dma_buf_fence {
- unsigned long ctx;
- unsigned int type;
-};
-
-#define DMABUF_IOCTL_BASE 'F'
-#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
-
-#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
-#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
-#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
-
-/* tgl key values */
-#define GLOBAL_KEY ((unsigned int)(-1))
-/* TBM_CACHE */
-#define TBM_EXYNOS_CACHE_INV 0x01 /**< cache invalidate */
-#define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */
-#define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */
-#define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */
-#define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */
-
-enum {
- DEVICE_NONE = 0,
- DEVICE_CA, /* cache aware device */
- DEVICE_CO /* cache oblivious device */
-};
-
-typedef union _tbm_bo_cache_state tbm_bo_cache_state;
-
-union _tbm_bo_cache_state {
- unsigned int val;
- struct {
- unsigned int cntFlush:16; /*Flush all index for sync */
- unsigned int isCached:1;
- unsigned int isDirtied:2;
- } data;
-};
-
-typedef struct _tbm_exynos_bufmgr tbm_exynos_bufmgr;
-typedef struct _tbm_exynos_bo tbm_exynos_bo;
-
-/* tbm buffor object for exynos */
-struct _tbm_exynos_bo {
- int fd;
-
- unsigned int name; /* FLINK ID */
-
- unsigned int gem; /* GEM Handle */
-
- unsigned int dmabuf; /* fd for dmabuf */
-
- void *pBase; /* virtual address */
-
- unsigned int size;
-
- unsigned int flags_exynos;
- unsigned int flags_tbm;
-
- pthread_mutex_t mutex;
- struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
- int device;
- int opt;
-
- tbm_bo_cache_state cache_state;
- unsigned int map_cnt;
- int last_map_device;
-
- tbm_exynos_bufmgr *bufmgr_data;
-};
-
-/* tbm bufmgr private for exynos */
-struct _tbm_exynos_bufmgr {
- int fd;
- int isLocal;
- void *hashBos;
-
- int use_dma_fence;
-
- int tgl_fd;
-};
-
-static char *STR_DEVICE[] = {
- "DEF",
- "CPU",
- "2D",
- "3D",
- "MM"
-};
-
-static char *STR_OPT[] = {
- "NONE",
- "RD",
- "WR",
- "RDWR"
-};
-
-static inline int
-_tgl_init(int fd, unsigned int key)
-{
- struct tgl_reg_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.timeout_ms = 1000;
-
- err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_destroy(int fd, unsigned int key)
-{
- struct tgl_reg_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_lock(int fd, unsigned int key, int opt)
-{
- struct tgl_lock_data data;
- enum tgl_type_data tgl_type;
- int err;
- char buf[STRERR_BUFSIZE];
-
- switch (opt) {
- case HAL_TBM_OPTION_READ:
- tgl_type = TGL_TYPE_READ;
- break;
- case HAL_TBM_OPTION_WRITE:
- tgl_type = TGL_TYPE_WRITE;
- break;
- default:
- tgl_type = TGL_TYPE_NONE;
- break;
- }
-
- data.key = key;
- data.type = tgl_type;
-
- err = ioctl(fd, TGL_IOCTL_LOCK, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d opt:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_unlock(int fd, unsigned int key)
-{
- struct tgl_lock_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.type = TGL_TYPE_NONE;
-
- err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_set_data(int fd, unsigned int key, unsigned int val)
-{
- struct tgl_usr_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.data1 = val;
-
- err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline unsigned int
-_tgl_get_data(int fd, unsigned int key)
-{
- struct tgl_usr_data data = { 0, };
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
-
- err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return data.data1;
-}
-
-static int
-_exynos_cache_flush(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data, int flags)
-{
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
-
- /* cache flush is managed by kernel side when using dma-fence. */
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- struct drm_exynos_gem_cache_op cache_op = {0, };
- int ret;
-
- /* if bo_data is null, do cache_flush_all */
- if (bo_data) {
- cache_op.flags = 0;
- cache_op.usr_addr = (uint64_t)((uintptr_t)bo_data->pBase);
- cache_op.size = bo_data->size;
- } else {
- flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
- cache_op.flags = 0;
- cache_op.usr_addr = 0;
- cache_op.size = 0;
- }
-
- if (flags & TBM_EXYNOS_CACHE_INV) {
- if (flags & TBM_EXYNOS_CACHE_ALL)
- cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
- else
- cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
- }
-
- if (flags & TBM_EXYNOS_CACHE_CLN) {
- if (flags & TBM_EXYNOS_CACHE_ALL)
- cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
- else
- cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
- }
-
- if (flags & TBM_EXYNOS_CACHE_ALL)
- cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
-
- ret = drmCommandWriteRead(bufmgr_data->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
- sizeof(cache_op));
- if (ret) {
- TBM_BACKEND_ERR("fail to flush the cache.\n");
- return 0;
- }
-
- return 1;
-}
-
-static int
-_bo_init_cache_state(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data, int import)
-{
- /* check whether cache control do or not */
- if (!g_enable_cache_ctrl)
- return 1;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
-
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- _tgl_init(bufmgr_data->tgl_fd, bo_data->name);
-
- tbm_bo_cache_state cache_state;
-
- if (import == 0) {
- cache_state.data.isDirtied = DEVICE_NONE;
- cache_state.data.isCached = 0;
- cache_state.data.cntFlush = 0;
-
- _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name, cache_state.val);
- }
-
- return 1;
-}
-
-static int
-_bo_set_cache_state(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data, int device, int opt)
-{
- /* check whether cache control do or not */
- if (!g_enable_cache_ctrl)
- return 1;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
-
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- char need_flush = 0;
- unsigned short cntFlush = 0;
-
- if (!(bo_data->flags_exynos & EXYNOS_BO_CACHABLE))
- return 1;
-
- /* get cache state of a bo_data */
- bo_data->cache_state.val = _tgl_get_data(bufmgr_data->tgl_fd,
- bo_data->name);
-
- /* get global cache flush count */
- cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
-
- if (device == HAL_TBM_DEVICE_CPU) {
- if (bo_data->cache_state.data.isDirtied == DEVICE_CO &&
- bo_data->cache_state.data.isCached)
- need_flush = TBM_EXYNOS_CACHE_INV;
-
- bo_data->cache_state.data.isCached = 1;
- if (opt & HAL_TBM_OPTION_WRITE)
- bo_data->cache_state.data.isDirtied = DEVICE_CA;
- else {
- if (bo_data->cache_state.data.isDirtied != DEVICE_CA)
- bo_data->cache_state.data.isDirtied = DEVICE_NONE;
- }
- } else {
- if (bo_data->cache_state.data.isDirtied == DEVICE_CA &&
- bo_data->cache_state.data.isCached &&
- bo_data->cache_state.data.cntFlush == cntFlush)
- need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
-
- if (opt & HAL_TBM_OPTION_WRITE)
- bo_data->cache_state.data.isDirtied = DEVICE_CO;
- else {
- if (bo_data->cache_state.data.isDirtied != DEVICE_CO)
- bo_data->cache_state.data.isDirtied = DEVICE_NONE;
- }
- }
-
- if (need_flush) {
- if (need_flush & TBM_EXYNOS_CACHE_ALL)
- _tgl_set_data(bufmgr_data->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
-
- /* call cache flush */
- _exynos_cache_flush(bufmgr_data, bo_data, need_flush);
-
- TBM_BACKEND_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
- bo_data->cache_state.data.isCached,
- bo_data->cache_state.data.isDirtied,
- need_flush,
- cntFlush);
- }
-
- return 1;
-}
-
-static int
-_bo_save_cache_state(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data)
-{
- /* check whether cache control do or not */
- if (!g_enable_cache_ctrl)
- return 1;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
-
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- unsigned short cntFlush = 0;
-
- /* get global cache flush count */
- cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
-
- /* save global cache flush count */
- bo_data->cache_state.data.cntFlush = cntFlush;
- _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name,
- bo_data->cache_state.val);
-
- return 1;
-}
-
-static void
-_bo_destroy_cache_state(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data)
-{
- /* check whether cache control do or not */
- if (!g_enable_cache_ctrl)
- return;
-
- TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
- TBM_BACKEND_RETURN_IF_FAIL(bo_data != NULL);
-
- if (bufmgr_data->use_dma_fence)
- return ;
-
- _tgl_destroy(bufmgr_data->tgl_fd, bo_data->name);
-}
-
-static int
-_bufmgr_init_cache_state(tbm_exynos_bufmgr *bufmgr_data)
-{
- /* check whether cache control do or not */
- if (!g_enable_cache_ctrl)
- return 1;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
-
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- /* open tgl fd for saving cache flush data */
- bufmgr_data->tgl_fd = open(tgl_devfile, O_RDWR);
-
- if (bufmgr_data->tgl_fd < 0) {
- bufmgr_data->tgl_fd = open(tgl_devfile1, O_RDWR);
- if (bufmgr_data->tgl_fd < 0) {
- TBM_BACKEND_ERR("fail to open global_lock:%s\n", tgl_devfile1);
- return 0;
- }
- }
-
- if (!_tgl_init(bufmgr_data->tgl_fd, GLOBAL_KEY)) {
- TBM_BACKEND_ERR("fail to initialize the tgl\n");
- close(bufmgr_data->tgl_fd);
- return 0;
- }
-
- return 1;
-}
-
-static void
-_bufmgr_deinit_cache_state(tbm_exynos_bufmgr *bufmgr_data)
-{
- /* check whether cache control do or not */
- if (!g_enable_cache_ctrl)
- return;
-
- TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
-
- if (bufmgr_data->use_dma_fence)
- return;
-
- if (bufmgr_data->tgl_fd >= 0)
- close(bufmgr_data->tgl_fd);
-}
-
-static int
-_tbm_exynos_open_drm()
-{
- int fd = -1;
-
- fd = drmOpen(EXYNOS_DRM_NAME, NULL);
- if (fd < 0) {
- TBM_BACKEND_ERR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME);
- }
-
- if (fd < 0) {
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
- const char *filepath;
- struct stat s;
- int ret;
-
- TBM_BACKEND_DBG("search drm-device by udev\n");
-
- udev = udev_new();
- if (!udev) {
- TBM_BACKEND_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- udev_enumerate_add_match_sysname(e, "card[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
- /* Not need unref device_parent. device_parent and device have same refcnt */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
- drm_device = device;
- TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
-
- if (!drm_device) {
- TBM_BACKEND_ERR("failed to find device\n");
- udev_unref(udev);
- return -1;
- }
-
- /* Get device file path. */
- filepath = udev_device_get_devnode(drm_device);
- if (!filepath) {
- TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- /* Open DRM device file and check validity. */
- fd = open(filepath, O_RDWR | O_CLOEXEC);
- if (fd < 0) {
- TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- ret = fstat(fd, &s);
- if (ret) {
- TBM_BACKEND_ERR("fstat() failed %s.\n");
- close(fd);
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- udev_device_unref(drm_device);
- udev_unref(udev);
- }
-
- return fd;
-}
-
-#if 0
-static int
-_get_render_node(int is_master)
-{
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
- const char *filepath;
- struct stat s;
- int fd = -1;
- int ret;
-
- TBM_BACKEND_DBG("search drm-device by udev(is_master:%d)\n", is_master);
-
- udev = udev_new();
- if (!udev) {
- TBM_BACKEND_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- if (is_master)
- udev_enumerate_add_match_sysname(e, "card[0-9]*");
- else
- udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
- /* Not need unref device_parent. device_parent and device have same refcnt */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
- drm_device = device;
- TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
-
- if (!drm_device) {
- TBM_BACKEND_ERR("failed to find device\n");
- udev_unref(udev);
- return -1;
- }
-
- /* Get device file path. */
- filepath = udev_device_get_devnode(drm_device);
- if (!filepath) {
- TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- /* Open DRM device file and check validity. */
- fd = open(filepath, O_RDWR | O_CLOEXEC);
- if (fd < 0) {
- TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- ret = fstat(fd, &s);
- if (ret) {
- TBM_BACKEND_ERR("fstat() failed %s.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- close(fd);
- return -1;
- }
-
- udev_device_unref(drm_device);
- udev_unref(udev);
-
- return fd;
-}
-#endif
-
-static unsigned int
-_get_exynos_flag_from_tbm(unsigned int ftbm)
-{
- unsigned int flags = 0;
-
- if (ftbm & HAL_TBM_BO_SCANOUT)
- flags |= EXYNOS_BO_CONTIG;
- else
- flags |= EXYNOS_BO_NONCONTIG;
-
- if (ftbm & HAL_TBM_BO_WC)
- flags |= EXYNOS_BO_WC;
- else if (ftbm & HAL_TBM_BO_NONCACHABLE)
- flags |= EXYNOS_BO_NONCACHABLE;
- else
- flags |= EXYNOS_BO_CACHABLE;
-
- return flags;
-}
-
-static unsigned int
-_get_tbm_flag_from_exynos(unsigned int fexynos)
-{
- unsigned int flags = 0;
-
- if (fexynos & EXYNOS_BO_NONCONTIG)
- flags |= HAL_TBM_BO_DEFAULT;
- else
- flags |= HAL_TBM_BO_SCANOUT;
-
- if (fexynos & EXYNOS_BO_WC)
- flags |= HAL_TBM_BO_WC;
- else if (fexynos & EXYNOS_BO_CACHABLE)
- flags |= HAL_TBM_BO_DEFAULT;
- else
- flags |= HAL_TBM_BO_NONCACHABLE;
-
- return flags;
-}
-
-static unsigned int
-_get_name(int fd, unsigned int gem)
-{
- struct drm_gem_flink arg = {0,};
-
- arg.handle = gem;
- if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
- TBM_BACKEND_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
- return 0;
- }
-
- return (unsigned int)arg.name;
-}
-
-static hal_tbm_bo_handle
-_exynos_bo_handle(tbm_exynos_bo *bo_data, int device)
-{
- hal_tbm_bo_handle bo_handle;
-
- memset(&bo_handle, 0x0, sizeof(uint64_t));
-
- switch (device) {
- case HAL_TBM_DEVICE_DEFAULT:
- case HAL_TBM_DEVICE_2D:
- bo_handle.u32 = (uint32_t)bo_data->gem;
- break;
- case HAL_TBM_DEVICE_CPU:
- if (!bo_data->pBase) {
- struct drm_exynos_gem_map arg = {0,};
- void *map = NULL;
-
- arg.handle = bo_data->gem;
- if (drmCommandWriteRead(bo_data->fd, DRM_EXYNOS_GEM_MAP, &arg, sizeof(arg))) {
- TBM_BACKEND_ERR("Cannot map_exynos gem=%d\n", bo_data->gem);
- return (hal_tbm_bo_handle) NULL;
- }
-
- map = mmap(NULL, bo_data->size, PROT_READ | PROT_WRITE, MAP_SHARED,
- bo_data->fd, arg.offset);
- if (map == MAP_FAILED) {
- TBM_BACKEND_ERR("Cannot usrptr gem=%d\n", bo_data->gem);
- return (hal_tbm_bo_handle) NULL;
- }
- bo_data->pBase = map;
- }
- bo_handle.ptr = (void *)bo_data->pBase;
- break;
- case HAL_TBM_DEVICE_3D:
- case HAL_TBM_DEVICE_MM:
- if (!bo_data->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
- return (hal_tbm_bo_handle) NULL;
- }
- bo_data->dmabuf = arg.fd;
- }
-
- bo_handle.u32 = (uint32_t)bo_data->dmabuf;
- break;
- default:
- TBM_BACKEND_ERR("Not supported device:%d\n", device);
- bo_handle.ptr = (void *) NULL;
- break;
- }
-
- return bo_handle;
-}
-
-static int
-_new_calc_plane_nv12(int width, int height)
-{
- int mbX, mbY;
-
- mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
- mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
-
- if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
- mbY = (mbY + 1) / 2 * 2;
-
- return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
- S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
-}
-
-static int
-_calc_yplane_nv12(int width, int height)
-{
- int mbX, mbY;
-
- mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
- mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
-
- return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
-}
-
-static int
-_calc_uvplane_nv12(int width, int height)
-{
- int mbX, mbY;
-
- mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
- mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
-
- return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
-}
-
-static int
-_new_calc_yplane_nv12(int width, int height)
-{
- return SIZE_ALIGN(_new_calc_plane_nv12(width,
- height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
- TBM_SURFACE_ALIGNMENT_PLANE_NV12);
-}
-
-static int
-_new_calc_uvplane_nv12(int width, int height)
-{
- return SIZE_ALIGN((_new_calc_plane_nv12(width,
- height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
- TBM_SURFACE_ALIGNMENT_PLANE_NV12);
-}
-
-static hal_tbm_bufmgr_capability
-tbm_exynos_bufmgr_get_capabilities(hal_tbm_bufmgr *bufmgr, hal_tbm_error *error)
-{
- hal_tbm_bufmgr_capability capabilities = HAL_TBM_BUFMGR_CAPABILITY_NONE;
-
- capabilities = HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY | HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD;
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return capabilities;
-}
-
-static hal_tbm_error
-tbm_exynos_bufmgr_get_supported_formats(hal_tbm_bufmgr *bufmgr,
- uint32_t **formats, uint32_t *num)
-{
- const static uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
- HAL_TBM_FORMAT_ARGB8888,
- HAL_TBM_FORMAT_XRGB8888,
- HAL_TBM_FORMAT_NV12,
- HAL_TBM_FORMAT_YUV420
- };
-
- tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
- uint32_t *color_formats;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
-
- color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
- if (color_formats == NULL)
- return HAL_TBM_ERROR_OUT_OF_MEMORY;
-
- memcpy(color_formats, tbm_exynos_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
-
- *formats = color_formats;
- *num = TBM_COLOR_FORMAT_COUNT;
-
- TBM_BACKEND_DBG("supported format count = %d\n", *num);
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static hal_tbm_error
-tbm_exynos_bufmgr_get_plane_data(hal_tbm_bufmgr *bufmgr,
- hal_tbm_format format, int plane_idx, int width,
- int height, uint32_t *size, uint32_t *offset,
- uint32_t *pitch, int *bo_idx)
-{
- tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
- int bpp;
- int _offset = 0;
- int _pitch = 0;
- int _size = 0;
- int _bo_idx = 0;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
-
- switch (format) {
- /* 16 bpp RGB */
- case HAL_TBM_FORMAT_XRGB4444:
- case HAL_TBM_FORMAT_XBGR4444:
- case HAL_TBM_FORMAT_RGBX4444:
- case HAL_TBM_FORMAT_BGRX4444:
- case HAL_TBM_FORMAT_ARGB4444:
- case HAL_TBM_FORMAT_ABGR4444:
- case HAL_TBM_FORMAT_RGBA4444:
- case HAL_TBM_FORMAT_BGRA4444:
- case HAL_TBM_FORMAT_XRGB1555:
- case HAL_TBM_FORMAT_XBGR1555:
- case HAL_TBM_FORMAT_RGBX5551:
- case HAL_TBM_FORMAT_BGRX5551:
- case HAL_TBM_FORMAT_ARGB1555:
- case HAL_TBM_FORMAT_ABGR1555:
- case HAL_TBM_FORMAT_RGBA5551:
- case HAL_TBM_FORMAT_BGRA5551:
- case HAL_TBM_FORMAT_RGB565:
- bpp = 16;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- break;
- /* 24 bpp RGB */
- case HAL_TBM_FORMAT_RGB888:
- case HAL_TBM_FORMAT_BGR888:
- bpp = 24;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- break;
- /* 32 bpp RGB */
- case HAL_TBM_FORMAT_XRGB8888:
- case HAL_TBM_FORMAT_XBGR8888:
- case HAL_TBM_FORMAT_RGBX8888:
- case HAL_TBM_FORMAT_BGRX8888:
- case HAL_TBM_FORMAT_ARGB8888:
- case HAL_TBM_FORMAT_ABGR8888:
- case HAL_TBM_FORMAT_RGBA8888:
- case HAL_TBM_FORMAT_BGRA8888:
- bpp = 32;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- break;
-
- /* packed YCbCr */
- case HAL_TBM_FORMAT_YUYV:
- case HAL_TBM_FORMAT_YVYU:
- case HAL_TBM_FORMAT_UYVY:
- case HAL_TBM_FORMAT_VYUY:
- case HAL_TBM_FORMAT_AYUV:
- bpp = 32;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- break;
-
- /*
- * 2 plane YCbCr
- * index 0 = Y plane, [7:0] Y
- * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
- * or
- * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
- */
- case HAL_TBM_FORMAT_NV12:
- case HAL_TBM_FORMAT_NV21:
- bpp = 12;
- if (plane_idx == 0) {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = MAX(_calc_yplane_nv12(width, height),
- _new_calc_yplane_nv12(width, height));
- _bo_idx = 0;
- } else if (plane_idx == 1) {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = MAX(_calc_uvplane_nv12(width, height),
- _new_calc_uvplane_nv12(width, height));
- _bo_idx = 1;
- }
- break;
- case HAL_TBM_FORMAT_NV16:
- case HAL_TBM_FORMAT_NV61:
- bpp = 16;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if( plane_idx ==1 )*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
-
- /*
- * 3 plane YCbCr
- * index 0: Y plane, [7:0] Y
- * index 1: Cb plane, [7:0] Cb
- * index 2: Cr plane, [7:0] Cr
- * or
- * index 1: Cr plane, [7:0] Cr
- * index 2: Cb plane, [7:0] Cb
- */
-
- /*
- * NATIVE_BUFFER_FORMAT_YV12
- * NATIVE_BUFFER_FORMAT_I420
- */
- case HAL_TBM_FORMAT_YUV410:
- case HAL_TBM_FORMAT_YVU410:
- bpp = 9;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
- _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
- _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
- case HAL_TBM_FORMAT_YUV411:
- case HAL_TBM_FORMAT_YVU411:
- case HAL_TBM_FORMAT_YUV420:
- case HAL_TBM_FORMAT_YVU420:
- bpp = 12;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
- case HAL_TBM_FORMAT_YUV422:
- case HAL_TBM_FORMAT_YVU422:
- bpp = 16;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
- case HAL_TBM_FORMAT_YUV444:
- case HAL_TBM_FORMAT_YVU444:
- bpp = 24;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
- default:
- bpp = 0;
- break;
- }
-
- *size = _size;
- *offset = _offset;
- *pitch = _pitch;
- *bo_idx = _bo_idx;
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static hal_tbm_bo *
-tbm_exynos_bufmgr_alloc_bo(hal_tbm_bufmgr *bufmgr, unsigned int size,
- hal_tbm_bo_memory_type flags, hal_tbm_error *error)
-{
- tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
- tbm_exynos_bo *bo_data;
- unsigned int exynos_flags;
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr_data is null\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- bo_data = calloc(1, sizeof(struct _tbm_exynos_bo));
- if (!bo_data) {
- TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_data->bufmgr_data = bufmgr_data;
-
- exynos_flags = _get_exynos_flag_from_tbm(flags);
- if ((flags & HAL_TBM_BO_SCANOUT) &&
- size <= 4 * 1024) {
- exynos_flags |= EXYNOS_BO_NONCONTIG;
- }
-
- struct drm_exynos_gem_create arg = {0, };
-
- arg.size = (uint64_t)size;
- arg.flags = exynos_flags;
- if (drmCommandWriteRead(bufmgr_data->fd, DRM_EXYNOS_GEM_CREATE, &arg,
- sizeof(arg))) {
- TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
- (unsigned int)arg.size);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- bo_data->fd = bufmgr_data->fd;
- bo_data->gem = arg.handle;
- bo_data->size = size;
- bo_data->flags_tbm = flags;
- bo_data->flags_exynos = exynos_flags;
- bo_data->name = _get_name(bo_data->fd, bo_data->gem);
-
- if (!_bo_init_cache_state(bufmgr_data, bo_data, 0)) {
- TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- pthread_mutex_init(&bo_data->mutex, NULL);
-
- if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
- bo_data->dmabuf = arg.fd;
- }
-
- /* add bo_data to hash */
- if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
- TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- flags, exynos_flags,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_bo *)bo_data;
-}
-
-static hal_tbm_bo *
-tbm_exynos_bufmgr_import_fd(hal_tbm_bufmgr *bufmgr, hal_tbm_fd key, hal_tbm_error *error)
-{
- tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
- tbm_exynos_bo *bo_data;
- unsigned int gem = 0;
- unsigned int name;
- int ret;
- char buf[STRERR_BUFSIZE];
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr_data is null\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- /*getting handle from fd*/
- struct drm_prime_handle arg = {0, };
-
- arg.fd = key;
- arg.flags = 0;
- if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
- TBM_BACKEND_ERR("Cannot get gem handle from fd:%d (%s)\n",
- arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
- gem = arg.handle;
-
- name = _get_name(bufmgr_data->fd, gem);
- if (!name) {
- TBM_BACKEND_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
- gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- ret = drmHashLookup(bufmgr_data->hashBos, name, (void **)&bo_data);
- if (ret == 0) {
- if (gem == bo_data->gem) {
- if (error)
- *error = HAL_TBM_ERROR_NONE;
- return bo_data;
- }
- }
-
- /* Determine size of bo_data. The fd-to-handle ioctl really should
- * return the size, but it doesn't. If we have kernel 3.12 or
- * later, we can lseek on the prime fd to get the size. Older
- * kernels will just fail, in which case we fall back to the
- * provided (estimated or guess size).
- */
- unsigned int real_size = -1;
- struct drm_exynos_gem_info info = {0, };
-
- real_size = lseek(key, 0, SEEK_END);
-
- info.handle = gem;
- if (drmCommandWriteRead(bufmgr_data->fd,
- DRM_EXYNOS_GEM_GET,
- &info,
- sizeof(struct drm_exynos_gem_info))) {
- TBM_BACKEND_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
- gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- if (real_size == -1)
- real_size = info.size;
-
- bo_data = calloc(1, sizeof(struct _tbm_exynos_bo));
- if (!bo_data) {
- TBM_BACKEND_ERR("bo_data:%p fail to allocate the bo_data\n", bo_data);
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_data->bufmgr_data = bufmgr_data;
-
- bo_data->fd = bufmgr_data->fd;
- bo_data->gem = gem;
- bo_data->size = real_size;
- bo_data->flags_exynos = info.flags;
- bo_data->flags_tbm = _get_tbm_flag_from_exynos(bo_data->flags_exynos);
- bo_data->name = name;
-
- if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
- TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- /* add bo_data to hash */
- if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
- TBM_BACKEND_ERR("bo_data:%p Cannot insert bo_data to Hash(%d) from gem:%d, fd:%d\n",
- bo_data, bo_data->name, gem, key);
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- key,
- bo_data->flags_tbm, bo_data->flags_exynos,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_bo *)bo_data;
-}
-
-static hal_tbm_bo *
-tbm_exynos_bufmgr_import_key(hal_tbm_bufmgr *bufmgr, hal_tbm_key key, hal_tbm_error *error)
-{
- tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
- tbm_exynos_bo *bo_data;
- int ret;
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr_data is null\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- ret = drmHashLookup(bufmgr_data->hashBos, key, (void **)&bo_data);
- if (ret == 0) {
- if (error)
- *error = HAL_TBM_ERROR_NONE;
- return (hal_tbm_bo *)bo_data;
- }
-
- struct drm_gem_open arg = {0, };
- struct drm_exynos_gem_info info = {0, };
-
- arg.name = key;
- if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
- TBM_BACKEND_ERR("Cannot open gem name=%d\n", key);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- info.handle = arg.handle;
- if (drmCommandWriteRead(bufmgr_data->fd,
- DRM_EXYNOS_GEM_GET,
- &info,
- sizeof(struct drm_exynos_gem_info))) {
- TBM_BACKEND_ERR("Cannot get gem info=%d\n", key);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- bo_data = calloc(1, sizeof(struct _tbm_exynos_bo));
- if (!bo_data) {
- TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_data->bufmgr_data = bufmgr_data;
-
- bo_data->fd = bufmgr_data->fd;
- bo_data->gem = arg.handle;
- bo_data->size = arg.size;
- bo_data->flags_exynos = info.flags;
- bo_data->name = key;
- bo_data->flags_tbm = _get_tbm_flag_from_exynos(bo_data->flags_exynos);
-
- if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
- TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- if (!bo_data->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_BACKEND_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_data->gem);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- free(bo_data);
- return NULL;
- }
- bo_data->dmabuf = arg.fd;
- }
-
- /* add bo_data to hash */
- if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
- TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- bo_data->flags_tbm, bo_data->flags_exynos,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_bo *)bo_data;
-}
-
-static void
-tbm_exynos_bo_free(hal_tbm_bo *bo)
-{
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
- tbm_exynos_bo *temp;
- tbm_exynos_bufmgr *bufmgr_data;
- char buf[STRERR_BUFSIZE];
- int ret;
-
- if (!bo_data)
- return;
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data)
- return;
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- bo_data->size);
-
- if (bo_data->pBase) {
- if (munmap(bo_data->pBase, bo_data->size) == -1) {
- TBM_BACKEND_ERR("bo_data:%p fail to munmap(%s)\n",
- bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
- }
- }
-
- /* close dmabuf */
- if (bo_data->dmabuf) {
- close(bo_data->dmabuf);
- bo_data->dmabuf = 0;
- }
-
- /* delete bo_data from hash */
- ret = drmHashLookup(bufmgr_data->hashBos, bo_data->name, (void **)&temp);
- if (ret == 0)
- drmHashDelete(bufmgr_data->hashBos, bo_data->name);
- else
- TBM_BACKEND_ERR("Cannot find bo_data to Hash(%d), ret=%d\n", bo_data->name, ret);
-
- if (temp != bo_data)
- TBM_BACKEND_ERR("hashBos probably has several BOs with same name!!!\n");
-
- _bo_destroy_cache_state(bufmgr_data, bo_data);
-
- /* Free gem handle */
- struct drm_gem_close arg = {0, };
-
- memset(&arg, 0, sizeof(arg));
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_GEM_CLOSE, &arg))
- TBM_BACKEND_ERR("bo_data:%p fail to gem close.(%s)\n",
- bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
-
- free(bo_data);
-}
-
-static int
-tbm_exynos_bo_get_size(hal_tbm_bo *bo, hal_tbm_error *error)
-{
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bo_data->size;
-}
-
-static hal_tbm_bo_memory_type
-tbm_exynos_bo_get_memory_type(hal_tbm_bo *bo, hal_tbm_error *error)
-{
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return HAL_TBM_BO_DEFAULT;
- }
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bo_data->flags_tbm;
-}
-
-static hal_tbm_bo_handle
-tbm_exynos_bo_get_handle(hal_tbm_bo *bo, hal_tbm_bo_device_type device, hal_tbm_error *error)
-{
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
- hal_tbm_bo_handle bo_handle;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- if (!bo_data->gem) {
- TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- TBM_BACKEND_DBG("bo_data:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- bo_data->flags_tbm, bo_data->flags_exynos,
- bo_data->size,
- STR_DEVICE[device]);
-
- /*Get mapped bo_handle*/
- bo_handle = _exynos_bo_handle(bo_data, device);
- if (bo_handle.ptr == NULL) {
- TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d\n",
- bo_data->gem, device);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return (hal_tbm_bo_handle) NULL;
- }
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bo_handle;
-}
-
-static hal_tbm_bo_handle
-tbm_exynos_bo_map(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
- hal_tbm_bo_access_option opt, hal_tbm_error *error)
-{
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
- hal_tbm_bo_handle bo_handle;
- tbm_exynos_bufmgr *bufmgr_data;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- if (!bo_data->gem) {
- TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, %s, %s\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- STR_DEVICE[device],
- STR_OPT[opt]);
-
- /*Get mapped bo_handle*/
- bo_handle = _exynos_bo_handle(bo_data, device);
- if (bo_handle.ptr == NULL) {
- TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
- bo_data->gem, device, opt);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return (hal_tbm_bo_handle) NULL;
- }
-
- if (bo_data->map_cnt == 0)
- _bo_set_cache_state(bufmgr_data, bo_data, device, opt);
-
- bo_data->last_map_device = device;
-
- bo_data->map_cnt++;
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bo_handle;
-}
-
-static hal_tbm_error
-tbm_exynos_bo_unmap(hal_tbm_bo *bo)
-{
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
- tbm_exynos_bufmgr *bufmgr_data;
-
- if (!bo_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- if (!bo_data->gem)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- bo_data->map_cnt--;
-
- if (bo_data->map_cnt == 0)
- _bo_save_cache_state(bufmgr_data, bo_data);
-
- /* check whether cache control do or not */
- if (g_enable_cache_ctrl && bo_data->last_map_device == HAL_TBM_DEVICE_CPU)
- _exynos_cache_flush(bufmgr_data, bo_data, TBM_EXYNOS_CACHE_FLUSH_ALL);
-
- bo_data->last_map_device = -1;
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf);
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static hal_tbm_error
-tbm_exynos_bo_lock(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
- hal_tbm_bo_access_option opt)
-{
-#ifndef ALWAYS_BACKEND_CTRL
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
- tbm_exynos_bufmgr *bufmgr_data;
- struct dma_buf_fence fence;
- struct flock filelock;
- int ret = 0;
- char buf[STRERR_BUFSIZE];
-
- if (!bo_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- if (device != TBM_DEVICE_3D && device != HAL_TBM_DEVICE_CPU) {
- TBM_BACKEND_DBG("Not support device type,\n");
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- memset(&fence, 0, sizeof(struct dma_buf_fence));
-
- /* Check if the given type is valid or not. */
- if (opt & HAL_TBM_OPTION_WRITE) {
- if (device == TBM_DEVICE_3D)
- fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
- } else if (opt & HAL_TBM_OPTION_READ) {
- if (device == TBM_DEVICE_3D)
- fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
- } else {
- TBM_BACKEND_ERR("Invalid argument\n");
- return HAL_TBM_ERROR_INVALID_PARAMETER;
- }
-
- /* Check if the tbm manager supports dma fence or not. */
- if (!bufmgr_data->use_dma_fence) {
- TBM_BACKEND_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return HAL_TBM_ERROR_INVALID_OPERATION;
-
- }
-
- if (device == TBM_DEVICE_3D) {
- ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
- if (ret < 0) {
- TBM_BACKEND_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
- } else {
- if (opt & HAL_TBM_OPTION_WRITE)
- filelock.l_type = F_WRLCK;
- else
- filelock.l_type = F_RDLCK;
-
- filelock.l_whence = SEEK_CUR;
- filelock.l_start = 0;
- filelock.l_len = 0;
-
- if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- pthread_mutex_lock(&bo_data->mutex);
-
- if (device == TBM_DEVICE_3D) {
- int i;
-
- for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
- if (bo_data->dma_fence[i].ctx == 0) {
- bo_data->dma_fence[i].type = fence.type;
- bo_data->dma_fence[i].ctx = fence.ctx;
- break;
- }
- }
-
- if (i == DMA_FENCE_LIST_MAX) {
- /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
- TBM_BACKEND_ERR("fence list is full\n");
- }
- }
-
- pthread_mutex_unlock(&bo_data->mutex);
-
- TBM_BACKEND_DBG("DMABUF_IOCTL_GET_FENCE! bo_data:%p, gem:%d(%d), fd:%ds\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf);
-#endif /* ALWAYS_BACKEND_CTRL */
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static hal_tbm_error
-tbm_exynos_bo_unlock(hal_tbm_bo *bo)
-{
-#ifndef ALWAYS_BACKEND_CTRL
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
- tbm_exynos_bufmgr *bufmgr_data = NULL;
- struct dma_buf_fence fence;
- struct flock filelock;
- unsigned int dma_type = 0;
- int ret = 0;
- char buf[STRERR_BUFSIZE];
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- if (bo_data->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
- dma_type = 1;
-
- if (!bo_data->dma_fence[0].ctx && dma_type) {
- TBM_BACKEND_DBG("FENCE not support or ignored,\n");
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- if (!bo_data->dma_fence[0].ctx && dma_type) {
- TBM_BACKEND_DBG("device type is not 3D/CPU,\n");
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- pthread_mutex_lock(&bo_data->mutex);
-
- if (dma_type) {
- fence.type = bo_data->dma_fence[0].type;
- fence.ctx = bo_data->dma_fence[0].ctx;
- int i;
-
- for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
- bo_data->dma_fence[i - 1].type = bo_data->dma_fence[i].type;
- bo_data->dma_fence[i - 1].ctx = bo_data->dma_fence[i].ctx;
- }
- bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
- bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
- }
- pthread_mutex_unlock(&bo_data->mutex);
-
- if (dma_type) {
- ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
- if (ret < 0) {
- TBM_BACKEND_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
- } else {
- filelock.l_type = F_UNLCK;
- filelock.l_whence = SEEK_CUR;
- filelock.l_start = 0;
- filelock.l_len = 0;
-
- if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- TBM_BACKEND_DBG("DMABUF_IOCTL_PUT_FENCE! bo_data:%p, gem:%d(%d), fd:%ds\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf);
-#endif /* ALWAYS_BACKEND_CTRL */
-
- return HAL_TBM_ERROR_NONE;
-}
-static hal_tbm_fd
-tbm_exynos_bo_export_fd(hal_tbm_bo *bo, hal_tbm_error *error)
-{
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
- int ret;
- char buf[STRERR_BUFSIZE];
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return -1;
- }
-
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- ret = drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
- if (ret) {
- TBM_BACKEND_ERR("bo_data:%p Cannot dmabuf=%d (%s)\n",
- bo_data, bo_data->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return (hal_tbm_fd) ret;
- }
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- arg.fd,
- bo_data->flags_tbm, bo_data->flags_exynos,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_fd)arg.fd;
-}
-
-static hal_tbm_key
-tbm_exynos_bo_export_key(hal_tbm_bo *bo, hal_tbm_error *error)
-{
- tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
-
- if (!bo_data->name) {
- bo_data->name = _get_name(bo_data->fd, bo_data->gem);
- if (!bo_data->name) {
- TBM_BACKEND_ERR("error Cannot get name\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
- }
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- bo_data->flags_tbm, bo_data->flags_exynos,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_key)bo_data->name;
-}
-
-static hal_tbm_error
-_tbm_exynos_authenticated_drm_fd_handler(hal_tbm_fd auth_fd, void *user_data)
-{
- tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)user_data;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
-
- bufmgr_data->fd = auth_fd;
- TBM_BACKEND_INFO("Get the authenticated drm_fd(%d)!\n", bufmgr_data->fd);
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static int
-hal_backend_tbm_exynos_exit(void *data)
-{
- hal_tbm_backend_data *backend_data = (hal_tbm_backend_data *)data;
- tbm_exynos_bufmgr *bufmgr_data;
- unsigned long key;
- void *value;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(backend_data != NULL, -1);
-
- bufmgr_data = (tbm_exynos_bufmgr *)backend_data->bufmgr;
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, -1);
-
- if (backend_data->bufmgr_funcs)
- free(backend_data->bufmgr_funcs);
- if (backend_data->bo_funcs)
- free(backend_data->bo_funcs);
-
- if (bufmgr_data->hashBos) {
- while (drmHashFirst(bufmgr_data->hashBos, &key, &value) > 0) {
- free(value);
- drmHashDelete(bufmgr_data->hashBos, key);
- }
-
- drmHashDestroy(bufmgr_data->hashBos);
- bufmgr_data->hashBos = NULL;
- }
-
- _bufmgr_deinit_cache_state(bufmgr_data);
-
- close(bufmgr_data->fd);
-
- free(backend_data->bufmgr);
- free(backend_data);
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static int
-hal_backend_tbm_exynos_init(void **data)
-{
- hal_tbm_backend_data *backend_data = NULL;
- hal_tbm_bufmgr_funcs *bufmgr_funcs = NULL;
- hal_tbm_bo_funcs *bo_funcs = NULL;
- tbm_exynos_bufmgr *bufmgr_data = NULL;
- int drm_fd = -1;
- int fp;
- char *value = NULL;
-
- /* allocate a hal_tbm_backend_data */
- backend_data = calloc(1, sizeof(struct _hal_tbm_backend_data));
- if (!backend_data) {
- TBM_BACKEND_ERR("fail to alloc backend_data!\n");
- *data = NULL;
- return -1;
- }
- *data = backend_data;
-
- /* allocate a hal_tbm_bufmgr */
- bufmgr_data = calloc(1, sizeof(struct _tbm_exynos_bufmgr));
- if (!bufmgr_data) {
- TBM_BACKEND_ERR("fail to alloc bufmgr_data!\n");
- goto fail_alloc_bufmgr_data;
- }
- backend_data->bufmgr = (hal_tbm_bufmgr *)bufmgr_data;
-
- // open drm_fd
- drm_fd = _tbm_exynos_open_drm();
- if (drm_fd < 0) {
- TBM_BACKEND_ERR("fail to open drm!\n");
- goto fail_open_drm;
- }
-
- // set true when backend has a drm_device.
- backend_data->has_drm_device = 1;
-
- // check if drm_fd is master_drm_fd.
- if (drmIsMaster(drm_fd)) {
- // drm_fd is a master_drm_fd.
- backend_data->drm_info.drm_fd = drm_fd;
- backend_data->drm_info.is_master = 1;
-
- bufmgr_data->fd = drm_fd;
- TBM_BACKEND_INFO("Get the master drm_fd(%d)!\n", bufmgr_data->fd);
- } else {
- // drm_fd is not a master_drm_fd.
- // request authenticated fd
- close(drm_fd);
- backend_data->drm_info.drm_fd = -1;
- backend_data->drm_info.is_master = 0;
- backend_data->drm_info.auth_drm_fd_func = _tbm_exynos_authenticated_drm_fd_handler;
- backend_data->drm_info.user_data = bufmgr_data;
-
- TBM_BACKEND_INFO("A backend requests an authenticated drm_fd.\n");
- }
-
-	// Check whether the kernel supports dma fence or not.
- fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
- if (fp != -1) {
- char buf[1];
- int length = read(fp, buf, 1);
-
- if (length == 1 && buf[0] == '1')
- bufmgr_data->use_dma_fence = 1;
-
- close(fp);
- }
-
-	/* Get the model name from capi-system-info.
-	 * The alignment_plane and alignment_pitch_rgb differ according to the target.
-	 * A stride issue will occur when the right alignment_plane and alignment_pitch_rgb
-	 * are not set for the backend.
-	 */
- if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) {
- TBM_BACKEND_ERR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n");
-		TBM_BACKEND_ERR("The right value may not be set on the libtbm-exynos backend.\n");
- } else {
- if (!strncmp(value, "TW1", 4)) {
- g_tbm_surface_alignment_plane = 8;
- g_tbm_surface_alignment_pitch_rgb = 8;
- g_enable_cache_ctrl = 1;
- } else {
- g_tbm_surface_alignment_plane = 64;
- g_tbm_surface_alignment_pitch_rgb = 64;
- }
- }
-
- free(value);
-
- if (!_bufmgr_init_cache_state(bufmgr_data)) {
- TBM_BACKEND_ERR("fail to init bufmgr cache state\n");
- goto fail_init_cache_state;
- }
-
- /*Create Hash Table*/
- bufmgr_data->hashBos = drmHashCreate();
-
- /* alloc and register bufmgr_funcs */
- bufmgr_funcs = calloc(1, sizeof(struct _hal_tbm_bufmgr_funcs));
- if (!bufmgr_funcs) {
- TBM_BACKEND_ERR("fail to alloc bufmgr_funcs!\n");
- goto fail_alloc_bufmgr_funcs;
- }
- backend_data->bufmgr_funcs = bufmgr_funcs;
-
- bufmgr_funcs->bufmgr_get_capabilities = tbm_exynos_bufmgr_get_capabilities;
- bufmgr_funcs->bufmgr_get_supported_formats = tbm_exynos_bufmgr_get_supported_formats;
- bufmgr_funcs->bufmgr_get_plane_data = tbm_exynos_bufmgr_get_plane_data;
- bufmgr_funcs->bufmgr_alloc_bo = tbm_exynos_bufmgr_alloc_bo;
- bufmgr_funcs->bufmgr_alloc_bo_with_format = NULL;
- bufmgr_funcs->bufmgr_import_fd = tbm_exynos_bufmgr_import_fd;
- bufmgr_funcs->bufmgr_import_key = tbm_exynos_bufmgr_import_key;
-
- /* alloc and register bo_funcs */
- bo_funcs = calloc(1, sizeof(struct _hal_tbm_bo_funcs));
- if (!bo_funcs) {
- TBM_BACKEND_ERR("fail to alloc bo_funcs!\n");
- goto fail_alloc_bo_funcs;
- }
- backend_data->bo_funcs = bo_funcs;
-
- bo_funcs->bo_free = tbm_exynos_bo_free;
- bo_funcs->bo_get_size = tbm_exynos_bo_get_size;
- bo_funcs->bo_get_memory_types = tbm_exynos_bo_get_memory_type;
- bo_funcs->bo_get_handle = tbm_exynos_bo_get_handle;
- bo_funcs->bo_map = tbm_exynos_bo_map;
- bo_funcs->bo_unmap = tbm_exynos_bo_unmap;
- bo_funcs->bo_lock = tbm_exynos_bo_lock;
- bo_funcs->bo_unlock = tbm_exynos_bo_unlock;
- bo_funcs->bo_export_fd = tbm_exynos_bo_export_fd;
- bo_funcs->bo_export_key = tbm_exynos_bo_export_key;
-
- TBM_BACKEND_DBG("drm_fd:%d\n", bufmgr_data->fd);
-
- return HAL_TBM_ERROR_NONE;
-
-fail_alloc_bo_funcs:
- free(bufmgr_funcs);
-fail_alloc_bufmgr_funcs:
- _bufmgr_deinit_cache_state(bufmgr_data);
- if (bufmgr_data->hashBos)
- drmHashDestroy(bufmgr_data->hashBos);
-fail_init_cache_state:
- close(bufmgr_data->fd);
-fail_open_drm:
- free(bufmgr_data);
-fail_alloc_bufmgr_data:
- free(backend_data);
-
- *data = NULL;
-
- return -1;
-}
-
-hal_backend hal_backend_tbm_data = {
- "exynos",
- "Samsung",
- HAL_ABI_VERSION_TIZEN_6_5,
- hal_backend_tbm_exynos_init,
- hal_backend_tbm_exynos_exit
-};
+++ /dev/null
-/**************************************************************************
-
-libtbm_exynos
-
-Copyright 2021 Samsung Electronics co., Ltd. All Rights Reserved.
-
-Contact: SooChan Lim <sc1.lim@samsung.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#include "tbm_backend_log.h"
-
-#undef LOG_TAG
-#define LOG_TAG "TBM_BACKEND"
-
-unsigned int tbm_log_debug_level = TBM_BACKEND_LOG_LEVEL_INFO;
-
-static void
-_tbm_backend_log_dlog_print(int level, const char *fmt, va_list arg)
-{
- log_priority dlog_prio;
-
- switch (level) {
- case TBM_BACKEND_LOG_LEVEL_ERR:
- dlog_prio = DLOG_ERROR;
- break;
- case TBM_BACKEND_LOG_LEVEL_WRN:
- dlog_prio = DLOG_WARN;
- break;
- case TBM_BACKEND_LOG_LEVEL_INFO:
- dlog_prio = DLOG_INFO;
- break;
- case TBM_BACKEND_LOG_LEVEL_DBG:
- dlog_prio = DLOG_DEBUG;
- break;
- default:
- return;
- }
- __dlog_vprint(LOG_ID_SYSTEM, dlog_prio, LOG_TAG, fmt, arg);
-}
-
-void
-tbm_backend_log_print(int level, const char *fmt, ...)
-{
- va_list arg;
-
- if (level > tbm_log_debug_level)
- return;
-
- va_start(arg, fmt);
- _tbm_backend_log_dlog_print(level, fmt, arg);
- va_end(arg);
-}
-
+++ /dev/null
-/**************************************************************************
-
-libtbm_exynos
-
-Copyright 2021 Samsung Electronics co., Ltd. All Rights Reserved.
-
-Contact: SooChan Lim <sc1.lim@samsung.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#ifndef __TBM_BACKEND_LOG_H__
-#define __TBM_BACKEND_LOG_H__
-
-#include <sys/syscall.h>
-#include <time.h>
-#include <dlog.h>
-
-enum {
- TBM_BACKEND_LOG_LEVEL_NONE,
- TBM_BACKEND_LOG_LEVEL_ERR,
- TBM_BACKEND_LOG_LEVEL_WRN,
- TBM_BACKEND_LOG_LEVEL_INFO,
- TBM_BACKEND_LOG_LEVEL_DBG,
-};
-
-
-/* log level */
-void tbm_backend_log_print(int level, const char *fmt, ...);
-
-#define TBM_BACKEND_DBG(fmt, args...) \
- do { \
- struct timespec ts; \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_DBG, "[%5d.%06d][%d][%s %d]"fmt, \
- (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
- (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
- } while (0)
-
-#define TBM_BACKEND_INFO(fmt, args...) \
- do { \
- struct timespec ts; \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_INFO, "[%5d.%06d][%d][%s %d]"fmt, \
- (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
- (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
- } while (0)
-
-#define TBM_BACKEND_WRN(fmt, args...) \
- do { \
- struct timespec ts; \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_WRN, "[%5d.%06d][%d][%s %d]"fmt, \
- (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
- (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
- } while (0)
-
-#define TBM_BACKEND_ERR(fmt, args...) \
- do { \
- struct timespec ts; \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_ERR, "[%5d.%06d][%d][%s %d]"fmt, \
- (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
- (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
- } while (0)
-
-#define TBM_BACKEND_RETURN_IF_FAIL(cond) {\
- if (!(cond)) {\
- TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
- return;\
- } \
-}
-#define TBM_BACKEND_RETURN_VAL_IF_FAIL(cond, val) {\
- if (!(cond)) {\
- TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
- return val;\
- } \
-}
-#define TBM_BACKEND_GOTO_VAL_IF_FAIL(cond, val) {\
- if (!(cond)) {\
- TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
- goto val;\
- } \
-}
-
-#endif /* __TBM_BACKEND_LOG_H__ */
+++ /dev/null
-/**************************************************************************
- *
- * libtbm
- *
- * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
- *
- * Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
- * Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * **************************************************************************/
-
-#ifndef __TBM_BUFMGR_TGL_H__
-#define __TBM_BUFMGR_TGL_H__
-
-#include <linux/ioctl.h>
-
-static char tgl_devfile[] = "/dev/slp_global_lock";
-static char tgl_devfile1[] = "/dev/tgl";
-
-#define TGL_IOCTL_BASE 0x32
-#define TGL_IO(nr) _IO(TGL_IOCTL_BASE, nr)
-#define TGL_IOR(nr, type) _IOR(TGL_IOCTL_BASE, nr, type)
-#define TGL_IOW(nr, type) _IOW(TGL_IOCTL_BASE, nr, type)
-#define TGL_IOWR(nr, type) _IOWR(TGL_IOCTL_BASE, nr, type)
-
-/**
- * struct tgl_ver_data - tgl version data structure
- * @major: major version
- * @minor: minor version
- */
-struct tgl_ver_data {
- unsigned int major;
- unsigned int minor;
-};
-
-/**
- * struct tgl_reg_data - tgl data structure
- * @key: lookup key
- * @timeout_ms: timeout value for waiting event
- */
-struct tgl_reg_data {
- unsigned int key;
- unsigned int timeout_ms;
-};
-
-enum tgl_type_data {
- TGL_TYPE_NONE = 0,
- TGL_TYPE_READ = (1 << 0),
- TGL_TYPE_WRITE = (1 << 1),
-};
-
-/**
- * struct tgl_lock_data - tgl lock data structure
- * @key: lookup key
- * @type: lock type that is in tgl_type_data
- */
-struct tgl_lock_data {
- unsigned int key;
- enum tgl_type_data type;
-};
-
-enum tgl_status_data {
- TGL_STATUS_UNLOCKED,
- TGL_STATUS_LOCKED,
-};
-
-/**
- * struct tgl_usr_data - tgl user data structure
- * @key: lookup key
- * @data1: user data 1
- * @data2: user data 2
- * @status: lock status that is in tgl_status_data
- */
-struct tgl_usr_data {
- unsigned int key;
- unsigned int data1;
- unsigned int data2;
- enum tgl_status_data status;
-};
-
-enum {
- _TGL_GET_VERSION,
- _TGL_REGISTER,
- _TGL_UNREGISTER,
- _TGL_LOCK,
- _TGL_UNLOCK,
- _TGL_SET_DATA,
- _TGL_GET_DATA,
-};
-
-/* get version information */
-#define TGL_IOCTL_GET_VERSION TGL_IOR(_TGL_GET_VERSION, struct tgl_ver_data)
-/* register key */
-#define TGL_IOCTL_REGISTER TGL_IOW(_TGL_REGISTER, struct tgl_reg_data)
-/* unregister key */
-#define TGL_IOCTL_UNREGISTER TGL_IOW(_TGL_UNREGISTER, struct tgl_reg_data)
-/* lock with key */
-#define TGL_IOCTL_LOCK TGL_IOW(_TGL_LOCK, struct tgl_lock_data)
-/* unlock with key */
-#define TGL_IOCTL_UNLOCK TGL_IOW(_TGL_UNLOCK, struct tgl_lock_data)
-/* set user data with key */
-#define TGL_IOCTL_SET_DATA TGL_IOW(_TGL_SET_DATA, struct tgl_usr_data)
-/* get user data with key */
-#define TGL_IOCTL_GET_DATA TGL_IOR(_TGL_GET_DATA, struct tgl_usr_data)
-
-/* indicate cache units. */
-enum e_drm_exynos_gem_cache_sel {
- EXYNOS_DRM_L1_CACHE = 1 << 0,
- EXYNOS_DRM_L2_CACHE = 1 << 1,
- EXYNOS_DRM_ALL_CORES = 1 << 2,
- EXYNOS_DRM_ALL_CACHES = EXYNOS_DRM_L1_CACHE |
- EXYNOS_DRM_L2_CACHE,
- EXYNOS_DRM_ALL_CACHES_CORES = EXYNOS_DRM_L1_CACHE |
- EXYNOS_DRM_L2_CACHE |
- EXYNOS_DRM_ALL_CORES,
- EXYNOS_DRM_CACHE_SEL_MASK = EXYNOS_DRM_ALL_CACHES_CORES
-};
-
-/* indicate cache operation types. */
-enum e_drm_exynos_gem_cache_op {
- EXYNOS_DRM_CACHE_INV_ALL = 1 << 3,
- EXYNOS_DRM_CACHE_INV_RANGE = 1 << 4,
- EXYNOS_DRM_CACHE_CLN_ALL = 1 << 5,
- EXYNOS_DRM_CACHE_CLN_RANGE = 1 << 6,
- EXYNOS_DRM_CACHE_FSH_ALL = EXYNOS_DRM_CACHE_INV_ALL |
- EXYNOS_DRM_CACHE_CLN_ALL,
- EXYNOS_DRM_CACHE_FSH_RANGE = EXYNOS_DRM_CACHE_INV_RANGE |
- EXYNOS_DRM_CACHE_CLN_RANGE,
- EXYNOS_DRM_CACHE_OP_MASK = EXYNOS_DRM_CACHE_FSH_ALL |
- EXYNOS_DRM_CACHE_FSH_RANGE
-};
-
-/**
- * A structure for cache operation.
- *
- * @usr_addr: user space address.
- * P.S. it SHOULD BE user space.
- * @size: buffer size for cache operation.
- * @flags: select cache unit and cache operation.
- * @gem_handle: a handle to a gem object.
- * this gem handle is needed for cache range operation to L2 cache.
- */
-struct drm_exynos_gem_cache_op {
- uint64_t usr_addr;
- unsigned int size;
- unsigned int flags;
- unsigned int gem_handle;
-};
-
-#define DRM_EXYNOS_GEM_CACHE_OP 0x12
-
-#define DRM_IOCTL_EXYNOS_GEM_CACHE_OP DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_GEM_CACHE_OP, struct drm_exynos_gem_cache_op)
-
-#endif /* __TBM_BUFMGR_TGL_H__ */
+++ /dev/null
-AM_CFLAGS = \
- @LIBTBM_EXYNOS_CFLAGS@ \
- -I$(top_srcdir) \
- -I$(top_srcdir)/src
-
-libtbm_exynos_la_LTLIBRARIES = libtbm_exynos.la
-libtbm_exynos_ladir = /${bufmgr_dir}
-libtbm_exynos_la_LIBADD = @LIBTBM_EXYNOS_LIBS@
-
-libtbm_exynos_la_SOURCES = \
- tbm_bufmgr_exynos.c
+++ /dev/null
-/**************************************************************************
-
-libtbm_exynos
-
-Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
-
-Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <libudev.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <xf86drm.h>
-#include <exynos_drm.h>
-#include <pthread.h>
-#include <tbm_backend.h>
-#include <tbm_drm_helper.h>
-#include <tbm_log.h>
-#include <system_info.h>
-#include "tbm_bufmgr_tgl.h"
-
-#define TBM_COLOR_FORMAT_COUNT 4
-
-#define EXYNOS_DRM_NAME "exynos"
-
-#define STRERR_BUFSIZE 128
-
-#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
-static unsigned int g_tbm_surface_alignment_plane;
-static unsigned int g_tbm_surface_alignment_pitch_rgb;
-
-#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
-#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
-
-#define SZ_1M 0x00100000
-#define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
-#define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
-#define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
-#define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
-#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
-#define S5P_FIMV_NV12MT_HALIGN 128
-#define S5P_FIMV_NV12MT_VALIGN 64
-
-/* cache control at backend */
-static unsigned int g_enable_cache_ctrl = 0;
-
-struct dma_buf_info {
- unsigned long size;
- unsigned int fence_supported;
- unsigned int padding;
-};
-
-#define DMA_BUF_ACCESS_READ 0x1
-#define DMA_BUF_ACCESS_WRITE 0x2
-#define DMA_BUF_ACCESS_DMA 0x4
-#define DMA_BUF_ACCESS_MAX 0x8
-
-#define DMA_FENCE_LIST_MAX 5
-
-struct dma_buf_fence {
- unsigned long ctx;
- unsigned int type;
-};
-
-#define DMABUF_IOCTL_BASE 'F'
-#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
-
-#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
-#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
-#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
-
-/* tgl key values */
-#define GLOBAL_KEY ((unsigned int)(-1))
-/* TBM_CACHE */
-#define TBM_EXYNOS_CACHE_INV 0x01 /**< cache invalidate */
-#define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */
-#define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */
-#define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */
-#define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */
-
-enum {
- DEVICE_NONE = 0,
- DEVICE_CA, /* cache aware device */
- DEVICE_CO /* cache oblivious device */
-};
-
-typedef union _tbm_bo_cache_state tbm_bo_cache_state;
-
-union _tbm_bo_cache_state {
- unsigned int val;
- struct {
- unsigned int cntFlush:16; /*Flush all index for sync */
- unsigned int isCached:1;
- unsigned int isDirtied:2;
- } data;
-};
-
-typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
-typedef struct _tbm_bo_exynos *tbm_bo_exynos;
-
-/* tbm buffer object for exynos */
-struct _tbm_bo_exynos {
- int fd;
-
- unsigned int name; /* FLINK ID */
-
- unsigned int gem; /* GEM Handle */
-
- unsigned int dmabuf; /* fd for dmabuf */
-
- void *pBase; /* virtual address */
-
- unsigned int size;
-
- unsigned int flags_exynos;
- unsigned int flags_tbm;
-
- pthread_mutex_t mutex;
- struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
- int device;
- int opt;
-
- tbm_bo_cache_state cache_state;
- unsigned int map_cnt;
- int last_map_device;
-
- tbm_bufmgr_exynos bufmgr_exynos;
-};
-
-/* tbm bufmgr private for exynos */
-struct _tbm_bufmgr_exynos {
- int fd;
- int isLocal;
- void *hashBos;
-
- int use_dma_fence;
-
- int tgl_fd;
-
- char *device_name;
- void *bind_display;
-
- tbm_backend_bufmgr_func *bufmgr_func;
- tbm_backend_bo_func *bo_func;
-
- tbm_bufmgr bufmgr;
-};
-
-const static char *STR_DEVICE[] = {
- "DEF",
- "CPU",
- "2D",
- "3D",
- "MM"
-};
-
-const static char *STR_OPT[] = {
- "NONE",
- "RD",
- "WR",
- "RDWR"
-};
-
-static int _get_render_node(int is_master);
-
-static inline int
-_tgl_init(int fd, unsigned int key)
-{
- struct tgl_reg_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.timeout_ms = 1000;
-
- err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_destroy(int fd, unsigned int key)
-{
- struct tgl_reg_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_lock(int fd, unsigned int key, int opt)
-{
- struct tgl_lock_data data;
- enum tgl_type_data tgl_type;
- int err;
- char buf[STRERR_BUFSIZE];
-
- switch (opt) {
- case TBM_OPTION_READ:
- tgl_type = TGL_TYPE_READ;
- break;
- case TBM_OPTION_WRITE:
- tgl_type = TGL_TYPE_WRITE;
- break;
- default:
- tgl_type = TGL_TYPE_NONE;
- break;
- }
-
- data.key = key;
- data.type = tgl_type;
-
- err = ioctl(fd, TGL_IOCTL_LOCK, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d opt:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_unlock(int fd, unsigned int key)
-{
- struct tgl_lock_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.type = TGL_TYPE_NONE;
-
- err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_set_data(int fd, unsigned int key, unsigned int val)
-{
- struct tgl_usr_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.data1 = val;
-
- err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline unsigned int
-_tgl_get_data(int fd, unsigned int key)
-{
- struct tgl_usr_data data = { 0, };
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
-
- err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return data.data1;
-}
-
-static int
-_exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
-{
- TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
-
- /* cache flush is managed by kernel side when using dma-fence. */
- if (bufmgr_exynos->use_dma_fence)
- return 1;
-
- struct drm_exynos_gem_cache_op cache_op = {0, };
- int ret;
-
- /* if bo_exynos is null, do cache_flush_all */
- if (bo_exynos) {
- cache_op.flags = 0;
- cache_op.usr_addr = (uint64_t)((uintptr_t)bo_exynos->pBase);
- cache_op.size = bo_exynos->size;
- } else {
- flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
- cache_op.flags = 0;
- cache_op.usr_addr = 0;
- cache_op.size = 0;
- }
-
- if (flags & TBM_EXYNOS_CACHE_INV) {
- if (flags & TBM_EXYNOS_CACHE_ALL)
- cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
- else
- cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
- }
-
- if (flags & TBM_EXYNOS_CACHE_CLN) {
- if (flags & TBM_EXYNOS_CACHE_ALL)
- cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
- else
- cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
- }
-
- if (flags & TBM_EXYNOS_CACHE_ALL)
- cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
-
- ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
- sizeof(cache_op));
- if (ret) {
- TBM_ERR("fail to flush the cache.\n");
- return 0;
- }
-
- return 1;
-}
-
-static int
-_bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
-{
-	/* check whether cache control is enabled or not */
- if (!g_enable_cache_ctrl)
- return 1;
-
- TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
- TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
-
- if (bufmgr_exynos->use_dma_fence)
- return 1;
-
- _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
-
- tbm_bo_cache_state cache_state;
-
- if (import == 0) {
- cache_state.data.isDirtied = DEVICE_NONE;
- cache_state.data.isCached = 0;
- cache_state.data.cntFlush = 0;
-
- _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
- }
-
- return 1;
-}
-
-static int
-_bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
-{
-	/* check whether cache control is enabled or not */
- if (!g_enable_cache_ctrl)
- return 1;
-
- TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
- TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
-
- if (bufmgr_exynos->use_dma_fence)
- return 1;
-
- char need_flush = 0;
- unsigned short cntFlush = 0;
-
- if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE))
- return 1;
-
- /* get cache state of a bo_exynos */
- bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
- bo_exynos->name);
-
- /* get global cache flush count */
- cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
-
- if (device == TBM_DEVICE_CPU) {
- if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
- bo_exynos->cache_state.data.isCached)
- need_flush = TBM_EXYNOS_CACHE_INV;
-
- bo_exynos->cache_state.data.isCached = 1;
- if (opt & TBM_OPTION_WRITE)
- bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
- else {
- if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
- bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
- }
- } else {
- if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
- bo_exynos->cache_state.data.isCached &&
- bo_exynos->cache_state.data.cntFlush == cntFlush)
- need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
-
- if (opt & TBM_OPTION_WRITE)
- bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
- else {
- if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
- bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
- }
- }
-
- if (need_flush) {
- if (need_flush & TBM_EXYNOS_CACHE_ALL)
- _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
-
- /* call cache flush */
- _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
-
- TBM_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
- bo_exynos->cache_state.data.isCached,
- bo_exynos->cache_state.data.isDirtied,
- need_flush,
- cntFlush);
- }
-
- return 1;
-}
-
-static int
-_bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
-{
-	/* check whether cache control is enabled or not */
- if (!g_enable_cache_ctrl)
- return 1;
-
- TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
- TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
-
- if (bufmgr_exynos->use_dma_fence)
- return 1;
-
- unsigned short cntFlush = 0;
-
- /* get global cache flush count */
- cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
-
- /* save global cache flush count */
- bo_exynos->cache_state.data.cntFlush = cntFlush;
- _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
- bo_exynos->cache_state.val);
-
- return 1;
-}
-
-static void
-_bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
-{
-	/* check whether cache control is enabled or not */
- if (!g_enable_cache_ctrl)
- return;
-
- TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
- TBM_RETURN_IF_FAIL(bo_exynos != NULL);
-
- if (bufmgr_exynos->use_dma_fence)
- return ;
-
- _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
-}
-
-static int
-_bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
-{
-	/* check whether cache control is enabled or not */
- if (!g_enable_cache_ctrl)
- return 1;
-
- TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
-
- if (bufmgr_exynos->use_dma_fence)
- return 1;
-
- /* open tgl fd for saving cache flush data */
- bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
-
- if (bufmgr_exynos->tgl_fd < 0) {
- bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
- if (bufmgr_exynos->tgl_fd < 0) {
- TBM_ERR("fail to open global_lock:%s\n",
- tgl_devfile1);
- return 0;
- }
- }
-
- if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
- TBM_ERR("fail to initialize the tgl\n");
- close(bufmgr_exynos->tgl_fd);
- return 0;
- }
-
- return 1;
-}
-
-static void
-_bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
-{
-	/* check whether cache control is enabled or not */
- if (!g_enable_cache_ctrl)
- return;
-
- TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
-
- if (bufmgr_exynos->use_dma_fence)
- return;
-
- if (bufmgr_exynos->tgl_fd >= 0)
- close(bufmgr_exynos->tgl_fd);
-}
-
-static int
-_tbm_exynos_open_drm()
-{
- int fd = -1;
-
- fd = drmOpen(EXYNOS_DRM_NAME, NULL);
- if (fd < 0) {
- TBM_ERR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME);
- }
-
- if (fd < 0) {
- fd = _get_render_node(1);
- if (fd < 0) {
- TBM_ERR("cannot find render_node\n");
- }
- }
-
- return fd;
-}
-
-static int
-_get_render_node(int is_master)
-{
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
- const char *filepath;
- struct stat s;
- int fd = -1;
- int ret;
-
- TBM_DBG("search drm-device by udev(is_master:%d)\n", is_master);
-
- udev = udev_new();
- if (!udev) {
- TBM_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- if (is_master)
- udev_enumerate_add_match_sysname(e, "card[0-9]*");
- else
- udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
-		/* No need to unref device_parent; device_parent and device share the same refcount. */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
- drm_device = device;
- TBM_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
-
- if (!drm_device) {
- TBM_ERR("failed to find device\n");
- udev_unref(udev);
- return -1;
- }
-
- /* Get device file path. */
- filepath = udev_device_get_devnode(drm_device);
- if (!filepath) {
- TBM_ERR("udev_device_get_devnode() failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- /* Open DRM device file and check validity. */
- fd = open(filepath, O_RDWR | O_CLOEXEC);
- if (fd < 0) {
-		TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- ret = fstat(fd, &s);
- if (ret) {
-		TBM_ERR("fstat(%s) failed.\n", filepath);
- udev_device_unref(drm_device);
- udev_unref(udev);
- close(fd);
- return -1;
- }
-
- udev_device_unref(drm_device);
- udev_unref(udev);
-
- return fd;
-}
-
-static unsigned int
-_get_exynos_flag_from_tbm(unsigned int ftbm)
-{
- unsigned int flags = 0;
-
- if (ftbm & TBM_BO_SCANOUT)
- flags |= EXYNOS_BO_CONTIG;
- else
- flags |= EXYNOS_BO_NONCONTIG;
-
- if (ftbm & TBM_BO_WC)
- flags |= EXYNOS_BO_WC;
- else if (ftbm & TBM_BO_NONCACHABLE)
- flags |= EXYNOS_BO_NONCACHABLE;
- else
- flags |= EXYNOS_BO_CACHABLE;
-
- return flags;
-}
-
-static unsigned int
-_get_tbm_flag_from_exynos(unsigned int fexynos)
-{
- unsigned int flags = 0;
-
- if (fexynos & EXYNOS_BO_NONCONTIG)
- flags |= TBM_BO_DEFAULT;
- else
- flags |= TBM_BO_SCANOUT;
-
- if (fexynos & EXYNOS_BO_WC)
- flags |= TBM_BO_WC;
- else if (fexynos & EXYNOS_BO_CACHABLE)
- flags |= TBM_BO_DEFAULT;
- else
- flags |= TBM_BO_NONCACHABLE;
-
- return flags;
-}
-
-static unsigned int
-_get_name(int fd, unsigned int gem)
-{
- struct drm_gem_flink arg = {0,};
-
- arg.handle = gem;
- if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
- TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
- return 0;
- }
-
- return (unsigned int)arg.name;
-}
-
-static tbm_bo_handle
-_exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
-{
- tbm_bo_handle bo_handle;
-
-	memset(&bo_handle, 0x0, sizeof(bo_handle));
-
- switch (device) {
- case TBM_DEVICE_DEFAULT:
- case TBM_DEVICE_2D:
- bo_handle.u32 = (uint32_t)bo_exynos->gem;
- break;
- case TBM_DEVICE_CPU:
- if (!bo_exynos->pBase) {
- struct drm_exynos_gem_map arg = {0,};
- void *map = NULL;
-
- arg.handle = bo_exynos->gem;
- if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
- sizeof(arg))) {
- TBM_ERR("Cannot map_exynos gem=%d\n", bo_exynos->gem);
- return (tbm_bo_handle) NULL;
- }
-
- map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
- bo_exynos->fd, arg.offset);
- if (map == MAP_FAILED) {
- TBM_ERR("Cannot usrptr gem=%d\n", bo_exynos->gem);
- return (tbm_bo_handle) NULL;
- }
- bo_exynos->pBase = map;
- }
- bo_handle.ptr = (void *)bo_exynos->pBase;
- break;
- case TBM_DEVICE_3D:
- case TBM_DEVICE_MM:
- if (!bo_exynos->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_exynos->gem;
- if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem);
- return (tbm_bo_handle) NULL;
- }
- bo_exynos->dmabuf = arg.fd;
- }
-
- bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
- break;
- default:
- TBM_ERR("Not supported device:%d\n", device);
- bo_handle.ptr = (void *) NULL;
- break;
- }
-
- return bo_handle;
-}
-
-static int
-_new_calc_plane_nv12(int width, int height)
-{
- int mbX, mbY;
-
- mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
- mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
-
- if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
- mbY = (mbY + 1) / 2 * 2;
-
- return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
- S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
-}
-
-static int
-_calc_yplane_nv12(int width, int height)
-{
- int mbX, mbY;
-
- mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
- mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
-
- return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
-}
-
-static int
-_calc_uvplane_nv12(int width, int height)
-{
- int mbX, mbY;
-
- mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
- mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
-
- return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
-}
-
-static int
-_new_calc_yplane_nv12(int width, int height)
-{
- return SIZE_ALIGN(_new_calc_plane_nv12(width,
- height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
- TBM_SURFACE_ALIGNMENT_PLANE_NV12);
-}
-
-static int
-_new_calc_uvplane_nv12(int width, int height)
-{
- return SIZE_ALIGN((_new_calc_plane_nv12(width,
- height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
- TBM_SURFACE_ALIGNMENT_PLANE_NV12);
-}
-
-static tbm_bufmgr_capability
-tbm_exynos_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error)
-{
- tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE;
-
- capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD;
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return capabilities;
-}
-
-static tbm_error_e
-tbm_exynos_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display)
-{
- tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
- TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
-
- if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
- bufmgr_exynos->device_name, 0)) {
- TBM_ERR("fail to tbm_drm_helper_wl_server_init\n");
- return TBM_ERROR_INVALID_OPERATION;
- }
-
- bufmgr_exynos->bind_display = native_display;
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_error_e
-tbm_exynos_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data,
- uint32_t **formats, uint32_t *num)
-{
- const static uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
- TBM_FORMAT_ARGB8888,
- TBM_FORMAT_XRGB8888,
- TBM_FORMAT_NV12,
- TBM_FORMAT_YUV420
- };
-
- tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
- uint32_t *color_formats;
-
- TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
-
- color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
- if (color_formats == NULL)
- return TBM_ERROR_OUT_OF_MEMORY;
-
- memcpy(color_formats, tbm_exynos_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
-
- *formats = color_formats;
- *num = TBM_COLOR_FORMAT_COUNT;
-
- TBM_DBG("supported format count = %d\n", *num);
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_error_e
-tbm_exynos_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data,
- tbm_format format, int plane_idx, int width,
- int height, uint32_t *size, uint32_t *offset,
- uint32_t *pitch, int *bo_idx)
-{
- tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
- int bpp;
- int _offset = 0;
- int _pitch = 0;
- int _size = 0;
- int _bo_idx = 0;
-
- TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
-
- switch (format) {
- /* 16 bpp RGB */
- case TBM_FORMAT_XRGB4444:
- case TBM_FORMAT_XBGR4444:
- case TBM_FORMAT_RGBX4444:
- case TBM_FORMAT_BGRX4444:
- case TBM_FORMAT_ARGB4444:
- case TBM_FORMAT_ABGR4444:
- case TBM_FORMAT_RGBA4444:
- case TBM_FORMAT_BGRA4444:
- case TBM_FORMAT_XRGB1555:
- case TBM_FORMAT_XBGR1555:
- case TBM_FORMAT_RGBX5551:
- case TBM_FORMAT_BGRX5551:
- case TBM_FORMAT_ARGB1555:
- case TBM_FORMAT_ABGR1555:
- case TBM_FORMAT_RGBA5551:
- case TBM_FORMAT_BGRA5551:
- case TBM_FORMAT_RGB565:
- bpp = 16;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- break;
- /* 24 bpp RGB */
- case TBM_FORMAT_RGB888:
- case TBM_FORMAT_BGR888:
- bpp = 24;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- break;
- /* 32 bpp RGB */
- case TBM_FORMAT_XRGB8888:
- case TBM_FORMAT_XBGR8888:
- case TBM_FORMAT_RGBX8888:
- case TBM_FORMAT_BGRX8888:
- case TBM_FORMAT_ARGB8888:
- case TBM_FORMAT_ABGR8888:
- case TBM_FORMAT_RGBA8888:
- case TBM_FORMAT_BGRA8888:
- bpp = 32;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- break;
-
- /* packed YCbCr */
- case TBM_FORMAT_YUYV:
- case TBM_FORMAT_YVYU:
- case TBM_FORMAT_UYVY:
- case TBM_FORMAT_VYUY:
- case TBM_FORMAT_AYUV:
- bpp = 32;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- break;
-
- /*
- * 2 plane YCbCr
- * index 0 = Y plane, [7:0] Y
- * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
- * or
- * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
- */
- case TBM_FORMAT_NV12:
- case TBM_FORMAT_NV21:
- bpp = 12;
- if (plane_idx == 0) {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = MAX(_calc_yplane_nv12(width, height),
- _new_calc_yplane_nv12(width, height));
- _bo_idx = 0;
- } else if (plane_idx == 1) {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = MAX(_calc_uvplane_nv12(width, height),
- _new_calc_uvplane_nv12(width, height));
- _bo_idx = 1;
- }
- break;
- case TBM_FORMAT_NV16:
- case TBM_FORMAT_NV61:
- bpp = 16;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if( plane_idx ==1 )*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
-
- /*
- * 3 plane YCbCr
- * index 0: Y plane, [7:0] Y
- * index 1: Cb plane, [7:0] Cb
- * index 2: Cr plane, [7:0] Cr
- * or
- * index 1: Cr plane, [7:0] Cr
- * index 2: Cb plane, [7:0] Cb
- */
-
- /*
- * NATIVE_BUFFER_FORMAT_YV12
- * NATIVE_BUFFER_FORMAT_I420
- */
- case TBM_FORMAT_YUV410:
- case TBM_FORMAT_YVU410:
- bpp = 9;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
- _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
- _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
- case TBM_FORMAT_YUV411:
- case TBM_FORMAT_YVU411:
- case TBM_FORMAT_YUV420:
- case TBM_FORMAT_YVU420:
- bpp = 12;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
- case TBM_FORMAT_YUV422:
- case TBM_FORMAT_YVU422:
- bpp = 16;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
- case TBM_FORMAT_YUV444:
- case TBM_FORMAT_YVU444:
- bpp = 24;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
- _bo_idx = 0;
- }
- break;
- default:
- bpp = 0;
- break;
- }
-
- *size = _size;
- *offset = _offset;
- *pitch = _pitch;
- *bo_idx = _bo_idx;
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_backend_bo_data *
-tbm_exynos_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size,
- tbm_bo_memory_type flags, tbm_error_e *error)
-{
- tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
- tbm_bo_exynos bo_exynos;
- unsigned int exynos_flags;
-
- if (bufmgr_exynos == NULL) {
- TBM_ERR("bufmgr_data is null\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
- if (!bo_exynos) {
- TBM_ERR("fail to allocate the bo_exynos private\n");
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_exynos->bufmgr_exynos = bufmgr_exynos;
-
- exynos_flags = _get_exynos_flag_from_tbm(flags);
- if ((flags & TBM_BO_SCANOUT) &&
- size <= 4 * 1024) {
- exynos_flags |= EXYNOS_BO_NONCONTIG;
- }
-
- struct drm_exynos_gem_create arg = {0, };
-
- arg.size = (uint64_t)size;
- arg.flags = exynos_flags;
- if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
- sizeof(arg))) {
- TBM_ERR("Cannot create bo_exynos(flag:%x, size:%d)\n", arg.flags,
- (unsigned int)arg.size);
- free(bo_exynos);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- bo_exynos->fd = bufmgr_exynos->fd;
- bo_exynos->gem = arg.handle;
- bo_exynos->size = size;
- bo_exynos->flags_tbm = flags;
- bo_exynos->flags_exynos = exynos_flags;
- bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
-
- if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
- TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
- free(bo_exynos);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- pthread_mutex_init(&bo_exynos->mutex, NULL);
-
- if (bufmgr_exynos->use_dma_fence && !bo_exynos->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_exynos->gem;
- if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem);
- free(bo_exynos);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
- bo_exynos->dmabuf = arg.fd;
- }
-
- /* add bo_exynos to hash */
- if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
- TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name);
-
- TBM_DBG(" bo_exynos:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- flags, exynos_flags,
- bo_exynos->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_backend_bo_data *)bo_exynos;
-}
-
-static tbm_backend_bo_data *
-tbm_exynos_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error)
-{
- tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
- tbm_bo_exynos bo_exynos;
- unsigned int gem = 0;
- unsigned int name;
- int ret;
- char buf[STRERR_BUFSIZE];
-
- if (bufmgr_exynos == NULL) {
- TBM_ERR("bufmgr_data is null\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- /*getting handle from fd*/
- struct drm_prime_handle arg = {0, };
-
- arg.fd = key;
- arg.flags = 0;
- if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
- TBM_ERR("Cannot get gem handle from fd:%d (%s)\n",
- arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
- gem = arg.handle;
-
- name = _get_name(bufmgr_exynos->fd, gem);
- if (!name) {
- TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
- gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&bo_exynos);
- if (ret == 0) {
- if (gem == bo_exynos->gem) {
- if (error)
- *error = TBM_ERROR_NONE;
- return bo_exynos;
- }
- }
-
-	/* Determine the size of bo_exynos. The fd-to-handle ioctl really should
-	 * return the size, but it doesn't. On kernel 3.12 or later, we can
-	 * lseek on the prime fd to get the size. Older kernels will just
-	 * fail, in which case we fall back to the size reported by
-	 * DRM_EXYNOS_GEM_GET.
-	 */
- unsigned int real_size = -1;
- struct drm_exynos_gem_info info = {0, };
-
- real_size = lseek(key, 0, SEEK_END);
-
- info.handle = gem;
- if (drmCommandWriteRead(bufmgr_exynos->fd,
- DRM_EXYNOS_GEM_GET,
- &info,
- sizeof(struct drm_exynos_gem_info))) {
- TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
- gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- if (real_size == -1)
- real_size = info.size;
-
- bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
- if (!bo_exynos) {
- TBM_ERR("bo_exynos:%p fail to allocate the bo_exynos\n", bo_exynos);
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_exynos->bufmgr_exynos = bufmgr_exynos;
-
- bo_exynos->fd = bufmgr_exynos->fd;
- bo_exynos->gem = gem;
- bo_exynos->size = real_size;
- bo_exynos->flags_exynos = info.flags;
- bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
- bo_exynos->name = name;
-
- if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
- TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
- free(bo_exynos);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- /* add bo_exynos to hash */
- if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
- TBM_ERR("bo_exynos:%p Cannot insert bo_exynos to Hash(%d) from gem:%d, fd:%d\n",
- bo_exynos, bo_exynos->name, gem, key);
-
- TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf,
- key,
- bo_exynos->flags_tbm, bo_exynos->flags_exynos,
- bo_exynos->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_backend_bo_data *)bo_exynos;
-}
-
-static tbm_backend_bo_data *
-tbm_exynos_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error)
-{
- tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
- tbm_bo_exynos bo_exynos;
- int ret;
-
- if (bufmgr_exynos == NULL) {
- TBM_ERR("bufmgr_data is null\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&bo_exynos);
- if (ret == 0) {
- if (error)
- *error = TBM_ERROR_NONE;
- return (tbm_backend_bo_data *)bo_exynos;
- }
-
- struct drm_gem_open arg = {0, };
- struct drm_exynos_gem_info info = {0, };
-
- arg.name = key;
- if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
- TBM_ERR("Cannot open gem name=%d\n", key);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- info.handle = arg.handle;
- if (drmCommandWriteRead(bufmgr_exynos->fd,
- DRM_EXYNOS_GEM_GET,
- &info,
- sizeof(struct drm_exynos_gem_info))) {
- TBM_ERR("Cannot get gem info=%d\n", key);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
- if (!bo_exynos) {
- TBM_ERR("fail to allocate the bo_exynos private\n");
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_exynos->bufmgr_exynos = bufmgr_exynos;
-
- bo_exynos->fd = bufmgr_exynos->fd;
- bo_exynos->gem = arg.handle;
- bo_exynos->size = arg.size;
- bo_exynos->flags_exynos = info.flags;
- bo_exynos->name = key;
- bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
-
- if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
- TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
- free(bo_exynos);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- if (!bo_exynos->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_exynos->gem;
- if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_exynos->gem);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- free(bo_exynos);
- return NULL;
- }
- bo_exynos->dmabuf = arg.fd;
- }
-
- /* add bo_exynos to hash */
- if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
- TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name);
-
- TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf,
- bo_exynos->flags_tbm, bo_exynos->flags_exynos,
- bo_exynos->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_backend_bo_data *)bo_exynos;
-}
-
-static void
-tbm_exynos_bo_free(tbm_backend_bo_data *bo_data)
-{
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
- tbm_bo_exynos temp;
- tbm_bufmgr_exynos bufmgr_exynos;
- char buf[STRERR_BUFSIZE];
- int ret;
-
- if (!bo_data)
- return;
-
- bufmgr_exynos = bo_exynos->bufmgr_exynos;
- if (!bufmgr_exynos)
- return;
-
- TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, size:%d\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf,
- bo_exynos->size);
-
- if (bo_exynos->pBase) {
- if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
- TBM_ERR("bo_exynos:%p fail to munmap(%s)\n",
- bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE));
- }
- }
-
- /* close dmabuf */
- if (bo_exynos->dmabuf) {
- close(bo_exynos->dmabuf);
- bo_exynos->dmabuf = 0;
- }
-
- /* delete bo_exynos from hash */
- ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name, (void **)&temp);
- if (ret == 0)
- drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
- else
- TBM_ERR("Cannot find bo_exynos to Hash(%d), ret=%d\n", bo_exynos->name, ret);
-
- if (temp != bo_exynos)
- TBM_ERR("hashBos probably has several BOs with same name!!!\n");
-
- _bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
-
- /* Free gem handle */
- struct drm_gem_close arg = {0, };
-
- memset(&arg, 0, sizeof(arg));
- arg.handle = bo_exynos->gem;
- if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg))
- TBM_ERR("bo_exynos:%p fail to gem close.(%s)\n",
- bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE));
-
- free(bo_exynos);
-}
-
-static int
-tbm_exynos_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error)
-{
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
-
- if (!bo_exynos) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return bo_exynos->size;
-}
-
-static tbm_bo_memory_type
-tbm_exynos_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error)
-{
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
-
- if (!bo_exynos) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return TBM_BO_DEFAULT;
- }
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return bo_exynos->flags_tbm;
-}
-
-static tbm_bo_handle
-tbm_exynos_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error)
-{
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
- tbm_bo_handle bo_handle;
-
- if (!bo_exynos) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- if (!bo_exynos->gem) {
- TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem);
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- TBM_DBG("bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf,
- bo_exynos->flags_tbm, bo_exynos->flags_exynos,
- bo_exynos->size,
- STR_DEVICE[device]);
-
- /*Get mapped bo_handle*/
- bo_handle = _exynos_bo_handle(bo_exynos, device);
- if (bo_handle.ptr == NULL) {
- TBM_ERR("Cannot get handle: gem:%d, device:%d\n",
- bo_exynos->gem, device);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return (tbm_bo_handle) NULL;
- }
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return bo_handle;
-}
-
-static tbm_bo_handle
-tbm_exynos_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
- tbm_bo_access_option opt, tbm_error_e *error)
-{
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
- tbm_bo_handle bo_handle;
- tbm_bufmgr_exynos bufmgr_exynos;
-
- if (!bo_exynos) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- bufmgr_exynos = bo_exynos->bufmgr_exynos;
- if (!bufmgr_exynos) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- if (!bo_exynos->gem) {
- TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem);
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, %s, %s\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf,
- STR_DEVICE[device],
- STR_OPT[opt]);
-
- /*Get mapped bo_handle*/
- bo_handle = _exynos_bo_handle(bo_exynos, device);
- if (bo_handle.ptr == NULL) {
- TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
- bo_exynos->gem, device, opt);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return (tbm_bo_handle) NULL;
- }
-
- if (bo_exynos->map_cnt == 0)
- _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
-
- bo_exynos->last_map_device = device;
-
- bo_exynos->map_cnt++;
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return bo_handle;
-}
-
-static tbm_error_e
-tbm_exynos_bo_unmap(tbm_backend_bo_data *bo_data)
-{
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
- tbm_bufmgr_exynos bufmgr_exynos;
-
- if (!bo_exynos)
- return TBM_ERROR_INVALID_PARAMETER;
-
- bufmgr_exynos = bo_exynos->bufmgr_exynos;
- if (!bufmgr_exynos)
- return TBM_ERROR_INVALID_PARAMETER;
-
- if (!bo_exynos->gem)
- return TBM_ERROR_INVALID_PARAMETER;
-
- bo_exynos->map_cnt--;
-
- if (bo_exynos->map_cnt == 0)
- _bo_save_cache_state(bufmgr_exynos, bo_exynos);
-
-	/* check whether cache control is enabled or not */
- if (g_enable_cache_ctrl && bo_exynos->last_map_device == TBM_DEVICE_CPU)
- _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL);
-
- bo_exynos->last_map_device = -1;
-
- TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf);
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_error_e
-tbm_exynos_bo_lock(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
- tbm_bo_access_option opt)
-{
-#ifndef ALWAYS_BACKEND_CTRL
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
- tbm_bufmgr_exynos bufmgr_exynos;
- struct dma_buf_fence fence;
- struct flock filelock;
- int ret = 0;
- char buf[STRERR_BUFSIZE];
-
- if (!bo_exynos)
- return TBM_ERROR_INVALID_PARAMETER;
-
- bufmgr_exynos = bo_exynos->bufmgr_exynos;
- if (!bufmgr_exynos)
- return TBM_ERROR_INVALID_PARAMETER;
-
- if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
- TBM_DBG("Not support device type,\n");
- return TBM_ERROR_INVALID_OPERATION;
- }
-
- memset(&fence, 0, sizeof(struct dma_buf_fence));
-
- /* Check if the given type is valid or not. */
- if (opt & TBM_OPTION_WRITE) {
- if (device == TBM_DEVICE_3D)
- fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
- } else if (opt & TBM_OPTION_READ) {
- if (device == TBM_DEVICE_3D)
- fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
- } else {
- TBM_ERR("Invalid argument\n");
- return TBM_ERROR_INVALID_PARAMETER;
- }
-
- /* Check if the tbm manager supports dma fence or not. */
- if (!bufmgr_exynos->use_dma_fence) {
- TBM_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return TBM_ERROR_INVALID_OPERATION;
-
- }
-
- if (device == TBM_DEVICE_3D) {
- ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
- if (ret < 0) {
- TBM_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return TBM_ERROR_INVALID_OPERATION;
- }
- } else {
- if (opt & TBM_OPTION_WRITE)
- filelock.l_type = F_WRLCK;
- else
- filelock.l_type = F_RDLCK;
-
- filelock.l_whence = SEEK_CUR;
- filelock.l_start = 0;
- filelock.l_len = 0;
-
- if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
- return TBM_ERROR_INVALID_OPERATION;
- }
-
- pthread_mutex_lock(&bo_exynos->mutex);
-
- if (device == TBM_DEVICE_3D) {
- int i;
-
- for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
- if (bo_exynos->dma_fence[i].ctx == 0) {
- bo_exynos->dma_fence[i].type = fence.type;
- bo_exynos->dma_fence[i].ctx = fence.ctx;
- break;
- }
- }
-
- if (i == DMA_FENCE_LIST_MAX) {
- /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
- TBM_ERR("fence list is full\n");
- }
- }
-
- pthread_mutex_unlock(&bo_exynos->mutex);
-
- TBM_DBG("DMABUF_IOCTL_GET_FENCE! bo_exynos:%p, gem:%d(%d), fd:%ds\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf);
-#endif /* ALWAYS_BACKEND_CTRL */
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_error_e
-tbm_exynos_bo_unlock(tbm_backend_bo_data *bo_data)
-{
-#ifndef ALWAYS_BACKEND_CTRL
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
-	tbm_bufmgr_exynos bufmgr_exynos;
-	struct dma_buf_fence fence;
- struct flock filelock;
- unsigned int dma_type = 0;
- int ret = 0;
- char buf[STRERR_BUFSIZE];
-
- bufmgr_exynos = bo_exynos->bufmgr_exynos;
- if (!bufmgr_exynos)
- return TBM_ERROR_INVALID_PARAMETER;
-
- if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
- dma_type = 1;
-
- if (!bo_exynos->dma_fence[0].ctx && dma_type) {
- TBM_DBG("FENCE not support or ignored,\n");
- return TBM_ERROR_INVALID_OPERATION;
- }
-
-	if (!bo_exynos->dma_fence[0].type && !dma_type) {
- TBM_DBG("device type is not 3D/CPU,\n");
- return TBM_ERROR_INVALID_OPERATION;
- }
-
- pthread_mutex_lock(&bo_exynos->mutex);
-
- if (dma_type) {
- fence.type = bo_exynos->dma_fence[0].type;
- fence.ctx = bo_exynos->dma_fence[0].ctx;
- int i;
-
- for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
- bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
- bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
- }
- bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
- bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
- }
- pthread_mutex_unlock(&bo_exynos->mutex);
-
- if (dma_type) {
- ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
- if (ret < 0) {
- TBM_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return TBM_ERROR_INVALID_OPERATION;
- }
- } else {
- filelock.l_type = F_UNLCK;
- filelock.l_whence = SEEK_CUR;
- filelock.l_start = 0;
- filelock.l_len = 0;
-
- if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
- return TBM_ERROR_INVALID_OPERATION;
- }
-
- TBM_DBG("DMABUF_IOCTL_PUT_FENCE! bo_exynos:%p, gem:%d(%d), fd:%ds\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf);
-#endif /* ALWAYS_BACKEND_CTRL */
-
- return TBM_ERROR_NONE;
-}
-static tbm_fd
-tbm_exynos_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error)
-{
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
- int ret;
- char buf[STRERR_BUFSIZE];
-
- if (!bo_exynos) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return -1;
- }
-
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_exynos->gem;
- ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
- if (ret) {
- TBM_ERR("bo_exynos:%p Cannot dmabuf=%d (%s)\n",
- bo_exynos, bo_exynos->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return (tbm_fd) ret;
- }
-
- TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf,
- arg.fd,
- bo_exynos->flags_tbm, bo_exynos->flags_exynos,
- bo_exynos->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_fd)arg.fd;
-}
-
-static tbm_key
-tbm_exynos_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error)
-{
- tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
-
- if (!bo_exynos) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
-
- if (!bo_exynos->name) {
- bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
- if (!bo_exynos->name) {
- TBM_ERR("error Cannot get name\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
- }
-
- TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
- bo_exynos,
- bo_exynos->gem, bo_exynos->name,
- bo_exynos->dmabuf,
- bo_exynos->flags_tbm, bo_exynos->flags_exynos,
- bo_exynos->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_key)bo_exynos->name;
-}
-
-static void
-tbm_exynos_deinit(tbm_backend_bufmgr_data *bufmgr_data)
-{
- tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
- tbm_bufmgr bufmgr;
- tbm_error_e error;
- unsigned long key;
- void *value;
-
- TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
-
- bufmgr = bufmgr_exynos->bufmgr;
-
- tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_exynos->bufmgr_func);
- tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_exynos->bo_func);
-
- if (bufmgr_exynos->hashBos) {
- while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
- free(value);
- drmHashDelete(bufmgr_exynos->hashBos, key);
- }
-
- drmHashDestroy(bufmgr_exynos->hashBos);
- bufmgr_exynos->hashBos = NULL;
- }
-
- _bufmgr_deinit_cache_state(bufmgr_exynos);
-
- if (bufmgr_exynos->bind_display)
- tbm_drm_helper_wl_auth_server_deinit();
-
- if (bufmgr_exynos->device_name)
- free(bufmgr_exynos->device_name);
-
- if (tbm_backend_bufmgr_query_display_server(bufmgr, &error))
- tbm_drm_helper_unset_tbm_master_fd();
- else
- tbm_drm_helper_unset_fd();
-
- close(bufmgr_exynos->fd);
-
- free(bufmgr_exynos);
-}
-
-static tbm_backend_bufmgr_data *
-tbm_exynos_init(tbm_bufmgr bufmgr, tbm_error_e *error)
-{
- tbm_bufmgr_exynos bufmgr_exynos = NULL;
- tbm_backend_bufmgr_func *bufmgr_func = NULL;
- tbm_backend_bo_func *bo_func = NULL;
- int fp;
- tbm_error_e err;
- int set_master = 0;
- char *value = NULL;
-
- if (!bufmgr) {
- TBM_ERR("bufmgr is null.\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
- if (!bufmgr_exynos) {
- TBM_ERR("fail to alloc bufmgr_exynos!\n");
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
-
- /* check the master_fd which already had opened */
- bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
- if (bufmgr_exynos->fd < 0) {
- bufmgr_exynos->fd = _tbm_exynos_open_drm();
- if (bufmgr_exynos->fd < 0) {
- TBM_ERR("fail to open drm!\n");
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_open_drm;
- }
-
- if (drmIsMaster(bufmgr_exynos->fd)) {
- tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
- set_master = 1;
-
- bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
- if (!bufmgr_exynos->device_name) {
- TBM_ERR("fail to get device name!\n");
- tbm_drm_helper_unset_tbm_master_fd();
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_get_device_name;
- }
- TBM_INFO("This is Master FD(%d) from open_drm.", bufmgr_exynos->fd);
- } else {
- /* close the fd and get the authenticated fd from the master fd */
- close(bufmgr_exynos->fd);
-#ifdef USE_RENDER_NODE
- bufmgr_exynos->fd = _get_render_node(0);
-#else
- bufmgr_exynos->fd = -1;
-#endif
- /* get the authenticated drm fd from the master fd */
- if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
- TBM_ERR("fail to get auth drm info!\n");
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_get_auth_info;
- }
- TBM_INFO("This is Authenticated FD(%d)", bufmgr_exynos->fd);
- }
- } else {
- bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
- if (!bufmgr_exynos->device_name) {
- TBM_ERR("fail to get device name!\n");
- tbm_drm_helper_unset_tbm_master_fd();
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_get_device_name;
- }
- TBM_INFO("This is Master FD from tbm_drm_helper_get_master_fd(%d)", bufmgr_exynos->fd);
- }
- tbm_drm_helper_set_fd(bufmgr_exynos->fd);
-
- //Check if the tbm manager supports dma fence or not.
- fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
- if (fp != -1) {
- char buf[1];
- int length = read(fp, buf, 1);
-
- if (length == 1 && buf[0] == '1')
- bufmgr_exynos->use_dma_fence = 1;
-
- close(fp);
- }
-
- /* get the model name from the capi-system-info.
-	 * The alignment_plane and alignment_pitch_rgb are different according to the target.
-	 * There will be a stride issue when the right alignment_plane and alignment_pitch_rgb
-	 * are not set in the backend.
- */
- if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) {
- TBM_ERR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n");
- TBM_ERR("May not set the right value on libtbm-exynos backend.\n");
- } else {
- if (!strncmp(value, "TW1", 4)) {
- g_tbm_surface_alignment_plane = 8;
- g_tbm_surface_alignment_pitch_rgb = 8;
- g_enable_cache_ctrl = 1;
- } else {
- g_tbm_surface_alignment_plane = 64;
- g_tbm_surface_alignment_pitch_rgb = 64;
- }
- }
-
- free(value);
-
- if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
- TBM_ERR("fail to init bufmgr cache state\n");
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_init_cache_state;
- }
-
- /*Create Hash Table*/
- bufmgr_exynos->hashBos = drmHashCreate();
-
- /* alloc and register bufmgr_funcs */
- bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err);
- if (!bufmgr_func) {
- TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err);
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- goto fail_alloc_bufmgr_func;
- }
-
- bufmgr_func->bufmgr_get_capabilities = tbm_exynos_bufmgr_get_capabilities;
- //if (tbm_backend_bufmgr_query_display_server(bufmgr, &err) && !_check_render_node())
- bufmgr_func->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
- bufmgr_func->bufmgr_get_supported_formats = tbm_exynos_bufmgr_get_supported_formats;
- bufmgr_func->bufmgr_get_plane_data = tbm_exynos_bufmgr_get_plane_data;
- bufmgr_func->bufmgr_alloc_bo = tbm_exynos_bufmgr_alloc_bo;
- bufmgr_func->bufmgr_alloc_bo_with_format = NULL;
- bufmgr_func->bufmgr_import_fd = tbm_exynos_bufmgr_import_fd;
- bufmgr_func->bufmgr_import_key = tbm_exynos_bufmgr_import_key;
-
- err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func);
- if (err != TBM_ERROR_NONE) {
- TBM_ERR("fail to register bufmgr_func! err(%d)\n", err);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_register_bufmgr_func;
- }
- bufmgr_exynos->bufmgr_func = bufmgr_func;
-
- /* alloc and register bo_funcs */
- bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err);
- if (!bo_func) {
- TBM_ERR("fail to alloc bo_func! err(%d)\n", err);
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- goto fail_alloc_bo_func;
- }
-
- bo_func->bo_free = tbm_exynos_bo_free;
- bo_func->bo_get_size = tbm_exynos_bo_get_size;
- bo_func->bo_get_memory_types = tbm_exynos_bo_get_memory_type;
- bo_func->bo_get_handle = tbm_exynos_bo_get_handle;
- bo_func->bo_map = tbm_exynos_bo_map;
- bo_func->bo_unmap = tbm_exynos_bo_unmap;
- bo_func->bo_lock = tbm_exynos_bo_lock;
- bo_func->bo_unlock = tbm_exynos_bo_unlock;
- bo_func->bo_export_fd = tbm_exynos_bo_export_fd;
- bo_func->bo_export_key = tbm_exynos_bo_export_key;
-
- err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func);
- if (err != TBM_ERROR_NONE) {
- TBM_ERR("fail to register bo_func! err(%d)\n", err);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_register_bo_func;
- }
- bufmgr_exynos->bo_func = bo_func;
-
- TBM_DBG("drm_fd:%d\n", bufmgr_exynos->fd);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- bufmgr_exynos->bufmgr = bufmgr;
-
- return (tbm_backend_bufmgr_data *)bufmgr_exynos;
-
-fail_register_bo_func:
- tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func);
-fail_alloc_bo_func:
-fail_register_bufmgr_func:
- tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func);
-fail_alloc_bufmgr_func:
- _bufmgr_deinit_cache_state(bufmgr_exynos);
- if (bufmgr_exynos->hashBos)
- drmHashDestroy(bufmgr_exynos->hashBos);
-fail_init_cache_state:
- if (set_master)
- tbm_drm_helper_unset_tbm_master_fd();
- tbm_drm_helper_unset_fd();
-fail_get_device_name:
- if (bufmgr_exynos->fd >= 0)
- close(bufmgr_exynos->fd);
-fail_get_auth_info:
-fail_open_drm:
- free(bufmgr_exynos);
- return NULL;
-}
-
-tbm_backend_module tbm_backend_module_data = {
- "exynos",
- "Samsung",
- TBM_BACKEND_ABI_VERSION_3_0,
- tbm_exynos_init,
- tbm_exynos_deinit
-};
+++ /dev/null
-/**************************************************************************
- *
- * libtbm
- *
- * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
- *
- * Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
- * Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * **************************************************************************/
-
-#ifndef __TBM_BUFMGR_TGL_H__
-#define __TBM_BUFMGR_TGL_H__
-
-#include <linux/ioctl.h>
-
-static char tgl_devfile[] = "/dev/slp_global_lock";
-static char tgl_devfile1[] = "/dev/tgl";
-
-#define TGL_IOCTL_BASE 0x32
-#define TGL_IO(nr) _IO(TGL_IOCTL_BASE, nr)
-#define TGL_IOR(nr, type) _IOR(TGL_IOCTL_BASE, nr, type)
-#define TGL_IOW(nr, type) _IOW(TGL_IOCTL_BASE, nr, type)
-#define TGL_IOWR(nr, type) _IOWR(TGL_IOCTL_BASE, nr, type)
-
-/**
- * struct tgl_ver_data - tgl version data structure
- * @major: major version
- * @minor: minor version
- */
-struct tgl_ver_data {
- unsigned int major;
- unsigned int minor;
-};
-
-/**
- * struct tgl_reg_data - tgl data structure
- * @key: lookup key
- * @timeout_ms: timeout value for waiting event
- */
-struct tgl_reg_data {
- unsigned int key;
- unsigned int timeout_ms;
-};
-
-enum tgl_type_data {
- TGL_TYPE_NONE = 0,
- TGL_TYPE_READ = (1 << 0),
- TGL_TYPE_WRITE = (1 << 1),
-};
-
-/**
- * struct tgl_lock_data - tgl lock data structure
- * @key: lookup key
- * @type: lock type that is in tgl_type_data
- */
-struct tgl_lock_data {
- unsigned int key;
- enum tgl_type_data type;
-};
-
-enum tgl_status_data {
- TGL_STATUS_UNLOCKED,
- TGL_STATUS_LOCKED,
-};
-
-/**
- * struct tgl_usr_data - tgl user data structure
- * @key: lookup key
- * @data1: user data 1
- * @data2: user data 2
- * @status: lock status that is in tgl_status_data
- */
-struct tgl_usr_data {
- unsigned int key;
- unsigned int data1;
- unsigned int data2;
- enum tgl_status_data status;
-};
-
-enum {
- _TGL_GET_VERSION,
- _TGL_REGISTER,
- _TGL_UNREGISTER,
- _TGL_LOCK,
- _TGL_UNLOCK,
- _TGL_SET_DATA,
- _TGL_GET_DATA,
-};
-
-/* get version information */
-#define TGL_IOCTL_GET_VERSION TGL_IOR(_TGL_GET_VERSION, struct tgl_ver_data)
-/* register key */
-#define TGL_IOCTL_REGISTER TGL_IOW(_TGL_REGISTER, struct tgl_reg_data)
-/* unregister key */
-#define TGL_IOCTL_UNREGISTER TGL_IOW(_TGL_UNREGISTER, struct tgl_reg_data)
-/* lock with key */
-#define TGL_IOCTL_LOCK TGL_IOW(_TGL_LOCK, struct tgl_lock_data)
-/* unlock with key */
-#define TGL_IOCTL_UNLOCK TGL_IOW(_TGL_UNLOCK, struct tgl_lock_data)
-/* set user data with key */
-#define TGL_IOCTL_SET_DATA TGL_IOW(_TGL_SET_DATA, struct tgl_usr_data)
-/* get user data with key */
-#define TGL_IOCTL_GET_DATA TGL_IOR(_TGL_GET_DATA, struct tgl_usr_data)
-
-/* indicate cache units. */
-enum e_drm_exynos_gem_cache_sel {
- EXYNOS_DRM_L1_CACHE = 1 << 0,
- EXYNOS_DRM_L2_CACHE = 1 << 1,
- EXYNOS_DRM_ALL_CORES = 1 << 2,
- EXYNOS_DRM_ALL_CACHES = EXYNOS_DRM_L1_CACHE |
- EXYNOS_DRM_L2_CACHE,
- EXYNOS_DRM_ALL_CACHES_CORES = EXYNOS_DRM_L1_CACHE |
- EXYNOS_DRM_L2_CACHE |
- EXYNOS_DRM_ALL_CORES,
- EXYNOS_DRM_CACHE_SEL_MASK = EXYNOS_DRM_ALL_CACHES_CORES
-};
-
-/* indicate cache operation types. */
-enum e_drm_exynos_gem_cache_op {
- EXYNOS_DRM_CACHE_INV_ALL = 1 << 3,
- EXYNOS_DRM_CACHE_INV_RANGE = 1 << 4,
- EXYNOS_DRM_CACHE_CLN_ALL = 1 << 5,
- EXYNOS_DRM_CACHE_CLN_RANGE = 1 << 6,
- EXYNOS_DRM_CACHE_FSH_ALL = EXYNOS_DRM_CACHE_INV_ALL |
- EXYNOS_DRM_CACHE_CLN_ALL,
- EXYNOS_DRM_CACHE_FSH_RANGE = EXYNOS_DRM_CACHE_INV_RANGE |
- EXYNOS_DRM_CACHE_CLN_RANGE,
- EXYNOS_DRM_CACHE_OP_MASK = EXYNOS_DRM_CACHE_FSH_ALL |
- EXYNOS_DRM_CACHE_FSH_RANGE
-};
-
-/**
- * A structure for cache operation.
- *
- * @usr_addr: user space address.
- * P.S. it SHOULD BE user space.
- * @size: buffer size for cache operation.
- * @flags: select cache unit and cache operation.
- * @gem_handle: a handle to a gem object.
- * this gem handle is needed for cache range operation to L2 cache.
- */
-struct drm_exynos_gem_cache_op {
- uint64_t usr_addr;
- unsigned int size;
- unsigned int flags;
- unsigned int gem_handle;
-};
-
-#define DRM_EXYNOS_GEM_CACHE_OP 0x12
-
-#define DRM_IOCTL_EXYNOS_GEM_CACHE_OP DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_GEM_CACHE_OP, struct drm_exynos_gem_cache_op)
-
-#endif /* __TBM_BUFMGR_TGL_H__ */
--- /dev/null
+/**************************************************************************
+
+libtbm_exynos
+
+Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <libudev.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <xf86drm.h>
+#include <exynos_drm.h>
+#include <pthread.h>
+#include <hal-common.h>
+#include <hal-tbm-types.h>
+#include <hal-tbm-interface.h>
+#include <system_info.h>
+#include "tbm_bufmgr_tgl.h"
+#include "tbm_backend_log.h"
+
+#define EXYNOS_DRM_NAME "exynos"
+
+#define TBM_COLOR_FORMAT_COUNT 4
+#define STRERR_BUFSIZE 128
+#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+static unsigned int g_tbm_surface_alignment_plane;
+static unsigned int g_tbm_surface_alignment_pitch_rgb;
+
+#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
+#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
+
+#define SZ_1M 0x00100000
+#define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
+#define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
+#define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
+#define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
+#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
+#define S5P_FIMV_NV12MT_HALIGN 128
+#define S5P_FIMV_NV12MT_VALIGN 64
+
+/* cache control at backend */
+static unsigned int g_enable_cache_ctrl = 0;
+
+struct dma_buf_info {
+ unsigned long size;
+ unsigned int fence_supported;
+ unsigned int padding;
+};
+
+#define DMA_BUF_ACCESS_READ 0x1
+#define DMA_BUF_ACCESS_WRITE 0x2
+#define DMA_BUF_ACCESS_DMA 0x4
+#define DMA_BUF_ACCESS_MAX 0x8
+
+#define DMA_FENCE_LIST_MAX 5
+
+struct dma_buf_fence {
+ unsigned long ctx;
+ unsigned int type;
+};
+
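+/* dmabuf fence ioctls: used when the kernel dmabuf_sync feature is enabled
+ * (use_dma_fence); cache maintenance is then handled by the kernel. */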
+#define DMABUF_IOCTL_BASE 'F'
+#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
+
+#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
+#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
+#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
+
+/* tgl key values */
+#define GLOBAL_KEY ((unsigned int)(-1))
+/* TBM_CACHE */
+#define TBM_EXYNOS_CACHE_INV 0x01 /**< cache invalidate */
+#define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */
+#define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */
+#define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */
+#define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */
+
+enum {
+ DEVICE_NONE = 0,
+ DEVICE_CA, /* cache aware device */
+ DEVICE_CO /* cache oblivious device */
+};
+
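+/* Per-bo cache state packed into one 32-bit word so it can be stored in the
+ * tgl driver keyed by the bo name (see _tgl_set_data/_tgl_get_data). */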
+typedef union _tbm_bo_cache_state tbm_bo_cache_state;
+
+union _tbm_bo_cache_state {
+ unsigned int val;
+ struct {
+		unsigned int cntFlush:16; /* global cache-flush count at the last sync */
+ unsigned int isCached:1;
+ unsigned int isDirtied:2;
+ } data;
+};
+
+typedef struct _tbm_exynos_bufmgr tbm_exynos_bufmgr;
+typedef struct _tbm_exynos_bo tbm_exynos_bo;
+
+/* tbm buffer object for exynos */
+struct _tbm_exynos_bo {
+ int fd;
+
+ unsigned int name; /* FLINK ID */
+
+ unsigned int gem; /* GEM Handle */
+
+ unsigned int dmabuf; /* fd for dmabuf */
+
+ void *pBase; /* virtual address */
+
+ unsigned int size;
+
+ unsigned int flags_exynos;
+ unsigned int flags_tbm;
+
+ pthread_mutex_t mutex;
+ struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
+ int device;
+ int opt;
+
+ tbm_bo_cache_state cache_state;
+ unsigned int map_cnt;
+ int last_map_device;
+
+ tbm_exynos_bufmgr *bufmgr_data;
+};
+
+/* tbm bufmgr private for exynos */
+struct _tbm_exynos_bufmgr {
+ int fd;
+ int isLocal;
+ void *hashBos;
+
+ int use_dma_fence;
+
+ int tgl_fd;
+};
+
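+/* human-readable names of device types and access options, for debug logs */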
+static char *STR_DEVICE[] = {
+ "DEF",
+ "CPU",
+ "2D",
+ "3D",
+ "MM"
+};
+
+static char *STR_OPT[] = {
+ "NONE",
+ "RD",
+ "WR",
+ "RDWR"
+};
+
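+/* Thin wrappers around the tgl (global lock) driver ioctls; the backend keeps
+ * the per-bo cache state in the kernel under the bo name as key. */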
+static inline int
+_tgl_init(int fd, unsigned int key)
+{
+ struct tgl_reg_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.timeout_ms = 1000;
+
+ err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_destroy(int fd, unsigned int key)
+{
+ struct tgl_reg_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_lock(int fd, unsigned int key, int opt)
+{
+ struct tgl_lock_data data;
+ enum tgl_type_data tgl_type;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ switch (opt) {
+ case HAL_TBM_OPTION_READ:
+ tgl_type = TGL_TYPE_READ;
+ break;
+ case HAL_TBM_OPTION_WRITE:
+ tgl_type = TGL_TYPE_WRITE;
+ break;
+ default:
+ tgl_type = TGL_TYPE_NONE;
+ break;
+ }
+
+ data.key = key;
+ data.type = tgl_type;
+
+ err = ioctl(fd, TGL_IOCTL_LOCK, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d opt:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_unlock(int fd, unsigned int key)
+{
+ struct tgl_lock_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.type = TGL_TYPE_NONE;
+
+ err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_set_data(int fd, unsigned int key, unsigned int val)
+{
+ struct tgl_usr_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.data1 = val;
+
+ err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline unsigned int
+_tgl_get_data(int fd, unsigned int key)
+{
+ struct tgl_usr_data data = { 0, };
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+
+ err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return data.data1;
+}
+
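+/* Invalidate and/or clean CPU caches for a bo via DRM_EXYNOS_GEM_CACHE_OP,
+ * or for everything when bo_data is NULL. */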
+static int
+_exynos_cache_flush(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data, int flags)
+{
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+
+ /* cache flush is managed by kernel side when using dma-fence. */
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ struct drm_exynos_gem_cache_op cache_op = {0, };
+ int ret;
+
+ /* if bo_data is null, do cache_flush_all */
+ if (bo_data) {
+ cache_op.flags = 0;
+ cache_op.usr_addr = (uint64_t)((uintptr_t)bo_data->pBase);
+ cache_op.size = bo_data->size;
+ } else {
+ flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
+ cache_op.flags = 0;
+ cache_op.usr_addr = 0;
+ cache_op.size = 0;
+ }
+
+ if (flags & TBM_EXYNOS_CACHE_INV) {
+ if (flags & TBM_EXYNOS_CACHE_ALL)
+ cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
+ else
+ cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
+ }
+
+ if (flags & TBM_EXYNOS_CACHE_CLN) {
+ if (flags & TBM_EXYNOS_CACHE_ALL)
+ cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
+ else
+ cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
+ }
+
+ if (flags & TBM_EXYNOS_CACHE_ALL)
+ cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
+
+ ret = drmCommandWriteRead(bufmgr_data->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
+ sizeof(cache_op));
+ if (ret) {
+ TBM_BACKEND_ERR("fail to flush the cache.\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+_bo_init_cache_state(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data, int import)
+{
+	/* do nothing if cache control is disabled */
+ if (!g_enable_cache_ctrl)
+ return 1;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ _tgl_init(bufmgr_data->tgl_fd, bo_data->name);
+
+ tbm_bo_cache_state cache_state;
+
+ if (import == 0) {
+ cache_state.data.isDirtied = DEVICE_NONE;
+ cache_state.data.isCached = 0;
+ cache_state.data.cntFlush = 0;
+
+ _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name, cache_state.val);
+ }
+
+ return 1;
+}
+
+static int
+_bo_set_cache_state(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data, int device, int opt)
+{
+	/* do nothing if cache control is disabled */
+ if (!g_enable_cache_ctrl)
+ return 1;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ char need_flush = 0;
+ unsigned short cntFlush = 0;
+
+ if (!(bo_data->flags_exynos & EXYNOS_BO_CACHABLE))
+ return 1;
+
+ /* get cache state of a bo_data */
+ bo_data->cache_state.val = _tgl_get_data(bufmgr_data->tgl_fd,
+ bo_data->name);
+
+ /* get global cache flush count */
+ cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
+
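+	/* Decide whether a flush is needed before this access:
+	 * - CPU access: invalidate when a cache-oblivious device dirtied the bo
+	 *   while CPU-cached data exists.
+	 * - device access: clean all caches when the CPU dirtied the bo and no
+	 *   global flush has happened since the saved cntFlush. */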
+ if (device == HAL_TBM_DEVICE_CPU) {
+ if (bo_data->cache_state.data.isDirtied == DEVICE_CO &&
+ bo_data->cache_state.data.isCached)
+ need_flush = TBM_EXYNOS_CACHE_INV;
+
+ bo_data->cache_state.data.isCached = 1;
+ if (opt & HAL_TBM_OPTION_WRITE)
+ bo_data->cache_state.data.isDirtied = DEVICE_CA;
+ else {
+ if (bo_data->cache_state.data.isDirtied != DEVICE_CA)
+ bo_data->cache_state.data.isDirtied = DEVICE_NONE;
+ }
+ } else {
+ if (bo_data->cache_state.data.isDirtied == DEVICE_CA &&
+ bo_data->cache_state.data.isCached &&
+ bo_data->cache_state.data.cntFlush == cntFlush)
+ need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
+
+ if (opt & HAL_TBM_OPTION_WRITE)
+ bo_data->cache_state.data.isDirtied = DEVICE_CO;
+ else {
+ if (bo_data->cache_state.data.isDirtied != DEVICE_CO)
+ bo_data->cache_state.data.isDirtied = DEVICE_NONE;
+ }
+ }
+
+ if (need_flush) {
+ if (need_flush & TBM_EXYNOS_CACHE_ALL)
+ _tgl_set_data(bufmgr_data->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
+
+ /* call cache flush */
+ _exynos_cache_flush(bufmgr_data, bo_data, need_flush);
+
+ TBM_BACKEND_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
+ bo_data->cache_state.data.isCached,
+ bo_data->cache_state.data.isDirtied,
+ need_flush,
+ cntFlush);
+ }
+
+ return 1;
+}
+
+static int
+_bo_save_cache_state(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data)
+{
+	/* do nothing if cache control is disabled */
+ if (!g_enable_cache_ctrl)
+ return 1;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ unsigned short cntFlush = 0;
+
+ /* get global cache flush count */
+ cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
+
+ /* save global cache flush count */
+ bo_data->cache_state.data.cntFlush = cntFlush;
+ _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name,
+ bo_data->cache_state.val);
+
+ return 1;
+}
+
+static void
+_bo_destroy_cache_state(tbm_exynos_bufmgr *bufmgr_data, tbm_exynos_bo *bo_data)
+{
+	/* do nothing if cache control is disabled */
+ if (!g_enable_cache_ctrl)
+ return;
+
+ TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
+ TBM_BACKEND_RETURN_IF_FAIL(bo_data != NULL);
+
+ if (bufmgr_data->use_dma_fence)
+		return;
+
+ _tgl_destroy(bufmgr_data->tgl_fd, bo_data->name);
+}
+
+static int
+_bufmgr_init_cache_state(tbm_exynos_bufmgr *bufmgr_data)
+{
+	/* do nothing if cache control is disabled */
+ if (!g_enable_cache_ctrl)
+ return 1;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ /* open tgl fd for saving cache flush data */
+ bufmgr_data->tgl_fd = open(tgl_devfile, O_RDWR);
+
+ if (bufmgr_data->tgl_fd < 0) {
+ bufmgr_data->tgl_fd = open(tgl_devfile1, O_RDWR);
+ if (bufmgr_data->tgl_fd < 0) {
+ TBM_BACKEND_ERR("fail to open global_lock:%s\n", tgl_devfile1);
+ return 0;
+ }
+ }
+
+ if (!_tgl_init(bufmgr_data->tgl_fd, GLOBAL_KEY)) {
+ TBM_BACKEND_ERR("fail to initialize the tgl\n");
+ close(bufmgr_data->tgl_fd);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void
+_bufmgr_deinit_cache_state(tbm_exynos_bufmgr *bufmgr_data)
+{
+	/* do nothing if cache control is disabled */
+ if (!g_enable_cache_ctrl)
+ return;
+
+ TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
+
+ if (bufmgr_data->use_dma_fence)
+ return;
+
+ if (bufmgr_data->tgl_fd >= 0)
+ close(bufmgr_data->tgl_fd);
+}
+
+static int
+_tbm_exynos_open_drm(void)
+{
+ int fd = -1;
+
+ fd = drmOpen(EXYNOS_DRM_NAME, NULL);
+ if (fd < 0) {
+ TBM_BACKEND_ERR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME);
+ }
+
+ if (fd < 0) {
+ struct udev *udev = NULL;
+ struct udev_enumerate *e = NULL;
+ struct udev_list_entry *entry = NULL;
+ struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+ const char *filepath;
+ struct stat s;
+ int ret;
+
+ TBM_BACKEND_DBG("search drm-device by udev\n");
+
+ udev = udev_new();
+ if (!udev) {
+ TBM_BACKEND_ERR("udev_new() failed.\n");
+ return -1;
+ }
+
+ e = udev_enumerate_new(udev);
+ udev_enumerate_add_match_subsystem(e, "drm");
+ udev_enumerate_add_match_sysname(e, "card[0-9]*");
+ udev_enumerate_scan_devices(e);
+
+ udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+ device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+ udev_list_entry_get_name(entry));
+ device_parent = udev_device_get_parent(device);
+		/* no need to unref device_parent; it shares its refcount with device */
+ if (device_parent) {
+			if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
+ drm_device = device;
+ TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
+ udev_device_get_syspath(drm_device),
+ udev_device_get_sysname(device_parent));
+ break;
+ }
+ }
+ udev_device_unref(device);
+ }
+
+ udev_enumerate_unref(e);
+
+ if (!drm_device) {
+ TBM_BACKEND_ERR("failed to find device\n");
+ udev_unref(udev);
+ return -1;
+ }
+
+ /* Get device file path. */
+ filepath = udev_device_get_devnode(drm_device);
+ if (!filepath) {
+ TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ /* Open DRM device file and check validity. */
+ fd = open(filepath, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+			TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ ret = fstat(fd, &s);
+ if (ret) {
+			TBM_BACKEND_ERR("fstat(%s) failed.\n", filepath);
+ close(fd);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ }
+
+ return fd;
+}
+
+#if 0
+static int
+_get_render_node(int is_master)
+{
+ struct udev *udev = NULL;
+ struct udev_enumerate *e = NULL;
+ struct udev_list_entry *entry = NULL;
+ struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+ const char *filepath;
+ struct stat s;
+ int fd = -1;
+ int ret;
+
+ TBM_BACKEND_DBG("search drm-device by udev(is_master:%d)\n", is_master);
+
+ udev = udev_new();
+ if (!udev) {
+ TBM_BACKEND_ERR("udev_new() failed.\n");
+ return -1;
+ }
+
+ e = udev_enumerate_new(udev);
+ udev_enumerate_add_match_subsystem(e, "drm");
+ if (is_master)
+ udev_enumerate_add_match_sysname(e, "card[0-9]*");
+ else
+ udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
+ udev_enumerate_scan_devices(e);
+
+ udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+ device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+ udev_list_entry_get_name(entry));
+ device_parent = udev_device_get_parent(device);
+		/* no need to unref device_parent; it shares its refcount with device */
+ if (device_parent) {
+ if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
+ drm_device = device;
+ TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
+ udev_device_get_syspath(drm_device),
+ udev_device_get_sysname(device_parent));
+ break;
+ }
+ }
+ udev_device_unref(device);
+ }
+
+ udev_enumerate_unref(e);
+
+ if (!drm_device) {
+ TBM_BACKEND_ERR("failed to find device\n");
+ udev_unref(udev);
+ return -1;
+ }
+
+ /* Get device file path. */
+ filepath = udev_device_get_devnode(drm_device);
+ if (!filepath) {
+ TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ /* Open DRM device file and check validity. */
+ fd = open(filepath, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+		TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ ret = fstat(fd, &s);
+ if (ret) {
+		TBM_BACKEND_ERR("fstat(%s) failed.\n", filepath);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ close(fd);
+ return -1;
+ }
+
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+
+ return fd;
+}
+#endif
+
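+/* translate hal-tbm memory-type flags into exynos GEM allocation flags, and back */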
+static unsigned int
+_get_exynos_flag_from_tbm(unsigned int ftbm)
+{
+ unsigned int flags = 0;
+
+ if (ftbm & HAL_TBM_BO_SCANOUT)
+ flags |= EXYNOS_BO_CONTIG;
+ else
+ flags |= EXYNOS_BO_NONCONTIG;
+
+ if (ftbm & HAL_TBM_BO_WC)
+ flags |= EXYNOS_BO_WC;
+ else if (ftbm & HAL_TBM_BO_NONCACHABLE)
+ flags |= EXYNOS_BO_NONCACHABLE;
+ else
+ flags |= EXYNOS_BO_CACHABLE;
+
+ return flags;
+}
+
+static unsigned int
+_get_tbm_flag_from_exynos(unsigned int fexynos)
+{
+ unsigned int flags = 0;
+
+ if (fexynos & EXYNOS_BO_NONCONTIG)
+ flags |= HAL_TBM_BO_DEFAULT;
+ else
+ flags |= HAL_TBM_BO_SCANOUT;
+
+ if (fexynos & EXYNOS_BO_WC)
+ flags |= HAL_TBM_BO_WC;
+ else if (fexynos & EXYNOS_BO_CACHABLE)
+ flags |= HAL_TBM_BO_DEFAULT;
+ else
+ flags |= HAL_TBM_BO_NONCACHABLE;
+
+ return flags;
+}
+
+static unsigned int
+_get_name(int fd, unsigned int gem)
+{
+ struct drm_gem_flink arg = {0,};
+
+ arg.handle = gem;
+ if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
+ TBM_BACKEND_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
+ return 0;
+ }
+
+ return (unsigned int)arg.name;
+}
+
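+/* Return the per-device handle of a bo: the GEM handle for DEFAULT/2D, a CPU
+ * mapping (created on demand via DRM_EXYNOS_GEM_MAP + mmap) for CPU, and a
+ * dma-buf fd (exported on demand) for 3D/MM. */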
+static hal_tbm_bo_handle
+_exynos_bo_handle(tbm_exynos_bo *bo_data, int device)
+{
+ hal_tbm_bo_handle bo_handle;
+
+ memset(&bo_handle, 0x0, sizeof(uint64_t));
+
+ switch (device) {
+ case HAL_TBM_DEVICE_DEFAULT:
+ case HAL_TBM_DEVICE_2D:
+ bo_handle.u32 = (uint32_t)bo_data->gem;
+ break;
+ case HAL_TBM_DEVICE_CPU:
+ if (!bo_data->pBase) {
+ struct drm_exynos_gem_map arg = {0,};
+ void *map = NULL;
+
+ arg.handle = bo_data->gem;
+ if (drmCommandWriteRead(bo_data->fd, DRM_EXYNOS_GEM_MAP, &arg, sizeof(arg))) {
+ TBM_BACKEND_ERR("Cannot map_exynos gem=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ map = mmap(NULL, bo_data->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bo_data->fd, arg.offset);
+ if (map == MAP_FAILED) {
+ TBM_BACKEND_ERR("Cannot usrptr gem=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+ bo_data->pBase = map;
+ }
+ bo_handle.ptr = (void *)bo_data->pBase;
+ break;
+ case HAL_TBM_DEVICE_3D:
+ case HAL_TBM_DEVICE_MM:
+ if (!bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ bo_handle.u32 = (uint32_t)bo_data->dmabuf;
+ break;
+ default:
+ TBM_BACKEND_ERR("Not supported device:%d\n", device);
+ bo_handle.ptr = (void *) NULL;
+ break;
+ }
+
+ return bo_handle;
+}
+
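+/* NV12 plane-size helpers: plane sizes are macroblock-aligned for the S5P
+ * FIMV (MFC) video codec; the Y/UV plane sizes used below take the larger of
+ * the legacy and the newer calculation. */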
+static int
+_new_calc_plane_nv12(int width, int height)
+{
+ int mbX, mbY;
+
+ mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
+ mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
+
+ if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
+ mbY = (mbY + 1) / 2 * 2;
+
+ return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
+ S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
+}
+
+static int
+_calc_yplane_nv12(int width, int height)
+{
+ int mbX, mbY;
+
+ mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
+ mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
+
+ return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
+}
+
+static int
+_calc_uvplane_nv12(int width, int height)
+{
+ int mbX, mbY;
+
+ mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
+ mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
+
+ return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
+}
+
+static int
+_new_calc_yplane_nv12(int width, int height)
+{
+ return SIZE_ALIGN(_new_calc_plane_nv12(width,
+ height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
+ TBM_SURFACE_ALIGNMENT_PLANE_NV12);
+}
+
+static int
+_new_calc_uvplane_nv12(int width, int height)
+{
+ return SIZE_ALIGN((_new_calc_plane_nv12(width,
+ height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
+ TBM_SURFACE_ALIGNMENT_PLANE_NV12);
+}
+
+static hal_tbm_bufmgr_capability
+tbm_exynos_bufmgr_get_capabilities(hal_tbm_bufmgr *bufmgr, hal_tbm_error *error)
+{
+ hal_tbm_bufmgr_capability capabilities = HAL_TBM_BUFMGR_CAPABILITY_NONE;
+
+ capabilities = HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY | HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD;
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return capabilities;
+}
+
+static hal_tbm_error
+tbm_exynos_bufmgr_get_supported_formats(hal_tbm_bufmgr *bufmgr,
+ uint32_t **formats, uint32_t *num)
+{
+	static const uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
+ HAL_TBM_FORMAT_ARGB8888,
+ HAL_TBM_FORMAT_XRGB8888,
+ HAL_TBM_FORMAT_NV12,
+ HAL_TBM_FORMAT_YUV420
+ };
+
+ tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
+ uint32_t *color_formats;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
+ if (color_formats == NULL)
+ return HAL_TBM_ERROR_OUT_OF_MEMORY;
+
+ memcpy(color_formats, tbm_exynos_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
+
+ *formats = color_formats;
+ *num = TBM_COLOR_FORMAT_COUNT;
+
+ TBM_BACKEND_DBG("supported format count = %d\n", *num);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_error
+tbm_exynos_bufmgr_get_plane_data(hal_tbm_bufmgr *bufmgr,
+ hal_tbm_format format, int plane_idx, int width,
+ int height, uint32_t *size, uint32_t *offset,
+ uint32_t *pitch, int *bo_idx)
+{
+ tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
+ int bpp;
+ int _offset = 0;
+ int _pitch = 0;
+ int _size = 0;
+ int _bo_idx = 0;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ switch (format) {
+ /* 16 bpp RGB */
+ case HAL_TBM_FORMAT_XRGB4444:
+ case HAL_TBM_FORMAT_XBGR4444:
+ case HAL_TBM_FORMAT_RGBX4444:
+ case HAL_TBM_FORMAT_BGRX4444:
+ case HAL_TBM_FORMAT_ARGB4444:
+ case HAL_TBM_FORMAT_ABGR4444:
+ case HAL_TBM_FORMAT_RGBA4444:
+ case HAL_TBM_FORMAT_BGRA4444:
+ case HAL_TBM_FORMAT_XRGB1555:
+ case HAL_TBM_FORMAT_XBGR1555:
+ case HAL_TBM_FORMAT_RGBX5551:
+ case HAL_TBM_FORMAT_BGRX5551:
+ case HAL_TBM_FORMAT_ARGB1555:
+ case HAL_TBM_FORMAT_ABGR1555:
+ case HAL_TBM_FORMAT_RGBA5551:
+ case HAL_TBM_FORMAT_BGRA5551:
+ case HAL_TBM_FORMAT_RGB565:
+ bpp = 16;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ break;
+ /* 24 bpp RGB */
+ case HAL_TBM_FORMAT_RGB888:
+ case HAL_TBM_FORMAT_BGR888:
+ bpp = 24;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ break;
+ /* 32 bpp RGB */
+ case HAL_TBM_FORMAT_XRGB8888:
+ case HAL_TBM_FORMAT_XBGR8888:
+ case HAL_TBM_FORMAT_RGBX8888:
+ case HAL_TBM_FORMAT_BGRX8888:
+ case HAL_TBM_FORMAT_ARGB8888:
+ case HAL_TBM_FORMAT_ABGR8888:
+ case HAL_TBM_FORMAT_RGBA8888:
+ case HAL_TBM_FORMAT_BGRA8888:
+ bpp = 32;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ break;
+
+ /* packed YCbCr */
+ case HAL_TBM_FORMAT_YUYV:
+ case HAL_TBM_FORMAT_YVYU:
+ case HAL_TBM_FORMAT_UYVY:
+ case HAL_TBM_FORMAT_VYUY:
+ case HAL_TBM_FORMAT_AYUV:
+ bpp = 32;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ break;
+
+ /*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+ case HAL_TBM_FORMAT_NV12:
+ case HAL_TBM_FORMAT_NV21:
+ bpp = 12;
+ if (plane_idx == 0) {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = MAX(_calc_yplane_nv12(width, height),
+ _new_calc_yplane_nv12(width, height));
+ _bo_idx = 0;
+ } else if (plane_idx == 1) {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = MAX(_calc_uvplane_nv12(width, height),
+ _new_calc_uvplane_nv12(width, height));
+ _bo_idx = 1;
+ }
+ break;
+ case HAL_TBM_FORMAT_NV16:
+ case HAL_TBM_FORMAT_NV61:
+ bpp = 16;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if( plane_idx ==1 )*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ }
+ break;
+
+ /*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+
+ /*
+ * NATIVE_BUFFER_FORMAT_YV12
+ * NATIVE_BUFFER_FORMAT_I420
+ */
+ case HAL_TBM_FORMAT_YUV410:
+ case HAL_TBM_FORMAT_YVU410:
+ bpp = 9;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
+ _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
+ _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV411:
+ case HAL_TBM_FORMAT_YVU411:
+ case HAL_TBM_FORMAT_YUV420:
+ case HAL_TBM_FORMAT_YVU420:
+ bpp = 12;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV422:
+ case HAL_TBM_FORMAT_YVU422:
+ bpp = 16;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV444:
+ case HAL_TBM_FORMAT_YVU444:
+ bpp = 24;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
+ _bo_idx = 0;
+ }
+ break;
+ default:
+ bpp = 0;
+ break;
+ }
+
+ *size = _size;
+ *offset = _offset;
+ *pitch = _pitch;
+ *bo_idx = _bo_idx;
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_bo *
+tbm_exynos_bufmgr_alloc_bo(hal_tbm_bufmgr *bufmgr, unsigned int size,
+ hal_tbm_bo_memory_type flags, hal_tbm_error *error)
+{
+ tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
+ tbm_exynos_bo *bo_data;
+ unsigned int exynos_flags;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr_data is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ bo_data = calloc(1, sizeof(struct _tbm_exynos_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ exynos_flags = _get_exynos_flag_from_tbm(flags);
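+	/* very small scanout buffers are still allocated non-contiguously */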
+ if ((flags & HAL_TBM_BO_SCANOUT) &&
+ size <= 4 * 1024) {
+ exynos_flags |= EXYNOS_BO_NONCONTIG;
+ }
+
+ struct drm_exynos_gem_create arg = {0, };
+
+ arg.size = (uint64_t)size;
+ arg.flags = exynos_flags;
+ if (drmCommandWriteRead(bufmgr_data->fd, DRM_EXYNOS_GEM_CREATE, &arg,
+ sizeof(arg))) {
+ TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
+ (unsigned int)arg.size);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = arg.handle;
+ bo_data->size = size;
+ bo_data->flags_tbm = flags;
+ bo_data->flags_exynos = exynos_flags;
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 0)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ pthread_mutex_init(&bo_data->mutex, NULL);
+
+ if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ flags, exynos_flags,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
+static hal_tbm_bo *
+tbm_exynos_bufmgr_import_fd(hal_tbm_bufmgr *bufmgr, hal_tbm_fd key, hal_tbm_error *error)
+{
+ tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
+ tbm_exynos_bo *bo_data;
+ unsigned int gem = 0;
+ unsigned int name;
+ int ret;
+ char buf[STRERR_BUFSIZE];
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr_data is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ /*getting handle from fd*/
+ struct drm_prime_handle arg = {0, };
+
+ arg.fd = key;
+ arg.flags = 0;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
+ TBM_BACKEND_ERR("Cannot get gem handle from fd:%d (%s)\n",
+ arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+ gem = arg.handle;
+
+ name = _get_name(bufmgr_data->fd, gem);
+ if (!name) {
+ TBM_BACKEND_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
+ gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
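+	/* reuse the existing bo_data if this flink name was already imported with the same gem handle */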
+ ret = drmHashLookup(bufmgr_data->hashBos, name, (void **)&bo_data);
+ if (ret == 0) {
+ if (gem == bo_data->gem) {
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+ return bo_data;
+ }
+ }
+
+	/* Determine the size of the bo_data. The fd-to-handle ioctl really
+	 * should return the size, but it doesn't. On kernel 3.12 or later
+	 * we can lseek() on the prime fd to get the size. On older kernels
+	 * the lseek() just fails, in which case we fall back to the size
+	 * reported by DRM_EXYNOS_GEM_GET.
+	 */
+ unsigned int real_size = -1;
+ struct drm_exynos_gem_info info = {0, };
+
+ real_size = lseek(key, 0, SEEK_END);
+
+ info.handle = gem;
+ if (drmCommandWriteRead(bufmgr_data->fd,
+ DRM_EXYNOS_GEM_GET,
+ &info,
+ sizeof(struct drm_exynos_gem_info))) {
+ TBM_BACKEND_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
+ gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ if (real_size == -1)
+ real_size = info.size;
+
+ bo_data = calloc(1, sizeof(struct _tbm_exynos_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("bo_data:%p fail to allocate the bo_data\n", bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = gem;
+ bo_data->size = real_size;
+ bo_data->flags_exynos = info.flags;
+ bo_data->flags_tbm = _get_tbm_flag_from_exynos(bo_data->flags_exynos);
+ bo_data->name = name;
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("bo_data:%p Cannot insert bo_data to Hash(%d) from gem:%d, fd:%d\n",
+ bo_data, bo_data->name, gem, key);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ key,
+ bo_data->flags_tbm, bo_data->flags_exynos,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
+static hal_tbm_bo *
+tbm_exynos_bufmgr_import_key(hal_tbm_bufmgr *bufmgr, hal_tbm_key key, hal_tbm_error *error)
+{
+ tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)bufmgr;
+ tbm_exynos_bo *bo_data;
+ int ret;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr_data is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
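+	/* return the existing bo_data if this key (flink name) was already imported */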
+ ret = drmHashLookup(bufmgr_data->hashBos, key, (void **)&bo_data);
+ if (ret == 0) {
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+ return (hal_tbm_bo *)bo_data;
+ }
+
+ struct drm_gem_open arg = {0, };
+ struct drm_exynos_gem_info info = {0, };
+
+ arg.name = key;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
+ TBM_BACKEND_ERR("Cannot open gem name=%d\n", key);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ info.handle = arg.handle;
+ if (drmCommandWriteRead(bufmgr_data->fd,
+ DRM_EXYNOS_GEM_GET,
+ &info,
+ sizeof(struct drm_exynos_gem_info))) {
+ TBM_BACKEND_ERR("Cannot get gem info=%d\n", key);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ bo_data = calloc(1, sizeof(struct _tbm_exynos_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = arg.handle;
+ bo_data->size = arg.size;
+ bo_data->flags_exynos = info.flags;
+ bo_data->name = key;
+ bo_data->flags_tbm = _get_tbm_flag_from_exynos(bo_data->flags_exynos);
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ if (!bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ free(bo_data);
+ return NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm, bo_data->flags_exynos,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
+static void
+tbm_exynos_bo_free(hal_tbm_bo *bo)
+{
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+ tbm_exynos_bo *temp;
+ tbm_exynos_bufmgr *bufmgr_data;
+ char buf[STRERR_BUFSIZE];
+ int ret;
+
+ if (!bo_data)
+ return;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return;
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->size);
+
+ if (bo_data->pBase) {
+ if (munmap(bo_data->pBase, bo_data->size) == -1) {
+ TBM_BACKEND_ERR("bo_data:%p fail to munmap(%s)\n",
+ bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
+ }
+ }
+
+ /* close dmabuf */
+ if (bo_data->dmabuf) {
+ close(bo_data->dmabuf);
+ bo_data->dmabuf = 0;
+ }
+
+ /* delete bo_data from hash */
+	ret = drmHashLookup(bufmgr_data->hashBos, bo_data->name, (void **)&temp);
+	if (ret == 0) {
+		if (temp != bo_data)
+			TBM_BACKEND_ERR("hashBos probably has several BOs with same name!!!\n");
+		drmHashDelete(bufmgr_data->hashBos, bo_data->name);
+	} else {
+		TBM_BACKEND_ERR("Cannot find bo_data to Hash(%d), ret=%d\n", bo_data->name, ret);
+	}
+
+ _bo_destroy_cache_state(bufmgr_data, bo_data);
+
+ /* Free gem handle */
+ struct drm_gem_close arg = {0, };
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_GEM_CLOSE, &arg))
+ TBM_BACKEND_ERR("bo_data:%p fail to gem close.(%s)\n",
+ bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
+
+ free(bo_data);
+}
+
+static int
+tbm_exynos_bo_get_size(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_data->size;
+}
+
+static hal_tbm_bo_memory_type
+tbm_exynos_bo_get_memory_type(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return HAL_TBM_BO_DEFAULT;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_data->flags_tbm;
+}
+
+static hal_tbm_bo_handle
+tbm_exynos_bo_get_handle(hal_tbm_bo *bo, hal_tbm_bo_device_type device, hal_tbm_error *error)
+{
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+ hal_tbm_bo_handle bo_handle;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (!bo_data->gem) {
+ TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ TBM_BACKEND_DBG("bo_data:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm, bo_data->flags_exynos,
+ bo_data->size,
+ STR_DEVICE[device]);
+
+ /*Get mapped bo_handle*/
+ bo_handle = _exynos_bo_handle(bo_data, device);
+ if (bo_handle.ptr == NULL) {
+ TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d\n",
+ bo_data->gem, device);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_handle;
+}
+
+static hal_tbm_bo_handle
+tbm_exynos_bo_map(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
+ hal_tbm_bo_access_option opt, hal_tbm_error *error)
+{
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+ hal_tbm_bo_handle bo_handle;
+ tbm_exynos_bufmgr *bufmgr_data;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (!bo_data->gem) {
+ TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, %s, %s\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ STR_DEVICE[device],
+ STR_OPT[opt]);
+
+ /*Get mapped bo_handle*/
+ bo_handle = _exynos_bo_handle(bo_data, device);
+ if (bo_handle.ptr == NULL) {
+ TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
+ bo_data->gem, device, opt);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
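+	/* on the first mapping, set the cache state for the requested device and access option */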
+ if (bo_data->map_cnt == 0)
+ _bo_set_cache_state(bufmgr_data, bo_data, device, opt);
+
+ bo_data->last_map_device = device;
+
+ bo_data->map_cnt++;
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_handle;
+}
+
+static hal_tbm_error
+tbm_exynos_bo_unmap(hal_tbm_bo *bo)
+{
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+ tbm_exynos_bufmgr *bufmgr_data;
+
+ if (!bo_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ if (!bo_data->gem)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bo_data->map_cnt--;
+
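+	/* when the last mapping goes away, save the cache state of this bo */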
+ if (bo_data->map_cnt == 0)
+ _bo_save_cache_state(bufmgr_data, bo_data);
+
+	/* if cache control is enabled and the last mapping was for the CPU, flush the CPU cache */
+ if (g_enable_cache_ctrl && bo_data->last_map_device == HAL_TBM_DEVICE_CPU)
+ _exynos_cache_flush(bufmgr_data, bo_data, TBM_EXYNOS_CACHE_FLUSH_ALL);
+
+ bo_data->last_map_device = -1;
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_error
+tbm_exynos_bo_lock(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
+ hal_tbm_bo_access_option opt)
+{
+#ifndef ALWAYS_BACKEND_CTRL
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+ tbm_exynos_bufmgr *bufmgr_data;
+ struct dma_buf_fence fence;
+ struct flock filelock;
+ int ret = 0;
+ char buf[STRERR_BUFSIZE];
+
+ if (!bo_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	if (device != HAL_TBM_DEVICE_3D && device != HAL_TBM_DEVICE_CPU) {
+		TBM_BACKEND_DBG("Unsupported device type.\n");
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+ memset(&fence, 0, sizeof(struct dma_buf_fence));
+
+ /* Check if the given type is valid or not. */
+ if (opt & HAL_TBM_OPTION_WRITE) {
+		if (device == HAL_TBM_DEVICE_3D)
+ fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
+ } else if (opt & HAL_TBM_OPTION_READ) {
+		if (device == HAL_TBM_DEVICE_3D)
+ fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
+ } else {
+ TBM_BACKEND_ERR("Invalid argument\n");
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Check if the tbm manager supports dma fence or not. */
+ if (!bufmgr_data->use_dma_fence) {
+		TBM_BACKEND_ERR("DMA FENCE is not supported(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+	if (device == HAL_TBM_DEVICE_3D) {
+ ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
+ if (ret < 0) {
+ TBM_BACKEND_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+ } else {
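+		/* for CPU access, take an advisory record lock on the dmabuf fd instead of a dma fence */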
+ if (opt & HAL_TBM_OPTION_WRITE)
+ filelock.l_type = F_WRLCK;
+ else
+ filelock.l_type = F_RDLCK;
+
+ filelock.l_whence = SEEK_CUR;
+ filelock.l_start = 0;
+ filelock.l_len = 0;
+
+ if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+ pthread_mutex_lock(&bo_data->mutex);
+
+	if (device == HAL_TBM_DEVICE_3D) {
+ int i;
+
+ for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
+ if (bo_data->dma_fence[i].ctx == 0) {
+ bo_data->dma_fence[i].type = fence.type;
+ bo_data->dma_fence[i].ctx = fence.ctx;
+ break;
+ }
+ }
+
+ if (i == DMA_FENCE_LIST_MAX) {
+ /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
+ TBM_BACKEND_ERR("fence list is full\n");
+ }
+ }
+
+ pthread_mutex_unlock(&bo_data->mutex);
+
+	TBM_BACKEND_DBG("DMABUF_IOCTL_GET_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+#endif /* ALWAYS_BACKEND_CTRL */
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_error
+tbm_exynos_bo_unlock(hal_tbm_bo *bo)
+{
+#ifndef ALWAYS_BACKEND_CTRL
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+ tbm_exynos_bufmgr *bufmgr_data = NULL;
+ struct dma_buf_fence fence;
+ struct flock filelock;
+ unsigned int dma_type = 0;
+ int ret = 0;
+ char buf[STRERR_BUFSIZE];
+
+	if (!bo_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ if (bo_data->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
+ dma_type = 1;
+
+	if (!bo_data->dma_fence[0].ctx && dma_type) {
+		TBM_BACKEND_DBG("FENCE is not supported or ignored.\n");
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+	if (!bo_data->dma_fence[0].type && !dma_type) {
+		TBM_BACKEND_DBG("device type is not 3D/CPU.\n");
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+ pthread_mutex_lock(&bo_data->mutex);
+
+ if (dma_type) {
+ fence.type = bo_data->dma_fence[0].type;
+ fence.ctx = bo_data->dma_fence[0].ctx;
+ int i;
+
+ for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
+ bo_data->dma_fence[i - 1].type = bo_data->dma_fence[i].type;
+ bo_data->dma_fence[i - 1].ctx = bo_data->dma_fence[i].ctx;
+ }
+ bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
+ bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
+ }
+ pthread_mutex_unlock(&bo_data->mutex);
+
+ if (dma_type) {
+ ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
+ if (ret < 0) {
+ TBM_BACKEND_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+ } else {
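+		/* release the advisory record lock taken for CPU access */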
+ filelock.l_type = F_UNLCK;
+ filelock.l_whence = SEEK_CUR;
+ filelock.l_start = 0;
+ filelock.l_len = 0;
+
+ if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+	TBM_BACKEND_DBG("DMABUF_IOCTL_PUT_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+#endif /* ALWAYS_BACKEND_CTRL */
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_fd
+tbm_exynos_bo_export_fd(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+ int ret;
+ char buf[STRERR_BUFSIZE];
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return -1;
+ }
+
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ ret = drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
+ if (ret) {
+ TBM_BACKEND_ERR("bo_data:%p Cannot dmabuf=%d (%s)\n",
+ bo_data, bo_data->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_fd) ret;
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ arg.fd,
+ bo_data->flags_tbm, bo_data->flags_exynos,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_fd)arg.fd;
+}
+
+static hal_tbm_key
+tbm_exynos_bo_export_key(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_exynos_bo *bo_data = (tbm_exynos_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+
+ if (!bo_data->name) {
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+ if (!bo_data->name) {
+ TBM_BACKEND_ERR("error Cannot get name\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm, bo_data->flags_exynos,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_key)bo_data->name;
+}
+
+static hal_tbm_error
+_tbm_exynos_authenticated_drm_fd_handler(hal_tbm_fd auth_fd, void *user_data)
+{
+ tbm_exynos_bufmgr *bufmgr_data = (tbm_exynos_bufmgr *)user_data;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ bufmgr_data->fd = auth_fd;
+ TBM_BACKEND_INFO("Get the authenticated drm_fd(%d)!\n", bufmgr_data->fd);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static int
+hal_backend_tbm_exynos_exit(void *data)
+{
+ hal_tbm_backend_data *backend_data = (hal_tbm_backend_data *)data;
+ tbm_exynos_bufmgr *bufmgr_data;
+ unsigned long key;
+ void *value;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(backend_data != NULL, -1);
+
+ bufmgr_data = (tbm_exynos_bufmgr *)backend_data->bufmgr;
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, -1);
+
+ if (backend_data->bufmgr_funcs)
+ free(backend_data->bufmgr_funcs);
+ if (backend_data->bo_funcs)
+ free(backend_data->bo_funcs);
+
+ if (bufmgr_data->hashBos) {
+ while (drmHashFirst(bufmgr_data->hashBos, &key, &value) > 0) {
+ free(value);
+ drmHashDelete(bufmgr_data->hashBos, key);
+ }
+
+ drmHashDestroy(bufmgr_data->hashBos);
+ bufmgr_data->hashBos = NULL;
+ }
+
+ _bufmgr_deinit_cache_state(bufmgr_data);
+
+ close(bufmgr_data->fd);
+
+ free(backend_data->bufmgr);
+ free(backend_data);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static int
+hal_backend_tbm_exynos_init(void **data)
+{
+ hal_tbm_backend_data *backend_data = NULL;
+ hal_tbm_bufmgr_funcs *bufmgr_funcs = NULL;
+ hal_tbm_bo_funcs *bo_funcs = NULL;
+ tbm_exynos_bufmgr *bufmgr_data = NULL;
+ int drm_fd = -1;
+ int fp;
+ char *value = NULL;
+
+ /* allocate a hal_tbm_backend_data */
+ backend_data = calloc(1, sizeof(struct _hal_tbm_backend_data));
+ if (!backend_data) {
+ TBM_BACKEND_ERR("fail to alloc backend_data!\n");
+ *data = NULL;
+ return -1;
+ }
+ *data = backend_data;
+
+ /* allocate a hal_tbm_bufmgr */
+ bufmgr_data = calloc(1, sizeof(struct _tbm_exynos_bufmgr));
+ if (!bufmgr_data) {
+ TBM_BACKEND_ERR("fail to alloc bufmgr_data!\n");
+ goto fail_alloc_bufmgr_data;
+ }
+ backend_data->bufmgr = (hal_tbm_bufmgr *)bufmgr_data;
+
+ // open drm_fd
+ drm_fd = _tbm_exynos_open_drm();
+ if (drm_fd < 0) {
+ TBM_BACKEND_ERR("fail to open drm!\n");
+ goto fail_open_drm;
+ }
+
+	// set to 1 when the backend has a drm_device.
+ backend_data->has_drm_device = 1;
+
+ // check if drm_fd is master_drm_fd.
+ if (drmIsMaster(drm_fd)) {
+ // drm_fd is a master_drm_fd.
+ backend_data->drm_info.drm_fd = drm_fd;
+ backend_data->drm_info.is_master = 1;
+
+ bufmgr_data->fd = drm_fd;
+ TBM_BACKEND_INFO("Get the master drm_fd(%d)!\n", bufmgr_data->fd);
+ } else {
+ // drm_fd is not a master_drm_fd.
+ // request authenticated fd
+ close(drm_fd);
+ backend_data->drm_info.drm_fd = -1;
+ backend_data->drm_info.is_master = 0;
+ backend_data->drm_info.auth_drm_fd_func = _tbm_exynos_authenticated_drm_fd_handler;
+ backend_data->drm_info.user_data = bufmgr_data;
+
+ TBM_BACKEND_INFO("A backend requests an authenticated drm_fd.\n");
+ }
+
+	// Check if the tbm manager supports dma fence or not.
+ fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
+ if (fp != -1) {
+ char buf[1];
+ int length = read(fp, buf, 1);
+
+ if (length == 1 && buf[0] == '1')
+ bufmgr_data->use_dma_fence = 1;
+
+ close(fp);
+ }
+
+	/* Get the model name from capi-system-info.
+	 * The alignment_plane and alignment_pitch_rgb values differ according to the target.
+	 * Stride issues will occur when the right alignment_plane and alignment_pitch_rgb
+	 * are not set for the backend.
+	 */
+	if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) {
+		TBM_BACKEND_ERR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n");
+		TBM_BACKEND_ERR("The right alignment values may not be set on the hal-backend-tbm-exynos backend.\n");
+ } else {
+ if (!strncmp(value, "TW1", 4)) {
+ g_tbm_surface_alignment_plane = 8;
+ g_tbm_surface_alignment_pitch_rgb = 8;
+ g_enable_cache_ctrl = 1;
+ } else {
+ g_tbm_surface_alignment_plane = 64;
+ g_tbm_surface_alignment_pitch_rgb = 64;
+ }
+ }
+
+ free(value);
+
+ if (!_bufmgr_init_cache_state(bufmgr_data)) {
+ TBM_BACKEND_ERR("fail to init bufmgr cache state\n");
+ goto fail_init_cache_state;
+ }
+
+ /*Create Hash Table*/
+ bufmgr_data->hashBos = drmHashCreate();
+
+ /* alloc and register bufmgr_funcs */
+ bufmgr_funcs = calloc(1, sizeof(struct _hal_tbm_bufmgr_funcs));
+ if (!bufmgr_funcs) {
+ TBM_BACKEND_ERR("fail to alloc bufmgr_funcs!\n");
+ goto fail_alloc_bufmgr_funcs;
+ }
+ backend_data->bufmgr_funcs = bufmgr_funcs;
+
+ bufmgr_funcs->bufmgr_get_capabilities = tbm_exynos_bufmgr_get_capabilities;
+ bufmgr_funcs->bufmgr_get_supported_formats = tbm_exynos_bufmgr_get_supported_formats;
+ bufmgr_funcs->bufmgr_get_plane_data = tbm_exynos_bufmgr_get_plane_data;
+ bufmgr_funcs->bufmgr_alloc_bo = tbm_exynos_bufmgr_alloc_bo;
+ bufmgr_funcs->bufmgr_alloc_bo_with_format = NULL;
+ bufmgr_funcs->bufmgr_import_fd = tbm_exynos_bufmgr_import_fd;
+ bufmgr_funcs->bufmgr_import_key = tbm_exynos_bufmgr_import_key;
+
+ /* alloc and register bo_funcs */
+ bo_funcs = calloc(1, sizeof(struct _hal_tbm_bo_funcs));
+ if (!bo_funcs) {
+ TBM_BACKEND_ERR("fail to alloc bo_funcs!\n");
+ goto fail_alloc_bo_funcs;
+ }
+ backend_data->bo_funcs = bo_funcs;
+
+ bo_funcs->bo_free = tbm_exynos_bo_free;
+ bo_funcs->bo_get_size = tbm_exynos_bo_get_size;
+ bo_funcs->bo_get_memory_types = tbm_exynos_bo_get_memory_type;
+ bo_funcs->bo_get_handle = tbm_exynos_bo_get_handle;
+ bo_funcs->bo_map = tbm_exynos_bo_map;
+ bo_funcs->bo_unmap = tbm_exynos_bo_unmap;
+ bo_funcs->bo_lock = tbm_exynos_bo_lock;
+ bo_funcs->bo_unlock = tbm_exynos_bo_unlock;
+ bo_funcs->bo_export_fd = tbm_exynos_bo_export_fd;
+ bo_funcs->bo_export_key = tbm_exynos_bo_export_key;
+
+ TBM_BACKEND_DBG("drm_fd:%d\n", bufmgr_data->fd);
+
+ return HAL_TBM_ERROR_NONE;
+
+fail_alloc_bo_funcs:
+ free(bufmgr_funcs);
+fail_alloc_bufmgr_funcs:
+ _bufmgr_deinit_cache_state(bufmgr_data);
+ if (bufmgr_data->hashBos)
+ drmHashDestroy(bufmgr_data->hashBos);
+fail_init_cache_state:
+ close(bufmgr_data->fd);
+fail_open_drm:
+ free(bufmgr_data);
+fail_alloc_bufmgr_data:
+ free(backend_data);
+
+ *data = NULL;
+
+ return -1;
+}
+
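+/* backend descriptor exported to the HAL layer: backend name, vendor,
+ * supported ABI version, and the init/exit entry points */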
+hal_backend hal_backend_tbm_data = {
+ "exynos",
+ "Samsung",
+ HAL_ABI_VERSION_TIZEN_6_5,
+ hal_backend_tbm_exynos_init,
+ hal_backend_tbm_exynos_exit
+};
--- /dev/null
+/**************************************************************************
+
+libtbm_exynos
+
+Copyright 2021 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: SooChan Lim <sc1.lim@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "tbm_backend_log.h"
+
+#undef LOG_TAG
+#define LOG_TAG "TBM_BACKEND"
+
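+/* default log level; tbm_backend_log_print() drops messages above this level */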
+unsigned int tbm_log_debug_level = TBM_BACKEND_LOG_LEVEL_INFO;
+
+static void
+_tbm_backend_log_dlog_print(int level, const char *fmt, va_list arg)
+{
+ log_priority dlog_prio;
+
+ switch (level) {
+ case TBM_BACKEND_LOG_LEVEL_ERR:
+ dlog_prio = DLOG_ERROR;
+ break;
+ case TBM_BACKEND_LOG_LEVEL_WRN:
+ dlog_prio = DLOG_WARN;
+ break;
+ case TBM_BACKEND_LOG_LEVEL_INFO:
+ dlog_prio = DLOG_INFO;
+ break;
+ case TBM_BACKEND_LOG_LEVEL_DBG:
+ dlog_prio = DLOG_DEBUG;
+ break;
+ default:
+ return;
+ }
+ __dlog_vprint(LOG_ID_SYSTEM, dlog_prio, LOG_TAG, fmt, arg);
+}
+
+void
+tbm_backend_log_print(int level, const char *fmt, ...)
+{
+ va_list arg;
+
+ if (level > tbm_log_debug_level)
+ return;
+
+ va_start(arg, fmt);
+ _tbm_backend_log_dlog_print(level, fmt, arg);
+ va_end(arg);
+}
+
--- /dev/null
+/**************************************************************************
+
+libtbm_exynos
+
+Copyright 2021 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: SooChan Lim <sc1.lim@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifndef __TBM_BACKEND_LOG_H__
+#define __TBM_BACKEND_LOG_H__
+
+#include <sys/syscall.h>
+#include <time.h>
+#include <dlog.h>
+
+enum {
+ TBM_BACKEND_LOG_LEVEL_NONE,
+ TBM_BACKEND_LOG_LEVEL_ERR,
+ TBM_BACKEND_LOG_LEVEL_WRN,
+ TBM_BACKEND_LOG_LEVEL_INFO,
+ TBM_BACKEND_LOG_LEVEL_DBG,
+};
+
+
+/* log level */
+void tbm_backend_log_print(int level, const char *fmt, ...);
+
+#define TBM_BACKEND_DBG(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_DBG, "[%5d.%06d][%d][%s %d]"fmt, \
+ (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
+ (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
+ } while (0)
+
+#define TBM_BACKEND_INFO(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_INFO, "[%5d.%06d][%d][%s %d]"fmt, \
+ (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
+ (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
+ } while (0)
+
+#define TBM_BACKEND_WRN(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_WRN, "[%5d.%06d][%d][%s %d]"fmt, \
+ (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
+ (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
+ } while (0)
+
+#define TBM_BACKEND_ERR(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_ERR, "[%5d.%06d][%d][%s %d]"fmt, \
+ (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
+ (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
+ } while (0)
+
+#define TBM_BACKEND_RETURN_IF_FAIL(cond) {\
+ if (!(cond)) {\
+ TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
+ return;\
+ } \
+}
+#define TBM_BACKEND_RETURN_VAL_IF_FAIL(cond, val) {\
+ if (!(cond)) {\
+ TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
+ return val;\
+ } \
+}
+#define TBM_BACKEND_GOTO_VAL_IF_FAIL(cond, val) {\
+ if (!(cond)) {\
+ TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
+ goto val;\
+ } \
+}
+
+#endif /* __TBM_BACKEND_LOG_H__ */
--- /dev/null
+/**************************************************************************
+ *
+ * libtbm
+ *
+ * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
+ *
+ * Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
+ * Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * **************************************************************************/
+
+#ifndef __TBM_BUFMGR_TGL_H__
+#define __TBM_BUFMGR_TGL_H__
+
+#include <linux/ioctl.h>
+
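+/* candidate device nodes for the global lock (tgl) driver */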
+static char tgl_devfile[] = "/dev/slp_global_lock";
+static char tgl_devfile1[] = "/dev/tgl";
+
+#define TGL_IOCTL_BASE 0x32
+#define TGL_IO(nr) _IO(TGL_IOCTL_BASE, nr)
+#define TGL_IOR(nr, type) _IOR(TGL_IOCTL_BASE, nr, type)
+#define TGL_IOW(nr, type) _IOW(TGL_IOCTL_BASE, nr, type)
+#define TGL_IOWR(nr, type) _IOWR(TGL_IOCTL_BASE, nr, type)
+
+/**
+ * struct tgl_ver_data - tgl version data structure
+ * @major: major version
+ * @minor: minor version
+ */
+struct tgl_ver_data {
+ unsigned int major;
+ unsigned int minor;
+};
+
+/**
+ * struct tgl_reg_data - tgl data structure
+ * @key: lookup key
+ * @timeout_ms: timeout value for waiting event
+ */
+struct tgl_reg_data {
+ unsigned int key;
+ unsigned int timeout_ms;
+};
+
+enum tgl_type_data {
+ TGL_TYPE_NONE = 0,
+ TGL_TYPE_READ = (1 << 0),
+ TGL_TYPE_WRITE = (1 << 1),
+};
+
+/**
+ * struct tgl_lock_data - tgl lock data structure
+ * @key: lookup key
+ * @type: lock type that is in tgl_type_data
+ */
+struct tgl_lock_data {
+ unsigned int key;
+ enum tgl_type_data type;
+};
+
+enum tgl_status_data {
+ TGL_STATUS_UNLOCKED,
+ TGL_STATUS_LOCKED,
+};
+
+/**
+ * struct tgl_usr_data - tgl user data structure
+ * @key: lookup key
+ * @data1: user data 1
+ * @data2: user data 2
+ * @status: lock status that is in tgl_status_data
+ */
+struct tgl_usr_data {
+ unsigned int key;
+ unsigned int data1;
+ unsigned int data2;
+ enum tgl_status_data status;
+};
+
+enum {
+ _TGL_GET_VERSION,
+ _TGL_REGISTER,
+ _TGL_UNREGISTER,
+ _TGL_LOCK,
+ _TGL_UNLOCK,
+ _TGL_SET_DATA,
+ _TGL_GET_DATA,
+};
+
+/* get version information */
+#define TGL_IOCTL_GET_VERSION TGL_IOR(_TGL_GET_VERSION, struct tgl_ver_data)
+/* register key */
+#define TGL_IOCTL_REGISTER TGL_IOW(_TGL_REGISTER, struct tgl_reg_data)
+/* unregister key */
+#define TGL_IOCTL_UNREGISTER TGL_IOW(_TGL_UNREGISTER, struct tgl_reg_data)
+/* lock with key */
+#define TGL_IOCTL_LOCK TGL_IOW(_TGL_LOCK, struct tgl_lock_data)
+/* unlock with key */
+#define TGL_IOCTL_UNLOCK TGL_IOW(_TGL_UNLOCK, struct tgl_lock_data)
+/* set user data with key */
+#define TGL_IOCTL_SET_DATA TGL_IOW(_TGL_SET_DATA, struct tgl_usr_data)
+/* get user data with key */
+#define TGL_IOCTL_GET_DATA TGL_IOR(_TGL_GET_DATA, struct tgl_usr_data)
+
+/* indicate cache units. */
+enum e_drm_exynos_gem_cache_sel {
+ EXYNOS_DRM_L1_CACHE = 1 << 0,
+ EXYNOS_DRM_L2_CACHE = 1 << 1,
+ EXYNOS_DRM_ALL_CORES = 1 << 2,
+ EXYNOS_DRM_ALL_CACHES = EXYNOS_DRM_L1_CACHE |
+ EXYNOS_DRM_L2_CACHE,
+ EXYNOS_DRM_ALL_CACHES_CORES = EXYNOS_DRM_L1_CACHE |
+ EXYNOS_DRM_L2_CACHE |
+ EXYNOS_DRM_ALL_CORES,
+ EXYNOS_DRM_CACHE_SEL_MASK = EXYNOS_DRM_ALL_CACHES_CORES
+};
+
+/* indicate cache operation types. */
+enum e_drm_exynos_gem_cache_op {
+ EXYNOS_DRM_CACHE_INV_ALL = 1 << 3,
+ EXYNOS_DRM_CACHE_INV_RANGE = 1 << 4,
+ EXYNOS_DRM_CACHE_CLN_ALL = 1 << 5,
+ EXYNOS_DRM_CACHE_CLN_RANGE = 1 << 6,
+ EXYNOS_DRM_CACHE_FSH_ALL = EXYNOS_DRM_CACHE_INV_ALL |
+ EXYNOS_DRM_CACHE_CLN_ALL,
+ EXYNOS_DRM_CACHE_FSH_RANGE = EXYNOS_DRM_CACHE_INV_RANGE |
+ EXYNOS_DRM_CACHE_CLN_RANGE,
+ EXYNOS_DRM_CACHE_OP_MASK = EXYNOS_DRM_CACHE_FSH_ALL |
+ EXYNOS_DRM_CACHE_FSH_RANGE
+};
+
+/**
+ * A structure for cache operation.
+ *
+ * @usr_addr: user space address.
+ * P.S. it SHOULD BE user space.
+ * @size: buffer size for cache operation.
+ * @flags: select cache unit and cache operation.
+ * @gem_handle: a handle to a gem object.
+ * this gem handle is needed for cache range operation to L2 cache.
+ */
+struct drm_exynos_gem_cache_op {
+ uint64_t usr_addr;
+ unsigned int size;
+ unsigned int flags;
+ unsigned int gem_handle;
+};
+
+#define DRM_EXYNOS_GEM_CACHE_OP 0x12
+
+#define DRM_IOCTL_EXYNOS_GEM_CACHE_OP DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_CACHE_OP, struct drm_exynos_gem_cache_op)
+
+#endif /* __TBM_BUFMGR_TGL_H__ */