Use 2.0 version frontend
authorChangyeon Lee <cyeon.lee@samsung.com>
Thu, 11 Feb 2016 12:30:33 +0000 (21:30 +0900)
committerChangyeon Lee <cyeon.lee@samsung.com>
Thu, 18 Feb 2016 07:40:52 +0000 (16:40 +0900)
Change-Id: Idd6b95bf2231549b9ba51ace9a2280ac2d7eaef9

src/tbm_bufmgr_exynos.c
src/tbm_bufmgr_tgl.h [new file with mode: 0644]

diff --git a/src/tbm_bufmgr_exynos.c b/src/tbm_bufmgr_exynos.c
index e4ecddb..9464c57 100644
--- a/src/tbm_bufmgr_exynos.c
+++ b/src/tbm_bufmgr_exynos.c
@@ -52,6 +52,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <tbm_surface_internal.h>
 #include <libudev.h>
 
+#include "tbm_bufmgr_tgl.h"
+
 #define DEBUG
 #define USE_DMAIMPORT
 #define TBM_COLOR_FORMAT_COUNT 8
@@ -162,12 +164,39 @@ struct dma_buf_fence {
 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
 
+/* tgl key values */
+#define GLOBAL_KEY   ((unsigned int)(-1))
+/* TBM_CACHE */
+#define TBM_EXYNOS_CACHE_INV       0x01 /**< cache invalidate  */
+#define TBM_EXYNOS_CACHE_CLN       0x02 /**< cache clean */
+#define TBM_EXYNOS_CACHE_ALL       0x10 /**< cache all */
+#define TBM_EXYNOS_CACHE_FLUSH     (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush  */
+#define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL)       /**< cache flush all */
+
+enum {
+       DEVICE_NONE = 0,
+       DEVICE_CA,                                      /* cache aware device */
+       DEVICE_CO                                       /* cache oblivious device */
+};
+
+typedef union _tbm_bo_cache_state tbm_bo_cache_state;
+
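+/* Per-bo cache bookkeeping shared between processes through the tgl driver:
+ *   cntFlush  - snapshot of the global flush-all counter taken when the state is saved
+ *   isCached  - set while the bo contents may still sit in the CPU cache
+ *   isDirtied - whether a cache-aware (CPU) or cache-oblivious device dirtied the bo last
+ */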
+union _tbm_bo_cache_state {
+       unsigned int val;
+       struct {
+               unsigned int cntFlush:16;       /* global flush-all count at the last sync */
+               unsigned int isCached:1;
+               unsigned int isDirtied:2;
+       } data;
+};
+
 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
 
 typedef struct _exynos_private
 {
     int ref_count;
+    struct _tbm_bo_exynos *bo_priv;
 } PrivGem;
 
 /* tbm buffor object for exynos */
@@ -194,6 +223,9 @@ struct _tbm_bo_exynos
     struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
     int device;
     int opt;
+
+    tbm_bo_cache_state cache_state;
+    unsigned int map_cnt;
 };
 
 /* tbm bufmgr private for exynos */
@@ -205,6 +237,8 @@ struct _tbm_bufmgr_exynos
 
     int use_dma_fence;
 
+    int tgl_fd;
+
     int fd_owner;
 };
 
@@ -235,6 +269,73 @@ uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {   TBM_FORMAT_R
                                                                                                                                                TBM_FORMAT_YUV420,
                                                                                                                                                TBM_FORMAT_YVU420 };
 
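+/* thin wrappers around the tgl (global lock) driver ioctls declared in
+ * tbm_bufmgr_tgl.h; one tgl entry per bo name holds its shared cache state */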
+static inline int _tgl_init(int fd, unsigned int key)
+{
+       struct tgl_attribute attr;
+       int err;
+
+       attr.key = key;
+       attr.timeout_ms = 1000;
+
+       err = ioctl(fd, TGL_IOC_INIT_LOCK, &attr);
+       if (err) {
+               TBM_EXYNOS_LOG("[libtbm:%d] "
+                       "error(%s) %s:%d key:%d\n",
+                       getpid(), strerror(errno), __FUNCTION__, __LINE__, key);
+               return 0;
+       }
+
+       return 1;
+}
+
+static inline int _tgl_destroy(int fd, unsigned int key)
+{
+       int err;
+       err = ioctl(fd, TGL_IOC_DESTROY_LOCK, key);
+       if (err) {
+               TBM_EXYNOS_LOG("[libtbm:%d] "
+                       "error(%s) %s:%d key:%d\n",
+                       getpid(), strerror(errno), __FUNCTION__, __LINE__, key);
+               return 0;
+       }
+
+       return 1;
+}
+
+static inline int _tgl_set_data(int fd, unsigned int key, unsigned int val)
+{
+       int err;
+       struct tgl_user_data arg;
+
+       arg.key = key;
+       arg.data1 = val;
+       err = ioctl(fd, TGL_IOC_SET_DATA, &arg);
+       if (err) {
+               TBM_EXYNOS_LOG("[libtbm:%d] "
+                       "error(%s) %s:%d key:%d\n",
+                       getpid(), strerror(errno), __FUNCTION__, __LINE__, key);
+               return 0;
+       }
+
+       return 1;
+}
+
+static inline unsigned int _tgl_get_data(int fd, unsigned int key)
+{
+       int err;
+       struct tgl_user_data arg = { 0, };
+
+       arg.key = key;
+       err = ioctl(fd, TGL_IOC_GET_DATA, &arg);
+       if (err) {
+               TBM_EXYNOS_LOG("[libtbm:%d] "
+                       "error(%s) %s:%d key:%d\n",
+                       getpid(), strerror(errno), __FUNCTION__, __LINE__, key);
+               return 0;
+       }
+
+       return arg.data1;
+}
+
 static inline int
 _is_drm_master(int drm_fd)
 {
@@ -245,6 +346,198 @@ _is_drm_master(int drm_fd)
 }
 
 static int
+_exynos_cache_flush (tbm_bo bo, int flags)
+{
+#ifdef ENABLE_CACHECRTL
+    EXYNOS_RETURN_VAL_IF_FAIL (bo!=NULL, 0);
+
+    tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
+    EXYNOS_RETURN_VAL_IF_FAIL (bufmgr_exynos!=NULL, 0);
+
+    /* cache flush is managed by kernel side when using dma-fence. */
+    if (bufmgr_exynos->use_dma_fence)
+        return 1;
+
+    tbm_bo_exynos bo_exynos;
+
+    bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
+    EXYNOS_RETURN_VAL_IF_FAIL (bo_exynos!=NULL, 0);
+
+    struct drm_exynos_gem_cache_op cache_op = {0, };
+    int ret;
+
+    /* if bo_exynos is null, do cache_flush_all */
+    if(bo_exynos)
+    {
+        cache_op.flags = 0;
+        cache_op.usr_addr = (uint64_t)((uint32_t)bo_exynos->pBase);
+        cache_op.size = bo_exynos->size;
+    }
+    else
+    {
+        flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
+        cache_op.flags = 0;
+        cache_op.usr_addr = 0;
+        cache_op.size = 0;
+    }
+
+    if (flags & TBM_EXYNOS_CACHE_INV)
+    {
+        if(flags & TBM_EXYNOS_CACHE_ALL)
+            cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
+        else
+            cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
+    }
+
+    if (flags & TBM_EXYNOS_CACHE_CLN)
+    {
+        if(flags & TBM_EXYNOS_CACHE_ALL)
+            cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
+        else
+            cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
+    }
+
+    if(flags & TBM_EXYNOS_CACHE_ALL)
+        cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
+
+    ret = drmCommandWriteRead (bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op, sizeof(cache_op));
+    if (ret)
+    {
+        TBM_EXYNOS_LOG ("error fail to flush the cache.\n");
+        return 0;
+    }
+#else
+    TBM_EXYNOS_LOG ("warning fail to enable the cache flush.\n");
+#endif
+    return 1;
+}
+
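+/* create the tgl entry for a newly allocated bo and reset its cache state */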
+static int
+_bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
+{
+    EXYNOS_RETURN_VAL_IF_FAIL (bufmgr_exynos!=NULL, 0);
+    EXYNOS_RETURN_VAL_IF_FAIL (bo_exynos!=NULL, 0);
+
+    tbm_bo_cache_state cache_state;
+
+    if (!_tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name))
+        return 0;
+
+    cache_state.data.isDirtied = DEVICE_NONE;
+    cache_state.data.isCached = 0;
+    cache_state.data.cntFlush = 0;
+
+    _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
+
+    return 1;
+}
+
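+/* called on the first mapping of a bo: reload the shared cache state from tgl,
+ * flush or invalidate the CPU cache if the previous writer requires it, and
+ * record which side is about to dirty the bo */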
+static int
+_bo_set_cache_state(tbm_bo bo, int device, int opt)
+{
+    tbm_bo_exynos bo_exynos;
+    tbm_bufmgr_exynos bufmgr_exynos;
+    char need_flush = 0;
+    unsigned short cntFlush = 0;
+
+    bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
+    EXYNOS_RETURN_VAL_IF_FAIL (bo_exynos!=NULL, 0);
+
+    bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
+    EXYNOS_RETURN_VAL_IF_FAIL (bufmgr_exynos!=NULL, 0);
+
+    if (bo_exynos->flags_exynos & EXYNOS_BO_NONCACHABLE)
+        return 1;
+
+    /* get cache state of a bo */
+    bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd, bo_exynos->name);
+
+    /* get global cache flush count */
+    cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
+
+    if (device == TBM_DEVICE_CPU) {
+        if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
+            bo_exynos->cache_state.data.isCached)
+            need_flush = TBM_EXYNOS_CACHE_INV;
+
+        bo_exynos->cache_state.data.isCached = 1;
+        if (opt & TBM_OPTION_WRITE)
+            bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
+        else {
+            if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
+                bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
+        }
+    } else {
+        if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
+            bo_exynos->cache_state.data.isCached &&
+            bo_exynos->cache_state.data.cntFlush == cntFlush)
+            need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
+
+        if (opt & TBM_OPTION_WRITE)
+            bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
+        else {
+            if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
+                bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
+        }
+    }
+
+    if (need_flush)
+    {
+        if (need_flush & TBM_EXYNOS_CACHE_ALL)
+            _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
+
+        /* call cache flush */
+        _exynos_cache_flush (bo, need_flush);
+
+        DBG("[libtbm:%d] \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
+            getpid(),
+            bo_exynos->cache_state.data.isCached,
+            bo_exynos->cache_state.data.isDirtied,
+            need_flush,
+            cntFlush);
+    }
+
+    return 1;
+}
+
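+/* called on the last unmapping: store the bo's cache state, tagged with the
+ * current global flush count, back into tgl for other processes */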
+static int
+_bo_save_cache_state(tbm_bo bo)
+{
+    unsigned short cntFlush = 0;
+    tbm_bo_exynos bo_exynos;
+    tbm_bufmgr_exynos bufmgr_exynos;
+
+    bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
+    EXYNOS_RETURN_VAL_IF_FAIL (bo_exynos!=NULL, 0);
+
+    bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
+    EXYNOS_RETURN_VAL_IF_FAIL (bufmgr_exynos!=NULL, 0);
+
+    /* get global cache flush count */
+    cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
+
+    /* save global cache flush count */
+    bo_exynos->cache_state.data.cntFlush = cntFlush;
+    _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, bo_exynos->cache_state.val);
+
+    return 1;
+}
+
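+/* drop the bo's tgl entry when the bo is freed */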
+static void
+_bo_destroy_cache_state(tbm_bo bo)
+{
+    tbm_bo_exynos bo_exynos;
+    tbm_bufmgr_exynos bufmgr_exynos;
+
+    bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
+    EXYNOS_RETURN_IF_FAIL (bo_exynos!=NULL);
+
+    bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
+    EXYNOS_RETURN_IF_FAIL (bufmgr_exynos!=NULL);
+
+    _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
+}
+
+static int
 _get_render_node ()
 {
     struct udev *udev = NULL;
@@ -464,59 +757,6 @@ _exynos_bo_handle (tbm_bo_exynos bo_exynos, int device)
 }
 
 static int
-_exynos_cache_flush (int fd, tbm_bo_exynos bo_exynos, int flags)
-{
-#ifdef ENABLE_CACHECRTL
-    struct drm_exynos_gem_cache_op cache_op = {0, };
-    int ret;
-
-    /* if bo_exynos is null, do cache_flush_all */
-    if(bo_exynos)
-    {
-        cache_op.flags = 0;
-        cache_op.usr_addr = (uint64_t)((uint32_t)bo_exynos->pBase);
-        cache_op.size = bo_exynos->size;
-    }
-    else
-    {
-        flags = TBM_CACHE_FLUSH_ALL;
-        cache_op.flags = 0;
-        cache_op.usr_addr = 0;
-        cache_op.size = 0;
-    }
-
-    if (flags & TBM_CACHE_INV)
-    {
-        if(flags & TBM_CACHE_ALL)
-            cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
-        else
-            cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
-    }
-
-    if (flags & TBM_CACHE_CLN)
-    {
-        if(flags & TBM_CACHE_ALL)
-            cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
-        else
-            cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
-    }
-
-    if(flags & TBM_CACHE_ALL)
-        cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
-
-    ret = drmCommandWriteRead (fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op, sizeof(cache_op));
-    if (ret)
-    {
-        TBM_EXYNOS_LOG ("error fail to flush the cache.\n");
-        return 0;
-    }
-#else
-    TBM_EXYNOS_LOG ("warning fail to enable the cache flush.\n");
-#endif
-    return 1;
-}
-
-static int
 tbm_exynos_bo_size (tbm_bo bo)
 {
     EXYNOS_RETURN_VAL_IF_FAIL (bo!=NULL, 0);
@@ -572,6 +812,13 @@ tbm_exynos_bo_alloc (tbm_bo bo, int size, int flags)
     bo_exynos->flags_exynos = exynos_flags;
     bo_exynos->name = _get_name (bo_exynos->fd, bo_exynos->gem);
 
+    if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos))
+    {
+        TBM_EXYNOS_LOG ("error fail init cache state(%d)\n", bo_exynos->name);
+        free (bo_exynos);
+        return 0;
+    }
+
     pthread_mutex_init(&bo_exynos->mutex, NULL);
 
     if (bufmgr_exynos->use_dma_fence
@@ -601,6 +848,8 @@ tbm_exynos_bo_alloc (tbm_bo bo, int size, int flags)
     }
 
     privGem->ref_count = 1;
+    privGem->bo_priv = bo_exynos;
+
     if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)privGem) < 0)
     {
         TBM_EXYNOS_LOG ("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
@@ -672,6 +921,8 @@ tbm_exynos_bo_free(tbm_bo bo)
         TBM_EXYNOS_LOG ("warning Cannot find bo to Hash(%d), ret=%d\n", bo_exynos->name, ret);
     }
 
+    _bo_destroy_cache_state(bo);
+
     /* Free gem handle */
     struct drm_gem_close arg = {0, };
     memset (&arg, 0, sizeof(arg));
@@ -693,10 +944,19 @@ tbm_exynos_bo_import (tbm_bo bo, unsigned int key)
 
     tbm_bufmgr_exynos bufmgr_exynos;
     tbm_bo_exynos bo_exynos;
+    PrivGem *privGem = NULL;
+    int ret;
 
     bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
     EXYNOS_RETURN_VAL_IF_FAIL (bufmgr_exynos!=NULL, 0);
 
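+    /* if this key was already imported in this process, just take another reference */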
+    ret = drmHashLookup (bufmgr_exynos->hashBos, key, (void**)&privGem);
+    if (ret == 0)
+    {
+        privGem->ref_count++;
+        return privGem->bo_priv;
+    }
+
     struct drm_gem_open arg = {0, };
     struct drm_exynos_gem_info info = {0, };
 
@@ -731,6 +991,13 @@ tbm_exynos_bo_import (tbm_bo bo, unsigned int key)
     bo_exynos->name = key;
     bo_exynos->flags_tbm = _get_tbm_flag_from_exynos (bo_exynos->flags_exynos);
 
+    if (!_tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name))
+    {
+        TBM_EXYNOS_LOG ("error fail tgl init(%d)\n", bo_exynos->name);
+        free (bo_exynos);
+        return 0;
+    }
+
     if (!bo_exynos->dmabuf)
     {
         struct drm_prime_handle arg = {0, };
@@ -746,33 +1013,22 @@ tbm_exynos_bo_import (tbm_bo bo, unsigned int key)
     }
 
     /* add bo to hash */
-    PrivGem *privGem = NULL;
-    int ret;
+    privGem = NULL;
 
-    ret = drmHashLookup (bufmgr_exynos->hashBos, bo_exynos->name, (void**)&privGem);
-    if (ret == 0)
+    privGem = calloc (1, sizeof(PrivGem));
+    if (!privGem)
     {
-        privGem->ref_count++;
+        TBM_EXYNOS_LOG ("[libtbm-exynos:%d] "
+                "error %s:%d Fail to calloc privGem\n",
+                getpid(), __FUNCTION__, __LINE__);
+        free (bo_exynos);
+        return 0;
     }
-    else if (ret == 1)
-    {
-        privGem = calloc (1, sizeof(PrivGem));
-        if (!privGem)
-        {
-            TBM_EXYNOS_LOG ("[libtbm-exynos:%d] "
-                    "error %s:%d Fail to calloc privGem\n",
-                    getpid(), __FUNCTION__, __LINE__);
-            free (bo_exynos);
-            return 0;
-        }
 
-        privGem->ref_count = 1;
-        if (drmHashInsert (bufmgr_exynos->hashBos, bo_exynos->name, (void *)privGem) < 0)
-        {
-            TBM_EXYNOS_LOG ("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
-        }
-    }
-    else
+    privGem->ref_count = 1;
+    privGem->bo_priv = bo_exynos;
+
+    if (drmHashInsert (bufmgr_exynos->hashBos, bo_exynos->name, (void *)privGem) < 0)
     {
         TBM_EXYNOS_LOG ("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
     }
@@ -794,15 +1050,15 @@ tbm_exynos_bo_import_fd (tbm_bo bo, tbm_fd key)
 
     tbm_bufmgr_exynos bufmgr_exynos;
     tbm_bo_exynos bo_exynos;
+    PrivGem *privGem = NULL;
+    int name;
+    int ret;
 
     bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
     EXYNOS_RETURN_VAL_IF_FAIL (bufmgr_exynos!=NULL, 0);
 
-    unsigned int gem = 0;
-    unsigned int real_size = -1;
-    struct drm_exynos_gem_info info = {0, };
-
        //getting handle from fd
+    unsigned int gem = 0;
     struct drm_prime_handle arg = {0, };
 
        arg.fd = key;
@@ -815,6 +1071,30 @@ tbm_exynos_bo_import_fd (tbm_bo bo, tbm_fd key)
     }
     gem = arg.handle;
 
+    name = _get_name (bufmgr_exynos->fd, gem);
+    if (!name)
+    {
+        TBM_EXYNOS_LOG ("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
+            bo, gem, key, strerror(errno));
+        return 0;
+    }
+
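+    /* if this gem name is already tracked, reuse the cached bo for the same gem */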
+    ret = drmHashLookup (bufmgr_exynos->hashBos, name, (void**)&privGem);
+    if (ret == 0)
+    {
+        if (gem == privGem->bo_priv->gem)
+        {
+            privGem->ref_count++;
+            return privGem->bo_priv;
+        }
+        else
+            TBM_EXYNOS_LOG ("error gem:%d does not match the gem of the cached bo(%d) for name:%d\n",
+                gem, privGem->bo_priv->gem, name);
+    }
+
+    unsigned int real_size = -1;
+    struct drm_exynos_gem_info info = {0, };
+
        /* Determine size of bo.  The fd-to-handle ioctl really should
         * return the size, but it doesn't.  If we have kernel 3.12 or
         * later, we can lseek on the prime fd to get the size.  Older
@@ -848,48 +1128,35 @@ tbm_exynos_bo_import_fd (tbm_bo bo, tbm_fd key)
     bo_exynos->size = real_size;
     bo_exynos->flags_exynos = info.flags;
     bo_exynos->flags_tbm = _get_tbm_flag_from_exynos (bo_exynos->flags_exynos);
+    bo_exynos->name = name;
 
-    bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
-    if (!bo_exynos->name)
+    if (!_tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name))
     {
-        TBM_EXYNOS_LOG ("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
-            bo, gem, key, strerror(errno));
+        TBM_EXYNOS_LOG ("error fail tgl init(%d)\n", bo_exynos->name);
         free (bo_exynos);
         return 0;
     }
 
     /* add bo to hash */
-    PrivGem *privGem = NULL;
-    int ret;
+    privGem = NULL;
 
-    ret = drmHashLookup (bufmgr_exynos->hashBos, bo_exynos->name, (void**)&privGem);
-    if (ret == 0)
+    privGem = calloc (1, sizeof(PrivGem));
+    if (!privGem)
     {
-        privGem->ref_count++;
+        TBM_EXYNOS_LOG ("[libtbm-exynos:%d] "
+                "error %s:%d Fail to calloc privGem\n",
+                getpid(), __FUNCTION__, __LINE__);
+        free (bo_exynos);
+        return 0;
     }
-    else if (ret == 1)
-    {
-        privGem = calloc (1, sizeof(PrivGem));
-        if (!privGem)
-        {
-            TBM_EXYNOS_LOG ("[libtbm-exynos:%d] "
-                    "error %s:%d Fail to calloc privGem\n",
-                    getpid(), __FUNCTION__, __LINE__);
-            free (bo_exynos);
-            return 0;
-        }
 
-        privGem->ref_count = 1;
-        if (drmHashInsert (bufmgr_exynos->hashBos, bo_exynos->name, (void *)privGem) < 0)
-        {
-            TBM_EXYNOS_LOG ("error bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
-                bo, bo_exynos->name, gem, key);
-        }
-    }
-    else
+    privGem->ref_count = 1;
+    privGem->bo_priv = bo_exynos;
+
+    if (drmHashInsert (bufmgr_exynos->hashBos, bo_exynos->name, (void *)privGem) < 0)
     {
         TBM_EXYNOS_LOG ("error bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
-                bo, bo_exynos->name, gem, key);
+            bo, bo_exynos->name, gem, key);
     }
 
     DBG (" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n", target_name(),
@@ -1034,6 +1301,11 @@ tbm_exynos_bo_map (tbm_bo bo, int device, int opt)
         return (tbm_bo_handle) NULL;
     }
 
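+    /* apply the shared cache state only when the first mapping of the bo is created */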
+    if (bo_exynos->map_cnt == 0)
+        _bo_set_cache_state (bo, device, opt);
+
+    bo_exynos->map_cnt++;
+
     return bo_handle;
 }
 
@@ -1050,6 +1322,11 @@ tbm_exynos_bo_unmap (tbm_bo bo)
     if (!bo_exynos->gem)
         return 0;
 
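+    /* when the last mapping is released, publish the bo's cache state back to tgl */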
+    bo_exynos->map_cnt--;
+
+    if (bo_exynos->map_cnt == 0)
+        _bo_save_cache_state (bo);
+
     DBG ("     [%s] bo:%p, gem:%d(%d), fd:%d\n", target_name(),
           bo,
           bo_exynos->gem, bo_exynos->name,
@@ -1059,50 +1336,6 @@ tbm_exynos_bo_unmap (tbm_bo bo)
 }
 
 static int
-tbm_exynos_bo_cache_flush (tbm_bo bo, int flags)
-{
-    tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
-    EXYNOS_RETURN_VAL_IF_FAIL (bufmgr_exynos!=NULL, 0);
-
-    /* cache flush is managed by kernel side when using dma-fence. */
-    if (bufmgr_exynos->use_dma_fence)
-       return 1;
-
-    EXYNOS_RETURN_VAL_IF_FAIL (bo!=NULL, 0);
-
-    tbm_bo_exynos bo_exynos;
-
-    bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
-    EXYNOS_RETURN_VAL_IF_FAIL (bo_exynos!=NULL, 0);
-
-    if (!_exynos_cache_flush(bo_exynos->fd, bo_exynos, flags))
-        return 0;
-
-    return 1;
-}
-
-static int
-tbm_exynos_bo_get_global_key (tbm_bo bo)
-{
-    EXYNOS_RETURN_VAL_IF_FAIL (bo!=NULL, 0);
-
-    tbm_bo_exynos bo_exynos;
-
-    bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
-    EXYNOS_RETURN_VAL_IF_FAIL (bo_exynos!=NULL, 0);
-
-    if (!bo_exynos->name)
-    {
-        if (!bo_exynos->gem)
-            return 0;
-
-        bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
-    }
-
-    return bo_exynos->name;
-}
-
-static int
 tbm_exynos_bo_lock(tbm_bo bo, int device, int opt)
 {
     EXYNOS_RETURN_VAL_IF_FAIL (bo!=NULL, 0);
@@ -1315,6 +1548,8 @@ tbm_exynos_bufmgr_deinit (void *priv)
     if (bufmgr_exynos->fd_owner)
         close (bufmgr_exynos->fd);
 
+    close (bufmgr_exynos->tgl_fd);
+
     free (bufmgr_exynos);
 }
 
@@ -1747,251 +1982,6 @@ tbm_exynos_surface_get_num_bos(tbm_format format)
     return num;
 }
 
-/**
-* @brief get the size of the surface with a format.
-* @param[in] surface : the surface
-* @param[in] width : the width of the surface
-* @param[in] height : the height of the surface
-* @param[in] format : the format of the surface
-* @return size of the surface if this function succeeds, otherwise 0.
-*/
-
-int
-tbm_exynos_surface_get_size(tbm_surface_h surface, int width, int height, tbm_format format)
-{
-       int ret = 0;
-       int bpp = 0;
-       int _pitch =0;
-       int _size =0;
-       int align =TBM_SURFACE_ALIGNMENT_PLANE;
-
-    switch(format)
-    {
-        /* 16 bpp RGB */
-        case TBM_FORMAT_XRGB4444:
-        case TBM_FORMAT_XBGR4444:
-        case TBM_FORMAT_RGBX4444:
-        case TBM_FORMAT_BGRX4444:
-        case TBM_FORMAT_ARGB4444:
-        case TBM_FORMAT_ABGR4444:
-        case TBM_FORMAT_RGBA4444:
-        case TBM_FORMAT_BGRA4444:
-        case TBM_FORMAT_XRGB1555:
-        case TBM_FORMAT_XBGR1555:
-        case TBM_FORMAT_RGBX5551:
-        case TBM_FORMAT_BGRX5551:
-        case TBM_FORMAT_ARGB1555:
-        case TBM_FORMAT_ABGR1555:
-        case TBM_FORMAT_RGBA5551:
-        case TBM_FORMAT_BGRA5551:
-        case TBM_FORMAT_RGB565:
-            bpp = 16;
-                       _pitch = SIZE_ALIGN((width*bpp)>>3,TBM_SURFACE_ALIGNMENT_PITCH_RGB);
-            _size = SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            break;
-        /* 24 bpp RGB */
-        case TBM_FORMAT_RGB888:
-        case TBM_FORMAT_BGR888:
-            bpp = 24;
-                       _pitch = SIZE_ALIGN((width*bpp)>>3,TBM_SURFACE_ALIGNMENT_PITCH_RGB);
-            _size = SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            break;
-        /* 32 bpp RGB */
-        case TBM_FORMAT_XRGB8888:
-        case TBM_FORMAT_XBGR8888:
-        case TBM_FORMAT_RGBX8888:
-        case TBM_FORMAT_BGRX8888:
-        case TBM_FORMAT_ARGB8888:
-        case TBM_FORMAT_ABGR8888:
-        case TBM_FORMAT_RGBA8888:
-        case TBM_FORMAT_BGRA8888:
-            bpp = 32;
-                       _pitch = SIZE_ALIGN((width*bpp)>>3,TBM_SURFACE_ALIGNMENT_PITCH_RGB);
-            _size = SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            break;
-        /* packed YCbCr */
-        case TBM_FORMAT_YUYV:
-        case TBM_FORMAT_YVYU:
-        case TBM_FORMAT_UYVY:
-        case TBM_FORMAT_VYUY:
-        case TBM_FORMAT_AYUV:
-            bpp = 32;
-                       _pitch = SIZE_ALIGN((width*bpp)>>3,TBM_SURFACE_ALIGNMENT_PITCH_YUV);
-            _size = SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            break;
-        /*
-        * 2 plane YCbCr
-        * index 0 = Y plane, [7:0] Y
-        * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
-        * or
-        * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
-        */
-        case TBM_FORMAT_NV12:
-                       bpp = 12;
-                        //plane_idx == 0
-                        {
-                                _pitch = SIZE_ALIGN(width ,TBM_SURFACE_ALIGNMENT_PITCH_YUV);
-                                _size = MAX(_calc_yplane_nv12(width,height), _new_calc_yplane_nv12(width,height));
-                        }
-                        //plane_idx ==1
-                        {
-                                _pitch = SIZE_ALIGN( width ,TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
-                                _size += MAX(_calc_uvplane_nv12(width,height), _new_calc_uvplane_nv12(width,height));
-                        }
-                        break;
-
-        case TBM_FORMAT_NV21:
-                       bpp = 12;
-                        //plane_idx == 0
-                        {
-                                _pitch = SIZE_ALIGN( width ,TBM_SURFACE_ALIGNMENT_PITCH_YUV);
-                                _size = SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-                        }
-                        //plane_idx ==1
-                        {
-                                _pitch = SIZE_ALIGN( width ,TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
-                                _size += SIZE_ALIGN(_pitch*(height/2),TBM_SURFACE_ALIGNMENT_PLANE);
-                        }
-                        break;
-
-        case TBM_FORMAT_NV16:
-        case TBM_FORMAT_NV61:
-            bpp = 16;
-            //plane_idx == 0
-            {
-                               _pitch = SIZE_ALIGN(width,TBM_SURFACE_ALIGNMENT_PITCH_YUV);
-                _size = SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            }
-            //plane_idx ==1
-            {
-                           _pitch = SIZE_ALIGN(width,TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
-                _size += SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            }
-
-            break;
-        /*
-        * 3 plane YCbCr
-        * index 0: Y plane, [7:0] Y
-        * index 1: Cb plane, [7:0] Cb
-        * index 2: Cr plane, [7:0] Cr
-        * or
-        * index 1: Cr plane, [7:0] Cr
-        * index 2: Cb plane, [7:0] Cb
-        */
-        case TBM_FORMAT_YUV410:
-        case TBM_FORMAT_YVU410:
-            bpp = 9;
-           align = TBM_SURFACE_ALIGNMENT_PITCH_YUV;
-            break;
-        case TBM_FORMAT_YUV411:
-        case TBM_FORMAT_YVU411:
-        case TBM_FORMAT_YUV420:
-        case TBM_FORMAT_YVU420:
-            bpp = 12;
-           //plane_idx == 0
-            {
-               _pitch = SIZE_ALIGN(width,TBM_SURFACE_ALIGNMENT_PITCH_YUV);
-                _size = SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            }
-            //plane_idx == 1
-            {
-               _pitch = SIZE_ALIGN(width/2,TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
-               _size += SIZE_ALIGN(_pitch*(height/2),TBM_SURFACE_ALIGNMENT_PLANE);
-            }
-            //plane_idx == 2
-            {
-               _pitch = SIZE_ALIGN(width/2,TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
-                _size += SIZE_ALIGN(_pitch*(height/2),TBM_SURFACE_ALIGNMENT_PLANE);
-            }
-
-            break;
-        case TBM_FORMAT_YUV422:
-        case TBM_FORMAT_YVU422:
-            bpp = 16;
-           //plane_idx == 0
-            {
-               _pitch = SIZE_ALIGN(width,TBM_SURFACE_ALIGNMENT_PITCH_YUV);
-                _size = SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            }
-            //plane_idx == 1
-            {
-               _pitch = SIZE_ALIGN(width/2,TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
-               _size += SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            }
-            //plane_idx == 2
-            {
-               _pitch = SIZE_ALIGN(width/2,TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
-                _size += SIZE_ALIGN(_pitch*height,TBM_SURFACE_ALIGNMENT_PLANE);
-            }
-            break;
-        case TBM_FORMAT_YUV444:
-        case TBM_FORMAT_YVU444:
-            bpp = 24;
-           align = TBM_SURFACE_ALIGNMENT_PITCH_YUV;
-            break;
-
-        default:
-            bpp = 0;
-            break;
-    }
-
-       if(_size > 0)
-               ret = _size;
-       else
-           ret =  SIZE_ALIGN( (width * height * bpp) >> 3, align);
-
-    return ret;
-
-}
-
-tbm_bo_handle
-tbm_exynos_fd_to_handle(tbm_bufmgr bufmgr, tbm_fd fd, int device)
-{
-    EXYNOS_RETURN_VAL_IF_FAIL (bufmgr!=NULL, (tbm_bo_handle) NULL);
-    EXYNOS_RETURN_VAL_IF_FAIL (fd > 0, (tbm_bo_handle) NULL);
-
-    tbm_bo_handle bo_handle;
-    memset (&bo_handle, 0x0, sizeof (uint64_t));
-
-    tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_priv_from_bufmgr(bufmgr);
-
-    switch(device)
-    {
-    case TBM_DEVICE_DEFAULT:
-    case TBM_DEVICE_2D:
-    {
-        //getting handle from fd
-        struct drm_prime_handle arg = {0, };
-
-        arg.fd = fd;
-        arg.flags = 0;
-        if (drmIoctl (bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg))
-        {
-            TBM_EXYNOS_LOG ("error Cannot get gem handle from fd:%d (%s)\n",
-                     arg.fd, strerror(errno));
-            return (tbm_bo_handle) NULL;
-        }
-
-        bo_handle.u32 = (uint32_t)arg.handle;;
-        break;
-    }
-    case TBM_DEVICE_CPU:
-        TBM_EXYNOS_LOG ("Not supported device:%d\n", device);
-        bo_handle.ptr = (void *) NULL;
-        break;
-    case TBM_DEVICE_3D:
-    case TBM_DEVICE_MM:
-        bo_handle.u32 = (uint32_t)fd;
-        break;
-    default:
-        TBM_EXYNOS_LOG ("error Not supported device:%d\n", device);
-        bo_handle.ptr = (void *) NULL;
-        break;
-    }
-
-    return bo_handle;
-}
-
 int
 tbm_exynos_bo_get_flags (tbm_bo bo)
 {
@@ -2061,23 +2051,39 @@ init_tbm_bufmgr_priv (tbm_bufmgr bufmgr, int fd)
         return 0;
     }
 
-    //Create Hash Table
-    bufmgr_exynos->hashBos = drmHashCreate ();
+    /* open tgl fd for saving cache flush data */
+    bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
 
-    //Check if the tbm manager supports dma fence or not.
-    int fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
-    int length;
-    char buf[1];
-    if (fp != -1)
-    {
-        length = read(fp, buf, 1);
+    if (bufmgr_exynos->tgl_fd < 0) {
+        bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
+        if (bufmgr_exynos->tgl_fd < 0) {
+            TBM_EXYNOS_LOG("[libtbm:%d] "
+                "error: Fail to open global_lock:%s\n",
+                getpid(), tgl_devfile);
+
+            if (bufmgr_exynos->fd_owner)
+                close(bufmgr_exynos->fd);
+
+            free (bufmgr_exynos);
+            return 0;
+        }
+    }
 
-        if (length == 1 && buf[0] == '1')
-            bufmgr_exynos->use_dma_fence = 1;
+    if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
+        TBM_EXYNOS_LOG("[libtbm:%d] "
+            "error: Fail to initialize the tgl\n",
+            getpid());
+
+        if (bufmgr_exynos->fd_owner)
+            close(bufmgr_exynos->fd);
 
-        close(fp);
+        free (bufmgr_exynos);
+        return 0;
     }
 
+    //Create Hash Table
+    bufmgr_exynos->hashBos = drmHashCreate ();
+
     bufmgr_backend = tbm_backend_alloc();
     if (!bufmgr_backend)
     {
@@ -2104,35 +2110,15 @@ init_tbm_bufmgr_priv (tbm_bufmgr bufmgr, int fd)
     bufmgr_backend->bo_get_handle = tbm_exynos_bo_get_handle,
     bufmgr_backend->bo_map = tbm_exynos_bo_map,
     bufmgr_backend->bo_unmap = tbm_exynos_bo_unmap,
-    bufmgr_backend->bo_cache_flush = tbm_exynos_bo_cache_flush,
-    bufmgr_backend->bo_get_global_key = tbm_exynos_bo_get_global_key;
     bufmgr_backend->surface_get_plane_data = tbm_exynos_surface_get_plane_data;
-    bufmgr_backend->surface_get_size = tbm_exynos_surface_get_size;
     bufmgr_backend->surface_supported_format = tbm_exynos_surface_supported_format;
-    bufmgr_backend->fd_to_handle = tbm_exynos_fd_to_handle;
     bufmgr_backend->surface_get_num_bos = tbm_exynos_surface_get_num_bos;
     bufmgr_backend->bo_get_flags = tbm_exynos_bo_get_flags;
-
-#ifdef ALWAYS_BACKEND_CTRL
-    bufmgr_backend->flags = (TBM_LOCK_CTRL_BACKEND | TBM_CACHE_CTRL_BACKEND);
     bufmgr_backend->bo_lock = NULL;
     bufmgr_backend->bo_lock2 = tbm_exynos_bo_lock;
     bufmgr_backend->bo_unlock = tbm_exynos_bo_unlock;
-#else
-    if (bufmgr_exynos->use_dma_fence)
-    {
-        bufmgr_backend->flags = (TBM_LOCK_CTRL_BACKEND | TBM_CACHE_CTRL_BACKEND);
-        bufmgr_backend->bo_lock = NULL;
-        bufmgr_backend->bo_lock2 = tbm_exynos_bo_lock;
-        bufmgr_backend->bo_unlock = tbm_exynos_bo_unlock;
-    }
-    else
-    {
-        bufmgr_backend->flags = 0;
-        bufmgr_backend->bo_lock = NULL;
-        bufmgr_backend->bo_unlock = NULL;
-    }
-#endif /* ALWAYS_BACKEND_CTRL */
+
+    bufmgr_backend->flags = TBM_USE_2_0_BACKEND;
 
     if (!tbm_backend_init (bufmgr, bufmgr_backend))
     {
@@ -2162,9 +2148,6 @@ init_tbm_bufmgr_priv (tbm_bufmgr bufmgr, int fd)
     }
 #endif
 
-    DBG ("[%s] DMABUF FENCE is %s\n", target_name(),
-          bufmgr_exynos->use_dma_fence ? "supported!" : "NOT supported!");
-
     DBG ("[%s] drm_fd:%d\n", target_name(),
           bufmgr_exynos->fd);
 
diff --git a/src/tbm_bufmgr_tgl.h b/src/tbm_bufmgr_tgl.h
new file mode 100644
index 0000000..2279cac
--- /dev/null
+++ b/src/tbm_bufmgr_tgl.h
@@ -0,0 +1,70 @@
+/**************************************************************************
+ *
+ * libtbm
+ *
+ * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
+ *
+ * Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
+ * Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * **************************************************************************/
+
+#ifndef __TBM_BUFMGR_TGL_H__
+#define __TBM_BUFMGR_TGL_H__
+
+#include <linux/ioctl.h>
+
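+/* device nodes of the global lock driver; the backend tries the SLP name
+ * first and falls back to /dev/tgl */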
+static char tgl_devfile[] = "/dev/slp_global_lock";
+static char tgl_devfile1[] = "/dev/tgl";
+
+#define TGL_IOC_BASE                           0x32
+
+struct tgl_attribute {
+       unsigned int key;
+       unsigned int timeout_ms;
+};
+
+struct tgl_user_data {
+       unsigned int key;
+       unsigned int data1;
+       unsigned int data2;
+       unsigned int locked;
+};
+
+typedef enum {
+       _TGL_INIT_LOCK = 1,
+       _TGL_DESTROY_LOCK,
+       _TGL_LOCK_LOCK,
+       _TGL_UNLOCK_LOCK,
+       _TGL_SET_DATA,
+       _TGL_GET_DATA,
+} _tgl_ioctls;
+
+#define TGL_IOC_INIT_LOCK                      _IOW(TGL_IOC_BASE, _TGL_INIT_LOCK, struct tgl_attribute *)
+#define TGL_IOC_DESTROY_LOCK           _IOW(TGL_IOC_BASE, _TGL_DESTROY_LOCK, unsigned int)
+#define TGL_IOC_LOCK_LOCK                      _IOW(TGL_IOC_BASE, _TGL_LOCK_LOCK, unsigned int)
+#define TGL_IOC_UNLOCK_LOCK                    _IOW(TGL_IOC_BASE, _TGL_UNLOCK_LOCK, unsigned int)
+#define TGL_IOC_SET_DATA                       _IOW(TGL_IOC_BASE, _TGL_SET_DATA, struct tgl_user_data *)
+#define TGL_IOC_GET_DATA                       _IOW(TGL_IOC_BASE, _TGL_GET_DATA, struct tgl_user_data *)
+
+#endif                                                 /* __TBM_BUFMGR_TGL_H__ */