--- /dev/null
+/**************************************************************************
+
+libtbm_vc4
+
+Copyright 2017 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <libudev.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <xf86drm.h>
+#include <vc4_drm.h>
+#include <pthread.h>
+#include <hal-common.h>
+#include <hal-tbm-types.h>
+#include <hal-tbm-interface.h>
+#include "tbm_bufmgr_tgl.h"
+#include "tbm_backend_log.h"
+
+#define VC4_DRM_NAME "vc4"
+
+#define TBM_COLOR_FORMAT_COUNT 4
+#define STRERR_BUFSIZE 128
+#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
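+/* e.g. SIZE_ALIGN(30, 16) == 32, DIV_ROUND_UP(30, 16) == 2; SIZE_ALIGN
+ * assumes base is a power of two. */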
+
+#ifdef ALIGN_EIGHT
+#define TBM_SURFACE_ALIGNMENT_PLANE (8)
+#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
+#else
+#define TBM_SURFACE_ALIGNMENT_PLANE (16)
+#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (16)
+#endif
+
+#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
+#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
+#define TBM_SURFACE_ALIGNMENT_HEIGHT_YUV (16)
+
+//#define VC4_TILED_FORMAT 1
+
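+/*
+ * The definitions below appear to mirror the out-of-tree dmabuf_sync
+ * kernel interface. Whether the running kernel provides it is probed at
+ * init time via /sys/module/dmabuf_sync/parameters/enabled, which sets
+ * use_dma_fence (see hal_backend_tbm_vc4_init).
+ */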
+struct dma_buf_info {
+ unsigned long size;
+ unsigned int fence_supported;
+ unsigned int padding;
+};
+
+#define DMA_BUF_ACCESS_READ 0x1
+#define DMA_BUF_ACCESS_WRITE 0x2
+#define DMA_BUF_ACCESS_DMA 0x4
+#define DMA_BUF_ACCESS_MAX 0x8
+
+#define DMA_FENCE_LIST_MAX 5
+
+struct dma_buf_fence {
+ unsigned long ctx;
+ unsigned int type;
+};
+
+#define DMABUF_IOCTL_BASE 'F'
+#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
+
+#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
+#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
+#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
+
+/* tgl key values */
+#define GLOBAL_KEY ((unsigned int)(-1))
+/* TBM_CACHE */
+#define TBM_VC4_CACHE_INV 0x01 /**< cache invalidate */
+#define TBM_VC4_CACHE_CLN 0x02 /**< cache clean */
+#define TBM_VC4_CACHE_ALL 0x10 /**< cache all */
+#define TBM_VC4_CACHE_FLUSH (TBM_VC4_CACHE_INV|TBM_VC4_CACHE_CLN) /**< cache flush */
+#define TBM_VC4_CACHE_FLUSH_ALL (TBM_VC4_CACHE_FLUSH|TBM_VC4_CACHE_ALL) /**< cache flush all */
+
+enum {
+ DEVICE_NONE = 0,
+ DEVICE_CA, /* cache aware device */
+ DEVICE_CO /* cache oblivious device */
+};
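+/*
+ * DEVICE_CA records that the last writer was the CPU (goes through the CPU
+ * cache), DEVICE_CO that it was a DMA device (bypasses it); the pairing of
+ * the two decides which cache operation the next access needs
+ * (see _bo_set_cache_state).
+ */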
+
+typedef union _tbm_bo_cache_state tbm_bo_cache_state;
+
+union _tbm_bo_cache_state {
+ unsigned int val;
+ struct {
+		unsigned int cntFlush:16;	/* snapshot of the global flush count, used to skip redundant flushes */
+ unsigned int isCached:1;
+ unsigned int isDirtied:2;
+ } data;
+};
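+/*
+ * The packed value above is stored per-bo in the tgl module, keyed by the
+ * bo's flink name, so that the cache state is shared across processes
+ * (see _tgl_set_data()/_tgl_get_data()).
+ */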
+
+typedef struct _tbm_vc4_bufmgr tbm_vc4_bufmgr;
+typedef struct _tbm_vc4_bo tbm_vc4_bo;
+
+/* tbm buffer object for vc4 */
+struct _tbm_vc4_bo {
+ int fd;
+
+ unsigned int name; /* FLINK ID */
+
+ unsigned int gem; /* GEM Handle */
+
+ unsigned int dmabuf; /* fd for dmabuf */
+
+ void *pBase; /* virtual address */
+
+ unsigned int size;
+
+	unsigned int flags_tbm;	/* currently unused; reserved for future extension */
+
+ pthread_mutex_t mutex;
+ struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
+ int device;
+ int opt;
+
+ tbm_bo_cache_state cache_state;
+ unsigned int map_cnt;
+ int last_map_device;
+
+ tbm_vc4_bufmgr *bufmgr_data;
+};
+
+/* tbm bufmgr private for vc4 */
+struct _tbm_vc4_bufmgr {
+ int fd;
+ int isLocal;
+ void *hashBos;
+
+ int use_dma_fence;
+
+ int tgl_fd;
+};
+
+static char *STR_DEVICE[] = {
+ "DEF",
+ "CPU",
+ "2D",
+ "3D",
+ "MM"
+};
+
+static char *STR_OPT[] = {
+ "NONE",
+ "RD",
+ "WR",
+ "RDWR"
+};
+
+
+static uint32_t tbm_vc4_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
+ HAL_TBM_FORMAT_ARGB8888,
+ HAL_TBM_FORMAT_XRGB8888,
+ HAL_TBM_FORMAT_NV12,
+ HAL_TBM_FORMAT_YUV420
+ };
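+/* The tgl-based cache maintenance below is intentionally compiled out. */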
+#undef ENABLE_CACHECRTL
+#ifdef ENABLE_CACHECRTL
+#ifdef TGL_GET_VERSION
+static inline int
+_tgl_get_version(int fd)
+{
+ struct tgl_ver_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
+ if (err) {
+		TBM_BACKEND_ERR("error(%s)\n",
+			strerror_r(errno, buf, STRERR_BUFSIZE));
+ return 0;
+ }
+
+ TBM_BACKEND_DBG("tgl version is (%u, %u).\n", data.major, data.minor);
+
+ return 1;
+}
+#endif
+
+static inline int
+_tgl_init(int fd, unsigned int key)
+{
+ struct tgl_reg_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.timeout_ms = 1000;
+
+ err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_destroy(int fd, unsigned int key)
+{
+ struct tgl_reg_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_lock(int fd, unsigned int key, int opt)
+{
+ struct tgl_lock_data data;
+ enum tgl_type_data tgl_type;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ switch (opt) {
+	case HAL_TBM_OPTION_READ:
+		tgl_type = TGL_TYPE_READ;
+		break;
+	case HAL_TBM_OPTION_WRITE:
+		tgl_type = TGL_TYPE_WRITE;
+		break;
+ default:
+ tgl_type = TGL_TYPE_NONE;
+ break;
+ }
+
+ data.key = key;
+ data.type = tgl_type;
+
+ err = ioctl(fd, TGL_IOCTL_LOCK, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d opt:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_unlock(int fd, unsigned int key)
+{
+ struct tgl_lock_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.type = TGL_TYPE_NONE;
+
+ err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_set_data(int fd, unsigned int key, unsigned int val)
+{
+ struct tgl_usr_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.data1 = val;
+
+ err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline unsigned int
+_tgl_get_data(int fd, unsigned int key)
+{
+ struct tgl_usr_data data = { 0, };
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+
+ err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return data.data1;
+}
+
+static int
+_vc4_cache_flush(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int flags)
+{
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+
+ /* cache flush is managed by kernel side when using dma-fence. */
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ struct drm_vc4_gem_cache_op cache_op = {0, };
+ int ret;
+
+ /* if bo_data is null, do cache_flush_all */
+ if (bo_data) {
+ cache_op.flags = 0;
+ cache_op.usr_addr = (uint64_t)((uint32_t)bo_data->pBase);
+ cache_op.size = bo_data->size;
+ } else {
+ flags = TBM_VC4_CACHE_FLUSH_ALL;
+ cache_op.flags = 0;
+ cache_op.usr_addr = 0;
+ cache_op.size = 0;
+ }
+
+ if (flags & TBM_VC4_CACHE_INV) {
+ if (flags & TBM_VC4_CACHE_ALL)
+ cache_op.flags |= VC4_DRM_CACHE_INV_ALL;
+ else
+ cache_op.flags |= VC4_DRM_CACHE_INV_RANGE;
+ }
+
+ if (flags & TBM_VC4_CACHE_CLN) {
+ if (flags & TBM_VC4_CACHE_ALL)
+ cache_op.flags |= VC4_DRM_CACHE_CLN_ALL;
+ else
+ cache_op.flags |= VC4_DRM_CACHE_CLN_RANGE;
+ }
+
+ if (flags & TBM_VC4_CACHE_ALL)
+ cache_op.flags |= VC4_DRM_ALL_CACHES_CORES;
+
+ ret = drmCommandWriteRead(bufmgr_data->fd, DRM_VC4_GEM_CACHE_OP, &cache_op,
+ sizeof(cache_op));
+ if (ret) {
+ TBM_BACKEND_ERR("fail to flush the cache.\n");
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+static int
+_bo_init_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int import)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ _tgl_init(bufmgr_data->tgl_fd, bo_data->name);
+
+ tbm_bo_cache_state cache_state;
+
+ if (import == 0) {
+ cache_state.data.isDirtied = DEVICE_NONE;
+ cache_state.data.isCached = 0;
+ cache_state.data.cntFlush = 0;
+
+ _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name, cache_state.val);
+ }
+#endif
+
+ return 1;
+}
+
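+/*
+ * Decide which cache maintenance a new mapping needs:
+ * - CPU access after a device (CO) write -> invalidate the CPU cache.
+ * - device access after a CPU (CA) write -> clean (writeback) the CPU
+ *   cache, skipped when cntFlush shows a global flush already happened.
+ */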
+static int
+_bo_set_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int device, int opt)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ char need_flush = 0;
+ unsigned short cntFlush = 0;
+
+ /* get cache state of a bo_data */
+ bo_data->cache_state.val = _tgl_get_data(bufmgr_data->tgl_fd,
+ bo_data->name);
+
+ /* get global cache flush count */
+ cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
+
+ if (device == HAL_TBM_DEVICE_CPU) {
+ if (bo_data->cache_state.data.isDirtied == DEVICE_CO &&
+ bo_data->cache_state.data.isCached)
+ need_flush = TBM_VC4_CACHE_INV;
+
+ bo_data->cache_state.data.isCached = 1;
+		if (opt & HAL_TBM_OPTION_WRITE)
+ bo_data->cache_state.data.isDirtied = DEVICE_CA;
+ else {
+ if (bo_data->cache_state.data.isDirtied != DEVICE_CA)
+ bo_data->cache_state.data.isDirtied = DEVICE_NONE;
+ }
+ } else {
+ if (bo_data->cache_state.data.isDirtied == DEVICE_CA &&
+ bo_data->cache_state.data.isCached &&
+ bo_data->cache_state.data.cntFlush == cntFlush)
+ need_flush = TBM_VC4_CACHE_CLN | TBM_VC4_CACHE_ALL;
+
+		if (opt & HAL_TBM_OPTION_WRITE)
+ bo_data->cache_state.data.isDirtied = DEVICE_CO;
+ else {
+ if (bo_data->cache_state.data.isDirtied != DEVICE_CO)
+ bo_data->cache_state.data.isDirtied = DEVICE_NONE;
+ }
+ }
+
+ if (need_flush) {
+ if (need_flush & TBM_VC4_CACHE_ALL)
+ _tgl_set_data(bufmgr_data->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
+
+ /* call cache flush */
+ _vc4_cache_flush(bufmgr_data, bo_data, need_flush);
+
+ TBM_BACKEND_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
+ bo_data->cache_state.data.isCached,
+ bo_data->cache_state.data.isDirtied,
+ need_flush,
+ cntFlush);
+ }
+#endif
+
+ return 1;
+}
+
+static int
+_bo_save_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ unsigned short cntFlush = 0;
+
+ /* get global cache flush count */
+ cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
+
+ /* save global cache flush count */
+ bo_data->cache_state.data.cntFlush = cntFlush;
+ _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name,
+ bo_data->cache_state.val);
+#endif
+
+ return 1;
+}
+
+static void
+_bo_destroy_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
+ TBM_BACKEND_RETURN_IF_FAIL(bo_data != NULL);
+
+ if (bufmgr_data->use_dma_fence)
+		return;
+
+ _tgl_destroy(bufmgr_data->tgl_fd, bo_data->name);
+#endif
+}
+
+static int
+_bufmgr_init_cache_state(tbm_vc4_bufmgr *bufmgr_data)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ /* open tgl fd for saving cache flush data */
+ bufmgr_data->tgl_fd = open(tgl_devfile, O_RDWR);
+
+ if (bufmgr_data->tgl_fd < 0) {
+ bufmgr_data->tgl_fd = open(tgl_devfile1, O_RDWR);
+ if (bufmgr_data->tgl_fd < 0) {
+ TBM_BACKEND_ERR("fail to open global_lock:%s\n",
+ tgl_devfile1);
+ return 0;
+ }
+ }
+
+#ifdef TGL_GET_VERSION
+ if (!_tgl_get_version(bufmgr_data->tgl_fd)) {
+ TBM_BACKEND_ERR("fail to get tgl_version. tgl init failed.\n");
+ close(bufmgr_data->tgl_fd);
+ return 0;
+ }
+#endif
+
+ if (!_tgl_init(bufmgr_data->tgl_fd, GLOBAL_KEY)) {
+ TBM_BACKEND_ERR("fail to initialize the tgl\n");
+ close(bufmgr_data->tgl_fd);
+ return 0;
+ }
+#endif
+
+ return 1;
+}
+
+static void
+_bufmgr_deinit_cache_state(tbm_vc4_bufmgr *bufmgr_data)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
+
+ if (bufmgr_data->use_dma_fence)
+ return;
+
+ if (bufmgr_data->tgl_fd >= 0)
+ close(bufmgr_data->tgl_fd);
+#endif
+}
+
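+/*
+ * Open the vc4 DRM device: try drmOpen() with the driver name first and,
+ * failing that, walk the "drm" subsystem with udev looking for a card
+ * node whose parent platform device is "vc4-drm".
+ */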
+static int
+_tbm_vc4_open_drm()
+{
+ int fd = -1;
+
+	fd = drmOpen(VC4_DRM_NAME, NULL);
+	if (fd < 0) {
+		TBM_BACKEND_ERR("fail to open drm.(%s)\n", VC4_DRM_NAME);
+ struct udev *udev = NULL;
+ struct udev_enumerate *e = NULL;
+ struct udev_list_entry *entry = NULL;
+ struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+ const char *filepath;
+ struct stat s;
+ int ret;
+
+ TBM_BACKEND_DBG("search drm-device by udev\n");
+
+ udev = udev_new();
+ if (!udev) {
+ TBM_BACKEND_ERR("udev_new() failed.\n");
+ return -1;
+ }
+
+ e = udev_enumerate_new(udev);
+ udev_enumerate_add_match_subsystem(e, "drm");
+ udev_enumerate_add_match_sysname(e, "card[0-9]*");
+ udev_enumerate_scan_devices(e);
+
+ udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+ device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+ udev_list_entry_get_name(entry));
+ device_parent = udev_device_get_parent(device);
+			/* No need to unref device_parent; it shares device's refcount */
+ if (device_parent) {
+ if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
+ drm_device = device;
+ TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
+ udev_device_get_syspath(drm_device),
+ udev_device_get_sysname(device_parent));
+ break;
+ }
+ }
+ udev_device_unref(device);
+ }
+
+		udev_enumerate_unref(e);
+
+		if (!drm_device) {
+			TBM_BACKEND_ERR("cannot find drm device of vc4-drm via udev.\n");
+			udev_unref(udev);
+			return -1;
+		}
+
+		/* Get device file path. */
+		filepath = udev_device_get_devnode(drm_device);
+ if (!filepath) {
+ TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ /* Open DRM device file and check validity. */
+ fd = open(filepath, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+			TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ ret = fstat(fd, &s);
+ if (ret) {
+			TBM_BACKEND_ERR("fstat() failed.\n");
+ close(fd);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ }
+
+ return fd;
+}
+
+#if 0 // render node functions.
+static int
+_check_render_node(void)
+{
+#ifndef USE_RENDER_NODE
+ return 0;
+#else
+ struct udev *udev = NULL;
+ struct udev_enumerate *e = NULL;
+ struct udev_list_entry *entry = NULL;
+ struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+
+ udev = udev_new();
+ if (!udev) {
+ TBM_BACKEND_ERR("udev_new() failed.\n");
+ return -1;
+ }
+
+ e = udev_enumerate_new(udev);
+ udev_enumerate_add_match_subsystem(e, "drm");
+ udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
+ udev_enumerate_scan_devices(e);
+
+ udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+ device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+ udev_list_entry_get_name(entry));
+ device_parent = udev_device_get_parent(device);
+		/* No need to unref device_parent; it shares device's refcount */
+ if (device_parent) {
+ if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
+ drm_device = device;
+ TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
+ udev_device_get_syspath(drm_device),
+ udev_device_get_sysname(device_parent));
+ break;
+ }
+ }
+ udev_device_unref(device);
+ }
+
+ udev_enumerate_unref(e);
+ udev_unref(udev);
+
+	if (!drm_device)
+		return 0;
+
+ udev_device_unref(drm_device);
+ return 1;
+#endif
+}
+
+static int
+_get_render_node(void)
+{
+ struct udev *udev = NULL;
+ struct udev_enumerate *e = NULL;
+ struct udev_list_entry *entry = NULL;
+ struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+ const char *filepath;
+ struct stat s;
+ int fd = -1;
+ int ret;
+
+ udev = udev_new();
+ if (!udev) {
+ TBM_BACKEND_ERR("udev_new() failed.\n");
+ return -1;
+ }
+
+ e = udev_enumerate_new(udev);
+ udev_enumerate_add_match_subsystem(e, "drm");
+ udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
+ udev_enumerate_scan_devices(e);
+
+ udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+ device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+ udev_list_entry_get_name(entry));
+ device_parent = udev_device_get_parent(device);
+		/* No need to unref device_parent; it shares device's refcount */
+ if (device_parent) {
+ if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
+ drm_device = device;
+ TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
+ udev_device_get_syspath(drm_device),
+ udev_device_get_sysname(device_parent));
+ break;
+ }
+ }
+ udev_device_unref(device);
+ }
+
+	udev_enumerate_unref(e);
+
+	if (!drm_device) {
+		TBM_BACKEND_ERR("cannot find drm render device of vc4-drm via udev.\n");
+		udev_unref(udev);
+		return -1;
+	}
+
+	/* Get device file path. */
+	filepath = udev_device_get_devnode(drm_device);
+ if (!filepath) {
+ TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ /* Open DRM device file and check validity. */
+ fd = open(filepath, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+		TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ ret = fstat(fd, &s);
+ if (ret) {
+		TBM_BACKEND_ERR("fstat() failed.\n");
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ close(fd);
+ return -1;
+ }
+
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+
+ return fd;
+}
+#endif
+
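+/*
+ * Get the global flink name of a GEM handle. The name doubles as the
+ * export/import key and as the per-bo key in the tgl module.
+ */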
+static unsigned int
+_get_name(int fd, unsigned int gem)
+{
+ struct drm_gem_flink arg = {0,};
+
+ arg.handle = gem;
+ if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
+ TBM_BACKEND_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
+ return 0;
+ }
+
+ return (unsigned int)arg.name;
+}
+
+static hal_tbm_bo_handle
+_vc4_bo_handle(tbm_vc4_bo *bo_data, int device)
+{
+ hal_tbm_bo_handle bo_handle;
+
+	memset(&bo_handle, 0x0, sizeof(bo_handle));
+
+ switch (device) {
+ case HAL_TBM_DEVICE_DEFAULT:
+ case HAL_TBM_DEVICE_2D:
+ bo_handle.u32 = (uint32_t)bo_data->gem;
+ break;
+ case HAL_TBM_DEVICE_CPU:
+ if (!bo_data->pBase) {
+ struct drm_vc4_mmap_bo arg = {0, };
+ void *map = NULL;
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_VC4_MMAP_BO, &arg)) {
+ TBM_BACKEND_ERR("Cannot map_vc4 gem=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ map = mmap(NULL, bo_data->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bo_data->fd, arg.offset);
+ if (map == MAP_FAILED) {
+ TBM_BACKEND_ERR("Cannot usrptr gem=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+ bo_data->pBase = map;
+ }
+ bo_handle.ptr = (void *)bo_data->pBase;
+ break;
+ case HAL_TBM_DEVICE_3D:
+ case HAL_TBM_DEVICE_MM:
+ if (!bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ bo_handle.u32 = (uint32_t)bo_data->dmabuf;
+ break;
+ default:
+ TBM_BACKEND_ERR("Not supported device:%d\n", device);
+ bo_handle.ptr = (void *) NULL;
+ break;
+ }
+
+ return bo_handle;
+}
+
+static hal_tbm_bufmgr_capability
+tbm_vc4_bufmgr_get_capabilities(hal_tbm_bufmgr *bufmgr, hal_tbm_error *error)
+{
+ hal_tbm_bufmgr_capability capabilities = HAL_TBM_BUFMGR_CAPABILITY_NONE;
+
+#ifdef VC4_TILED_FORMAT
+ capabilities = HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY|HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD|HAL_TBM_BUFMGR_CAPABILITY_TILED_MEMORY;
+#else
+ capabilities = HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY|HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD;
+#endif
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return capabilities;
+}
+
+static hal_tbm_error
+tbm_vc4_bufmgr_get_supported_formats(hal_tbm_bufmgr *bufmgr,
+ uint32_t **formats, uint32_t *num)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ uint32_t *color_formats;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
+ if (color_formats == NULL)
+ return HAL_TBM_ERROR_OUT_OF_MEMORY;
+
+ memcpy(color_formats, tbm_vc4_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
+
+ *formats = color_formats;
+ *num = TBM_COLOR_FORMAT_COUNT;
+
+ TBM_BACKEND_DBG("supported format count = %d\n", *num);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+
+#ifdef VC4_TILED_FORMAT
+#include <drm_fourcc.h>
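+/*
+ * T-format tiling helpers. A vc4 utile is 64 bytes regardless of cpp
+ * (8x8 at 1 cpp, 8x4 at 2 cpp, 4x4 at 4 cpp, 2x4 at 8 cpp); images that
+ * are at most 4 utiles wide or high use the simpler LT (linear-tile)
+ * layout instead of the full T format.
+ */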
+static inline uint32_t
+vc4_utile_width(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ case 2:
+ return 8;
+ case 4:
+ return 4;
+ case 8:
+ return 2;
+ default:
+ return 4;
+ }
+}
+
+static inline uint32_t
+vc4_utile_height(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ return 8;
+ case 2:
+ case 4:
+ case 8:
+ return 4;
+ default:
+ return 4;
+ }
+}
+
+static inline bool
+vc4_size_is_lt(uint32_t width, uint32_t height, int cpp)
+{
+ return (width <= 4 * vc4_utile_width(cpp) ||
+ height <= 4 * vc4_utile_height(cpp));
+}
+
+static hal_tbm_bo *
+tbm_vc4_bufmgr_alloc_bo_with_tiled_format(hal_tbm_bufmgr *bufmgr, int width, int height,
+ int cpp, int format, hal_tbm_bo_memory_type flags, int bo_idx, hal_tbm_error *err)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_bo *bo_data;
+ uint32_t utile_w = vc4_utile_width(cpp);
+ uint32_t utile_h = vc4_utile_height(cpp);
+ uint32_t level_width, level_height;
+ int size;
+ uint32_t stride;
+
+
+ level_width = width;
+ level_height = height;
+
+	if (bufmgr_data == NULL) {
+		TBM_BACKEND_ERR("bufmgr is null\n");
+		if (err)
+			*err = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return NULL;
+	}
+
+ if (vc4_size_is_lt(level_width, level_height, cpp)) {
+ level_width = SIZE_ALIGN(level_width, utile_w);
+ level_height = SIZE_ALIGN(level_height, utile_h);
+ } else {
+ level_width = SIZE_ALIGN(level_width,
+ 4 * 2 * utile_w);
+ level_height = SIZE_ALIGN(level_height,
+ 4 * 2 * utile_h);
+ }
+
+ stride = level_width * cpp;
+
+ size = level_height * stride;
+ size = SIZE_ALIGN(size, 4096);
+
+
+ bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
+	if (!bo_data) {
+		TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+		if (err)
+			*err = HAL_TBM_ERROR_OUT_OF_MEMORY;
+		return NULL;
+	}
+ bo_data->bufmgr_data = bufmgr_data;
+
+ struct drm_vc4_create_bo arg = {0, };
+
+ arg.size = (__u32)size;
+ arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
+ TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
+ (unsigned int)arg.size);
+		free(bo_data);
+		if (err)
+			*err = HAL_TBM_ERROR_INVALID_OPERATION;
+		return NULL;
+ }
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = (unsigned int)arg.handle;
+ bo_data->size = size;
+ bo_data->flags_tbm = flags;
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 0)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+		free(bo_data);
+		if (err)
+			*err = HAL_TBM_ERROR_INVALID_OPERATION;
+		return NULL;
+ }
+
+ pthread_mutex_init(&bo_data->mutex, NULL);
+
+ if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+			free(bo_data);
+			if (err)
+				*err = HAL_TBM_ERROR_INVALID_OPERATION;
+			return NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ //set modifier
+ uint64_t modifier;
+ modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
+ struct drm_vc4_set_tiling set_tiling = {
+ .handle = bo_data->gem,
+ .modifier = modifier,
+ };
+	if (drmIoctl(bo_data->fd, DRM_IOCTL_VC4_SET_TILING, &set_tiling))
+		TBM_BACKEND_ERR("Cannot set T-tiling modifier for gem:%d\n", bo_data->gem);
+
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+	if (err)
+		*err = HAL_TBM_ERROR_NONE;
+
+	return (hal_tbm_bo *)bo_data;
+}
+#endif
+
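+/*
+ * Compute the per-plane layout of a surface. For multi-planar YUV formats
+ * the offset of plane N is the accumulated, plane-aligned size of the
+ * planes before it; every plane of every format lives in one bo (bo_idx 0).
+ */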
+static hal_tbm_error
+tbm_vc4_bufmgr_get_plane_data(hal_tbm_bufmgr *bufmgr,
+ hal_tbm_format format, int plane_idx, int width,
+ int height, uint32_t *size, uint32_t *offset,
+ uint32_t *pitch, int *bo_idx)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ int bpp;
+ int _offset = 0;
+ int _pitch = 0;
+ int _size = 0;
+ int _bo_idx = 0;
+ int _align_height = 0;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ switch (format) {
+ /* 16 bpp RGB */
+ case HAL_TBM_FORMAT_XRGB4444:
+ case HAL_TBM_FORMAT_XBGR4444:
+ case HAL_TBM_FORMAT_RGBX4444:
+ case HAL_TBM_FORMAT_BGRX4444:
+ case HAL_TBM_FORMAT_ARGB4444:
+ case HAL_TBM_FORMAT_ABGR4444:
+ case HAL_TBM_FORMAT_RGBA4444:
+ case HAL_TBM_FORMAT_BGRA4444:
+ case HAL_TBM_FORMAT_XRGB1555:
+ case HAL_TBM_FORMAT_XBGR1555:
+ case HAL_TBM_FORMAT_RGBX5551:
+ case HAL_TBM_FORMAT_BGRX5551:
+ case HAL_TBM_FORMAT_ARGB1555:
+ case HAL_TBM_FORMAT_ABGR1555:
+ case HAL_TBM_FORMAT_RGBA5551:
+ case HAL_TBM_FORMAT_BGRA5551:
+ case HAL_TBM_FORMAT_RGB565:
+ bpp = 16;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+ _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ break;
+ /* 24 bpp RGB */
+ case HAL_TBM_FORMAT_RGB888:
+ case HAL_TBM_FORMAT_BGR888:
+ bpp = 24;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+ _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ break;
+ /* 32 bpp RGB */
+ case HAL_TBM_FORMAT_XRGB8888:
+ case HAL_TBM_FORMAT_XBGR8888:
+ case HAL_TBM_FORMAT_RGBX8888:
+ case HAL_TBM_FORMAT_BGRX8888:
+ case HAL_TBM_FORMAT_ARGB8888:
+ case HAL_TBM_FORMAT_ABGR8888:
+ case HAL_TBM_FORMAT_RGBA8888:
+ case HAL_TBM_FORMAT_BGRA8888:
+ bpp = 32;
+ _offset = 0;
+#ifdef VC4_TILED_FORMAT
+ if (vc4_size_is_lt(width, height, 4)) {
+ width = SIZE_ALIGN(width, vc4_utile_width(4));
+ height = SIZE_ALIGN(height, vc4_utile_height(4));
+
+ } else {
+ width = SIZE_ALIGN(width, 32);
+			uint32_t utile_h = vc4_utile_height(4); /* takes cpp in bytes; passing bpp (32) would hit the default case */
+ height = SIZE_ALIGN(height, 8*utile_h);
+ }
+#endif
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+ _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ break;
+
+ /* packed YCbCr */
+ case HAL_TBM_FORMAT_YUYV:
+ case HAL_TBM_FORMAT_YVYU:
+ case HAL_TBM_FORMAT_UYVY:
+ case HAL_TBM_FORMAT_VYUY:
+ case HAL_TBM_FORMAT_AYUV:
+ bpp = 32;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ break;
+
+ /*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+ case HAL_TBM_FORMAT_NV12:
+ case HAL_TBM_FORMAT_NV21:
+ bpp = 12;
+ /*if (plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if (plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_NV16:
+ case HAL_TBM_FORMAT_NV61:
+ bpp = 16;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if( plane_idx ==1 )*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+
+ /*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+
+ /*
+ * NATIVE_BUFFER_FORMAT_YV12
+ * NATIVE_BUFFER_FORMAT_I420
+ */
+ case HAL_TBM_FORMAT_YUV410:
+ case HAL_TBM_FORMAT_YVU410:
+ bpp = 9;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
+ _align_height = SIZE_ALIGN(height / 4, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
+ _align_height = SIZE_ALIGN(height / 4, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV411:
+ case HAL_TBM_FORMAT_YVU411:
+ case HAL_TBM_FORMAT_YUV420:
+ case HAL_TBM_FORMAT_YVU420:
+ bpp = 12;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV / 2);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV / 2);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV422:
+ case HAL_TBM_FORMAT_YVU422:
+ bpp = 16;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV444:
+ case HAL_TBM_FORMAT_YVU444:
+ bpp = 24;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ default:
+ bpp = 0;
+ break;
+ }
+
+ *size = _size;
+ *offset = _offset;
+ *pitch = _pitch;
+ *bo_idx = _bo_idx;
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_bo *
+tbm_vc4_bufmgr_alloc_bo(hal_tbm_bufmgr *bufmgr, unsigned int size,
+ hal_tbm_bo_memory_type flags, hal_tbm_error *error)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_bo *bo_data;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ struct drm_vc4_create_bo arg = {0, };
+
+ arg.size = (__u32)size;
+ arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
+ TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
+ (unsigned int)arg.size);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = (unsigned int)arg.handle;
+ bo_data->size = size;
+ bo_data->flags_tbm = flags;
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 0)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ pthread_mutex_init(&bo_data->mutex, NULL);
+
+ if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
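+/*
+ * Import a dma-buf fd. The fd is resolved to a GEM handle and then to a
+ * flink name, which is looked up in hashBos so that re-importing a buffer
+ * this process already knows returns the existing bo instead of a duplicate.
+ */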
+static hal_tbm_bo *
+tbm_vc4_bufmgr_import_fd(hal_tbm_bufmgr *bufmgr, hal_tbm_fd key, hal_tbm_error *error)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_bo *bo_data;
+ unsigned int gem = 0;
+ unsigned int name;
+ int ret;
+ char buf[STRERR_BUFSIZE];
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ /*getting handle from fd*/
+ struct drm_prime_handle arg = {0, };
+
+ arg.fd = key;
+ arg.flags = 0;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
+ TBM_BACKEND_ERR("Cannot get gem handle from fd:%d (%s)\n",
+ arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+ gem = arg.handle;
+
+ name = _get_name(bufmgr_data->fd, gem);
+ if (!name) {
+ TBM_BACKEND_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
+ gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ ret = drmHashLookup(bufmgr_data->hashBos, name, (void **)&bo_data);
+ if (ret == 0) {
+ if (gem == bo_data->gem) {
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+			return (hal_tbm_bo *)bo_data;
+ }
+ }
+
+	/* Determine the size of bo_data. The fd-to-handle ioctl really should
+	 * return the size, but it doesn't. On kernel 3.12 or later, we can
+	 * lseek on the prime fd to get the size. Older kernels will just
+	 * fail, in which case we fall back to the size reported by
+	 * DRM_IOCTL_GEM_OPEN below.
+	 */
+	off_t real_size = -1;
+	struct drm_gem_open open_arg = {0, };
+
+	real_size = lseek(key, 0, SEEK_END);
+
+ open_arg.name = name;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
+ TBM_BACKEND_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
+ gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ /* Free gem handle to avoid a memory leak*/
+ struct drm_gem_close close_arg = {0, };
+ memset(&close_arg, 0, sizeof(close_arg));
+ close_arg.handle = open_arg.handle;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_CLOSE, &close_arg)) {
+		TBM_BACKEND_ERR("Cannot close gem_handle.(%s)\n",
+			strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+	if (real_size == (off_t)-1)
+		real_size = open_arg.size;
+
+ bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("bo_data:%p fail to allocate the bo_data\n", bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = gem;
+ bo_data->size = real_size;
+ bo_data->name = name;
+ bo_data->flags_tbm = 0;
+
+#ifdef VC4_TILED_FORMAT
+ struct drm_vc4_get_tiling get_tiling = {
+ .handle = bo_data->gem,
+ };
+ drmIoctl(bo_data->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
+
+ if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
+ bo_data->flags_tbm |= HAL_TBM_BO_TILED;
+#endif
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("bo_data:%p Cannot insert bo_data to Hash(%d) from gem:%d, fd:%d\n",
+ bo_data, bo_data->name, gem, key);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ key,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
+static hal_tbm_bo *
+tbm_vc4_bufmgr_import_key(hal_tbm_bufmgr *bufmgr, hal_tbm_key key, hal_tbm_error *error)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_bo *bo_data;
+ int ret;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ ret = drmHashLookup(bufmgr_data->hashBos, key, (void **)&bo_data);
+ if (ret == 0) {
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+ return (hal_tbm_bo *)bo_data;
+ }
+
+ struct drm_gem_open arg = {0, };
+
+ arg.name = key;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
+ TBM_BACKEND_ERR("Cannot open gem name=%d\n", key);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = arg.handle;
+ bo_data->size = arg.size;
+ bo_data->name = key;
+ bo_data->flags_tbm = 0;
+
+#ifdef VC4_TILED_FORMAT
+ struct drm_vc4_get_tiling get_tiling = {
+ .handle = bo_data->gem,
+ };
+ drmIoctl(bo_data->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
+
+ if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
+ bo_data->flags_tbm |= HAL_TBM_BO_TILED;
+#endif
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ if (!bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ free(bo_data);
+ return NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
+static void
+tbm_vc4_bo_free(hal_tbm_bo *bo)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ tbm_vc4_bo *temp;
+ tbm_vc4_bufmgr *bufmgr_data;
+ char buf[STRERR_BUFSIZE];
+ int ret;
+
+ if (!bo_data)
+ return;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return;
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->size);
+
+ if (bo_data->pBase) {
+ if (munmap(bo_data->pBase, bo_data->size) == -1) {
+ TBM_BACKEND_ERR("bo_data:%p fail to munmap(%s)\n",
+ bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
+ }
+ }
+
+ /* close dmabuf */
+ if (bo_data->dmabuf) {
+ close(bo_data->dmabuf);
+ bo_data->dmabuf = 0;
+ }
+
+ /* delete bo_data from hash */
+ ret = drmHashLookup(bufmgr_data->hashBos, bo_data->name, (void **)&temp);
+ if (ret == 0)
+ drmHashDelete(bufmgr_data->hashBos, bo_data->name);
+ else
+ TBM_BACKEND_ERR("Cannot find bo_data to Hash(%d), ret=%d\n", bo_data->name, ret);
+
+ if (temp != bo_data)
+ TBM_BACKEND_ERR("hashBos probably has several BOs with same name!!!\n");
+
+ _bo_destroy_cache_state(bufmgr_data, bo_data);
+
+ /* Free gem handle */
+ struct drm_gem_close arg = {0, };
+
+ memset(&arg, 0, sizeof(arg));
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_GEM_CLOSE, &arg))
+ TBM_BACKEND_ERR("bo_data:%p fail to gem close.(%s)\n",
+ bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
+
+ free(bo_data);
+}
+
+static int
+tbm_vc4_bo_get_size(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_data->size;
+}
+
+static hal_tbm_bo_memory_type
+tbm_vc4_bo_get_memory_type(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return HAL_TBM_BO_DEFAULT;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_data->flags_tbm;
+}
+
+static hal_tbm_bo_handle
+tbm_vc4_bo_get_handle(hal_tbm_bo *bo, hal_tbm_bo_device_type device, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ hal_tbm_bo_handle bo_handle;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (!bo_data->gem) {
+ TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ TBM_BACKEND_DBG("bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm,
+ bo_data->size,
+ STR_DEVICE[device]);
+
+ /*Get mapped bo_handle*/
+ bo_handle = _vc4_bo_handle(bo_data, device);
+ if (bo_handle.ptr == NULL) {
+ TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d\n",
+ bo_data->gem, device);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_handle;
+}
+
+static hal_tbm_bo_handle
+tbm_vc4_bo_map(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
+ hal_tbm_bo_access_option opt, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ hal_tbm_bo_handle bo_handle;
+ tbm_vc4_bufmgr *bufmgr_data;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (!bo_data->gem) {
+ TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, %s, %s\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ STR_DEVICE[device],
+ STR_OPT[opt]);
+
+ /*Get mapped bo_handle*/
+ bo_handle = _vc4_bo_handle(bo_data, device);
+ if (bo_handle.ptr == NULL) {
+ TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
+ bo_data->gem, device, opt);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (bo_data->map_cnt == 0)
+ _bo_set_cache_state(bufmgr_data, bo_data, device, opt);
+
+ bo_data->last_map_device = device;
+
+ bo_data->map_cnt++;
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_handle;
+}
+
+static hal_tbm_error
+tbm_vc4_bo_unmap(hal_tbm_bo *bo)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ tbm_vc4_bufmgr *bufmgr_data;
+
+ if (!bo_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ if (!bo_data->gem)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bo_data->map_cnt--;
+
+ if (bo_data->map_cnt == 0)
+ _bo_save_cache_state(bufmgr_data, bo_data);
+
+#ifdef ENABLE_CACHECRTL
+ if (bo_data->last_map_device == HAL_TBM_DEVICE_CPU)
+ _vc4_cache_flush(bufmgr_data, bo_data, TBM_VC4_CACHE_FLUSH_ALL);
+#endif
+
+ bo_data->last_map_device = -1;
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
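+/*
+ * bo_lock/bo_unlock use two mechanisms: dma-buf fences
+ * (DMABUF_IOCTL_GET/PUT_FENCE) for HAL_TBM_DEVICE_3D, and POSIX record
+ * locks on the dma-buf fd (fcntl F_SETLKW) for HAL_TBM_DEVICE_CPU.
+ */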
+static hal_tbm_error
+tbm_vc4_bo_lock(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
+ hal_tbm_bo_access_option opt)
+{
+#ifndef ALWAYS_BACKEND_CTRL
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ tbm_vc4_bufmgr *bufmgr_data;
+ struct dma_buf_fence fence;
+ struct flock filelock;
+ int ret = 0;
+ char buf[STRERR_BUFSIZE];
+
+ if (!bo_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ if (device != HAL_TBM_DEVICE_3D && device != HAL_TBM_DEVICE_CPU) {
+		TBM_BACKEND_DBG("Unsupported device type.\n");
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+ memset(&fence, 0, sizeof(struct dma_buf_fence));
+
+ /* Check if the given type is valid or not. */
+	if (opt & HAL_TBM_OPTION_WRITE) {
+		if (device == HAL_TBM_DEVICE_3D)
+			fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
+	} else if (opt & HAL_TBM_OPTION_READ) {
+ if (device == HAL_TBM_DEVICE_3D)
+ fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
+ } else {
+ TBM_BACKEND_ERR("Invalid argument\n");
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Check if the tbm manager supports dma fence or not. */
+ if (!bufmgr_data->use_dma_fence) {
+		TBM_BACKEND_ERR("DMA FENCE not supported(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+ if (device == HAL_TBM_DEVICE_3D) {
+ ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
+ if (ret < 0) {
+			TBM_BACKEND_ERR("Cannot get FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+ } else {
+		if (opt & HAL_TBM_OPTION_WRITE)
+ filelock.l_type = F_WRLCK;
+ else
+ filelock.l_type = F_RDLCK;
+
+ filelock.l_whence = SEEK_CUR;
+ filelock.l_start = 0;
+ filelock.l_len = 0;
+
+ if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+ pthread_mutex_lock(&bo_data->mutex);
+
+ if (device == HAL_TBM_DEVICE_3D) {
+ int i;
+
+ for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
+ if (bo_data->dma_fence[i].ctx == 0) {
+ bo_data->dma_fence[i].type = fence.type;
+ bo_data->dma_fence[i].ctx = fence.ctx;
+ break;
+ }
+ }
+
+ if (i == DMA_FENCE_LIST_MAX) {
+ /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
+ TBM_BACKEND_ERR("fence list is full\n");
+ }
+ }
+
+ pthread_mutex_unlock(&bo_data->mutex);
+
+	TBM_BACKEND_DBG("DMABUF_IOCTL_GET_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+#endif /* ALWAYS_BACKEND_CTRL */
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_error
+tbm_vc4_bo_unlock(hal_tbm_bo *bo)
+{
+#ifndef ALWAYS_BACKEND_CTRL
+	tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+	tbm_vc4_bufmgr *bufmgr_data;
+	struct dma_buf_fence fence;
+	struct flock filelock;
+	unsigned int dma_type = 0;
+	int ret = 0;
+	char buf[STRERR_BUFSIZE];
+
+	if (!bo_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	bufmgr_data = bo_data->bufmgr_data;
+	if (!bufmgr_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ if (bo_data->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
+ dma_type = 1;
+
+	if (!bo_data->dma_fence[0].ctx && dma_type) {
+		TBM_BACKEND_DBG("FENCE not supported or ignored.\n");
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+ pthread_mutex_lock(&bo_data->mutex);
+
+ if (dma_type) {
+ fence.type = bo_data->dma_fence[0].type;
+ fence.ctx = bo_data->dma_fence[0].ctx;
+ int i;
+
+ for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
+ bo_data->dma_fence[i - 1].type = bo_data->dma_fence[i].type;
+ bo_data->dma_fence[i - 1].ctx = bo_data->dma_fence[i].ctx;
+ }
+ bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
+ bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
+ }
+
+ pthread_mutex_unlock(&bo_data->mutex);
+
+ if (dma_type) {
+ ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
+ if (ret < 0) {
+			TBM_BACKEND_ERR("Cannot put FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+ } else {
+ filelock.l_type = F_UNLCK;
+ filelock.l_whence = SEEK_CUR;
+ filelock.l_start = 0;
+ filelock.l_len = 0;
+
+ if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+	TBM_BACKEND_DBG("DMABUF_IOCTL_PUT_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+#endif /* ALWAYS_BACKEND_CTRL */
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_fd
+tbm_vc4_bo_export_fd(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ int ret;
+ char buf[STRERR_BUFSIZE];
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return -1;
+ }
+
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ ret = drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
+ if (ret) {
+ TBM_BACKEND_ERR("bo_data:%p Cannot dmabuf=%d (%s)\n",
+ bo_data, bo_data->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_fd) ret;
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ arg.fd,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_fd)arg.fd;
+}
+
+static hal_tbm_key
+tbm_vc4_bo_export_key(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+
+ if (!bo_data->name) {
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+ if (!bo_data->name) {
+ TBM_BACKEND_ERR("error Cannot get name\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_key)bo_data->name;
+}
+
+static hal_tbm_error
+_tbm_vc4_authenticated_drm_fd_handler(hal_tbm_fd auth_fd, void *user_data)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *) user_data;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ bufmgr_data->fd = auth_fd;
+ TBM_BACKEND_INFO("Get the authenticated drm_fd(%d)!\n", bufmgr_data->fd);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static int
+hal_backend_tbm_vc4_exit(void *data)
+{
+ hal_tbm_backend_data *backend_data = (hal_tbm_backend_data *)data;
+ tbm_vc4_bufmgr *bufmgr_data;
+ unsigned long key;
+ void *value;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(backend_data != NULL, -1);
+
+ bufmgr_data = (tbm_vc4_bufmgr *)backend_data->bufmgr;
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, -1);
+
+ if (backend_data->bufmgr_funcs)
+ free(backend_data->bufmgr_funcs);
+ if (backend_data->bo_funcs)
+ free(backend_data->bo_funcs);
+
+ if (bufmgr_data->hashBos) {
+ while (drmHashFirst(bufmgr_data->hashBos, &key, &value) > 0) {
+ free(value);
+ drmHashDelete(bufmgr_data->hashBos, key);
+ }
+
+ drmHashDestroy(bufmgr_data->hashBos);
+ bufmgr_data->hashBos = NULL;
+ }
+
+ _bufmgr_deinit_cache_state(bufmgr_data);
+
+	if (bufmgr_data->fd >= 0)
+		close(bufmgr_data->fd);
+
+ free(backend_data->bufmgr);
+ free(backend_data);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static int
+hal_backend_tbm_vc4_init(void **data)
+{
+ hal_tbm_backend_data *backend_data = NULL;
+ hal_tbm_bufmgr_funcs *bufmgr_funcs = NULL;
+ hal_tbm_bo_funcs *bo_funcs = NULL;
+ tbm_vc4_bufmgr *bufmgr_data = NULL;
+ int drm_fd = -1;
+ int fp;
+
+	if (!data) {
+		TBM_BACKEND_ERR("data is null.\n");
+		return -1;
+	}
+
+ /* allocate a hal_tbm_backend_data */
+ backend_data = calloc(1, sizeof(struct _hal_tbm_backend_data));
+ if (!backend_data) {
+ TBM_BACKEND_ERR("fail to alloc backend_data!\n");
+ *data = NULL;
+ return -1;
+ }
+ *data = backend_data;
+
+ /* allocate a hal_tbm_bufmgr */
+ bufmgr_data = calloc(1, sizeof(struct _tbm_vc4_bufmgr));
+ if (!bufmgr_data) {
+ TBM_BACKEND_ERR("fail to alloc bufmgr_data!\n");
+ goto fail_alloc_bufmgr_data;
+ }
+ backend_data->bufmgr = (hal_tbm_bufmgr *)bufmgr_data;
+
+ // open drm_fd
+ drm_fd = _tbm_vc4_open_drm();
+ if (drm_fd < 0) {
+ TBM_BACKEND_ERR("fail to open drm!\n");
+ goto fail_open_drm;
+ }
+
+ // set true when backend has a drm_device.
+ backend_data->has_drm_device = 1;
+
+ // check if drm_fd is master_drm_fd.
+ if (drmIsMaster(drm_fd)) {
+ // drm_fd is a master_drm_fd.
+ backend_data->drm_info.drm_fd = drm_fd;
+ backend_data->drm_info.is_master = 1;
+
+ bufmgr_data->fd = drm_fd;
+ TBM_BACKEND_INFO("Get the master drm_fd(%d)!\n", bufmgr_data->fd);
+ } else {
+ // drm_fd is not a master_drm_fd.
+ // request authenticated fd
+		close(drm_fd);
+		backend_data->drm_info.drm_fd = -1;
+		backend_data->drm_info.is_master = 0;
+		bufmgr_data->fd = -1; /* filled in later by _tbm_vc4_authenticated_drm_fd_handler() */
+ backend_data->drm_info.auth_drm_fd_func = _tbm_vc4_authenticated_drm_fd_handler;
+ backend_data->drm_info.user_data = bufmgr_data;
+
+ TBM_BACKEND_INFO("A backend requests an authenticated drm_fd.\n");
+ }
+
+ //Check if the tbm manager supports dma fence or not.
+ fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
+ if (fp != -1) {
+ char buf[1];
+ int length = read(fp, buf, 1);
+
+ if (length == 1 && buf[0] == '1')
+ bufmgr_data->use_dma_fence = 1;
+
+ close(fp);
+ }
+
+ if (!_bufmgr_init_cache_state(bufmgr_data)) {
+ TBM_BACKEND_ERR("fail to init bufmgr cache state\n");
+ goto fail_init_cache_state;
+ }
+
+ /*Create Hash Table*/
+ bufmgr_data->hashBos = drmHashCreate();
+
+ /* alloc and register bufmgr_funcs */
+ bufmgr_funcs = calloc(1, sizeof(struct _hal_tbm_bufmgr_funcs));
+ if (!bufmgr_funcs) {
+ TBM_BACKEND_ERR("fail to alloc bufmgr_funcs!\n");
+ goto fail_alloc_bufmgr_funcs;
+ }
+ backend_data->bufmgr_funcs = bufmgr_funcs;
+
+ bufmgr_funcs->bufmgr_get_capabilities = tbm_vc4_bufmgr_get_capabilities;
+ bufmgr_funcs->bufmgr_get_supported_formats = tbm_vc4_bufmgr_get_supported_formats;
+ bufmgr_funcs->bufmgr_get_plane_data = tbm_vc4_bufmgr_get_plane_data;
+ bufmgr_funcs->bufmgr_alloc_bo = tbm_vc4_bufmgr_alloc_bo;
+ bufmgr_funcs->bufmgr_alloc_bo_with_format = NULL;
+ bufmgr_funcs->bufmgr_import_fd = tbm_vc4_bufmgr_import_fd;
+ bufmgr_funcs->bufmgr_import_key = tbm_vc4_bufmgr_import_key;
+#ifdef VC4_TILED_FORMAT
+ bufmgr_funcs->bufmgr_alloc_bo_with_tiled_format = tbm_vc4_bufmgr_alloc_bo_with_tiled_format;
+#else
+ bufmgr_funcs->bufmgr_alloc_bo_with_tiled_format = NULL;
+#endif
+
+ /* alloc and register bo_funcs */
+ bo_funcs = calloc(1, sizeof(struct _hal_tbm_bo_funcs));
+ if (!bo_funcs) {
+ TBM_BACKEND_ERR("fail to alloc bo_funcs!\n");
+ goto fail_alloc_bo_funcs;
+ }
+ backend_data->bo_funcs = bo_funcs;
+
+ bo_funcs->bo_free = tbm_vc4_bo_free;
+ bo_funcs->bo_get_size = tbm_vc4_bo_get_size;
+ bo_funcs->bo_get_memory_types = tbm_vc4_bo_get_memory_type;
+ bo_funcs->bo_get_handle = tbm_vc4_bo_get_handle;
+ bo_funcs->bo_map = tbm_vc4_bo_map;
+ bo_funcs->bo_unmap = tbm_vc4_bo_unmap;
+ bo_funcs->bo_lock = tbm_vc4_bo_lock;
+ bo_funcs->bo_unlock = tbm_vc4_bo_unlock;
+ bo_funcs->bo_export_fd = tbm_vc4_bo_export_fd;
+ bo_funcs->bo_export_key = tbm_vc4_bo_export_key;
+
+ TBM_BACKEND_DBG("drm_fd:%d\n", bufmgr_data->fd);
+
+ return HAL_TBM_ERROR_NONE;
+
+fail_alloc_bo_funcs:
+ free(bufmgr_funcs);
+fail_alloc_bufmgr_funcs:
+ _bufmgr_deinit_cache_state(bufmgr_data);
+ if (bufmgr_data->hashBos)
+ drmHashDestroy(bufmgr_data->hashBos);
+fail_init_cache_state:
+	if (bufmgr_data->fd >= 0)
+		close(bufmgr_data->fd);
+fail_open_drm:
+ free(bufmgr_data);
+fail_alloc_bufmgr_data:
+ free(backend_data);
+
+ *data = NULL;
+
+ return -1;
+}
+
+hal_backend hal_backend_tbm_data = {
+ "vc4",
+ "Samsung",
+ HAL_ABI_VERSION_TIZEN_6_5,
+ hal_backend_tbm_vc4_init,
+ hal_backend_tbm_vc4_exit
+};