--- /dev/null
+/**************************************************************************
+
+libtbm_dumb
+
+Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <libudev.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <xf86drm.h>
+#include <pthread.h>
+#include <hal-common.h>
+#include <hal-tbm-types.h>
+#include <hal-tbm-interface.h>
+#include "tbm_backend_log.h"
+
+#define USE_DMAIMPORT
+#define TBM_COLOR_FORMAT_COUNT 4
+
+#define STRERR_BUFSIZE 128
+
+#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
+
+#define TBM_SURFACE_ALIGNMENT_PLANE (64)
+#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (128)
+#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
+
+/* ABI of the (vendor) dma-buf sync ioctls used by the optional
+ * lock/unlock path (USE_BACKEND_LOCK). */
+struct dma_buf_info {
+	unsigned long size;
+	unsigned int fence_supported;
+	unsigned int padding;
+};
+
+/* dma-buf access types; READ/WRITE may be OR-ed with DMA */
+#define DMA_BUF_ACCESS_READ 0x1
+#define DMA_BUF_ACCESS_WRITE 0x2
+#define DMA_BUF_ACCESS_DMA 0x4
+#define DMA_BUF_ACCESS_MAX 0x8
+
+/* max number of outstanding fences tracked per bo */
+#define DMA_FENCE_LIST_MAX 5
+
+struct dma_buf_fence {
+	unsigned long ctx;
+	unsigned int type;
+};
+
+#define DMABUF_IOCTL_BASE 'F'
+#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
+
+#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
+#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
+#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
+
+typedef struct _tbm_dumb_bufmgr tbm_dumb_bufmgr;
+typedef struct _tbm_dumb_bo tbm_dumb_bo;
+
+/* tbm buffer object for dumb */
+struct _tbm_dumb_bo {
+	int fd;                 /* drm fd the bo was created/imported on */
+
+	unsigned int name; /* FLINK ID */
+
+	unsigned int gem; /* GEM Handle */
+
+	unsigned int dmabuf; /* fd for dmabuf (0 until first exported) */
+
+	void *pBase; /* virtual address (NULL until first CPU map) */
+
+	unsigned int size;      /* size of the buffer in bytes */
+
+	unsigned int flags_dumb; /* dumb-buffer allocation flags */
+	unsigned int flags_tbm;  /* HAL_TBM_BO_* memory-type flags */
+
+	pthread_mutex_t mutex;  /* guards dma_fence[] in the lock path */
+	struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
+	int device; /* NOTE(review): not written in this file — confirm use */
+	int opt;    /* NOTE(review): not written in this file — confirm use */
+
+	tbm_dumb_bufmgr *bufmgr_data; /* owning buffer manager */
+};
+
+/* tbm bufmgr private for dumb */
+struct _tbm_dumb_bufmgr {
+	int fd;         /* drm device fd */
+	void *hashBos;  /* FLINK-name -> tbm_dumb_bo map (drmHash*) */
+
+	int use_dma_fence; /* nonzero when dma-buf fence sync is enabled */
+};
+
+/* device-type names for debug logs, indexed by hal_tbm_bo_device_type */
+static char *STR_DEVICE[] = {
+	"DEF",
+	"CPU",
+	"2D",
+	"3D",
+	"MM"
+};
+
+/* access-option names for debug logs, indexed by hal_tbm_bo_access_option */
+static char *STR_OPT[] = {
+	"NONE",
+	"RD",
+	"WR",
+	"RDWR"
+};
+
+/* color formats advertised via tbm_dumb_bufmgr_get_supported_formats() */
+static uint32_t tbm_dumb_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
+	HAL_TBM_FORMAT_ARGB8888,
+	HAL_TBM_FORMAT_XRGB8888,
+	HAL_TBM_FORMAT_NV12,
+	HAL_TBM_FORMAT_YUV420
+};
+
+/* Find and open the primary DRM device node via udev.
+ * Prefers the PCI card marked boot_vga; otherwise falls back to the
+ * first "card[0-9]*" device found.
+ * Returns an open fd (O_RDWR | O_CLOEXEC) or -1 on failure. */
+static int
+_tbm_dumb_open_drm()
+{
+	struct udev *udev = NULL;
+	struct udev_enumerate *e = NULL;
+	struct udev_list_entry *entry = NULL;
+	struct udev_device *device = NULL, *drm_device = NULL, *pci = NULL;
+	const char *filepath, *id;
+	struct stat s;
+	int fd = -1;
+	int ret;
+	char buf[STRERR_BUFSIZE];
+
+	udev = udev_new();
+	if (!udev) {
+		TBM_BACKEND_ERR("udev_new() failed.\n");
+		return -1;
+	}
+
+	e = udev_enumerate_new(udev);
+	udev_enumerate_add_match_subsystem(e, "drm");
+	udev_enumerate_add_match_sysname(e, "card[0-9]*");
+	udev_enumerate_scan_devices(e);
+
+	drm_device = NULL;
+	udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+		filepath = udev_list_entry_get_name(entry);
+		device = udev_device_new_from_syspath(udev, filepath);
+		if (!device)
+			continue;
+
+		pci = udev_device_get_parent_with_subsystem_devtype(device, "pci", NULL);
+		if (pci) {
+			id = udev_device_get_sysattr_value(pci, "boot_vga");
+			if (id && !strcmp(id, "1")) {
+				/* the boot VGA card wins; drop any earlier candidate */
+				if (drm_device)
+					udev_device_unref(drm_device);
+				drm_device = device;
+				break;
+			}
+		}
+
+		/* keep the first card seen as a fallback candidate */
+		if (!drm_device)
+			drm_device = device;
+		else
+			udev_device_unref(device);
+	}
+
+	udev_enumerate_unref(e);
+
+	/* FIX: guard against an empty enumeration — drm_device may be NULL
+	 * and udev_device_get_devnode(NULL) would crash */
+	if (!drm_device) {
+		TBM_BACKEND_ERR("no drm device found.\n");
+		udev_unref(udev);
+		return -1;
+	}
+
+	/* Get device file path. */
+	filepath = udev_device_get_devnode(drm_device);
+	if (!filepath) {
+		TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
+		udev_device_unref(drm_device);
+		udev_unref(udev);
+		return -1;
+	}
+
+	/* Open DRM device file and check validity. */
+	fd = open(filepath, O_RDWR | O_CLOEXEC);
+	if (fd < 0) {
+		/* FIX: format had a %s conversion with no argument (undefined
+		 * behavior) — pass the path */
+		TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
+		udev_device_unref(drm_device);
+		udev_unref(udev);
+		return -1;
+	}
+
+	ret = fstat(fd, &s);
+	if (ret) {
+		/* FIX: supply the missing %s argument */
+		TBM_BACKEND_ERR("fstat() failed %s.\n",
+				strerror_r(errno, buf, STRERR_BUFSIZE));
+		close(fd);
+		udev_device_unref(drm_device);
+		udev_unref(udev);
+		return -1;
+	}
+
+	udev_device_unref(drm_device);
+	udev_unref(udev);
+
+	return fd;
+}
+
+/* Convert TBM memory-type flags to dumb-buffer allocation flags.
+ * Dumb buffers take no allocation flags here, so this always returns
+ * 0; the parameter is kept for symmetry with _get_tbm_flag_from_dumb(). */
+static unsigned int
+_get_dumb_flag_from_tbm(unsigned int ftbm)
+{
+	unsigned int flags = 0;
+	return flags;
+}
+
+/* Map dumb-buffer flags back to TBM memory-type flags.
+ * Dumb buffers are always reported as scanout-capable and
+ * non-cachable, regardless of the input flags. */
+static unsigned int
+_get_tbm_flag_from_dumb(unsigned int fdumb)
+{
+	return HAL_TBM_BO_SCANOUT | HAL_TBM_BO_NONCACHABLE;
+}
+
+/* Get a global FLINK name for a GEM handle on fd.
+ * Returns 0 on failure (0 is treated as "no name" by callers). */
+static unsigned int
+_get_name(int fd, unsigned int gem)
+{
+	struct drm_gem_flink arg = {0,};
+
+	arg.handle = gem;
+	if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
+		TBM_BACKEND_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
+		return 0;
+	}
+
+	return (unsigned int)arg.name;
+}
+
+/* Produce the per-device handle for a bo: the raw GEM handle for
+ * DEFAULT/2D, a (lazily created, cached in pBase) CPU mapping for
+ * CPU, or a (lazily exported, cached in dmabuf) dma-buf fd for 3D/MM.
+ * Returns a zeroed handle on failure. */
+static hal_tbm_bo_handle
+_dumb_bo_handle(tbm_dumb_bo *bo_data, int device)
+{
+	hal_tbm_bo_handle bo_handle;
+
+	/* FIX: clear the whole union, not just sizeof(uint64_t) */
+	memset(&bo_handle, 0x0, sizeof(bo_handle));
+
+	switch (device) {
+	case HAL_TBM_DEVICE_DEFAULT:
+	case HAL_TBM_DEVICE_2D:
+		bo_handle.u32 = (uint32_t)bo_data->gem;
+		break;
+	case HAL_TBM_DEVICE_CPU:
+		if (!bo_data->pBase) {
+			struct drm_mode_map_dumb arg = {0,};
+			void *map = NULL;
+
+			arg.handle = bo_data->gem;
+			if (drmIoctl(bo_data->fd, DRM_IOCTL_MODE_MAP_DUMB, &arg)) {
+				TBM_BACKEND_ERR("Cannot map_ gem=%d\n", bo_data->gem);
+				return (hal_tbm_bo_handle) NULL;
+			}
+
+			map = mmap(NULL, bo_data->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+				   bo_data->fd, arg.offset);
+			if (map == MAP_FAILED) {
+				TBM_BACKEND_ERR("Cannot usrptr gem=%d\n", bo_data->gem);
+				return (hal_tbm_bo_handle) NULL;
+			}
+			bo_data->pBase = map;
+		}
+		bo_handle.ptr = (void *)bo_data->pBase;
+		break;
+	case HAL_TBM_DEVICE_3D:
+#ifdef USE_DMAIMPORT
+		if (!bo_data->dmabuf) {
+			struct drm_prime_handle arg = {0, };
+
+			arg.handle = bo_data->gem;
+			if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+				TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+				return (hal_tbm_bo_handle) NULL;
+			}
+			bo_data->dmabuf = arg.fd;
+		}
+
+		bo_handle.u32 = (uint32_t)bo_data->dmabuf;
+#endif
+		break;
+	case HAL_TBM_DEVICE_MM:
+		if (!bo_data->dmabuf) {
+			struct drm_prime_handle arg = {0, };
+
+			arg.handle = bo_data->gem;
+			if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+				TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+				return (hal_tbm_bo_handle) NULL;
+			}
+			bo_data->dmabuf = arg.fd;
+		}
+
+		bo_handle.u32 = (uint32_t)bo_data->dmabuf;
+		break;
+	default:
+		TBM_BACKEND_ERR("Not supported device:%d\n", device);
+		bo_handle.ptr = (void *) NULL;
+		break;
+	}
+
+	return bo_handle;
+}
+
+#ifdef USE_CACHE
+/* Cache-flush hook. The dumb backend has no cache-control ioctl, so
+ * this only logs a warning and reports success. */
+static int
+_dumb_cache_flush(int fd, tbm_dumb_bo *bo_data, int flags)
+{
+	TBM_BACKEND_ERR("warning fail to flush the cache.\n");
+	return 1;
+}
+#endif
+
+/* Report this backend's share capabilities: bos can be shared both by
+ * FLINK key and by dma-buf fd. */
+static hal_tbm_bufmgr_capability
+tbm_dumb_bufmgr_get_capabilities(hal_tbm_bufmgr *bufmgr, hal_tbm_error *error)
+{
+	if (error)
+		*error = HAL_TBM_ERROR_NONE;
+
+	return HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY |
+	       HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD;
+}
+
+/* Return a newly-allocated copy of the supported color-format table.
+ * On success the caller takes ownership of *formats (free() it) and
+ * *num is set to the entry count. */
+static hal_tbm_error
+tbm_dumb_bufmgr_get_supported_formats(hal_tbm_bufmgr *bufmgr,
+				uint32_t **formats, uint32_t *num)
+{
+	tbm_dumb_bufmgr *bufmgr_data = (tbm_dumb_bufmgr *)bufmgr;
+	uint32_t *color_formats;
+
+	TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+	color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
+	if (color_formats == NULL)
+		return HAL_TBM_ERROR_OUT_OF_MEMORY;
+
+	memcpy(color_formats, tbm_dumb_color_format_list, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
+
+	*formats = color_formats;
+	*num = TBM_COLOR_FORMAT_COUNT;
+
+	TBM_BACKEND_DBG("supported format count = %d\n", *num);
+
+	return HAL_TBM_ERROR_NONE;
+}
+
+/* Compute the geometry of plane 'plane_idx' of a width x height
+ * surface in 'format': byte size, byte offset within the bo, row
+ * pitch, and the bo index holding the plane (always bo 0 here).
+ * For multi-planar formats the code falls through the preceding plane
+ * blocks so _offset accumulates up to the requested plane.
+ * NOTE(review): the default case and YUV410/411 set only bpp and
+ * leave the geometry zeroed yet still return HAL_TBM_ERROR_NONE —
+ * confirm callers treat size==0 as "unsupported". */
+static hal_tbm_error
+tbm_dumb_bufmgr_get_plane_data(hal_tbm_bufmgr *bufmgr,
+				hal_tbm_format format, int plane_idx, int width,
+				int height, uint32_t *size, uint32_t *offset,
+				uint32_t *pitch, int *bo_idx)
+{
+	tbm_dumb_bufmgr *bufmgr_data = (tbm_dumb_bufmgr *)bufmgr;
+	int bpp;
+	int _offset = 0;
+	int _pitch = 0;
+	int _size = 0;
+	int _bo_idx = 0;
+
+	TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+	switch (format) {
+	/* 16 bpp RGB */
+	case HAL_TBM_FORMAT_XRGB4444:
+	case HAL_TBM_FORMAT_XBGR4444:
+	case HAL_TBM_FORMAT_RGBX4444:
+	case HAL_TBM_FORMAT_BGRX4444:
+	case HAL_TBM_FORMAT_ARGB4444:
+	case HAL_TBM_FORMAT_ABGR4444:
+	case HAL_TBM_FORMAT_RGBA4444:
+	case HAL_TBM_FORMAT_BGRA4444:
+	case HAL_TBM_FORMAT_XRGB1555:
+	case HAL_TBM_FORMAT_XBGR1555:
+	case HAL_TBM_FORMAT_RGBX5551:
+	case HAL_TBM_FORMAT_BGRX5551:
+	case HAL_TBM_FORMAT_ARGB1555:
+	case HAL_TBM_FORMAT_ABGR1555:
+	case HAL_TBM_FORMAT_RGBA5551:
+	case HAL_TBM_FORMAT_BGRA5551:
+	case HAL_TBM_FORMAT_RGB565:
+		bpp = 16;
+		_offset = 0;
+		_pitch = SIZE_ALIGN((width * bpp)>>3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+		_bo_idx = 0;
+		break;
+	/* 24 bpp RGB */
+	case HAL_TBM_FORMAT_RGB888:
+	case HAL_TBM_FORMAT_BGR888:
+		bpp = 24;
+		_offset = 0;
+		_pitch = SIZE_ALIGN((width * bpp)>>3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+		_bo_idx = 0;
+		break;
+	/* 32 bpp RGB */
+	case HAL_TBM_FORMAT_XRGB8888:
+	case HAL_TBM_FORMAT_XBGR8888:
+	case HAL_TBM_FORMAT_RGBX8888:
+	case HAL_TBM_FORMAT_BGRX8888:
+	case HAL_TBM_FORMAT_ARGB8888:
+	case HAL_TBM_FORMAT_ABGR8888:
+	case HAL_TBM_FORMAT_RGBA8888:
+	case HAL_TBM_FORMAT_BGRA8888:
+		bpp = 32;
+		_offset = 0;
+		_pitch = SIZE_ALIGN((width * bpp)>>3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+		_bo_idx = 0;
+		break;
+
+	/* packed YCbCr */
+	case HAL_TBM_FORMAT_YUYV:
+	case HAL_TBM_FORMAT_YVYU:
+	case HAL_TBM_FORMAT_UYVY:
+	case HAL_TBM_FORMAT_VYUY:
+	case HAL_TBM_FORMAT_AYUV:
+		bpp = 32;
+		_offset = 0;
+		_pitch = SIZE_ALIGN((width * bpp)>>3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+		_bo_idx = 0;
+		break;
+
+	/*
+	 * 2 plane YCbCr
+	 * index 0 = Y plane, [7:0] Y
+	 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+	 * or
+	 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+	 */
+	case HAL_TBM_FORMAT_NV12:
+	case HAL_TBM_FORMAT_NV21:
+		bpp = 12;
+		/* Y plane; also the offset base for plane 1 */
+		//if (plane_idx == 0)
+		{
+			_offset = 0;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+			if (plane_idx == 0)
+				break;
+		}
+		/* interleaved CbCr plane at half height */
+		//else if (plane_idx == 1)
+		{
+			_offset += _size;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+		}
+		break;
+	case HAL_TBM_FORMAT_NV16:
+	case HAL_TBM_FORMAT_NV61:
+		bpp = 16;
+		/* Y plane */
+		//if (plane_idx == 0)
+		{
+			_offset = 0;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+			if (plane_idx == 0)
+				break;
+		}
+		/* interleaved CbCr plane at full height (4:2:2) */
+		//else if (plane_idx == 1)
+		{
+			_offset += _size;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+		}
+		break;
+
+	/*
+	 * 3 plane YCbCr
+	 * index 0: Y plane, [7:0] Y
+	 * index 1: Cb plane, [7:0] Cb
+	 * index 2: Cr plane, [7:0] Cr
+	 * or
+	 * index 1: Cr plane, [7:0] Cr
+	 * index 2: Cb plane, [7:0] Cb
+	 */
+	/*
+	NATIVE_BUFFER_FORMAT_YV12
+	NATIVE_BUFFER_FORMAT_I420
+	*/
+	case HAL_TBM_FORMAT_YUV410:
+	case HAL_TBM_FORMAT_YVU410:
+		/* NOTE(review): geometry is never filled for 410 formats */
+		bpp = 9;
+		break;
+	case HAL_TBM_FORMAT_YUV411:
+	case HAL_TBM_FORMAT_YVU411:
+	case HAL_TBM_FORMAT_YUV420:
+	case HAL_TBM_FORMAT_YVU420:
+		bpp = 12;
+		/* Y plane */
+		//if (plane_idx == 0)
+		{
+			_offset = 0;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+			if (plane_idx == 0)
+				break;
+		}
+		/* first chroma plane, half width and half height */
+		//else if (plane_idx == 1)
+		{
+			_offset += _size;
+			_pitch = SIZE_ALIGN(width/2, TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
+			_size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+			if (plane_idx == 1)
+				break;
+		}
+		/* second chroma plane */
+		//else if (plane_idx == 2)
+		{
+			_offset += _size;
+			_pitch = SIZE_ALIGN(width/2, TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
+			_size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+		}
+		break;
+	case HAL_TBM_FORMAT_YUV422:
+	case HAL_TBM_FORMAT_YVU422:
+		bpp = 16;
+		/* Y plane */
+		//if (plane_idx == 0)
+		{
+			_offset = 0;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+			if (plane_idx == 0)
+				break;
+		}
+		/* first chroma plane, half width, full height */
+		//else if (plane_idx == 1)
+		{
+			_offset += _size;
+			_pitch = SIZE_ALIGN(width/2, TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+			if (plane_idx == 1)
+				break;
+		}
+		/* second chroma plane */
+		//else if (plane_idx == 2)
+		{
+			_offset += _size;
+			_pitch = SIZE_ALIGN(width/2, TBM_SURFACE_ALIGNMENT_PITCH_YUV/2);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+		}
+		break;
+	case HAL_TBM_FORMAT_YUV444:
+	case HAL_TBM_FORMAT_YVU444:
+		bpp = 24;
+		/* Y plane */
+		//if (plane_idx == 0)
+		{
+			_offset = 0;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+			if (plane_idx == 0)
+				break;
+		}
+		/* first chroma plane, full resolution */
+		//else if (plane_idx == 1)
+		{
+			_offset += _size;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+			if (plane_idx == 1)
+				break;
+		}
+		/* second chroma plane */
+		//else if (plane_idx == 2)
+		{
+			_offset += _size;
+			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+			_bo_idx = 0;
+		}
+		break;
+	default:
+		bpp = 0;
+		break;
+	}
+
+	*size = _size;
+	*offset = _offset;
+	*pitch = _pitch;
+	*bo_idx = _bo_idx;
+
+	return HAL_TBM_ERROR_NONE;
+}
+
+/* Allocate a dumb buffer of 'size' bytes and wrap it in a bo.
+ * The bo is registered in the bufmgr hash under its FLINK name.
+ * Returns the new bo, or NULL with *error set. */
+static hal_tbm_bo *
+tbm_dumb_bufmgr_alloc_bo(hal_tbm_bufmgr *bufmgr, unsigned int size,
+			 hal_tbm_bo_memory_type flags, hal_tbm_error *error)
+{
+	tbm_dumb_bufmgr *bufmgr_data = (tbm_dumb_bufmgr *)bufmgr;
+	tbm_dumb_bo *bo_data;
+	unsigned int dumb_flags;
+
+	if (bufmgr_data == NULL) {
+		TBM_BACKEND_ERR("bufmgr_data is null\n");
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return NULL;
+	}
+
+	bo_data = calloc(1, sizeof(struct _tbm_dumb_bo));
+	if (!bo_data) {
+		TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+		if (error)
+			*error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+		return NULL;
+	}
+	bo_data->bufmgr_data = bufmgr_data;
+
+	dumb_flags = _get_dumb_flag_from_tbm(flags);
+
+	struct drm_mode_create_dumb arg = {0, };
+	/* only the byte size is known for a new bo, so allocate with
+	 * height=1 and bpp=8: width then equals the size in bytes */
+	arg.height = 1;
+	arg.bpp = 8;
+	arg.width = size;
+	arg.flags = dumb_flags;
+	if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg)) {
+		TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
+				(unsigned int)size);
+		free(bo_data);
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_OPERATION;
+		return NULL;
+	}
+
+	bo_data->fd = bufmgr_data->fd;
+	bo_data->gem = arg.handle;
+	bo_data->size = arg.size;
+	bo_data->flags_tbm = flags;
+	bo_data->flags_dumb = dumb_flags;
+	bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+
+	pthread_mutex_init(&bo_data->mutex, NULL);
+
+	/* eagerly export a dma-buf fd when fence sync is in use */
+	if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
+		struct drm_prime_handle prime_arg = {0, };
+
+		prime_arg.handle = bo_data->gem;
+		if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime_arg)) {
+			TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+			/* FIX: destroy the dumb buffer too — only freeing the
+			 * wrapper leaked the GEM object */
+			struct drm_mode_destroy_dumb destroy_arg = {0, };
+			destroy_arg.handle = bo_data->gem;
+			drmIoctl(bo_data->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_arg);
+			free(bo_data);
+			if (error)
+				*error = HAL_TBM_ERROR_INVALID_OPERATION;
+			return NULL;
+		}
+		bo_data->dmabuf = prime_arg.fd;
+	}
+
+	/* add bo to hash */
+	if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+		TBM_BACKEND_ERR("error Cannot insert bo to Hash(%d)\n", bo_data->name);
+
+	/* FIX: the format has flags:%d(%d) but only one flags argument was
+	 * passed, shifting 'size' into the wrong conversion; also dropped
+	 * the stray double semicolon */
+	TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->flags_tbm, bo_data->flags_dumb,
+			bo_data->size);
+
+	if (error)
+		*error = HAL_TBM_ERROR_NONE;
+
+	return (hal_tbm_bo *)bo_data;
+}
+
+/* Import a bo from a dma-buf fd ('key'). If a bo with the same FLINK
+ * name is already tracked in the hash, that bo is returned instead of
+ * creating a duplicate. The size is read via lseek(SEEK_END) on the
+ * prime fd (kernel >= 3.12); on failure it falls back to the size
+ * reported by DRM_IOCTL_GEM_OPEN. */
+static hal_tbm_bo *
+tbm_dumb_bufmgr_import_fd(hal_tbm_bufmgr *bufmgr, hal_tbm_fd key, hal_tbm_error *error)
+{
+	tbm_dumb_bufmgr *bufmgr_data = (tbm_dumb_bufmgr *)bufmgr;
+	tbm_dumb_bo *bo_data;
+	unsigned int gem = 0;
+	unsigned int name;
+	int ret;
+	char buf[STRERR_BUFSIZE];
+
+	if (bufmgr_data == NULL) {
+		TBM_BACKEND_ERR("bufmgr_data is null\n");
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return NULL;
+	}
+
+	/* get a gem handle from the dma-buf fd */
+	struct drm_prime_handle arg = {0, };
+
+	arg.fd = key;
+	arg.flags = 0;
+	if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
+		TBM_BACKEND_ERR("Cannot get gem handle from fd:%d (%s)\n",
+				arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_OPERATION;
+		return NULL;
+	}
+	gem = arg.handle;
+
+	name = _get_name(bufmgr_data->fd, gem);
+	if (name == 0) {
+		TBM_BACKEND_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
+				gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_OPERATION;
+		return NULL;
+	}
+
+	/* already tracked? return the existing bo */
+	ret = drmHashLookup(bufmgr_data->hashBos, name, (void **)&bo_data);
+	if (ret == 0) {
+		if (gem == bo_data->gem) {
+			if (error)
+				*error = HAL_TBM_ERROR_NONE;
+			return (hal_tbm_bo *)bo_data;
+		}
+	}
+
+	/* Determine size of bo. The fd-to-handle ioctl really should
+	 * return the size, but it doesn't. If we have kernel 3.12 or
+	 * later, we can lseek on the prime fd to get the size. Older
+	 * kernels will just fail, in which case we fall back to the
+	 * size reported by DRM_IOCTL_GEM_OPEN. */
+	/* FIX: keep lseek's off_t result instead of truncating it into an
+	 * unsigned int, so the -1 failure case is detected reliably */
+	off_t fd_size = lseek(key, 0, SEEK_END);
+	unsigned int real_size;
+	struct drm_gem_open open_arg = {0, };
+
+	/* Open the same GEM object only for finding out its size */
+	open_arg.name = name;
+	if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
+		TBM_BACKEND_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
+				gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_OPERATION;
+		return NULL;
+	}
+
+	/* Free gem handle to avoid a memory leak */
+	/* FIX: zero-initialize the ioctl argument, and give the format
+	 * string the %s it was missing for its strerror_r() argument */
+	struct drm_gem_close gem_close = {0, };
+	gem_close.handle = open_arg.handle;
+	if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_CLOSE, &gem_close)) {
+		TBM_BACKEND_ERR("Cannot close gem_handle. (%s)\n",
+				strerror_r(errno, buf, STRERR_BUFSIZE));
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_OPERATION;
+		return NULL;
+	}
+
+	if (fd_size == (off_t)-1)
+		real_size = open_arg.size;
+	else
+		real_size = (unsigned int)fd_size;
+
+	bo_data = calloc(1, sizeof(struct _tbm_dumb_bo));
+	if (!bo_data) {
+		TBM_BACKEND_ERR("bo_data:%p fail to allocate the bo_data\n", bo_data);
+		if (error)
+			*error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+		return NULL;
+	}
+	bo_data->bufmgr_data = bufmgr_data;
+
+	bo_data->fd = bufmgr_data->fd;
+	bo_data->gem = gem;
+	bo_data->dmabuf = 0;
+	bo_data->size = real_size;
+	bo_data->flags_dumb = 0;
+	bo_data->flags_tbm = _get_tbm_flag_from_dumb(bo_data->flags_dumb);
+	bo_data->name = name;
+
+	/* add bo_data to hash */
+	if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+		TBM_BACKEND_ERR("bo_data:%p Cannot insert bo_data to Hash(%d) from gem:%d, fd:%d\n",
+				bo_data, bo_data->name, gem, key);
+
+	TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->dmabuf,
+			key,
+			bo_data->flags_tbm,
+			bo_data->size);
+
+	if (error)
+		*error = HAL_TBM_ERROR_NONE;
+
+	return (hal_tbm_bo *)bo_data;
+}
+
+/* Import a bo by FLINK name ('key'). Returns the tracked bo when the
+ * name is already known; otherwise opens the GEM object, exports a
+ * dma-buf fd for it, and registers the new bo in the hash. */
+static hal_tbm_bo *
+tbm_dumb_bufmgr_import_key(hal_tbm_bufmgr *bufmgr, hal_tbm_key key, hal_tbm_error *error)
+{
+	tbm_dumb_bufmgr *bufmgr_data = (tbm_dumb_bufmgr *)bufmgr;
+	tbm_dumb_bo *bo_data;
+	int ret;
+
+	if (bufmgr_data == NULL) {
+		TBM_BACKEND_ERR("bufmgr_data is null\n");
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return NULL;
+	}
+
+	ret = drmHashLookup(bufmgr_data->hashBos, key, (void **)&bo_data);
+	if (ret == 0) {
+		if (error)
+			*error = HAL_TBM_ERROR_NONE;
+		return (hal_tbm_bo *)bo_data;
+	}
+
+	struct drm_gem_open arg = {0, };
+
+	arg.name = key;
+	if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
+		TBM_BACKEND_ERR("Cannot open gem name=%d\n", key);
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_OPERATION;
+		return NULL;
+	}
+
+	bo_data = calloc(1, sizeof(struct _tbm_dumb_bo));
+	if (!bo_data) {
+		TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+		/* FIX: close the gem handle opened above so it does not leak */
+		struct drm_gem_close close_arg = {0, };
+		close_arg.handle = arg.handle;
+		drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
+		if (error)
+			*error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+		return NULL;
+	}
+	bo_data->bufmgr_data = bufmgr_data;
+
+	bo_data->fd = bufmgr_data->fd;
+	bo_data->gem = arg.handle;
+	bo_data->size = arg.size;
+	bo_data->flags_dumb = 0;
+	bo_data->name = key;
+	bo_data->flags_tbm = _get_tbm_flag_from_dumb(bo_data->flags_dumb);
+
+	if (!bo_data->dmabuf) {
+		struct drm_prime_handle prime_arg = {0, };
+
+		prime_arg.handle = bo_data->gem;
+		if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime_arg)) {
+			TBM_BACKEND_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_data->gem);
+			if (error)
+				*error = HAL_TBM_ERROR_INVALID_OPERATION;
+			/* FIX: close the gem handle as well as freeing the
+			 * wrapper, otherwise the GEM object leaks */
+			struct drm_gem_close close_arg = {0, };
+			close_arg.handle = bo_data->gem;
+			drmIoctl(bo_data->fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
+			free(bo_data);
+			return NULL;
+		}
+		bo_data->dmabuf = prime_arg.fd;
+	}
+
+	/* add bo to hash */
+	if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+		TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+	/* FIX: the flags:%d(%d) conversion was missing its second argument */
+	TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->dmabuf,
+			bo_data->flags_tbm, bo_data->flags_dumb,
+			bo_data->size);
+
+	if (error)
+		*error = HAL_TBM_ERROR_NONE;
+
+	return (hal_tbm_bo *)bo_data;
+}
+
+/* Destroy a bo: unmap the cached CPU mapping, close the exported
+ * dma-buf fd, remove the bo from the bufmgr hash, close the GEM
+ * handle, then free the wrapper. */
+static void
+tbm_dumb_bo_free(hal_tbm_bo *bo)
+{
+	tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+	tbm_dumb_bo *temp = NULL;
+	tbm_dumb_bufmgr *bufmgr_data;
+	char buf[STRERR_BUFSIZE];
+	int ret;
+
+	if (!bo_data)
+		return;
+
+	bufmgr_data = bo_data->bufmgr_data;
+	if (!bufmgr_data)
+		return;
+
+	TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, size:%d\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->dmabuf,
+			bo_data->size);
+
+	if (bo_data->pBase) {
+		if (munmap(bo_data->pBase, bo_data->size) == -1) {
+			TBM_BACKEND_ERR("bo_data:%p fail to munmap(%s)\n",
+					bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
+		}
+	}
+
+	/* close dmabuf */
+	if (bo_data->dmabuf) {
+		close(bo_data->dmabuf);
+		bo_data->dmabuf = 0;
+	}
+
+	/* delete bo from hash */
+	ret = drmHashLookup(bufmgr_data->hashBos, bo_data->name, (void **)&temp);
+	if (ret == 0) {
+		drmHashDelete(bufmgr_data->hashBos, bo_data->name);
+		/* FIX: 'temp' was read even when the lookup failed, i.e. while
+		 * still uninitialized; compare only on the success path */
+		if (temp != bo_data)
+			TBM_BACKEND_ERR("hashBos probably has several BOs with same name!!!\n");
+	} else {
+		TBM_BACKEND_ERR("Cannot find bo_data to Hash(%d), ret=%d\n", bo_data->name, ret);
+	}
+
+	/* Free gem handle (the {0,} initializer already zeroes arg) */
+	struct drm_gem_close arg = {0, };
+
+	arg.handle = bo_data->gem;
+	if (drmIoctl(bo_data->fd, DRM_IOCTL_GEM_CLOSE, &arg))
+		TBM_BACKEND_ERR("bo_data:%p fail to gem close.(%s)\n",
+				bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
+
+	free(bo_data);
+}
+
+/* Return the bo size in bytes, or 0 when bo is NULL. */
+static int
+tbm_dumb_bo_get_size(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+	tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+	hal_tbm_error err = HAL_TBM_ERROR_NONE;
+	int bytes = 0;
+
+	if (bo_data)
+		bytes = bo_data->size;
+	else
+		err = HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	if (error)
+		*error = err;
+
+	return bytes;
+}
+
+/* Return the bo's HAL_TBM_BO_* memory-type flags
+ * (HAL_TBM_BO_DEFAULT when bo is NULL). */
+static hal_tbm_bo_memory_type
+tbm_dumb_bo_get_memory_type(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+	tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+
+	if (!bo_data) {
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return HAL_TBM_BO_DEFAULT;
+	}
+
+	if (error)
+		*error = HAL_TBM_ERROR_NONE;
+
+	return bo_data->flags_tbm;
+}
+
+/* Return a device handle for the bo without any locking/bookkeeping.
+ * Fails when the bo has no GEM handle or the per-device handle cannot
+ * be produced.
+ * NOTE(review): failure is detected by reading bo_handle.ptr even for
+ * devices that fill bo_handle.u32 — this relies on the union layout;
+ * confirm against hal_tbm_bo_handle's definition. */
+static hal_tbm_bo_handle
+tbm_dumb_bo_get_handle(hal_tbm_bo *bo, hal_tbm_bo_device_type device, hal_tbm_error *error)
+{
+	tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+	hal_tbm_bo_handle bo_handle;
+
+	if (!bo_data) {
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return (hal_tbm_bo_handle) NULL;
+	}
+
+	if (!bo_data->gem) {
+		TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return (hal_tbm_bo_handle) NULL;
+	}
+
+	TBM_BACKEND_DBG("bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->dmabuf,
+			bo_data->flags_tbm,
+			bo_data->size,
+			STR_DEVICE[device]);
+
+	/*Get mapped bo_handle*/
+	bo_handle = _dumb_bo_handle(bo_data, device);
+	if (bo_handle.ptr == NULL) {
+		TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d\n",
+				bo_data->gem, device);
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_OPERATION;
+		return (hal_tbm_bo_handle) NULL;
+	}
+
+	if (error)
+		*error = HAL_TBM_ERROR_NONE;
+
+	return bo_handle;
+}
+
+/* Map the bo for the given device/access option and return the
+ * handle. Mapping is delegated to _dumb_bo_handle(), which caches the
+ * CPU mapping / dma-buf fd on the bo; no lock state is recorded here.
+ * NOTE(review): a handle failure reports INVALID_PARAMETER here but
+ * INVALID_OPERATION in tbm_dumb_bo_get_handle() — confirm which is
+ * intended. */
+static hal_tbm_bo_handle
+tbm_dumb_bo_map(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
+		hal_tbm_bo_access_option opt, hal_tbm_error *error)
+{
+	tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+	hal_tbm_bo_handle bo_handle;
+	tbm_dumb_bufmgr *bufmgr_data;
+
+	if (!bo_data) {
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return (hal_tbm_bo_handle) NULL;
+	}
+
+	bufmgr_data = bo_data->bufmgr_data;
+	if (!bufmgr_data) {
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return (hal_tbm_bo_handle) NULL;
+	}
+
+	if (!bo_data->gem) {
+		TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return (hal_tbm_bo_handle) NULL;
+	}
+
+	TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, %s, %s\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->dmabuf,
+			STR_DEVICE[device],
+			STR_OPT[opt]);
+
+	/*Get mapped bo_handle*/
+	bo_handle = _dumb_bo_handle(bo_data, device);
+	if (bo_handle.ptr == NULL) {
+		TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
+				bo_data->gem, device, opt);
+		if (error)
+			*error = HAL_TBM_ERROR_INVALID_PARAMETER;
+		return (hal_tbm_bo_handle) NULL;
+	}
+
+	if (error)
+		*error = HAL_TBM_ERROR_NONE;
+
+	return bo_handle;
+}
+
+/* Unmap hook. The CPU mapping and exported dma-buf fd stay cached on
+ * the bo and are only released in tbm_dumb_bo_free(), so this only
+ * validates arguments and logs. */
+static hal_tbm_error
+tbm_dumb_bo_unmap(hal_tbm_bo *bo)
+{
+	tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+	tbm_dumb_bufmgr *bufmgr_data;
+
+	if (!bo_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	bufmgr_data = bo_data->bufmgr_data;
+	if (!bufmgr_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	if (!bo_data->gem)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->dmabuf);
+
+	return HAL_TBM_ERROR_NONE;
+}
+
+/* Acquire a lock on the bo: a dma-buf fence for HAL_TBM_DEVICE_3D, or
+ * an advisory file lock on the dmabuf fd for CPU access. The acquired
+ * fence is remembered in bo_data->dma_fence[] for bo_unlock().
+ * Compiled out unless USE_BACKEND_LOCK is set. */
+static hal_tbm_error
+tbm_dumb_bo_lock(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
+		 hal_tbm_bo_access_option opt)
+{
+#if USE_BACKEND_LOCK
+	tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+	tbm_dumb_bufmgr *bufmgr_data;
+	struct dma_buf_fence fence;
+	struct flock filelock;
+	int ret = 0;
+	char buf[STRERR_BUFSIZE];
+
+	if (!bo_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	bufmgr_data = bo_data->bufmgr_data;
+	if (!bufmgr_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	/* FIX: this compared against TBM_DEVICE_3D/TBM_DEVICE_CPU, names
+	 * that do not exist in this backend (the rest of the file uses
+	 * HAL_TBM_DEVICE_*), breaking the build when USE_BACKEND_LOCK is
+	 * enabled */
+	if (device != HAL_TBM_DEVICE_3D && device != HAL_TBM_DEVICE_CPU) {
+		TBM_BACKEND_DBG("Not support device type,\n");
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+	/* Check if the tbm manager supports dma fence or not.
+	 * (FIX: this check appeared twice; keep a single early check) */
+	if (!bufmgr_data->use_dma_fence) {
+		TBM_BACKEND_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+	memset(&fence, 0, sizeof(struct dma_buf_fence));
+
+	/* Check if the given type is valid or not. */
+	if (opt & HAL_TBM_OPTION_WRITE) {
+		if (device == HAL_TBM_DEVICE_3D)
+			fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
+	} else if (opt & HAL_TBM_OPTION_READ) {
+		if (device == HAL_TBM_DEVICE_3D)
+			fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
+	} else {
+		TBM_BACKEND_ERR("error Invalid argument\n");
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+	}
+
+	if (device == HAL_TBM_DEVICE_3D) {
+		ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
+		if (ret < 0) {
+			TBM_BACKEND_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+			return HAL_TBM_ERROR_INVALID_OPERATION;
+		}
+	} else {
+		/* CPU access: advisory record lock over the whole dmabuf fd */
+		if (opt & HAL_TBM_OPTION_WRITE)
+			filelock.l_type = F_WRLCK;
+		else
+			filelock.l_type = F_RDLCK;
+
+		filelock.l_whence = SEEK_CUR;
+		filelock.l_start = 0;
+		filelock.l_len = 0;
+
+		if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
+			return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+	pthread_mutex_lock(&bo_data->mutex);
+
+	if (device == HAL_TBM_DEVICE_3D) {
+		int i;
+		/* remember the fence in the first free slot */
+		for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
+			if (bo_data->dma_fence[i].ctx == 0) {
+				bo_data->dma_fence[i].type = fence.type;
+				bo_data->dma_fence[i].ctx = fence.ctx;
+				break;
+			}
+		}
+
+		if (i == DMA_FENCE_LIST_MAX) {
+			//TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim
+			TBM_BACKEND_ERR("fence list is full\n");
+		}
+	}
+
+	pthread_mutex_unlock(&bo_data->mutex);
+
+	TBM_BACKEND_DBG("DMABUF_IOCTL_GET_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->dmabuf);
+#endif
+
+	return HAL_TBM_ERROR_NONE;
+}
+
+/* Release a lock taken by tbm_dumb_bo_lock(): signal the oldest
+ * pending dma-buf fence (shifting the list down), or drop the
+ * advisory file lock for CPU access.
+ * Compiled out unless USE_BACKEND_LOCK is set. */
+static hal_tbm_error
+tbm_dumb_bo_unlock(hal_tbm_bo *bo)
+{
+#if USE_BACKEND_LOCK
+	tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+	tbm_dumb_bufmgr *bufmgr_data;
+	struct dma_buf_fence fence;
+	struct flock filelock;
+	unsigned int dma_type = 0;
+	int ret = 0;
+	char buf[STRERR_BUFSIZE];
+
+	/* FIX: bo_data was dereferenced without a NULL check (bo_lock()
+	 * has one; unlock did not) */
+	if (!bo_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	bufmgr_data = bo_data->bufmgr_data;
+	if (!bufmgr_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+	if (!bufmgr_data->use_dma_fence)
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+
+	if (bo_data->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
+		dma_type = 1;
+
+	/* FIX: this exact condition was duplicated with two different
+	 * messages; keep a single check */
+	if (!bo_data->dma_fence[0].ctx && dma_type) {
+		TBM_BACKEND_DBG("FENCE not support or ignored,\n");
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+	pthread_mutex_lock(&bo_data->mutex);
+
+	if (dma_type) {
+		/* pop the oldest fence and shift the rest down */
+		fence.type = bo_data->dma_fence[0].type;
+		fence.ctx = bo_data->dma_fence[0].ctx;
+		int i;
+		for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
+			bo_data->dma_fence[i-1].type = bo_data->dma_fence[i].type;
+			bo_data->dma_fence[i-1].ctx = bo_data->dma_fence[i].ctx;
+		}
+		bo_data->dma_fence[DMA_FENCE_LIST_MAX-1].type = 0;
+		bo_data->dma_fence[DMA_FENCE_LIST_MAX-1].ctx = 0;
+	}
+
+	pthread_mutex_unlock(&bo_data->mutex);
+
+	if (dma_type) {
+		ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
+		if (ret < 0) {
+			TBM_BACKEND_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+			return HAL_TBM_ERROR_INVALID_OPERATION;
+		}
+	} else {
+		filelock.l_type = F_UNLCK;
+		filelock.l_whence = SEEK_CUR;
+		filelock.l_start = 0;
+		filelock.l_len = 0;
+
+		if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
+			return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+	TBM_BACKEND_DBG("DMABUF_IOCTL_PUT_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
+			bo_data,
+			bo_data->gem, bo_data->name,
+			bo_data->dmabuf);
+#endif
+
+	return HAL_TBM_ERROR_NONE;
+}
+
+hal_tbm_fd
+tbm_dumb_bo_export_fd(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+ int ret;
+ char buf[STRERR_BUFSIZE];
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return -1;
+ }
+
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ ret = drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
+ if (ret) {
+ TBM_BACKEND_ERR("bo_data:%p Cannot dmabuf=%d (%s)\n",
+ bo_data, bo_data->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_fd)ret;
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ arg.fd,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_fd)arg.fd;
+}
+
+static hal_tbm_key
+tbm_dumb_bo_export_key(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_dumb_bo *bo_data = (tbm_dumb_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+
+ if (!bo_data->name) {
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+ if (!bo_data->name) {
+ TBM_BACKEND_ERR("error Cannot get name\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_key)bo_data->name;
+}
+
+static hal_tbm_error
+_tbm_dumb_authenticated_drm_fd_handler(hal_tbm_fd auth_fd, void *user_data)
+{
+ tbm_dumb_bufmgr *bufmgr_data = (tbm_dumb_bufmgr *)user_data;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ bufmgr_data->fd = auth_fd;
+ TBM_BACKEND_INFO("Get the authenticated drm_fd(%d)!\n", bufmgr_data->fd);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static int
+hal_backend_tbm_dumb_exit(void *data)
+{
+ hal_tbm_backend_data *backend_data = (hal_tbm_backend_data *)data;
+ tbm_dumb_bufmgr *bufmgr_data;
+ unsigned long key;
+ void *value;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(backend_data != NULL, -1);
+
+ bufmgr_data = (tbm_dumb_bufmgr *)backend_data->bufmgr;
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, -1);
+
+ if (backend_data->bufmgr_funcs)
+ free(backend_data->bufmgr_funcs);
+ if (backend_data->bo_funcs)
+ free(backend_data->bo_funcs);
+
+ if (bufmgr_data->hashBos) {
+ while (drmHashFirst(bufmgr_data->hashBos, &key, &value) > 0) {
+ free(value);
+ drmHashDelete(bufmgr_data->hashBos, key);
+ }
+
+ drmHashDestroy(bufmgr_data->hashBos);
+ bufmgr_data->hashBos = NULL;
+ }
+
+ close(bufmgr_data->fd);
+
+ free(backend_data->bufmgr);
+ free(backend_data);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static int
+hal_backend_tbm_dumb_init(void **data)
+{
+ hal_tbm_backend_data *backend_data = NULL;
+ hal_tbm_bufmgr_funcs *bufmgr_funcs = NULL;
+ hal_tbm_bo_funcs *bo_funcs = NULL;
+ tbm_dumb_bufmgr *bufmgr_data = NULL;
+ int drm_fd = -1;
+ int fp;
+ uint64_t cap = 0;
+ uint32_t ret;
+
+ /* allocate a hal_tbm_backend_data */
+ backend_data = calloc(1, sizeof(struct _hal_tbm_backend_data));
+ if (!backend_data) {
+ TBM_BACKEND_ERR("fail to alloc backend_data!\n");
+ *data = NULL;
+ return -1;
+ }
+ *data = backend_data;
+
+ /* allocate a hal_tbm_bufmgr */
+ bufmgr_data = calloc(1, sizeof(struct _tbm_dumb_bufmgr));
+ if (!bufmgr_data) {
+ TBM_BACKEND_ERR("fail to alloc bufmgr_data!\n");
+ goto fail_alloc_bufmgr_data;
+ }
+ backend_data->bufmgr = (hal_tbm_bufmgr *)bufmgr_data;
+
+ // open drm_fd
+ drm_fd = _tbm_dumb_open_drm();
+ if (drm_fd < 0) {
+ TBM_BACKEND_ERR("fail to open drm!\n");
+ goto fail_open_drm;
+ }
+
+ // set true when backend has a drm_device.
+ backend_data->has_drm_device = 1;
+
+ ret = drmGetCap(drm_fd, DRM_CAP_DUMB_BUFFER, &cap);
+ if (ret || cap == 0) {
+ TBM_BACKEND_ERR("drm buffer isn't supported !\n");
+ goto fail_get_cap;
+ }
+
+ // check if drm_fd is master_drm_fd.
+ if (drmIsMaster(drm_fd)) {
+ // drm_fd is a master_drm_fd.
+ backend_data->drm_info.drm_fd = drm_fd;
+ backend_data->drm_info.is_master = 1;
+
+ bufmgr_data->fd = drm_fd;
+ TBM_BACKEND_INFO("Get the master drm_fd(%d)!\n", bufmgr_data->fd);
+ } else {
+ // drm_fd is not a master_drm_fd.
+ // request authenticated fd
+ close(drm_fd);
+ backend_data->drm_info.drm_fd = -1;
+ backend_data->drm_info.is_master = 0;
+ backend_data->drm_info.auth_drm_fd_func = _tbm_dumb_authenticated_drm_fd_handler;
+ backend_data->drm_info.user_data = bufmgr_data;
+
+ TBM_BACKEND_INFO("A backend requests an authenticated drm_fd.\n");
+ }
+
+ //Check if the tbm manager supports dma fence or not.
+ fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
+ if (fp != -1) {
+ char buf[1];
+ int length = read(fp, buf, 1);
+
+ if (length == 1 && buf[0] == '1')
+ bufmgr_data->use_dma_fence = 1;
+
+ close(fp);
+ }
+
+ /*Create Hash Table*/
+ bufmgr_data->hashBos = drmHashCreate();
+
+ /* alloc and register bufmgr_funcs */
+ bufmgr_funcs = calloc(1, sizeof(struct _hal_tbm_bufmgr_funcs));
+ if (!bufmgr_funcs) {
+ TBM_BACKEND_ERR("fail to alloc bufmgr_funcs!\n");
+ goto fail_alloc_bufmgr_funcs;
+ }
+ backend_data->bufmgr_funcs = bufmgr_funcs;
+
+ bufmgr_funcs->bufmgr_get_capabilities = tbm_dumb_bufmgr_get_capabilities;
+ bufmgr_funcs->bufmgr_get_supported_formats = tbm_dumb_bufmgr_get_supported_formats;
+ bufmgr_funcs->bufmgr_get_plane_data = tbm_dumb_bufmgr_get_plane_data;
+ bufmgr_funcs->bufmgr_alloc_bo = tbm_dumb_bufmgr_alloc_bo;
+ bufmgr_funcs->bufmgr_alloc_bo_with_format = NULL;
+ bufmgr_funcs->bufmgr_import_fd = tbm_dumb_bufmgr_import_fd;
+ bufmgr_funcs->bufmgr_import_key = tbm_dumb_bufmgr_import_key;
+
+ /* alloc and register bo_funcs */
+ bo_funcs = calloc(1, sizeof(struct _hal_tbm_bo_funcs));
+ if (!bo_funcs) {
+ TBM_BACKEND_ERR("fail to alloc bo_funcs!\n");
+ goto fail_alloc_bo_funcs;
+ }
+ backend_data->bo_funcs = bo_funcs;
+
+ bo_funcs->bo_free = tbm_dumb_bo_free;
+ bo_funcs->bo_get_size = tbm_dumb_bo_get_size;
+ bo_funcs->bo_get_memory_types = tbm_dumb_bo_get_memory_type;
+ bo_funcs->bo_get_handle = tbm_dumb_bo_get_handle;
+ bo_funcs->bo_map = tbm_dumb_bo_map;
+ bo_funcs->bo_unmap = tbm_dumb_bo_unmap;
+ bo_funcs->bo_lock = tbm_dumb_bo_lock;
+ bo_funcs->bo_unlock = tbm_dumb_bo_unlock;
+ bo_funcs->bo_export_fd = tbm_dumb_bo_export_fd;
+ bo_funcs->bo_export_key = tbm_dumb_bo_export_key;
+
+ TBM_BACKEND_DBG("drm_fd:%d\n", bufmgr_data->fd);
+
+ return HAL_TBM_ERROR_NONE;
+
+fail_alloc_bo_funcs:
+ free(bufmgr_funcs);
+fail_alloc_bufmgr_funcs:
+ if (bufmgr_data->hashBos)
+ drmHashDestroy(bufmgr_data->hashBos);
+fail_get_cap:
+ if (backend_data->drm_info.is_master)
+ close(bufmgr_data->fd);
+fail_open_drm:
+ free(bufmgr_data);
+fail_alloc_bufmgr_data:
+ free(backend_data);
+
+ *data = NULL;
+
+ return -1;
+}
+
/* Backend descriptor picked up by the HAL loader.
 * NOTE(review): field order follows struct hal_backend as declared in
 * hal-common.h — confirm against that header if the ABI changes. */
hal_backend hal_backend_tbm_data = {
	"dumb",				/* backend name */
	"Samsung",			/* vendor */
	HAL_ABI_VERSION_TIZEN_6_5,	/* supported HAL ABI version */
	hal_backend_tbm_dumb_init,	/* init entry point */
	hal_backend_tbm_dumb_exit	/* exit entry point */
};
\ No newline at end of file