1 /**************************************************************************
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
39 #include <sys/ioctl.h>
40 #include <sys/types.h>
47 #include <tbm_bufmgr.h>
48 #include <tbm_bufmgr_backend.h>
49 #include <exynos_drm.h>
51 #include <tbm_surface.h>
52 #include <tbm_surface_internal.h>
53 #include <tbm_drm_helper.h>
57 #include "tbm_bufmgr_tgl.h"
61 #define TBM_COLOR_FORMAT_COUNT 8
63 #define EXYNOS_DRM_NAME "exynos"
66 #define LOG_TAG "TBM_BACKEND"
74 static int initialized = 0;
75 static char app_name[128];
80 /* get the application name */
81 f = fopen("/proc/self/cmdline", "r");
86 memset(app_name, 0x00, sizeof(app_name));
88 if (fgets(app_name, 100, f) == NULL) {
95 slash = strrchr(app_name, '/');
97 memmove(app_name, slash + 1, strlen(slash));
104 #define TBM_EXYNOS_LOG(fmt, args...) LOGE("\033[31m" "[%s]" fmt "\033[0m", target_name(), ##args)
105 #define DBG(fmt, args...) {if (bDebug&01) LOGE(fmt, ##args); }
107 #define TBM_EXYNOS_LOG(...)
111 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
112 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
113 #define MAX(a, b) ((a) > (b) ? (a) : (b))
116 #define TBM_SURFACE_ALIGNMENT_PLANE (8)
117 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
119 #define TBM_SURFACE_ALIGNMENT_PLANE (64)
120 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (64)
123 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
124 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
126 #define SZ_1M 0x00100000
127 #define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
128 #define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
129 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
130 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
131 #define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
132 #define S5P_FIMV_NV12MT_HALIGN 128
133 #define S5P_FIMV_NV12MT_VALIGN 64
135 /* check condition */
136 #define EXYNOS_RETURN_IF_FAIL(cond) {\
138 TBM_EXYNOS_LOG("[%s] : '%s' failed.\n", __func__, #cond);\
143 #define EXYNOS_RETURN_VAL_IF_FAIL(cond, val) {\
145 TBM_EXYNOS_LOG("[%s] : '%s' failed.\n", __func__, #cond);\
150 struct dma_buf_info {
152 unsigned int fence_supported;
153 unsigned int padding;
156 #define DMA_BUF_ACCESS_READ 0x1
157 #define DMA_BUF_ACCESS_WRITE 0x2
158 #define DMA_BUF_ACCESS_DMA 0x4
159 #define DMA_BUF_ACCESS_MAX 0x8
161 #define DMA_FENCE_LIST_MAX 5
163 struct dma_buf_fence {
168 #define DMABUF_IOCTL_BASE 'F'
169 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
171 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
172 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
173 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
176 #define GLOBAL_KEY ((unsigned int)(-1))
178 #define TBM_EXYNOS_CACHE_INV 0x01 /**< cache invalidate */
179 #define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */
180 #define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */
181 #define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */
182 #define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */
186 DEVICE_CA, /* cache aware device */
187 DEVICE_CO /* cache oblivious device */
190 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
192 union _tbm_bo_cache_state {
195 unsigned int cntFlush:16; /*Flush all index for sync */
196 unsigned int isCached:1;
197 unsigned int isDirtied:2;
201 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
202 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
204 typedef struct _exynos_private {
206 struct _tbm_bo_exynos *bo_priv;
209 /* tbm buffor object for exynos */
210 struct _tbm_bo_exynos {
213 unsigned int name; /* FLINK ID */
215 unsigned int gem; /* GEM Handle */
217 unsigned int dmabuf; /* fd for dmabuf */
219 void *pBase; /* virtual address */
223 unsigned int flags_exynos;
224 unsigned int flags_tbm;
228 pthread_mutex_t mutex;
229 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
233 tbm_bo_cache_state cache_state;
234 unsigned int map_cnt;
238 /* tbm bufmgr private for exynos */
239 struct _tbm_bufmgr_exynos {
252 char *STR_DEVICE[] = {
268 uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = { TBM_FORMAT_RGBA8888,
278 #ifdef ENABLE_CACHECRTL
/* Create/initialize a tgl (tizen global lock) entry for the given key.
 * NOTE(review): return statements are not visible in this excerpt; presumably
 * returns nonzero on success, 0 on ioctl failure — confirm against full file. */
280 _tgl_init(int fd, unsigned int key)
282 struct tgl_attribute attr;
/* lock acquisition timeout: 1 second */
286 attr.timeout_ms = 1000;
288 err = ioctl(fd, TGL_IOC_INIT_LOCK, &attr);
/* log ioctl failure together with errno text and the offending key */
290 TBM_EXYNOS_LOG("[libtbm:%d] "
291 "error(%s) %s:%d key:%d\n",
292 getpid(), strerror(errno), __func__, __LINE__, key);
/* Destroy the tgl lock entry identified by key on the given tgl fd. */
300 _tgl_destroy(int fd, unsigned int key)
304 err = ioctl(fd, TGL_IOC_DESTROY_LOCK, key);
/* log ioctl failure with errno detail */
306 TBM_EXYNOS_LOG("[libtbm:%d] "
307 "error(%s) %s:%d key:%d\n",
308 getpid(), strerror(errno), __func__, __LINE__, key);
/* Store a 32-bit user value in the tgl entry for key (TGL_IOC_SET_DATA).
 * Used below to persist per-bo cache state across processes. */
315 _tgl_set_data(int fd, unsigned int key, unsigned int val)
319 struct tgl_user_data arg;
323 err = ioctl(fd, TGL_IOC_SET_DATA, &arg);
/* log ioctl failure with errno detail */
325 TBM_EXYNOS_LOG("[libtbm:%d] "
326 "error(%s) %s:%d key:%d\n",
327 getpid(), strerror(errno), __func__, __LINE__, key);
334 static inline unsigned int
/* Read back the 32-bit user value stored for key (TGL_IOC_GET_DATA).
 * Counterpart of _tgl_set_data(); used to fetch shared cache state. */
335 _tgl_get_data(int fd, unsigned int key)
338 struct tgl_user_data arg = { 0, };
341 err = ioctl(fd, TGL_IOC_GET_DATA, &arg);
/* log ioctl failure with errno detail */
343 TBM_EXYNOS_LOG("[libtbm:%d] "
344 "error(%s) %s:%d key:%d\n",
345 getpid(), strerror(errno), __func__, __LINE__, key);
/* Flush/invalidate CPU caches for one bo (or all bos when bo_exynos is NULL)
 * via the DRM_EXYNOS_GEM_CACHE_OP ioctl.  `flags` is a TBM_EXYNOS_CACHE_*
 * bitmask mapped onto the corresponding EXYNOS_DRM_CACHE_* kernel flags. */
353 _exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
355 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
357 /* cache flush is managed by kernel side when using dma-fence. */
358 if (bufmgr_exynos->use_dma_fence)
361 struct drm_exynos_gem_cache_op cache_op = {0, };
364 /* if bo_exynos is null, do cache_flush_all */
/* range flush: operate on this bo's mapped virtual address and size */
367 cache_op.usr_addr = (uint64_t)((uint32_t)bo_exynos->pBase);
368 cache_op.size = bo_exynos->size;
/* no bo given: force a full flush of everything */
370 flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
372 cache_op.usr_addr = 0;
/* translate invalidate request: ALL vs RANGE variant */
376 if (flags & TBM_EXYNOS_CACHE_INV) {
377 if (flags & TBM_EXYNOS_CACHE_ALL)
378 cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
380 cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
/* translate clean request: ALL vs RANGE variant */
383 if (flags & TBM_EXYNOS_CACHE_CLN) {
384 if (flags & TBM_EXYNOS_CACHE_ALL)
385 cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
387 cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
/* "ALL" additionally targets every cache on every core */
390 if (flags & TBM_EXYNOS_CACHE_ALL)
391 cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
393 ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
396 TBM_EXYNOS_LOG("error fail to flush the cache.\n");
/* Set up cache-state tracking for a newly created/imported bo: create the
 * per-bo tgl entry (keyed by flink name) and reset its state to "not cached,
 * not dirtied".  No-op when dma-fence manages coherency.
 * NOTE(review): the `import` parameter's use is not visible in this excerpt. */
405 _bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
407 #ifdef ENABLE_CACHECRTL
408 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
409 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
/* kernel handles coherency with dma-fence; nothing to track here */
411 if (bufmgr_exynos->use_dma_fence)
414 tbm_bo_cache_state cache_state;
/* one tgl entry per bo, keyed by the bo's flink name */
416 _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
/* initial state: clean and uncached */
419 cache_state.data.isDirtied = DEVICE_NONE;
420 cache_state.data.isCached = 0;
421 cache_state.data.cntFlush = 0;
423 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
/* Decide, on map, whether this bo's CPU caches need invalidating/cleaning
 * based on who (CPU vs non-CPU device) last dirtied it, then perform the
 * flush and update the shared state kept in tgl. */
431 _bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
433 #ifdef ENABLE_CACHECRTL
434 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
435 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
/* kernel-side coherency via dma-fence: no manual tracking needed */
437 if (bufmgr_exynos->use_dma_fence)
441 unsigned short cntFlush = 0;
/* non-cachable bos never need software cache maintenance */
443 if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE))
446 /* get cache state of a bo */
447 bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
450 /* get global cache flush count */
451 cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
/* CPU access: invalidate if a cache-oblivious device wrote while data
 * was resident in CPU caches; record CPU as the dirtier on writes */
453 if (device == TBM_DEVICE_CPU) {
454 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
455 bo_exynos->cache_state.data.isCached)
456 need_flush = TBM_EXYNOS_CACHE_INV;
458 bo_exynos->cache_state.data.isCached = 1;
459 if (opt & TBM_OPTION_WRITE)
460 bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
462 if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
463 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
/* device access: clean CPU caches if CPU dirtied the bo and no global
 * flush has happened since (cntFlush matches) */
466 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
467 bo_exynos->cache_state.data.isCached &&
468 bo_exynos->cache_state.data.cntFlush == cntFlush)
469 need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
471 if (opt & TBM_OPTION_WRITE)
472 bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
474 if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
475 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
/* a flush-all bumps the global flush counter so other bos can detect it */
480 if (need_flush & TBM_EXYNOS_CACHE_ALL)
481 _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush))
483 /* call cache flush */
484 _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
486 DBG("[libtbm:%d] \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
488 bo_exynos->cache_state.data.isCached,
489 bo_exynos->cache_state.data.isDirtied,
/* On unmap, persist this bo's cache state (including the current global
 * flush count) back into its tgl entry so other processes see it. */
499 _bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
501 #ifdef ENABLE_CACHECRTL
502 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
503 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
/* dma-fence path needs no manual state keeping */
505 if (bufmgr_exynos->use_dma_fence)
508 unsigned short cntFlush = 0;
510 /* get global cache flush count */
511 cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
513 /* save global cache flush count */
514 bo_exynos->cache_state.data.cntFlush = cntFlush;
515 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
516 bo_exynos->cache_state.val);
/* Tear down the per-bo tgl entry when the bo is freed. */
523 _bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
525 #ifdef ENABLE_CACHECRTL
526 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
527 EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
/* nothing was created on the dma-fence path */
529 if (bufmgr_exynos->use_dma_fence)
532 _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
/* Open the tgl device (primary path, then fallback path) and create the
 * GLOBAL_KEY entry that holds the process-shared flush counter. */
537 _bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
539 #ifdef ENABLE_CACHECRTL
540 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
/* kernel-side coherency: no tgl needed */
542 if (bufmgr_exynos->use_dma_fence)
545 /* open tgl fd for saving cache flush data */
546 bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
/* primary device node failed; try the alternate node before giving up */
548 if (bufmgr_exynos->tgl_fd < 0) {
549 bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
550 if (bufmgr_exynos->tgl_fd < 0) {
551 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
552 "error: Fail to open global_lock:%s\n",
553 getpid(), tgl_devfile);
/* create the global entry; close the fd on failure to avoid a leak */
558 if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
559 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
560 "error: Fail to initialize the tgl\n",
563 close(bufmgr_exynos->tgl_fd);
/* Close the tgl fd opened by _bufmgr_init_cache_state(), if any. */
572 _bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
574 #ifdef ENABLE_CACHECRTL
575 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
577 if (bufmgr_exynos->use_dma_fence)
/* only close if the fd was actually opened */
580 if (bufmgr_exynos->tgl_fd >= 0)
581 close(bufmgr_exynos->tgl_fd);
/* Open the exynos DRM device.  First try drmOpen() by driver name; on
 * failure fall back to enumerating "card*" drm devices via udev, matching
 * the one whose parent sysname is "exynos-drm", and opening its devnode.
 * Returns the opened fd (error paths unref the udev device first). */
586 _tbm_exynos_open_drm()
590 fd = drmOpen(EXYNOS_DRM_NAME, NULL);
/* drmOpen failed: warn and fall through to the udev search below */
592 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
593 "warning %s:%d fail to open drm\n",
594 getpid(), __FUNCTION__, __LINE__);
598 struct udev *udev = NULL;
599 struct udev_enumerate *e = NULL;
600 struct udev_list_entry *entry = NULL;
601 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
602 const char *filepath;
606 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
607 "%s:%d search drm-device by udev\n",
608 getpid(), __FUNCTION__, __LINE__);
612 TBM_EXYNOS_LOG("udev_new() failed.\n");
/* enumerate /dev/dri/card* nodes in the drm subsystem */
616 e = udev_enumerate_new(udev);
617 udev_enumerate_add_match_subsystem(e, "drm");
618 udev_enumerate_add_match_sysname(e, "card[0-9]*");
619 udev_enumerate_scan_devices(e);
621 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
622 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
623 udev_list_entry_get_name(entry));
624 device_parent = udev_device_get_parent(device);
625 /* Not need unref device_parent. device_parent and device have same refcnt */
/* keep the card whose parent driver is exynos-drm */
627 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
629 DBG("[%s] Found render device: '%s' (%s)\n",
631 udev_device_get_syspath(drm_device),
632 udev_device_get_sysname(device_parent));
/* drop devices that did not match */
636 udev_device_unref(device);
639 udev_enumerate_unref(e);
641 /* Get device file path. */
642 filepath = udev_device_get_devnode(drm_device);
644 TBM_EXYNOS_LOG("udev_device_get_devnode() failed.\n");
645 udev_device_unref(drm_device);
650 /* Open DRM device file and check validity. */
651 fd = open(filepath, O_RDWR | O_CLOEXEC);
653 TBM_EXYNOS_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
654 udev_device_unref(drm_device);
/* NOTE(review): fstat validity check lines are elided in this excerpt */
661 TBM_EXYNOS_LOG("fstat() failed %s.\n");
663 udev_device_unref(drm_device);
668 udev_device_unref(drm_device);
/* Probe via udev whether an exynos-drm render node ("renderD*") exists.
 * Compiled out (early return) unless USE_RENDER_NODE is defined. */
676 _check_render_node(void)
678 struct udev *udev = NULL;
679 struct udev_enumerate *e = NULL;
680 struct udev_list_entry *entry = NULL;
681 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
683 #ifndef USE_RENDER_NODE
689 TBM_EXYNOS_LOG("udev_new() failed.\n");
/* enumerate /dev/dri/renderD* nodes in the drm subsystem */
693 e = udev_enumerate_new(udev);
694 udev_enumerate_add_match_subsystem(e, "drm");
695 udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
696 udev_enumerate_scan_devices(e);
698 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
699 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
700 udev_list_entry_get_name(entry));
701 device_parent = udev_device_get_parent(device);
702 /* Not need unref device_parent. device_parent and device have same refcnt */
/* match render nodes belonging to the exynos-drm driver */
704 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
706 DBG("[%s] Found render device: '%s' (%s)\n",
708 udev_device_get_syspath(drm_device),
709 udev_device_get_sysname(device_parent));
713 udev_device_unref(device);
716 udev_enumerate_unref(e);
720 udev_device_unref(drm_device);
724 udev_device_unref(drm_device);
/* Find and open the exynos-drm render node ("renderD*") via udev.
 * Same enumeration pattern as _tbm_exynos_open_drm() but matching render
 * nodes instead of card nodes; returns the opened fd. */
729 _get_render_node(void)
731 struct udev *udev = NULL;
732 struct udev_enumerate *e = NULL;
733 struct udev_list_entry *entry = NULL;
734 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
735 const char *filepath;
742 TBM_EXYNOS_LOG("udev_new() failed.\n");
/* enumerate /dev/dri/renderD* nodes in the drm subsystem */
746 e = udev_enumerate_new(udev);
747 udev_enumerate_add_match_subsystem(e, "drm");
748 udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
749 udev_enumerate_scan_devices(e);
751 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
752 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
753 udev_list_entry_get_name(entry));
754 device_parent = udev_device_get_parent(device);
755 /* Not need unref device_parent. device_parent and device have same refcnt */
/* keep the render node whose parent driver is exynos-drm */
757 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
759 DBG("[%s] Found render device: '%s' (%s)\n",
761 udev_device_get_syspath(drm_device),
762 udev_device_get_sysname(device_parent));
766 udev_device_unref(device);
769 udev_enumerate_unref(e);
771 /* Get device file path. */
772 filepath = udev_device_get_devnode(drm_device);
774 TBM_EXYNOS_LOG("udev_device_get_devnode() failed.\n");
775 udev_device_unref(drm_device);
780 /* Open DRM device file and check validity. */
781 fd = open(filepath, O_RDWR | O_CLOEXEC);
783 TBM_EXYNOS_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
784 udev_device_unref(drm_device);
/* NOTE(review): fstat validity check lines are elided in this excerpt */
791 TBM_EXYNOS_LOG("fstat() failed %s.\n");
792 udev_device_unref(drm_device);
798 udev_device_unref(drm_device);
/* Translate TBM bo allocation flags into exynos GEM flags:
 * SCANOUT -> contiguous memory, WC/NONCACHABLE/default -> matching cache
 * attribute.  Returns the EXYNOS_BO_* bitmask. */
805 _get_exynos_flag_from_tbm(unsigned int ftbm)
807 unsigned int flags = 0;
/* scanout buffers must be physically contiguous */
809 if (ftbm & TBM_BO_SCANOUT)
810 flags |= EXYNOS_BO_CONTIG;
812 flags |= EXYNOS_BO_NONCONTIG;
/* cache attribute: write-combined beats non-cachable beats cachable */
814 if (ftbm & TBM_BO_WC)
815 flags |= EXYNOS_BO_WC;
816 else if (ftbm & TBM_BO_NONCACHABLE)
817 flags |= EXYNOS_BO_NONCACHABLE;
819 flags |= EXYNOS_BO_CACHABLE;
/* Inverse of _get_exynos_flag_from_tbm(): map exynos GEM flags back to TBM
 * flags (contiguous -> SCANOUT, cachable -> DEFAULT, else NONCACHABLE). */
825 _get_tbm_flag_from_exynos(unsigned int fexynos)
827 unsigned int flags = 0;
/* non-contiguous is the default; contiguous implies a scanout buffer */
829 if (fexynos & EXYNOS_BO_NONCONTIG)
830 flags |= TBM_BO_DEFAULT;
832 flags |= TBM_BO_SCANOUT;
834 if (fexynos & EXYNOS_BO_WC)
836 else if (fexynos & EXYNOS_BO_CACHABLE)
837 flags |= TBM_BO_DEFAULT;
839 flags |= TBM_BO_NONCACHABLE;
/* Get the global flink name for a GEM handle via DRM_IOCTL_GEM_FLINK.
 * The flink name is used as the cross-process key in the bo hash and tgl. */
845 _get_name(int fd, unsigned int gem)
847 struct drm_gem_flink arg = {0,};
850 if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
851 TBM_EXYNOS_LOG("error fail to get flink from gem:%d (DRM_IOCTL_GEM_FLINK)\n",
856 return (unsigned int)arg.name;
/* Produce a device-specific handle for the bo:
 *  - DEFAULT/2D: the GEM handle itself,
 *  - CPU: a (lazily created and cached) mmap of the buffer,
 *  - 3D/MM: a (lazily exported and cached) dma-buf fd via PRIME.
 * Unknown devices return a NULL handle. */
860 _exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
862 tbm_bo_handle bo_handle;
864 memset(&bo_handle, 0x0, sizeof(uint64_t));
867 case TBM_DEVICE_DEFAULT:
869 bo_handle.u32 = (uint32_t)bo_exynos->gem;
/* CPU access: map once and cache the pointer in pBase */
872 if (!bo_exynos->pBase) {
873 struct drm_exynos_gem_map arg = {0,};
876 arg.handle = bo_exynos->gem;
877 if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
879 TBM_EXYNOS_LOG("error Cannot map_dumb gem=%d\n", bo_exynos->gem);
880 return (tbm_bo_handle) NULL;
/* map the whole bo read/write at the offset the kernel handed back */
883 map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
884 bo_exynos->fd, arg.offset);
885 if (map == MAP_FAILED) {
886 TBM_EXYNOS_LOG("error Cannot usrptr gem=%d\n", bo_exynos->gem);
887 return (tbm_bo_handle) NULL;
889 bo_exynos->pBase = map;
891 bo_handle.ptr = (void *)bo_exynos->pBase;
/* already exported: reuse the cached dma-buf fd */
895 if (bo_exynos->dmabuf) {
896 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
/* otherwise export the GEM handle as a dma-buf fd via PRIME */
900 if (!bo_exynos->dmabuf) {
901 struct drm_prime_handle arg = {0, };
903 arg.handle = bo_exynos->gem;
904 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
905 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
906 return (tbm_bo_handle) NULL;
908 bo_exynos->dmabuf = arg.fd;
911 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
/* second device case uses the identical PRIME-export path */
915 if (!bo_exynos->dmabuf) {
916 struct drm_prime_handle arg = {0, };
918 arg.handle = bo_exynos->gem;
919 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
920 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
921 return (tbm_bo_handle) NULL;
923 bo_exynos->dmabuf = arg.fd;
926 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
929 TBM_EXYNOS_LOG("error Not supported device:%d\n", device);
930 bo_handle.ptr = (void *) NULL;
/* Backend callback: return the allocated size (bytes) of the bo, or 0 on
 * invalid arguments. */
938 tbm_exynos_bo_size(tbm_bo bo)
940 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
942 tbm_bo_exynos bo_exynos;
944 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
945 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
947 return bo_exynos->size;
/* Backend callback: allocate a new GEM buffer of `size` bytes with the
 * given TBM flags.  Creates the GEM object, flink name, cache state and
 * (on the dma-fence path) a dma-buf fd, then registers the bo in the
 * flink-name hash.  Returns the private bo pointer, or NULL/0 on failure. */
951 tbm_exynos_bo_alloc(tbm_bo bo, int size, int flags)
953 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
955 tbm_bo_exynos bo_exynos;
956 tbm_bufmgr_exynos bufmgr_exynos;
957 unsigned int exynos_flags;
959 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
960 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
962 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
964 TBM_EXYNOS_LOG("error fail to allocate the bo private\n");
/* translate TBM flags and apply scanout override (condition partly elided) */
968 exynos_flags = _get_exynos_flag_from_tbm(flags);
969 if ((flags & TBM_BO_SCANOUT) &&
971 exynos_flags |= EXYNOS_BO_NONCONTIG;
974 struct drm_exynos_gem_create arg = {0, };
976 arg.size = (uint64_t)size;
977 arg.flags = exynos_flags;
978 if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
980 TBM_EXYNOS_LOG("error Cannot create bo(flag:%x, size:%d)\n", arg.flags,
981 (unsigned int)arg.size);
/* record handle, sizes, flags and the flink name used as hash key */
986 bo_exynos->fd = bufmgr_exynos->fd;
987 bo_exynos->gem = arg.handle;
988 bo_exynos->size = size;
989 bo_exynos->flags_tbm = flags;
990 bo_exynos->flags_exynos = exynos_flags;
991 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
993 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
994 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
999 pthread_mutex_init(&bo_exynos->mutex, NULL);
/* dma-fence needs a dma-buf fd up front: export the GEM handle via PRIME */
1001 if (bufmgr_exynos->use_dma_fence
1002 && !bo_exynos->dmabuf) {
1003 struct drm_prime_handle arg = {0, };
1005 arg.handle = bo_exynos->gem;
1006 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1007 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
1011 bo_exynos->dmabuf = arg.fd;
1014 /* add bo to hash */
1015 PrivGem *privGem = calloc(1, sizeof(PrivGem));
1018 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1019 "error %s:%d Fail to calloc privGem\n",
1020 getpid(), __func__, __LINE__);
1025 privGem->ref_count = 1;
1026 privGem->bo_priv = bo_exynos;
1028 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1029 (void *)privGem) < 0) {
1030 TBM_EXYNOS_LOG("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1033 DBG(" [%s] bo:%p, gem:%d(%d), flags:%d(%d), size:%d\n", target_name(),
1035 bo_exynos->gem, bo_exynos->name,
1036 flags, exynos_flags,
1039 return (void *)bo_exynos;
/* Backend callback: release a bo — unmap the CPU mapping, close the
 * dma-buf fd, drop the hash reference (freeing on last ref), destroy
 * cache state, and finally close the GEM handle. */
1043 tbm_exynos_bo_free(tbm_bo bo)
1045 tbm_bo_exynos bo_exynos;
1046 tbm_bufmgr_exynos bufmgr_exynos;
1051 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1052 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
1054 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1055 EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
1057 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, size:%d\n", target_name(),
1059 bo_exynos->gem, bo_exynos->name,
/* tear down the CPU mapping created lazily in _exynos_bo_handle() */
1063 if (bo_exynos->pBase) {
1064 if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
1065 TBM_EXYNOS_LOG("error bo:%p fail to munmap(%s)\n",
1066 bo, strerror(errno));
/* close the exported dma-buf fd, if one was created */
1071 if (bo_exynos->dmabuf) {
1072 close(bo_exynos->dmabuf);
1073 bo_exynos->dmabuf = 0;
1076 /* delete bo from hash */
1077 PrivGem *privGem = NULL;
1080 ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name,
/* drop one reference; remove the hash entry when it hits zero */
1083 privGem->ref_count--;
1084 if (privGem->ref_count == 0) {
1085 drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
1090 TBM_EXYNOS_LOG("warning Cannot find bo to Hash(%d), ret=%d\n", bo_exynos->name,
1094 _bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
1096 /* Free gem handle */
1097 struct drm_gem_close arg = {0, };
1099 memset(&arg, 0, sizeof(arg));
1100 arg.handle = bo_exynos->gem;
1101 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
1102 TBM_EXYNOS_LOG("error bo:%p fail to gem close.(%s)\n",
1103 bo, strerror(errno));
/* Backend callback: import a bo by its global flink name.  Returns the
 * already-imported private bo from the hash when present; otherwise opens
 * the GEM object, queries its exynos flags, sets up cache state and the
 * dma-buf fd, and registers a new hash entry. */
1111 tbm_exynos_bo_import(tbm_bo bo, unsigned int key)
1113 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1115 tbm_bufmgr_exynos bufmgr_exynos;
1116 tbm_bo_exynos bo_exynos;
1117 PrivGem *privGem = NULL;
1120 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1121 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
/* fast path: this flink name was already imported in this process */
1123 ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&privGem);
1125 return privGem->bo_priv;
1127 struct drm_gem_open arg = {0, };
1128 struct drm_exynos_gem_info info = {0, };
/* open the GEM object by flink name */
1131 if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1132 TBM_EXYNOS_LOG("error Cannot open gem name=%d\n", key);
/* fetch the exynos allocation flags for the imported object */
1136 info.handle = arg.handle;
1137 if (drmCommandWriteRead(bufmgr_exynos->fd,
1140 sizeof(struct drm_exynos_gem_info))) {
1141 TBM_EXYNOS_LOG("error Cannot get gem info=%d\n", key);
1145 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1147 TBM_EXYNOS_LOG("error fail to allocate the bo private\n");
1151 bo_exynos->fd = bufmgr_exynos->fd;
1152 bo_exynos->gem = arg.handle;
1153 bo_exynos->size = arg.size;
1154 bo_exynos->flags_exynos = info.flags;
1155 bo_exynos->name = key;
/* derive TBM flags from the kernel-reported exynos flags */
1156 bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1158 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1159 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
/* export a dma-buf fd for the imported bo (guard condition partly elided) */
1164 if (!bo_exynos->dmabuf) {
1165 struct drm_prime_handle arg = {0, };
1167 arg.handle = bo_exynos->gem;
1168 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1169 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
1173 bo_exynos->dmabuf = arg.fd;
1176 /* add bo to hash */
1179 privGem = calloc(1, sizeof(PrivGem));
1181 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1182 "error %s:%d Fail to calloc privGem\n",
1183 getpid(), __func__, __LINE__);
1188 privGem->ref_count = 1;
1189 privGem->bo_priv = bo_exynos;
1191 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1192 (void *)privGem) < 0) {
1193 TBM_EXYNOS_LOG("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1196 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1199 bo_exynos->gem, bo_exynos->name,
1201 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1204 return (void *)bo_exynos;
/* Backend callback: import a bo from a dma-buf fd.  Converts the fd to a
 * GEM handle via PRIME, resolves its flink name, and reuses the hashed bo
 * if the same GEM object is already imported.  Otherwise determines the
 * real size (lseek on the prime fd, falling back to the kernel info),
 * builds the private bo, and registers it in the hash. */
1208 tbm_exynos_bo_import_fd(tbm_bo bo, tbm_fd key)
1210 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1212 tbm_bufmgr_exynos bufmgr_exynos;
1213 tbm_bo_exynos bo_exynos;
1214 PrivGem *privGem = NULL;
1218 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1219 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1221 /*getting handle from fd*/
1222 unsigned int gem = 0;
1223 struct drm_prime_handle arg = {0, };
1227 if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1228 TBM_EXYNOS_LOG("error bo:%p Cannot get gem handle from fd:%d (%s)\n",
1229 bo, arg.fd, strerror(errno));
/* flink name doubles as the process-wide hash key */
1234 name = _get_name(bufmgr_exynos->fd, gem);
1236 TBM_EXYNOS_LOG("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1237 bo, gem, key, strerror(errno));
/* fast path: same GEM object already imported in this process */
1241 ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&privGem);
1243 if (gem == privGem->bo_priv->gem)
1244 return privGem->bo_priv;
1247 unsigned int real_size = -1;
1248 struct drm_exynos_gem_info info = {0, };
1250 /* Determine size of bo. The fd-to-handle ioctl really should
1251 * return the size, but it doesn't. If we have kernel 3.12 or
1252 * later, we can lseek on the prime fd to get the size. Older
1253 * kernels will just fail, in which case we fall back to the
1254 * provided (estimated or guess size).
1256 real_size = lseek(key, 0, SEEK_END);
1259 if (drmCommandWriteRead(bufmgr_exynos->fd,
1262 sizeof(struct drm_exynos_gem_info))) {
1263 TBM_EXYNOS_LOG("error bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
1264 bo, gem, key, strerror(errno));
/* lseek failed (old kernel): trust the kernel-reported size instead */
1268 if (real_size == -1)
1269 real_size = info.size;
1271 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1273 TBM_EXYNOS_LOG("error bo:%p fail to allocate the bo private\n", bo);
1277 bo_exynos->fd = bufmgr_exynos->fd;
1278 bo_exynos->gem = gem;
1279 bo_exynos->size = real_size;
1280 bo_exynos->flags_exynos = info.flags;
1281 bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1282 bo_exynos->name = name;
1284 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1285 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
1290 /* add bo to hash */
1293 privGem = calloc(1, sizeof(PrivGem));
1295 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1296 "error %s:%d Fail to calloc privGem\n",
1297 getpid(), __func__, __LINE__);
1302 privGem->ref_count = 1;
1303 privGem->bo_priv = bo_exynos;
1305 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1306 (void *)privGem) < 0) {
1307 TBM_EXYNOS_LOG("error bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1308 bo, bo_exynos->name, gem, key);
1311 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1314 bo_exynos->gem, bo_exynos->name,
1317 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1320 return (void *)bo_exynos;
/* Backend callback: export the bo as a global flink name, creating the
 * name lazily via DRM_IOCTL_GEM_FLINK on first export. */
1324 tbm_exynos_bo_export(tbm_bo bo)
1326 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1328 tbm_bo_exynos bo_exynos;
1330 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1331 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
/* lazily obtain the flink name on first export */
1333 if (!bo_exynos->name) {
1334 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1335 if (!bo_exynos->name) {
1336 TBM_EXYNOS_LOG("error Cannot get name\n");
1341 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1344 bo_exynos->gem, bo_exynos->name,
1346 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1349 return (unsigned int)bo_exynos->name;
/* Backend callback: export the bo as a dma-buf fd via PRIME.  Returns the
 * new fd (caller owns it), or the negative ioctl result on failure. */
1353 tbm_exynos_bo_export_fd(tbm_bo bo)
1355 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1357 tbm_bo_exynos bo_exynos;
1360 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1361 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, -1);
1363 struct drm_prime_handle arg = {0, };
1365 arg.handle = bo_exynos->gem;
1366 ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1368 TBM_EXYNOS_LOG("error bo:%p Cannot dmabuf=%d (%s)\n",
1369 bo, bo_exynos->gem, strerror(errno));
/* propagate the ioctl error code as the (invalid) fd */
1370 return (tbm_fd) ret;
1373 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1376 bo_exynos->gem, bo_exynos->name,
1379 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1382 return (tbm_fd)arg.fd;
1385 static tbm_bo_handle
/* Backend callback: return a device-specific handle for the bo without
 * touching map counts or cache state (thin wrapper over _exynos_bo_handle). */
1386 tbm_exynos_bo_get_handle(tbm_bo bo, int device)
1388 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1390 tbm_bo_handle bo_handle;
1391 tbm_bo_exynos bo_exynos;
1393 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1394 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
/* a zero GEM handle means the bo was never allocated/imported */
1396 if (!bo_exynos->gem) {
1397 TBM_EXYNOS_LOG("error Cannot map gem=%d\n", bo_exynos->gem);
1398 return (tbm_bo_handle) NULL;
1401 DBG("[%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1404 bo_exynos->gem, bo_exynos->name,
1406 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1408 STR_DEVICE[device]);
1410 /*Get mapped bo_handle*/
1411 bo_handle = _exynos_bo_handle(bo_exynos, device);
1412 if (bo_handle.ptr == NULL) {
1413 TBM_EXYNOS_LOG("error Cannot get handle: gem:%d, device:%d\n", bo_exynos->gem,
1415 return (tbm_bo_handle) NULL;
1421 static tbm_bo_handle
/* Backend callback: map the bo for the given device/options.  On the first
 * map (map_cnt == 0) runs the cache-coherency logic in _bo_set_cache_state,
 * then records the device and bumps the map count. */
1422 tbm_exynos_bo_map(tbm_bo bo, int device, int opt)
1424 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1426 tbm_bo_handle bo_handle;
1427 tbm_bo_exynos bo_exynos;
1428 tbm_bufmgr_exynos bufmgr_exynos;
1430 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1431 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, (tbm_bo_handle)NULL);
1433 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1434 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
/* cannot map a bo that was never allocated/imported */
1436 if (!bo_exynos->gem) {
1437 TBM_EXYNOS_LOG("error Cannot map gem=%d\n", bo_exynos->gem);
1438 return (tbm_bo_handle) NULL;
1441 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, %s, %s\n", target_name(),
1443 bo_exynos->gem, bo_exynos->name,
1448 /*Get mapped bo_handle*/
1449 bo_handle = _exynos_bo_handle(bo_exynos, device);
1450 if (bo_handle.ptr == NULL) {
1451 TBM_EXYNOS_LOG("error Cannot get handle: gem:%d, device:%d, opt:%d\n",
1452 bo_exynos->gem, device, opt);
1453 return (tbm_bo_handle) NULL;
/* cache maintenance only on the outermost map */
1456 if (bo_exynos->map_cnt == 0)
1457 _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
/* remember which device mapped last, for the flush decision in unmap */
1459 bo_exynos->last_map_device = device;
1461 bo_exynos->map_cnt++;
/* Backend callback: unmap the bo.  Decrements the map count; on the last
 * unmap saves cache state, and (with ENABLE_CACHECRTL) flushes all caches
 * if the last mapper was the CPU. */
1467 tbm_exynos_bo_unmap(tbm_bo bo)
1469 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1471 tbm_bo_exynos bo_exynos;
1472 tbm_bufmgr_exynos bufmgr_exynos;
1474 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1475 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1477 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1478 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1481 if (!bo_exynos->gem)
1484 bo_exynos->map_cnt--;
/* last unmap: persist cache state for other processes */
1486 if (bo_exynos->map_cnt == 0)
1487 _bo_save_cache_state(bufmgr_exynos, bo_exynos);
1489 #ifdef ENABLE_CACHECRTL
/* CPU wrote through its caches; flush everything before a device reads */
1490 if (bo_exynos->last_map_device == TBM_DEVICE_CPU)
1491 _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL);
/* -1 marks "no device currently mapping" */
1494 bo_exynos->last_map_device = -1;
1496 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d\n", target_name(),
1498 bo_exynos->gem, bo_exynos->name,
/* Lock a bo for exclusive/shared access. Only compiled to do real work
 * when the backend controls locking itself (ALWAYS_BACKEND_CTRL unset):
 * 3D device access is serialized via the dma-buf sync fence ioctl, CPU
 * access via an advisory fcntl() file lock on the dmabuf fd. */
1505 tbm_exynos_bo_lock(tbm_bo bo, int device, int opt)
1507 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1509 #ifndef ALWAYS_BACKEND_CTRL
1510 tbm_bufmgr_exynos bufmgr_exynos;
1511 tbm_bo_exynos bo_exynos;
1512 struct dma_buf_fence fence;
1513 struct flock filelock;
/* Only 3D and CPU device locks are supported. */
1516 if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1517 DBG("[libtbm-exynos:%d] %s not support device type,\n", getpid(),
1522 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1523 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1525 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1526 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1528 memset(&fence, 0, sizeof(struct dma_buf_fence));
1530 /* Check if the given type is valid or not. */
1531 if (opt & TBM_OPTION_WRITE) {
1532 if (device == TBM_DEVICE_3D)
1533 fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1534 } else if (opt & TBM_OPTION_READ) {
1535 if (device == TBM_DEVICE_3D)
1536 fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1538 TBM_EXYNOS_LOG("error Invalid argument\n");
1542 /* Check if the tbm manager supports dma fence or not. */
1543 if (!bufmgr_exynos->use_dma_fence) {
1544 TBM_EXYNOS_LOG("error Not support DMA FENCE(%s)\n", strerror(errno));
/* 3D path: ask the kernel dmabuf_sync driver for a fence on this buffer. */
1549 if (device == TBM_DEVICE_3D) {
1550 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1552 TBM_EXYNOS_LOG("error Cannot set GET FENCE(%s)\n", strerror(errno));
/* CPU path: write locks are exclusive, read locks shared. */
1556 if (opt & TBM_OPTION_WRITE)
1557 filelock.l_type = F_WRLCK;
1559 filelock.l_type = F_RDLCK;
1561 filelock.l_whence = SEEK_CUR;
1562 filelock.l_start = 0;
/* F_SETLKW blocks until the advisory lock can be taken. */
1565 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
/* Record the acquired fence in the per-bo list under the bo mutex so
 * unlock can release fences in FIFO order. */
1569 pthread_mutex_lock(&bo_exynos->mutex);
1571 if (device == TBM_DEVICE_3D) {
1574 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1575 if (bo_exynos->dma_fence[i].ctx == 0) {
1576 bo_exynos->dma_fence[i].type = fence.type;
1577 bo_exynos->dma_fence[i].ctx = fence.ctx;
1582 if (i == DMA_FENCE_LIST_MAX) {
1583 /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
1584 TBM_EXYNOS_LOG("error fence list is full\n");
1588 pthread_mutex_unlock(&bo_exynos->mutex);
1590 DBG("[%s] DMABUF_IOCTL_GET_FENCE! bo:%p, gem:%d(%d), fd:%ds\n", target_name(),
1592 bo_exynos->gem, bo_exynos->name,
1594 #endif /* ALWAYS_BACKEND_CTRL */
/* Release the oldest lock taken by tbm_exynos_bo_lock(): put the dma-buf
 * fence back to the kernel (3D) or drop the fcntl file lock (CPU).
 * Compiled to a no-op when ALWAYS_BACKEND_CTRL is defined. */
1600 tbm_exynos_bo_unlock(tbm_bo bo)
1602 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1604 #ifndef ALWAYS_BACKEND_CTRL
1605 tbm_bo_exynos bo_exynos;
1606 struct dma_buf_fence fence;
1607 struct flock filelock;
1608 unsigned int dma_type = 0;
1611 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1612 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
/* dma_type records whether the head fence was a 3D (DMA) lock. */
1614 if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
1617 if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1618 DBG("[libtbm-exynos:%d] %s FENCE not support or ignored,\n", getpid(),
/* NOTE(review): this condition is byte-identical to the one above
 * (ctx == 0 && dma_type), which makes this branch unreachable. Given
 * the "not 3D/CPU" message it was presumably meant to test !dma_type —
 * TODO confirm against the upstream source. */
1623 if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1624 DBG("[libtbm-exynos:%d] %s device type is not 3D/CPU,\n", getpid(),
/* Pop the oldest fence from the per-bo FIFO under the bo mutex. */
1629 pthread_mutex_lock(&bo_exynos->mutex);
1632 fence.type = bo_exynos->dma_fence[0].type;
1633 fence.ctx = bo_exynos->dma_fence[0].ctx;
1636 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1637 bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
1638 bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
1640 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1641 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1643 pthread_mutex_unlock(&bo_exynos->mutex);
/* 3D lock: return the fence to the dmabuf_sync driver. */
1646 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1648 TBM_EXYNOS_LOG("error Can not set PUT FENCE(%s)\n", strerror(errno));
/* CPU lock: release the advisory file lock on the dmabuf fd. */
1652 filelock.l_type = F_UNLCK;
1653 filelock.l_whence = SEEK_CUR;
1654 filelock.l_start = 0;
1657 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1661 DBG("[%s] DMABUF_IOCTL_PUT_FENCE! bo:%p, gem:%d(%d), fd:%ds\n", target_name(),
1663 bo_exynos->gem, bo_exynos->name,
1665 #endif /* ALWAYS_BACKEND_CTRL */
/* Tear down the backend: free every bo left in the hash table, release
 * cache-state bookkeeping, stop the wayland auth server if one was bound,
 * then close the drm fd and free the manager itself.
 * 'priv' is the tbm_bufmgr_exynos allocated in init_tbm_bufmgr_priv(). */
1671 tbm_exynos_bufmgr_deinit(void *priv)
1673 EXYNOS_RETURN_IF_FAIL(priv != NULL);
1675 tbm_bufmgr_exynos bufmgr_exynos;
1677 bufmgr_exynos = (tbm_bufmgr_exynos)priv;
/* Drain and destroy the gem-name -> bo hash table. */
1679 if (bufmgr_exynos->hashBos) {
1683 while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
1685 drmHashDelete(bufmgr_exynos->hashBos, key);
1688 drmHashDestroy(bufmgr_exynos->hashBos);
1689 bufmgr_exynos->hashBos = NULL;
1692 _bufmgr_deinit_cache_state(bufmgr_exynos);
/* Undo tbm_exynos_bufmgr_bind_native_display(), if it was called. */
1694 if (bufmgr_exynos->bind_display)
1695 tbm_drm_helper_wl_auth_server_deinit();
1697 if (bufmgr_exynos->device_name)
1698 free(bufmgr_exynos->device_name);
1700 if (tbm_backend_is_display_server())
1701 tbm_drm_helper_unset_tbm_master_fd();
1703 close(bufmgr_exynos->fd);
1705 free(bufmgr_exynos);
1709 tbm_exynos_surface_supported_format(uint32_t **formats, uint32_t *num)
1711 uint32_t *color_formats = NULL;
1713 color_formats = (uint32_t *)calloc(1,
1714 sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
1716 if (color_formats == NULL)
1719 memcpy(color_formats, tbm_exynos_color_format_list,
1720 sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1722 *formats = color_formats;
1723 *num = TBM_COLOR_FORMAT_COUNT;
1725 fprintf(stderr, "tbm_exynos_surface_supported_format count = %d\n", *num);
1731 _new_calc_plane_nv12(int width, int height)
1735 mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
1736 mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
1738 if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
1739 mbY = (mbY + 1) / 2 * 2;
1741 return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
1742 S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
1746 _calc_yplane_nv12(int width, int height)
1750 mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
1751 mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
1753 return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
1757 _calc_uvplane_nv12(int width, int height)
1761 mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
1762 mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
1764 return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
1768 _new_calc_yplane_nv12(int width, int height)
1770 return SIZE_ALIGN(_new_calc_plane_nv12(width,
1771 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1772 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1776 _new_calc_uvplane_nv12(int width, int height)
1778 return SIZE_ALIGN((_new_calc_plane_nv12(width,
1779 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1780 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1784 * @brief get the plane data of the surface.
1785 * @param[in] width : the width of the surface
1786 * @param[in] height : the height of the surface
1787 * @param[in] format : the format of the surface
1788 * @param[in] plane_idx : the index of the plane within the surface
1789 * @param[out] size : the size of the plane
1790 * @param[out] offset : the offset of the plane
1791 * @param[out] pitch : the pitch of the plane
1792 * @param[out] bo_idx : the index of the bo that backs the plane
1793 * @return 1 if this function succeeds, otherwise 0.
/* Compute size/offset/pitch and backing-bo index for one plane of a
 * surface, switching on the tbm_format. RGB and packed-YUV formats are
 * single-plane/single-bo; the planar YUV cases compute per-plane layout. */
1796 tbm_exynos_surface_get_plane_data(int width, int height,
1797 tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1798 uint32_t *pitch, int *bo_idx)
/* 16bpp packed RGB variants: one plane, pitch aligned for RGB. */
1809 case TBM_FORMAT_XRGB4444:
1810 case TBM_FORMAT_XBGR4444:
1811 case TBM_FORMAT_RGBX4444:
1812 case TBM_FORMAT_BGRX4444:
1813 case TBM_FORMAT_ARGB4444:
1814 case TBM_FORMAT_ABGR4444:
1815 case TBM_FORMAT_RGBA4444:
1816 case TBM_FORMAT_BGRA4444:
1817 case TBM_FORMAT_XRGB1555:
1818 case TBM_FORMAT_XBGR1555:
1819 case TBM_FORMAT_RGBX5551:
1820 case TBM_FORMAT_BGRX5551:
1821 case TBM_FORMAT_ARGB1555:
1822 case TBM_FORMAT_ABGR1555:
1823 case TBM_FORMAT_RGBA5551:
1824 case TBM_FORMAT_BGRA5551:
1825 case TBM_FORMAT_RGB565:
1828 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1829 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 24bpp packed RGB. */
1833 case TBM_FORMAT_RGB888:
1834 case TBM_FORMAT_BGR888:
1837 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1838 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 32bpp packed RGB. */
1842 case TBM_FORMAT_XRGB8888:
1843 case TBM_FORMAT_XBGR8888:
1844 case TBM_FORMAT_RGBX8888:
1845 case TBM_FORMAT_BGRX8888:
1846 case TBM_FORMAT_ARGB8888:
1847 case TBM_FORMAT_ABGR8888:
1848 case TBM_FORMAT_RGBA8888:
1849 case TBM_FORMAT_BGRA8888:
1852 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1853 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* Packed YUV 4:2:2 and AYUV: still a single plane, YUV pitch alignment. */
1858 case TBM_FORMAT_YUYV:
1859 case TBM_FORMAT_YVYU:
1860 case TBM_FORMAT_UYVY:
1861 case TBM_FORMAT_VYUY:
1862 case TBM_FORMAT_AYUV:
1865 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1866 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1872 * index 0 = Y plane, [7:0] Y
1873 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1875 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
/* NV12 uses the MFC codec-aligned plane sizes (MAX of old/new formulas)
 * so buffers are large enough for either decoder generation. */
1877 case TBM_FORMAT_NV12:
1879 if (plane_idx == 0) {
1881 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1882 _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1885 } else if (plane_idx == 1) {
1887 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1888 _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
/* NV21 uses plain aligned sizes (no codec-specific padding). */
1893 case TBM_FORMAT_NV21:
1895 if (plane_idx == 0) {
1897 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1898 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1900 } else if (plane_idx == 1) {
1901 _offset = width * height;
1902 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1903 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
/* NV16/NV61 (4:2:2 semi-planar): chroma plane is full height. */
1908 case TBM_FORMAT_NV16:
1909 case TBM_FORMAT_NV61:
1911 /*if(plane_idx == 0)*/
1914 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1915 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1920 /*else if( plane_idx ==1 )*/
1923 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1924 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1931 * index 0: Y plane, [7:0] Y
1932 * index 1: Cb plane, [7:0] Cb
1933 * index 2: Cr plane, [7:0] Cr
1935 * index 1: Cr plane, [7:0] Cr
1936 * index 2: Cb plane, [7:0] Cb
1940 * NATIVE_BUFFER_FORMAT_YV12
1941 * NATIVE_BUFFER_FORMAT_I420
/* 4:1:0 planar: chroma planes are quarter width and quarter height. */
1943 case TBM_FORMAT_YUV410:
1944 case TBM_FORMAT_YVU410:
1946 /*if(plane_idx == 0)*/
1949 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1950 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1955 /*else if(plane_idx == 1)*/
1958 _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1959 _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1964 /*else if (plane_idx == 2)*/
1967 _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1968 _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
/* 4:1:1 / 4:2:0 planar: chroma planes are half width, half height. */
1972 case TBM_FORMAT_YUV411:
1973 case TBM_FORMAT_YVU411:
1974 case TBM_FORMAT_YUV420:
1975 case TBM_FORMAT_YVU420:
1977 /*if(plane_idx == 0)*/
1980 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1981 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1986 /*else if(plane_idx == 1)*/
1989 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1990 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1995 /*else if (plane_idx == 2)*/
1998 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1999 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
/* 4:2:2 planar: chroma planes are half width, full height. */
2003 case TBM_FORMAT_YUV422:
2004 case TBM_FORMAT_YVU422:
2006 /*if(plane_idx == 0)*/
2009 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2010 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2015 /*else if(plane_idx == 1)*/
2018 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2019 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
2024 /*else if (plane_idx == 2)*/
2027 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2028 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
/* 4:4:4 planar: all three planes full resolution. */
2032 case TBM_FORMAT_YUV444:
2033 case TBM_FORMAT_YVU444:
2035 /*if(plane_idx == 0)*/
2038 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2039 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2044 /*else if(plane_idx == 1)*/
2047 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2048 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2053 /*else if (plane_idx == 2)*/
2056 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2057 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2075 tbm_exynos_bo_get_flags(tbm_bo bo)
2077 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
2079 tbm_bo_exynos bo_exynos;
2081 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
2082 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
2084 return bo_exynos->flags_tbm;
2088 tbm_exynos_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *native_display)
2090 tbm_bufmgr_exynos bufmgr_exynos;
2092 bufmgr_exynos = tbm_backend_get_priv_from_bufmgr(bufmgr);
2093 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
2095 if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
2096 bufmgr_exynos->device_name, 0)) {
2097 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error:Fail to tbm_drm_helper_wl_server_init\n");
2101 bufmgr_exynos->bind_display = native_display;
/* Module entry points consumed by the tbm module loader when this backend
 * shared object is dlopen()ed: version record plus the init function. */
2106 MODULEINITPPROTO(init_tbm_bufmgr_priv);
2108 static TBMModuleVersionInfo ExynosVersRec = {
2114 TBMModuleData tbmModuleData = { &ExynosVersRec, init_tbm_bufmgr_priv};
/* Backend initializer called by the tbm core. Chooses/authenticates a drm
 * fd (master fd in the display server, render node or server-authenticated
 * fd in clients), probes dma-buf fence support, sets up cache-state
 * bookkeeping and the bo hash table, then registers the backend vtable.
 * On failure, unwinds via the goto cleanup chain at the bottom. */
2117 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
2119 tbm_bufmgr_backend bufmgr_backend;
2120 tbm_bufmgr_exynos bufmgr_exynos;
2126 bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
2127 if (!bufmgr_exynos) {
2128 TBM_EXYNOS_LOG("error: Fail to alloc bufmgr_exynos!\n");
/* Display-server path: reuse the master fd if one is registered,
 * otherwise open the exynos drm device ourselves and register it. */
2132 if (tbm_backend_is_display_server()) {
2133 bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
2134 if (bufmgr_exynos->fd < 0) {
2135 bufmgr_exynos->fd = _tbm_exynos_open_drm();
2136 if (bufmgr_exynos->fd < 0) {
2137 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to open drm!\n", getpid());
2142 tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
2144 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2145 if (!bufmgr_exynos->device_name) {
2146 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to get device name!\n", getpid());
2148 tbm_drm_helper_unset_tbm_master_fd();
2149 goto fail_get_device_name;
/* Client path: prefer a render node; otherwise ask the display server
 * for an authenticated fd via the drm helper. */
2152 if (_check_render_node()) {
2153 bufmgr_exynos->fd = _get_render_node();
2154 if (bufmgr_exynos->fd < 0) {
/* NOTE(review): format has a single %s but two extra arguments
 * (target_name(), fd) are passed — extra args are ignored by
 * printf-family functions, but the 'fd' looks unintended; confirm. */
2155 TBM_EXYNOS_LOG("[%s] get render node failed\n", target_name(), fd);
2156 goto fail_get_render_node;
2158 DBG("[%s] Use render node:%d\n", target_name(), bufmgr_exynos->fd);
2160 if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
2161 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to get auth drm info!\n", getpid());
2162 goto fail_get_auth_info;
2167 //Check if the tbm manager supports dma fence or not.
/* dmabuf_sync exposes its enable state via sysfs; '1' means fences work. */
2168 fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2171 int length = read(fp, buf, 1);
2173 if (length == 1 && buf[0] == '1')
2174 bufmgr_exynos->use_dma_fence = 1;
2179 if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
2180 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error: Fail to init bufmgr cache state\n", getpid());
2181 goto fail_init_cache_state;
2184 /*Create Hash Table*/
2185 bufmgr_exynos->hashBos = drmHashCreate();
2187 bufmgr_backend = tbm_backend_alloc();
2188 if (!bufmgr_backend) {
2189 TBM_EXYNOS_LOG("error: Fail to alloc backend!\n");
2190 goto fail_alloc_backend;
/* Wire up the backend vtable with the functions defined in this file. */
2193 bufmgr_backend->priv = (void *)bufmgr_exynos;
2194 bufmgr_backend->bufmgr_deinit = tbm_exynos_bufmgr_deinit;
2195 bufmgr_backend->bo_size = tbm_exynos_bo_size;
2196 bufmgr_backend->bo_alloc = tbm_exynos_bo_alloc;
2197 bufmgr_backend->bo_free = tbm_exynos_bo_free;
2198 bufmgr_backend->bo_import = tbm_exynos_bo_import;
2199 bufmgr_backend->bo_import_fd = tbm_exynos_bo_import_fd;
2200 bufmgr_backend->bo_export = tbm_exynos_bo_export;
2201 bufmgr_backend->bo_export_fd = tbm_exynos_bo_export_fd;
2202 bufmgr_backend->bo_get_handle = tbm_exynos_bo_get_handle;
2203 bufmgr_backend->bo_map = tbm_exynos_bo_map;
2204 bufmgr_backend->bo_unmap = tbm_exynos_bo_unmap;
2205 bufmgr_backend->surface_get_plane_data = tbm_exynos_surface_get_plane_data;
2206 bufmgr_backend->surface_supported_format = tbm_exynos_surface_supported_format;
2207 bufmgr_backend->bo_get_flags = tbm_exynos_bo_get_flags;
2208 bufmgr_backend->bo_lock = tbm_exynos_bo_lock;
2209 bufmgr_backend->bo_unlock = tbm_exynos_bo_unlock;
/* Wayland auth binding only makes sense in the display server, and only
 * when clients cannot use render nodes. */
2211 if (tbm_backend_is_display_server() && !_check_render_node())
2212 bufmgr_backend->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
2214 if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
2215 TBM_EXYNOS_LOG("error: Fail to init backend!\n");
2216 goto fail_init_backend;
/* Optional debug logging, enabled by environment variable. */
2223 env = getenv("TBM_EXYNOS_DEBUG");
2226 TBM_EXYNOS_LOG("TBM_EXYNOS_DEBUG=%s\n", env);
2232 DBG("[%s] drm_fd:%d\n", target_name(), bufmgr_exynos->fd);
/* Error unwind: each label releases everything acquired before its
 * corresponding failure point, in reverse order. */
2237 tbm_backend_free(bufmgr_backend);
2239 if (bufmgr_exynos->hashBos)
2240 drmHashDestroy(bufmgr_exynos->hashBos);
2241 _bufmgr_deinit_cache_state(bufmgr_exynos);
2242 fail_init_cache_state:
2243 if (tbm_backend_is_display_server())
2244 tbm_drm_helper_unset_tbm_master_fd();
2245 fail_get_device_name:
2246 close(bufmgr_exynos->fd);
2248 fail_get_render_node:
2250 free(bufmgr_exynos);