1 /**************************************************************************
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
39 #include <sys/ioctl.h>
40 #include <sys/types.h>
47 #include <tbm_bufmgr.h>
48 #include <tbm_bufmgr_backend.h>
49 #include <exynos_drm.h>
51 #include <tbm_surface.h>
52 #include <tbm_surface_internal.h>
53 #include <tbm_drm_helper.h>
57 #include <system_info.h>
59 #include "tbm_bufmgr_tgl.h"
63 #define TBM_COLOR_FORMAT_COUNT 4
65 #define EXYNOS_DRM_NAME "exynos"
68 #define LOG_TAG "TBM_BACKEND"
/* File-scope state for the backend's log tag.
 * NOTE(review): this excerpt is sampled from a larger file; the statements
 * below belong to a helper (presumably target_name()) whose opening
 * definition line is not visible here — confirm against the full source. */
76 static int initialized = 0;
77 static char app_name[128];
/* Derive the application name from this process's command line. */
82 /* get the application name */
83 f = fopen("/proc/self/cmdline", "r");
/* Zero the whole buffer, then read at most 100 bytes of the cmdline. */
88 memset(app_name, 0x00, sizeof(app_name));
90 if (fgets(app_name, 100, f) == NULL) {
/* Keep only the basename: copy everything after the last '/'.
 * memmove of strlen(slash) bytes from slash+1 copies strlen(slash+1)
 * characters plus the terminating NUL — correct, if subtle. */
97 slash = strrchr(app_name, '/');
99 memmove(app_name, slash + 1, strlen(slash));
/* Logging macros. The first pair is the logging-enabled variant (dlog LOGE/
 * LOGD tagged with target_name()); the second pair (original lines 109-110)
 * is the no-op variant from the other branch of a preprocessor conditional
 * whose #if/#else lines are not visible in this excerpt. */
106 #define TBM_EXYNOS_ERROR(fmt, args...) LOGE("\033[31m" "[%s] " fmt "\033[0m", target_name(), ##args)
107 #define TBM_EXYNOS_DEBUG(fmt, args...) {if (bDebug&01) LOGD("[%s] " fmt, target_name(), ##args); }
109 #define TBM_EXYNOS_ERROR(...)
110 #define TBM_EXYNOS_DEBUG(...)
/* Size of the scratch buffer handed to strerror_r() throughout this file. */
113 #define STRERR_BUFSIZE 128
/* Round 'value' up to the next multiple of 'base' (base must be a power of two). */
115 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
/* Integer ceiling division. */
116 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* NOTE(review): classic MAX macro — evaluates one argument twice; do not
 * pass side-effecting expressions. */
117 #define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Runtime-selected surface alignment values (set elsewhere in the file). */
119 static unsigned int g_tbm_surface_alignment_plane;
120 static unsigned int g_tbm_surface_alignment_pitch_rgb;
/* Fixed alignment requirements for NV12 planes and YUV pitches. */
122 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
123 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
/* S5P MFC (multi-format codec) buffer-size/alignment constants, presumably
 * mirroring the kernel driver's requirements — TODO confirm against the
 * s5p-mfc driver headers. */
125 #define SZ_1M 0x00100000
126 #define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
127 #define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
128 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
129 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
130 #define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
131 #define S5P_FIMV_NV12MT_HALIGN 128
132 #define S5P_FIMV_NV12MT_VALIGN 64
/* Guard macros: log the failed condition and bail out of the calling
 * function (the return lines of these multi-line macros fall on lines not
 * visible in this excerpt; no comments are inserted between the
 * backslash-continued lines). */
134 /* check condition */
135 #define EXYNOS_RETURN_IF_FAIL(cond) {\
137 TBM_EXYNOS_ERROR("[%s] : '%s' failed.\n", __func__, #cond);\
142 #define EXYNOS_RETURN_VAL_IF_FAIL(cond, val) {\
144 TBM_EXYNOS_ERROR("[%s] : '%s' failed.\n", __func__, #cond);\
/* Global switch: when 0, all _bo_*_cache_state helpers below are no-ops. */
149 /* cache control at backend */
150 static unsigned int g_enable_cache_ctrl = 0;
/* Reply structure for DMABUF_IOCTL_GET_INFO. */
152 struct dma_buf_info {
154 unsigned int fence_supported;
155 unsigned int padding;
/* dma-buf fence access-type bits (kernel ABI). */
158 #define DMA_BUF_ACCESS_READ 0x1
159 #define DMA_BUF_ACCESS_WRITE 0x2
160 #define DMA_BUF_ACCESS_DMA 0x4
161 #define DMA_BUF_ACCESS_MAX 0x8
/* Per-bo bound on outstanding fences (sizes dma_fence[] in _tbm_bo_exynos). */
163 #define DMA_FENCE_LIST_MAX 5
165 struct dma_buf_fence {
/* ioctl numbers for the dma-buf fence interface ('F' is the ioctl magic). */
170 #define DMABUF_IOCTL_BASE 'F'
171 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
173 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
174 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
175 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
/* TGL key used for the process-global cache-flush counter. */
178 #define GLOBAL_KEY ((unsigned int)(-1))
/* Cache-operation flag bits consumed by _exynos_cache_flush(). */
180 #define TBM_EXYNOS_CACHE_INV 0x01 /**< cache invalidate */
181 #define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */
182 #define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */
183 #define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */
184 #define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */
/* Last-writer classification used by the dirty-tracking state machine in
 * _bo_set_cache_state(): CA = CPU-side (cache aware), CO = DMA-side. */
188 DEVICE_CA, /* cache aware device */
189 DEVICE_CO /* cache oblivious device */
192 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
/* Packed per-bo cache state, stored/fetched through the TGL driver as a
 * single integer (the union's plain-int member is on a line not visible
 * here). */
194 union _tbm_bo_cache_state {
197 unsigned int cntFlush:16; /*Flush all index for sync */
198 unsigned int isCached:1;
199 unsigned int isDirtied:2;
203 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
204 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
/* Per-buffer-object private data for the exynos backend. */
206 /* tbm buffor object for exynos */
207 struct _tbm_bo_exynos {
210 unsigned int name; /* FLINK ID */
212 unsigned int gem; /* GEM Handle */
214 unsigned int dmabuf; /* fd for dmabuf */
216 void *pBase; /* virtual address */
/* Allocation flags in both vocabularies, kept in sync by the flag
 * translation helpers below. */
220 unsigned int flags_exynos;
221 unsigned int flags_tbm;
223 pthread_mutex_t mutex;
224 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
/* Cache bookkeeping: state mirrored through TGL, and the map nesting depth. */
228 tbm_bo_cache_state cache_state;
229 unsigned int map_cnt;
/* Per-buffer-manager private data (fields fall on lines not visible here). */
233 /* tbm bufmgr private for exynos */
234 struct _tbm_bufmgr_exynos {
/* Device-id -> printable name table used by the debug logs. */
247 char *STR_DEVICE[] = {
/* Color formats this backend advertises via surface_supported_format. */
263 uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
270 #ifdef TGL_GET_VERSION
272 _tgl_get_version(int fd)
274 struct tgl_ver_data data;
276 char buf[STRERR_BUFSIZE];
278 err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
280 TBM_EXYNOS_ERROR("error(%s) %s:%d\n", strerror_r(errno, buf, STRERR_BUFSIZE));
284 TBM_EXYNOS_DEBUG("tgl version is (%u, %u).\n", data.major, data.minor);
/* Register 'key' with the TGL driver so it can carry lock/data state; uses a
 * 1 second lock timeout. Logs errno on ioctl failure. */
291 _tgl_init(int fd, unsigned int key)
293 struct tgl_reg_data data;
295 char buf[STRERR_BUFSIZE];
298 data.timeout_ms = 1000;
300 err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
302 TBM_EXYNOS_ERROR("error(%s) key:%d\n", strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Unregister 'key' from the TGL driver (inverse of _tgl_init). */
310 _tgl_destroy(int fd, unsigned int key)
312 struct tgl_reg_data data;
314 char buf[STRERR_BUFSIZE];
317 err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
319 TBM_EXYNOS_ERROR("error(%s) key:%d\n", strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Take the TGL lock for 'key', mapping TBM access options to TGL lock types
 * (READ/WRITE/NONE). NOTE(review): the switch's 'break' statements fall on
 * lines not visible in this excerpt — this is not intentional fallthrough. */
327 _tgl_lock(int fd, unsigned int key, int opt)
329 struct tgl_lock_data data;
330 enum tgl_type_data tgl_type;
332 char buf[STRERR_BUFSIZE];
335 case TBM_OPTION_READ:
336 tgl_type = TGL_TYPE_READ;
338 case TBM_OPTION_WRITE:
339 tgl_type = TGL_TYPE_WRITE;
/* default: unspecified access degrades to TGL_TYPE_NONE */
342 tgl_type = TGL_TYPE_NONE;
347 data.type = tgl_type;
349 err = ioctl(fd, TGL_IOCTL_LOCK, &data);
351 TBM_EXYNOS_ERROR("error(%s) key:%d opt:%d\n",
352 strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
/* Release the TGL lock for 'key'. */
360 _tgl_unlock(int fd, unsigned int key)
362 struct tgl_lock_data data;
364 char buf[STRERR_BUFSIZE];
367 data.type = TGL_TYPE_NONE;
369 err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
371 TBM_EXYNOS_ERROR("error(%s) key:%d\n",
372 strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Store an integer value under 'key' in the TGL driver (used to persist
 * per-bo cache state and the global flush counter across processes). */
380 _tgl_set_data(int fd, unsigned int key, unsigned int val)
382 struct tgl_usr_data data;
384 char buf[STRERR_BUFSIZE];
389 err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
391 TBM_EXYNOS_ERROR("error(%s) key:%d\n",
392 strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Fetch the integer value stored under 'key' in the TGL driver
 * (counterpart of _tgl_set_data). */
399 static inline unsigned int
400 _tgl_get_data(int fd, unsigned int key)
402 struct tgl_usr_data data = { 0, };
404 char buf[STRERR_BUFSIZE];
408 err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
410 TBM_EXYNOS_ERROR("error(%s) key:%d\n",
411 strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Issue a DRM_EXYNOS_GEM_CACHE_OP ioctl translating TBM_EXYNOS_CACHE_* flag
 * bits into the kernel's INV/CLN range-or-all operation flags.
 * A NULL bo_exynos forces a flush-all with usr_addr = 0. */
419 _exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
421 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
/* With dma-fence the kernel performs cache maintenance itself. */
423 /* cache flush is managed by kernel side when using dma-fence. */
424 if (bufmgr_exynos->use_dma_fence)
427 struct drm_exynos_gem_cache_op cache_op = {0, };
430 /* if bo_exynos is null, do cache_flush_all */
/* NOTE(review): the (uint32_t) cast truncates the pointer on 64-bit
 * builds — presumably this backend targets 32-bit userspace; confirm. */
433 cache_op.usr_addr = (uint64_t)((uint32_t)bo_exynos->pBase);
434 cache_op.size = bo_exynos->size;
436 flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
438 cache_op.usr_addr = 0;
/* Map each requested operation to its range or whole-cache variant. */
442 if (flags & TBM_EXYNOS_CACHE_INV) {
443 if (flags & TBM_EXYNOS_CACHE_ALL)
444 cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
446 cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
449 if (flags & TBM_EXYNOS_CACHE_CLN) {
450 if (flags & TBM_EXYNOS_CACHE_ALL)
451 cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
453 cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
456 if (flags & TBM_EXYNOS_CACHE_ALL)
457 cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
459 ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
462 TBM_EXYNOS_ERROR("fail to flush the cache.\n");
/* Register a bo's FLINK name with TGL and (for freshly allocated bos) seed
 * its cache state to clean/uncached. No-op when cache control is disabled or
 * dma-fence is in use. The 'import' parameter presumably skips the reset for
 * imported bos — the guard falls on lines not visible here; confirm. */
470 _bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
472 /* check whether cache control do or not */
473 if (!g_enable_cache_ctrl)
476 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
477 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
479 if (bufmgr_exynos->use_dma_fence)
482 _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
484 tbm_bo_cache_state cache_state;
/* Fresh state: no writer, not CPU-cached, flush counter zeroed. */
487 cache_state.data.isDirtied = DEVICE_NONE;
488 cache_state.data.isCached = 0;
489 cache_state.data.cntFlush = 0;
491 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
/* Update a bo's shared cache state on map and decide whether a cache
 * invalidate/clean is needed, based on who wrote last (CPU vs DMA device)
 * and whether the CPU has cached lines. State is shared across processes via
 * the TGL driver. */
498 _bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
500 /* check whether cache control do or not */
501 if (!g_enable_cache_ctrl)
504 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
505 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
507 if (bufmgr_exynos->use_dma_fence)
511 unsigned short cntFlush = 0;
/* Non-cachable bos never need CPU cache maintenance. */
513 if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE))
516 /* get cache state of a bo */
517 bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
520 /* get global cache flush count */
521 cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
/* CPU access: invalidate if a DMA device dirtied the bo while CPU lines
 * may still be cached; then record the CPU as reader/writer. */
523 if (device == TBM_DEVICE_CPU) {
524 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
525 bo_exynos->cache_state.data.isCached)
526 need_flush = TBM_EXYNOS_CACHE_INV;
528 bo_exynos->cache_state.data.isCached = 1;
529 if (opt & TBM_OPTION_WRITE)
530 bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
532 if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
533 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
/* Device access: clean (write back) CPU-dirtied cached data before DMA
 * reads it, unless a global flush already happened since we recorded
 * cntFlush. */
536 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
537 bo_exynos->cache_state.data.isCached &&
538 bo_exynos->cache_state.data.cntFlush == cntFlush)
539 need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
541 if (opt & TBM_OPTION_WRITE)
542 bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
544 if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
545 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
/* A whole-cache flush bumps the global counter so other bos can skip
 * redundant cleans. */
550 if (need_flush & TBM_EXYNOS_CACHE_ALL)
551 _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush))
553 /* call cache flush */
554 _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
556 TBM_EXYNOS_DEBUG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
557 bo_exynos->cache_state.data.isCached,
558 bo_exynos->cache_state.data.isDirtied,
/* On unmap: snapshot the current global flush counter into the bo's state
 * and write the state back to the TGL driver so other processes see it. */
567 _bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
569 /* check whether cache control do or not */
570 if (!g_enable_cache_ctrl)
573 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
574 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
576 if (bufmgr_exynos->use_dma_fence)
579 unsigned short cntFlush = 0;
581 /* get global cache flush count */
582 cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
584 /* save global cache flush count */
585 bo_exynos->cache_state.data.cntFlush = cntFlush;
586 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
587 bo_exynos->cache_state.val);
/* Drop the bo's TGL registration when the bo is freed (inverse of
 * _bo_init_cache_state). */
593 _bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
595 /* check whether cache control do or not */
596 if (!g_enable_cache_ctrl)
599 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
600 EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
602 if (bufmgr_exynos->use_dma_fence)
605 _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
/* Open the TGL device (primary path, then fallback path) and register the
 * GLOBAL_KEY slot that holds the cross-process cache-flush counter.
 * No-op when cache control is disabled or dma-fence is in use. */
609 _bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
611 /* check whether cache control do or not */
612 if (!g_enable_cache_ctrl)
615 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
617 if (bufmgr_exynos->use_dma_fence)
620 /* open tgl fd for saving cache flush data */
621 bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
623 if (bufmgr_exynos->tgl_fd < 0) {
624 bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
625 if (bufmgr_exynos->tgl_fd < 0) {
626 TBM_EXYNOS_ERROR("fail to open global_lock:%s\n",
632 #ifdef TGL_GET_VERSION
633 if (!_tgl_get_version(bufmgr_exynos->tgl_fd)) {
634 TBM_EXYNOS_ERROR("fail to get tgl_version. tgl init failed.\n");
/* FIX: was close(bufmgr_sprd->tgl_fd) — 'bufmgr_sprd' is the sprd
 * backend's variable (copy-paste error) and does not exist here. */
635 close(bufmgr_exynos->tgl_fd);
640 if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
641 TBM_EXYNOS_ERROR("fail to initialize the tgl\n");
642 close(bufmgr_exynos->tgl_fd);
/* Close the TGL fd opened by _bufmgr_init_cache_state; honors the same
 * cache-control and dma-fence early-outs. */
650 _bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
652 /* check whether cache control do or not */
653 if (!g_enable_cache_ctrl)
656 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
658 if (bufmgr_exynos->use_dma_fence)
661 if (bufmgr_exynos->tgl_fd >= 0)
662 close(bufmgr_exynos->tgl_fd);
/* Open the exynos DRM device: try drmOpen() first, then fall back to a udev
 * scan for a card* node whose parent is "exynos-drm". Returns the open fd
 * (return/cleanup lines fall on lines not visible in this excerpt). */
666 _tbm_exynos_open_drm()
670 fd = drmOpen(EXYNOS_DRM_NAME, NULL);
672 TBM_EXYNOS_ERROR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME);
676 struct udev *udev = NULL;
677 struct udev_enumerate *e = NULL;
678 struct udev_list_entry *entry = NULL;
679 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
680 const char *filepath;
684 TBM_EXYNOS_DEBUG("search drm-device by udev\n");
688 TBM_EXYNOS_ERROR("udev_new() failed.\n");
/* Enumerate /sys drm card nodes and keep the one parented by exynos-drm. */
692 e = udev_enumerate_new(udev);
693 udev_enumerate_add_match_subsystem(e, "drm");
694 udev_enumerate_add_match_sysname(e, "card[0-9]*");
695 udev_enumerate_scan_devices(e);
697 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
698 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
699 udev_list_entry_get_name(entry));
700 device_parent = udev_device_get_parent(device);
701 /* Not need unref device_parent. device_parent and device have same refcnt */
703 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
705 TBM_EXYNOS_DEBUG("[%s] Found render device: '%s' (%s)\n",
707 udev_device_get_syspath(drm_device),
708 udev_device_get_sysname(device_parent))
712 udev_device_unref(device);
715 udev_enumerate_unref(e);
717 /* Get device file path. */
718 filepath = udev_device_get_devnode(drm_device);
720 TBM_EXYNOS_ERROR("udev_device_get_devnode() failed.\n");
721 udev_device_unref(drm_device);
726 /* Open DRM device file and check validity. */
727 fd = open(filepath, O_RDWR | O_CLOEXEC);
/* FIX: both error messages below contained a %s conversion with no matching
 * argument (undefined behavior); pass the device path. */
729 TBM_EXYNOS_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
730 udev_device_unref(drm_device);
737 TBM_EXYNOS_ERROR("fstat() failed %s.\n", filepath);
739 udev_device_unref(drm_device);
744 udev_device_unref(drm_device);
/* Probe via udev whether an exynos-drm renderD* node exists. Compiled to a
 * trivial result when USE_RENDER_NODE is not defined (the early return falls
 * on lines not visible in this excerpt). */
752 _check_render_node(void)
754 struct udev *udev = NULL;
755 struct udev_enumerate *e = NULL;
756 struct udev_list_entry *entry = NULL;
757 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
759 #ifndef USE_RENDER_NODE
765 TBM_EXYNOS_ERROR("udev_new() failed.\n");
/* Enumerate render nodes and look for one parented by exynos-drm. */
769 e = udev_enumerate_new(udev);
770 udev_enumerate_add_match_subsystem(e, "drm");
771 udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
772 udev_enumerate_scan_devices(e);
774 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
775 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
776 udev_list_entry_get_name(entry));
777 device_parent = udev_device_get_parent(device);
778 /* Not need unref device_parent. device_parent and device have same refcnt */
780 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
782 TBM_EXYNOS_DEBUG("Found render device: '%s' (%s)\n",
783 udev_device_get_syspath(drm_device),
784 udev_device_get_sysname(device_parent));
788 udev_device_unref(device);
791 udev_enumerate_unref(e);
795 udev_device_unref(drm_device);
799 udev_device_unref(drm_device);
/* Locate the exynos-drm renderD* node via udev, open it O_RDWR|O_CLOEXEC and
 * return the fd (return/cleanup lines fall on lines not visible in this
 * excerpt). Mirrors the card-node scan in _tbm_exynos_open_drm(). */
804 _get_render_node(void)
806 struct udev *udev = NULL;
807 struct udev_enumerate *e = NULL;
808 struct udev_list_entry *entry = NULL;
809 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
810 const char *filepath;
817 TBM_EXYNOS_ERROR("udev_new() failed.\n");
821 e = udev_enumerate_new(udev);
822 udev_enumerate_add_match_subsystem(e, "drm");
823 udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
824 udev_enumerate_scan_devices(e);
826 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
827 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
828 udev_list_entry_get_name(entry));
829 device_parent = udev_device_get_parent(device);
830 /* Not need unref device_parent. device_parent and device have same refcnt */
832 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
834 TBM_EXYNOS_DEBUG("Found render device: '%s' (%s)\n",
835 udev_device_get_syspath(drm_device),
836 udev_device_get_sysname(device_parent));
840 udev_device_unref(device);
843 udev_enumerate_unref(e);
845 /* Get device file path. */
846 filepath = udev_device_get_devnode(drm_device);
848 TBM_EXYNOS_ERROR("udev_device_get_devnode() failed.\n");
849 udev_device_unref(drm_device);
854 /* Open DRM device file and check validity. */
855 fd = open(filepath, O_RDWR | O_CLOEXEC);
/* FIX: both error messages below contained a %s conversion with no matching
 * argument (undefined behavior); pass the device path. */
857 TBM_EXYNOS_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
858 udev_device_unref(drm_device);
865 TBM_EXYNOS_ERROR("fstat() failed %s.\n", filepath);
866 udev_device_unref(drm_device);
872 udev_device_unref(drm_device);
/* Translate TBM allocation flags to exynos GEM flags:
 * SCANOUT -> contiguous, WC/NONCACHABLE pass through, default is cachable. */
879 _get_exynos_flag_from_tbm(unsigned int ftbm)
881 unsigned int flags = 0;
883 if (ftbm & TBM_BO_SCANOUT)
884 flags |= EXYNOS_BO_CONTIG;
886 flags |= EXYNOS_BO_NONCONTIG;
888 if (ftbm & TBM_BO_WC)
889 flags |= EXYNOS_BO_WC;
890 else if (ftbm & TBM_BO_NONCACHABLE)
891 flags |= EXYNOS_BO_NONCACHABLE;
893 flags |= EXYNOS_BO_CACHABLE;
/* Inverse of _get_exynos_flag_from_tbm: recover TBM flags from exynos GEM
 * flags (contiguous -> SCANOUT, cachable -> DEFAULT, etc.). */
899 _get_tbm_flag_from_exynos(unsigned int fexynos)
901 unsigned int flags = 0;
903 if (fexynos & EXYNOS_BO_NONCONTIG)
904 flags |= TBM_BO_DEFAULT;
906 flags |= TBM_BO_SCANOUT;
908 if (fexynos & EXYNOS_BO_WC)
910 else if (fexynos & EXYNOS_BO_CACHABLE)
911 flags |= TBM_BO_DEFAULT;
913 flags |= TBM_BO_NONCACHABLE;
/* Get the global FLINK name for a GEM handle via DRM_IOCTL_GEM_FLINK;
 * logs and fails on ioctl error. */
919 _get_name(int fd, unsigned int gem)
921 struct drm_gem_flink arg = {0,};
924 if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
925 TBM_EXYNOS_ERROR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
929 return (unsigned int)arg.name;
/* Produce the device-specific handle for a bo: the GEM handle for DEFAULT,
 * an mmap'ed CPU pointer for CPU, and a (lazily exported) dma-buf fd for the
 * other device types. Caches pBase and dmabuf on the bo. */
933 _exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
935 tbm_bo_handle bo_handle;
/* NOTE(review): only sizeof(uint64_t) bytes are cleared — presumably
 * tbm_bo_handle is an 8-byte union; confirm against tbm_type.h. */
937 memset(&bo_handle, 0x0, sizeof(uint64_t));
940 case TBM_DEVICE_DEFAULT:
942 bo_handle.u32 = (uint32_t)bo_exynos->gem;
/* CPU: map the GEM object on first use and cache the pointer. */
945 if (!bo_exynos->pBase) {
946 struct drm_exynos_gem_map arg = {0,};
949 arg.handle = bo_exynos->gem;
950 if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
952 TBM_EXYNOS_ERROR("Cannot map_dumb gem=%d\n", bo_exynos->gem);
953 return (tbm_bo_handle) NULL;
956 map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
957 bo_exynos->fd, arg.offset);
958 if (map == MAP_FAILED) {
959 TBM_EXYNOS_ERROR("Cannot usrptr gem=%d\n", bo_exynos->gem);
960 return (tbm_bo_handle) NULL;
962 bo_exynos->pBase = map;
964 bo_handle.ptr = (void *)bo_exynos->pBase;
/* Other devices: return (exporting on first use) the dma-buf fd. */
968 if (bo_exynos->dmabuf) {
969 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
973 if (!bo_exynos->dmabuf) {
974 struct drm_prime_handle arg = {0, };
976 arg.handle = bo_exynos->gem;
977 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
978 TBM_EXYNOS_ERROR("Cannot dmabuf=%d\n", bo_exynos->gem);
979 return (tbm_bo_handle) NULL;
981 bo_exynos->dmabuf = arg.fd;
984 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
988 if (!bo_exynos->dmabuf) {
989 struct drm_prime_handle arg = {0, };
991 arg.handle = bo_exynos->gem;
992 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
993 TBM_EXYNOS_ERROR("Cannot dmabuf=%d\n", bo_exynos->gem);
994 return (tbm_bo_handle) NULL;
996 bo_exynos->dmabuf = arg.fd;
999 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
1002 TBM_EXYNOS_ERROR("Not supported device:%d\n", device);
1003 bo_handle.ptr = (void *) NULL;
/* Backend callback: return the byte size of a bo. */
1011 tbm_exynos_bo_size(tbm_bo bo)
1013 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1015 tbm_bo_exynos bo_exynos;
1017 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1018 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1020 return bo_exynos->size;
/* Backend callback: allocate a GEM buffer of 'size' with TBM flags 'flags'.
 * Creates the GEM object, resolves its FLINK name, seeds cache state,
 * optionally exports a dma-buf fd (dma-fence mode), and registers the bo in
 * the bufmgr's name hash. Returns the backend-private bo pointer. */
1024 tbm_exynos_bo_alloc(tbm_bo bo, int size, int flags)
1026 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1028 tbm_bo_exynos bo_exynos;
1029 tbm_bufmgr_exynos bufmgr_exynos;
1030 unsigned int exynos_flags;
1032 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1033 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1035 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1037 TBM_EXYNOS_ERROR("fail to allocate the bo private\n");
1041 exynos_flags = _get_exynos_flag_from_tbm(flags);
/* Presumably downgrades SCANOUT to non-contiguous under some bufmgr
 * condition on the dropped continuation line — confirm in full source. */
1042 if ((flags & TBM_BO_SCANOUT) &&
1044 exynos_flags |= EXYNOS_BO_NONCONTIG;
1047 struct drm_exynos_gem_create arg = {0, };
1049 arg.size = (uint64_t)size;
1050 arg.flags = exynos_flags;
1051 if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
1053 TBM_EXYNOS_ERROR("Cannot create bo(flag:%x, size:%d)\n", arg.flags,
1054 (unsigned int)arg.size);
/* Fill in the private bo record. */
1059 bo_exynos->fd = bufmgr_exynos->fd;
1060 bo_exynos->gem = arg.handle;
1061 bo_exynos->size = size;
1062 bo_exynos->flags_tbm = flags;
1063 bo_exynos->flags_exynos = exynos_flags;
1064 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1066 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
1067 TBM_EXYNOS_ERROR("fail init cache state(%d)\n", bo_exynos->name);
1072 pthread_mutex_init(&bo_exynos->mutex, NULL);
/* dma-fence mode needs the dma-buf fd up front for fence ioctls. */
1074 if (bufmgr_exynos->use_dma_fence
1075 && !bo_exynos->dmabuf) {
1076 struct drm_prime_handle arg = {0, };
1078 arg.handle = bo_exynos->gem;
1079 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1080 TBM_EXYNOS_ERROR("Cannot dmabuf=%d\n", bo_exynos->gem);
1084 bo_exynos->dmabuf = arg.fd;
1087 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1088 (void *)bo_exynos) < 0) {
1089 TBM_EXYNOS_ERROR("Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1092 TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
1094 bo_exynos->gem, bo_exynos->name,
1095 flags, exynos_flags,
1098 return (void *)bo_exynos;
/* Backend callback: tear down a bo — unmap the CPU mapping, close the
 * dma-buf fd, remove the bo from the name hash, drop its TGL cache state,
 * and close the GEM handle. */
1102 tbm_exynos_bo_free(tbm_bo bo)
1104 tbm_bo_exynos bo_exynos;
1105 tbm_bufmgr_exynos bufmgr_exynos;
1106 char buf[STRERR_BUFSIZE];
1111 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1112 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
1114 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1115 EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
1117 TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d, size:%d\n",
1119 bo_exynos->gem, bo_exynos->name,
/* Release the CPU mapping, if any. */
1123 if (bo_exynos->pBase) {
1124 if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
1125 TBM_EXYNOS_ERROR("bo:%p fail to munmap(%s)\n",
1126 bo, strerror_r(errno, buf, STRERR_BUFSIZE));
/* Close the exported dma-buf fd, if any. */
1131 if (bo_exynos->dmabuf) {
1132 close(bo_exynos->dmabuf);
1133 bo_exynos->dmabuf = 0;
1136 /* delete bo from hash */
1139 ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name,
1140 (void **)&bo_exynos);
1142 drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
1144 TBM_EXYNOS_ERROR("Cannot find bo to Hash(%d), ret=%d\n",
1145 bo_exynos->name, ret);
1148 _bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
1150 /* Free gem handle */
1151 struct drm_gem_close arg = {0, };
1153 memset(&arg, 0, sizeof(arg));
1154 arg.handle = bo_exynos->gem;
1155 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
1156 TBM_EXYNOS_ERROR("bo:%p fail to gem close.(%s)\n",
1157 bo, strerror_r(errno, buf, STRERR_BUFSIZE));
/* Backend callback: import a bo by FLINK name. Returns the cached private bo
 * if the name is already in the hash; otherwise opens the GEM object,
 * queries its exynos flags, builds a new private record, initializes cache
 * state, exports a dma-buf fd, and registers it in the hash. */
1165 tbm_exynos_bo_import(tbm_bo bo, unsigned int key)
1167 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1169 tbm_bufmgr_exynos bufmgr_exynos;
1170 tbm_bo_exynos bo_exynos;
1173 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1174 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
/* Fast path: already imported into this process. */
1176 ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&bo_exynos);
1180 struct drm_gem_open arg = {0, };
1181 struct drm_exynos_gem_info info = {0, };
1184 if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1185 TBM_EXYNOS_ERROR("Cannot open gem name=%d\n", key);
1189 info.handle = arg.handle;
1190 if (drmCommandWriteRead(bufmgr_exynos->fd,
1193 sizeof(struct drm_exynos_gem_info))) {
1194 TBM_EXYNOS_ERROR("Cannot get gem info=%d\n", key);
1198 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1200 TBM_EXYNOS_ERROR("fail to allocate the bo private\n");
1204 bo_exynos->fd = bufmgr_exynos->fd;
1205 bo_exynos->gem = arg.handle;
1206 bo_exynos->size = arg.size;
1207 bo_exynos->flags_exynos = info.flags;
1208 bo_exynos->name = key;
1209 bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1211 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1212 TBM_EXYNOS_ERROR("fail init cache state(%d)\n", bo_exynos->name);
1217 if (!bo_exynos->dmabuf) {
1218 struct drm_prime_handle arg = {0, };
1220 arg.handle = bo_exynos->gem;
1221 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1222 TBM_EXYNOS_ERROR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_exynos->gem);
1226 bo_exynos->dmabuf = arg.fd;
1229 /* add bo to hash */
1230 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1231 (void *)bo_exynos) < 0) {
1232 TBM_EXYNOS_ERROR("Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1235 TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1237 bo_exynos->gem, bo_exynos->name,
1239 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1242 return (void *)bo_exynos;
/* Backend callback: import a bo from a dma-buf fd. Converts the fd to a GEM
 * handle, resolves the FLINK name, and returns the cached private bo if the
 * same GEM object is already imported; otherwise determines the size,
 * queries flags, and registers a new private record in the hash. */
1246 tbm_exynos_bo_import_fd(tbm_bo bo, tbm_fd key)
1248 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1250 tbm_bufmgr_exynos bufmgr_exynos;
1251 tbm_bo_exynos bo_exynos;
1254 char buf[STRERR_BUFSIZE];
1256 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1257 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1259 /*getting handle from fd*/
1260 unsigned int gem = 0;
1261 struct drm_prime_handle arg = {0, };
1265 if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1266 TBM_EXYNOS_ERROR("bo:%p Cannot get gem handle from fd:%d (%s)\n",
1267 bo, arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
1272 name = _get_name(bufmgr_exynos->fd, gem);
1274 TBM_EXYNOS_ERROR("bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1275 bo, gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
/* Fast path: same GEM object already tracked under this name. */
1279 ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&bo_exynos);
1281 if (gem == bo_exynos->gem)
1285 unsigned int real_size = -1;
1286 struct drm_exynos_gem_info info = {0, };
1288 /* Determine size of bo. The fd-to-handle ioctl really should
1289 * return the size, but it doesn't. If we have kernel 3.12 or
1290 * later, we can lseek on the prime fd to get the size. Older
1291 * kernels will just fail, in which case we fall back to the
1292 * provided (estimated or guess size).
/* NOTE(review): off_t is narrowed to unsigned int here; the -1 sentinel
 * compare below relies on the usual unsigned conversion of -1. */
1294 real_size = lseek(key, 0, SEEK_END);
1297 if (drmCommandWriteRead(bufmgr_exynos->fd,
1300 sizeof(struct drm_exynos_gem_info))) {
1301 TBM_EXYNOS_ERROR("bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
1302 bo, gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1306 if (real_size == -1)
1307 real_size = info.size;
1309 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1311 TBM_EXYNOS_ERROR("bo:%p fail to allocate the bo private\n", bo);
1315 bo_exynos->fd = bufmgr_exynos->fd;
1316 bo_exynos->gem = gem;
1317 bo_exynos->size = real_size;
1318 bo_exynos->flags_exynos = info.flags;
1319 bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1320 bo_exynos->name = name;
1322 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1323 TBM_EXYNOS_ERROR("fail init cache state(%d)\n", bo_exynos->name);
1328 /* add bo to hash */
1329 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1330 (void *)bo_exynos) < 0) {
1331 TBM_EXYNOS_ERROR("bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1332 bo, bo_exynos->name, gem, key);
1335 TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1337 bo_exynos->gem, bo_exynos->name,
1340 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1343 return (void *)bo_exynos;
/* Backend callback: export a bo as a FLINK name, resolving it lazily via
 * _get_name() if it was never fetched. */
1347 tbm_exynos_bo_export(tbm_bo bo)
1349 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1351 tbm_bo_exynos bo_exynos;
1353 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1354 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1356 if (!bo_exynos->name) {
1357 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1358 if (!bo_exynos->name) {
1359 TBM_EXYNOS_ERROR("Cannot get name\n");
1364 TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1366 bo_exynos->gem, bo_exynos->name,
1368 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1371 return (unsigned int)bo_exynos->name;
/* Backend callback: export a bo as a dma-buf fd via PRIME. Note this always
 * creates a fresh fd owned by the caller (bo_exynos->dmabuf is not reused). */
1375 tbm_exynos_bo_export_fd(tbm_bo bo)
1377 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1379 tbm_bo_exynos bo_exynos;
1381 char buf[STRERR_BUFSIZE];
1383 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1384 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, -1);
1386 struct drm_prime_handle arg = {0, };
1388 arg.handle = bo_exynos->gem;
1389 ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1391 TBM_EXYNOS_ERROR("bo:%p Cannot dmabuf=%d (%s)\n",
1392 bo, bo_exynos->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
/* Propagate drmIoctl's error return as the (negative) tbm_fd. */
1393 return (tbm_fd) ret;
1396 TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1398 bo_exynos->gem, bo_exynos->name,
1401 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1404 return (tbm_fd)arg.fd;
/* Backend callback: return the device-specific handle for a bo without
 * touching cache state (unlike tbm_exynos_bo_map). */
1407 static tbm_bo_handle
1408 tbm_exynos_bo_get_handle(tbm_bo bo, int device)
1410 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1412 tbm_bo_handle bo_handle;
1413 tbm_bo_exynos bo_exynos;
1415 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1416 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1418 if (!bo_exynos->gem) {
1419 TBM_EXYNOS_ERROR("Cannot map gem=%d\n", bo_exynos->gem);
1420 return (tbm_bo_handle) NULL;
1423 TBM_EXYNOS_DEBUG("bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1425 bo_exynos->gem, bo_exynos->name,
1427 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1429 STR_DEVICE[device]);
1431 /*Get mapped bo_handle*/
1432 bo_handle = _exynos_bo_handle(bo_exynos, device);
1433 if (bo_handle.ptr == NULL) {
1434 TBM_EXYNOS_ERROR("Cannot get handle: gem:%d, device:%d\n",
1435 bo_exynos->gem, device);
1436 return (tbm_bo_handle) NULL;
/* Backend callback: map a bo for 'device' with access options 'opt'. On the
 * first (outermost) map, updates the shared cache state; tracks nesting via
 * map_cnt and remembers the device for the matching unmap. */
1442 static tbm_bo_handle
1443 tbm_exynos_bo_map(tbm_bo bo, int device, int opt)
1445 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1447 tbm_bo_handle bo_handle;
1448 tbm_bo_exynos bo_exynos;
1449 tbm_bufmgr_exynos bufmgr_exynos;
1451 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1452 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, (tbm_bo_handle)NULL);
1454 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1455 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1457 if (!bo_exynos->gem) {
1458 TBM_EXYNOS_ERROR("Cannot map gem=%d\n", bo_exynos->gem);
1459 return (tbm_bo_handle) NULL;
1462 TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d, %s, %s\n",
1464 bo_exynos->gem, bo_exynos->name,
1469 /*Get mapped bo_handle*/
1470 bo_handle = _exynos_bo_handle(bo_exynos, device);
1471 if (bo_handle.ptr == NULL) {
1472 TBM_EXYNOS_ERROR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1473 bo_exynos->gem, device, opt);
1474 return (tbm_bo_handle) NULL;
/* Cache maintenance only on the transition from unmapped to mapped. */
1477 if (bo_exynos->map_cnt == 0)
1478 _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
1480 bo_exynos->last_map_device = device;
1482 bo_exynos->map_cnt++;
/* Backend callback: unmap a bo. Saves cache state when the last nested map
 * is released and flushes all caches after CPU access (when cache control is
 * enabled). */
1488 tbm_exynos_bo_unmap(tbm_bo bo)
1490 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1492 tbm_bo_exynos bo_exynos;
1493 tbm_bufmgr_exynos bufmgr_exynos;
1495 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1496 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1498 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1499 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1502 if (!bo_exynos->gem)
1505 bo_exynos->map_cnt--;
1507 if (bo_exynos->map_cnt == 0)
1508 _bo_save_cache_state(bufmgr_exynos, bo_exynos);
1510 /* check whether cache control do or not */
1511 if (g_enable_cache_ctrl && bo_exynos->last_map_device == TBM_DEVICE_CPU)
1512 _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL);
/* Reset so a stale device id is never reused on the next unmap. */
1514 bo_exynos->last_map_device = -1;
1516 TBM_EXYNOS_DEBUG(" bo:%p, gem:%d(%d), fd:%d\n",
1518 bo_exynos->gem, bo_exynos->name,
/*
 * Backend lock for a bo: acquire a dma-buf fence (3D device) or a POSIX
 * record lock on the dmabuf fd, and record the fence in the bo's fence list.
 * Compiled out when ALWAYS_BACKEND_CTRL is defined.
 * Returns non-zero on success, 0 on failure.
 */
1525 tbm_exynos_bo_lock(tbm_bo bo, int device, int opt)
1527 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1529 #ifndef ALWAYS_BACKEND_CTRL
1530 tbm_bufmgr_exynos bufmgr_exynos;
1531 tbm_bo_exynos bo_exynos;
1532 struct dma_buf_fence fence;
1533 struct flock filelock;
1535 char buf[STRERR_BUFSIZE];
/* Only 3D and CPU devices participate in backend locking. */
1537 if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1538 TBM_EXYNOS_DEBUG("Not support device type,\n");
1542 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1543 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1545 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1546 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1548 memset(&fence, 0, sizeof(struct dma_buf_fence));
1550 /* Check if the given type is valid or not. */
1551 if (opt & TBM_OPTION_WRITE) {
1552 if (device == TBM_DEVICE_3D)
1553 fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1554 } else if (opt & TBM_OPTION_READ) {
1555 if (device == TBM_DEVICE_3D)
1556 fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1558 TBM_EXYNOS_ERROR("Invalid argument\n");
1562 /* Check if the tbm manager supports dma fence or not. */
1563 if (!bufmgr_exynos->use_dma_fence) {
1564 TBM_EXYNOS_ERROR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
/* 3D path: ask the kernel for a dma-buf fence on this buffer. */
1569 if (device == TBM_DEVICE_3D) {
1570 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1572 TBM_EXYNOS_ERROR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
/* CPU path: take an advisory record lock on the dmabuf fd instead —
 * write access gets an exclusive lock, read access a shared one. */
1576 if (opt & TBM_OPTION_WRITE)
1577 filelock.l_type = F_WRLCK;
1579 filelock.l_type = F_RDLCK;
1581 filelock.l_whence = SEEK_CUR;
1582 filelock.l_start = 0;
/* F_SETLKW blocks until the lock can be acquired. */
1585 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
/* The per-bo fence list is shared across threads; guard it. */
1589 pthread_mutex_lock(&bo_exynos->mutex);
1591 if (device == TBM_DEVICE_3D) {
/* Stash the fence in the first free slot (ctx == 0 marks a free entry). */
1594 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1595 if (bo_exynos->dma_fence[i].ctx == 0) {
1596 bo_exynos->dma_fence[i].type = fence.type;
1597 bo_exynos->dma_fence[i].ctx = fence.ctx;
1602 if (i == DMA_FENCE_LIST_MAX) {
1603 /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
1604 TBM_EXYNOS_ERROR("fence list is full\n");
1608 pthread_mutex_unlock(&bo_exynos->mutex);
1610 TBM_EXYNOS_DEBUG("DMABUF_IOCTL_GET_FENCE! bo:%p, gem:%d(%d), fd:%ds\n",
1612 bo_exynos->gem, bo_exynos->name,
1614 #endif /* ALWAYS_BACKEND_CTRL */
/*
 * Backend unlock for a bo: release the oldest dma-buf fence (3D path) or
 * drop the POSIX record lock on the dmabuf fd (CPU path).
 * Compiled out when ALWAYS_BACKEND_CTRL is defined.
 * Returns non-zero on success, 0 on failure.
 */
1620 tbm_exynos_bo_unlock(tbm_bo bo)
1622 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1624 #ifndef ALWAYS_BACKEND_CTRL
1625 tbm_bo_exynos bo_exynos;
1626 struct dma_buf_fence fence;
1627 struct flock filelock;
1628 unsigned int dma_type = 0;
1630 char buf[STRERR_BUFSIZE];
1632 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1633 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
/* dma_type flags a pending DMA (3D) fence at the head of the list. */
1635 if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
1638 if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1639 TBM_EXYNOS_DEBUG("FENCE not support or ignored,\n");
/* NOTE(review): this condition is identical to the one on line 1638 but
 * logs a different message; the second check likely intended a different
 * predicate (e.g. ctx set but dma_type clear) — verify against upstream. */
1643 if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1644 TBM_EXYNOS_DEBUG("device type is not 3D/CPU,\n");
/* Pop the oldest fence: copy entry 0, shift the rest down, clear the tail.
 * The list is shared across threads, so mutate it under the bo mutex. */
1648 pthread_mutex_lock(&bo_exynos->mutex);
1651 fence.type = bo_exynos->dma_fence[0].type;
1652 fence.ctx = bo_exynos->dma_fence[0].ctx;
1655 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1656 bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
1657 bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
1659 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1660 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1662 pthread_mutex_unlock(&bo_exynos->mutex);
/* Hand the fence back to the kernel. */
1665 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1667 TBM_EXYNOS_ERROR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
/* CPU path: release the advisory record lock taken in tbm_exynos_bo_lock. */
1671 filelock.l_type = F_UNLCK;
1672 filelock.l_whence = SEEK_CUR;
1673 filelock.l_start = 0;
1676 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1680 TBM_EXYNOS_DEBUG("DMABUF_IOCTL_PUT_FENCE! bo:%p, gem:%d(%d), fd:%ds\n",
1682 bo_exynos->gem, bo_exynos->name,
1684 #endif /* ALWAYS_BACKEND_CTRL */
/*
 * Tear down the backend-private buffer manager: drain and destroy the
 * name->bo hash table, release cache state, shut down the Wayland auth
 * server if a display was bound, free owned strings, drop the helper fd
 * registrations, close the DRM fd, and free the manager itself.
 * 'priv' is the tbm_bufmgr_exynos allocated in init_tbm_bufmgr_priv;
 * this function takes ownership and frees it.
 */
1690 tbm_exynos_bufmgr_deinit(void *priv)
1692 EXYNOS_RETURN_IF_FAIL(priv != NULL);
1694 tbm_bufmgr_exynos bufmgr_exynos;
1696 bufmgr_exynos = (tbm_bufmgr_exynos)priv;
/* Drain every remaining entry before destroying the hash table. */
1698 if (bufmgr_exynos->hashBos) {
1702 while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
1704 drmHashDelete(bufmgr_exynos->hashBos, key);
1707 drmHashDestroy(bufmgr_exynos->hashBos);
1708 bufmgr_exynos->hashBos = NULL;
1711 _bufmgr_deinit_cache_state(bufmgr_exynos);
/* Only deinit the wl auth server if bind_native_display ever ran. */
1713 if (bufmgr_exynos->bind_display)
1714 tbm_drm_helper_wl_auth_server_deinit();
1716 if (bufmgr_exynos->device_name)
1717 free(bufmgr_exynos->device_name);
1719 if (tbm_backend_is_display_server())
1720 tbm_drm_helper_unset_tbm_master_fd();
1722 tbm_drm_helper_unset_fd();
1724 close(bufmgr_exynos->fd);
1726 free(bufmgr_exynos);
/*
 * Report the surface color formats this backend supports.
 * On success, *formats receives a freshly calloc'd array of
 * TBM_COLOR_FORMAT_COUNT format codes (ownership passes to the caller,
 * who must free it) and *num receives the element count.
 */
1730 tbm_exynos_surface_supported_format(uint32_t **formats, uint32_t *num)
1732 uint32_t *color_formats = NULL;
1734 color_formats = (uint32_t *)calloc(1,
1735 sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
1737 if (color_formats == NULL)
/* Copy the backend's static format table into the caller-owned array. */
1740 memcpy(color_formats, tbm_exynos_color_format_list,
1741 sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1743 *formats = color_formats;
1744 *num = TBM_COLOR_FORMAT_COUNT;
1746 TBM_EXYNOS_DEBUG("tbm_exynos_surface_supported_format count = %d\n", *num);
/*
 * Compute a macroblock-aligned NV12 plane size from width/height.
 * The S5P_FIMV_* constants suggest this matches the S5P MFC video codec's
 * buffer requirements — confirm against the MFC driver headers.
 */
1752 _new_calc_plane_nv12(int width, int height)
1756 mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
1757 mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
/* For frames below the codec's max size, round the MB row count up to even. */
1759 if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
1760 mbY = (mbY + 1) / 2 * 2;
1762 return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
1763 S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
/*
 * Legacy NV12 Y-plane size: pad width/height (the +24/+16 margins are
 * codec-specific — TODO confirm against the MFC spec), align each to the
 * NV12MT tile alignment, then align the product to the decoder buffer unit.
 */
1767 _calc_yplane_nv12(int width, int height)
1771 mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
1772 mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
1774 return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
/*
 * Legacy NV12 UV-plane size: like _calc_yplane_nv12 but with smaller
 * margins and halved (>> 1) because the interleaved CbCr plane holds half
 * the samples of the Y plane.
 */
1778 _calc_uvplane_nv12(int width, int height)
1782 mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
1783 mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
1785 return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
/*
 * New-style NV12 Y-plane size: macroblock-aligned plane size plus the
 * codec's extra per-plane bytes, aligned to the NV12 plane alignment.
 */
1789 _new_calc_yplane_nv12(int width, int height)
1791 return SIZE_ALIGN(_new_calc_plane_nv12(width,
1792 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1793 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
/*
 * New-style NV12 UV-plane size: half the Y-plane size (>> 1, since CbCr is
 * 4:2:0 subsampled) plus the codec's extra per-plane bytes, aligned to the
 * NV12 plane alignment.
 */
1797 _new_calc_uvplane_nv12(int width, int height)
1799 return SIZE_ALIGN((_new_calc_plane_nv12(width,
1800 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1801 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1805 * @brief get the plane data of the surface.
1806 * @param[in] width : the width of the surface
1807 * @param[in] height : the height of the surface
1808 * @param[in] format : the format of the surface
1809 * @param[in] plane_idx : the index of the plane
1810 * @param[out] size : the size of the plane
1811 * @param[out] offset : the offset of the plane
1812 * @param[out] pitch : the pitch of the plane
1813 * @param[out] padding : the padding of the plane
1814 * @return 1 if this function succeeds, otherwise 0.
1817 tbm_exynos_surface_get_plane_data(int width, int height,
1818 tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1819 uint32_t *pitch, int *bo_idx)
/* 16-bpp single-plane RGB formats: pitch = aligned row bytes, size = aligned
 * pitch * height. The alignment globals are tuned per target in init. */
1830 case TBM_FORMAT_XRGB4444:
1831 case TBM_FORMAT_XBGR4444:
1832 case TBM_FORMAT_RGBX4444:
1833 case TBM_FORMAT_BGRX4444:
1834 case TBM_FORMAT_ARGB4444:
1835 case TBM_FORMAT_ABGR4444:
1836 case TBM_FORMAT_RGBA4444:
1837 case TBM_FORMAT_BGRA4444:
1838 case TBM_FORMAT_XRGB1555:
1839 case TBM_FORMAT_XBGR1555:
1840 case TBM_FORMAT_RGBX5551:
1841 case TBM_FORMAT_BGRX5551:
1842 case TBM_FORMAT_ARGB1555:
1843 case TBM_FORMAT_ABGR1555:
1844 case TBM_FORMAT_RGBA5551:
1845 case TBM_FORMAT_BGRA5551:
1846 case TBM_FORMAT_RGB565:
1849 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
1850 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
/* 24-bpp single-plane RGB. */
1854 case TBM_FORMAT_RGB888:
1855 case TBM_FORMAT_BGR888:
1858 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
1859 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
/* 32-bpp single-plane RGB. */
1863 case TBM_FORMAT_XRGB8888:
1864 case TBM_FORMAT_XBGR8888:
1865 case TBM_FORMAT_RGBX8888:
1866 case TBM_FORMAT_BGRX8888:
1867 case TBM_FORMAT_ARGB8888:
1868 case TBM_FORMAT_ABGR8888:
1869 case TBM_FORMAT_RGBA8888:
1870 case TBM_FORMAT_BGRA8888:
1873 _pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
1874 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
/* Packed (single-plane) YUV formats use the YUV pitch alignment instead
 * of the RGB one. */
1879 case TBM_FORMAT_YUYV:
1880 case TBM_FORMAT_YVYU:
1881 case TBM_FORMAT_UYVY:
1882 case TBM_FORMAT_VYUY:
1883 case TBM_FORMAT_AYUV:
1886 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1887 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1893 * index 0 = Y plane, [7:0] Y
1894 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1896 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
/* NV12 sizes take the max of the legacy and new MFC-style calculations so
 * the buffer satisfies both codec paths. */
1898 case TBM_FORMAT_NV12:
1900 if (plane_idx == 0) {
1902 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1903 _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1906 } else if (plane_idx == 1) {
1908 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1909 _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
/* NV21 uses plain aligned sizes; plane 1 starts right after the Y plane. */
1914 case TBM_FORMAT_NV21:
1916 if (plane_idx == 0) {
1918 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1919 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1921 } else if (plane_idx == 1) {
1922 _offset = width * height;
1923 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1924 _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
/* NV16/NV61 (4:2:2 two-plane): chroma plane has full height. */
1929 case TBM_FORMAT_NV16:
1930 case TBM_FORMAT_NV61:
1932 /*if(plane_idx == 0)*/
1935 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1936 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1941 /*else if( plane_idx ==1 )*/
1944 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1945 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1952 * index 0: Y plane, [7:0] Y
1953 * index 1: Cb plane, [7:0] Cb
1954 * index 2: Cr plane, [7:0] Cr
1956 * index 1: Cr plane, [7:0] Cr
1957 * index 2: Cb plane, [7:0] Cb
1961 * NATIVE_BUFFER_FORMAT_YV12
1962 * NATIVE_BUFFER_FORMAT_I420
/* Three-plane 4:1:0: chroma planes are width/4 x height/4. */
1964 case TBM_FORMAT_YUV410:
1965 case TBM_FORMAT_YVU410:
1967 /*if(plane_idx == 0)*/
1970 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1971 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1976 /*else if(plane_idx == 1)*/
1979 _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1980 _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
1985 /*else if (plane_idx == 2)*/
1988 _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1989 _size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
/* Three-plane 4:1:1 / 4:2:0: chroma planes are width/2 x height/2. */
1993 case TBM_FORMAT_YUV411:
1994 case TBM_FORMAT_YVU411:
1995 case TBM_FORMAT_YUV420:
1996 case TBM_FORMAT_YVU420:
1998 /*if(plane_idx == 0)*/
2001 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2002 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2007 /*else if(plane_idx == 1)*/
2010 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2011 _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
2016 /*else if (plane_idx == 2)*/
2019 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2020 _size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
/* Three-plane 4:2:2: chroma planes are width/2 with full height. */
2024 case TBM_FORMAT_YUV422:
2025 case TBM_FORMAT_YVU422:
2027 /*if(plane_idx == 0)*/
2030 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2031 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2036 /*else if(plane_idx == 1)*/
2039 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2040 _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
2045 /*else if (plane_idx == 2)*/
2048 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2049 _size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
/* Three-plane 4:4:4: all planes full width and height. */
2053 case TBM_FORMAT_YUV444:
2054 case TBM_FORMAT_YVU444:
2056 /*if(plane_idx == 0)*/
2059 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2060 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2065 /*else if(plane_idx == 1)*/
2068 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2069 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
2074 /*else if (plane_idx == 2)*/
2077 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2078 _size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
/*
 * Return the TBM memory flags the bo was created/imported with.
 * Returns 0 when the bo or its backend-private data is missing.
 */
2096 tbm_exynos_bo_get_flags(tbm_bo bo)
2098 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
2100 tbm_bo_exynos bo_exynos;
2102 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
2103 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
2105 return bo_exynos->flags_tbm;
/*
 * Bind a native (Wayland) display to this buffer manager: start the DRM
 * auth server on the display using our DRM fd and device name, and remember
 * the display so bufmgr_deinit can shut the auth server down.
 * Returns non-zero on success, 0 on failure.
 */
2109 tbm_exynos_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *native_display)
2111 tbm_bufmgr_exynos bufmgr_exynos;
2113 bufmgr_exynos = tbm_backend_get_priv_from_bufmgr(bufmgr);
2114 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
2116 if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
2117 bufmgr_exynos->device_name, 0)) {
2118 TBM_EXYNOS_ERROR("fail to tbm_drm_helper_wl_server_init\n");
/* Recorded so tbm_exynos_bufmgr_deinit knows to deinit the auth server. */
2122 bufmgr_exynos->bind_display = native_display;
/* TBM module registration: prototype for the init entry point, the module
 * version record, and the tbmModuleData symbol the TBM loader looks up. */
2127 MODULEINITPPROTO(init_tbm_bufmgr_priv);
2129 static TBMModuleVersionInfo ExynosVersRec = {
2135 TBMModuleData tbmModuleData = { &ExynosVersRec, init_tbm_bufmgr_priv};
2138 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
2140 tbm_bufmgr_backend bufmgr_backend;
2141 tbm_bufmgr_exynos bufmgr_exynos;
2147 bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
2148 if (!bufmgr_exynos) {
2149 TBM_EXYNOS_ERROR("fail to alloc bufmgr_exynos!\n");
2153 if (tbm_backend_is_display_server()) {
2154 bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
2155 if (bufmgr_exynos->fd < 0) {
2156 bufmgr_exynos->fd = _tbm_exynos_open_drm();
2157 if (bufmgr_exynos->fd < 0) {
2158 TBM_EXYNOS_ERROR("fail to open drm!\n", getpid());
2163 tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
2165 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2166 if (!bufmgr_exynos->device_name) {
2167 TBM_EXYNOS_ERROR("fail to get device name!\n", getpid());
2169 tbm_drm_helper_unset_tbm_master_fd();
2170 goto fail_get_device_name;
2173 if (_check_render_node()) {
2174 bufmgr_exynos->fd = _get_render_node();
2175 if (bufmgr_exynos->fd < 0) {
2176 TBM_EXYNOS_ERROR("fail to get render node\n");
2177 goto fail_get_render_node;
2179 TBM_EXYNOS_DEBUG("Use render node:%d\n", bufmgr_exynos->fd);
2181 if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
2182 TBM_EXYNOS_ERROR("fail to get auth drm info!\n");
2183 goto fail_get_auth_info;
2186 tbm_drm_helper_set_fd(bufmgr_exynos->fd);
2190 //Check if the tbm manager supports dma fence or not.
2191 fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2194 int length = read(fp, buf, 1);
2196 if (length == 1 && buf[0] == '1')
2197 bufmgr_exynos->use_dma_fence = 1;
2202 if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
2203 TBM_EXYNOS_ERROR("fail to init bufmgr cache state\n");
2204 goto fail_init_cache_state;
2207 /*Create Hash Table*/
2208 bufmgr_exynos->hashBos = drmHashCreate();
2210 bufmgr_backend = tbm_backend_alloc();
2211 if (!bufmgr_backend) {
2212 TBM_EXYNOS_ERROR("fail to alloc backend!\n");
2213 goto fail_alloc_backend;
2216 bufmgr_backend->priv = (void *)bufmgr_exynos;
2217 bufmgr_backend->bufmgr_deinit = tbm_exynos_bufmgr_deinit;
2218 bufmgr_backend->bo_size = tbm_exynos_bo_size;
2219 bufmgr_backend->bo_alloc = tbm_exynos_bo_alloc;
2220 bufmgr_backend->bo_free = tbm_exynos_bo_free;
2221 bufmgr_backend->bo_import = tbm_exynos_bo_import;
2222 bufmgr_backend->bo_import_fd = tbm_exynos_bo_import_fd;
2223 bufmgr_backend->bo_export = tbm_exynos_bo_export;
2224 bufmgr_backend->bo_export_fd = tbm_exynos_bo_export_fd;
2225 bufmgr_backend->bo_get_handle = tbm_exynos_bo_get_handle;
2226 bufmgr_backend->bo_map = tbm_exynos_bo_map;
2227 bufmgr_backend->bo_unmap = tbm_exynos_bo_unmap;
2228 bufmgr_backend->surface_get_plane_data = tbm_exynos_surface_get_plane_data;
2229 bufmgr_backend->surface_supported_format = tbm_exynos_surface_supported_format;
2230 bufmgr_backend->bo_get_flags = tbm_exynos_bo_get_flags;
2231 bufmgr_backend->bo_lock = tbm_exynos_bo_lock;
2232 bufmgr_backend->bo_unlock = tbm_exynos_bo_unlock;
2234 if (tbm_backend_is_display_server() && !_check_render_node())
2235 bufmgr_backend->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
2237 if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
2238 TBM_EXYNOS_ERROR("fail to init backend!\n");
2239 goto fail_init_backend;
2242 /* get the model name from the capi-system-info.
2243 * The alignment_plane and alignment_pitch_rgb is different accoring to the target.
2244 * There will be the stride issue when the right alignment_plane and alignment_pitch_rgb
2245 * is not set to the backend.
2248 if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) {
2249 TBM_EXYNOS_ERROR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n");
2250 TBM_EXYNOS_ERROR("May not set the right value on libtbm-exynos backend.\n");
2252 if (!strncmp(value, "TW1", 4)) {
2253 g_tbm_surface_alignment_plane = 8;
2254 g_tbm_surface_alignment_pitch_rgb = 8;
2255 g_enable_cache_ctrl = 1;
2257 g_tbm_surface_alignment_plane = 64;
2258 g_tbm_surface_alignment_pitch_rgb = 64;
2268 env = getenv("TBM_EXYNOS_DEBUG");
2271 TBM_EXYNOS_ERROR("TBM_EXYNOS_DEBUG=%s\n", env);
2277 TBM_EXYNOS_DEBUG("drm_fd:%d\n", bufmgr_exynos->fd);
2282 tbm_backend_free(bufmgr_backend);
2284 if (bufmgr_exynos->hashBos)
2285 drmHashDestroy(bufmgr_exynos->hashBos);
2286 _bufmgr_deinit_cache_state(bufmgr_exynos);
2287 fail_init_cache_state:
2288 if (tbm_backend_is_display_server())
2289 tbm_drm_helper_unset_tbm_master_fd();
2291 tbm_drm_helper_unset_fd();
2292 fail_get_device_name:
2293 close(bufmgr_exynos->fd);
2295 fail_get_render_node:
2297 free(bufmgr_exynos);