1 /**************************************************************************
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
39 #include <sys/ioctl.h>
40 #include <sys/types.h>
47 #include <tbm_bufmgr.h>
48 #include <tbm_bufmgr_backend.h>
49 #include <exynos_drm.h>
51 #include <tbm_surface.h>
52 #include <tbm_surface_internal.h>
53 #include <tbm_drm_helper.h>
57 #include "tbm_bufmgr_tgl.h"
61 #define TBM_COLOR_FORMAT_COUNT 8
63 #define EXYNOS_DRM_NAME "exynos"
66 #define LOG_TAG "TBM_BACKEND"
/*
 * Fragment of the target_name() helper: reads this process's command line
 * from /proc/self/cmdline once and caches the executable's basename in
 * app_name for use in log prefixes.
 * NOTE(review): the function signature, NULL checks, early-return on
 * 'initialized' and fclose() are elided in this chunk -- confirm against
 * the full source before modifying.
 */
74 static int initialized = 0;
75 static char app_name[128];
80 /* get the application name */
81 f = fopen("/proc/self/cmdline", "r");
/* cmdline entries are NUL-separated, so fgets() captures only argv[0] */
86 memset(app_name, 0x00, sizeof(app_name));
88 if (fgets(app_name, 100, f) == NULL) {
/* strip the directory part: memmove length strlen(slash) copies the
 * suffix after '/' plus its terminating NUL (strlen(slash) == 1 + strlen(slash + 1)) */
95 slash = strrchr(app_name, '/');
97 memmove(app_name, slash + 1, strlen(slash));
/* Error log with a red ANSI color escape and the cached process name.
 * NOTE(review): two definitions of TBM_EXYNOS_LOG appear below; the
 * #ifdef/#else that selects the debug vs. no-op variant is elided in
 * this chunk. */
104 #define TBM_EXYNOS_LOG(fmt, args...) LOGE("\033[31m" "[%s]" fmt "\033[0m", target_name(), ##args)
/* Verbose trace, emitted only when the low bit of bDebug is set */
105 #define DBG(fmt, args...) {if (bDebug&01) LOGE(fmt, ##args);}
107 #define TBM_EXYNOS_LOG(...)
/* Round 'value' up to the next multiple of 'base'; valid only when 'base'
 * is a power of two */
111 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
112 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* NOTE(review): classic double-evaluation hazard -- do not pass arguments
 * with side effects (e.g. MAX(i++, j)) */
113 #define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Surface plane/pitch alignment requirements.
 * NOTE(review): PLANE and PITCH_RGB are each defined twice (8 vs. 64);
 * the #ifdef/#else selecting between them is elided in this chunk. */
116 #define TBM_SURFACE_ALIGNMENT_PLANE (8)
117 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
119 #define TBM_SURFACE_ALIGNMENT_PLANE (64)
120 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (64)
123 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
124 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
/* Sizing/alignment constants for the S5P FIMV (MFC video codec) buffers */
126 #define SZ_1M 0x00100000
127 #define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
128 #define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
/* Macroblocks are 16x16 pixels */
129 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
130 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
131 #define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
/* NV12 macro-tile plane alignment (horizontal/vertical) */
132 #define S5P_FIMV_NV12MT_HALIGN 128
133 #define S5P_FIMV_NV12MT_VALIGN 64
135 /* check condition */
/* Guard macros: log '#cond' and return (optionally with 'val') from the
 * calling function when the condition is false.
 * NOTE(review): the if (!(cond)) test and the return statements of both
 * macros are elided in this chunk; only the log lines are visible. */
136 #define EXYNOS_RETURN_IF_FAIL(cond) {\
138 TBM_EXYNOS_LOG("[%s] : '%s' failed.\n", __func__, #cond);\
143 #define EXYNOS_RETURN_VAL_IF_FAIL(cond, val) {\
145 TBM_EXYNOS_LOG("[%s] : '%s' failed.\n", __func__, #cond);\
/* Reply structure for DMABUF_IOCTL_GET_INFO: reports whether the kernel
 * dmabuf exporter supports fences on this buffer. */
150 struct dma_buf_info {
152 unsigned int fence_supported;
153 unsigned int padding;
/* Access-type bits carried in struct dma_buf_fence.type */
156 #define DMA_BUF_ACCESS_READ 0x1
157 #define DMA_BUF_ACCESS_WRITE 0x2
158 #define DMA_BUF_ACCESS_DMA 0x4
159 #define DMA_BUF_ACCESS_MAX 0x8
/* Max concurrently-held fences tracked per bo (see dma_fence[] below) */
161 #define DMA_FENCE_LIST_MAX 5
/* Argument for DMABUF_IOCTL_GET_FENCE / PUT_FENCE.
 * NOTE(review): member declarations (type/ctx) are elided in this chunk. */
163 struct dma_buf_fence {
/* ioctl numbers of the out-of-tree dmabuf sync driver ('F' ioctl base) */
168 #define DMABUF_IOCTL_BASE 'F'
169 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
171 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
172 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
173 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
/* tgl key of the global flush counter shared by all processes */
176 #define GLOBAL_KEY ((unsigned int)(-1))
178 #define TBM_EXYNOS_CACHE_INV 0x01 /**< cache invalidate */
179 #define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */
180 #define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */
181 #define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */
182 #define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */
/* Last writer of a bo, used by the cache-coherency bookkeeping below:
 * CA devices go through the CPU cache, CO devices bypass it.
 * NOTE(review): the enum's opening line (and DEVICE_NONE, referenced
 * elsewhere in this file) is elided in this chunk. */
186 DEVICE_CA, /* cache aware device */
187 DEVICE_CO /* cache oblivious device */
190 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
/* Per-bo cache state packed into one word ('val', elided here) so it can
 * be persisted/restored through the tgl driver across processes. */
192 union _tbm_bo_cache_state {
195 unsigned int cntFlush: 16; /*Flush all index for sync */
196 unsigned int isCached: 1;
197 unsigned int isDirtied: 2;
201 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
202 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
/* Hash-table payload (PrivGem): refcount plus backend bo, shared by the
 * alloc/import paths keyed on the FLINK name. */
204 typedef struct _exynos_private {
206 struct _tbm_bo_exynos *bo_priv;
209 /* tbm buffer object for exynos */
210 struct _tbm_bo_exynos {
213 unsigned int name; /* FLINK ID */
215 unsigned int gem; /* GEM Handle */
217 unsigned int dmabuf; /* fd for dmabuf */
219 void *pBase; /* virtual address */
/* original tbm flags and their exynos translation (kept for export) */
223 unsigned int flags_exynos;
224 unsigned int flags_tbm;
/* serializes the dma_fence[] list updates in lock/unlock */
228 pthread_mutex_t mutex;
229 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
/* cache bookkeeping; map_cnt gates set/save of cache state on map/unmap */
233 tbm_bo_cache_state cache_state;
234 unsigned int map_cnt;
237 /* tbm bufmgr private for exynos */
238 struct _tbm_bufmgr_exynos {
/* device-name strings indexed by TBM_DEVICE_* for debug logs */
251 char *STR_DEVICE[] = {
/* color formats advertised by this backend (TBM_COLOR_FORMAT_COUNT entries) */
267 uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = { TBM_FORMAT_RGBA8888,
277 #ifdef ENABLE_CACHECRTL
/* Create/initialize a tgl lock object for 'key' on the tgl device fd.
 * Returns nonzero on success (return statements elided in this chunk). */
279 _tgl_init(int fd, unsigned int key)
281 struct tgl_attribute attr;
/* bounded wait so a dead holder cannot block us forever */
285 attr.timeout_ms = 1000;
287 err = ioctl(fd, TGL_IOC_INIT_LOCK, &attr);
289 TBM_EXYNOS_LOG("[libtbm:%d] "
290 "error(%s) %s:%d key:%d\n",
291 getpid(), strerror(errno), __func__, __LINE__, key);
/* Destroy the tgl lock object identified by 'key'. Logs on ioctl failure. */
299 _tgl_destroy(int fd, unsigned int key)
303 err = ioctl(fd, TGL_IOC_DESTROY_LOCK, key);
305 TBM_EXYNOS_LOG("[libtbm:%d] "
306 "error(%s) %s:%d key:%d\n",
307 getpid(), strerror(errno), __func__, __LINE__, key);
/* Store 'val' in the tgl slot for 'key' (cross-process shared data used
 * to persist per-bo cache state). Logs on ioctl failure. */
314 _tgl_set_data(int fd, unsigned int key, unsigned int val)
318 struct tgl_user_data arg;
322 err = ioctl(fd, TGL_IOC_SET_DATA, &arg);
324 TBM_EXYNOS_LOG("[libtbm:%d] "
325 "error(%s) %s:%d key:%d\n",
326 getpid(), strerror(errno), __func__, __LINE__, key);
/* Read back the value previously stored for 'key' via _tgl_set_data().
 * Logs on ioctl failure (return statements elided in this chunk). */
333 static inline unsigned int
334 _tgl_get_data(int fd, unsigned int key)
337 struct tgl_user_data arg = { 0, };
340 err = ioctl(fd, TGL_IOC_GET_DATA, &arg);
342 TBM_EXYNOS_LOG("[libtbm:%d] "
343 "error(%s) %s:%d key:%d\n",
344 getpid(), strerror(errno), __func__, __LINE__, key);
/* Ask the exynos DRM driver to perform cache maintenance for one bo
 * (or for all caches when bo_exynos is NULL / CACHE_ALL is set).
 * 'flags' is a TBM_EXYNOS_CACHE_* bitmask translated below into
 * EXYNOS_DRM_CACHE_* operation flags for DRM_EXYNOS_GEM_CACHE_OP. */
352 _exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
354 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
356 /* cache flush is managed by kernel side when using dma-fence. */
357 if (bufmgr_exynos->use_dma_fence)
360 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
362 struct drm_exynos_gem_cache_op cache_op = {0, };
365 /* if bo_exynos is null, do cache_flush_all */
/* NOTE(review): double cast narrows the pointer to 32 bits first --
 * assumes a 32-bit userspace; would truncate on 64-bit builds */
368 cache_op.usr_addr = (uint64_t)((uint32_t)bo_exynos->pBase);
369 cache_op.size = bo_exynos->size;
371 flags = TBM_EXYNOS_CACHE_FLUSH_ALL;
373 cache_op.usr_addr = 0;
/* map the TBM flag bits onto range-vs-all kernel cache operations */
377 if (flags & TBM_EXYNOS_CACHE_INV) {
378 if (flags & TBM_EXYNOS_CACHE_ALL)
379 cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
381 cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
384 if (flags & TBM_EXYNOS_CACHE_CLN) {
385 if (flags & TBM_EXYNOS_CACHE_ALL)
386 cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
388 cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
391 if (flags & TBM_EXYNOS_CACHE_ALL)
392 cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
394 ret = drmCommandWriteRead(fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
397 TBM_EXYNOS_LOG("error fail to flush the cache.\n");
/* Initialize cache bookkeeping for a bo: create its tgl slot (keyed by
 * FLINK name) and reset state to "clean, uncached, not dirtied".
 * No-op when dma-fence handles coherency, or without ENABLE_CACHECRTL.
 * NOTE(review): the 'import' parameter is unused in the visible lines;
 * the import-specific path may be elided in this chunk. */
406 _bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
408 #ifdef ENABLE_CACHECRTL
409 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
410 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
412 if (bufmgr_exynos->use_dma_fence)
415 tbm_bo_cache_state cache_state;
417 _tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
420 cache_state.data.isDirtied = DEVICE_NONE;
421 cache_state.data.isCached = 0;
422 cache_state.data.cntFlush = 0;
424 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
/* Called on first map of a bo: reloads its shared cache state from tgl,
 * decides what cache maintenance (invalidate/clean) is needed for the
 * requested device/option, updates isCached/isDirtied, and performs the
 * flush. Skipped for non-cachable bos and when dma-fence is in use.
 * NOTE(review): several branch/else lines are elided in this chunk, so
 * the exact pairing of conditions below should be confirmed against the
 * full source. */
432 _bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
434 #ifdef ENABLE_CACHECRTL
435 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
436 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
438 if (bufmgr_exynos->use_dma_fence)
442 unsigned short cntFlush = 0;
444 if (bo_exynos->flags_exynos & EXYNOS_BO_NONCACHABLE)
447 /* get cache state of a bo */
448 bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
451 /* get global cache flush count */
452 cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
/* CPU access: if a cache-oblivious device wrote while the CPU cache held
 * data, stale lines must be invalidated before the CPU reads */
454 if (opt == TBM_DEVICE_CPU) {
455 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
456 bo_exynos->cache_state.data.isCached)
457 need_flush = TBM_EXYNOS_CACHE_INV;
459 bo_exynos->cache_state.data.isCached = 1;
460 if (opt & TBM_OPTION_WRITE)
461 bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
463 if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
464 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
/* device access: if the CPU dirtied cached data and no global flush has
 * happened since (cntFlush matches), clean the whole cache first */
467 if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
468 bo_exynos->cache_state.data.isCached &&
469 bo_exynos->cache_state.data.cntFlush == cntFlush)
470 need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;
472 if (opt & TBM_OPTION_WRITE)
473 bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
475 if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
476 bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
/* a flush-all bumps the global counter so other processes can detect it */
481 if (need_flush & TBM_EXYNOS_CACHE_ALL)
482 _tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
484 /* call cache flush */
485 _exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
487 DBG("[libtbm:%d] \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
489 bo_exynos->cache_state.data.isCached,
490 bo_exynos->cache_state.data.isDirtied,
/* Called on last unmap of a bo: snapshot the current global flush count
 * into the bo's cache state and persist it to the bo's tgl slot so other
 * processes observe a consistent state. No-op with dma-fence. */
500 _bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
502 #ifdef ENABLE_CACHECRTL
503 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
504 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
506 if (bufmgr_exynos->use_dma_fence)
509 unsigned short cntFlush = 0;
511 /* get global cache flush count */
512 cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
514 /* save global cache flush count */
515 bo_exynos->cache_state.data.cntFlush = cntFlush;
516 _tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
517 bo_exynos->cache_state.val);
/* Tear down the bo's tgl lock/data slot when the bo is freed.
 * No-op with dma-fence or without ENABLE_CACHECRTL. */
524 _bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
526 #ifdef ENABLE_CACHECRTL
527 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
528 EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
530 if (bufmgr_exynos->use_dma_fence)
533 _tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
/* Open the tgl device (primary path, then fallback path) and initialize
 * the GLOBAL_KEY slot that holds the cross-process flush counter.
 * NOTE(review): error-return lines after the close() calls are elided in
 * this chunk. */
538 _bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
540 #ifdef ENABLE_CACHECRTL
541 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
543 if (bufmgr_exynos->use_dma_fence)
546 /* open tgl fd for saving cache flush data */
547 bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
549 if (bufmgr_exynos->tgl_fd < 0) {
/* primary node missing; try the alternate device path */
550 bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);
551 if (bufmgr_exynos->tgl_fd < 0) {
552 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
553 "error: Fail to open global_lock:%s\n",
554 getpid(), tgl_devfile);
556 close(bufmgr_exynos->tgl_fd);
561 if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
562 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
563 "error: Fail to initialize the tgl\n",
566 close(bufmgr_exynos->tgl_fd);
/* Close the tgl fd opened by _bufmgr_init_cache_state(), if any.
 * No-op with dma-fence or without ENABLE_CACHECRTL. */
575 _bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
577 #ifdef ENABLE_CACHECRTL
578 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
580 if (bufmgr_exynos->use_dma_fence)
583 if (bufmgr_exynos->tgl_fd >= 0)
584 close(bufmgr_exynos->tgl_fd);
/* Open the exynos DRM device. First tries drmOpen("exynos"); on failure
 * falls back to enumerating DRM card nodes via udev, matching the one
 * whose parent sysname is "exynos-drm", and opening its devnode.
 * Returns the open fd (return/cleanup lines partly elided in this chunk). */
589 _tbm_exynos_open_drm()
593 fd = drmOpen(EXYNOS_DRM_NAME, NULL);
595 TBM_EXYNOS_LOG ("[libtbm-exynos:%d] "
596 "warning %s:%d fail to open drm\n",
597 getpid(), __FUNCTION__, __LINE__);
601 struct udev *udev = NULL;
602 struct udev_enumerate *e = NULL;
603 struct udev_list_entry *entry = NULL;
604 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
605 const char *filepath;
610 TBM_EXYNOS_LOG ("[libtbm-exynos:%d] "
611 "%s:%d search drm-device by udev\n",
612 getpid(), __FUNCTION__, __LINE__);
616 TBM_EXYNOS_LOG("udev_new() failed.\n");
/* enumerate primary nodes: subsystem "drm", sysname "card<N>" */
620 e = udev_enumerate_new(udev);
621 udev_enumerate_add_match_subsystem(e, "drm");
622 udev_enumerate_add_match_sysname(e, "card[0-9]*");
623 udev_enumerate_scan_devices(e);
625 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
626 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
627 udev_list_entry_get_name(entry));
628 device_parent = udev_device_get_parent(device);
629 /* Not need unref device_parent. device_parent and device have same refcnt */
631 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
633 DBG("[%s] Found render device: '%s' (%s)\n",
635 udev_device_get_syspath(drm_device),
636 udev_device_get_sysname(device_parent));
640 udev_device_unref(device);
643 udev_enumerate_unref(e);
645 /* Get device file path. */
646 filepath = udev_device_get_devnode(drm_device);
648 TBM_EXYNOS_LOG("udev_device_get_devnode() failed.\n");
649 udev_device_unref(drm_device);
654 /* Open DRM device file and check validity. */
/* NOTE(review): format string has %s but no matching argument (filepath
 * is missing) -- undefined behavior when this log fires */
655 fd = open(filepath, O_RDWR | O_CLOEXEC);
657 TBM_EXYNOS_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
658 udev_device_unref(drm_device);
/* NOTE(review): same missing-argument defect on the fstat log below */
665 TBM_EXYNOS_LOG("fstat() failed %s.\n");
666 udev_device_unref(drm_device);
671 udev_device_unref(drm_device);
/* Probe via udev whether an exynos DRM *render* node (renderD<N>) exists.
 * Same enumeration pattern as _tbm_exynos_open_drm() but does not open
 * the device; used to decide between render-node and flink code paths.
 * (Return statements are elided in this chunk.) */
679 _check_render_node(void)
681 struct udev *udev = NULL;
682 struct udev_enumerate *e = NULL;
683 struct udev_list_entry *entry = NULL;
684 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
688 TBM_EXYNOS_LOG("udev_new() failed.\n");
692 e = udev_enumerate_new(udev);
693 udev_enumerate_add_match_subsystem(e, "drm");
694 udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
695 udev_enumerate_scan_devices(e);
697 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
698 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
699 udev_list_entry_get_name(entry));
700 device_parent = udev_device_get_parent(device);
701 /* Not need unref device_parent. device_parent and device have same refcnt */
703 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
705 DBG("[%s] Found render device: '%s' (%s)\n",
707 udev_device_get_syspath(drm_device),
708 udev_device_get_sysname(device_parent));
712 udev_device_unref(device);
715 udev_enumerate_unref(e);
719 udev_device_unref(drm_device);
723 udev_device_unref(drm_device);
/* Find and open the exynos DRM render node (renderD<N>) via udev.
 * Mirrors _tbm_exynos_open_drm()'s fallback path but matches render
 * nodes instead of card nodes; returns the open fd. */
728 _get_render_node(void)
730 struct udev *udev = NULL;
731 struct udev_enumerate *e = NULL;
732 struct udev_list_entry *entry = NULL;
733 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
734 const char *filepath;
741 TBM_EXYNOS_LOG("udev_new() failed.\n");
745 e = udev_enumerate_new(udev);
746 udev_enumerate_add_match_subsystem(e, "drm");
747 udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
748 udev_enumerate_scan_devices(e);
750 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
751 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
752 udev_list_entry_get_name(entry));
753 device_parent = udev_device_get_parent(device);
754 /* Not need unref device_parent. device_parent and device have same refcnt */
756 if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
758 DBG("[%s] Found render device: '%s' (%s)\n",
760 udev_device_get_syspath(drm_device),
761 udev_device_get_sysname(device_parent));
765 udev_device_unref(device);
768 udev_enumerate_unref(e);
770 /* Get device file path. */
771 filepath = udev_device_get_devnode(drm_device);
773 TBM_EXYNOS_LOG("udev_device_get_devnode() failed.\n");
774 udev_device_unref(drm_device);
779 /* Open DRM device file and check validity. */
/* NOTE(review): the two logs below have a %s with no matching argument --
 * undefined behavior when they fire; should pass filepath/strerror(errno) */
780 fd = open(filepath, O_RDWR | O_CLOEXEC);
782 TBM_EXYNOS_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
783 udev_device_unref(drm_device);
790 TBM_EXYNOS_LOG("fstat() failed %s.\n");
791 udev_device_unref(drm_device);
797 udev_device_unref(drm_device);
/* Translate TBM_BO_* allocation flags into EXYNOS_BO_* flags:
 * scanout buffers must be physically contiguous; memory type maps
 * WC -> WC, NONCACHABLE -> NONCACHABLE, otherwise cachable.
 * (The 'else' lines and final return are elided in this chunk.) */
804 _get_exynos_flag_from_tbm(unsigned int ftbm)
806 unsigned int flags = 0;
808 if (ftbm & TBM_BO_SCANOUT)
809 flags |= EXYNOS_BO_CONTIG;
811 flags |= EXYNOS_BO_NONCONTIG;
813 if (ftbm & TBM_BO_WC)
814 flags |= EXYNOS_BO_WC;
815 else if (ftbm & TBM_BO_NONCACHABLE)
816 flags |= EXYNOS_BO_NONCACHABLE;
818 flags |= EXYNOS_BO_CACHABLE;
/* Inverse of _get_exynos_flag_from_tbm(): recover TBM_BO_* flags from
 * EXYNOS_BO_* flags on import (noncontig -> default, contig -> scanout;
 * cachable -> default). The 'else' lines and the TBM_BO_WC branch body
 * are elided in this chunk. */
824 _get_tbm_flag_from_exynos(unsigned int fexynos)
826 unsigned int flags = 0;
828 if (fexynos & EXYNOS_BO_NONCONTIG)
829 flags |= TBM_BO_DEFAULT;
831 flags |= TBM_BO_SCANOUT;
833 if (fexynos & EXYNOS_BO_WC)
835 else if (fexynos & EXYNOS_BO_CACHABLE)
836 flags |= TBM_BO_DEFAULT;
838 flags |= TBM_BO_NONCACHABLE;
/* Get the global FLINK name for a GEM handle via DRM_IOCTL_GEM_FLINK.
 * The name is used as the cross-process hash key for bos. */
844 _get_name(int fd, unsigned int gem)
846 struct drm_gem_flink arg = {0,};
849 if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
850 TBM_EXYNOS_LOG("error fail to get flink from gem:%d (DRM_IOCTL_GEM_FLINK)\n",
855 return (unsigned int)arg.name;
/* Produce a tbm_bo_handle for the requested device type:
 * DEFAULT -> GEM handle; CPU -> lazily mmap via DRM_EXYNOS_GEM_MAP and
 * cache the pointer in pBase; 3D/MM -> lazily export a dma-buf fd via
 * DRM_IOCTL_PRIME_HANDLE_TO_FD and cache it in 'dmabuf'.
 * NOTE(review): the case labels for the 3D/MM branches are elided in this
 * chunk; the mapping above is inferred only partially from visible code. */
859 _exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
861 tbm_bo_handle bo_handle;
863 memset(&bo_handle, 0x0, sizeof(uint64_t));
866 case TBM_DEVICE_DEFAULT:
868 bo_handle.u32 = (uint32_t)bo_exynos->gem;
/* CPU mapping is created once and kept for the bo's lifetime */
871 if (!bo_exynos->pBase) {
872 struct drm_exynos_gem_map arg = {0,};
875 arg.handle = bo_exynos->gem;
876 if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
878 TBM_EXYNOS_LOG("error Cannot map_dumb gem=%d\n", bo_exynos->gem);
879 return (tbm_bo_handle) NULL;
882 map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
883 bo_exynos->fd, arg.offset);
884 if (map == MAP_FAILED) {
885 TBM_EXYNOS_LOG("error Cannot usrptr gem=%d\n", bo_exynos->gem);
886 return (tbm_bo_handle) NULL;
888 bo_exynos->pBase = map;
890 bo_handle.ptr = (void *)bo_exynos->pBase;
894 if (bo_exynos->dmabuf) {
895 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
/* export dma-buf fd on first request; fd is owned by the bo (closed in free) */
899 if (!bo_exynos->dmabuf) {
900 struct drm_prime_handle arg = {0, };
902 arg.handle = bo_exynos->gem;
903 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
904 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
905 return (tbm_bo_handle) NULL;
907 bo_exynos->dmabuf = arg.fd;
910 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
/* second (duplicate) export path -- presumably a different TBM_DEVICE_*
 * case label, elided in this chunk */
914 if (!bo_exynos->dmabuf) {
915 struct drm_prime_handle arg = {0, };
917 arg.handle = bo_exynos->gem;
918 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
919 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
920 return (tbm_bo_handle) NULL;
922 bo_exynos->dmabuf = arg.fd;
925 bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
928 TBM_EXYNOS_LOG("error Not supported device:%d\n", device);
929 bo_handle.ptr = (void *) NULL;
/* Backend callback: return the byte size of a bo (0 on bad arguments). */
937 tbm_exynos_bo_size(tbm_bo bo)
939 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
941 tbm_bo_exynos bo_exynos;
943 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
944 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
946 return bo_exynos->size;
/* Backend callback: allocate a new GEM buffer of 'size' bytes with the
 * given TBM flags. Creates the GEM object, resolves its FLINK name,
 * initializes cache state, optionally pre-exports a dma-buf fd (when the
 * bufmgr uses dma-fence), and registers the bo in the name-keyed hash.
 * Returns the backend bo pointer, or NULL-ish on failure (error-cleanup
 * lines are elided in this chunk). */
950 tbm_exynos_bo_alloc(tbm_bo bo, int size, int flags)
952 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
954 tbm_bo_exynos bo_exynos;
955 tbm_bufmgr_exynos bufmgr_exynos;
956 unsigned int exynos_flags;
958 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
959 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
961 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
963 TBM_EXYNOS_LOG("error fail to allocate the bo private\n");
/* scanout normally forces contiguous memory; the (elided) condition here
 * appears to downgrade it to NONCONTIG in some configuration */
967 exynos_flags = _get_exynos_flag_from_tbm(flags);
968 if ((flags & TBM_BO_SCANOUT) &&
970 exynos_flags |= EXYNOS_BO_NONCONTIG;
973 struct drm_exynos_gem_create arg = {0, };
975 arg.size = (uint64_t)size;
976 arg.flags = exynos_flags;
977 if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
979 TBM_EXYNOS_LOG("error Cannot create bo(flag:%x, size:%d)\n", arg.flags,
980 (unsigned int)arg.size);
985 bo_exynos->fd = bufmgr_exynos->fd;
986 bo_exynos->gem = arg.handle;
987 bo_exynos->size = size;
988 bo_exynos->flags_tbm = flags;
989 bo_exynos->flags_exynos = exynos_flags;
990 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
992 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
993 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
998 pthread_mutex_init(&bo_exynos->mutex, NULL);
/* with dma-fence the dmabuf fd is needed for GET/PUT_FENCE ioctls, so
 * export it eagerly here */
1000 if (bufmgr_exynos->use_dma_fence
1001 && !bo_exynos->dmabuf) {
1002 struct drm_prime_handle arg = {0, };
1004 arg.handle = bo_exynos->gem;
1005 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1006 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
1010 bo_exynos->dmabuf = arg.fd;
1013 /* add bo to hash */
1014 PrivGem *privGem = calloc(1, sizeof(PrivGem));
1017 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1018 "error %s:%d Fail to calloc privGem\n",
1019 getpid(), __func__, __LINE__);
1024 privGem->ref_count = 1;
1025 privGem->bo_priv = bo_exynos;
1027 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1028 (void *)privGem) < 0) {
1029 TBM_EXYNOS_LOG("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1032 DBG(" [%s] bo:%p, gem:%d(%d), flags:%d(%d), size:%d\n", target_name(),
1034 bo_exynos->gem, bo_exynos->name,
1035 flags, exynos_flags,
1038 return (void *)bo_exynos;
/* Backend callback: release a bo. Unmaps the CPU mapping, closes the
 * dma-buf fd, drops the hash refcount (removing the entry at zero),
 * destroys cache state, and closes the GEM handle. */
1042 tbm_exynos_bo_free(tbm_bo bo)
1044 tbm_bo_exynos bo_exynos;
1045 tbm_bufmgr_exynos bufmgr_exynos;
1050 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1051 EXYNOS_RETURN_IF_FAIL(bufmgr_exynos != NULL);
1053 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1054 EXYNOS_RETURN_IF_FAIL(bo_exynos != NULL);
1056 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, size:%d\n", target_name(),
1058 bo_exynos->gem, bo_exynos->name,
/* tear down the lazy CPU mapping created in _exynos_bo_handle() */
1062 if (bo_exynos->pBase) {
1063 if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
1064 TBM_EXYNOS_LOG("error bo:%p fail to munmap(%s)\n",
1065 bo, strerror(errno));
/* close the exported dma-buf fd; 0 doubles as the "not exported" sentinel */
1070 if (bo_exynos->dmabuf) {
1071 close(bo_exynos->dmabuf);
1072 bo_exynos->dmabuf = 0;
1075 /* delete bo from hash */
1076 PrivGem *privGem = NULL;
1079 ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name,
1082 privGem->ref_count--;
1083 if (privGem->ref_count == 0) {
1084 drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
1089 TBM_EXYNOS_LOG("warning Cannot find bo to Hash(%d), ret=%d\n", bo_exynos->name,
1093 _bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
1095 /* Free gem handle */
1096 struct drm_gem_close arg = {0, };
1098 memset(&arg, 0, sizeof(arg));
1099 arg.handle = bo_exynos->gem;
1100 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
1101 TBM_EXYNOS_LOG("error bo:%p fail to gem close.(%s)\n",
1102 bo, strerror(errno));
/* Backend callback: import a bo by its global FLINK name ('key').
 * If the name is already in the hash, the existing backend bo is reused
 * (refcount bump elided in this chunk); otherwise the GEM object is
 * opened, its exynos flags queried, cache state initialized, and a new
 * hash entry registered. */
1110 tbm_exynos_bo_import(tbm_bo bo, unsigned int key)
1112 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1114 tbm_bufmgr_exynos bufmgr_exynos;
1115 tbm_bo_exynos bo_exynos;
1116 PrivGem *privGem = NULL;
1119 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1120 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
/* fast path: same name already imported in this process */
1122 ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&privGem);
1124 return privGem->bo_priv;
1127 struct drm_gem_open arg = {0, };
1128 struct drm_exynos_gem_info info = {0, };
1131 if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1132 TBM_EXYNOS_LOG("error Cannot open gem name=%d\n", key);
/* query exynos-specific flags (contig/cachable) for the imported object */
1136 info.handle = arg.handle;
1137 if (drmCommandWriteRead(bufmgr_exynos->fd,
1140 sizeof(struct drm_exynos_gem_info))) {
1141 TBM_EXYNOS_LOG("error Cannot get gem info=%d\n", key);
1145 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1147 TBM_EXYNOS_LOG("error fail to allocate the bo private\n");
1151 bo_exynos->fd = bufmgr_exynos->fd;
1152 bo_exynos->gem = arg.handle;
1153 bo_exynos->size = arg.size;
1154 bo_exynos->flags_exynos = info.flags;
1155 bo_exynos->name = key;
1156 bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1158 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1159 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
/* pre-export the dma-buf fd (guard condition elided in this chunk;
 * presumably only when use_dma_fence is set, as in bo_alloc) */
1164 if (!bo_exynos->dmabuf) {
1165 struct drm_prime_handle arg = {0, };
1167 arg.handle = bo_exynos->gem;
1168 if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1169 TBM_EXYNOS_LOG("error Cannot dmabuf=%d\n", bo_exynos->gem);
1173 bo_exynos->dmabuf = arg.fd;
1176 /* add bo to hash */
1179 privGem = calloc(1, sizeof(PrivGem));
1181 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1182 "error %s:%d Fail to calloc privGem\n",
1183 getpid(), __func__, __LINE__);
1188 privGem->ref_count = 1;
1189 privGem->bo_priv = bo_exynos;
1191 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1192 (void *)privGem) < 0) {
1193 TBM_EXYNOS_LOG("error Cannot insert bo to Hash(%d)\n", bo_exynos->name);
1196 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1199 bo_exynos->gem, bo_exynos->name,
1201 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1204 return (void *)bo_exynos;
/* Backend callback: import a bo from a dma-buf fd ('key').
 * Converts the fd to a GEM handle, resolves the FLINK name, and reuses
 * an existing hash entry if the same GEM object was already imported.
 * Otherwise determines the size (lseek on the fd, falling back to the
 * driver-reported size), builds a new backend bo, and registers it. */
1208 tbm_exynos_bo_import_fd(tbm_bo bo, tbm_fd key)
1210 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1212 tbm_bufmgr_exynos bufmgr_exynos;
1213 tbm_bo_exynos bo_exynos;
1214 PrivGem *privGem = NULL;
1218 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1219 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1221 /*getting handle from fd*/
1222 unsigned int gem = 0;
1223 struct drm_prime_handle arg = {0, };
1227 if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1228 TBM_EXYNOS_LOG("error bo:%p Cannot get gem handle from fd:%d (%s)\n",
1229 bo, arg.fd, strerror(errno));
1234 name = _get_name(bufmgr_exynos->fd, gem);
1236 TBM_EXYNOS_LOG("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1237 bo, gem, key, strerror(errno));
/* dedupe on (name, gem): identical object already imported in-process */
1241 ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&privGem);
1243 if (gem == privGem->bo_priv->gem) {
1244 return privGem->bo_priv;
/* NOTE(review): real_size is unsigned but initialized to -1 and compared
 * with -1 below -- works via implicit conversion, but fragile */
1248 unsigned int real_size = -1;
1249 struct drm_exynos_gem_info info = {0, };
1251 /* Determine size of bo. The fd-to-handle ioctl really should
1252 * return the size, but it doesn't. If we have kernel 3.12 or
1253 * later, we can lseek on the prime fd to get the size. Older
1254 * kernels will just fail, in which case we fall back to the
1255 * provided (estimated or guess size).
1257 real_size = lseek(key, 0, SEEK_END);
1260 if (drmCommandWriteRead(bufmgr_exynos->fd,
1263 sizeof(struct drm_exynos_gem_info))) {
1264 TBM_EXYNOS_LOG("error bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
1265 bo, gem, key, strerror(errno));
1269 if (real_size == -1)
1270 real_size = info.size;
1272 bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1274 TBM_EXYNOS_LOG("error bo:%p fail to allocate the bo private\n", bo);
1278 bo_exynos->fd = bufmgr_exynos->fd;
1279 bo_exynos->gem = gem;
1280 bo_exynos->size = real_size;
1281 bo_exynos->flags_exynos = info.flags;
1282 bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1283 bo_exynos->name = name;
1285 if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1286 TBM_EXYNOS_LOG("error fail init cache state(%d)\n", bo_exynos->name);
1291 /* add bo to hash */
1294 privGem = calloc(1, sizeof(PrivGem));
1296 TBM_EXYNOS_LOG("[libtbm-exynos:%d] "
1297 "error %s:%d Fail to calloc privGem\n",
1298 getpid(), __func__, __LINE__);
1303 privGem->ref_count = 1;
1304 privGem->bo_priv = bo_exynos;
1306 if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name,
1307 (void *)privGem) < 0) {
1308 TBM_EXYNOS_LOG("error bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1309 bo, bo_exynos->name, gem, key);
1312 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1315 bo_exynos->gem, bo_exynos->name,
1318 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1321 return (void *)bo_exynos;
/* Backend callback: export a bo as a global FLINK name, resolving it
 * lazily via DRM_IOCTL_GEM_FLINK if not yet known. Returns 0 on failure
 * (the early-return line is elided in this chunk). */
1325 tbm_exynos_bo_export(tbm_bo bo)
1327 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1329 tbm_bo_exynos bo_exynos;
1331 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1332 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1334 if (!bo_exynos->name) {
1335 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1336 if (!bo_exynos->name) {
1337 TBM_EXYNOS_LOG("error Cannot get name\n");
1342 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1345 bo_exynos->gem, bo_exynos->name,
1347 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1350 return (unsigned int)bo_exynos->name;
/* Backend callback: export a bo as a dma-buf fd via
 * DRM_IOCTL_PRIME_HANDLE_TO_FD. On ioctl failure the (negative) drmIoctl
 * return value is cast to tbm_fd and returned; callers receive a fresh
 * fd they own on success. */
1354 tbm_exynos_bo_export_fd(tbm_bo bo)
1356 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1358 tbm_bo_exynos bo_exynos;
1361 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1362 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, -1);
1364 struct drm_prime_handle arg = {0, };
1366 arg.handle = bo_exynos->gem;
1367 ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1369 TBM_EXYNOS_LOG("error bo:%p Cannot dmabuf=%d (%s)\n",
1370 bo, bo_exynos->gem, strerror(errno));
1371 return (tbm_fd) ret;
1374 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1377 bo_exynos->gem, bo_exynos->name,
1380 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1383 return (tbm_fd)arg.fd;
/* Backend callback: get a device-specific handle (GEM id, CPU pointer,
 * or dma-buf fd) for the bo without changing cache state; the actual
 * translation is delegated to _exynos_bo_handle(). */
1386 static tbm_bo_handle
1387 tbm_exynos_bo_get_handle(tbm_bo bo, int device)
1389 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1391 tbm_bo_handle bo_handle;
1392 tbm_bo_exynos bo_exynos;
1394 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1395 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1397 if (!bo_exynos->gem) {
1398 TBM_EXYNOS_LOG("error Cannot map gem=%d\n", bo_exynos->gem);
1399 return (tbm_bo_handle) NULL;
1402 DBG("[%s] bo:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1405 bo_exynos->gem, bo_exynos->name,
1407 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1409 STR_DEVICE[device]);
1411 /*Get mapped bo_handle*/
1412 bo_handle = _exynos_bo_handle(bo_exynos, device);
1413 if (bo_handle.ptr == NULL) {
1414 TBM_EXYNOS_LOG("error Cannot get handle: gem:%d, device:%d\n", bo_exynos->gem,
1416 return (tbm_bo_handle) NULL;
/* Backend callback: map the bo for 'device' with access options 'opt'.
 * Obtains the device handle via _exynos_bo_handle() and, on the first
 * concurrent map (map_cnt == 0), runs the cache-coherency protocol
 * (_bo_set_cache_state) before counting the mapping. */
1422 static tbm_bo_handle
1423 tbm_exynos_bo_map(tbm_bo bo, int device, int opt)
1425 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1427 tbm_bo_handle bo_handle;
1428 tbm_bo_exynos bo_exynos;
1429 tbm_bufmgr_exynos bufmgr_exynos;
1431 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1432 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, (tbm_bo_handle)NULL);
1434 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1435 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, (tbm_bo_handle) NULL);
1437 if (!bo_exynos->gem) {
1438 TBM_EXYNOS_LOG("error Cannot map gem=%d\n", bo_exynos->gem);
1439 return (tbm_bo_handle) NULL;
1442 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d, %s, %s\n", target_name(),
1444 bo_exynos->gem, bo_exynos->name,
1449 /*Get mapped bo_handle*/
1450 bo_handle = _exynos_bo_handle(bo_exynos, device);
1451 if (bo_handle.ptr == NULL) {
1452 TBM_EXYNOS_LOG("error Cannot get handle: gem:%d, device:%d, opt:%d\n",
1453 bo_exynos->gem, device, opt);
1454 return (tbm_bo_handle) NULL;
/* cache maintenance only on the first mapping; nested maps just count */
1457 if (bo_exynos->map_cnt == 0)
1458 _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
1460 bo_exynos->map_cnt++;
/* Backend callback: unmap the bo. Decrements map_cnt and, when the last
 * mapping goes away, persists the bo's cache state via
 * _bo_save_cache_state(). The CPU mapping itself stays alive until free. */
1466 tbm_exynos_bo_unmap(tbm_bo bo)
1468 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1470 tbm_bo_exynos bo_exynos;
1471 tbm_bufmgr_exynos bufmgr_exynos;
1473 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1474 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1476 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1477 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1479 if (!bo_exynos->gem)
1482 bo_exynos->map_cnt--;
1484 if (bo_exynos->map_cnt == 0)
1485 _bo_save_cache_state(bufmgr_exynos, bo_exynos);
1487 DBG(" [%s] bo:%p, gem:%d(%d), fd:%d\n", target_name(),
1489 bo_exynos->gem, bo_exynos->name,
/* Lock a buffer object for TBM_DEVICE_3D or TBM_DEVICE_CPU access.
 * For 3D: acquires a kernel dma-buf fence (DMABUF_IOCTL_GET_FENCE) and
 * records it in the bo's dma_fence list (released later in
 * tbm_exynos_bo_unlock).  For CPU: takes an advisory record lock
 * (fcntl F_SETLKW) on the dmabuf fd.  The whole body is compiled out
 * when ALWAYS_BACKEND_CTRL is defined.
 * NOTE(review): sampled chunk — several lines (else branches, error
 * returns, closing braces) are omitted from this view. */
1496 tbm_exynos_bo_lock(tbm_bo bo, int device, int opt)
1498 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1500 #ifndef ALWAYS_BACKEND_CTRL
1501 tbm_bufmgr_exynos bufmgr_exynos;
1502 tbm_bo_exynos bo_exynos;
1503 struct dma_buf_fence fence;
1504 struct flock filelock;
/* Only the 3D and CPU device types are lockable by this backend. */
1507 if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1508 DBG("[libtbm-exynos:%d] %s not support device type,\n", getpid(),
1513 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1514 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
1516 bufmgr_exynos = (tbm_bufmgr_exynos)tbm_backend_get_bufmgr_priv(bo);
1517 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
1519 memset(&fence, 0, sizeof(struct dma_buf_fence));
1521 /* Check if the given type is valid or not. */
1522 if (opt & TBM_OPTION_WRITE) {
1523 if (device == TBM_DEVICE_3D)
1524 fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1525 } else if (opt & TBM_OPTION_READ) {
1526 if (device == TBM_DEVICE_3D)
1527 fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1529 TBM_EXYNOS_LOG("error Invalid argument\n");
1533 /* Check if the tbm manager supports dma fence or not. */
1534 if (!bufmgr_exynos->use_dma_fence) {
1535 TBM_EXYNOS_LOG("error Not support DMA FENCE(%s)\n", strerror(errno));
/* 3D device: ask the kernel for a dma-buf fence on the dmabuf fd. */
1540 if (device == TBM_DEVICE_3D) {
1541 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1543 TBM_EXYNOS_LOG("error Cannot set GET FENCE(%s)\n", strerror(errno));
/* CPU device: advisory file lock on the dmabuf fd (blocking, F_SETLKW). */
1547 if (opt & TBM_OPTION_WRITE)
1548 filelock.l_type = F_WRLCK;
1550 filelock.l_type = F_RDLCK;
1552 filelock.l_whence = SEEK_CUR;
1553 filelock.l_start = 0;
1556 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
/* Record the acquired 3D fence in the first free slot of the per-bo list. */
1560 pthread_mutex_lock(&bo_exynos->mutex);
1562 if (device == TBM_DEVICE_3D) {
1565 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1566 if (bo_exynos->dma_fence[i].ctx == 0) {
1567 bo_exynos->dma_fence[i].type = fence.type;
1568 bo_exynos->dma_fence[i].ctx = fence.ctx;
1573 if (i == DMA_FENCE_LIST_MAX) {
1574 /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
1575 TBM_EXYNOS_LOG("error fence list is full\n");
1579 pthread_mutex_unlock(&bo_exynos->mutex);
1581 DBG("[%s] DMABUF_IOCTL_GET_FENCE! bo:%p, gem:%d(%d), fd:%ds\n", target_name(),
1583 bo_exynos->gem, bo_exynos->name,
1585 #endif /* ALWAYS_BACKEND_CTRL */
/* Release a lock taken by tbm_exynos_bo_lock.  Fences are released FIFO:
 * dma_fence[0] is consumed (DMABUF_IOCTL_PUT_FENCE for DMA-type locks)
 * and the list is shifted down one slot; CPU locks are released via
 * fcntl(F_UNLCK).  Compiled out when ALWAYS_BACKEND_CTRL is defined.
 * NOTE(review): sampled chunk — some lines (returns, closing braces,
 * the second early-exit's distinct condition) are omitted; the two
 * identical-looking guards at 1608/1614 presumably differed in the
 * original — confirm against the full file. */
1591 tbm_exynos_bo_unlock(tbm_bo bo)
1593 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1595 #ifndef ALWAYS_BACKEND_CTRL
1596 tbm_bo_exynos bo_exynos;
1597 struct dma_buf_fence fence;
1598 struct flock filelock;
1599 unsigned int dma_type = 0;
1602 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
1603 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
/* Was the oldest outstanding lock a DMA (3D) fence? */
1605 if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
1608 if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1609 DBG("[libtbm-exynos:%d] %s FENCE not support or ignored,\n", getpid(),
1614 if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1615 DBG("[libtbm-exynos:%d] %s device type is not 3D/CPU,\n", getpid(),
/* Pop dma_fence[0] and shift the remaining entries down (FIFO order). */
1620 pthread_mutex_lock(&bo_exynos->mutex);
1623 fence.type = bo_exynos->dma_fence[0].type;
1624 fence.ctx = bo_exynos->dma_fence[0].ctx;
1627 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1628 bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
1629 bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
1631 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1632 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1634 pthread_mutex_unlock(&bo_exynos->mutex);
/* Return the fence to the kernel (3D path). */
1637 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1639 TBM_EXYNOS_LOG("error Can not set PUT FENCE(%s)\n", strerror(errno));
/* CPU path: drop the advisory file lock. */
1643 filelock.l_type = F_UNLCK;
1644 filelock.l_whence = SEEK_CUR;
1645 filelock.l_start = 0;
1648 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1652 DBG("[%s] DMABUF_IOCTL_PUT_FENCE! bo:%p, gem:%d(%d), fd:%ds\n", target_name(),
1654 bo_exynos->gem, bo_exynos->name,
1656 #endif /* ALWAYS_BACKEND_CTRL */
/* Tear down the backend's private bufmgr state: drain and destroy the
 * bo hash table, deinit the cache state, stop the wayland auth server
 * if a display was bound, free the device name, unset/close the drm fd,
 * and finally free the private struct itself.
 * (Sampled chunk: hash-iteration locals and braces are omitted here.) */
1662 tbm_exynos_bufmgr_deinit(void *priv)
1664 EXYNOS_RETURN_IF_FAIL(priv != NULL);
1666 tbm_bufmgr_exynos bufmgr_exynos;
1668 bufmgr_exynos = (tbm_bufmgr_exynos)priv;
/* Remove every cached bo entry before destroying the hash table. */
1670 if (bufmgr_exynos->hashBos) {
1674 while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
1676 drmHashDelete(bufmgr_exynos->hashBos, key);
1679 drmHashDestroy(bufmgr_exynos->hashBos);
1680 bufmgr_exynos->hashBos = NULL;
1683 _bufmgr_deinit_cache_state(bufmgr_exynos);
1685 if (bufmgr_exynos->bind_display)
1686 tbm_drm_helper_wl_auth_server_deinit();
1688 if (bufmgr_exynos->device_name)
1689 free(bufmgr_exynos->device_name);
/* Display server owns the master fd; release the helper's record of it. */
1691 if (tbm_backend_is_display_server())
1692 tbm_drm_helper_unset_tbm_master_fd();
1694 close(bufmgr_exynos->fd);
1696 free(bufmgr_exynos);
/* Report the color formats this backend supports.  Allocates an array of
 * TBM_COLOR_FORMAT_COUNT uint32_t entries (ownership transfers to the
 * caller, who must free it), copies tbm_exynos_color_format_list into
 * it, and returns the array and count through the out parameters.
 * (Sampled chunk: return statements are omitted from this view.) */
1700 tbm_exynos_surface_supported_format(uint32_t **formats, uint32_t *num)
1702 uint32_t *color_formats = NULL;
1704 color_formats = (uint32_t *)calloc(1,
1705 sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
1707 if (color_formats == NULL)
1710 memcpy(color_formats, tbm_exynos_color_format_list,
1711 sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1713 *formats = color_formats;
1714 *num = TBM_COLOR_FORMAT_COUNT;
/* NOTE(review): debug print to stderr on every call — consider DBG(). */
1716 fprintf(stderr, "tbm_exynos_surface_supported_format count = %d\n", *num);
/* Compute the NV12 plane pixel area rounded up to whole S5P MFC
 * macroblocks.  Small frames (below S5P_FIMV_MAX_FRAME_SIZE) round the
 * macroblock row count up to an even number.
 * (Sampled chunk: return type and local declarations are omitted.) */
1722 _new_calc_plane_nv12(int width, int height)
1726 mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
1727 mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
1729 if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
1730 mbY = (mbY + 1) / 2 * 2;
1732 return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
1733 S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
/* Legacy NV12 Y-plane size: pad width/height for the NV12MT tile
 * alignment and align the product to the MFC decoder buffer alignment. */
1737 _calc_yplane_nv12(int width, int height)
1741 mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
1742 mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
1744 return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
/* Legacy NV12 UV-plane size: half the padded, tile-aligned area, then
 * aligned to the MFC decoder buffer alignment. */
1748 _calc_uvplane_nv12(int width, int height)
1752 mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
1753 mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
1755 return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
/* New-style NV12 Y-plane size: macroblock-rounded area plus the MFC
 * per-plane padding, aligned to the NV12 plane alignment. */
1759 _new_calc_yplane_nv12(int width, int height)
1761 return SIZE_ALIGN(_new_calc_plane_nv12(width,
1762 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1763 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
/* New-style NV12 UV-plane size: half the macroblock-rounded area plus
 * the MFC per-plane padding, aligned to the NV12 plane alignment. */
1767 _new_calc_uvplane_nv12(int width, int height)
1769 return SIZE_ALIGN((_new_calc_plane_nv12(width,
1770 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1771 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1775 * @brief get the plane data of the surface.
1776 * @param[in] width : the width of the surface
1777 * @param[in] height : the height of the surface
1778 * @param[in] format : the format of the surface
1779 * @param[in] plane_idx : the index of the plane
1780 * @param[out] size : the size of the plane
1781 * @param[out] offset : the offset of the plane
1782 * @param[out] pitch : the pitch of the plane
1783 * @param[out] padding : the padding of the plane
1784 * @return 1 if this function succeeds, otherwise 0.
/* Compute the layout (size, offset, pitch, backing bo index) of one
 * plane of a surface with the given width/height/format.  Pitches are
 * aligned per TBM_SURFACE_ALIGNMENT_PITCH_RGB / _YUV and plane sizes per
 * TBM_SURFACE_ALIGNMENT_PLANE; NV12 uses the MFC-aware helpers above.
 * NOTE(review): sampled chunk — local declarations, bpp assignments,
 * offset/bo_idx updates, `break`s and the epilogue that writes the out
 * parameters are omitted from this view. */
1787 tbm_exynos_surface_get_plane_data(int width, int height,
1788 tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1789 uint32_t *pitch, int *bo_idx)
/* 16-bpp packed RGB family: single plane. */
1800 case TBM_FORMAT_XRGB4444:
1801 case TBM_FORMAT_XBGR4444:
1802 case TBM_FORMAT_RGBX4444:
1803 case TBM_FORMAT_BGRX4444:
1804 case TBM_FORMAT_ARGB4444:
1805 case TBM_FORMAT_ABGR4444:
1806 case TBM_FORMAT_RGBA4444:
1807 case TBM_FORMAT_BGRA4444:
1808 case TBM_FORMAT_XRGB1555:
1809 case TBM_FORMAT_XBGR1555:
1810 case TBM_FORMAT_RGBX5551:
1811 case TBM_FORMAT_BGRX5551:
1812 case TBM_FORMAT_ARGB1555:
1813 case TBM_FORMAT_ABGR1555:
1814 case TBM_FORMAT_RGBA5551:
1815 case TBM_FORMAT_BGRA5551:
1816 case TBM_FORMAT_RGB565:
1819 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1820 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 24-bpp RGB family: single plane. */
1824 case TBM_FORMAT_RGB888:
1825 case TBM_FORMAT_BGR888:
1828 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1829 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 32-bpp RGB family: single plane. */
1833 case TBM_FORMAT_XRGB8888:
1834 case TBM_FORMAT_XBGR8888:
1835 case TBM_FORMAT_RGBX8888:
1836 case TBM_FORMAT_BGRX8888:
1837 case TBM_FORMAT_ARGB8888:
1838 case TBM_FORMAT_ABGR8888:
1839 case TBM_FORMAT_RGBA8888:
1840 case TBM_FORMAT_BGRA8888:
1843 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1844 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* Packed YUV 4:2:2 and AYUV: single plane, YUV pitch alignment. */
1849 case TBM_FORMAT_YUYV:
1850 case TBM_FORMAT_YVYU:
1851 case TBM_FORMAT_UYVY:
1852 case TBM_FORMAT_VYUY:
1853 case TBM_FORMAT_AYUV:
1856 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1857 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1863 * index 0 = Y plane, [7:0] Y
1864 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1866 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
/* NV12 uses the MFC-aware size helpers (max of legacy and new calc). */
1868 case TBM_FORMAT_NV12:
1870 if (plane_idx == 0) {
1872 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1873 _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1876 } else if (plane_idx == 1) {
1878 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1879 _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
/* NV21 uses the plain pitch*height layout (no MFC helper). */
1884 case TBM_FORMAT_NV21:
1886 if (plane_idx == 0) {
1888 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1889 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1891 } else if (plane_idx == 1) {
1892 _offset = width * height;
1893 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1894 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
/* NV16/NV61: 4:2:2 semi-planar — UV plane is full height. */
1899 case TBM_FORMAT_NV16:
1900 case TBM_FORMAT_NV61:
1902 /*if(plane_idx == 0)*/
1905 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1906 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1911 /*else if( plane_idx ==1 )*/
1914 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1915 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1922 * index 0: Y plane, [7:0] Y
1923 * index 1: Cb plane, [7:0] Cb
1924 * index 2: Cr plane, [7:0] Cr
1926 * index 1: Cr plane, [7:0] Cr
1927 * index 2: Cb plane, [7:0] Cb
1931 * NATIVE_BUFFER_FORMAT_YV12
1932 * NATIVE_BUFFER_FORMAT_I420
/* Planar 4:2:0 (and 4:1:0/4:1:1 grouped here): chroma is w/2 x h/2. */
1934 case TBM_FORMAT_YUV410:
1935 case TBM_FORMAT_YVU410:
1939 case TBM_FORMAT_YUV411:
1940 case TBM_FORMAT_YVU411:
1941 case TBM_FORMAT_YUV420:
1942 case TBM_FORMAT_YVU420:
1944 /*if(plane_idx == 0)*/
1947 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1948 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1953 /*else if(plane_idx == 1)*/
1956 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1957 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1962 /*else if (plane_idx == 2)*/
1965 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1966 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
/* Planar 4:2:2: chroma is w/2 x full height. */
1970 case TBM_FORMAT_YUV422:
1971 case TBM_FORMAT_YVU422:
1973 /*if(plane_idx == 0)*/
1976 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1977 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1982 /*else if(plane_idx == 1)*/
1985 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1986 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1991 /*else if (plane_idx == 2)*/
1994 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1995 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
/* Planar 4:4:4: all three planes are full resolution. */
1999 case TBM_FORMAT_YUV444:
2000 case TBM_FORMAT_YVU444:
2002 /*if(plane_idx == 0)*/
2005 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2006 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2011 /*else if(plane_idx == 1)*/
2014 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2015 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2020 /*else if (plane_idx == 2)*/
2023 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2024 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* Return the TBM memory flags (flags_tbm) the bo was created with;
 * 0 on invalid arguments. */
2042 tbm_exynos_bo_get_flags(tbm_bo bo)
2044 EXYNOS_RETURN_VAL_IF_FAIL(bo != NULL, 0);
2046 tbm_bo_exynos bo_exynos;
2048 bo_exynos = (tbm_bo_exynos)tbm_backend_get_bo_priv(bo);
2049 EXYNOS_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
2051 return bo_exynos->flags_tbm;
2055 tbm_exynos_bufmgr_bind_native_display (tbm_bufmgr bufmgr, void *native_display)
2057 tbm_bufmgr_exynos bufmgr_exynos;
2059 bufmgr_exynos = tbm_backend_get_priv_from_bufmgr(bufmgr);
2060 EXYNOS_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
2062 if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
2063 bufmgr_exynos->device_name, 0)) {
2064 TBM_EXYNOS_LOG("[libtbm-exynos:%d] error:Fail to tbm_drm_helper_wl_server_init\n");
2068 bufmgr_exynos->bind_display = native_display;
/* Module registration consumed by the TBM core loader. */
2073 MODULEINITPPROTO(init_tbm_bufmgr_priv);
/* Backend version record (initializer fields outside this sampled view). */
2075 static TBMModuleVersionInfo ExynosVersRec = {
/* Exported module data: version record plus the init entry point. */
2081 TBMModuleData tbmModuleData = { &ExynosVersRec, init_tbm_bufmgr_priv};
/* Backend init entry point.  Allocates the private bufmgr struct,
 * obtains a drm fd (display server: master fd or freshly opened device;
 * client: render node or authenticated fd from the server), probes
 * dma-buf fence support, initializes cache state and the bo hash table,
 * then fills in and registers the backend function table.
 * NOTE(review): sampled chunk — error returns, several closing braces
 * and some locals are omitted from this view. */
2084 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
2086 tbm_bufmgr_exynos bufmgr_exynos;
2087 tbm_bufmgr_backend bufmgr_backend;
2092 bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
2093 if (!bufmgr_exynos) {
2094 TBM_EXYNOS_LOG("error: Fail to alloc bufmgr_exynos!\n");
/* Display-server path: own (or open) the DRM master fd and export it
 * to clients via the drm helper. */
2098 if (tbm_backend_is_display_server()) {
2099 bufmgr_exynos->fd = -1;
2101 bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
2102 if (bufmgr_exynos->fd < 0) {
2103 bufmgr_exynos->fd = _tbm_exynos_open_drm();
2106 if (bufmgr_exynos->fd < 0) {
2107 TBM_EXYNOS_LOG ("[libtbm-exynos:%d] error: Fail to create drm!\n", getpid());
2108 free (bufmgr_exynos);
2112 tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
2114 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2116 if (!bufmgr_exynos->device_name)
2118 TBM_EXYNOS_LOG ("[libtbm-exynos:%d] error: Fail to get device name!\n", getpid());
2120 tbm_drm_helper_unset_tbm_master_fd();
2121 close(bufmgr_exynos->fd);
2122 free (bufmgr_exynos);
/* Client path: prefer a render node; otherwise request an
 * authenticated fd + device name from the display server. */
2127 if (_check_render_node()) {
2128 bufmgr_exynos->fd = _get_render_node();
2129 if (bufmgr_exynos->fd < 0) {
/* NOTE(review): 'fd' is an extra argument for this "%s"-only format
 * string (harmless but misleading) — confirm intent. */
2130 TBM_EXYNOS_LOG("[%s] get render node failed\n", target_name(), fd);
2131 free (bufmgr_exynos);
2134 DBG("[%s] Use render node:%d\n", target_name(), bufmgr_exynos->fd);
2137 if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
2138 TBM_EXYNOS_LOG ("[libtbm-exynos:%d] error: Fail to get auth drm info!\n", getpid());
2139 free (bufmgr_exynos);
2145 //Check if the tbm manager supports dma fence or not.
2146 int fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2150 length = read(fp, buf, 1);
2152 if (length == 1 && buf[0] == '1')
2153 bufmgr_exynos->use_dma_fence = 1;
2158 if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
2159 TBM_EXYNOS_LOG ("[libtbm-exynos:%d] error: init bufmgr cache state failed!\n", getpid());
2161 if (tbm_backend_is_display_server())
2162 tbm_drm_helper_unset_tbm_master_fd();
2164 close(bufmgr_exynos->fd);
2166 free(bufmgr_exynos);
2170 /*Create Hash Table*/
2171 bufmgr_exynos->hashBos = drmHashCreate();
/* Allocate and populate the backend function table. */
2173 bufmgr_backend = tbm_backend_alloc();
2174 if (!bufmgr_backend) {
2175 TBM_EXYNOS_LOG("error: Fail to create drm!\n");
2176 if (bufmgr_exynos->hashBos)
2177 drmHashDestroy(bufmgr_exynos->hashBos);
2179 _bufmgr_deinit_cache_state(bufmgr_exynos);
2181 if (tbm_backend_is_display_server())
2182 tbm_drm_helper_unset_tbm_master_fd();
2184 close(bufmgr_exynos->fd);
2186 free(bufmgr_exynos);
2190 bufmgr_backend->priv = (void *)bufmgr_exynos;
2191 bufmgr_backend->bufmgr_deinit = tbm_exynos_bufmgr_deinit;
2192 bufmgr_backend->bo_size = tbm_exynos_bo_size;
2193 bufmgr_backend->bo_alloc = tbm_exynos_bo_alloc;
2194 bufmgr_backend->bo_free = tbm_exynos_bo_free;
2195 bufmgr_backend->bo_import = tbm_exynos_bo_import;
2196 bufmgr_backend->bo_import_fd = tbm_exynos_bo_import_fd;
2197 bufmgr_backend->bo_export = tbm_exynos_bo_export;
2198 bufmgr_backend->bo_export_fd = tbm_exynos_bo_export_fd;
2199 bufmgr_backend->bo_get_handle = tbm_exynos_bo_get_handle;
2200 bufmgr_backend->bo_map = tbm_exynos_bo_map;
2201 bufmgr_backend->bo_unmap = tbm_exynos_bo_unmap;
2202 bufmgr_backend->surface_get_plane_data = tbm_exynos_surface_get_plane_data;
2203 bufmgr_backend->surface_supported_format = tbm_exynos_surface_supported_format;
2204 bufmgr_backend->bo_get_flags = tbm_exynos_bo_get_flags;
2205 bufmgr_backend->bo_lock = tbm_exynos_bo_lock;
2206 bufmgr_backend->bo_unlock = tbm_exynos_bo_unlock;
/* Only the display server (without a render node) authenticates clients. */
2208 if (tbm_backend_is_display_server() && !_check_render_node()) {
2209 bufmgr_backend->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
2212 if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
2213 TBM_EXYNOS_LOG("error: Fail to init backend!\n");
2214 tbm_backend_free(bufmgr_backend);
2216 _bufmgr_deinit_cache_state(bufmgr_exynos);
2218 if (tbm_backend_is_display_server())
2219 tbm_drm_helper_unset_tbm_master_fd();
2221 close(bufmgr_exynos->fd);
2223 free(bufmgr_exynos);
/* Optional debug logging toggled via the TBM_EXYNOS_DEBUG env var. */
2231 env = getenv("TBM_EXYNOS_DEBUG");
2234 TBM_EXYNOS_LOG("TBM_EXYNOS_DEBUG=%s\n", env);
2241 DBG("[%s] drm_fd:%d\n", target_name(),