1 /**************************************************************************
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
41 #include <sys/ioctl.h>
42 #include <sys/types.h>
49 #include <exynos_drm.h>
51 #include <tbm_backend.h>
52 #include <tbm_drm_helper.h>
54 #include <system_info.h>
55 #include "tbm_bufmgr_tgl.h"
57 #define TBM_COLOR_FORMAT_COUNT 4
59 #define EXYNOS_DRM_NAME "exynos"
61 #define STRERR_BUFSIZE 128
63 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
64 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
65 #define MAX(a, b) ((a) > (b) ? (a) : (b))
67 static unsigned int g_tbm_surface_alignment_plane;
68 static unsigned int g_tbm_surface_alignment_pitch_rgb;
70 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
71 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
73 #define SZ_1M 0x00100000
74 #define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
75 #define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
76 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
77 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
78 #define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
79 #define S5P_FIMV_NV12MT_HALIGN 128
80 #define S5P_FIMV_NV12MT_VALIGN 64
82 /* cache control at backend */
83 static unsigned int g_enable_cache_ctrl = 0;
87 unsigned int fence_supported;
91 #define DMA_BUF_ACCESS_READ 0x1
92 #define DMA_BUF_ACCESS_WRITE 0x2
93 #define DMA_BUF_ACCESS_DMA 0x4
94 #define DMA_BUF_ACCESS_MAX 0x8
96 #define DMA_FENCE_LIST_MAX 5
98 struct dma_buf_fence {
103 #define DMABUF_IOCTL_BASE 'F'
104 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
106 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
107 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
108 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
111 #define GLOBAL_KEY ((unsigned int)(-1))
113 #define TBM_EXYNOS_CACHE_INV 0x01 /**< cache invalidate */
114 #define TBM_EXYNOS_CACHE_CLN 0x02 /**< cache clean */
115 #define TBM_EXYNOS_CACHE_ALL 0x10 /**< cache all */
116 #define TBM_EXYNOS_CACHE_FLUSH (TBM_EXYNOS_CACHE_INV|TBM_EXYNOS_CACHE_CLN) /**< cache flush */
117 #define TBM_EXYNOS_CACHE_FLUSH_ALL (TBM_EXYNOS_CACHE_FLUSH|TBM_EXYNOS_CACHE_ALL) /**< cache flush all */
121 DEVICE_CA, /* cache aware device */
122 DEVICE_CO /* cache oblivious device */
125 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
127 union _tbm_bo_cache_state {
130 unsigned int cntFlush:16; /*Flush all index for sync */
131 unsigned int isCached:1;
132 unsigned int isDirtied:2;
136 typedef struct _tbm_bufmgr_exynos *tbm_bufmgr_exynos;
137 typedef struct _tbm_bo_exynos *tbm_bo_exynos;
139 /* tbm buffor object for exynos */
140 struct _tbm_bo_exynos {
143 unsigned int name; /* FLINK ID */
145 unsigned int gem; /* GEM Handle */
147 unsigned int dmabuf; /* fd for dmabuf */
149 void *pBase; /* virtual address */
153 unsigned int flags_exynos;
154 unsigned int flags_tbm;
156 pthread_mutex_t mutex;
157 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
161 tbm_bo_cache_state cache_state;
162 unsigned int map_cnt;
165 tbm_bufmgr_exynos bufmgr_exynos;
168 /* tbm bufmgr private for exynos */
169 struct _tbm_bufmgr_exynos {
181 tbm_backend_bufmgr_func *bufmgr_func;
182 tbm_backend_bo_func *bo_func;
187 const static char *STR_DEVICE[] = {
195 const static char *STR_OPT[] = {
202 static int _get_render_node(int is_master);
/* Register `key` with the TGL (global-lock) device so later lock/data
 * ioctls on this key are valid.
 * NOTE(review): this view is missing lines (return type, braces, the
 * data.key assignment) — comments only, code untouched. */
205 _tgl_init(int fd, unsigned int key)
207 	struct tgl_reg_data data;
209 	char buf[STRERR_BUFSIZE];
212 	data.timeout_ms = 1000;	/* registration timeout in ms — presumably 1s; confirm against tgl driver */
214 	err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
216 		TBM_ERR("error(%s) key:%d\n",
217 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Unregister `key` from the TGL device (inverse of _tgl_init). */
225 _tgl_destroy(int fd, unsigned int key)
227 	struct tgl_reg_data data;
229 	char buf[STRERR_BUFSIZE];
232 	err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
234 		TBM_ERR("error(%s) key:%d\n",
235 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Take the TGL lock for `key`, mapping the TBM access option to the
 * tgl lock type (READ/WRITE, anything else -> TYPE_NONE). */
243 _tgl_lock(int fd, unsigned int key, int opt)
245 	struct tgl_lock_data data;
246 	enum tgl_type_data tgl_type;
248 	char buf[STRERR_BUFSIZE];
251 	case TBM_OPTION_READ:
252 		tgl_type = TGL_TYPE_READ;
254 	case TBM_OPTION_WRITE:
255 		tgl_type = TGL_TYPE_WRITE;
258 		tgl_type = TGL_TYPE_NONE;	/* default: unspecified access */
263 	data.type = tgl_type;
265 	err = ioctl(fd, TGL_IOCTL_LOCK, &data);
267 		TBM_ERR("error(%s) key:%d opt:%d\n",
268 			strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
/* Release the TGL lock previously taken for `key`. */
276 _tgl_unlock(int fd, unsigned int key)
278 	struct tgl_lock_data data;
280 	char buf[STRERR_BUFSIZE];
283 	data.type = TGL_TYPE_NONE;
285 	err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
287 		TBM_ERR("error(%s) key:%d\n",
288 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Store `val` in the TGL per-key user-data slot (used to persist the
 * packed cache-state word across processes). */
296 _tgl_set_data(int fd, unsigned int key, unsigned int val)
298 	struct tgl_usr_data data;
300 	char buf[STRERR_BUFSIZE];
305 	err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
307 		TBM_ERR("error(%s) key:%d\n",
308 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Fetch the TGL per-key user-data word (counterpart of _tgl_set_data). */
315 static inline unsigned int
316 _tgl_get_data(int fd, unsigned int key)
318 	struct tgl_usr_data data = { 0, };
320 	char buf[STRERR_BUFSIZE];
324 	err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
326 		TBM_ERR("error(%s) key:%d\n",
327 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Issue DRM_EXYNOS_GEM_CACHE_OP to invalidate/clean CPU caches for a
 * single bo (range op) or for everything (ALL variants) when bo is NULL.
 * No-op when dma-fence is in use: the kernel then owns cache coherency. */
335 _exynos_cache_flush(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int flags)
337 	TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
339 	/* cache flush is managed by kernel side when using dma-fence. */
340 	if (bufmgr_exynos->use_dma_fence)
343 	struct drm_exynos_gem_cache_op cache_op = {0, };
346 	/* if bo_exynos is null, do cache_flush_all */
349 		cache_op.usr_addr = (uint64_t)((uintptr_t)bo_exynos->pBase);
350 		cache_op.size = bo_exynos->size;
352 		flags = TBM_EXYNOS_CACHE_FLUSH_ALL;	/* NULL bo: force flush-all */
354 		cache_op.usr_addr = 0;
358 	if (flags & TBM_EXYNOS_CACHE_INV) {
359 		if (flags & TBM_EXYNOS_CACHE_ALL)
360 			cache_op.flags |= EXYNOS_DRM_CACHE_INV_ALL;
362 			cache_op.flags |= EXYNOS_DRM_CACHE_INV_RANGE;
365 	if (flags & TBM_EXYNOS_CACHE_CLN) {
366 		if (flags & TBM_EXYNOS_CACHE_ALL)
367 			cache_op.flags |= EXYNOS_DRM_CACHE_CLN_ALL;
369 			cache_op.flags |= EXYNOS_DRM_CACHE_CLN_RANGE;
372 	if (flags & TBM_EXYNOS_CACHE_ALL)
373 		cache_op.flags |= EXYNOS_DRM_ALL_CACHES_CORES;
375 	ret = drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CACHE_OP, &cache_op,
378 		TBM_ERR("fail to flush the cache.\n");
/* Register the bo's FLINK name with the TGL device and, on fresh
 * allocation, seed its shared cache-state word to the clean state.
 * Skipped entirely when cache control is disabled or dma-fence is used. */
386 _bo_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int import)
388 	/* check whether cache control do or not */
389 	if (!g_enable_cache_ctrl)
392 	TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
393 	TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
395 	if (bufmgr_exynos->use_dma_fence)
398 	_tgl_init(bufmgr_exynos->tgl_fd, bo_exynos->name);
400 	tbm_bo_cache_state cache_state;
403 	cache_state.data.isDirtied = DEVICE_NONE;	/* no device has written yet */
404 	cache_state.data.isCached = 0;
405 	cache_state.data.cntFlush = 0;
407 	_tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name, cache_state.val);
/* Core of the cross-process cache protocol: on each map, reload the bo's
 * shared state word from TGL, decide whether an invalidate (CPU after a
 * cache-oblivious device wrote) or a clean (device after dirty CPU
 * writes) is needed, update the dirty/cached bits, and perform the flush.
 * cntFlush is a global generation counter used to skip redundant
 * clean-all operations. Only applies to CACHABLE bos. */
414 _bo_set_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos, int device, int opt)
416 	/* check whether cache control do or not */
417 	if (!g_enable_cache_ctrl)
420 	TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
421 	TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
423 	if (bufmgr_exynos->use_dma_fence)
427 	unsigned short cntFlush = 0;
429 	if (!(bo_exynos->flags_exynos & EXYNOS_BO_CACHABLE))
432 	/* get cache state of a bo_exynos */
433 	bo_exynos->cache_state.val = _tgl_get_data(bufmgr_exynos->tgl_fd,
436 	/* get global cache flush count */
437 	cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
439 	if (device == TBM_DEVICE_CPU) {
440 		if (bo_exynos->cache_state.data.isDirtied == DEVICE_CO &&
441 		    bo_exynos->cache_state.data.isCached)
442 			need_flush = TBM_EXYNOS_CACHE_INV;	/* CPU must see device writes */
444 		bo_exynos->cache_state.data.isCached = 1;
445 		if (opt & TBM_OPTION_WRITE)
446 			bo_exynos->cache_state.data.isDirtied = DEVICE_CA;
448 			if (bo_exynos->cache_state.data.isDirtied != DEVICE_CA)
449 				bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
452 		if (bo_exynos->cache_state.data.isDirtied == DEVICE_CA &&
453 		    bo_exynos->cache_state.data.isCached &&
454 		    bo_exynos->cache_state.data.cntFlush == cntFlush)
455 			need_flush = TBM_EXYNOS_CACHE_CLN | TBM_EXYNOS_CACHE_ALL;	/* write back CPU dirt before DMA */
457 		if (opt & TBM_OPTION_WRITE)
458 			bo_exynos->cache_state.data.isDirtied = DEVICE_CO;
460 			if (bo_exynos->cache_state.data.isDirtied != DEVICE_CO)
461 				bo_exynos->cache_state.data.isDirtied = DEVICE_NONE;
466 		if (need_flush & TBM_EXYNOS_CACHE_ALL)
467 			_tgl_set_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));	/* bump global flush generation */
469 		/* call cache flush */
470 		_exynos_cache_flush(bufmgr_exynos, bo_exynos, need_flush);
472 		TBM_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
473 		    bo_exynos->cache_state.data.isCached,
474 		    bo_exynos->cache_state.data.isDirtied,
/* On unmap: snapshot the current global flush generation into the bo's
 * state word and publish the word back to TGL for other processes. */
483 _bo_save_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
485 	/* check whether cache control do or not */
486 	if (!g_enable_cache_ctrl)
489 	TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
490 	TBM_RETURN_VAL_IF_FAIL(bo_exynos != NULL, 0);
492 	if (bufmgr_exynos->use_dma_fence)
495 	unsigned short cntFlush = 0;
497 	/* get global cache flush count */
498 	cntFlush = (unsigned short)_tgl_get_data(bufmgr_exynos->tgl_fd, GLOBAL_KEY);
500 	/* save global cache flush count */
501 	bo_exynos->cache_state.data.cntFlush = cntFlush;
502 	_tgl_set_data(bufmgr_exynos->tgl_fd, bo_exynos->name,
503 		      bo_exynos->cache_state.val);
/* Drop the bo's TGL registration when the bo is freed. */
509 _bo_destroy_cache_state(tbm_bufmgr_exynos bufmgr_exynos, tbm_bo_exynos bo_exynos)
511 	/* check whether cache control do or not */
512 	if (!g_enable_cache_ctrl)
515 	TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
516 	TBM_RETURN_IF_FAIL(bo_exynos != NULL);
518 	if (bufmgr_exynos->use_dma_fence)
521 	_tgl_destroy(bufmgr_exynos->tgl_fd, bo_exynos->name);
/* Open the TGL device (primary path, then fallback path) and register
 * the GLOBAL_KEY slot holding the global cache-flush counter.
 * Closes the fd again if the global registration fails. */
525 _bufmgr_init_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
527 	/* check whether cache control do or not */
528 	if (!g_enable_cache_ctrl)
531 	TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, 0);
533 	if (bufmgr_exynos->use_dma_fence)
536 	/* open tgl fd for saving cache flush data */
537 	bufmgr_exynos->tgl_fd = open(tgl_devfile, O_RDWR);
539 	if (bufmgr_exynos->tgl_fd < 0) {
540 		bufmgr_exynos->tgl_fd = open(tgl_devfile1, O_RDWR);	/* fallback device node */
541 		if (bufmgr_exynos->tgl_fd < 0) {
542 			TBM_ERR("fail to open global_lock:%s\n",
548 	if (!_tgl_init(bufmgr_exynos->tgl_fd, GLOBAL_KEY)) {
549 		TBM_ERR("fail to initialize the tgl\n");
550 		close(bufmgr_exynos->tgl_fd);
/* Close the TGL fd on bufmgr teardown (if it was ever opened). */
558 _bufmgr_deinit_cache_state(tbm_bufmgr_exynos bufmgr_exynos)
560 	/* check whether cache control do or not */
561 	if (!g_enable_cache_ctrl)
564 	TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
566 	if (bufmgr_exynos->use_dma_fence)
569 	if (bufmgr_exynos->tgl_fd >= 0)
570 		close(bufmgr_exynos->tgl_fd);
/* Open the exynos DRM device: try drmOpen() by driver name first, then
 * fall back to udev discovery of the master card node. */
574 _tbm_exynos_open_drm()
578 	fd = drmOpen(EXYNOS_DRM_NAME, NULL);
580 		TBM_ERR("fail to open drm.(%s)\n", EXYNOS_DRM_NAME);
584 		fd = _get_render_node(1);	/* 1 = look for the master (card*) node */
586 			TBM_ERR("cannot find render_node\n");
/* Walk the udev "drm" subsystem and return an open fd for the first
 * exynos-drm device: card[0-9]* when is_master, renderD[0-9]* otherwise.
 * Returns a negative value on any failure path. */
594 _get_render_node(int is_master)
596 	struct udev *udev = NULL;
597 	struct udev_enumerate *e = NULL;
598 	struct udev_list_entry *entry = NULL;
599 	struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
600 	const char *filepath;
605 	TBM_DBG("search drm-device by udev(is_master:%d)\n", is_master);
609 		TBM_ERR("udev_new() failed.\n");
613 	e = udev_enumerate_new(udev);
614 	udev_enumerate_add_match_subsystem(e, "drm");
616 		udev_enumerate_add_match_sysname(e, "card[0-9]*");
618 		udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
619 	udev_enumerate_scan_devices(e);
621 	udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
622 		device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
623 						      udev_list_entry_get_name(entry));
624 		device_parent = udev_device_get_parent(device);
625 		/* Not need unref device_parent. device_parent and device have same refcnt */
627 		if (strcmp(udev_device_get_sysname(device_parent), "exynos-drm") == 0) {
629 			TBM_DBG("Found render device: '%s' (%s)\n",
630 				udev_device_get_syspath(drm_device),
631 				udev_device_get_sysname(device_parent));
635 		udev_device_unref(device);
638 	udev_enumerate_unref(e);
641 		TBM_ERR("failed to find device\n");
646 	/* Get device file path. */
647 	filepath = udev_device_get_devnode(drm_device);
649 		TBM_ERR("udev_device_get_devnode() failed.\n");
650 		udev_device_unref(drm_device);
655 	/* Open DRM device file and check validity. */
656 	fd = open(filepath, O_RDWR | O_CLOEXEC);
658 		TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");	/* NOTE(review): "%s" has no matching argument (should pass filepath) — undefined behavior in the error path */
659 		udev_device_unref(drm_device);
666 		TBM_ERR("fstat() failed %s.\n");	/* NOTE(review): same defect — "%s" without an argument */
667 		udev_device_unref(drm_device);
673 	udev_device_unref(drm_device);
/* Translate TBM memory-type flags into exynos GEM flags:
 * SCANOUT -> CONTIG, else NONCONTIG; WC > NONCACHABLE > CACHABLE. */
680 _get_exynos_flag_from_tbm(unsigned int ftbm)
682 	unsigned int flags = 0;
684 	if (ftbm & TBM_BO_SCANOUT)
685 		flags |= EXYNOS_BO_CONTIG;	/* scanout requires physically contiguous memory */
687 		flags |= EXYNOS_BO_NONCONTIG;
689 	if (ftbm & TBM_BO_WC)
690 		flags |= EXYNOS_BO_WC;
691 	else if (ftbm & TBM_BO_NONCACHABLE)
692 		flags |= EXYNOS_BO_NONCACHABLE;
694 		flags |= EXYNOS_BO_CACHABLE;
/* Inverse mapping of _get_exynos_flag_from_tbm: exynos GEM flags back to
 * TBM flags (NONCONTIG/CACHABLE both collapse to TBM_BO_DEFAULT). */
700 _get_tbm_flag_from_exynos(unsigned int fexynos)
702 	unsigned int flags = 0;
704 	if (fexynos & EXYNOS_BO_NONCONTIG)
705 		flags |= TBM_BO_DEFAULT;
707 		flags |= TBM_BO_SCANOUT;	/* contiguous implies scanout-capable */
709 	if (fexynos & EXYNOS_BO_WC)
711 	else if (fexynos & EXYNOS_BO_CACHABLE)
712 		flags |= TBM_BO_DEFAULT;
714 		flags |= TBM_BO_NONCACHABLE;
/* Resolve a GEM handle to its global FLINK name via DRM_IOCTL_GEM_FLINK. */
720 _get_name(int fd, unsigned int gem)
722 	struct drm_gem_flink arg = {0,};
725 	if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
726 		TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
730 	return (unsigned int)arg.name;
/* Produce a device-specific handle for the bo:
 *   DEFAULT      -> raw GEM handle
 *   CPU (mmap)   -> lazily mmap via DRM_EXYNOS_GEM_MAP, cached in pBase
 *   dmabuf       -> lazily export via PRIME, cached in bo->dmabuf
 * Returns a zeroed handle on unsupported device types or failure. */
734 _exynos_bo_handle(tbm_bo_exynos bo_exynos, int device)
736 	tbm_bo_handle bo_handle;
738 	memset(&bo_handle, 0x0, sizeof(uint64_t));
741 	case TBM_DEVICE_DEFAULT:
743 		bo_handle.u32 = (uint32_t)bo_exynos->gem;
746 		if (!bo_exynos->pBase) {	/* map only once; reused until free */
747 			struct drm_exynos_gem_map arg = {0,};
750 			arg.handle = bo_exynos->gem;
751 			if (drmCommandWriteRead(bo_exynos->fd, DRM_EXYNOS_GEM_MAP, &arg,
753 				TBM_ERR("Cannot map_exynos gem=%d\n", bo_exynos->gem);
754 				return (tbm_bo_handle) NULL;
757 			map = mmap(NULL, bo_exynos->size, PROT_READ | PROT_WRITE, MAP_SHARED,
758 				   bo_exynos->fd, arg.offset);
759 			if (map == MAP_FAILED) {
760 				TBM_ERR("Cannot usrptr gem=%d\n", bo_exynos->gem);
761 				return (tbm_bo_handle) NULL;
763 			bo_exynos->pBase = map;
765 		bo_handle.ptr = (void *)bo_exynos->pBase;
769 		if (!bo_exynos->dmabuf) {	/* export only once; fd cached on the bo */
770 			struct drm_prime_handle arg = {0, };
772 			arg.handle = bo_exynos->gem;
773 			if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
774 				TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem);
775 				return (tbm_bo_handle) NULL;
777 			bo_exynos->dmabuf = arg.fd;
780 		bo_handle.u32 = (uint32_t)bo_exynos->dmabuf;
783 		TBM_ERR("Not supported device:%d\n", device);
784 		bo_handle.ptr = (void *) NULL;
/* Macroblock-aligned NV12 plane size: round width/height up to 16-pixel
 * MBs; small frames get an even MB-row count (MFC codec requirement —
 * presumably; confirm against S5P MFC documentation). */
792 _new_calc_plane_nv12(int width, int height)
796 	mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
797 	mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
799 	if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
800 		mbY = (mbY + 1) / 2 * 2;	/* round MB rows up to an even count */
802 	return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
803 			S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
/* Legacy NV12 Y-plane size with NV12MT tile alignment plus fixed padding. */
807 _calc_yplane_nv12(int width, int height)
811 	mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
812 	mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
814 	return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
/* Legacy NV12 interleaved-UV plane size (half the Y area, tile-aligned). */
818 _calc_uvplane_nv12(int width, int height)
822 	mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
823 	mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
825 	return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
/* New-style NV12 Y-plane size: MB plane size + decoder padding, aligned. */
829 _new_calc_yplane_nv12(int width, int height)
831 	return SIZE_ALIGN(_new_calc_plane_nv12(width,
832 					       height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
833 			  TBM_SURFACE_ALIGNMENT_PLANE_NV12);
/* New-style NV12 UV-plane size: half the Y plane + decoder padding, aligned. */
837 _new_calc_uvplane_nv12(int width, int height)
839 	return SIZE_ALIGN((_new_calc_plane_nv12(width,
840 						height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
841 			  TBM_SURFACE_ALIGNMENT_PLANE_NV12);
/* Backend capability query: this backend shares bos by FLINK key and
 * by dmabuf fd. */
844 static tbm_bufmgr_capability
845 tbm_exynos_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error)
847 	tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE;
849 	capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD;
852 	*error = TBM_ERROR_NONE;
/* Bind a native (Wayland) display: start the drm-auth server on our DRM
 * fd so clients can authenticate, then remember the display. */
858 tbm_exynos_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display)
860 	tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
861 	TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
863 	if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_exynos->fd,
864 					   bufmgr_exynos->device_name, 0)) {
865 		TBM_ERR("fail to tbm_drm_helper_wl_server_init\n");
866 		return TBM_ERROR_INVALID_OPERATION;
869 	bufmgr_exynos->bind_display = native_display;
871 	return TBM_ERROR_NONE;
/* Return a heap-allocated copy of the backend's supported color-format
 * list. Ownership of *formats transfers to the caller (caller frees). */
875 tbm_exynos_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data,
876 					uint32_t **formats, uint32_t *num)
878 	const static uint32_t tbm_exynos_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
885 	tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
886 	uint32_t *color_formats;
888 	TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
890 	color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
891 	if (color_formats == NULL)
892 		return TBM_ERROR_OUT_OF_MEMORY;
894 	memcpy(color_formats, tbm_exynos_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
896 	*formats = color_formats;
897 	*num = TBM_COLOR_FORMAT_COUNT;
899 	TBM_DBG("supported format count = %d\n", *num);
901 	return TBM_ERROR_NONE;
/* Compute per-plane layout (size, offset, pitch, backing-bo index) for a
 * given format/plane/WxH. RGB formats use the global pitch/plane
 * alignments; YUV formats use the fixed 16-byte YUV pitch alignment.
 * NV12/NV21 sizes take the max of the legacy and new MFC calculations so
 * either decoder path fits in the buffer. */
905 tbm_exynos_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data,
906 				tbm_format format, int plane_idx, int width,
907 				int height, uint32_t *size, uint32_t *offset,
908 				uint32_t *pitch, int *bo_idx)
910 	tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
917 	TBM_RETURN_VAL_IF_FAIL(bufmgr_exynos != NULL, TBM_ERROR_INVALID_PARAMETER);
921 	case TBM_FORMAT_XRGB4444:
922 	case TBM_FORMAT_XBGR4444:
923 	case TBM_FORMAT_RGBX4444:
924 	case TBM_FORMAT_BGRX4444:
925 	case TBM_FORMAT_ARGB4444:
926 	case TBM_FORMAT_ABGR4444:
927 	case TBM_FORMAT_RGBA4444:
928 	case TBM_FORMAT_BGRA4444:
929 	case TBM_FORMAT_XRGB1555:
930 	case TBM_FORMAT_XBGR1555:
931 	case TBM_FORMAT_RGBX5551:
932 	case TBM_FORMAT_BGRX5551:
933 	case TBM_FORMAT_ARGB1555:
934 	case TBM_FORMAT_ABGR1555:
935 	case TBM_FORMAT_RGBA5551:
936 	case TBM_FORMAT_BGRA5551:
937 	case TBM_FORMAT_RGB565:
940 		_pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);	/* bytes per row */
941 		_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
945 	case TBM_FORMAT_RGB888:
946 	case TBM_FORMAT_BGR888:
949 		_pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
950 		_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
954 	case TBM_FORMAT_XRGB8888:
955 	case TBM_FORMAT_XBGR8888:
956 	case TBM_FORMAT_RGBX8888:
957 	case TBM_FORMAT_BGRX8888:
958 	case TBM_FORMAT_ARGB8888:
959 	case TBM_FORMAT_ABGR8888:
960 	case TBM_FORMAT_RGBA8888:
961 	case TBM_FORMAT_BGRA8888:
964 		_pitch = SIZE_ALIGN((width * bpp) >> 3, g_tbm_surface_alignment_pitch_rgb);
965 		_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
970 	case TBM_FORMAT_YUYV:
971 	case TBM_FORMAT_YVYU:
972 	case TBM_FORMAT_UYVY:
973 	case TBM_FORMAT_VYUY:
974 	case TBM_FORMAT_AYUV:
977 		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);	/* packed YUV: single plane */
978 		_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
984 	* index 0 = Y plane,     [7:0] Y
985 	* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
987 	* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
989 	case TBM_FORMAT_NV12:
990 	case TBM_FORMAT_NV21:
992 		if (plane_idx == 0) {
994 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
995 			_size = MAX(_calc_yplane_nv12(width, height),
996 				    _new_calc_yplane_nv12(width, height));	/* cover both legacy and new MFC layouts */
998 		} else if (plane_idx == 1) {
1000 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1001 			_size = MAX(_calc_uvplane_nv12(width, height),
1002 				    _new_calc_uvplane_nv12(width, height));
1006 	case TBM_FORMAT_NV16:
1007 	case TBM_FORMAT_NV61:
1009 		/*if(plane_idx == 0)*/
1012 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1013 			_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1018 		/*else if( plane_idx ==1 )*/
1021 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1022 			_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1029 	* index 0: Y plane, [7:0] Y
1030 	* index 1: Cb plane, [7:0] Cb
1031 	* index 2: Cr plane, [7:0] Cr
1033 	* index 1: Cr plane, [7:0] Cr
1034 	* index 2: Cb plane, [7:0] Cb
1038 	* NATIVE_BUFFER_FORMAT_YV12
1039 	* NATIVE_BUFFER_FORMAT_I420
1041 	case TBM_FORMAT_YUV410:
1042 	case TBM_FORMAT_YVU410:
1044 		/*if(plane_idx == 0)*/
1047 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1048 			_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1053 		/*else if(plane_idx == 1)*/
1056 			_pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);	/* 4:1:0 chroma subsampling */
1057 			_size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
1062 		/*else if (plane_idx == 2)*/
1065 			_pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1066 			_size = SIZE_ALIGN(_pitch * (height / 4), g_tbm_surface_alignment_plane);
1070 	case TBM_FORMAT_YUV411:
1071 	case TBM_FORMAT_YVU411:
1072 	case TBM_FORMAT_YUV420:
1073 	case TBM_FORMAT_YVU420:
1075 		/*if(plane_idx == 0)*/
1078 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1079 			_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1084 		/*else if(plane_idx == 1)*/
1087 			_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);	/* half-resolution chroma */
1088 			_size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
1093 		/*else if (plane_idx == 2)*/
1096 			_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1097 			_size = SIZE_ALIGN(_pitch * (height / 2), g_tbm_surface_alignment_plane);
1101 	case TBM_FORMAT_YUV422:
1102 	case TBM_FORMAT_YVU422:
1104 		/*if(plane_idx == 0)*/
1107 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1108 			_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1113 		/*else if(plane_idx == 1)*/
1116 			_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1117 			_size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);	/* 4:2:2 keeps full-height chroma */
1122 		/*else if (plane_idx == 2)*/
1125 			_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1126 			_size = SIZE_ALIGN(_pitch * (height), g_tbm_surface_alignment_plane);
1130 	case TBM_FORMAT_YUV444:
1131 	case TBM_FORMAT_YVU444:
1133 		/*if(plane_idx == 0)*/
1136 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1137 			_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1142 		/*else if(plane_idx == 1)*/
1145 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1146 			_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1151 		/*else if (plane_idx == 2)*/
1154 			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1155 			_size = SIZE_ALIGN(_pitch * height, g_tbm_surface_alignment_plane);
1169 	return TBM_ERROR_NONE;
/* Allocate a new exynos GEM bo of `size` bytes with the requested memory
 * type; registers the bo's FLINK name in the hash, initializes the shared
 * cache state, and (for dma-fence) pre-exports the dmabuf fd.
 * On success returns the bo and sets *error = TBM_ERROR_NONE. */
1172 static tbm_backend_bo_data *
1173 tbm_exynos_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size,
1174 			   tbm_bo_memory_type flags, tbm_error_e *error)
1176 	tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
1177 	tbm_bo_exynos bo_exynos;
1178 	unsigned int exynos_flags;
1180 	if (bufmgr_exynos == NULL) {
1181 		TBM_ERR("bufmgr_data is null\n");
1183 		*error = TBM_ERROR_INVALID_PARAMETER;
1187 	bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1189 		TBM_ERR("fail to allocate the bo_exynos private\n");
1191 		*error = TBM_ERROR_OUT_OF_MEMORY;
1194 	bo_exynos->bufmgr_exynos = bufmgr_exynos;
1196 	exynos_flags = _get_exynos_flag_from_tbm(flags);
1197 	if ((flags & TBM_BO_SCANOUT) &&
1199 		exynos_flags |= EXYNOS_BO_NONCONTIG;	/* scanout downgraded to non-contig — condition not visible here; verify */
1202 	struct drm_exynos_gem_create arg = {0, };
1204 	arg.size = (uint64_t)size;
1205 	arg.flags = exynos_flags;
1206 	if (drmCommandWriteRead(bufmgr_exynos->fd, DRM_EXYNOS_GEM_CREATE, &arg,
1208 		TBM_ERR("Cannot create bo_exynos(flag:%x, size:%d)\n", arg.flags,
1209 			(unsigned int)arg.size);
1212 		*error = TBM_ERROR_INVALID_OPERATION;
1216 	bo_exynos->fd = bufmgr_exynos->fd;
1217 	bo_exynos->gem = arg.handle;
1218 	bo_exynos->size = size;
1219 	bo_exynos->flags_tbm = flags;
1220 	bo_exynos->flags_exynos = exynos_flags;
1221 	bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);	/* FLINK name used as hash key */
1223 	if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 0)) {
1224 		TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
1227 		*error = TBM_ERROR_INVALID_OPERATION;
1231 	pthread_mutex_init(&bo_exynos->mutex, NULL);
1233 	if (bufmgr_exynos->use_dma_fence && !bo_exynos->dmabuf) {
1234 		struct drm_prime_handle arg = {0, };
1236 		arg.handle = bo_exynos->gem;
1237 		if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1238 			TBM_ERR("Cannot dmabuf=%d\n", bo_exynos->gem);
1241 			*error = TBM_ERROR_INVALID_OPERATION;
1244 		bo_exynos->dmabuf = arg.fd;
1247 	/* add bo_exynos to hash */
1248 	if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
1249 		TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name);
1251 	TBM_DBG("     bo_exynos:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
1253 	     bo_exynos->gem, bo_exynos->name,
1254 	     flags, exynos_flags,
1258 	*error = TBM_ERROR_NONE;
1260 	return (tbm_backend_bo_data *)bo_exynos;
/* Import a bo from a dmabuf fd: PRIME fd->handle, dedupe via FLINK name
 * in the hash (returning the existing bo if present), size via lseek
 * with a GEM-info fallback, then register the new bo. */
1263 static tbm_backend_bo_data *
1264 tbm_exynos_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error)
1266 	tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
1267 	tbm_bo_exynos bo_exynos;
1268 	unsigned int gem = 0;
1271 	char buf[STRERR_BUFSIZE];
1273 	if (bufmgr_exynos == NULL) {
1274 		TBM_ERR("bufmgr_data is null\n");
1276 		*error = TBM_ERROR_INVALID_PARAMETER;
1280 	/*getting handle from fd*/
1281 	struct drm_prime_handle arg = {0, };
1285 	if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1286 		TBM_ERR("Cannot get gem handle from fd:%d (%s)\n",
1287 			arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
1289 		*error = TBM_ERROR_INVALID_OPERATION;
1294 	name = _get_name(bufmgr_exynos->fd, gem);
1296 		TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
1297 			gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1299 		*error = TBM_ERROR_INVALID_OPERATION;
1303 	ret = drmHashLookup(bufmgr_exynos->hashBos, name, (void **)&bo_exynos);
1305 		if (gem == bo_exynos->gem) {	/* already imported: return the cached bo */
1307 			*error = TBM_ERROR_NONE;
1312 	 * Determine size of bo_exynos.  The fd-to-handle ioctl really should
1313 	 * return the size, but it doesn't.  If we have kernel 3.12 or
1314 	 * later, we can lseek on the prime fd to get the size.  Older
1315 	 * kernels will just fail, in which case we fall back to the
1316 	 * provided (estimated or guess size).
1318 	unsigned int real_size = -1;	/* NOTE(review): lseek returns off_t; storing into unsigned int and comparing to -1 relies on wraparound — works but fragile, confirm intent */
1319 	struct drm_exynos_gem_info info = {0, };
1321 	real_size = lseek(key, 0, SEEK_END);
1324 	if (drmCommandWriteRead(bufmgr_exynos->fd,
1327 				sizeof(struct drm_exynos_gem_info))) {
1328 		TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
1329 			gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1331 		*error = TBM_ERROR_INVALID_OPERATION;
1335 	if (real_size == -1)
1336 		real_size = info.size;	/* lseek failed: trust the kernel-reported size */
1338 	bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1340 		TBM_ERR("bo_exynos:%p fail to allocate the bo_exynos\n", bo_exynos);
1342 		*error = TBM_ERROR_OUT_OF_MEMORY;
1345 	bo_exynos->bufmgr_exynos = bufmgr_exynos;
1347 	bo_exynos->fd = bufmgr_exynos->fd;
1348 	bo_exynos->gem = gem;
1349 	bo_exynos->size = real_size;
1350 	bo_exynos->flags_exynos = info.flags;
1351 	bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1352 	bo_exynos->name = name;
1354 	if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1355 		TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
1358 		*error = TBM_ERROR_INVALID_OPERATION;
1362 	/* add bo_exynos to hash */
1363 	if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
1364 		TBM_ERR("bo_exynos:%p Cannot insert bo_exynos to Hash(%d) from gem:%d, fd:%d\n",
1365 			bo_exynos, bo_exynos->name, gem, key);
1367 	TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1369 	    bo_exynos->gem, bo_exynos->name,
1372 	    bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1376 	*error = TBM_ERROR_NONE;
1378 	return (tbm_backend_bo_data *)bo_exynos;
/* Import a bo by FLINK name: return the cached bo on a hash hit,
 * otherwise GEM_OPEN the name, query its exynos flags, export a dmabuf
 * fd, and register the new bo in the hash. */
1381 static tbm_backend_bo_data *
1382 tbm_exynos_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error)
1384 	tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
1385 	tbm_bo_exynos bo_exynos;
1388 	if (bufmgr_exynos == NULL) {
1389 		TBM_ERR("bufmgr_data is null\n");
1391 		*error = TBM_ERROR_INVALID_PARAMETER;
1395 	ret = drmHashLookup(bufmgr_exynos->hashBos, key, (void **)&bo_exynos);
1398 		*error = TBM_ERROR_NONE;
1399 		return (tbm_backend_bo_data *)bo_exynos;	/* already known: reuse */
1402 	struct drm_gem_open arg = {0, };
1403 	struct drm_exynos_gem_info info = {0, };
1406 	if (drmIoctl(bufmgr_exynos->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1407 		TBM_ERR("Cannot open gem name=%d\n", key);
1409 		*error = TBM_ERROR_INVALID_OPERATION;
1413 	info.handle = arg.handle;
1414 	if (drmCommandWriteRead(bufmgr_exynos->fd,
1417 				sizeof(struct drm_exynos_gem_info))) {
1418 		TBM_ERR("Cannot get gem info=%d\n", key);
1420 		*error = TBM_ERROR_INVALID_OPERATION;
1424 	bo_exynos = calloc(1, sizeof(struct _tbm_bo_exynos));
1426 		TBM_ERR("fail to allocate the bo_exynos private\n");
1428 		*error = TBM_ERROR_OUT_OF_MEMORY;
1431 	bo_exynos->bufmgr_exynos = bufmgr_exynos;
1433 	bo_exynos->fd = bufmgr_exynos->fd;
1434 	bo_exynos->gem = arg.handle;
1435 	bo_exynos->size = arg.size;
1436 	bo_exynos->flags_exynos = info.flags;
1437 	bo_exynos->name = key;
1438 	bo_exynos->flags_tbm = _get_tbm_flag_from_exynos(bo_exynos->flags_exynos);
1440 	if (!_bo_init_cache_state(bufmgr_exynos, bo_exynos, 1)) {
1441 		TBM_ERR("fail init cache state(%d)\n", bo_exynos->name);
1444 		*error = TBM_ERROR_INVALID_OPERATION;
1448 	if (!bo_exynos->dmabuf) {	/* export a dmabuf fd for later sharing */
1449 		struct drm_prime_handle arg = {0, };
1451 		arg.handle = bo_exynos->gem;
1452 		if (drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1453 			TBM_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_exynos->gem);
1455 			*error = TBM_ERROR_INVALID_OPERATION;
1459 		bo_exynos->dmabuf = arg.fd;
1462 	/* add bo_exynos to hash */
1463 	if (drmHashInsert(bufmgr_exynos->hashBos, bo_exynos->name, (void *)bo_exynos) < 0)
1464 		TBM_ERR("Cannot insert bo_exynos to Hash(%d)\n", bo_exynos->name);
1466 	TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1468 	    bo_exynos->gem, bo_exynos->name,
1470 	    bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1474 	*error = TBM_ERROR_NONE;
1476 	return (tbm_backend_bo_data *)bo_exynos;
/* Tear down a bo: munmap any CPU mapping, close the dmabuf fd, remove
 * the bo from the name hash (sanity-checking for duplicates), destroy
 * its TGL cache state, and close the GEM handle. */
1480 tbm_exynos_bo_free(tbm_backend_bo_data *bo_data)
1482 	tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1484 	tbm_bufmgr_exynos bufmgr_exynos;
1485 	char buf[STRERR_BUFSIZE];
1491 	bufmgr_exynos = bo_exynos->bufmgr_exynos;
1495 	TBM_DBG("      bo_exynos:%p, gem:%d(%d), fd:%d, size:%d\n",
1497 	    bo_exynos->gem, bo_exynos->name,
1501 	if (bo_exynos->pBase) {
1502 		if (munmap(bo_exynos->pBase, bo_exynos->size) == -1) {
1503 			TBM_ERR("bo_exynos:%p fail to munmap(%s)\n",
1504 				bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE));
1509 	if (bo_exynos->dmabuf) {
1510 		close(bo_exynos->dmabuf);
1511 		bo_exynos->dmabuf = 0;
1514 	/* delete bo_exynos from hash */
1515 	ret = drmHashLookup(bufmgr_exynos->hashBos, bo_exynos->name, (void **)&temp);
1517 		drmHashDelete(bufmgr_exynos->hashBos, bo_exynos->name);
1519 		TBM_ERR("Cannot find bo_exynos to Hash(%d), ret=%d\n", bo_exynos->name, ret);
1521 	if (temp != bo_exynos)
1522 		TBM_ERR("hashBos probably has several BOs with same name!!!\n");	/* duplicate-name diagnostic only; not fatal */
1524 	_bo_destroy_cache_state(bufmgr_exynos, bo_exynos);
1526 	/* Free gem handle */
1527 	struct drm_gem_close arg = {0, };
1529 	memset(&arg, 0, sizeof(arg));
1530 	arg.handle = bo_exynos->gem;
1531 	if (drmIoctl(bo_exynos->fd, DRM_IOCTL_GEM_CLOSE, &arg))
1532 		TBM_ERR("bo_exynos:%p fail to gem close.(%s)\n",
1533 			bo_exynos, strerror_r(errno, buf, STRERR_BUFSIZE));
/*
 * tbm_exynos_bo_get_size - backend bo_get_size hook.
 *
 * Returns the allocated size (bytes) recorded at alloc/import time.
 * On a NULL bo_data the elided guard sets *error to
 * TBM_ERROR_INVALID_PARAMETER and returns early (return value elided
 * from this view).
 */
1539 tbm_exynos_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1541 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1545 *error = TBM_ERROR_INVALID_PARAMETER;
1550 *error = TBM_ERROR_NONE;
1552 return bo_exynos->size;
/*
 * tbm_exynos_bo_get_memory_type - backend bo_get_memory_types hook.
 *
 * Returns the TBM memory-type flags that were derived from the exynos
 * GEM flags (see _get_tbm_flag_from_exynos at import/alloc time).
 * Returns TBM_BO_DEFAULT with TBM_ERROR_INVALID_PARAMETER when bo_data
 * is NULL.
 */
1555 static tbm_bo_memory_type
1556 tbm_exynos_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1558 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1562 *error = TBM_ERROR_INVALID_PARAMETER;
1563 return TBM_BO_DEFAULT;
1567 *error = TBM_ERROR_NONE;
1569 return bo_exynos->flags_tbm;
/*
 * tbm_exynos_bo_get_handle - backend bo_get_handle hook.
 *
 * Resolves a device-specific handle (CPU pointer, GEM handle, dmabuf
 * fd, ... depending on 'device') for the bo via _exynos_bo_handle(),
 * without touching the map count or cache state (contrast with
 * tbm_exynos_bo_map below).  Returns a NULL handle and sets *error on
 * bad input or lookup failure.
 */
1572 static tbm_bo_handle
1573 tbm_exynos_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error)
1575 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1576 tbm_bo_handle bo_handle;
1580 *error = TBM_ERROR_INVALID_PARAMETER;
1581 return (tbm_bo_handle) NULL;
/* A zero GEM handle means the bo was never backed by a kernel object. */
1584 if (!bo_exynos->gem) {
1585 TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem);
1587 *error = TBM_ERROR_INVALID_PARAMETER;
1588 return (tbm_bo_handle) NULL;
1591 TBM_DBG("bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1593 bo_exynos->gem, bo_exynos->name,
1595 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1597 STR_DEVICE[device]);
1599 /*Get mapped bo_handle*/
1600 bo_handle = _exynos_bo_handle(bo_exynos, device);
1601 if (bo_handle.ptr == NULL) {
1602 TBM_ERR("Cannot get handle: gem:%d, device:%d\n",
1603 bo_exynos->gem, device);
1605 *error = TBM_ERROR_INVALID_OPERATION;
1606 return (tbm_bo_handle) NULL;
1610 *error = TBM_ERROR_NONE;
/*
 * tbm_exynos_bo_map - backend bo_map hook.
 *
 * Like bo_get_handle, but also maintains the map refcount and the CPU
 * cache state: on the first map (map_cnt == 0) the cache state is set
 * for the requested device/option, and the device is remembered in
 * last_map_device so bo_unmap can decide whether a cache flush is
 * needed.  Returns a NULL handle and sets *error on bad input or when
 * _exynos_bo_handle() fails.
 */
1615 static tbm_bo_handle
1616 tbm_exynos_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1617 tbm_bo_access_option opt, tbm_error_e *error)
1619 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1620 tbm_bo_handle bo_handle;
1621 tbm_bufmgr_exynos bufmgr_exynos;
1625 *error = TBM_ERROR_INVALID_PARAMETER;
1626 return (tbm_bo_handle) NULL;
1629 bufmgr_exynos = bo_exynos->bufmgr_exynos;
1630 if (!bufmgr_exynos) {
1632 *error = TBM_ERROR_INVALID_PARAMETER;
1633 return (tbm_bo_handle) NULL;
/* A zero GEM handle means there is nothing to map. */
1636 if (!bo_exynos->gem) {
1637 TBM_ERR("Cannot map gem=%d\n", bo_exynos->gem);
1639 *error = TBM_ERROR_INVALID_PARAMETER;
1640 return (tbm_bo_handle) NULL;
1643 TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, %s, %s\n",
1645 bo_exynos->gem, bo_exynos->name,
1650 /*Get mapped bo_handle*/
1651 bo_handle = _exynos_bo_handle(bo_exynos, device);
1652 if (bo_handle.ptr == NULL) {
1653 TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1654 bo_exynos->gem, device, opt);
1656 *error = TBM_ERROR_INVALID_OPERATION;
1657 return (tbm_bo_handle) NULL;
/* First mapper establishes the cache state for this device/option. */
1660 if (bo_exynos->map_cnt == 0)
1661 _bo_set_cache_state(bufmgr_exynos, bo_exynos, device, opt);
1663 bo_exynos->last_map_device = device;
1665 bo_exynos->map_cnt++;
1668 *error = TBM_ERROR_NONE;
/*
 * tbm_exynos_bo_unmap - backend bo_unmap hook.
 *
 * Decrements the map refcount; when it reaches zero the cache state is
 * saved.  If cache control is enabled and the most recent map was for
 * the CPU, the whole bo is cache-flushed so device (DMA) users see the
 * CPU writes.  Returns TBM_ERROR_INVALID_PARAMETER for a NULL bo, a
 * bo without a bufmgr, or a bo without a GEM handle.
 */
1674 tbm_exynos_bo_unmap(tbm_backend_bo_data *bo_data)
1676 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1677 tbm_bufmgr_exynos bufmgr_exynos;
1680 return TBM_ERROR_INVALID_PARAMETER;
1682 bufmgr_exynos = bo_exynos->bufmgr_exynos;
1684 return TBM_ERROR_INVALID_PARAMETER;
1686 if (!bo_exynos->gem)
1687 return TBM_ERROR_INVALID_PARAMETER;
/* NOTE(review): map_cnt is decremented without checking it is > 0;
 * an unbalanced unmap would underflow — presumably guarded by the
 * frontend, TODO confirm. */
1689 bo_exynos->map_cnt--;
1691 if (bo_exynos->map_cnt == 0)
1692 _bo_save_cache_state(bufmgr_exynos, bo_exynos);
1694 /* check whether cache control do or not */
1695 if (g_enable_cache_ctrl && bo_exynos->last_map_device == TBM_DEVICE_CPU)
1696 _exynos_cache_flush(bufmgr_exynos, bo_exynos, TBM_EXYNOS_CACHE_FLUSH_ALL);
/* -1 marks "no device currently mapped". */
1698 bo_exynos->last_map_device = -1;
1700 TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d\n",
1702 bo_exynos->gem, bo_exynos->name,
1705 return TBM_ERROR_NONE;
/*
 * tbm_exynos_bo_lock - backend bo_lock hook.
 *
 * Cross-process/device synchronization for a bo.  Only TBM_DEVICE_3D
 * and TBM_DEVICE_CPU are supported.  For the 3D device a dma-buf fence
 * is acquired via DMABUF_IOCTL_GET_FENCE (read or write access, per
 * 'opt') and recorded in the bo's dma_fence list; the other path uses
 * a blocking POSIX record lock (fcntl F_SETLKW) on the dmabuf fd —
 * the exact branch structure is partly elided here, TODO confirm.
 * Whole body is compiled out when ALWAYS_BACKEND_CTRL is defined.
 */
1709 tbm_exynos_bo_lock(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1710 tbm_bo_access_option opt)
1712 #ifndef ALWAYS_BACKEND_CTRL
1713 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1714 tbm_bufmgr_exynos bufmgr_exynos;
1715 struct dma_buf_fence fence;
1716 struct flock filelock;
1718 char buf[STRERR_BUFSIZE];
1721 return TBM_ERROR_INVALID_PARAMETER;
1723 bufmgr_exynos = bo_exynos->bufmgr_exynos;
1725 return TBM_ERROR_INVALID_PARAMETER;
1727 if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1728 TBM_DBG("Not support device type,\n");
1729 return TBM_ERROR_INVALID_OPERATION;
1732 memset(&fence, 0, sizeof(struct dma_buf_fence));
1734 /* Check if the given type is valid or not. */
/* WRITE takes priority over READ when both option bits are set. */
1735 if (opt & TBM_OPTION_WRITE) {
1736 if (device == TBM_DEVICE_3D)
1737 fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1738 } else if (opt & TBM_OPTION_READ) {
1739 if (device == TBM_DEVICE_3D)
1740 fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1742 TBM_ERR("Invalid argument\n");
1743 return TBM_ERROR_INVALID_PARAMETER;
1746 /* Check if the tbm manager supports dma fence or not. */
1747 if (!bufmgr_exynos->use_dma_fence) {
1748 TBM_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1749 return TBM_ERROR_INVALID_OPERATION;
/* 3D path: ask the kernel for a dma-buf fence on this buffer. */
1753 if (device == TBM_DEVICE_3D) {
1754 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1756 TBM_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1757 return TBM_ERROR_INVALID_OPERATION;
/* Non-3D path: blocking advisory record lock on the dmabuf fd. */
1760 if (opt & TBM_OPTION_WRITE)
1761 filelock.l_type = F_WRLCK;
1763 filelock.l_type = F_RDLCK;
1765 filelock.l_whence = SEEK_CUR;
1766 filelock.l_start = 0;
1769 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1770 return TBM_ERROR_INVALID_OPERATION;
/* Record the acquired fence in the first free slot of the per-bo list. */
1773 pthread_mutex_lock(&bo_exynos->mutex);
1775 if (device == TBM_DEVICE_3D) {
1778 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1779 if (bo_exynos->dma_fence[i].ctx == 0) {
1780 bo_exynos->dma_fence[i].type = fence.type;
1781 bo_exynos->dma_fence[i].ctx = fence.ctx;
1786 if (i == DMA_FENCE_LIST_MAX) {
1787 /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
1788 TBM_ERR("fence list is full\n");
1792 pthread_mutex_unlock(&bo_exynos->mutex);
1794 TBM_DBG("DMABUF_IOCTL_GET_FENCE! bo_exynos:%p, gem:%d(%d), fd:%ds\n",
1796 bo_exynos->gem, bo_exynos->name,
1798 #endif /* ALWAYS_BACKEND_CTRL */
1800 return TBM_ERROR_NONE;
/*
 * tbm_exynos_bo_unlock - backend bo_unlock hook.
 *
 * Releases the oldest recorded fence (FIFO): pops dma_fence[0],
 * shifts the rest of the list down, then either returns the fence to
 * the kernel via DMABUF_IOCTL_PUT_FENCE (DMA/3D case) or releases the
 * fcntl record lock on the dmabuf fd.  Whole body is compiled out
 * when ALWAYS_BACKEND_CTRL is defined.
 */
1804 tbm_exynos_bo_unlock(tbm_backend_bo_data *bo_data)
1806 #ifndef ALWAYS_BACKEND_CTRL
1807 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1808 struct dma_buf_fence fence;
1809 struct flock filelock;
1810 unsigned int dma_type = 0;
1812 char buf[STRERR_BUFSIZE];
1814 bufmgr_exynos = bo_exynos->bufmgr_exynos;
1816 return TBM_ERROR_INVALID_PARAMETER;
/* dma_type flags whether slot 0 was a DMA (3D) fence (set on an elided line). */
1818 if (bo_exynos->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
1821 if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1822 TBM_DBG("FENCE not support or ignored,\n");
1823 return TBM_ERROR_INVALID_OPERATION;
/* NOTE(review): this condition is identical to the one at 1821 but
 * carries a different message; the second guard almost certainly
 * intends "!dma_type" (no ctx AND not a DMA fence => neither 3D nor
 * CPU lock is held).  Cannot fix safely from this elided view —
 * verify against the full source and correct the operator. */
1826 if (!bo_exynos->dma_fence[0].ctx && dma_type) {
1827 TBM_DBG("device type is not 3D/CPU,\n");
1828 return TBM_ERROR_INVALID_OPERATION;
/* Pop slot 0 and compact the FIFO under the per-bo mutex. */
1831 pthread_mutex_lock(&bo_exynos->mutex);
1834 fence.type = bo_exynos->dma_fence[0].type;
1835 fence.ctx = bo_exynos->dma_fence[0].ctx;
1838 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1839 bo_exynos->dma_fence[i - 1].type = bo_exynos->dma_fence[i].type;
1840 bo_exynos->dma_fence[i - 1].ctx = bo_exynos->dma_fence[i].ctx;
1842 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1843 bo_exynos->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1845 pthread_mutex_unlock(&bo_exynos->mutex);
/* Return the fence to the kernel (DMA case). */
1848 ret = ioctl(bo_exynos->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1850 TBM_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
1851 return TBM_ERROR_INVALID_OPERATION;
/* Non-DMA case: drop the advisory record lock taken in bo_lock. */
1854 filelock.l_type = F_UNLCK;
1855 filelock.l_whence = SEEK_CUR;
1856 filelock.l_start = 0;
1859 if (-1 == fcntl(bo_exynos->dmabuf, F_SETLKW, &filelock))
1860 return TBM_ERROR_INVALID_OPERATION;
1863 TBM_DBG("DMABUF_IOCTL_PUT_FENCE! bo_exynos:%p, gem:%d(%d), fd:%ds\n",
1865 bo_exynos->gem, bo_exynos->name,
1867 #endif /* ALWAYS_BACKEND_CTRL */
1869 return TBM_ERROR_NONE;
/*
 * tbm_exynos_bo_export_fd - backend bo_export_fd hook.
 *
 * Exports the bo's GEM handle as a new dmabuf fd via
 * DRM_IOCTL_PRIME_HANDLE_TO_FD.  Ownership of the returned fd passes
 * to the caller (the bo keeps its own bo_exynos->dmabuf separately).
 * On ioctl failure the drmIoctl return value is propagated as the
 * (negative) tbm_fd and *error is set to TBM_ERROR_INVALID_OPERATION.
 */
1872 tbm_exynos_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1874 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1876 char buf[STRERR_BUFSIZE];
1880 *error = TBM_ERROR_INVALID_PARAMETER;
1884 struct drm_prime_handle arg = {0, };
1886 arg.handle = bo_exynos->gem;
1887 ret = drmIoctl(bo_exynos->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1889 TBM_ERR("bo_exynos:%p Cannot dmabuf=%d (%s)\n",
1890 bo_exynos, bo_exynos->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
1892 *error = TBM_ERROR_INVALID_OPERATION;
1893 return (tbm_fd) ret;
1896 TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1898 bo_exynos->gem, bo_exynos->name,
1901 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1905 *error = TBM_ERROR_NONE;
1907 return (tbm_fd)arg.fd;
/*
 * tbm_exynos_bo_export_key - backend bo_export_key hook.
 *
 * Returns the bo's global GEM flink name as a tbm_key, lazily fetching
 * it via _get_name() (GEM FLINK) the first time it is requested.
 * Sets *error and returns early (return value elided from this view)
 * when bo_data is NULL or the name cannot be obtained.
 */
1911 tbm_exynos_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1913 tbm_bo_exynos bo_exynos = (tbm_bo_exynos)bo_data;
1917 *error = TBM_ERROR_INVALID_PARAMETER;
/* Lazily resolve the flink name; 0 means "not yet named". */
1921 if (!bo_exynos->name) {
1922 bo_exynos->name = _get_name(bo_exynos->fd, bo_exynos->gem);
1923 if (!bo_exynos->name) {
1924 TBM_ERR("error Cannot get name\n");
1926 *error = TBM_ERROR_INVALID_PARAMETER;
1931 TBM_DBG(" bo_exynos:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1933 bo_exynos->gem, bo_exynos->name,
1935 bo_exynos->flags_tbm, bo_exynos->flags_exynos,
1939 *error = TBM_ERROR_NONE;
1941 return (tbm_key)bo_exynos->name;
/*
 * tbm_exynos_deinit - backend deinit hook; mirror of tbm_exynos_init.
 *
 * Frees the registered bufmgr/bo function tables, drains and destroys
 * the bo name hash, tears down cache-state bookkeeping, shuts down the
 * wayland auth server if a display was bound, releases the master/auth
 * drm fd helpers, closes the drm fd and frees the bufmgr struct.
 */
1945 tbm_exynos_deinit(tbm_backend_bufmgr_data *bufmgr_data)
1947 tbm_bufmgr_exynos bufmgr_exynos = (tbm_bufmgr_exynos)bufmgr_data;
1953 TBM_RETURN_IF_FAIL(bufmgr_exynos != NULL);
1955 bufmgr = bufmgr_exynos->bufmgr;
1957 tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_exynos->bufmgr_func);
1958 tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_exynos->bo_func);
/* Drain any bos still registered in the name hash, then destroy it. */
1960 if (bufmgr_exynos->hashBos) {
1961 while (drmHashFirst(bufmgr_exynos->hashBos, &key, &value) > 0) {
1963 drmHashDelete(bufmgr_exynos->hashBos, key);
1966 drmHashDestroy(bufmgr_exynos->hashBos);
1967 bufmgr_exynos->hashBos = NULL;
1970 _bufmgr_deinit_cache_state(bufmgr_exynos);
1972 if (bufmgr_exynos->bind_display)
1973 tbm_drm_helper_wl_auth_server_deinit();
1975 if (bufmgr_exynos->device_name)
1976 free(bufmgr_exynos->device_name);
/* Display-server process owned the master fd; release that role too. */
1978 if (tbm_backend_bufmgr_query_display_server(bufmgr, &error))
1979 tbm_drm_helper_unset_tbm_master_fd();
1981 tbm_drm_helper_unset_fd();
1983 close(bufmgr_exynos->fd);
1985 free(bufmgr_exynos);
/*
 * tbm_exynos_init - backend entry point; allocates and wires up the
 * exynos bufmgr.
 *
 * Phases:
 *   1. allocate the bufmgr struct;
 *   2. obtain a drm fd — either an already-open master fd from the
 *      tbm_drm_helper, or open the exynos device ourselves and, if we
 *      are drm master, publish the fd (otherwise fetch an
 *      authenticated fd from the master process);
 *   3. probe dma-fence support via sysfs;
 *   4. pick per-target surface alignment from the platform model name;
 *   5. init cache state, create the bo name hash, and allocate +
 *      register the bufmgr/bo function tables.
 * On failure, unwinds in reverse order through the goto ladder at the
 * bottom and returns NULL (final return elided from this view).
 */
1988 static tbm_backend_bufmgr_data *
1989 tbm_exynos_init(tbm_bufmgr bufmgr, tbm_error_e *error)
1991 tbm_bufmgr_exynos bufmgr_exynos = NULL;
1992 tbm_backend_bufmgr_func *bufmgr_func = NULL;
1993 tbm_backend_bo_func *bo_func = NULL;
1999 TBM_ERR("bufmgr is null.\n");
2001 *error = TBM_ERROR_INVALID_PARAMETER;
2005 bufmgr_exynos = calloc(1, sizeof(struct _tbm_bufmgr_exynos));
2006 if (!bufmgr_exynos) {
2007 TBM_ERR("fail to alloc bufmgr_exynos!\n");
2009 *error = TBM_ERROR_OUT_OF_MEMORY;
2013 /* check the master_fd which already had opened */
2014 bufmgr_exynos->fd = tbm_drm_helper_get_master_fd();
2015 if (bufmgr_exynos->fd < 0) {
2016 bufmgr_exynos->fd = _tbm_exynos_open_drm();
2017 if (bufmgr_exynos->fd < 0) {
2018 TBM_ERR("fail to open drm!\n");
2020 *error = TBM_ERROR_INVALID_OPERATION;
/* We opened the device ourselves and hold master: publish the fd. */
2024 if (drmIsMaster(bufmgr_exynos->fd)) {
2025 tbm_drm_helper_set_tbm_master_fd(bufmgr_exynos->fd);
2027 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2028 if (!bufmgr_exynos->device_name) {
2029 TBM_ERR("fail to get device name!\n");
2030 tbm_drm_helper_unset_tbm_master_fd();
2032 *error = TBM_ERROR_INVALID_OPERATION;
2033 goto fail_get_device_name;
2035 TBM_INFO("This is Master FD(%d) from open_drm.", bufmgr_exynos->fd);
/* Not master: drop our fd and get an authenticated one instead. */
2037 /* close the fd and get the authenticated fd from the master fd */
2038 close(bufmgr_exynos->fd);
2039 #ifdef USE_RENDER_NODE
2040 bufmgr_exynos->fd = _get_render_node(0);
2042 bufmgr_exynos->fd = -1;
2044 /* get the authenticated drm fd from the master fd */
2045 if (!tbm_drm_helper_get_auth_info(&(bufmgr_exynos->fd), &(bufmgr_exynos->device_name), NULL)) {
2046 TBM_ERR("fail to get auth drm info!\n");
2048 *error = TBM_ERROR_INVALID_OPERATION;
2049 goto fail_get_auth_info;
2051 TBM_INFO("This is Authenticated FD(%d)", bufmgr_exynos->fd);
/* Master fd came pre-opened from the helper. */
2054 bufmgr_exynos->device_name = drmGetDeviceNameFromFd(bufmgr_exynos->fd);
2055 if (!bufmgr_exynos->device_name) {
2056 TBM_ERR("fail to get device name!\n");
2057 tbm_drm_helper_unset_tbm_master_fd();
2059 *error = TBM_ERROR_INVALID_OPERATION;
2060 goto fail_get_device_name;
2062 TBM_INFO("This is Master FD from tbm_drm_helper_get_master_fd(%d)", bufmgr_exynos->fd);
2064 tbm_drm_helper_set_fd(bufmgr_exynos->fd);
2066 //Check if the tbm manager supports dma fence or not.
2067 fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2070 int length = read(fp, buf, 1);
2072 if (length == 1 && buf[0] == '1')
2073 bufmgr_exynos->use_dma_fence = 1;
2078 /* get the model name from the capi-system-info.
2079 * The alignment_plane and alignment_pitch_rgb is different accoring to the target.
2080 * There will be the stride issue when the right alignment_plane and alignment_pitch_rgb
2081 * is not set to the backend.
2083 if (system_info_get_platform_string("http://tizen.org/system/model_name", &value) != SYSTEM_INFO_ERROR_NONE) {
2084 TBM_ERR("Cannot get the \"http://tizen.org/system/model_name\" key value from system-info.\n");
2085 TBM_ERR("May not set the right value on libtbm-exynos backend.\n");
/* strncmp with n=4 includes the NUL, so this matches exactly "TW1".
 * NOTE(review): 'value' is allocated by system_info; its free() is on
 * an elided line — TODO confirm it is not leaked. */
2087 if (!strncmp(value, "TW1", 4)) {
2088 g_tbm_surface_alignment_plane = 8;
2089 g_tbm_surface_alignment_pitch_rgb = 8;
2090 g_enable_cache_ctrl = 1;
2092 g_tbm_surface_alignment_plane = 64;
2093 g_tbm_surface_alignment_pitch_rgb = 64;
2099 if (!_bufmgr_init_cache_state(bufmgr_exynos)) {
2100 TBM_ERR("fail to init bufmgr cache state\n");
2102 *error = TBM_ERROR_INVALID_OPERATION;
2103 goto fail_init_cache_state;
2106 /*Create Hash Table*/
2107 bufmgr_exynos->hashBos = drmHashCreate();
2109 /* alloc and register bufmgr_funcs */
2110 bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err);
2112 TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err);
2114 *error = TBM_ERROR_OUT_OF_MEMORY;
2115 goto fail_alloc_bufmgr_func;
2118 bufmgr_func->bufmgr_get_capabilities = tbm_exynos_bufmgr_get_capabilities;
2119 //if (tbm_backend_bufmgr_query_display_server(bufmgr, &err) && !_check_render_node())
2120 bufmgr_func->bufmgr_bind_native_display = tbm_exynos_bufmgr_bind_native_display;
2121 bufmgr_func->bufmgr_get_supported_formats = tbm_exynos_bufmgr_get_supported_formats;
2122 bufmgr_func->bufmgr_get_plane_data = tbm_exynos_bufmgr_get_plane_data;
2123 bufmgr_func->bufmgr_alloc_bo = tbm_exynos_bufmgr_alloc_bo;
2124 bufmgr_func->bufmgr_alloc_bo_with_format = NULL;
2125 bufmgr_func->bufmgr_import_fd = tbm_exynos_bufmgr_import_fd;
2126 bufmgr_func->bufmgr_import_key = tbm_exynos_bufmgr_import_key;
2128 err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func);
2129 if (err != TBM_ERROR_NONE) {
2130 TBM_ERR("fail to register bufmgr_func! err(%d)\n", err);
2132 *error = TBM_ERROR_INVALID_OPERATION;
2133 goto fail_register_bufmgr_func;
2135 bufmgr_exynos->bufmgr_func = bufmgr_func;
2137 /* alloc and register bo_funcs */
2138 bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err);
2140 TBM_ERR("fail to alloc bo_func! err(%d)\n", err);
2142 *error = TBM_ERROR_OUT_OF_MEMORY;
2143 goto fail_alloc_bo_func;
2146 bo_func->bo_free = tbm_exynos_bo_free;
2147 bo_func->bo_get_size = tbm_exynos_bo_get_size;
2148 bo_func->bo_get_memory_types = tbm_exynos_bo_get_memory_type;
2149 bo_func->bo_get_handle = tbm_exynos_bo_get_handle;
2150 bo_func->bo_map = tbm_exynos_bo_map;
2151 bo_func->bo_unmap = tbm_exynos_bo_unmap;
2152 bo_func->bo_lock = tbm_exynos_bo_lock;
2153 bo_func->bo_unlock = tbm_exynos_bo_unlock;
2154 bo_func->bo_export_fd = tbm_exynos_bo_export_fd;
2155 bo_func->bo_export_key = tbm_exynos_bo_export_key;
2157 err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func);
2158 if (err != TBM_ERROR_NONE) {
2159 TBM_ERR("fail to register bo_func! err(%d)\n", err);
2161 *error = TBM_ERROR_INVALID_OPERATION;
2162 goto fail_register_bo_func;
2164 bufmgr_exynos->bo_func = bo_func;
2166 TBM_DBG("drm_fd:%d\n", bufmgr_exynos->fd);
2169 *error = TBM_ERROR_NONE;
2171 bufmgr_exynos->bufmgr = bufmgr;
2173 return (tbm_backend_bufmgr_data *)bufmgr_exynos;
/* Error unwind ladder: each label undoes the phases completed above it. */
2175 fail_register_bo_func:
2176 tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func);
2178 fail_register_bufmgr_func:
2179 tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func);
2180 fail_alloc_bufmgr_func:
2181 _bufmgr_deinit_cache_state(bufmgr_exynos);
2182 if (bufmgr_exynos->hashBos)
2183 drmHashDestroy(bufmgr_exynos->hashBos);
2184 fail_init_cache_state:
2185 if (tbm_drm_helper_get_master_fd() >= 0)
2186 tbm_drm_helper_unset_tbm_master_fd();
2188 tbm_drm_helper_unset_fd();
2189 fail_get_device_name:
2190 if (bufmgr_exynos->fd >= 0)
2191 close(bufmgr_exynos->fd);
2194 free(bufmgr_exynos);
2198 tbm_backend_module tbm_backend_module_data = {
2201 TBM_BACKEND_ABI_VERSION_3_0,