1 /**************************************************************************
5 Copyright 2017 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
41 #include <sys/ioctl.h>
42 #include <sys/types.h>
51 #include <tbm_backend.h>
52 #include <tbm_drm_helper.h>
54 #include "tbm_bufmgr_tgl.h"
56 #define TBM_COLOR_FORMAT_COUNT 4
58 #define VC4_DRM_NAME "vc4"
60 #define STRERR_BUFSIZE 128
62 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
63 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
64 #define MAX(a, b) ((a) > (b) ? (a) : (b))
67 #define TBM_SURFACE_ALIGNMENT_PLANE (8)
68 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
70 #define TBM_SURFACE_ALIGNMENT_PLANE (16)
71 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (16)
74 #define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
75 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
77 #define SZ_1M 0x00100000
78 #define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
79 #define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
80 #define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
81 #define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
82 #define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
83 #define S5P_FIMV_NV12MT_HALIGN 128
84 #define S5P_FIMV_NV12MT_VALIGN 64
86 //#define VC4_TILED_FORMAT 1
90 unsigned int fence_supported;
94 #define DMA_BUF_ACCESS_READ 0x1
95 #define DMA_BUF_ACCESS_WRITE 0x2
96 #define DMA_BUF_ACCESS_DMA 0x4
97 #define DMA_BUF_ACCESS_MAX 0x8
99 #define DMA_FENCE_LIST_MAX 5
101 struct dma_buf_fence {
106 #define DMABUF_IOCTL_BASE 'F'
107 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
109 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
110 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
111 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
114 #define GLOBAL_KEY ((unsigned int)(-1))
116 #define TBM_VC4_CACHE_INV 0x01 /**< cache invalidate */
117 #define TBM_VC4_CACHE_CLN 0x02 /**< cache clean */
118 #define TBM_VC4_CACHE_ALL 0x10 /**< cache all */
119 #define TBM_VC4_CACHE_FLUSH (TBM_VC4_CACHE_INV|TBM_VC4_CACHE_CLN) /**< cache flush */
120 #define TBM_VC4_CACHE_FLUSH_ALL (TBM_VC4_CACHE_FLUSH|TBM_VC4_CACHE_ALL) /**< cache flush all */
124 DEVICE_CA, /* cache aware device */
125 DEVICE_CO /* cache oblivious device */
128 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
130 union _tbm_bo_cache_state {
133 unsigned int cntFlush:16; /*Flush all index for sync */
134 unsigned int isCached:1;
135 unsigned int isDirtied:2;
139 typedef struct _tbm_bufmgr_vc4 *tbm_bufmgr_vc4;
140 typedef struct _tbm_bo_vc4 *tbm_bo_vc4;
142 /* tbm buffor object for vc4 */
146 unsigned int name; /* FLINK ID */
148 unsigned int gem; /* GEM Handle */
150 unsigned int dmabuf; /* fd for dmabuf */
152 void *pBase; /* virtual address */
156 unsigned int flags_tbm; /*not used now*//*currently no values for the flags,but it may be used in future extension*/
158 pthread_mutex_t mutex;
159 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
163 tbm_bo_cache_state cache_state;
164 unsigned int map_cnt;
167 tbm_bufmgr_vc4 bufmgr_vc4;
170 /* tbm bufmgr private for vc4 */
171 struct _tbm_bufmgr_vc4 {
183 tbm_backend_bufmgr_func *bufmgr_func;
184 tbm_backend_bo_func *bo_func;
189 char *STR_DEVICE[] = {
205 uint32_t tbm_vc4_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
211 #undef ENABLE_CACHECRTL
212 #ifdef ENABLE_CACHECRTL
213 #ifdef TGL_GET_VERSION
215 _tgl_get_version(int fd)
217 struct tgl_ver_data data;
219 char buf[STRERR_BUFSIZE];
221 err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
223 TBM_ERR("error(%s) %s:%d\n",
224 strerror_r(errno, buf, STRERR_BUFSIZE));
228 TBM_DBG("tgl version is (%u, %u).\n", data.major, data.minor);
235 _tgl_init(int fd, unsigned int key)
237 struct tgl_reg_data data;
239 char buf[STRERR_BUFSIZE];
242 data.timeout_ms = 1000;
244 err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
246 TBM_ERR("error(%s) key:%d\n",
247 strerror_r(errno, buf, STRERR_BUFSIZE), key);
255 _tgl_destroy(int fd, unsigned int key)
257 struct tgl_reg_data data;
259 char buf[STRERR_BUFSIZE];
262 err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
264 TBM_ERR("error(%s) key:%d\n",
265 strerror_r(errno, buf, STRERR_BUFSIZE), key);
273 _tgl_lock(int fd, unsigned int key, int opt)
275 struct tgl_lock_data data;
276 enum tgl_type_data tgl_type;
278 char buf[STRERR_BUFSIZE];
281 case TBM_OPTION_READ:
282 tgl_type = TGL_TYPE_READ;
284 case TBM_OPTION_WRITE:
285 tgl_type = TGL_TYPE_WRITE;
288 tgl_type = TGL_TYPE_NONE;
293 data.type = tgl_type;
295 err = ioctl(fd, TGL_IOCTL_LOCK, &data);
297 TBM_ERR("error(%s) key:%d opt:%d\n",
298 strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
306 _tgl_unlock(int fd, unsigned int key)
308 struct tgl_lock_data data;
310 char buf[STRERR_BUFSIZE];
313 data.type = TGL_TYPE_NONE;
315 err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
317 TBM_ERR("error(%s) key:%d\n",
318 strerror_r(errno, buf, STRERR_BUFSIZE), key);
326 _tgl_set_data(int fd, unsigned int key, unsigned int val)
328 struct tgl_usr_data data;
330 char buf[STRERR_BUFSIZE];
335 err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
337 TBM_ERR("error(%s) key:%d\n",
338 strerror_r(errno, buf, STRERR_BUFSIZE), key);
345 static inline unsigned int
346 _tgl_get_data(int fd, unsigned int key)
348 struct tgl_usr_data data = { 0, };
350 char buf[STRERR_BUFSIZE];
354 err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
356 TBM_ERR("error(%s) key:%d\n",
357 strerror_r(errno, buf, STRERR_BUFSIZE), key);
365 _vc4_cache_flush(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int flags)
367 TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
369 /* cache flush is managed by kernel side when using dma-fence. */
370 if (bufmgr_vc4->use_dma_fence)
373 struct drm_vc4_gem_cache_op cache_op = {0, };
376 /* if bo_vc4 is null, do cache_flush_all */
379 cache_op.usr_addr = (uint64_t)((uint32_t)bo_vc4->pBase);
380 cache_op.size = bo_vc4->size;
382 flags = TBM_VC4_CACHE_FLUSH_ALL;
384 cache_op.usr_addr = 0;
388 if (flags & TBM_VC4_CACHE_INV) {
389 if (flags & TBM_VC4_CACHE_ALL)
390 cache_op.flags |= VC4_DRM_CACHE_INV_ALL;
392 cache_op.flags |= VC4_DRM_CACHE_INV_RANGE;
395 if (flags & TBM_VC4_CACHE_CLN) {
396 if (flags & TBM_VC4_CACHE_ALL)
397 cache_op.flags |= VC4_DRM_CACHE_CLN_ALL;
399 cache_op.flags |= VC4_DRM_CACHE_CLN_RANGE;
402 if (flags & TBM_VC4_CACHE_ALL)
403 cache_op.flags |= VC4_DRM_ALL_CACHES_CORES;
405 ret = drmCommandWriteRead(bufmgr_vc4->fd, DRM_VC4_GEM_CACHE_OP, &cache_op,
408 TBM_ERR("fail to flush the cache.\n");
417 _bo_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int import)
419 #ifdef ENABLE_CACHECRTL
420 TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
421 TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
423 if (bufmgr_vc4->use_dma_fence)
426 _tgl_init(bufmgr_vc4->tgl_fd, bo_vc4->name);
428 tbm_bo_cache_state cache_state;
431 cache_state.data.isDirtied = DEVICE_NONE;
432 cache_state.data.isCached = 0;
433 cache_state.data.cntFlush = 0;
435 _tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name, cache_state.val);
443 _bo_set_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int device, int opt)
445 #ifdef ENABLE_CACHECRTL
446 TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
447 TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
449 if (bufmgr_vc4->use_dma_fence)
453 unsigned short cntFlush = 0;
455 /* get cache state of a bo_vc4 */
456 bo_vc4->cache_state.val = _tgl_get_data(bufmgr_vc4->tgl_fd,
459 /* get global cache flush count */
460 cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);
462 if (device == TBM_DEVICE_CPU) {
463 if (bo_vc4->cache_state.data.isDirtied == DEVICE_CO &&
464 bo_vc4->cache_state.data.isCached)
465 need_flush = TBM_VC4_CACHE_INV;
467 bo_vc4->cache_state.data.isCached = 1;
468 if (opt & TBM_OPTION_WRITE)
469 bo_vc4->cache_state.data.isDirtied = DEVICE_CA;
471 if (bo_vc4->cache_state.data.isDirtied != DEVICE_CA)
472 bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
475 if (bo_vc4->cache_state.data.isDirtied == DEVICE_CA &&
476 bo_vc4->cache_state.data.isCached &&
477 bo_vc4->cache_state.data.cntFlush == cntFlush)
478 need_flush = TBM_VC4_CACHE_CLN | TBM_VC4_CACHE_ALL;
480 if (opt & TBM_OPTION_WRITE)
481 bo_vc4->cache_state.data.isDirtied = DEVICE_CO;
483 if (bo_vc4->cache_state.data.isDirtied != DEVICE_CO)
484 bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
489 if (need_flush & TBM_VC4_CACHE_ALL)
490 _tgl_set_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
492 /* call cache flush */
493 _vc4_cache_flush(bufmgr_vc4, bo_vc4, need_flush);
495 TBM_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
496 bo_vc4->cache_state.data.isCached,
497 bo_vc4->cache_state.data.isDirtied,
507 _bo_save_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
509 #ifdef ENABLE_CACHECRTL
510 TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
511 TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
513 if (bufmgr_vc4->use_dma_fence)
516 unsigned short cntFlush = 0;
518 /* get global cache flush count */
519 cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);
521 /* save global cache flush count */
522 bo_vc4->cache_state.data.cntFlush = cntFlush;
523 _tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name,
524 bo_vc4->cache_state.val);
531 _bo_destroy_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
533 #ifdef ENABLE_CACHECRTL
534 TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
535 TBM_RETURN_IF_FAIL(bo_vc4 != NULL);
537 if (bufmgr_vc4->use_dma_fence)
540 _tgl_destroy(bufmgr_vc4->tgl_fd, bo_vc4->name);
545 _bufmgr_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
547 #ifdef ENABLE_CACHECRTL
548 TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
550 if (bufmgr_vc4->use_dma_fence)
553 /* open tgl fd for saving cache flush data */
554 bufmgr_vc4->tgl_fd = open(tgl_devfile, O_RDWR);
556 if (bufmgr_vc4->tgl_fd < 0) {
557 bufmgr_vc4->tgl_fd = open(tgl_devfile1, O_RDWR);
558 if (bufmgr_vc4->tgl_fd < 0) {
559 TBM_ERR("fail to open global_lock:%s\n",
565 #ifdef TGL_GET_VERSION
566 if (!_tgl_get_version(bufmgr_vc4->tgl_fd)) {
567 TBM_ERR("fail to get tgl_version. tgl init failed.\n");
568 close(bufmgr_sprd->tgl_fd);
573 if (!_tgl_init(bufmgr_vc4->tgl_fd, GLOBAL_KEY)) {
574 TBM_ERR("fail to initialize the tgl\n");
575 close(bufmgr_vc4->tgl_fd);
584 _bufmgr_deinit_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
586 #ifdef ENABLE_CACHECRTL
587 TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
589 if (bufmgr_vc4->use_dma_fence)
592 if (bufmgr_vc4->tgl_fd >= 0)
593 close(bufmgr_vc4->tgl_fd);
602 fd = drmOpen(VC4_DRM_NAME, NULL);
604 TBM_ERR("fail to open drm.(%s)\n", VC4_DRM_NAME);
608 struct udev *udev = NULL;
609 struct udev_enumerate *e = NULL;
610 struct udev_list_entry *entry = NULL;
611 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
612 const char *filepath;
616 TBM_DBG("search drm-device by udev\n");
620 TBM_ERR("udev_new() failed.\n");
624 e = udev_enumerate_new(udev);
625 udev_enumerate_add_match_subsystem(e, "drm");
626 udev_enumerate_add_match_sysname(e, "card[0-9]*");
627 udev_enumerate_scan_devices(e);
629 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
630 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
631 udev_list_entry_get_name(entry));
632 device_parent = udev_device_get_parent(device);
633 /* Not need unref device_parent. device_parent and device have same refcnt */
635 if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
637 TBM_DBG("Found render device: '%s' (%s)\n",
638 udev_device_get_syspath(drm_device),
639 udev_device_get_sysname(device_parent));
643 udev_device_unref(device);
646 udev_enumerate_unref(e);
648 /* Get device file path. */
649 filepath = udev_device_get_devnode(drm_device);
651 TBM_ERR("udev_device_get_devnode() failed.\n");
652 udev_device_unref(drm_device);
657 /* Open DRM device file and check validity. */
658 fd = open(filepath, O_RDWR | O_CLOEXEC);
660 TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
661 udev_device_unref(drm_device);
668 TBM_ERR("fstat() failed %s.\n");
670 udev_device_unref(drm_device);
675 udev_device_unref(drm_device);
/* Return 1 when a vc4 render node ("renderD*" whose parent is "vc4-drm")
 * exists, 0 when not, -1 on udev failure. Always 0 when render-node
 * support is compiled out.
 * NOTE(review): reconstructed from a fragmented source dump. */
static int
_check_render_node(void)
{
#ifndef USE_RENDER_NODE
	return 0;
#else
	struct udev *udev = NULL;
	struct udev_enumerate *e = NULL;
	struct udev_list_entry *entry = NULL;
	struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;

	udev = udev_new();
	if (!udev) {
		TBM_ERR("udev_new() failed.\n");
		return -1;
	}

	e = udev_enumerate_new(udev);
	udev_enumerate_add_match_subsystem(e, "drm");
	udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
	udev_enumerate_scan_devices(e);

	udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
		device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
						      udev_list_entry_get_name(entry));
		device_parent = udev_device_get_parent(device);
		/* Not need unref device_parent. device_parent and device have same refcnt */
		if (device_parent &&
		    strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
			drm_device = device;
			TBM_DBG("Found render device: '%s' (%s)\n",
				udev_device_get_syspath(drm_device),
				udev_device_get_sysname(device_parent));
			break;
		}
		udev_device_unref(device);
	}

	udev_enumerate_unref(e);
	udev_unref(udev);

	if (!drm_device)
		return 0;

	udev_device_unref(drm_device);
	return 1;
#endif
}
735 _get_render_node(void)
737 struct udev *udev = NULL;
738 struct udev_enumerate *e = NULL;
739 struct udev_list_entry *entry = NULL;
740 struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
741 const char *filepath;
748 TBM_ERR("udev_new() failed.\n");
752 e = udev_enumerate_new(udev);
753 udev_enumerate_add_match_subsystem(e, "drm");
754 udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
755 udev_enumerate_scan_devices(e);
757 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
758 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
759 udev_list_entry_get_name(entry));
760 device_parent = udev_device_get_parent(device);
761 /* Not need unref device_parent. device_parent and device have same refcnt */
763 if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
765 TBM_DBG("Found render device: '%s' (%s)\n",
766 udev_device_get_syspath(drm_device),
767 udev_device_get_sysname(device_parent));
771 udev_device_unref(device);
774 udev_enumerate_unref(e);
776 /* Get device file path. */
777 filepath = udev_device_get_devnode(drm_device);
779 TBM_ERR("udev_device_get_devnode() failed.\n");
780 udev_device_unref(drm_device);
785 /* Open DRM device file and check validity. */
786 fd = open(filepath, O_RDWR | O_CLOEXEC);
788 TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
789 udev_device_unref(drm_device);
796 TBM_ERR("fstat() failed %s.\n");
797 udev_device_unref(drm_device);
803 udev_device_unref(drm_device);
810 _get_name(int fd, unsigned int gem)
812 struct drm_gem_flink arg = {0,};
815 if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
816 TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
820 return (unsigned int)arg.name;
824 _vc4_bo_handle(tbm_bo_vc4 bo_vc4, int device)
826 tbm_bo_handle bo_handle;
828 memset(&bo_handle, 0x0, sizeof(uint64_t));
831 case TBM_DEVICE_DEFAULT:
833 bo_handle.u32 = (uint32_t)bo_vc4->gem;
836 if (!bo_vc4->pBase) {
837 struct drm_vc4_mmap_bo arg = {0, };
840 arg.handle = bo_vc4->gem;
841 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_MMAP_BO, &arg)) {
842 TBM_ERR("Cannot map_vc4 gem=%d\n", bo_vc4->gem);
843 return (tbm_bo_handle) NULL;
846 map = mmap(NULL, bo_vc4->size, PROT_READ | PROT_WRITE, MAP_SHARED,
847 bo_vc4->fd, arg.offset);
848 if (map == MAP_FAILED) {
849 TBM_ERR("Cannot usrptr gem=%d\n", bo_vc4->gem);
850 return (tbm_bo_handle) NULL;
854 bo_handle.ptr = (void *)bo_vc4->pBase;
858 if (!bo_vc4->dmabuf) {
859 struct drm_prime_handle arg = {0, };
861 arg.handle = bo_vc4->gem;
862 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
863 TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
864 return (tbm_bo_handle) NULL;
866 bo_vc4->dmabuf = arg.fd;
869 bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
872 TBM_ERR("Not supported device:%d\n", device);
873 bo_handle.ptr = (void *) NULL;
881 _new_calc_plane_nv12(int width, int height)
885 mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
886 mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
888 if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
889 mbY = (mbY + 1) / 2 * 2;
891 return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
892 S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
896 _calc_yplane_nv12(int width, int height)
900 mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
901 mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
903 return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
907 _calc_uvplane_nv12(int width, int height)
911 mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
912 mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
914 return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
918 _new_calc_yplane_nv12(int width, int height)
920 return SIZE_ALIGN(_new_calc_plane_nv12(width,
921 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
922 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
926 _new_calc_uvplane_nv12(int width, int height)
928 return SIZE_ALIGN((_new_calc_plane_nv12(width,
929 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
930 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
933 static tbm_bufmgr_capability
934 tbm_vc4_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error)
936 tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE;
938 #ifdef VC4_TILED_FORMAT
939 capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD|TBM_BUFMGR_CAPABILITY_TILED_MEMORY;
941 capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD;
945 *error = TBM_ERROR_NONE;
951 tbm_vc4_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display)
953 tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
954 TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
956 if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_vc4->fd,
957 bufmgr_vc4->device_name, 0)) {
958 TBM_ERR("fail to tbm_drm_helper_wl_server_init\n");
959 return TBM_ERROR_INVALID_OPERATION;
962 bufmgr_vc4->bind_display = native_display;
964 return TBM_ERROR_NONE;
967 tbm_vc4_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data,
968 uint32_t **formats, uint32_t *num)
970 tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
971 uint32_t *color_formats;
973 TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
975 color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
976 if (color_formats == NULL)
977 return TBM_ERROR_OUT_OF_MEMORY;
979 memcpy(color_formats, tbm_vc4_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
981 *formats = color_formats;
982 *num = TBM_COLOR_FORMAT_COUNT;
984 TBM_DBG("supported format count = %d\n", *num);
986 return TBM_ERROR_NONE;
990 #ifdef VC4_TILED_FORMAT
991 #include <drm_fourcc.h>
992 static inline uint32_t
993 vc4_utile_width(int cpp)
1008 static inline uint32_t
1009 vc4_utile_height(int cpp)
/* True when the surface is small enough for the LT (linear-tile) layout:
 * either dimension within 4 utiles. NOTE(review): return-type line was
 * missing from the source dump; `int` assumed — verify upstream. */
static inline int
vc4_size_is_lt(uint32_t width, uint32_t height, int cpp)
{
	return (width <= 4 * vc4_utile_width(cpp) ||
		height <= 4 * vc4_utile_height(cpp));
}
1030 static tbm_backend_bo_data *
1031 tbm_vc4_bufmgr_alloc_bo_with_tiled_format(tbm_backend_bufmgr_data *bufmgr_data, int width, int height,
1032 int cpp, int format, tbm_bo_memory_type flags, int bo_idx, tbm_error_e *err)
1034 tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1036 uint32_t utile_w = vc4_utile_width(cpp);
1037 uint32_t utile_h = vc4_utile_height(cpp);
1038 uint32_t level_width, level_height;
1043 level_width = width;
1044 level_height = height;
1046 if (bufmgr_vc4 == NULL) {
1047 TBM_ERR("bufmgr_data is null\n");
1051 if (vc4_size_is_lt(level_width, level_height, cpp)) {
1052 level_width = SIZE_ALIGN(level_width, utile_w);
1053 level_height = SIZE_ALIGN(level_height, utile_h);
1055 level_width = SIZE_ALIGN(level_width,
1057 level_height = SIZE_ALIGN(level_height,
1061 stride = level_width * cpp;
1063 size = level_height * stride;
1064 size = SIZE_ALIGN(size, 4096);
1067 bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1069 TBM_ERR("fail to allocate the bo_vc4 private\n");
1072 bo_vc4->bufmgr_vc4 = bufmgr_vc4;
1074 struct drm_vc4_create_bo arg = {0, };
1076 arg.size = (__u32)size;
1077 arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
1078 if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
1079 TBM_ERR("Cannot create bo_vc4(flag:%x, size:%d)\n", arg.flags,
1080 (unsigned int)arg.size);
1085 bo_vc4->fd = bufmgr_vc4->fd;
1086 bo_vc4->gem = (unsigned int)arg.handle;
1087 bo_vc4->size = size;
1088 bo_vc4->flags_tbm = flags;
1089 bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
1091 if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 0)) {
1092 TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
1097 pthread_mutex_init(&bo_vc4->mutex, NULL);
1099 if (bufmgr_vc4->use_dma_fence && !bo_vc4->dmabuf) {
1100 struct drm_prime_handle arg = {0, };
1102 arg.handle = bo_vc4->gem;
1103 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1104 TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
1108 bo_vc4->dmabuf = arg.fd;
1113 modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
1114 struct drm_vc4_set_tiling set_tiling = {
1115 .handle = bo_vc4->gem,
1116 .modifier = modifier,
1118 drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_SET_TILING, &set_tiling);
1121 /* add bo_vc4 to hash */
1122 if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
1123 TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
1125 TBM_DBG(" bo_vc4:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
1127 bo_vc4->gem, bo_vc4->name,
1131 return (tbm_backend_bo_data *)bo_vc4;
1136 tbm_vc4_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data,
1137 tbm_format format, int plane_idx, int width,
1138 int height, uint32_t *size, uint32_t *offset,
1139 uint32_t *pitch, int *bo_idx)
1141 tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1148 TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
1152 case TBM_FORMAT_XRGB4444:
1153 case TBM_FORMAT_XBGR4444:
1154 case TBM_FORMAT_RGBX4444:
1155 case TBM_FORMAT_BGRX4444:
1156 case TBM_FORMAT_ARGB4444:
1157 case TBM_FORMAT_ABGR4444:
1158 case TBM_FORMAT_RGBA4444:
1159 case TBM_FORMAT_BGRA4444:
1160 case TBM_FORMAT_XRGB1555:
1161 case TBM_FORMAT_XBGR1555:
1162 case TBM_FORMAT_RGBX5551:
1163 case TBM_FORMAT_BGRX5551:
1164 case TBM_FORMAT_ARGB1555:
1165 case TBM_FORMAT_ABGR1555:
1166 case TBM_FORMAT_RGBA5551:
1167 case TBM_FORMAT_BGRA5551:
1168 case TBM_FORMAT_RGB565:
1171 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1172 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1176 case TBM_FORMAT_RGB888:
1177 case TBM_FORMAT_BGR888:
1180 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1181 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1185 case TBM_FORMAT_XRGB8888:
1186 case TBM_FORMAT_XBGR8888:
1187 case TBM_FORMAT_RGBX8888:
1188 case TBM_FORMAT_BGRX8888:
1189 case TBM_FORMAT_ARGB8888:
1190 case TBM_FORMAT_ABGR8888:
1191 case TBM_FORMAT_RGBA8888:
1192 case TBM_FORMAT_BGRA8888:
1195 #ifdef VC4_TILED_FORMAT
1196 if (vc4_size_is_lt(width, height, 4)) {
1197 width = SIZE_ALIGN(width, vc4_utile_width(4));
1198 height = SIZE_ALIGN(height, vc4_utile_height(4));
1201 width = SIZE_ALIGN(width, 32);
1202 uint32_t utile_h = vc4_utile_height(bpp);
1203 height = SIZE_ALIGN(height, 8*utile_h);
1206 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1207 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1212 case TBM_FORMAT_YUYV:
1213 case TBM_FORMAT_YVYU:
1214 case TBM_FORMAT_UYVY:
1215 case TBM_FORMAT_VYUY:
1216 case TBM_FORMAT_AYUV:
1219 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1220 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1226 * index 0 = Y plane, [7:0] Y
1227 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1229 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
1231 case TBM_FORMAT_NV12:
1233 if (plane_idx == 0) {
1235 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1236 _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1239 } else if (plane_idx == 1) {
1240 _offset = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1242 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1243 _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
1248 case TBM_FORMAT_NV21:
1250 if (plane_idx == 0) {
1252 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1253 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1255 } else if (plane_idx == 1) {
1256 _offset = width * height;
1257 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1258 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1263 case TBM_FORMAT_NV16:
1264 case TBM_FORMAT_NV61:
1266 /*if(plane_idx == 0)*/
1269 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1270 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1275 /*else if( plane_idx ==1 )*/
1278 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1279 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1286 * index 0: Y plane, [7:0] Y
1287 * index 1: Cb plane, [7:0] Cb
1288 * index 2: Cr plane, [7:0] Cr
1290 * index 1: Cr plane, [7:0] Cr
1291 * index 2: Cb plane, [7:0] Cb
1295 * NATIVE_BUFFER_FORMAT_YV12
1296 * NATIVE_BUFFER_FORMAT_I420
1298 case TBM_FORMAT_YUV410:
1299 case TBM_FORMAT_YVU410:
1301 /*if(plane_idx == 0)*/
1304 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1305 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1310 /*else if(plane_idx == 1)*/
1313 _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1314 _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1319 /*else if (plane_idx == 2)*/
1322 _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1323 _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1327 case TBM_FORMAT_YUV411:
1328 case TBM_FORMAT_YVU411:
1329 case TBM_FORMAT_YUV420:
1330 case TBM_FORMAT_YVU420:
1332 /*if(plane_idx == 0)*/
1335 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1336 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1341 /*else if(plane_idx == 1)*/
1344 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1345 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1350 /*else if (plane_idx == 2)*/
1353 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1354 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1358 case TBM_FORMAT_YUV422:
1359 case TBM_FORMAT_YVU422:
1361 /*if(plane_idx == 0)*/
1364 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1365 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1370 /*else if(plane_idx == 1)*/
1373 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1374 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1379 /*else if (plane_idx == 2)*/
1382 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1383 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1387 case TBM_FORMAT_YUV444:
1388 case TBM_FORMAT_YVU444:
1390 /*if(plane_idx == 0)*/
1393 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1394 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1399 /*else if(plane_idx == 1)*/
1402 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1403 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1408 /*else if (plane_idx == 2)*/
1411 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1412 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1426 return TBM_ERROR_NONE;
1429 static tbm_backend_bo_data *
1430 tbm_vc4_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size,
1431 tbm_bo_memory_type flags, tbm_error_e *error)
1433 tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1436 if (bufmgr_vc4 == NULL) {
1437 TBM_ERR("bufmgr_data is null\n");
1439 *error = TBM_ERROR_INVALID_PARAMETER;
1443 bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1445 TBM_ERR("fail to allocate the bo_vc4 private\n");
1447 *error = TBM_ERROR_OUT_OF_MEMORY;
1450 bo_vc4->bufmgr_vc4 = bufmgr_vc4;
1452 struct drm_vc4_create_bo arg = {0, };
1454 arg.size = (__u32)size;
1455 arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
1456 if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
1457 TBM_ERR("Cannot create bo_vc4(flag:%x, size:%d)\n", arg.flags,
1458 (unsigned int)arg.size);
1461 *error = TBM_ERROR_INVALID_OPERATION;
1465 bo_vc4->fd = bufmgr_vc4->fd;
1466 bo_vc4->gem = (unsigned int)arg.handle;
1467 bo_vc4->size = size;
1468 bo_vc4->flags_tbm = flags;
1469 bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
1471 if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 0)) {
1472 TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
1475 *error = TBM_ERROR_INVALID_OPERATION;
1479 pthread_mutex_init(&bo_vc4->mutex, NULL);
1481 if (bufmgr_vc4->use_dma_fence && !bo_vc4->dmabuf) {
1482 struct drm_prime_handle arg = {0, };
1484 arg.handle = bo_vc4->gem;
1485 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1486 TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
1489 *error = TBM_ERROR_INVALID_OPERATION;
1492 bo_vc4->dmabuf = arg.fd;
1495 /* add bo_vc4 to hash */
1496 if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
1497 TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
1499 TBM_DBG(" bo_vc4:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
1501 bo_vc4->gem, bo_vc4->name,
1506 *error = TBM_ERROR_NONE;
1508 return (tbm_backend_bo_data *)bo_vc4;
/*
 * tbm_vc4_bufmgr_import_fd - import a buffer object from a PRIME dma-buf fd.
 * Converts @key to a GEM handle, reuses a previously-imported bo when its
 * flink name is already in hashBos, otherwise allocates a new tbm_bo_vc4,
 * determines its size (lseek on the prime fd, falling back to
 * DRM_IOCTL_GEM_OPEN's size), initializes cache state and registers the bo.
 * NOTE(review): this extract is mangled — original line numbers are fused
 * into the text and several statements/braces are elided. Code is left
 * byte-identical; only comments were added.
 */
1511 static tbm_backend_bo_data *
1512 tbm_vc4_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error)
1514 tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1516 unsigned int gem = 0;
1519 char buf[STRERR_BUFSIZE];
1521 if (bufmgr_vc4 == NULL) {
1522 TBM_ERR("bufmgr_data is null\n");
1524 *error = TBM_ERROR_INVALID_PARAMETER;
1528 /*getting handle from fd*/
1529 struct drm_prime_handle arg = {0, };
/* presumably an elided line assigns arg.fd = key before the ioctl — confirm upstream */
1533 if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1534 TBM_ERR("Cannot get gem handle from fd:%d (%s)\n",
1535 arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
1537 *error = TBM_ERROR_INVALID_OPERATION;
1542 name = _get_name(bufmgr_vc4->fd, gem);
1544 TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
1545 gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1547 *error = TBM_ERROR_INVALID_OPERATION;
/* fast path: same flink name already imported — hand back the cached bo */
1551 ret = drmHashLookup(bufmgr_vc4->hashBos, name, (void **)&bo_vc4);
1553 if (gem == bo_vc4->gem) {
1555 *error = TBM_ERROR_NONE;
1560 /* Determine size of bo_vc4. The fd-to-handle ioctl really should
1561 * return the size, but it doesn't. If we have kernel 3.12 or
1562 * later, we can lseek on the prime fd to get the size. Older
1563 * kernels will just fail, in which case we fall back to the
1564 * provided (estimated or guess size).
1566 unsigned int real_size = -1;
1567 struct drm_gem_open open_arg = {0, };
1569 real_size = lseek(key, 0, SEEK_END);
1571 open_arg.name = name;
1572 if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
1573 TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
1574 gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1576 *error = TBM_ERROR_INVALID_OPERATION;
1580 /* Free gem handle to avoid a memory leak*/
1581 struct drm_gem_close close_arg = {0, };
1582 memset(&close_arg, 0, sizeof(close_arg));
1583 close_arg.handle = open_arg.handle;
1584 if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_CLOSE, &close_arg)) {
/* NOTE(review): format string below has no %s conversion yet strerror_r()
 * is passed as an argument — the errno text is silently dropped; it was
 * likely meant to be "Cannot close gem_handle.(%s)\n". */
1585 TBM_ERR("Cannot close gem_handle.\n",
1586 strerror_r(errno, buf, STRERR_BUFSIZE));
1588 *error = TBM_ERROR_INVALID_OPERATION;
/* lseek failed (old kernel): fall back to the size reported by GEM_OPEN */
1592 if (real_size == -1)
1593 real_size = open_arg.size;
1595 bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1597 TBM_ERR("bo_vc4:%p fail to allocate the bo_vc4\n", bo_vc4);
1599 *error = TBM_ERROR_OUT_OF_MEMORY;
1602 bo_vc4->bufmgr_vc4 = bufmgr_vc4;
1604 bo_vc4->fd = bufmgr_vc4->fd;
1606 bo_vc4->size = real_size;
1607 bo_vc4->name = name;
1608 bo_vc4->flags_tbm = 0;
1610 #ifdef VC4_TILED_FORMAT
/* ask the kernel whether the bo is T-tiled so imports carry the tiled flag */
1611 struct drm_vc4_get_tiling get_tiling = {
1612 .handle = bo_vc4->gem,
1614 drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
1616 if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
1617 bo_vc4->flags_tbm |= TBM_BO_TILED;
1620 if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
1621 TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
1624 *error = TBM_ERROR_INVALID_OPERATION;
1628 /* add bo_vc4 to hash */
1629 if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
1630 TBM_ERR("bo_vc4:%p Cannot insert bo_vc4 to Hash(%d) from gem:%d, fd:%d\n",
1631 bo_vc4, bo_vc4->name, gem, key);
1633 TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
1635 bo_vc4->gem, bo_vc4->name,
1642 *error = TBM_ERROR_NONE;
1644 return (tbm_backend_bo_data *)bo_vc4;
/*
 * tbm_vc4_bufmgr_import_key - import a buffer object from a GEM flink name.
 * Returns the cached bo when @key is already in hashBos; otherwise opens the
 * GEM object, allocates a tbm_bo_vc4, exports its dmabuf fd, initializes
 * cache state and registers it in the hash.
 * NOTE(review): extract is mangled (fused line numbers, elided statements);
 * code left byte-identical, comments only. The line assigning bo_vc4->name
 * is not visible here — presumably set to @key in an elided line; confirm
 * upstream before relying on the hash insert below.
 */
1647 static tbm_backend_bo_data *
1648 tbm_vc4_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error)
1650 tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
1654 if (bufmgr_vc4 == NULL) {
1655 TBM_ERR("bufmgr_data is null\n");
1657 *error = TBM_ERROR_INVALID_PARAMETER;
/* fast path: name already imported — return the cached bo */
1661 ret = drmHashLookup(bufmgr_vc4->hashBos, key, (void **)&bo_vc4);
1664 *error = TBM_ERROR_NONE;
1665 return (tbm_backend_bo_data *)bo_vc4;
1668 struct drm_gem_open arg = {0, };
1671 if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1672 TBM_ERR("Cannot open gem name=%d\n", key);
1674 *error = TBM_ERROR_INVALID_OPERATION;
1678 bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
1680 TBM_ERR("fail to allocate the bo_vc4 private\n");
1682 *error = TBM_ERROR_OUT_OF_MEMORY;
1685 bo_vc4->bufmgr_vc4 = bufmgr_vc4;
1687 bo_vc4->fd = bufmgr_vc4->fd;
1688 bo_vc4->gem = arg.handle;
1689 bo_vc4->size = arg.size;
1691 bo_vc4->flags_tbm = 0;
1693 #ifdef VC4_TILED_FORMAT
/* ask the kernel whether the bo is T-tiled so imports carry the tiled flag */
1694 struct drm_vc4_get_tiling get_tiling = {
1695 .handle = bo_vc4->gem,
1697 drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
1699 if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
1700 bo_vc4->flags_tbm |= TBM_BO_TILED;
1703 if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
1704 TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
1707 *error = TBM_ERROR_INVALID_OPERATION;
/* export a dmabuf fd for the imported gem so later sharing works */
1711 if (!bo_vc4->dmabuf) {
1712 struct drm_prime_handle arg = {0, };
1714 arg.handle = bo_vc4->gem;
1715 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1716 TBM_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_vc4->gem);
1718 *error = TBM_ERROR_INVALID_OPERATION;
1722 bo_vc4->dmabuf = arg.fd;
1725 /* add bo_vc4 to hash */
1726 if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
1727 TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
1729 TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1731 bo_vc4->gem, bo_vc4->name,
1737 *error = TBM_ERROR_NONE;
1739 return (tbm_backend_bo_data *)bo_vc4;
/*
 * tbm_vc4_bo_free - destroy a buffer object.
 * Unmaps any CPU mapping, closes the dmabuf fd, removes the entry from
 * hashBos, tears down the per-bo cache state, and closes the GEM handle.
 * NOTE(review): extract is mangled (fused line numbers, elided statements,
 * e.g. the return-type line and the free(bo_vc4) tail are not visible);
 * code left byte-identical, comments only.
 */
1743 tbm_vc4_bo_free(tbm_backend_bo_data *bo_data)
1745 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1747 tbm_bufmgr_vc4 bufmgr_vc4;
1748 char buf[STRERR_BUFSIZE];
1754 bufmgr_vc4 = bo_vc4->bufmgr_vc4;
1758 TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, size:%d\n",
1760 bo_vc4->gem, bo_vc4->name,
/* unmap the CPU mapping if the bo was ever mapped */
1764 if (bo_vc4->pBase) {
1765 if (munmap(bo_vc4->pBase, bo_vc4->size) == -1) {
1766 TBM_ERR("bo_vc4:%p fail to munmap(%s)\n",
1767 bo_vc4, strerror_r(errno, buf, STRERR_BUFSIZE));
1772 if (bo_vc4->dmabuf) {
1773 close(bo_vc4->dmabuf);
1777 /* delete bo_vc4 from hash */
1778 ret = drmHashLookup(bufmgr_vc4->hashBos, bo_vc4->name, (void **)&temp);
1780 drmHashDelete(bufmgr_vc4->hashBos, bo_vc4->name);
1782 TBM_ERR("Cannot find bo_vc4 to Hash(%d), ret=%d\n", bo_vc4->name, ret);
/* temp != bo_vc4 here would mean duplicate flink names in the hash */
1785 TBM_ERR("hashBos probably has several BOs with same name!!!\n");
1787 _bo_destroy_cache_state(bufmgr_vc4, bo_vc4);
1789 /* Free gem handle */
1790 struct drm_gem_close arg = {0, };
1792 memset(&arg, 0, sizeof(arg));
1793 arg.handle = bo_vc4->gem;
1794 if (drmIoctl(bo_vc4->fd, DRM_IOCTL_GEM_CLOSE, &arg))
1795 TBM_ERR("bo_vc4:%p fail to gem close.(%s)\n",
1796 bo_vc4, strerror_r(errno, buf, STRERR_BUFSIZE));
1802 tbm_vc4_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1804 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1808 *error = TBM_ERROR_INVALID_PARAMETER;
1813 *error = TBM_ERROR_NONE;
1815 return bo_vc4->size;
1818 static tbm_bo_memory_type
1819 tbm_vc4_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1821 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1825 *error = TBM_ERROR_INVALID_PARAMETER;
1826 return TBM_BO_DEFAULT;
1830 *error = TBM_ERROR_NONE;
1832 return bo_vc4->flags_tbm;
/*
 * tbm_vc4_bo_get_handle - return the device-specific handle of a bo for
 * @device without changing cache state (unlike tbm_vc4_bo_map below).
 * Delegates to _vc4_bo_handle() and fails when it yields a NULL pointer.
 * NOTE(review): extract is mangled (fused line numbers, elided guards and
 * braces); code left byte-identical, comments only.
 */
1835 static tbm_bo_handle
1836 tbm_vc4_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error)
1838 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1839 tbm_bo_handle bo_handle;
1843 *error = TBM_ERROR_INVALID_PARAMETER;
1844 return (tbm_bo_handle) NULL;
/* a zero gem handle means the bo cannot be mapped at all */
1848 TBM_ERR("Cannot map gem=%d\n", bo_vc4->gem);
1850 *error = TBM_ERROR_INVALID_PARAMETER;
1851 return (tbm_bo_handle) NULL;
1854 TBM_DBG("bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
1856 bo_vc4->gem, bo_vc4->name,
1860 STR_DEVICE[device]);
1862 /*Get mapped bo_handle*/
1863 bo_handle = _vc4_bo_handle(bo_vc4, device);
1864 if (bo_handle.ptr == NULL) {
1865 TBM_ERR("Cannot get handle: gem:%d, device:%d\n",
1866 bo_vc4->gem, device);
1868 *error = TBM_ERROR_INVALID_OPERATION;
1869 return (tbm_bo_handle) NULL;
1873 *error = TBM_ERROR_NONE;
/*
 * tbm_vc4_bo_map - map a bo for @device with access option @opt.
 * Resolves the handle via _vc4_bo_handle(); on the first mapping
 * (map_cnt == 0) records cache state, and remembers the device for the
 * matching unmap's cache flush decision.
 * NOTE(review): extract is mangled (fused line numbers, elided guards,
 * the map_cnt increment and final return are not visible); code left
 * byte-identical, comments only.
 */
1878 static tbm_bo_handle
1879 tbm_vc4_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1880 tbm_bo_access_option opt, tbm_error_e *error)
1882 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1883 tbm_bo_handle bo_handle;
1884 tbm_bufmgr_vc4 bufmgr_vc4;
1888 *error = TBM_ERROR_INVALID_PARAMETER;
1889 return (tbm_bo_handle) NULL;
1892 bufmgr_vc4 = bo_vc4->bufmgr_vc4;
1895 *error = TBM_ERROR_INVALID_PARAMETER;
1896 return (tbm_bo_handle) NULL;
/* a zero gem handle means the bo cannot be mapped */
1900 TBM_ERR("Cannot map gem=%d\n", bo_vc4->gem);
1902 *error = TBM_ERROR_INVALID_PARAMETER;
1903 return (tbm_bo_handle) NULL;
1906 TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, %s, %s\n",
1908 bo_vc4->gem, bo_vc4->name,
1913 /*Get mapped bo_handle*/
1914 bo_handle = _vc4_bo_handle(bo_vc4, device);
1915 if (bo_handle.ptr == NULL) {
1916 TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1917 bo_vc4->gem, device, opt);
1919 *error = TBM_ERROR_INVALID_OPERATION;
1920 return (tbm_bo_handle) NULL;
/* first mapping: record cache state for later flush/save decisions */
1923 if (bo_vc4->map_cnt == 0)
1924 _bo_set_cache_state(bufmgr_vc4, bo_vc4, device, opt);
1926 bo_vc4->last_map_device = device;
1931 *error = TBM_ERROR_NONE;
/*
 * tbm_vc4_bo_unmap - undo a tbm_vc4_bo_map().
 * When the map count drops to zero, saves the cache state; with
 * ENABLE_CACHECRTL, flushes caches if the last mapping was for the CPU.
 * NOTE(review): extract is mangled (fused line numbers, the map_cnt
 * decrement and return-type line are not visible); code left byte-identical,
 * comments only.
 */
1937 tbm_vc4_bo_unmap(tbm_backend_bo_data *bo_data)
1939 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1940 tbm_bufmgr_vc4 bufmgr_vc4;
1943 return TBM_ERROR_INVALID_PARAMETER;
1945 bufmgr_vc4 = bo_vc4->bufmgr_vc4;
1947 return TBM_ERROR_INVALID_PARAMETER;
1950 return TBM_ERROR_INVALID_PARAMETER;
/* last unmap: persist cache state before the mapping goes away */
1954 if (bo_vc4->map_cnt == 0)
1955 _bo_save_cache_state(bufmgr_vc4, bo_vc4);
1957 #ifdef ENABLE_CACHECRTL
1958 if (bo_vc4->last_map_device == TBM_DEVICE_CPU)
1959 _vc4_cache_flush(bufmgr_vc4, bo_vc4, TBM_VC4_CACHE_FLUSH_ALL);
1962 bo_vc4->last_map_device = -1;
1964 TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d\n",
1966 bo_vc4->gem, bo_vc4->name,
1969 return TBM_ERROR_NONE;
/*
 * tbm_vc4_bo_lock - acquire a dma-buf access fence / file lock on a bo.
 * For TBM_DEVICE_3D it takes a DMA fence via DMABUF_IOCTL_GET_FENCE and
 * records it in the bo's dma_fence list; for TBM_DEVICE_CPU it takes an
 * fcntl() advisory lock on the dmabuf fd. Compiled out entirely when
 * ALWAYS_BACKEND_CTRL is defined.
 * NOTE(review): extract is mangled (fused line numbers, elided guards and
 * braces); code left byte-identical, comments only.
 */
1973 tbm_vc4_bo_lock(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1974 tbm_bo_access_option opt)
1976 #ifndef ALWAYS_BACKEND_CTRL
1977 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
1978 tbm_bufmgr_vc4 bufmgr_vc4;
1979 struct dma_buf_fence fence;
1980 struct flock filelock;
1982 char buf[STRERR_BUFSIZE];
1985 return TBM_ERROR_INVALID_PARAMETER;
1987 bufmgr_vc4 = bo_vc4->bufmgr_vc4;
1989 return TBM_ERROR_INVALID_PARAMETER;
1991 if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
1992 TBM_DBG("Not support device type,\n");
1993 return TBM_ERROR_INVALID_OPERATION;
1996 memset(&fence, 0, sizeof(struct dma_buf_fence));
1998 /* Check if the given type is valid or not. */
1999 if (opt & TBM_OPTION_WRITE) {
2000 if (device == TBM_DEVICE_3D)
2001 fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
2002 } else if (opt & TBM_OPTION_READ) {
2003 if (device == TBM_DEVICE_3D)
2004 fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
2006 TBM_ERR("Invalid argument\n");
2007 return TBM_ERROR_INVALID_PARAMETER;
2010 /* Check if the tbm manager supports dma fence or not. */
2011 if (!bufmgr_vc4->use_dma_fence) {
2012 TBM_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
2013 return TBM_ERROR_INVALID_OPERATION;
2017 if (device == TBM_DEVICE_3D) {
2018 ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
2020 TBM_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
2021 return TBM_ERROR_INVALID_OPERATION;
/* CPU path: advisory file lock on the dmabuf fd instead of a DMA fence */
2024 if (opt & TBM_OPTION_WRITE)
2025 filelock.l_type = F_WRLCK;
2027 filelock.l_type = F_RDLCK;
2029 filelock.l_whence = SEEK_CUR;
2030 filelock.l_start = 0;
2033 if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
2034 return TBM_ERROR_INVALID_OPERATION;
2037 pthread_mutex_lock(&bo_vc4->mutex);
2039 if (device == TBM_DEVICE_3D) {
/* record the acquired fence in the first free slot of the bo's list */
2042 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
2043 if (bo_vc4->dma_fence[i].ctx == 0) {
2044 bo_vc4->dma_fence[i].type = fence.type;
2045 bo_vc4->dma_fence[i].ctx = fence.ctx;
2050 if (i == DMA_FENCE_LIST_MAX) {
2051 /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
2052 TBM_ERR("fence list is full\n");
2056 pthread_mutex_unlock(&bo_vc4->mutex);
2058 TBM_DBG("DMABUF_IOCTL_GET_FENCE! bo_vc4:%p, gem:%d(%d), fd:%ds\n",
2060 bo_vc4->gem, bo_vc4->name,
2062 #endif /* ALWAYS_BACKEND_CTRL */
2064 return TBM_ERROR_NONE;
/*
 * tbm_vc4_bo_unlock - release the fence / file lock taken by tbm_vc4_bo_lock.
 * Pops the oldest entry from the bo's dma_fence list, shifts the remaining
 * entries down, then releases it via DMABUF_IOCTL_PUT_FENCE (3D) or an
 * F_UNLCK fcntl() on the dmabuf fd (CPU). Compiled out when
 * ALWAYS_BACKEND_CTRL is defined.
 * NOTE(review): extract is mangled (fused line numbers, elided guards);
 * code left byte-identical, comments only.
 */
2068 tbm_vc4_bo_unlock(tbm_backend_bo_data *bo_data)
2070 #ifndef ALWAYS_BACKEND_CTRL
2071 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
2072 struct dma_buf_fence fence;
2073 struct flock filelock;
2074 unsigned int dma_type = 0;
2076 char buf[STRERR_BUFSIZE];
2078 bufmgr_vc4 = bo_vc4->bufmgr_vc4;
2080 return TBM_ERROR_INVALID_PARAMETER;
2082 if (bo_vc4->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
2085 if (!bo_vc4->dma_fence[0].ctx && dma_type) {
2086 TBM_DBG("FENCE not support or ignored,\n");
2087 return TBM_ERROR_INVALID_OPERATION;
/* NOTE(review): this guard tests the identical condition as the one above
 * but reports a different message — the second check likely intended a
 * different predicate (e.g. ctx set with !dma_type); verify upstream. */
2090 if (!bo_vc4->dma_fence[0].ctx && dma_type) {
2091 TBM_DBG("device type is not 3D/CPU,\n");
2092 return TBM_ERROR_INVALID_OPERATION;
2095 pthread_mutex_lock(&bo_vc4->mutex);
/* pop the oldest fence and shift the rest of the list down one slot */
2098 fence.type = bo_vc4->dma_fence[0].type;
2099 fence.ctx = bo_vc4->dma_fence[0].ctx;
2102 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
2103 bo_vc4->dma_fence[i - 1].type = bo_vc4->dma_fence[i].type;
2104 bo_vc4->dma_fence[i - 1].ctx = bo_vc4->dma_fence[i].ctx;
2106 bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
2107 bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
2110 pthread_mutex_unlock(&bo_vc4->mutex);
2113 ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
2115 TBM_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
2116 return TBM_ERROR_INVALID_OPERATION;
2119 filelock.l_type = F_UNLCK;
2120 filelock.l_whence = SEEK_CUR;
2121 filelock.l_start = 0;
2124 if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
2125 return TBM_ERROR_INVALID_OPERATION;
2128 TBM_DBG("DMABUF_IOCTL_PUT_FENCE! bo_vc4:%p, gem:%d(%d), fd:%ds\n",
2130 bo_vc4->gem, bo_vc4->name,
2132 #endif /* ALWAYS_BACKEND_CTRL */
2134 return TBM_ERROR_NONE;
2138 tbm_vc4_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error)
2140 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
2142 char buf[STRERR_BUFSIZE];
2146 *error = TBM_ERROR_INVALID_PARAMETER;
2150 struct drm_prime_handle arg = {0, };
2152 arg.handle = bo_vc4->gem;
2153 ret = drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
2155 TBM_ERR("bo_vc4:%p Cannot dmabuf=%d (%s)\n",
2156 bo_vc4, bo_vc4->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
2158 *error = TBM_ERROR_INVALID_OPERATION;
2159 return (tbm_fd) ret;
2162 TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
2164 bo_vc4->gem, bo_vc4->name,
2171 *error = TBM_ERROR_NONE;
2173 return (tbm_fd)arg.fd;
2177 tbm_vc4_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error)
2179 tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
2183 *error = TBM_ERROR_INVALID_PARAMETER;
2187 if (!bo_vc4->name) {
2188 bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
2189 if (!bo_vc4->name) {
2190 TBM_ERR("error Cannot get name\n");
2192 *error = TBM_ERROR_INVALID_PARAMETER;
2197 TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
2199 bo_vc4->gem, bo_vc4->name,
2205 *error = TBM_ERROR_NONE;
2207 return (tbm_key)bo_vc4->name;
/*
 * tbm_vc4_deinit - tear down the backend: free the registered bufmgr/bo
 * function tables, drain and destroy the bo hash, deinit cache state,
 * release wayland auth / helper fds and close the drm fd.
 * NOTE(review): extract is mangled (fused line numbers, elided statements,
 * e.g. the per-entry bo free inside the hash drain loop and the final
 * free(bufmgr_vc4) are not visible); code left byte-identical, comments only.
 */
2211 tbm_vc4_deinit(tbm_backend_bufmgr_data *bufmgr_data)
2213 tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
2219 TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
2221 bufmgr = bufmgr_vc4->bufmgr;
2223 tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_vc4->bufmgr_func);
2224 tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_vc4->bo_func);
/* drain and destroy the name->bo hash */
2226 if (bufmgr_vc4->hashBos) {
2227 while (drmHashFirst(bufmgr_vc4->hashBos, &key, &value) > 0) {
2229 drmHashDelete(bufmgr_vc4->hashBos, key);
2232 drmHashDestroy(bufmgr_vc4->hashBos);
2233 bufmgr_vc4->hashBos = NULL;
2236 _bufmgr_deinit_cache_state(bufmgr_vc4);
2238 if (bufmgr_vc4->bind_display)
2239 tbm_drm_helper_wl_auth_server_deinit();
2241 if (bufmgr_vc4->device_name)
2242 free(bufmgr_vc4->device_name);
/* master fd vs client fd cleanup mirrors the acquisition in tbm_vc4_init */
2244 if (tbm_backend_bufmgr_query_display_server(bufmgr, &error))
2245 tbm_drm_helper_unset_tbm_master_fd();
2247 tbm_drm_helper_unset_fd();
2249 close(bufmgr_vc4->fd);
/*
 * tbm_vc4_init - backend entry point: allocate the bufmgr private data,
 * acquire a drm fd (master fd on the display server, render node or
 * auth'ed fd on clients), probe dma-fence support, init cache state,
 * create the bo hash, and register the bufmgr/bo function tables.
 * Error paths unwind via the goto labels at the bottom, in reverse order
 * of acquisition.
 * NOTE(review): extract is mangled (fused line numbers, elided braces,
 * several return/close statements not visible); code left byte-identical,
 * comments only.
 */
2254 static tbm_backend_bufmgr_data *
2255 tbm_vc4_init(tbm_bufmgr bufmgr, tbm_error_e *error)
2257 tbm_bufmgr_vc4 bufmgr_vc4 = NULL;
2258 tbm_backend_bufmgr_func *bufmgr_func = NULL;
2259 tbm_backend_bo_func *bo_func = NULL;
2264 TBM_ERR("bufmgr is null.\n");
2266 *error = TBM_ERROR_INVALID_PARAMETER;
2270 bufmgr_vc4 = calloc(1, sizeof(struct _tbm_bufmgr_vc4));
2272 TBM_ERR("fail to alloc bufmgr_vc4!\n");
2274 *error = TBM_ERROR_OUT_OF_MEMORY;
/* display-server process: own the drm master fd */
2278 if (tbm_backend_bufmgr_query_display_server(bufmgr, &err)) {
2279 bufmgr_vc4->fd = tbm_drm_helper_get_master_fd();
2280 if (bufmgr_vc4->fd < 0) {
2281 bufmgr_vc4->fd = _tbm_vc4_open_drm();
2282 if (bufmgr_vc4->fd < 0) {
2283 TBM_ERR("fail to open drm!\n");
2285 *error = TBM_ERROR_INVALID_OPERATION;
2290 tbm_drm_helper_set_tbm_master_fd(bufmgr_vc4->fd);
2292 bufmgr_vc4->device_name = drmGetDeviceNameFromFd(bufmgr_vc4->fd);
2293 if (!bufmgr_vc4->device_name) {
2294 TBM_ERR("fail to get device name!\n");
2295 tbm_drm_helper_unset_tbm_master_fd();
2297 *error = TBM_ERROR_INVALID_OPERATION;
2298 goto fail_get_device_name;
2300 tbm_drm_helper_set_fd(bufmgr_vc4->fd);
/* client process: prefer a render node, else an authenticated fd */
2302 if (_check_render_node()) {
2303 bufmgr_vc4->fd = _get_render_node();//TODO
2304 if (bufmgr_vc4->fd < 0) {
2305 TBM_ERR("fail to get render node\n");
2307 *error = TBM_ERROR_INVALID_OPERATION;
2308 goto fail_get_render_node;
2310 TBM_DBG("Use render node:%d\n", bufmgr_vc4->fd);
2312 if (!tbm_drm_helper_get_auth_info(&(bufmgr_vc4->fd), &(bufmgr_vc4->device_name), NULL)) {
2313 TBM_ERR("fail to get auth drm info!\n");
2315 *error = TBM_ERROR_INVALID_OPERATION;
2316 goto fail_get_auth_info;
2318 tbm_drm_helper_set_fd(bufmgr_vc4->fd);
2322 //Check if the tbm manager supports dma fence or not.
2323 fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2326 int length = read(fp, buf, 1);
2328 if (length == 1 && buf[0] == '1')
2329 bufmgr_vc4->use_dma_fence = 1;
2334 if (!_bufmgr_init_cache_state(bufmgr_vc4)) {
2335 TBM_ERR("fail to init bufmgr cache state\n");
2336 goto fail_init_cache_state;
2339 /*Create Hash Table*/
2340 bufmgr_vc4->hashBos = drmHashCreate();
2342 /* alloc and register bufmgr_funcs */
2343 bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err);
2345 TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err);
2347 *error = TBM_ERROR_OUT_OF_MEMORY;
2348 goto fail_alloc_bufmgr_func;
2351 bufmgr_func->bufmgr_get_capabilities = tbm_vc4_bufmgr_get_capabilities;
2352 //if (tbm_backend_bufmgr_query_display_server(bufmgr, &err) && !_check_render_node())
2353 bufmgr_func->bufmgr_bind_native_display = tbm_vc4_bufmgr_bind_native_display;
2354 bufmgr_func->bufmgr_get_supported_formats = tbm_vc4_bufmgr_get_supported_formats;
2355 bufmgr_func->bufmgr_get_plane_data = tbm_vc4_bufmgr_get_plane_data;
2356 bufmgr_func->bufmgr_alloc_bo = tbm_vc4_bufmgr_alloc_bo;
2357 bufmgr_func->bufmgr_alloc_bo_with_format = NULL;
2358 bufmgr_func->bufmgr_import_fd = tbm_vc4_bufmgr_import_fd;
2359 bufmgr_func->bufmgr_import_key = tbm_vc4_bufmgr_import_key;
2360 #ifdef VC4_TILED_FORMAT
2361 bufmgr_func->bufmgr_alloc_bo_with_tiled_format = tbm_vc4_bufmgr_alloc_bo_with_tiled_format;
2363 bufmgr_func->bufmgr_alloc_bo_with_tiled_format = NULL;
2365 err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func);
2366 if (err != TBM_ERROR_NONE) {
2367 TBM_ERR("fail to register bufmgr_func! err(%d)\n", err);
2369 *error = TBM_ERROR_INVALID_OPERATION;
2370 goto fail_register_bufmgr_func;
2372 bufmgr_vc4->bufmgr_func = bufmgr_func;
2374 /* alloc and register bo_funcs */
2375 bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err);
2377 TBM_ERR("fail to alloc bo_func! err(%d)\n", err);
2379 *error = TBM_ERROR_OUT_OF_MEMORY;
2380 goto fail_alloc_bo_func;
2383 bo_func->bo_free = tbm_vc4_bo_free;
2384 bo_func->bo_get_size = tbm_vc4_bo_get_size;
2385 bo_func->bo_get_memory_types = tbm_vc4_bo_get_memory_type;
2386 bo_func->bo_get_handle = tbm_vc4_bo_get_handle;
2387 bo_func->bo_map = tbm_vc4_bo_map;
2388 bo_func->bo_unmap = tbm_vc4_bo_unmap;
2389 bo_func->bo_lock = tbm_vc4_bo_lock;
2390 bo_func->bo_unlock = tbm_vc4_bo_unlock;
2391 bo_func->bo_export_fd = tbm_vc4_bo_export_fd;
2392 bo_func->bo_export_key = tbm_vc4_bo_export_key;
2394 err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func);
2395 if (err != TBM_ERROR_NONE) {
2396 TBM_ERR("fail to register bo_func! err(%d)\n", err);
2398 *error = TBM_ERROR_INVALID_OPERATION;
2399 goto fail_register_bo_func;
2401 bufmgr_vc4->bo_func = bo_func;
2403 TBM_DBG("drm_fd:%d\n", bufmgr_vc4->fd);
2406 *error = TBM_ERROR_NONE;
2408 bufmgr_vc4->bufmgr = bufmgr;
2410 return (tbm_backend_bufmgr_data *)bufmgr_vc4;
/* error unwinding: release resources in reverse order of acquisition */
2412 fail_register_bo_func:
2413 tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func);
2415 fail_register_bufmgr_func:
2416 tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func);
2417 fail_alloc_bufmgr_func:
2418 _bufmgr_deinit_cache_state(bufmgr_vc4);
2419 if (bufmgr_vc4->hashBos)
2420 drmHashDestroy(bufmgr_vc4->hashBos);
2421 fail_init_cache_state:
2422 if (tbm_backend_bufmgr_query_display_server(bufmgr, &err))
2423 tbm_drm_helper_unset_tbm_master_fd();
2425 tbm_drm_helper_unset_fd();
2426 fail_get_device_name:
2427 close(bufmgr_vc4->fd);
2429 fail_get_render_node:
2435 tbm_backend_module tbm_backend_module_data = {
2438 TBM_BACKEND_ABI_VERSION_3_0,