1 /**************************************************************************
5 Copyright 2017 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
39 #include <sys/ioctl.h>
40 #include <sys/types.h>
47 #include <tbm_bufmgr.h>
48 #include <tbm_bufmgr_backend.h>
51 #include <tbm_surface.h>
52 #include <tbm_surface_internal.h>
53 #include <tbm_drm_helper.h>
57 #include "tbm_bufmgr_tgl.h"
61 #define TBM_COLOR_FORMAT_COUNT 4
/* DRM driver name passed to drmOpen(); the Broadcom VideoCore IV KMS
 * driver registers itself as "vc4".  (Was "vc42837" — a stray number
 * fused into the string literal; drmOpen() would never match it.) */
#define VC4_DRM_NAME "vc4"
66 #define LOG_TAG "TBM_BACKEND"
/* Per-process cache used by target_name() (fragment — the function
 * header and several body lines are not visible in this view). */
static int initialized = 0;
static char app_name[128];
/* get the application name */
f = fopen("/proc/self/cmdline", "r");
memset(app_name, 0x00, sizeof(app_name));
/* NOTE(review): reads at most 99 bytes although app_name holds 128 —
 * presumably deliberate headroom; confirm.  fgets() result is checked. */
if (fgets(app_name, 100, f) == NULL) {
/* keep only the basename of the cmdline path */
slash = strrchr(app_name, '/');
/* strlen(slash) bytes from slash+1 includes the terminating NUL */
memmove(app_name, slash + 1, strlen(slash));
/* Logging macros: colorized error output and opt-in (bDebug) debug
 * output, both prefixed with the calling process name. */
#define TBM_VC4_ERROR(fmt, args...) LOGE("\033[31m" "[%s] " fmt "\033[0m", target_name(), ##args)
#define TBM_VC4_DEBUG(fmt, args...) {if (bDebug&01) LOGD("[%s] " fmt, target_name(), ##args); }
/* no-op variants — selected by a preprocessor conditional not visible here */
#define TBM_VC4_ERROR(...)
#define TBM_VC4_DEBUG(...)

/* arithmetic helpers; SIZE_ALIGN requires base to be a power of two */
#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* surface plane/pitch alignment — two variants exist; the selecting
 * #if/#else lines are not visible in this view */
#define TBM_SURFACE_ALIGNMENT_PLANE (8)
#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
#define TBM_SURFACE_ALIGNMENT_PLANE (16)
#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (16)
#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)

/* MFC (hardware video codec) buffer sizing constants */
#define SZ_1M 0x00100000
#define S5P_FIMV_MAX_FRAME_SIZE (2 * SZ_1M)
#define S5P_FIMV_D_ALIGN_PLANE_SIZE 64
#define S5P_FIMV_NUM_PIXELS_IN_MB_ROW 16
#define S5P_FIMV_NUM_PIXELS_IN_MB_COL 16
#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
#define S5P_FIMV_NV12MT_HALIGN 128
#define S5P_FIMV_NV12MT_VALIGN 64

/* check condition */
#define VC4_RETURN_IF_FAIL(cond) {\
TBM_VC4_ERROR("[%s] : '%s' failed.\n", __func__, #cond);\
#define VC4_RETURN_VAL_IF_FAIL(cond, val) {\
TBM_VC4_ERROR("[%s] : '%s' failed.\n", __func__, #cond);\

/* answer structure for DMABUF_IOCTL_GET_INFO */
struct dma_buf_info {
unsigned int fence_supported;
unsigned int padding;

/* dma-buf fence access-type bits */
#define DMA_BUF_ACCESS_READ 0x1
#define DMA_BUF_ACCESS_WRITE 0x2
#define DMA_BUF_ACCESS_DMA 0x4
#define DMA_BUF_ACCESS_MAX 0x8
#define DMA_FENCE_LIST_MAX 5

struct dma_buf_fence {

/* dma-buf fence ioctl numbers ('F' base) */
#define DMABUF_IOCTL_BASE 'F'
#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)

/* tgl key used for the process-global cache-flush counter */
#define GLOBAL_KEY ((unsigned int)(-1))

#define TBM_VC4_CACHE_INV 0x01 /**< cache invalidate */
#define TBM_VC4_CACHE_CLN 0x02 /**< cache clean */
#define TBM_VC4_CACHE_ALL 0x10 /**< cache all */
#define TBM_VC4_CACHE_FLUSH (TBM_VC4_CACHE_INV|TBM_VC4_CACHE_CLN) /**< cache flush */
#define TBM_VC4_CACHE_FLUSH_ALL (TBM_VC4_CACHE_FLUSH|TBM_VC4_CACHE_ALL) /**< cache flush all */

/* which device last dirtied a bo (enum fragment) */
DEVICE_CA, /* cache aware device */
DEVICE_CO /* cache oblivious device */

typedef union _tbm_bo_cache_state tbm_bo_cache_state;

/* packed cache bookkeeping stored in the tgl per-bo data word */
union _tbm_bo_cache_state {
unsigned int cntFlush:16; /*Flush all index for sync */
unsigned int isCached:1;
unsigned int isDirtied:2;

typedef struct _tbm_bufmgr_vc4 *tbm_bufmgr_vc4;
typedef struct _tbm_bo_vc4 *tbm_bo_vc4;

typedef struct _vc4_private {
struct _tbm_bo_vc4 *bo_priv;

/* tbm buffor object for vc4 */
unsigned int name; /* FLINK ID */
unsigned int gem; /* GEM Handle */
unsigned int dmabuf; /* fd for dmabuf */
void *pBase; /* virtual address */
unsigned int flags_tbm; /*not used now*//*currently no values for the flags,but it may be used in future extension*/
pthread_mutex_t mutex;
struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
tbm_bo_cache_state cache_state;
unsigned int map_cnt;

/* tbm bufmgr private for vc4 */
struct _tbm_bufmgr_vc4 {

/* human-readable device names indexed by TBM_DEVICE_* */
char *STR_DEVICE[] = {

/* color formats advertised by tbm_vc4_surface_supported_format() */
uint32_t tbm_vc4_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
273 #undef ENABLE_CACHECRTL
274 #ifdef ENABLE_CACHECRTL
275 #ifdef TGL_GET_VERSION
277 _tgl_get_version(int fd)
279 struct tgl_ver_data data;
282 err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
284 TBM_VC4_ERROR("error(%s) %s:%d\n", strerror(errno));
288 TBM_VC4_DEBUG("tgl version is (%u, %u).\n", data.major, data.minor);
/* Register key with the tgl driver, with a 1s lock timeout.
 * (fragment — return paths and braces are not visible here) */
_tgl_init(int fd, unsigned int key)
	struct tgl_reg_data data;
	data.timeout_ms = 1000;
	err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
	TBM_VC4_ERROR("error(%s) key:%d\n", strerror(errno), key);
/* Unregister key from the tgl driver (fragment). */
_tgl_destroy(int fd, unsigned int key)
	struct tgl_reg_data data;
	err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
	TBM_VC4_ERROR("error(%s) key:%d\n", strerror(errno), key);
/* Take the tgl lock for key; opt maps TBM_OPTION_READ/WRITE to the
 * corresponding tgl lock type, anything else to TGL_TYPE_NONE.
 * (fragment — the switch scaffolding and returns are not visible) */
_tgl_lock(int fd, unsigned int key, int opt)
	struct tgl_lock_data data;
	enum tgl_type_data tgl_type;
	case TBM_OPTION_READ:
		tgl_type = TGL_TYPE_READ;
	case TBM_OPTION_WRITE:
		tgl_type = TGL_TYPE_WRITE;
	/* default: no specific access type */
		tgl_type = TGL_TYPE_NONE;
	data.type = tgl_type;
	err = ioctl(fd, TGL_IOCTL_LOCK, &data);
	TBM_VC4_ERROR("error(%s) key:%d opt:%d\n",
		strerror(errno), key, opt);
/* Release the tgl lock for key (fragment). */
_tgl_unlock(int fd, unsigned int key)
	struct tgl_lock_data data;
	data.type = TGL_TYPE_NONE;
	err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
	TBM_VC4_ERROR("error(%s) key:%d\n",
		strerror(errno), key);
/* Store val in the tgl per-key user data word (fragment — the
 * data.key/data.data1 assignments are not visible here). */
_tgl_set_data(int fd, unsigned int key, unsigned int val)
	struct tgl_usr_data data;
	err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
	TBM_VC4_ERROR("error(%s) key:%d\n",
		strerror(errno), key);
/* Read the tgl per-key user data word; returns it on success
 * (fragment — the return statements are not visible here). */
static inline unsigned int
_tgl_get_data(int fd, unsigned int key)
	struct tgl_usr_data data = { 0, };
	err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
	TBM_VC4_ERROR("error(%s) key:%d\n",
		strerror(errno), key);
/* Issue a vc4 cache maintenance ioctl for bo_vc4 (or for all caches
 * when bo_vc4 is NULL), according to the TBM_VC4_CACHE_* bits in flags.
 * No-op when dma-fence is in use (kernel handles coherency then).
 * (fragment — some branch/brace lines are not visible) */
_vc4_cache_flush(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int flags)
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	/* cache flush is managed by kernel side when using dma-fence. */
	if (bufmgr_vc4->use_dma_fence)
	struct drm_vc4_gem_cache_op cache_op = {0, };
	/* if bo_vc4 is null, do cache_flush_all */
	/* NOTE(review): double cast truncates the pointer to 32 bits —
	 * assumes a 32-bit userspace; confirm for 64-bit builds. */
	cache_op.usr_addr = (uint64_t)((uint32_t)bo_vc4->pBase);
	cache_op.size = bo_vc4->size;
	flags = TBM_VC4_CACHE_FLUSH_ALL;
	cache_op.usr_addr = 0;
	/* translate TBM cache bits into driver cache-op flags */
	if (flags & TBM_VC4_CACHE_INV) {
		if (flags & TBM_VC4_CACHE_ALL)
			cache_op.flags |= VC4_DRM_CACHE_INV_ALL;
		cache_op.flags |= VC4_DRM_CACHE_INV_RANGE;
	if (flags & TBM_VC4_CACHE_CLN) {
		if (flags & TBM_VC4_CACHE_ALL)
			cache_op.flags |= VC4_DRM_CACHE_CLN_ALL;
		cache_op.flags |= VC4_DRM_CACHE_CLN_RANGE;
	if (flags & TBM_VC4_CACHE_ALL)
		cache_op.flags |= VC4_DRM_ALL_CACHES_CORES;
	ret = drmCommandWriteRead(bufmgr_vc4->fd, DRM_VC4_GEM_CACHE_OP, &cache_op,
	TBM_VC4_ERROR("fail to flush the cache.\n");
/* Register the bo's FLINK name with tgl and, for newly allocated bos,
 * reset its cache-state word.  Compiled out unless ENABLE_CACHECRTL.
 * (fragment — the import-path branch is not fully visible) */
_bo_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int import)
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
	if (bufmgr_vc4->use_dma_fence)
	_tgl_init(bufmgr_vc4->tgl_fd, bo_vc4->name);
	tbm_bo_cache_state cache_state;
	/* fresh bo: unknown device, not cached, flush counter zeroed */
	cache_state.data.isDirtied = DEVICE_NONE;
	cache_state.data.isCached = 0;
	cache_state.data.cntFlush = 0;
	_tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name, cache_state.val);
/* Update the bo's cache-state machine on map: decide whether an
 * invalidate (CPU reading after device writes) or clean (device
 * reading after CPU writes) is needed, then perform the flush.
 * Compiled out unless ENABLE_CACHECRTL; no-op with dma-fence.
 * (fragment — several brace/else lines are not visible) */
_bo_set_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int device, int opt)
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
	if (bufmgr_vc4->use_dma_fence)
	unsigned short cntFlush = 0;
	/* get cache state of a bo */
	bo_vc4->cache_state.val = _tgl_get_data(bufmgr_vc4->tgl_fd,
	/* get global cache flush count */
	cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);
	if (device == TBM_DEVICE_CPU) {
		/* a cache-oblivious device dirtied it: CPU must invalidate */
		if (bo_vc4->cache_state.data.isDirtied == DEVICE_CO &&
			bo_vc4->cache_state.data.isCached)
			need_flush = TBM_VC4_CACHE_INV;
		bo_vc4->cache_state.data.isCached = 1;
		if (opt & TBM_OPTION_WRITE)
			bo_vc4->cache_state.data.isDirtied = DEVICE_CA;
		if (bo_vc4->cache_state.data.isDirtied != DEVICE_CA)
			bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
	/* CPU dirtied it and no global flush happened since: clean all */
	if (bo_vc4->cache_state.data.isDirtied == DEVICE_CA &&
		bo_vc4->cache_state.data.isCached &&
		bo_vc4->cache_state.data.cntFlush == cntFlush)
		need_flush = TBM_VC4_CACHE_CLN | TBM_VC4_CACHE_ALL;
	if (opt & TBM_OPTION_WRITE)
		bo_vc4->cache_state.data.isDirtied = DEVICE_CO;
	if (bo_vc4->cache_state.data.isDirtied != DEVICE_CO)
		bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
	/* a flush-all bumps the global counter so other bos notice it */
	if (need_flush & TBM_VC4_CACHE_ALL)
		_tgl_set_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
	/* call cache flush */
	_vc4_cache_flush(bufmgr_vc4, bo_vc4, need_flush);
	TBM_VC4_DEBUG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
		bo_vc4->cache_state.data.isCached,
		bo_vc4->cache_state.data.isDirtied,
/* Persist the bo's cache-state word (with the current global flush
 * counter) back into tgl on unmap.  Compiled out unless
 * ENABLE_CACHECRTL; no-op with dma-fence. */
_bo_save_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
	if (bufmgr_vc4->use_dma_fence)
	unsigned short cntFlush = 0;
	/* get global cache flush count */
	cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);
	/* save global cache flush count */
	bo_vc4->cache_state.data.cntFlush = cntFlush;
	_tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name,
		bo_vc4->cache_state.val);
/* Drop the bo's tgl registration when the bo is freed.  Compiled out
 * unless ENABLE_CACHECRTL; no-op with dma-fence. */
_bo_destroy_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
	VC4_RETURN_IF_FAIL(bo_vc4 != NULL);
	if (bufmgr_vc4->use_dma_fence)
	_tgl_destroy(bufmgr_vc4->tgl_fd, bo_vc4->name);
597 _bufmgr_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
599 #ifdef ENABLE_CACHECRTL
600 VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
602 if (bufmgr_vc4->use_dma_fence)
605 /* open tgl fd for saving cache flush data */
606 bufmgr_vc4->tgl_fd = open(tgl_devfile, O_RDWR);
608 if (bufmgr_vc4->tgl_fd < 0) {
609 bufmgr_vc4->tgl_fd = open(tgl_devfile1, O_RDWR);
610 if (bufmgr_vc4->tgl_fd < 0) {
611 TBM_VC4_ERROR("fail to open global_lock:%s\n",
617 #ifdef TGL_GET_VERSION
618 if (!_tgl_get_version(bufmgr_vc4->tgl_fd)) {
619 TBM_VC4_ERROR("fail to get tgl_version. tgl init failed.\n");
620 close(bufmgr_sprd->tgl_fd);
625 if (!_tgl_init(bufmgr_vc4->tgl_fd, GLOBAL_KEY)) {
626 TBM_VC4_ERROR("fail to initialize the tgl\n");
627 close(bufmgr_vc4->tgl_fd);
/* Close the tgl fd opened by _bufmgr_init_cache_state().  Compiled out
 * unless ENABLE_CACHECRTL; no-op with dma-fence. */
_bufmgr_deinit_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
#ifdef ENABLE_CACHECRTL
	VC4_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
	if (bufmgr_vc4->use_dma_fence)
	if (bufmgr_vc4->tgl_fd >= 0)
		close(bufmgr_vc4->tgl_fd);
	/* Body fragment of the DRM-open helper (its header line is not
	 * visible here).  Tries drmOpen() first, then falls back to
	 * locating the vc4 card node via udev. */
	fd = drmOpen(VC4_DRM_NAME, NULL);
	TBM_VC4_ERROR("fail to open drm.(%s)\n", VC4_DRM_NAME);
	struct udev *udev = NULL;
	struct udev_enumerate *e = NULL;
	struct udev_list_entry *entry = NULL;
	struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
	const char *filepath;
	TBM_VC4_DEBUG("search drm-device by udev\n");
	TBM_VC4_ERROR("udev_new() failed.\n");
	e = udev_enumerate_new(udev);
	udev_enumerate_add_match_subsystem(e, "drm");
	udev_enumerate_add_match_sysname(e, "card[0-9]*");
	udev_enumerate_scan_devices(e);
	udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
		device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
			udev_list_entry_get_name(entry));
		device_parent = udev_device_get_parent(device);
		/* Not need unref device_parent. device_parent and device have same refcnt */
		if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
			TBM_VC4_DEBUG("[%s] Found render device: '%s' (%s)\n",
				udev_device_get_syspath(drm_device),
				udev_device_get_sysname(device_parent));
		udev_device_unref(device);
	udev_enumerate_unref(e);
	/* Get device file path. */
	filepath = udev_device_get_devnode(drm_device);
	TBM_VC4_ERROR("udev_device_get_devnode() failed.\n");
	udev_device_unref(drm_device);
	/* Open DRM device file and check validity. */
	fd = open(filepath, O_RDWR | O_CLOEXEC);
	/* NOTE(review): format has %s but no matching argument — UB;
	 * should pass filepath (same for the fstat message below). */
	TBM_VC4_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
	udev_device_unref(drm_device);
	TBM_VC4_ERROR("fstat() failed %s.\n");
	udev_device_unref(drm_device);
	udev_device_unref(drm_device);
/* Check (via udev) whether a vc4 renderD* node exists; compiled to a
 * fixed answer when USE_RENDER_NODE is not defined.
 * (fragment — return statements are not visible) */
_check_render_node(void) //TODO
	struct udev *udev = NULL;
	struct udev_enumerate *e = NULL;
	struct udev_list_entry *entry = NULL;
	struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
#ifndef USE_RENDER_NODE
	TBM_VC4_ERROR("udev_new() failed.\n");
	e = udev_enumerate_new(udev);
	udev_enumerate_add_match_subsystem(e, "drm");
	udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
	udev_enumerate_scan_devices(e);
	udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
		device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
			udev_list_entry_get_name(entry));
		device_parent = udev_device_get_parent(device);
		/* Not need unref device_parent. device_parent and device have same refcnt */
		if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
			TBM_VC4_DEBUG("Found render device: '%s' (%s)\n",
				udev_device_get_syspath(drm_device),
				udev_device_get_sysname(device_parent));
		udev_device_unref(device);
	udev_enumerate_unref(e);
	udev_device_unref(drm_device);
	udev_device_unref(drm_device);
/* Locate and open the vc4 renderD* node via udev; returns the open fd
 * (fragment — error returns and fstat scaffolding are not visible). */
_get_render_node(void)//TODO
	struct udev *udev = NULL;
	struct udev_enumerate *e = NULL;
	struct udev_list_entry *entry = NULL;
	struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
	const char *filepath;
	TBM_VC4_ERROR("udev_new() failed.\n");
	e = udev_enumerate_new(udev);
	udev_enumerate_add_match_subsystem(e, "drm");
	udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
	udev_enumerate_scan_devices(e);
	udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
		device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
			udev_list_entry_get_name(entry));
		device_parent = udev_device_get_parent(device);
		/* Not need unref device_parent. device_parent and device have same refcnt */
		if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
			TBM_VC4_DEBUG("Found render device: '%s' (%s)\n",
				udev_device_get_syspath(drm_device),
				udev_device_get_sysname(device_parent));
		udev_device_unref(device);
	udev_enumerate_unref(e);
	/* Get device file path. */
	filepath = udev_device_get_devnode(drm_device);
	TBM_VC4_ERROR("udev_device_get_devnode() failed.\n");
	udev_device_unref(drm_device);
	/* Open DRM device file and check validity. */
	fd = open(filepath, O_RDWR | O_CLOEXEC);
	/* NOTE(review): format has %s but no matching argument — UB;
	 * should pass filepath (same for the fstat message below). */
	TBM_VC4_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
	udev_device_unref(drm_device);
	TBM_VC4_ERROR("fstat() failed %s.\n");
	udev_device_unref(drm_device);
	udev_device_unref(drm_device);
/* Convert a GEM handle to its global FLINK name via
 * DRM_IOCTL_GEM_FLINK (fragment — arg.handle assignment and the
 * failure return are not visible). */
_get_name(int fd, unsigned int gem)
	struct drm_gem_flink arg = {0,};
	if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
		TBM_VC4_ERROR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
	return (unsigned int)arg.name;
/* Produce a tbm_bo_handle view of the bo for the requested device:
 * GEM handle for DEFAULT/2D, mmap'ed pointer for CPU, dma-buf fd for
 * 3D/MM (exporting via PRIME on first use).
 * (fragment — switch/case and brace lines are partly missing) */
_vc4_bo_handle(tbm_bo_vc4 bo_vc4, int device)
	tbm_bo_handle bo_handle;
	/* NOTE(review): clears sizeof(uint64_t) bytes, not
	 * sizeof(bo_handle) — confirm the union is not larger. */
	memset(&bo_handle, 0x0, sizeof(uint64_t));
	case TBM_DEVICE_DEFAULT:
		bo_handle.u32 = (uint32_t)bo_vc4->gem;
	/* CPU: mmap on first use and cache the pointer in pBase */
	if (!bo_vc4->pBase) {
		struct drm_vc4_mmap_bo arg = {0, };
		arg.handle = bo_vc4->gem;
		if (drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_MMAP_BO, &arg)){
			TBM_VC4_ERROR("Cannot map_dumb gem=%d\n", bo_vc4->gem);
			return (tbm_bo_handle) NULL;
		map = mmap(NULL, bo_vc4->size, PROT_READ | PROT_WRITE, MAP_SHARED,
			bo_vc4->fd, arg.offset);
		if (map == MAP_FAILED) {
			TBM_VC4_ERROR("Cannot usrptr gem=%d\n", bo_vc4->gem);
			return (tbm_bo_handle) NULL;
	bo_handle.ptr = (void *)bo_vc4->pBase;
	if (bo_vc4->dmabuf) {
		bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
	/* export a PRIME fd on first request */
	if (!bo_vc4->dmabuf) {
		struct drm_prime_handle arg = {0, };
		arg.handle = bo_vc4->gem;
		if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
			TBM_VC4_ERROR("Cannot dmabuf=%d\n", bo_vc4->gem);
			return (tbm_bo_handle) NULL;
		bo_vc4->dmabuf = arg.fd;
	bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
	if (!bo_vc4->dmabuf) {
		struct drm_prime_handle arg = {0, };
		arg.handle = bo_vc4->gem;
		if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
			TBM_VC4_ERROR("Cannot dmabuf=%d\n", bo_vc4->gem);
			return (tbm_bo_handle) NULL;
		bo_vc4->dmabuf = arg.fd;
	bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
	/* default: unknown device type */
	TBM_VC4_ERROR("Not supported device:%d\n", device);
	bo_handle.ptr = (void *) NULL;
/* Backend callback: return the bo's allocated size in bytes
 * (fragment — the return statement is not visible). */
tbm_vc4_bo_size(tbm_bo bo)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
/* Backend callback: allocate a new GEM bo via DRM_IOCTL_VC4_CREATE_BO,
 * FLINK it, set up cache state, optionally pre-export a dma-buf fd
 * (dma-fence mode), and record it in the name->PrivGem hash.
 * Returns the new tbm_bo_vc4 as void*, or NULL on failure.
 * (fragment — error-cleanup paths are not visible) */
tbm_vc4_bo_alloc(tbm_bo bo, int size, int flags)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
	tbm_bufmgr_vc4 bufmgr_vc4;
	bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
	TBM_VC4_ERROR("fail to allocate the bo private\n");
	struct drm_vc4_create_bo arg = {0, };
	arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
	arg.size = (__u32)size;
	if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)){
		TBM_VC4_ERROR("Cannot create bo(flag:%x, size:%d)\n", arg.flags,
			(unsigned int)arg.size);
	bo_vc4->fd = bufmgr_vc4->fd;
	bo_vc4->gem = (unsigned int)arg.handle;
	bo_vc4->flags_tbm = flags;
	bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
	if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 0)) {
		TBM_VC4_ERROR("fail init cache state(%d)\n", bo_vc4->name);
	pthread_mutex_init(&bo_vc4->mutex, NULL);
	/* dma-fence mode needs the dma-buf fd up front for fence ioctls */
	if (bufmgr_vc4->use_dma_fence
		&& !bo_vc4->dmabuf) {
		struct drm_prime_handle arg = {0, };
		arg.handle = bo_vc4->gem;
		if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
			TBM_VC4_ERROR("Cannot dmabuf=%d\n", bo_vc4->gem);
		bo_vc4->dmabuf = arg.fd;
	/* add bo to hash */
	PrivGem *privGem = calloc(1, sizeof(PrivGem));
	TBM_VC4_ERROR("fail to calloc privGem\n");
	privGem->ref_count = 1;
	privGem->bo_priv = bo_vc4;
	if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name,
		(void *)privGem) < 0) {
		TBM_VC4_ERROR("Cannot insert bo to Hash(%d)\n", bo_vc4->name);
	TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), flags:%d, size:%d\n",
		bo_vc4->gem, bo_vc4->name,
	return (void *)bo_vc4;
/* Backend callback: tear down a bo — munmap any CPU mapping, close the
 * dma-buf fd, drop the hash reference (freeing PrivGem at refcount 0),
 * destroy cache state, and close the GEM handle.
 * (fragment — some brace/free lines are not visible) */
tbm_vc4_bo_free(tbm_bo bo)
	tbm_bufmgr_vc4 bufmgr_vc4;
	bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
	VC4_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_IF_FAIL(bo_vc4 != NULL);
	TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d, size:%d\n",
		bo_vc4->gem, bo_vc4->name,
	if (bo_vc4->pBase) {
		if (munmap(bo_vc4->pBase, bo_vc4->size) == -1) {
			TBM_VC4_ERROR("bo:%p fail to munmap(%s)\n",
				bo, strerror(errno));
	if (bo_vc4->dmabuf) {
		close(bo_vc4->dmabuf);
	/* delete bo from hash */
	PrivGem *privGem = NULL;
	ret = drmHashLookup(bufmgr_vc4->hashBos, bo_vc4->name,
	privGem->ref_count--;
	if (privGem->ref_count == 0) {
		drmHashDelete(bufmgr_vc4->hashBos, bo_vc4->name);
	TBM_VC4_ERROR("Cannot find bo to Hash(%d), ret=%d\n",
	_bo_destroy_cache_state(bufmgr_vc4, bo_vc4);
	/* Free gem handle */
	struct drm_gem_close arg = {0, };
	memset(&arg, 0, sizeof(arg));
	arg.handle = bo_vc4->gem;
	if (drmIoctl(bo_vc4->fd, DRM_IOCTL_GEM_CLOSE, &arg)) {
		TBM_VC4_ERROR("bo:%p fail to gem close.(%s)\n",
			bo, strerror(errno));
/* Backend callback: import a bo by FLINK name.  Returns the existing
 * private if the name is already hashed; otherwise opens the GEM
 * object, initializes cache state, exports a dma-buf fd, and inserts
 * a new PrivGem entry.  (fragment — error paths are not visible) */
tbm_vc4_bo_import(tbm_bo bo, unsigned int key)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
	tbm_bufmgr_vc4 bufmgr_vc4;
	PrivGem *privGem = NULL;
	bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	/* fast path: already imported in this process */
	ret = drmHashLookup(bufmgr_vc4->hashBos, key, (void **)&privGem);
	return privGem->bo_priv;
	struct drm_gem_open arg = {0, };
	if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
		TBM_VC4_ERROR("Cannot open gem name=%d\n", key);
	bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
	TBM_VC4_ERROR("fail to allocate the bo private\n");
	bo_vc4->fd = bufmgr_vc4->fd;
	bo_vc4->gem = arg.handle;
	bo_vc4->size = arg.size;
	bo_vc4->flags_tbm = 0;
	if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
		TBM_VC4_ERROR("fail init cache state(%d)\n", bo_vc4->name);
	if (!bo_vc4->dmabuf) {
		struct drm_prime_handle arg = {0, };
		arg.handle = bo_vc4->gem;
		if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
			TBM_VC4_ERROR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_vc4->gem);
		bo_vc4->dmabuf = arg.fd;
	/* add bo to hash */
	privGem = calloc(1, sizeof(PrivGem));
	TBM_VC4_ERROR("fail to calloc privGem\n");
	privGem->ref_count = 1;
	privGem->bo_priv = bo_vc4;
	if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name,
		(void *)privGem) < 0) {
		TBM_VC4_ERROR("Cannot insert bo to Hash(%d)\n", bo_vc4->name);
	TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
		bo_vc4->gem, bo_vc4->name,
	return (void *)bo_vc4;
/* Backend callback: import a bo from a dma-buf (PRIME) fd.  Converts
 * fd -> GEM handle -> FLINK name, reuses an existing hashed private
 * when the handle matches, determines the size (lseek on the fd, with
 * DRM_IOCTL_GEM_OPEN as fallback), and registers a new PrivGem.
 * (fragment — several error paths are not visible) */
tbm_vc4_bo_import_fd(tbm_bo bo, tbm_fd key)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
	tbm_bufmgr_vc4 bufmgr_vc4;
	PrivGem *privGem = NULL;
	bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	/*getting handle from fd*/
	unsigned int gem = 0;
	struct drm_prime_handle arg = {0, };
	if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
		TBM_VC4_ERROR("bo:%p Cannot get gem handle from fd:%d (%s)\n",
			bo, arg.fd, strerror(errno));
	name = _get_name(bufmgr_vc4->fd, gem);
	TBM_VC4_ERROR("bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
		bo, gem, key, strerror(errno));
	ret = drmHashLookup(bufmgr_vc4->hashBos, name, (void **)&privGem);
	if (gem == privGem->bo_priv->gem)
		return privGem->bo_priv;
	/* NOTE(review): unsigned compared against -1 below relies on the
	 * usual conversion of -1 to UINT_MAX — works, but fragile. */
	unsigned int real_size = -1;
	/* Determine size of bo. The fd-to-handle ioctl really should
	 * return the size, but it doesn't. If we have kernel 3.12 or
	 * later, we can lseek on the prime fd to get the size. Older
	 * kernels will just fail, in which case we fall back to the
	 * provided (estimated or guess size).
	real_size = lseek(key, 0, SEEK_END);
	struct drm_gem_open open_arg = {0, };
	open_arg.name = name;
	if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
		TBM_VC4_ERROR("Cannot open gem name=%d\n", name);
	/* drop the extra handle reference taken by GEM_OPEN */
	struct drm_gem_close close_arg = {0, };
	memset(&close_arg, 0, sizeof(close_arg));
	close_arg.handle = open_arg.handle;
	if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_CLOSE, &close_arg)) {
		TBM_VC4_ERROR("Cannot close gem_handle (%d)\n", open_arg.handle,
	if (real_size == -1)
		real_size = open_arg.size;
	bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
	TBM_VC4_ERROR("bo:%p fail to allocate the bo private\n", bo);
	bo_vc4->fd = bufmgr_vc4->fd;
	bo_vc4->size = real_size;
	bo_vc4->flags_tbm = 0;
	bo_vc4->name = name;
	if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
		TBM_VC4_ERROR("fail init cache state(%d)\n", bo_vc4->name);
	/* add bo to hash */
	privGem = calloc(1, sizeof(PrivGem));
	TBM_VC4_ERROR("fail to calloc privGem\n");
	privGem->ref_count = 1;
	privGem->bo_priv = bo_vc4;
	if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name,
		(void *)privGem) < 0) {
		TBM_VC4_ERROR("bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
			bo, bo_vc4->name, gem, key);
	TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
		bo_vc4->gem, bo_vc4->name,
	return (void *)bo_vc4;
/* Backend callback: export the bo's FLINK name (creating it lazily
 * via _get_name() on first export). */
tbm_vc4_bo_export(tbm_bo bo)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
	if (!bo_vc4->name) {
		bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
		if (!bo_vc4->name) {
			TBM_VC4_ERROR("Cannot get name\n");
	TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
		bo_vc4->gem, bo_vc4->name,
	return (unsigned int)bo_vc4->name;
/* Backend callback: export the bo as a fresh dma-buf (PRIME) fd.
 * Returns the fd, or the negative drmIoctl result on failure. */
tbm_vc4_bo_export_fd(tbm_bo bo)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, -1);
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, -1);
	struct drm_prime_handle arg = {0, };
	arg.handle = bo_vc4->gem;
	ret = drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
	TBM_VC4_ERROR("bo:%p Cannot dmabuf=%d (%s)\n",
		bo, bo_vc4->gem, strerror(errno));
	return (tbm_fd) ret;
	TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
		bo_vc4->gem, bo_vc4->name,
	/* caller owns the returned fd */
	return (tbm_fd)arg.fd;
/* Backend callback: return a handle view of the bo for the given
 * device without touching cache state (unlike bo_map). */
static tbm_bo_handle
tbm_vc4_bo_get_handle(tbm_bo bo, int device)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
	tbm_bo_handle bo_handle;
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, (tbm_bo_handle) NULL);
	TBM_VC4_ERROR("Cannot map gem=%d\n", bo_vc4->gem);
	return (tbm_bo_handle) NULL;
	TBM_VC4_DEBUG("bo:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
		bo_vc4->gem, bo_vc4->name,
		STR_DEVICE[device]);
	/*Get mapped bo_handle*/
	bo_handle = _vc4_bo_handle(bo_vc4, device);
	if (bo_handle.ptr == NULL) {
		TBM_VC4_ERROR("Cannot get handle: gem:%d, device:%d\n",
			bo_vc4->gem, device);
		return (tbm_bo_handle) NULL;
/* Backend callback: map the bo for a device.  On the first map
 * (map_cnt == 0) runs the cache-state machine; records the device in
 * last_map_device for the matching unmap-time flush.
 * (fragment — map_cnt increment and return are not visible) */
static tbm_bo_handle
tbm_vc4_bo_map(tbm_bo bo, int device, int opt)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
	tbm_bo_handle bo_handle;
	tbm_bufmgr_vc4 bufmgr_vc4;
	bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, (tbm_bo_handle)NULL);
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, (tbm_bo_handle) NULL);
	TBM_VC4_ERROR("Cannot map gem=%d\n", bo_vc4->gem);
	return (tbm_bo_handle) NULL;
	TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d, %s, %s\n",
		bo_vc4->gem, bo_vc4->name,
	/*Get mapped bo_handle*/
	bo_handle = _vc4_bo_handle(bo_vc4, device);
	if (bo_handle.ptr == NULL) {
		TBM_VC4_ERROR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
			bo_vc4->gem, device, opt);
		return (tbm_bo_handle) NULL;
	if (bo_vc4->map_cnt == 0)
		_bo_set_cache_state(bufmgr_vc4, bo_vc4, device, opt);
	bo_vc4->last_map_device = device;
/* Backend callback: unmap the bo.  When the map count drops to zero,
 * saves cache state; a CPU mapping additionally triggers a full cache
 * flush.  (fragment — the map_cnt decrement is not visible) */
tbm_vc4_bo_unmap(tbm_bo bo)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
	tbm_bufmgr_vc4 bufmgr_vc4;
	bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
	if (bo_vc4->map_cnt == 0)
		_bo_save_cache_state(bufmgr_vc4, bo_vc4);
#ifdef ENABLE_CACHECRTL
	if (bo_vc4->last_map_device == TBM_DEVICE_CPU)
		_vc4_cache_flush(bufmgr_vc4, bo_vc4, TBM_VC4_CACHE_FLUSH_ALL);
	bo_vc4->last_map_device = -1;
	TBM_VC4_DEBUG(" bo:%p, gem:%d(%d), fd:%d\n",
		bo_vc4->gem, bo_vc4->name,
/* Backend callback: lock the bo for 3D (dma-buf fence ioctl, fence
 * recorded in a fixed-size per-bo list) or CPU (advisory fcntl file
 * lock on the dma-buf fd).  Entire body disabled when
 * ALWAYS_BACKEND_CTRL is defined.  (fragment — some branches missing) */
tbm_vc4_bo_lock(tbm_bo bo, int device, int opt)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
#ifndef ALWAYS_BACKEND_CTRL
	tbm_bufmgr_vc4 bufmgr_vc4;
	struct dma_buf_fence fence;
	struct flock filelock;
	if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
		TBM_VC4_DEBUG("Not support device type,\n");
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
	bufmgr_vc4 = (tbm_bufmgr_vc4)tbm_backend_get_bufmgr_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
	memset(&fence, 0, sizeof(struct dma_buf_fence));
	/* Check if the given type is valid or not. */
	if (opt & TBM_OPTION_WRITE) {
		if (device == TBM_DEVICE_3D)
			fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
	} else if (opt & TBM_OPTION_READ) {
		if (device == TBM_DEVICE_3D)
			fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
	TBM_VC4_ERROR("Invalid argument\n");
	/* Check if the tbm manager supports dma fence or not. */
	if (!bufmgr_vc4->use_dma_fence) {
		TBM_VC4_ERROR("Not support DMA FENCE(%s)\n", strerror(errno));
	if (device == TBM_DEVICE_3D) {
		ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
		TBM_VC4_ERROR("Cannot set GET FENCE(%s)\n", strerror(errno));
	/* CPU path: advisory record lock on the dma-buf fd */
	if (opt & TBM_OPTION_WRITE)
		filelock.l_type = F_WRLCK;
	filelock.l_type = F_RDLCK;
	filelock.l_whence = SEEK_CUR;
	filelock.l_start = 0;
	if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
	pthread_mutex_lock(&bo_vc4->mutex);
	if (device == TBM_DEVICE_3D) {
		/* record the fence in the first free slot */
		for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
			if (bo_vc4->dma_fence[i].ctx == 0) {
				bo_vc4->dma_fence[i].type = fence.type;
				bo_vc4->dma_fence[i].ctx = fence.ctx;
		if (i == DMA_FENCE_LIST_MAX) {
			/*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
			TBM_VC4_ERROR("fence list is full\n");
	pthread_mutex_unlock(&bo_vc4->mutex);
	TBM_VC4_DEBUG("DMABUF_IOCTL_GET_FENCE! bo:%p, gem:%d(%d), fd:%ds\n",
		bo_vc4->gem, bo_vc4->name,
#endif /* ALWAYS_BACKEND_CTRL */
/* Backend callback: release the oldest recorded lock — pops the first
 * fence from the per-bo list (shifting the rest down) and PUTs it for
 * 3D, or releases the fcntl record lock for CPU.  Disabled when
 * ALWAYS_BACKEND_CTRL is defined.  (fragment — some branches missing) */
tbm_vc4_bo_unlock(tbm_bo bo)
	VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
#ifndef ALWAYS_BACKEND_CTRL
	struct dma_buf_fence fence;
	struct flock filelock;
	unsigned int dma_type = 0;
	bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
	VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
	if (bo_vc4->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
	if (!bo_vc4->dma_fence[0].ctx && dma_type) {
		TBM_VC4_DEBUG("FENCE not support or ignored,\n");
	if (!bo_vc4->dma_fence[0].ctx && dma_type) {
		TBM_VC4_DEBUG("device type is not 3D/CPU,\n");
	pthread_mutex_lock(&bo_vc4->mutex);
	fence.type = bo_vc4->dma_fence[0].type;
	fence.ctx = bo_vc4->dma_fence[0].ctx;
	/* shift the remaining fences down one slot */
	for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
		bo_vc4->dma_fence[i - 1].type = bo_vc4->dma_fence[i].type;
		bo_vc4->dma_fence[i - 1].ctx = bo_vc4->dma_fence[i].ctx;
	bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
	bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
	pthread_mutex_unlock(&bo_vc4->mutex);
	ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
	TBM_VC4_ERROR("Can not set PUT FENCE(%s)\n", strerror(errno));
	/* CPU path: drop the advisory record lock */
	filelock.l_type = F_UNLCK;
	filelock.l_whence = SEEK_CUR;
	filelock.l_start = 0;
	if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
	TBM_VC4_DEBUG("DMABUF_IOCTL_PUT_FENCE! bo:%p, gem:%d(%d), fd:%ds\n",
		bo_vc4->gem, bo_vc4->name,
#endif /* ALWAYS_BACKEND_CTRL */
/* Backend deinit callback: tear down everything init_tbm_bufmgr_priv set
 * up — drain and destroy the bo hash table, cache state, Wayland auth
 * server, device name, the helper-registered fds, and finally the drm fd.
 * NOTE(review): lines are gapped here (e.g. the expected free() of the
 * hash values and of bufmgr_vc4 itself are not visible). */
1656 tbm_vc4_bufmgr_deinit(void *priv)
1658 VC4_RETURN_IF_FAIL(priv != NULL);
1660 tbm_bufmgr_vc4 bufmgr_vc4;
1662 bufmgr_vc4 = (tbm_bufmgr_vc4)priv;
/* Remove every remaining bo entry before destroying the table. */
1664 if (bufmgr_vc4->hashBos) {
1668 while (drmHashFirst(bufmgr_vc4->hashBos, &key, &value) > 0) {
1670 drmHashDelete(bufmgr_vc4->hashBos, key);
1673 drmHashDestroy(bufmgr_vc4->hashBos);
1674 bufmgr_vc4->hashBos = NULL;
1677 _bufmgr_deinit_cache_state(bufmgr_vc4);
/* Only deinit the Wayland auth server if bind_native_display ran. */
1679 if (bufmgr_vc4->bind_display)
1680 tbm_drm_helper_wl_auth_server_deinit();
1682 if (bufmgr_vc4->device_name)
1683 free(bufmgr_vc4->device_name);
/* Master fd was only registered in the display-server case (see init). */
1685 if (tbm_backend_is_display_server())
1686 tbm_drm_helper_unset_tbm_master_fd();
1688 tbm_drm_helper_unset_fd();
1690 close(bufmgr_vc4->fd);
/* Report the color formats this backend supports: hands back a freshly
 * calloc'd copy of tbm_vc4_color_format_list (TBM_COLOR_FORMAT_COUNT
 * entries, see the #define in the header section). Ownership of *formats
 * transfers to the caller, who must free() it.
 * NOTE(review): the failure return for the NULL calloc case and the final
 * success return are in lines not visible in this chunk. */
1696 tbm_vc4_surface_supported_format(uint32_t **formats, uint32_t *num)
1698 uint32_t *color_formats = NULL;
1700 color_formats = (uint32_t *)calloc(1,
1701 sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
1703 if (color_formats == NULL)
1706 memcpy(color_formats, tbm_vc4_color_format_list,
1707 sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1709 *formats = color_formats;
1710 *num = TBM_COLOR_FORMAT_COUNT;
1712 TBM_VC4_DEBUG("tbm_vc4_surface_supported_format count = %d\n", *num);
/* Compute the NV12 plane byte size in whole macroblocks: round width and
 * height up to the S5P FIMV macroblock grid, then multiply back out to
 * pixels. For frames under S5P_FIMV_MAX_FRAME_SIZE the macroblock row
 * count is rounded up to an even number. */
1718 _new_calc_plane_nv12(int width, int height)
1722 mbX = DIV_ROUND_UP(width, S5P_FIMV_NUM_PIXELS_IN_MB_ROW);
1723 mbY = DIV_ROUND_UP(height, S5P_FIMV_NUM_PIXELS_IN_MB_COL);
/* Small frames: force an even macroblock height. */
1725 if (width * height < S5P_FIMV_MAX_FRAME_SIZE)
1726 mbY = (mbY + 1) / 2 * 2;
1728 return ((mbX * S5P_FIMV_NUM_PIXELS_IN_MB_COL) * (mbY *
1729 S5P_FIMV_NUM_PIXELS_IN_MB_ROW));
/* Legacy NV12 Y-plane size: pad width by 24 and height by 16, align each
 * to the NV12MT tile alignment, then align the product to the decoder
 * buffer alignment. (Padding constants differ from the UV variant below.) */
1733 _calc_yplane_nv12(int width, int height)
1737 mbX = SIZE_ALIGN(width + 24, S5P_FIMV_NV12MT_HALIGN);
1738 mbY = SIZE_ALIGN(height + 16, S5P_FIMV_NV12MT_VALIGN);
1740 return SIZE_ALIGN(mbX * mbY, S5P_FIMV_DEC_BUF_ALIGN);
/* Legacy NV12 UV-plane size: pad width by 16 and height by 4, align to the
 * NV12MT tile alignment, halve (UV plane is half the Y plane in NV12), and
 * align to the decoder buffer alignment. */
1744 _calc_uvplane_nv12(int width, int height)
1748 mbX = SIZE_ALIGN(width + 16, S5P_FIMV_NV12MT_HALIGN);
1749 mbY = SIZE_ALIGN(height + 4, S5P_FIMV_NV12MT_VALIGN);
1751 return SIZE_ALIGN((mbX * mbY) >> 1, S5P_FIMV_DEC_BUF_ALIGN);
/* New-style NV12 Y-plane size: macroblock-rounded plane size plus the
 * FIMV per-plane slack, aligned to the NV12 plane alignment. */
1755 _new_calc_yplane_nv12(int width, int height)
1757 return SIZE_ALIGN(_new_calc_plane_nv12(width,
1758 height) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1759 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
/* New-style NV12 UV-plane size: half the macroblock-rounded plane size
 * plus the FIMV per-plane slack, aligned to the NV12 plane alignment. */
1763 _new_calc_uvplane_nv12(int width, int height)
1765 return SIZE_ALIGN((_new_calc_plane_nv12(width,
1766 height) >> 1) + S5P_FIMV_D_ALIGN_PLANE_SIZE,
1767 TBM_SURFACE_ALIGNMENT_PLANE_NV12);
1771 * @brief get the plane data of the surface.
1772 * @param[in] width : the width of the surface
1773 * @param[in] height : the height of the surface
1774 * @param[in] format : the format of the surface
1775 * @param[in] plane_idx : the index of the plane
1776 * @param[out] size : the size of the plane
1777 * @param[out] offset : the offset of the plane
1778 * @param[out] pitch : the pitch of the plane
1779 * @param[out] padding : the padding of the plane
1780 * @return 1 if this function succeeds, otherwise 0.
/* Per-plane layout calculator: for a given format and plane index, fill in
 * the plane's size, offset, pitch, and backing-bo index. Structured as one
 * big switch over tbm_format; each arm computes local _pitch/_size (and
 * where visible, _offset), which the hidden tail presumably copies to the
 * out-parameters. NOTE(review): many lines (bpp assignments, offsets,
 * break statements, the default arm and return) are not visible in this
 * chunk — do not infer their absence from this view. */
1783 tbm_vc4_surface_get_plane_data(int width, int height,
1784 tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1785 uint32_t *pitch, int *bo_idx)
/* --- packed 16-bit RGB formats: single plane, RGB pitch alignment --- */
1796 case TBM_FORMAT_XRGB4444:
1797 case TBM_FORMAT_XBGR4444:
1798 case TBM_FORMAT_RGBX4444:
1799 case TBM_FORMAT_BGRX4444:
1800 case TBM_FORMAT_ARGB4444:
1801 case TBM_FORMAT_ABGR4444:
1802 case TBM_FORMAT_RGBA4444:
1803 case TBM_FORMAT_BGRA4444:
1804 case TBM_FORMAT_XRGB1555:
1805 case TBM_FORMAT_XBGR1555:
1806 case TBM_FORMAT_RGBX5551:
1807 case TBM_FORMAT_BGRX5551:
1808 case TBM_FORMAT_ARGB1555:
1809 case TBM_FORMAT_ABGR1555:
1810 case TBM_FORMAT_RGBA5551:
1811 case TBM_FORMAT_BGRA5551:
1812 case TBM_FORMAT_RGB565:
1815 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1816 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- packed 24-bit RGB --- */
1820 case TBM_FORMAT_RGB888:
1821 case TBM_FORMAT_BGR888:
1824 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1825 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- packed 32-bit RGB --- */
1829 case TBM_FORMAT_XRGB8888:
1830 case TBM_FORMAT_XBGR8888:
1831 case TBM_FORMAT_RGBX8888:
1832 case TBM_FORMAT_BGRX8888:
1833 case TBM_FORMAT_ARGB8888:
1834 case TBM_FORMAT_ABGR8888:
1835 case TBM_FORMAT_RGBA8888:
1836 case TBM_FORMAT_BGRA8888:
1839 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1840 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- packed YUV: single plane, YUV pitch alignment --- */
1845 case TBM_FORMAT_YUYV:
1846 case TBM_FORMAT_YVYU:
1847 case TBM_FORMAT_UYVY:
1848 case TBM_FORMAT_VYUY:
1849 case TBM_FORMAT_AYUV:
1852 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1853 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 2-plane YUV (semi-planar) --- */
1859 * index 0 = Y plane, [7:0] Y
1860 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
1862 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
/* NV12 uses the MAX of the legacy and new-style FIMV size calculations so
 * either decoder layout fits. */
1864 case TBM_FORMAT_NV12:
1866 if (plane_idx == 0) {
1868 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1869 _size = MAX(_calc_yplane_nv12(width, height), _new_calc_yplane_nv12(width,
1872 } else if (plane_idx == 1) {
1874 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1875 _size = MAX(_calc_uvplane_nv12(width, height), _new_calc_uvplane_nv12(width,
/* NV21 uses the plain aligned-pitch sizing (no FIMV special case). */
1880 case TBM_FORMAT_NV21:
1882 if (plane_idx == 0) {
1884 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1885 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1887 } else if (plane_idx == 1) {
1888 _offset = width * height;
1889 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1890 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
/* NV16/NV61: full-height chroma plane (4:2:2 semi-planar). */
1895 case TBM_FORMAT_NV16:
1896 case TBM_FORMAT_NV61:
1898 /*if(plane_idx == 0)*/
1901 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1902 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1907 /*else if( plane_idx ==1 )*/
1910 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1911 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 3-plane YUV (planar) --- */
1918 * index 0: Y plane, [7:0] Y
1919 * index 1: Cb plane, [7:0] Cb
1920 * index 2: Cr plane, [7:0] Cr
1922 * index 1: Cr plane, [7:0] Cr
1923 * index 2: Cb plane, [7:0] Cb
1927 * NATIVE_BUFFER_FORMAT_YV12
1928 * NATIVE_BUFFER_FORMAT_I420
/* 4:1:0 — chroma subsampled by 4 in both directions. */
1930 case TBM_FORMAT_YUV410:
1931 case TBM_FORMAT_YVU410:
1933 /*if(plane_idx == 0)*/
1936 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1937 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1942 /*else if(plane_idx == 1)*/
1945 _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1946 _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
1951 /*else if (plane_idx == 2)*/
1954 _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
1955 _size = SIZE_ALIGN(_pitch * (height / 4), TBM_SURFACE_ALIGNMENT_PLANE);
/* 4:1:1 / 4:2:0 — chroma halved (width and height halved here). */
1959 case TBM_FORMAT_YUV411:
1960 case TBM_FORMAT_YVU411:
1961 case TBM_FORMAT_YUV420:
1962 case TBM_FORMAT_YVU420:
1964 /*if(plane_idx == 0)*/
1967 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1968 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1973 /*else if(plane_idx == 1)*/
1976 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1977 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1982 /*else if (plane_idx == 2)*/
1985 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1986 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
/* 4:2:2 — chroma width halved, full height. */
1990 case TBM_FORMAT_YUV422:
1991 case TBM_FORMAT_YVU422:
1993 /*if(plane_idx == 0)*/
1996 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1997 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2002 /*else if(plane_idx == 1)*/
2005 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2006 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
2011 /*else if (plane_idx == 2)*/
2014 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
2015 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
/* 4:4:4 — full-resolution chroma planes. */
2019 case TBM_FORMAT_YUV444:
2020 case TBM_FORMAT_YVU444:
2022 /*if(plane_idx == 0)*/
2025 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2026 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2031 /*else if(plane_idx == 1)*/
2034 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2035 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
2040 /*else if (plane_idx == 2)*/
2043 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
2044 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* Return the TBM memory flags recorded on the bo at alloc/import time
 * (flags_tbm in the backend-private struct); 0 on a NULL bo or missing
 * backend data. */
2062 tbm_vc4_bo_get_flags(tbm_bo bo)
2064 VC4_RETURN_VAL_IF_FAIL(bo != NULL, 0);
2068 bo_vc4 = (tbm_bo_vc4)tbm_backend_get_bo_priv(bo);
2069 VC4_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
2071 return bo_vc4->flags_tbm;
/* Bind the native (Wayland) display: start the drm-auth server on the
 * backend's fd/device name via tbm_drm_helper, and remember the display so
 * bufmgr_deinit knows to tear the auth server down. Returns 0 on failure
 * (success return is in a line not visible in this chunk). */
2075 tbm_vc4_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *native_display)
2077 tbm_bufmgr_vc4 bufmgr_vc4;
2079 bufmgr_vc4 = tbm_backend_get_priv_from_bufmgr(bufmgr);
2080 VC4_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
2082 if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_vc4->fd,
2083 bufmgr_vc4->device_name, 0)) {
/* NOTE(review): message says "wl_server_init" but the call is
 * "wl_auth_server_init" — consider aligning the text. */
2084 TBM_VC4_ERROR("fail to tbm_drm_helper_wl_server_init\n");
2088 bufmgr_vc4->bind_display = native_display;
/* TBM module registration: forward-declare the init entry point, describe
 * the module version, and export the tbmModuleData symbol the TBM loader
 * looks up when loading this backend. */
2093 MODULEINITPPROTO(init_tbm_bufmgr_priv);
2095 static TBMModuleVersionInfo BcmVersRec = {
2101 TBMModuleData tbmModuleData = { &BcmVersRec, init_tbm_bufmgr_priv};
2104 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
2106 tbm_bufmgr_backend bufmgr_backend;
2107 tbm_bufmgr_vc4 bufmgr_vc4;
2113 bufmgr_vc4 = calloc(1, sizeof(struct _tbm_bufmgr_vc4));
2115 TBM_VC4_ERROR("fail to alloc bufmgr_vc4!\n");
2119 if (tbm_backend_is_display_server()) {
2120 bufmgr_vc4->fd = tbm_drm_helper_get_master_fd();
2121 if (bufmgr_vc4->fd < 0) {
2122 bufmgr_vc4->fd = _tbm_vc4_open_drm();
2123 if (bufmgr_vc4->fd < 0) {
2124 TBM_VC4_ERROR("fail to open drm!\n", getpid());
2129 tbm_drm_helper_set_tbm_master_fd(bufmgr_vc4->fd);
2131 bufmgr_vc4->device_name = drmGetDeviceNameFromFd(bufmgr_vc4->fd);
2132 if (!bufmgr_vc4->device_name) {
2133 TBM_VC4_ERROR("fail to get device name!\n", getpid());
2135 tbm_drm_helper_unset_tbm_master_fd();
2136 goto fail_get_device_name;
2138 tbm_drm_helper_set_fd(bufmgr_vc4->fd);
2140 if (_check_render_node()) {
2141 bufmgr_vc4->fd = _get_render_node();//TODO
2142 if (bufmgr_vc4->fd < 0) {
2143 TBM_VC4_ERROR("fail to get render node\n");
2144 goto fail_get_render_node;
2146 TBM_VC4_DEBUG("Use render node:%d\n", bufmgr_vc4->fd);
2148 if (!tbm_drm_helper_get_auth_info(&(bufmgr_vc4->fd), &(bufmgr_vc4->device_name), NULL)) {
2149 TBM_VC4_ERROR("fail to get auth drm info!\n");
2150 goto fail_get_auth_info;
2153 tbm_drm_helper_set_fd(bufmgr_vc4->fd);
2157 //Check if the tbm manager supports dma fence or not.
2158 fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
2161 int length = read(fp, buf, 1);
2163 if (length == 1 && buf[0] == '1')
2164 bufmgr_vc4->use_dma_fence = 1;
2169 if (!_bufmgr_init_cache_state(bufmgr_vc4)) {
2170 TBM_VC4_ERROR("fail to init bufmgr cache state\n");
2171 goto fail_init_cache_state;
2174 /*Create Hash Table*/
2175 bufmgr_vc4->hashBos = drmHashCreate();
2177 bufmgr_backend = tbm_backend_alloc();
2178 if (!bufmgr_backend) {
2179 TBM_VC4_ERROR("fail to alloc backend!\n");
2180 goto fail_alloc_backend;
2183 bufmgr_backend->priv = (void *)bufmgr_vc4;
2184 bufmgr_backend->bufmgr_deinit = tbm_vc4_bufmgr_deinit;
2185 bufmgr_backend->bo_size = tbm_vc4_bo_size;
2186 bufmgr_backend->bo_alloc = tbm_vc4_bo_alloc;
2187 bufmgr_backend->bo_free = tbm_vc4_bo_free;
2188 bufmgr_backend->bo_import = tbm_vc4_bo_import;
2189 bufmgr_backend->bo_import_fd = tbm_vc4_bo_import_fd;
2190 bufmgr_backend->bo_export = tbm_vc4_bo_export;
2191 bufmgr_backend->bo_export_fd = tbm_vc4_bo_export_fd;
2192 bufmgr_backend->bo_get_handle = tbm_vc4_bo_get_handle;
2193 bufmgr_backend->bo_map = tbm_vc4_bo_map;
2194 bufmgr_backend->bo_unmap = tbm_vc4_bo_unmap;
2195 bufmgr_backend->surface_get_plane_data = tbm_vc4_surface_get_plane_data;
2196 bufmgr_backend->surface_supported_format = tbm_vc4_surface_supported_format;
2197 bufmgr_backend->bo_get_flags = tbm_vc4_bo_get_flags;
2198 bufmgr_backend->bo_lock = tbm_vc4_bo_lock;
2199 bufmgr_backend->bo_unlock = tbm_vc4_bo_unlock;
2201 if (tbm_backend_is_display_server() && !_check_render_node())
2202 bufmgr_backend->bufmgr_bind_native_display = tbm_vc4_bufmgr_bind_native_display;
2204 if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
2205 TBM_VC4_ERROR("fail to init backend!\n");
2206 goto fail_init_backend;
2213 env = getenv("TBM_VC4_DEBUG");
2216 TBM_VC4_ERROR("TBM_VC4_DEBUG=%s\n", env);
2222 TBM_VC4_DEBUG("drm_fd:%d\n", bufmgr_vc4->fd);
2227 tbm_backend_free(bufmgr_backend);
2229 if (bufmgr_vc4->hashBos)
2230 drmHashDestroy(bufmgr_vc4->hashBos);
2231 _bufmgr_deinit_cache_state(bufmgr_vc4);
2232 fail_init_cache_state:
2233 if (tbm_backend_is_display_server())
2234 tbm_drm_helper_unset_tbm_master_fd();
2236 tbm_drm_helper_unset_fd();
2237 fail_get_device_name:
2238 close(bufmgr_vc4->fd);
2240 fail_get_render_node: