1 /**************************************************************************
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
43 #include <sys/ioctl.h>
44 #include <sys/types.h>
51 #include <tbm_bufmgr.h>
52 #include <tbm_bufmgr_backend.h>
53 #include <drm/sprd_drm.h>
55 #include <tbm_surface.h>
56 #include <tbm_drm_helper.h>
59 #include "tbm_bufmgr_tgl.h"
61 //#define USE_CONTIG_ONLY
65 #define TBM_COLOR_FORMAT_COUNT 4
68 #define LOG_TAG "TBM_BACKEND"
70 static int bDebug = 0;
72 #define SPRD_DRM_NAME "sprd"
77 static char app_name[128] = {0, };
78 static int initialized = 0;
85 /* get the application name */
86 f = fopen("/proc/self/cmdline", "r");
90 if (fgets(app_name, 100, f) == NULL) {
97 slash = strrchr(app_name, '/');
99 memmove(app_name, slash + 1, strlen(slash));
106 #define TBM_SPRD_ERROR(fmt, args...) LOGE("\033[31m" "[%s] " fmt "\033[0m", _target_name(), ##args)
107 #define TBM_SPRD_DEBUG(fmt, args...) if (bDebug&01) LOGD("[%s] " fmt, _target_name(), ##args)
109 #define TBM_SPRD_ERROR(...)
110 #define TBM_SPRD_DEBUG(...)
113 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
115 #define TBM_SURFACE_ALIGNMENT_PLANE (64)
116 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (128)
117 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
120 /* check condition */
121 #define SPRD_RETURN_IF_FAIL(cond) {\
123 TBM_SPRD_ERROR("[%s] : '%s' failed.\n", __FUNCTION__, #cond);\
127 #define SPRD_RETURN_VAL_IF_FAIL(cond, val) {\
129 TBM_SPRD_ERROR("[%s] : '%s' failed.\n", __FUNCTION__, #cond);\
134 struct dma_buf_info {
136 unsigned int fence_supported;
137 unsigned int padding;
140 #define DMA_BUF_ACCESS_READ 0x1
141 #define DMA_BUF_ACCESS_WRITE 0x2
142 #define DMA_BUF_ACCESS_DMA 0x4
143 #define DMA_BUF_ACCESS_MAX 0x8
145 #define DMA_FENCE_LIST_MAX 5
147 struct dma_buf_fence {
152 #define DMABUF_IOCTL_BASE 'F'
153 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
155 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
156 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
157 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
160 #define GLOBAL_KEY ((unsigned int)(-1))
162 #define TBM_SPRD_CACHE_INV 0x01 /**< cache invalidate */
163 #define TBM_SPRD_CACHE_CLN 0x02 /**< cache clean */
164 #define TBM_SPRD_CACHE_ALL 0x10 /**< cache all */
165 #define TBM_SPRD_CACHE_FLUSH (TBM_SPRD_CACHE_INV|TBM_SPRD_CACHE_CLN) /**< cache flush */
166 #define TBM_SPRD_CACHE_FLUSH_ALL (TBM_SPRD_CACHE_FLUSH|TBM_SPRD_CACHE_ALL) /**< cache flush all */
170 DEVICE_CA, /* cache aware device */
171 DEVICE_CO /* cache oblivious device */
174 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
176 union _tbm_bo_cache_state {
179 unsigned int cntFlush:16; /*Flush all index for sync */
180 unsigned int isCached:1;
181 unsigned int isDirtied:2;
185 typedef struct _tbm_bufmgr_sprd *tbm_bufmgr_sprd;
186 typedef struct _tbm_bo_sprd *tbm_bo_sprd;
188 typedef struct _sprd_private {
190 struct _tbm_bo_sprd *bo_priv;
193 /* tbm buffor object for sprd */
194 struct _tbm_bo_sprd {
197 unsigned int name; /* FLINK ID */
199 unsigned int gem; /* GEM Handle */
201 unsigned int dmabuf; /* fd for dmabuf */
203 void *pBase; /* virtual address */
207 unsigned int flags_sprd;
208 unsigned int flags_tbm;
212 pthread_mutex_t mutex;
213 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
217 tbm_bo_cache_state cache_state;
218 unsigned int map_cnt;
221 /* tbm bufmgr private for sprd */
222 struct _tbm_bufmgr_sprd {
234 char *STR_DEVICE[] = {
250 uint32_t tbm_sprd_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
258 _tgl_get_version(int fd)
260 struct tgl_ver_data data;
263 err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
265 TBM_SPRD_ERROR("error(%s) %s:%d\n", strerror(errno));
269 TBM_SPRD_DEBUG("tgl version is (%u, %u).\n", data.major, data.minor);
/* Register 'key' with the tgl driver so it can later be locked/unlocked
 * (see _tgl_lock()/_tgl_unlock()).  NOTE(review): some lines are elided
 * in this view. */
276 _tgl_init(int fd, unsigned int key)
278 struct tgl_reg_data data;
/* lock-acquisition timeout: 1 second */
282 data.timeout_ms = 1000;
284 err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
286 TBM_SPRD_ERROR("error(%s) key:%d\n", strerror(errno), key);
/* Unregister 'key' from the tgl driver -- counterpart of _tgl_init(). */
294 _tgl_destroy(int fd, unsigned int key)
296 struct tgl_reg_data data;
300 err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
302 TBM_SPRD_ERROR("error(%s) key:%d\n", strerror(errno), key);
/* Acquire the tgl lock for 'key'.  TBM_OPTION_READ/WRITE is mapped to
 * the matching tgl lock type; any other option falls back to
 * TGL_TYPE_NONE.  NOTE(review): some lines are elided in this view. */
310 _tgl_lock(int fd, unsigned int key, int opt)
312 struct tgl_lock_data data;
313 enum tgl_type_data tgl_type;
317 case TBM_OPTION_READ:
318 tgl_type = TGL_TYPE_READ;
320 case TBM_OPTION_WRITE:
321 tgl_type = TGL_TYPE_WRITE;
324 tgl_type = TGL_TYPE_NONE;
329 data.type = tgl_type;
/* FIX: pass the argument struct by address, as every other tgl ioctl in
 * this file does (TGL_IOCTL_REGISTER/SET_DATA/GET_DATA all use &data);
 * passing the struct by value is not a valid ioctl argument. */
331 err = ioctl(fd, TGL_IOCTL_LOCK, &data);
333 TBM_SPRD_ERROR("error(%s) key:%d opt:%d\n",
334 strerror(errno), key, opt);
/* Release the tgl lock for 'key'.  The lock type is reset to
 * TGL_TYPE_NONE before the unlock ioctl. */
342 _tgl_unlock(int fd, unsigned int key)
344 struct tgl_lock_data data;
348 data.type = TGL_TYPE_NONE;
/* FIX: pass the argument struct by address (matches the other tgl
 * ioctls in this file); passing it by value is not a valid ioctl
 * argument. */
350 err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
352 TBM_SPRD_ERROR("error(%s) key:%d\n",
353 strerror(errno), key);
/* Store 'val' in the tgl driver under 'key' (used below to persist
 * per-bo cache state and the global flush counter). */
362 _tgl_set_data(int fd, unsigned int key, unsigned int val)
364 struct tgl_usr_data data;
370 err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
372 TBM_SPRD_ERROR("error(%s) key:%d\n",
373 strerror(errno), key);
/* Read the value stored under 'key' in the tgl driver.  If 'locked' is
 * non-NULL it receives the lock status reported by the driver. */
380 static inline unsigned int
381 _tgl_get_data(int fd, unsigned int key, unsigned int *locked)
383 struct tgl_usr_data data = { 0, };
388 err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
390 TBM_SPRD_ERROR("error(%s) key:%d\n",
391 strerror(errno), key);
/* report lock status to the caller (guarded by a NULL check above) */
396 *locked = (unsigned int)data.status;
/* Open the sprd DRM device and return its file descriptor.
 * First tries drmOpen(SPRD_DRM_NAME); if that fails, enumerates
 * "card[0-9]*" DRM devices via udev looking for a parent named
 * "sprd-drm" and opens its device node directly.
 * NOTE(review): some lines are elided in this view. */
403 _tbm_sprd_open_drm(void)
406 struct udev_device *drm_device = NULL;
407 struct udev_list_entry *entry = NULL;
408 struct udev_enumerate *e;
409 const char *filepath;
415 fd = drmOpen(SPRD_DRM_NAME, NULL);
420 TBM_SPRD_DEBUG("warning fail to open drm. search drm-device by udev\n");
424 TBM_SPRD_ERROR("udev_new() failed.\n");
428 e = udev_enumerate_new(udev);
429 udev_enumerate_add_match_subsystem(e, "drm");
430 udev_enumerate_add_match_sysname(e, "card[0-9]*");
431 udev_enumerate_scan_devices(e);
433 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
434 struct udev_device *device, *device_parent;
436 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
437 udev_list_entry_get_name(entry));
438 device_parent = udev_device_get_parent(device);
439 /* Not need unref device_parent. device_parent and device have same refcnt */
441 if (strcmp(udev_device_get_sysname(device_parent), "sprd-drm") == 0) {
443 TBM_SPRD_DEBUG("Found render device: '%s' (%s)\n",
444 udev_device_get_syspath(drm_device),
445 udev_device_get_sysname(device_parent));
449 udev_device_unref(device);
452 udev_enumerate_unref(e);
454 /* Get device file path. */
455 filepath = udev_device_get_devnode(drm_device);
457 TBM_SPRD_ERROR("udev_device_get_devnode() failed.\n");
458 udev_device_unref(drm_device);
463 udev_device_unref(drm_device);
466 /* Open DRM device file and check validity. */
467 fd = open(filepath, O_RDWR | O_CLOEXEC);
/* FIX: the "%s" conversion had no matching argument (undefined
 * behavior); pass the path being opened. */
469 TBM_SPRD_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
/* FIX: the "%s" conversion had no matching argument; pass the errno
 * string. */
474 TBM_SPRD_ERROR("fstat() failed %s.\n", strerror(errno));
479 TBM_SPRD_ERROR("warning fail to open drm\n",
/* Flush/invalidate CPU caches for a bo (or for all bos when bo_sprd is
 * NULL) via the DRM_SPRD_GEM_CACHE_OP ioctl.  The whole body is compiled
 * out unless TBM_SRPD_CACHE_FLUSH is defined -- see the TODO below. */
488 _sprd_bo_cache_flush(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int flags)
490 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
492 /* cache flush is managed by kernel side when using dma-fence. */
493 if (bufmgr_sprd->use_dma_fence)
495 // TODO: The tm1 kernel does not support ioctl for cache flush right now.
496 // The drm in tm1 kernel has to support cache_flush to turn on this feature(TBM_SRPD_CACHE_FLUSH).
497 #if TBM_SRPD_CACHE_FLUSH
498 struct drm_sprd_gem_cache_op cache_op = {0, };
501 /* if bo_sprd is null, do cache_flush_all */
/* NOTE(review): the (uint32_t) cast truncates pBase on 64-bit builds --
 * confirm this backend only targets 32-bit userspace. */
504 cache_op.usr_addr = (uint64_t)((uint32_t)bo_sprd->pBase);
505 cache_op.size = bo_sprd->size;
507 flags = TBM_SPRD_CACHE_FLUSH_ALL;
509 cache_op.usr_addr = 0;
/* translate TBM_SPRD_CACHE_* flags into the driver's INV/CLN
 * range-or-all operation bits */
513 if (flags & TBM_SPRD_CACHE_INV) {
514 if (flags & TBM_SPRD_CACHE_ALL)
515 cache_op.flags |= SPRD_DRM_CACHE_INV_ALL;
517 cache_op.flags |= SPRD_DRM_CACHE_INV_RANGE;
520 if (flags & TBM_SPRD_CACHE_CLN) {
521 if (flags & TBM_SPRD_CACHE_ALL)
522 cache_op.flags |= SPRD_DRM_CACHE_CLN_ALL;
524 cache_op.flags |= SPRD_DRM_CACHE_CLN_RANGE;
527 if (flags & TBM_SPRD_CACHE_ALL)
528 cache_op.flags |= SPRD_DRM_ALL_CACHES_CORES;
530 ret = drmCommandWriteRead(bufmgr_sprd->fd, DRM_SPRD_GEM_CACHE_OP, &cache_op,
533 TBM_SPRD_ERROR("error fail to flush the cache.\n");
/* Register a bo's flink name with the tgl driver and, for newly created
 * (non-imported) bos, seed its cache state to "clean, not cached".
 * Skipped entirely when dma-fence handles coherency. */
542 _bo_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int import)
544 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
545 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
547 if (bufmgr_sprd->use_dma_fence)
550 _tgl_init(bufmgr_sprd->tgl_fd, bo_sprd->name);
553 tbm_bo_cache_state cache_state;
/* fresh bo: no device has dirtied it and nothing is cached yet */
556 cache_state.data.isDirtied = DEVICE_NONE;
557 cache_state.data.isCached = 0;
558 cache_state.data.cntFlush = 0;
560 _tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, cache_state.val);
/* Update a bo's shared (tgl-stored) cache state when it is mapped for
 * 'device' with access 'opt', and perform any cache maintenance needed
 * for coherency between CPU (cache-aware, DEVICE_CA) and non-CPU
 * (cache-oblivious, DEVICE_CO) users.  No-op under dma-fence or for
 * non-cachable bos. */
568 _bo_set_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int device, int opt)
571 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
572 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
575 unsigned short cntFlush = 0;
577 if (bufmgr_sprd->use_dma_fence)
580 if (bo_sprd->flags_sprd & SPRD_BO_NONCACHABLE)
583 /* get cache state of a bo */
584 bo_sprd->cache_state.val = _tgl_get_data(bufmgr_sprd->tgl_fd, bo_sprd->name, NULL);
586 /* get global cache flush count */
587 cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
/* NOTE(review): this branch compares 'opt' against TBM_DEVICE_CPU while
 * the else-path treats 'opt' as access flags -- looks like the
 * condition was meant to test 'device'; confirm against callers. */
589 if (opt == TBM_DEVICE_CPU) {
/* CPU is about to read memory last written by a cache-oblivious
 * device: invalidate stale CPU cache lines first */
590 if (bo_sprd->cache_state.data.isDirtied == DEVICE_CO &&
591 bo_sprd->cache_state.data.isCached)
592 need_flush = TBM_SPRD_CACHE_INV;
594 bo_sprd->cache_state.data.isCached = 1;
595 if (opt & TBM_OPTION_WRITE)
596 bo_sprd->cache_state.data.isDirtied = DEVICE_CA;
598 if (bo_sprd->cache_state.data.isDirtied != DEVICE_CA)
599 bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
/* a cache-oblivious device is about to read memory dirtied via the
 * CPU cache: clean (write back) unless a global flush already ran
 * since this bo last recorded the flush counter */
602 if (bo_sprd->cache_state.data.isDirtied == DEVICE_CA &&
603 bo_sprd->cache_state.data.isCached &&
604 bo_sprd->cache_state.data.cntFlush == cntFlush)
605 need_flush = TBM_SPRD_CACHE_CLN | TBM_SPRD_CACHE_ALL;
607 if (opt & TBM_OPTION_WRITE)
608 bo_sprd->cache_state.data.isDirtied = DEVICE_CO;
610 if (bo_sprd->cache_state.data.isDirtied != DEVICE_CO)
611 bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
/* a flush-all invalidates every bo's cached lines, so bump the
 * global counter that other bos compare against */
616 if (need_flush & TBM_SPRD_CACHE_ALL)
617 _tgl_set_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
619 /* call cache flush */
620 _sprd_bo_cache_flush(bufmgr_sprd, bo_sprd, need_flush);
622 TBM_SPRD_DEBUG("\tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
623 bo_sprd->cache_state.data.isCached,
624 bo_sprd->cache_state.data.isDirtied,
/* Persist a bo's in-memory cache state (including the current global
 * flush counter) back into the tgl driver -- called on unmap.  No-op
 * under dma-fence. */
634 _bo_save_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
637 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
638 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
640 if (bufmgr_sprd->use_dma_fence)
643 unsigned short cntFlush = 0;
645 /* get global cache flush count */
646 cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
648 /* save global cache flush count */
649 bo_sprd->cache_state.data.cntFlush = cntFlush;
650 _tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, bo_sprd->cache_state.val);
/* Remove a bo's entry from the tgl driver -- counterpart of
 * _bo_init_cache_state().  No-op under dma-fence. */
657 _bo_destroy_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
659 SPRD_RETURN_IF_FAIL(bo_sprd != NULL);
660 SPRD_RETURN_IF_FAIL(bufmgr_sprd != NULL);
662 if (bufmgr_sprd->use_dma_fence)
665 _tgl_destroy(bufmgr_sprd->tgl_fd, bo_sprd->name);
/* Open the tgl device, verify its version, and register the GLOBAL_KEY
 * slot that holds the bufmgr-wide cache-flush counter.  Closes the fd
 * again on any failure.  No-op under dma-fence. */
669 _bufmgr_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd)
671 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
673 if (bufmgr_sprd->use_dma_fence)
676 /* open tgl fd for saving cache flush data */
677 bufmgr_sprd->tgl_fd = open(tgl_devfile, O_RDWR);
678 if (bufmgr_sprd->tgl_fd < 0) {
679 TBM_SPRD_ERROR("fail to open global_lock:%s\n", tgl_devfile);
683 if (!_tgl_get_version(bufmgr_sprd->tgl_fd)) {
684 TBM_SPRD_ERROR("fail to get tgl_version. tgl init failed.\n");
685 close(bufmgr_sprd->tgl_fd);
690 if (!_tgl_init(bufmgr_sprd->tgl_fd, GLOBAL_KEY)) {
691 TBM_SPRD_ERROR("fail to initialize the tgl\n");
692 close(bufmgr_sprd->tgl_fd);
/* Close the tgl fd opened by _bufmgr_init_cache_state().  No-op under
 * dma-fence. */
701 _bufmgr_deinit_cache_state(tbm_bufmgr_sprd bufmgr_sprd)
703 SPRD_RETURN_IF_FAIL(bufmgr_sprd != NULL);
705 if (bufmgr_sprd->use_dma_fence)
708 if (bufmgr_sprd->tgl_fd >= 0)
709 close(bufmgr_sprd->tgl_fd);
712 #ifndef USE_CONTIG_ONLY
/* Translate TBM allocation flags into SPRD GEM flags: scanout buffers
 * must be physically contiguous, everything else is non-contiguous
 * system memory; WC/non-cachable map to the matching SPRD bits. */
714 _get_sprd_flag_from_tbm(unsigned int ftbm)
716 unsigned int flags = 0;
719 * TBM_BO_DEFAULT => ION_HEAP_ID_MASK_SYSTEM
720 * TBM_BO_SCANOUT => ION_HEAP_ID_MASK_MM
721 * TBM_BO_VENDOR => ION_HEAP_ID_MASK_OVERLAY
722 * To be updated appropriately once DRM-GEM supports different heap id masks.
725 if (ftbm & TBM_BO_SCANOUT)
726 flags = SPRD_BO_CONTIG;
728 flags = SPRD_BO_NONCONTIG | SPRD_BO_DEV_SYSTEM;
730 if (ftbm & TBM_BO_WC)
732 else if (ftbm & TBM_BO_NONCACHABLE)
733 flags |= SPRD_BO_NONCACHABLE;
/* Inverse of _get_sprd_flag_from_tbm(): map SPRD GEM flags back to TBM
 * flags (contiguous => SCANOUT, cachable => DEFAULT, etc.). */
739 _get_tbm_flag_from_sprd(unsigned int fsprd)
741 unsigned int flags = 0;
743 if (fsprd & SPRD_BO_NONCONTIG)
744 flags |= TBM_BO_DEFAULT;
746 flags |= TBM_BO_SCANOUT;
748 if (fsprd & SPRD_BO_WC)
750 else if (fsprd & SPRD_BO_CACHABLE)
751 flags |= TBM_BO_DEFAULT;
753 flags |= TBM_BO_NONCACHABLE;
/* Return the global flink name for a GEM handle via
 * DRM_IOCTL_GEM_FLINK; logs and fails if the ioctl errors. */
760 _get_name(int fd, unsigned int gem)
762 struct drm_gem_flink arg = {0,};
765 if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
766 TBM_SPRD_ERROR("error fail to get flink gem=%d\n", gem);
770 return (unsigned int)arg.name;
/* Produce a tbm_bo_handle for the requested device type:
 *  - DEFAULT/2D: the GEM handle itself
 *  - CPU: mmap the bo on first use (DRM_SPRD_GEM_MMAP) and return pBase
 *  - 3D/MM: export (and cache) a dma-buf fd via PRIME
 * NOTE(review): some lines are elided in this view. */
774 _sprd_bo_handle(tbm_bo_sprd bo_sprd, int device)
776 tbm_bo_handle bo_handle;
777 memset(&bo_handle, 0x0, sizeof(uint64_t));
780 case TBM_DEVICE_DEFAULT:
782 bo_handle.u32 = (uint32_t)bo_sprd->gem;
/* CPU access: lazily map the bo into user space once and cache it */
785 if (!bo_sprd->pBase) {
786 struct drm_sprd_gem_mmap arg = {0,};
788 arg.handle = bo_sprd->gem;
789 arg.size = bo_sprd->size;
790 if (drmCommandWriteRead(bo_sprd->fd, DRM_SPRD_GEM_MMAP, &arg, sizeof(arg))) {
791 TBM_SPRD_ERROR("error Cannot usrptr gem=%d\n", bo_sprd->gem);
792 return (tbm_bo_handle) NULL;
794 bo_sprd->pBase = (void *)((uint32_t)arg.mapped);
797 bo_handle.ptr = (void *)bo_sprd->pBase;
/* 3D access: lazily export a dma-buf fd once and cache it */
801 if (!bo_sprd->dmabuf) {
802 struct drm_prime_handle arg = {0, };
803 arg.handle = bo_sprd->gem;
804 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
805 TBM_SPRD_ERROR("error Cannot dmabuf=%d\n", bo_sprd->gem);
806 return (tbm_bo_handle) NULL;
808 bo_sprd->dmabuf = arg.fd;
811 bo_handle.u32 = (uint32_t)bo_sprd->dmabuf;
818 //TODO : Add ioctl for GSP MAP once available.
/* FIX: "__FUNCTION_" (single trailing underscore) is an undefined
 * identifier and cannot compile; the predefined name is __FUNCTION__. */
819 TBM_SPRD_DEBUG("%s In case TBM_DEVICE_MM: \n", __FUNCTION__);
821 if (!bo_sprd->dmabuf) {
822 struct drm_prime_handle arg = {0, };
824 arg.handle = bo_sprd->gem;
825 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
826 TBM_SPRD_ERROR("error Cannot dmabuf=%d\n", bo_sprd->gem);
827 return (tbm_bo_handle) NULL;
829 bo_sprd->dmabuf = arg.fd;
832 bo_handle.u32 = (uint32_t)bo_sprd->dmabuf;
836 bo_handle.ptr = (void *) NULL;
/* Backend callback: return the byte size of a bo. */
844 tbm_sprd_bo_size(tbm_bo bo)
846 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
850 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
851 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
853 return bo_sprd->size;
/* Backend callback: allocate a new GEM bo of 'size' bytes with the
 * given TBM flags.  Creates the GEM object, derives its flink name,
 * initializes tgl cache state, optionally exports a dma-buf fd (when
 * dma-fence is in use), and records the bo in the flink-name hash so
 * future imports of the same name share one private object.
 * Returns the backend-private bo pointer, or NULL-equivalent on failure
 * (error paths elided in this view). */
857 tbm_sprd_bo_alloc(tbm_bo bo, int size, int flags)
859 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
861 tbm_bufmgr_sprd bufmgr_sprd;
862 unsigned int sprd_flags;
865 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
866 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
868 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
870 TBM_SPRD_ERROR("error fail to allocate the bo private\n");
874 #ifdef USE_CONTIG_ONLY
875 flags = TBM_BO_SCANOUT;
876 sprd_flags = SPRD_BO_CONTIG;
878 sprd_flags = _get_sprd_flag_from_tbm(flags);
/* small (<=4KB) scanout buffers don't need physical contiguity */
879 if ((flags & TBM_BO_SCANOUT) && (size <= 4 * 1024))
880 sprd_flags |= SPRD_BO_NONCONTIG;
881 #endif // USE_CONTIG_ONLY
883 struct drm_sprd_gem_create arg = {0, };
884 arg.size = (uint64_t)size;
885 arg.flags = sprd_flags;
886 if (drmCommandWriteRead(bufmgr_sprd->fd, DRM_SPRD_GEM_CREATE, &arg,
888 TBM_SPRD_ERROR("error Cannot create bo(flag:%x, size:%d)\n",
889 arg.flags, (unsigned int)arg.size);
894 bo_sprd->fd = bufmgr_sprd->fd;
895 bo_sprd->gem = arg.handle;
896 bo_sprd->size = size;
897 bo_sprd->flags_tbm = flags;
898 bo_sprd->flags_sprd = sprd_flags;
899 bo_sprd->name = _get_name(bo_sprd->fd, bo_sprd->gem);
901 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 0)) {
902 TBM_SPRD_ERROR("error fail init cache state(%d)\n", bo_sprd->name);
907 pthread_mutex_init(&bo_sprd->mutex, NULL);
/* dma-fence locking needs the dma-buf fd up front */
909 if (bufmgr_sprd->use_dma_fence
910 && !bo_sprd->dmabuf) {
911 struct drm_prime_handle arg = {0, };
913 arg.handle = bo_sprd->gem;
914 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
915 TBM_SPRD_ERROR("error Cannot dmabuf=%d\n", bo_sprd->gem);
919 bo_sprd->dmabuf = arg.fd;
/* register in the flink-name hash for sharing/import lookups */
923 PrivGem *privGem = calloc(1, sizeof(PrivGem));
925 TBM_SPRD_ERROR("error Fail to calloc PrivGem\n");
930 privGem->ref_count = 1;
931 privGem->bo_priv = bo_sprd;
932 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)privGem) < 0)
933 TBM_SPRD_ERROR("error Cannot insert bo to Hash(%d)\n", bo_sprd->name);
935 TBM_SPRD_DEBUG("%s size:%d, gem:%d(%d), flags:%d(%d)\n",
936 __FUNCTION__, bo_sprd->size,
937 bo_sprd->gem, bo_sprd->name,
940 return (void *)bo_sprd;
/* Backend callback: release a bo -- unmap any CPU mapping, close the
 * dma-buf fd, drop the hash reference (freeing the PrivGem entry when
 * the refcount hits zero), destroy tgl cache state, and close the GEM
 * handle. */
944 tbm_sprd_bo_free(tbm_bo bo)
947 tbm_bufmgr_sprd bufmgr_sprd;
952 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
953 SPRD_RETURN_IF_FAIL(bufmgr_sprd != NULL);
955 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
956 SPRD_RETURN_IF_FAIL(bo_sprd != NULL);
958 TBM_SPRD_DEBUG("size:%d, gem:%d(%d)\n",
959 bo_sprd->size, bo_sprd->gem, bo_sprd->name);
961 if (bo_sprd->pBase) {
962 if (munmap(bo_sprd->pBase, bo_sprd->size) == -1)
963 TBM_SPRD_ERROR("error fail to munmap.\n");
967 if (bo_sprd->dmabuf) {
968 close(bo_sprd->dmabuf);
972 /* delete bo from hash */
973 PrivGem *privGem = NULL;
976 ret = drmHashLookup(bufmgr_sprd->hashBos, bo_sprd->name, (void **)&privGem);
978 privGem->ref_count--;
979 if (privGem->ref_count == 0) {
980 drmHashDelete(bufmgr_sprd->hashBos, bo_sprd->name);
985 TBM_SPRD_DEBUG("warning Cannot find bo to Hash(%d), ret=%d\n", bo_sprd->name, ret);
988 _bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
990 /* Free gem handle */
991 struct drm_gem_close arg = {0, };
992 memset(&arg, 0, sizeof(arg));
993 arg.handle = bo_sprd->gem;
994 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_GEM_CLOSE, &arg))
995 TBM_SPRD_ERROR("error fail to DRM_IOCTL_GEM_CLOSE\n");
/* Backend callback: import a bo by flink name ('key').  If the name is
 * already in the hash, the existing private object is shared (refcount
 * handling elided in this view).  Otherwise the GEM object is opened,
 * its flags queried, cache state initialized, a dma-buf fd exported,
 * and the new entry added to the hash.  Error paths unwind via the
 * goto labels at the bottom. */
1002 tbm_sprd_bo_import(tbm_bo bo, unsigned int key)
1004 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, NULL);
1006 tbm_bufmgr_sprd bufmgr_sprd;
1007 tbm_bo_sprd bo_sprd;
1008 PrivGem *privGem = NULL;
1011 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1012 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, NULL);
/* fast path: name already imported -> share the existing private bo */
1014 ret = drmHashLookup(bufmgr_sprd->hashBos, key, (void **)&privGem);
1016 return privGem->bo_priv;
1018 struct drm_sprd_gem_info info = {0, };
1019 struct drm_gem_open arg = {0, };
1022 if (drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1023 TBM_SPRD_ERROR("error Cannot open gem name=%d\n", key);
1027 info.handle = arg.handle;
1028 if (drmCommandWriteRead(bufmgr_sprd->fd,
1031 sizeof(struct drm_sprd_gem_info))) {
1032 TBM_SPRD_ERROR("error Cannot get gem info=%d\n", key);
1036 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1038 TBM_SPRD_ERROR("error fail to allocate the bo private\n");
1042 bo_sprd->fd = bufmgr_sprd->fd;
1043 bo_sprd->gem = arg.handle;
1044 bo_sprd->size = arg.size;
1045 bo_sprd->flags_sprd = info.flags;
1046 bo_sprd->name = key;
1047 #ifdef USE_CONTIG_ONLY
1048 bo_sprd->flags_sprd = SPRD_BO_CONTIG;
1049 bo_sprd->flags_tbm |= TBM_BO_SCANOUT;
1051 bo_sprd->flags_tbm = _get_tbm_flag_from_sprd(bo_sprd->flags_sprd);
1054 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 1)) {
1055 TBM_SPRD_ERROR("error fail init cache state(%d)\n", bo_sprd->name);
1056 goto fail_init_cache;
1059 if (!bo_sprd->dmabuf) {
1060 struct drm_prime_handle arg = {0, };
1062 arg.handle = bo_sprd->gem;
1063 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1064 TBM_SPRD_ERROR("error Cannot dmabuf=%d\n", bo_sprd->gem);
1065 goto fail_prime_handle_to_fd;
1067 bo_sprd->dmabuf = arg.fd;
1070 /* add bo to hash */
1071 privGem = calloc(1, sizeof(PrivGem));
1073 TBM_SPRD_ERROR("error Fail to alloc\n");
1074 goto fail_alloc_gem_priv;
1077 privGem->ref_count = 1;
1078 privGem->bo_priv = bo_sprd;
1079 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)privGem) < 0)
1080 TBM_SPRD_ERROR("error Cannot insert bo to Hash(%d)\n", bo_sprd->name);
1082 TBM_SPRD_DEBUG("size:%d, gem:%d(%d), flags:%d(%d)\n",
1083 bo_sprd->size, bo_sprd->gem, bo_sprd->name,
1084 bo_sprd->flags_tbm, bo_sprd->flags_sprd);
1086 return (void *)bo_sprd;
/* error unwinding: release resources in reverse acquisition order */
1088 fail_alloc_gem_priv:
1089 if (bo_sprd->dmabuf)
1090 close(bo_sprd->dmabuf);
1091 fail_prime_handle_to_fd:
1092 _bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
1098 struct drm_gem_close gem_close_arg = {arg.handle, 0};
1099 drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_GEM_CLOSE, &gem_close_arg);
/* Backend callback: import a bo from a dma-buf fd ('key').  Converts
 * the fd to a GEM handle via PRIME, derives the flink name, and shares
 * the existing private bo if that name is already hashed.  Otherwise a
 * new private bo is built: size comes from lseek(SEEK_END) on the fd
 * (kernel >= 3.12) with the driver-reported size as fallback, flags are
 * queried from the driver, cache state is initialized, and the entry is
 * added to the hash.  Error paths unwind via the labels at the bottom. */
1105 tbm_sprd_bo_import_fd(tbm_bo bo, tbm_fd key)
1107 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, NULL);
1109 tbm_bufmgr_sprd bufmgr_sprd;
1110 tbm_bo_sprd bo_sprd;
1114 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1115 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, NULL);
1117 //getting handle from fd
1118 struct drm_prime_handle arg = {0, };
1122 if (drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1123 TBM_SPRD_ERROR("error bo:%p Cannot get gem handle from fd:%d (%s)\n",
1124 bo, arg.fd, strerror(errno));
1129 name = _get_name(bufmgr_sprd->fd, gem);
1131 TBM_SPRD_ERROR("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1132 bo, gem, key, strerror(errno));
/* same flink name and same gem handle -> already imported; share it */
1136 if (!drmHashLookup(bufmgr_sprd->hashBos, name, (void **)&privGem)) {
1137 if (gem == privGem->bo_priv->gem)
1138 return privGem->bo_priv;
1141 unsigned int real_size;
1142 struct drm_sprd_gem_info info = {0, };
1144 /* Determine size of bo. The fd-to-handle ioctl really should
1145 * return the size, but it doesn't. If we have kernel 3.12 or
1146 * later, we can lseek on the prime fd to get the size. Older
1147 * kernels will just fail, in which case we fall back to the
1148 * provided (estimated or guess size). */
1149 real_size = lseek(key, 0, SEEK_END);
1152 if (drmCommandWriteRead(bufmgr_sprd->fd,
1155 sizeof(struct drm_sprd_gem_info))) {
1156 TBM_SPRD_ERROR("error bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
1157 bo, gem, key, strerror(errno));
/* NOTE(review): real_size is unsigned, so this relies on lseek's
 * (off_t)-1 failure value wrapping to UINT_MAX -- an off_t/ssize_t
 * variable would express the check directly. */
1161 if (real_size == -1)
1162 real_size = info.size;
1164 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1166 TBM_SPRD_ERROR("error bo:%p fail to allocate the bo private\n", bo);
1170 bo_sprd->fd = bufmgr_sprd->fd;
1172 bo_sprd->size = real_size;
1173 bo_sprd->flags_sprd = info.flags;
1174 bo_sprd->flags_tbm = _get_tbm_flag_from_sprd(bo_sprd->flags_sprd);
1176 bo_sprd->name = name;
1177 if (!bo_sprd->name) {
1178 TBM_SPRD_ERROR("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1179 bo, gem, key, strerror(errno));
1180 goto fail_check_name;
1183 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 1)) {
1184 TBM_SPRD_ERROR("error fail init cache state(%d)\n", bo_sprd->name);
1185 goto fail_init_cache;
1188 /* add bo to hash */
1189 privGem = calloc(1, sizeof(PrivGem));
1191 TBM_SPRD_ERROR("error Fail to callocprivGem\n");
1192 goto fail_alloc_gem_priv;
1195 privGem->ref_count = 1;
1196 privGem->bo_priv = bo_sprd;
1197 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)privGem) < 0) {
1198 TBM_SPRD_ERROR("error bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1199 bo, bo_sprd->name, gem, key);
1202 TBM_SPRD_DEBUG("bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1204 bo_sprd->gem, bo_sprd->name,
1207 bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1210 return (void *)bo_sprd;
1212 fail_alloc_gem_priv:
1213 _bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
/* Backend callback: export a bo as a global flink name, creating the
 * name lazily on first export. */
1221 tbm_sprd_bo_export(tbm_bo bo)
1223 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1225 tbm_bo_sprd bo_sprd;
1227 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1228 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1230 if (!bo_sprd->name) {
1231 bo_sprd->name = _get_name(bo_sprd->fd, bo_sprd->gem);
1232 if (!bo_sprd->name) {
1233 TBM_SPRD_ERROR("error Cannot get name\n");
1238 TBM_SPRD_DEBUG("size:%d, gem:%d(%d), flags:%d(%d)\n",
1239 bo_sprd->size, bo_sprd->gem, bo_sprd->name,
1240 bo_sprd->flags_tbm, bo_sprd->flags_sprd);
1242 return (unsigned int)bo_sprd->name;
/* Backend callback: export a bo as a dma-buf fd via PRIME.  Each call
 * produces a new fd owned by the caller; returns the negative ioctl
 * result on failure. */
1246 tbm_sprd_bo_export_fd(tbm_bo bo)
1248 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1250 tbm_bo_sprd bo_sprd;
1253 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1254 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, -1);
1256 struct drm_prime_handle arg = {0, };
1258 arg.handle = bo_sprd->gem;
1259 ret = drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1261 TBM_SPRD_ERROR("error bo:%p Cannot dmabuf=%d (%s)\n",
1262 bo, bo_sprd->gem, strerror(errno));
1263 return (tbm_fd) ret;
1266 TBM_SPRD_DEBUG("bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1268 bo_sprd->gem, bo_sprd->name,
1271 bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1274 return (tbm_fd)arg.fd;
/* Backend callback: return a device-specific handle for the bo without
 * touching cache state (unlike tbm_sprd_bo_map()).  Delegates to
 * _sprd_bo_handle(). */
1278 static tbm_bo_handle
1279 tbm_sprd_bo_get_handle(tbm_bo bo, int device)
1281 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1283 tbm_bo_handle bo_handle;
1284 tbm_bo_sprd bo_sprd;
1286 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1287 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, (tbm_bo_handle) NULL);
1289 if (!bo_sprd->gem) {
1290 TBM_SPRD_ERROR("error Cannot map gem=%d\n", bo_sprd->gem);
1291 return (tbm_bo_handle) NULL;
1294 TBM_SPRD_DEBUG("gem:%d(%d), %s\n",
1295 bo_sprd->gem, bo_sprd->name, STR_DEVICE[device]);
1297 /*Get mapped bo_handle*/
1298 bo_handle = _sprd_bo_handle(bo_sprd, device);
1299 if (bo_handle.ptr == NULL) {
1300 TBM_SPRD_ERROR("error Cannot get handle: gem:%d, device:%d\n",
1301 bo_sprd->gem, device);
1302 return (tbm_bo_handle) NULL;
/* Backend callback: map a bo for 'device' with access 'opt'.  Obtains
 * the device handle via _sprd_bo_handle() and, on the first concurrent
 * map (map_cnt == 0), updates the shared cache state / performs cache
 * maintenance via _bo_set_cache_state(). */
1308 static tbm_bo_handle
1309 tbm_sprd_bo_map(tbm_bo bo, int device, int opt)
1311 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1313 tbm_bo_handle bo_handle;
1314 tbm_bo_sprd bo_sprd;
1315 tbm_bufmgr_sprd bufmgr_sprd;
1317 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1318 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, (tbm_bo_handle) NULL);
1320 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1321 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, (tbm_bo_handle) NULL);
1323 if (!bo_sprd->gem) {
1324 TBM_SPRD_ERROR("error Cannot map gem=%d\n", bo_sprd->gem);
1325 return (tbm_bo_handle) NULL;
1328 TBM_SPRD_DEBUG("%s gem:%d(%d), %s, %s\n",
1329 __FUNCTION__, bo_sprd->gem, bo_sprd->name, STR_DEVICE[device], STR_OPT[opt]);
1331 /*Get mapped bo_handle*/
1332 bo_handle = _sprd_bo_handle(bo_sprd, device);
1333 if (bo_handle.ptr == NULL) {
1334 TBM_SPRD_ERROR("error Cannot get handle: gem:%d, device:%d, opt:%d\n",
1335 bo_sprd->gem, device, opt);
1336 return (tbm_bo_handle) NULL;
/* only the first mapper transitions the shared cache state */
1339 if (bo_sprd->map_cnt == 0)
1340 _bo_set_cache_state(bufmgr_sprd, bo_sprd, device, opt);
/* Backend callback: unmap a bo.  When the last mapping is released
 * (map_cnt back to 0, decrement elided in this view), the cache state
 * is persisted via _bo_save_cache_state(). */
1348 tbm_sprd_bo_unmap(tbm_bo bo)
1350 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1352 tbm_bufmgr_sprd bufmgr_sprd;
1353 tbm_bo_sprd bo_sprd;
1355 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1356 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
1358 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1359 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1366 if (bo_sprd->map_cnt == 0)
1367 _bo_save_cache_state(bufmgr_sprd, bo_sprd);
1369 TBM_SPRD_DEBUG("gem:%d(%d) \n", bo_sprd->gem, bo_sprd->name);
/* Backend callback: lock a bo for 'device' access.  With dma-fence the
 * lock is a kernel fence acquired via DMABUF_IOCTL_GET_FENCE and queued
 * in bo_sprd->dma_fence[] (FIFO, max DMA_FENCE_LIST_MAX entries);
 * otherwise a userspace tgl lock keyed by flink name is taken.
 * Compiled out unless USE_BACKEND_LOCK. */
1375 tbm_sprd_bo_lock(tbm_bo bo, int device, int opt)
1377 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1379 tbm_bufmgr_sprd bufmgr_sprd;
1380 tbm_bo_sprd bo_sprd;
1382 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1383 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1385 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1386 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
1388 #if USE_BACKEND_LOCK
1391 if (bufmgr_sprd->use_dma_fence) {
1392 struct dma_buf_fence fence;
1394 memset(&fence, 0, sizeof(struct dma_buf_fence));
1396 /* Check if the given type is valid or not. */
/* only CPU and 3D devices participate in fencing; DMA access bit is
 * added for the 3D device */
1397 if (opt & TBM_OPTION_WRITE) {
1398 if (device == TBM_DEVICE_CPU)
1399 fence.type = DMA_BUF_ACCESS_WRITE;
1400 else if (device == TBM_DEVICE_3D)
1401 fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1403 TBM_SPRD_DEBUG("GET_FENCE is ignored(device type is not 3D/CPU),\n");
1406 } else if (opt & TBM_OPTION_READ) {
1407 if (device == TBM_DEVICE_CPU)
1408 fence.type = DMA_BUF_ACCESS_READ;
1409 else if (device == TBM_DEVICE_3D)
1410 fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1412 TBM_SPRD_DEBUG("GET_FENCE is ignored(device type is not 3D/CPU),\n");
1416 TBM_SPRD_ERROR("error Invalid argument\n");
1420 ret = ioctl(bo_sprd->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1422 TBM_SPRD_ERROR("error Can not set GET FENCE(%s)\n", strerror(errno));
/* record the fence in the first free slot so unlock can replay it */
1426 pthread_mutex_lock(&bo_sprd->mutex);
1428 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1429 if (bo_sprd->dma_fence[i].ctx == 0) {
1430 bo_sprd->dma_fence[i].type = fence.type;
1431 bo_sprd->dma_fence[i].ctx = fence.ctx;
1435 if (i == DMA_FENCE_LIST_MAX) {
1436 //TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim
1437 TBM_SPRD_ERROR("error fence list is full\n");
1439 pthread_mutex_unlock(&bo_sprd->mutex);
1441 TBM_SPRD_DEBUG("DMABUF_IOCTL_GET_FENCE! flink_id=%d dmabuf=%d\n",
1442 bo_sprd->name, bo_sprd->dmabuf);
1444 ret = _tgl_lock(bufmgr_sprd->tgl_fd, bo_sprd->name, opt);
1446 TBM_SPRD_DEBUG("lock tgl flink_id:%d\n", bo_sprd->name);
/* Backend callback: unlock a bo.  With dma-fence the oldest queued
 * fence is dequeued (FIFO shift of dma_fence[]) and released via
 * DMABUF_IOCTL_PUT_FENCE; otherwise the tgl lock is released.
 * Compiled out unless USE_BACKEND_LOCK. */
1456 tbm_sprd_bo_unlock(tbm_bo bo)
1458 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1460 tbm_bufmgr_sprd bufmgr_sprd;
1461 tbm_bo_sprd bo_sprd;
1463 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1464 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1466 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1467 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
1469 #if USE_BACKEND_LOCK
1472 if (bufmgr_sprd->use_dma_fence) {
1473 struct dma_buf_fence fence;
/* nothing queued: the lock was never fenced (or fencing ignored) */
1475 if (!bo_sprd->dma_fence[0].ctx) {
/* FIX: the call was missing its closing parenthesis
 * (`TBM_SPRD_DEBUG("...\n";`) -- a syntax error. */
1476 TBM_SPRD_DEBUG("FENCE not support or ignored,\n");
1480 if (!bo_sprd->dma_fence[0].type) {
1481 TBM_SPRD_DEBUG("device type is not 3D/CPU,\n");
/* dequeue the oldest fence and shift the FIFO down one slot */
1485 pthread_mutex_lock(&bo_sprd->mutex);
1486 fence.type = bo_sprd->dma_fence[0].type;
1487 fence.ctx = bo_sprd->dma_fence[0].ctx;
1489 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1490 bo_sprd->dma_fence[i - 1].type = bo_sprd->dma_fence[i].type;
1491 bo_sprd->dma_fence[i - 1].ctx = bo_sprd->dma_fence[i].ctx;
1493 bo_sprd->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1494 bo_sprd->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1495 pthread_mutex_unlock(&bo_sprd->mutex);
1497 ret = ioctl(bo_sprd->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1499 TBM_SPRD_ERROR("error Can not set PUT FENCE(%s)\n", strerror(errno));
1503 TBM_SPRD_DEBUG("DMABUF_IOCTL_PUT_FENCE! flink_id=%d dmabuf=%d\n",
1504 bo_sprd->name, bo_sprd->dmabuf);
1506 ret = _tgl_unlock(bufmgr_sprd->tgl_fd, bo_sprd->name);
1508 TBM_SPRD_DEBUG("unlock tgl flink_id:%d\n", bo_sprd->name);
/* tbm_sprd_bufmgr_deinit
 * Backend bufmgr_deinit hook: tears down everything init_tbm_bufmgr_priv
 * set up — leftover bos in the flink hash, the wayland auth server, the
 * master-fd registration, the drm device-name string, the cache state,
 * and finally the drm fd itself.
 * NOTE(review): interleaved lines (braces, local declarations such as
 * key/value, free of bufmgr_sprd) are missing from this copy of the file.
 */
1518 tbm_sprd_bufmgr_deinit(void *priv)
1520 SPRD_RETURN_IF_FAIL(priv != NULL);
1522 tbm_bufmgr_sprd bufmgr_sprd;
1524 bufmgr_sprd = (tbm_bufmgr_sprd)priv;
/* Drain and destroy the flink-name -> bo hash. */
1526 if (bufmgr_sprd->hashBos) {
1530 while (drmHashFirst(bufmgr_sprd->hashBos, &key, &value) > 0) {
1532 drmHashDelete(bufmgr_sprd->hashBos, key);
1535 drmHashDestroy(bufmgr_sprd->hashBos);
1536 bufmgr_sprd->hashBos = NULL;
/* bind_display is only set when bufmgr_bind_native_display succeeded. */
1539 if (bufmgr_sprd->bind_display)
1540 tbm_drm_helper_wl_auth_server_deinit();
/* Master fd was registered only in the display-server process. */
1542 if (tbm_backend_is_display_server())
1543 tbm_drm_helper_unset_tbm_master_fd();
/* device_name came from drmGetDeviceNameFromFd / get_auth_info (heap).
 * (The NULL guard is redundant — free(NULL) is a no-op.) */
1545 if (bufmgr_sprd->device_name)
1546 free(bufmgr_sprd->device_name);
1548 _bufmgr_deinit_cache_state(bufmgr_sprd);
1550 close(bufmgr_sprd->fd);
/* tbm_sprd_surface_supported_format
 * Backend surface_supported_format hook: hands the caller a heap-allocated
 * copy of tbm_sprd_color_format_list (TBM_COLOR_FORMAT_COUNT entries).
 * Ownership of *formats transfers to the caller, who must free() it.
 * NOTE(review): the return statements (success and calloc-failure paths)
 * are missing from this copy of the file.
 */
1556 tbm_sprd_surface_supported_format(uint32_t **formats, uint32_t *num)
1558 uint32_t *color_formats;
1560 color_formats = (uint32_t *)calloc(1,
1561 sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT)
1562 if (color_formats == NULL)
/* Copy the static format table into the caller-owned buffer. */
1565 memcpy(color_formats, tbm_sprd_color_format_list,
1566 sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
1568 *formats = color_formats;
1569 *num = TBM_COLOR_FORMAT_COUNT;
1576 * @brief get the plane data of the surface.
1577 * @param[in] width : the width of the surface
1578 * @param[in] height : the height of the surface
1579 * @param[in] format : the format of the surface
1580 * @param[in] plane_idx : the index of the plane
1581 * @param[out] size : the size of the plane
1582 * @param[out] offset : the offset of the plane
1583 * @param[out] pitch : the pitch of the plane
1584 * @param[out] bo_idx : the bo index of the plane
1585 * @return 1 if this function succeeds, otherwise 0.
/* tbm_sprd_surface_get_plane_data
 * Backend surface_get_plane_data hook: computes size/offset/pitch and the
 * backing-bo index for one plane of a surface, switching on the tbm format.
 * Pitches are aligned to TBM_SURFACE_ALIGNMENT_PITCH_RGB/_YUV and plane
 * sizes to TBM_SURFACE_ALIGNMENT_PLANE via SIZE_ALIGN.
 * NOTE(review): many lines are missing from this copy of the file — the
 * 'switch (format)' header, bpp assignments, offset/bo_idx assignments,
 * 'break's and the final output-parameter stores; comments below describe
 * only the visible statements.
 */
1588 tbm_sprd_surface_get_plane_data(int width, int height,
1589 tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
1590 uint32_t *pitch, int *bo_idx)
1598 int _align_height = 0;
/* --- 16bpp packed RGB variants: single plane --- */
1602 case TBM_FORMAT_XRGB4444:
1603 case TBM_FORMAT_XBGR4444:
1604 case TBM_FORMAT_RGBX4444:
1605 case TBM_FORMAT_BGRX4444:
1606 case TBM_FORMAT_ARGB4444:
1607 case TBM_FORMAT_ABGR4444:
1608 case TBM_FORMAT_RGBA4444:
1609 case TBM_FORMAT_BGRA4444:
1610 case TBM_FORMAT_XRGB1555:
1611 case TBM_FORMAT_XBGR1555:
1612 case TBM_FORMAT_RGBX5551:
1613 case TBM_FORMAT_BGRX5551:
1614 case TBM_FORMAT_ARGB1555:
1615 case TBM_FORMAT_ABGR1555:
1616 case TBM_FORMAT_RGBA5551:
1617 case TBM_FORMAT_BGRA5551:
1618 case TBM_FORMAT_RGB565:
/* pitch = bytes per row ((width*bpp)/8) rounded up to the RGB alignment. */
1621 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1622 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 24bpp packed RGB: single plane --- */
1626 case TBM_FORMAT_RGB888:
1627 case TBM_FORMAT_BGR888:
1630 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1631 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 32bpp packed RGB: single plane --- */
1635 case TBM_FORMAT_XRGB8888:
1636 case TBM_FORMAT_XBGR8888:
1637 case TBM_FORMAT_RGBX8888:
1638 case TBM_FORMAT_BGRX8888:
1639 case TBM_FORMAT_ARGB8888:
1640 case TBM_FORMAT_ABGR8888:
1641 case TBM_FORMAT_RGBA8888:
1642 case TBM_FORMAT_BGRA8888:
1645 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
1646 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- packed YUV (4:2:2 interleaved / AYUV): single plane --- */
1651 case TBM_FORMAT_YUYV:
1652 case TBM_FORMAT_YVYU:
1653 case TBM_FORMAT_UYVY:
1654 case TBM_FORMAT_VYUY:
1655 case TBM_FORMAT_AYUV:
1658 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1659 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 2-plane semi-planar 4:2:0 ---
 * index 0 = Y plane, [7:0] Y
 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian (NV21)
 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian (NV12)
 */
1670 case TBM_FORMAT_NV12:
1671 case TBM_FORMAT_NV21:
1673 if (plane_idx == 0) {
/* Y plane: full-resolution, height also aligned. */
1675 _pitch = SIZE_ALIGN(width , TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1676 _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1677 _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
1679 } else if (plane_idx == 1) {
/* Interleaved CbCr plane: starts after the Y data, half height. */
1680 _offset = width * height;
1681 _pitch = SIZE_ALIGN(width , TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1682 _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1683 _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 2-plane semi-planar 4:2:2: chroma plane keeps full height --- */
1688 case TBM_FORMAT_NV16:
1689 case TBM_FORMAT_NV61:
1691 //if(plane_idx == 0)
1694 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1695 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1700 //else if( plane_idx ==1 )
1703 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1704 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 3-plane planar YUV ---
 * index 0: Y plane, [7:0] Y
 * index 1: Cb plane, [7:0] Cb   (YUV order)
 * index 2: Cr plane, [7:0] Cr
 * index 1: Cr plane, [7:0] Cr   (YVU order)
 * index 2: Cb plane, [7:0] Cb
 * Corresponds to NATIVE_BUFFER_FORMAT_YV12 / NATIVE_BUFFER_FORMAT_I420.
 */
1722 case TBM_FORMAT_YUV410:
1723 case TBM_FORMAT_YVU410:
1726 case TBM_FORMAT_YUV411:
1727 case TBM_FORMAT_YVU411:
1728 case TBM_FORMAT_YUV420:
1729 case TBM_FORMAT_YVU420:
1731 //if(plane_idx == 0)
/* Y plane: full resolution. */
1734 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1735 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1740 //else if( plane_idx == 1 )
/* First chroma plane: half width, half height. */
1743 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1744 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
1749 //else if (plane_idx == 2 )
/* Second chroma plane: same geometry as the first. */
1752 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1753 _size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 3-plane 4:2:2: chroma planes keep full height --- */
1757 case TBM_FORMAT_YUV422:
1758 case TBM_FORMAT_YVU422:
1760 //if(plane_idx == 0)
1763 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1764 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1769 //else if( plane_idx == 1 )
1772 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1773 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1778 //else if (plane_idx == 2 )
1781 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1782 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
/* --- 3-plane 4:4:4: all planes full resolution --- */
1786 case TBM_FORMAT_YUV444:
1787 case TBM_FORMAT_YVU444:
1789 //if(plane_idx == 0)
1792 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1793 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1798 //else if( plane_idx == 1 )
1801 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1802 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1807 //else if (plane_idx == 2 )
1810 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1811 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* tbm_sprd_bo_get_flags
 * Backend bo_get_flags hook: returns the TBM memory flags the bo was
 * created with (stored in flags_tbm), or 0 on bad arguments.
 */
1829 tbm_sprd_bo_get_flags(tbm_bo bo)
1831 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1833 tbm_bo_sprd bo_sprd;
1835 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1836 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1838 return bo_sprd->flags_tbm;
/* tbm_sprd_bufmgr_bind_native_display
 * Backend bufmgr_bind_native_display hook: starts the wayland drm-auth
 * server on the given native display so clients can authenticate against
 * this backend's drm fd.  On success the display is remembered in
 * bind_display (checked later by tbm_sprd_bufmgr_deinit).  Returns 0 on
 * failure; the success return is missing from this copy of the file.
 */
1842 tbm_sprd_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *NativeDisplay)
1844 tbm_bufmgr_sprd bufmgr_sprd;
1846 bufmgr_sprd = tbm_backend_get_priv_from_bufmgr(bufmgr);
1847 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
1849 if (!tbm_drm_helper_wl_auth_server_init(NativeDisplay, bufmgr_sprd->fd,
1850 bufmgr_sprd->device_name, 0)) {
1851 TBM_SPRD_ERROR("fail to tbm_drm_helper_wl_server_init\n");
1855 bufmgr_sprd->bind_display = NativeDisplay;
/* Module entry points: loader prototype, backend version record, and the
 * tbmModuleData symbol the tbm core resolves when dlopen()ing this
 * backend.  NOTE(review): the version-record initializer fields are
 * missing from this copy of the file. */
1860 MODULEINITPPROTO(init_tbm_bufmgr_priv);
1862 static TBMModuleVersionInfo SprdVersRec = {
1868 TBMModuleData tbmModuleData = { &SprdVersRec, init_tbm_bufmgr_priv};
/* init_tbm_bufmgr_priv
 * Module initializer: allocates the backend-private bufmgr state, acquires
 * a drm fd (master fd in the display server, authenticated fd in clients),
 * probes dmabuf_sync support, initializes cache state, allocates the
 * tbm_bufmgr_backend vtable, wires up every backend hook, and registers it
 * with the tbm core.  Cleanup on failure unwinds through the goto labels
 * at the bottom.
 * NOTE(review): interleaved lines (braces, 'return 0/1', some labels,
 * close(fp), free(bufmgr_sprd)) are missing from this copy of the file;
 * comments describe only the visible statements.
 */
1871 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
1873 tbm_bufmgr_backend bufmgr_backend;
1874 tbm_bufmgr_sprd bufmgr_sprd;
1880 bufmgr_sprd = calloc(1, sizeof(struct _tbm_bufmgr_sprd));
1882 TBM_SPRD_ERROR("fail to alloc bufmgr_sprd!\n");
/* Display server: own (or open) the drm master fd and publish it. */
1886 if (tbm_backend_is_display_server()) {
1887 bufmgr_sprd->fd = tbm_drm_helper_get_master_fd();
1888 if (bufmgr_sprd->fd < 0) {
1889 bufmgr_sprd->fd = _tbm_sprd_open_drm();
1890 if (bufmgr_sprd->fd < 0) {
1891 TBM_SPRD_ERROR("fail to open drm!\n");
1896 tbm_drm_helper_set_tbm_master_fd(bufmgr_sprd->fd);
1898 bufmgr_sprd->device_name = drmGetDeviceNameFromFd(bufmgr_sprd->fd);
1899 if (!bufmgr_sprd->device_name) {
1900 TBM_SPRD_ERROR("fail to get device name!\n");
1901 tbm_drm_helper_unset_tbm_master_fd();
1902 goto fail_get_device_name;
/* Client: obtain an authenticated fd + device name from the server. */
1905 if (!tbm_drm_helper_get_auth_info(&(bufmgr_sprd->fd), &(bufmgr_sprd->device_name), NULL)) {
1906 TBM_SPRD_ERROR("fail to get auth drm info!\n");
1907 goto fail_get_auth_info;
/* Hash table mapping flink names to bos, used by import/export. */
1912 bufmgr_sprd->hashBos = drmHashCreate();
1914 //Check if the tbm manager supports dma fence or not.
1915 fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
1918 int length = read(fp, buf, 1);
1920 if (length == 1 && buf[0] == '1')
1921 bufmgr_sprd->use_dma_fence = 1;
1926 if (!_bufmgr_init_cache_state(bufmgr_sprd)) {
1927 TBM_SPRD_ERROR("fail to init bufmgr cache state\n");
1928 goto fail_init_cache_state;
1931 bufmgr_backend = tbm_backend_alloc();
1932 if (!bufmgr_backend) {
1933 TBM_SPRD_ERROR("fail to alloc backend!\n");
1934 goto fail_alloc_backend;
/* Wire every backend hook into the vtable. */
1937 bufmgr_backend->priv = (void *)bufmgr_sprd;
1938 bufmgr_backend->bufmgr_deinit = tbm_sprd_bufmgr_deinit;
1939 bufmgr_backend->bo_size = tbm_sprd_bo_size;
1940 bufmgr_backend->bo_alloc = tbm_sprd_bo_alloc;
1941 bufmgr_backend->bo_free = tbm_sprd_bo_free;
1942 bufmgr_backend->bo_import = tbm_sprd_bo_import;
1943 bufmgr_backend->bo_import_fd = tbm_sprd_bo_import_fd;
1944 bufmgr_backend->bo_export = tbm_sprd_bo_export;
1945 bufmgr_backend->bo_export_fd = tbm_sprd_bo_export_fd;
1946 bufmgr_backend->bo_get_handle = tbm_sprd_bo_get_handle;
1947 bufmgr_backend->bo_map = tbm_sprd_bo_map;
1948 bufmgr_backend->bo_unmap = tbm_sprd_bo_unmap;
1949 bufmgr_backend->surface_get_plane_data = tbm_sprd_surface_get_plane_data;
1950 bufmgr_backend->surface_supported_format = tbm_sprd_surface_supported_format;
1951 bufmgr_backend->bo_get_flags = tbm_sprd_bo_get_flags;
1952 bufmgr_backend->bo_lock = tbm_sprd_bo_lock;
1953 bufmgr_backend->bo_unlock = tbm_sprd_bo_unlock;
1954 bufmgr_backend->bufmgr_bind_native_display = tbm_sprd_bufmgr_bind_native_display;
1956 if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
1957 TBM_SPRD_ERROR("fail to init backend!\n");
1958 goto fail_init_backend;
/* Opt-in debug logging via the TBM_SPRD_DEBUG environment variable. */
1965 env = getenv("TBM_SPRD_DEBUG");
1968 TBM_SPRD_ERROR("TBM_SPRD_DEBUG=%s\n", env);
1974 TBM_SPRD_DEBUG("DMABUF FENCE is %s\n",
1975 bufmgr_sprd->use_dma_fence ? "supported!" : "NOT supported!");
1976 TBM_SPRD_DEBUG("fd:%d\n", bufmgr_sprd->fd);
/* Error unwinding: each label releases what was acquired above it. */
1981 tbm_backend_free(bufmgr_backend);
1983 _bufmgr_deinit_cache_state(bufmgr_sprd);
1984 fail_init_cache_state:
1985 if (bufmgr_sprd->hashBos)
1986 drmHashDestroy(bufmgr_sprd->hashBos);
1987 if (tbm_backend_is_display_server())
1988 tbm_drm_helper_unset_tbm_master_fd();
1989 fail_get_device_name:
1990 close(bufmgr_sprd->fd);