1 /**************************************************************************
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
43 #include <sys/ioctl.h>
44 #include <sys/types.h>
51 #include <tbm_bufmgr.h>
52 #include <tbm_bufmgr_backend.h>
53 #include <drm/sprd_drm.h>
55 #include <tbm_surface.h>
56 #include <tbm_drm_helper.h>
59 #include "tbm_bufmgr_tgl.h"
/* Build-time switches and file-scope state for the SPRD TBM backend. */
61 //#define USE_CONTIG_ONLY
/* Number of entries in tbm_sprd_color_format_list (declared below). */
65 #define TBM_COLOR_FORMAT_COUNT 4
68 #define LOG_TAG "TBM_BACKEND"
/* Debug-log toggle consumed by TBM_SPRD_DEBUG (low bit enables output). */
70 static int bDebug = 0;
/* DRM driver name handed to drmOpen(). */
72 #define SPRD_DRM_NAME "sprd"
/* Cached process basename used as the log prefix; filled lazily. */
77 static char app_name[128] = {0, };
78 static int initialized = 0;
/* Lazily resolve this process's basename from /proc/self/cmdline into
 * app_name, for use as the "[%s]" prefix in the log macros below.
 * NOTE(review): declarations and cleanup (f, slash, fclose) fall outside
 * this excerpt — confirm fclose(f) runs on every path. */
85 /* get the application name */
86 f = fopen("/proc/self/cmdline", "r");
90 if (fgets(app_name, 100, f) == NULL) {
97 slash = strrchr(app_name, '/');
/* strlen(slash) == strlen(slash + 1) + 1, so the NUL terminator moves too. */
99 memmove(app_name, slash + 1, strlen(slash));
/* Logging macros: colorized error output plus opt-in debug output.
 * The second, empty pair is the no-logging branch of an #if/#else whose
 * conditional lines are not visible in this excerpt. */
106 #define TBM_SPRD_ERROR(fmt, args...) LOGE("\033[31m" "[%s] " fmt "\033[0m", _target_name(), ##args)
107 #define TBM_SPRD_DEBUG(fmt, args...) if (bDebug&01) LOGD("[%s] " fmt, _target_name(), ##args)
109 #define TBM_SPRD_ERROR(...)
110 #define TBM_SPRD_DEBUG(...)
/* Round 'value' up to the next multiple of 'base' (base must be a power of 2). */
113 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
/* Alignment requirements for surface planes/pitches (bytes). */
115 #define TBM_SURFACE_ALIGNMENT_PLANE (64)
116 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (128)
117 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
120 /* check condition */
/* Log the failed condition and return from the calling (void) function. */
121 #define SPRD_RETURN_IF_FAIL(cond) {\
123 TBM_SPRD_ERROR("[%s] : '%s' failed.\n", __FUNCTION__, #cond);\
/* Same, but returns 'val' from a value-returning function. */
127 #define SPRD_RETURN_VAL_IF_FAIL(cond, val) {\
129 TBM_SPRD_ERROR("[%s] : '%s' failed.\n", __FUNCTION__, #cond);\
/* ioctl payloads and access bits for the (out-of-tree) dma-buf fence
 * interface used when the bufmgr runs with use_dma_fence. */
134 struct dma_buf_info {
136 unsigned int fence_supported;
137 unsigned int padding;
/* Access types carried in dma_buf_fence.type. */
140 #define DMA_BUF_ACCESS_READ 0x1
141 #define DMA_BUF_ACCESS_WRITE 0x2
142 #define DMA_BUF_ACCESS_DMA 0x4
143 #define DMA_BUF_ACCESS_MAX 0x8
/* Max outstanding fences tracked per bo (dma_fence[] in _tbm_bo_sprd). */
145 #define DMA_FENCE_LIST_MAX 5
147 struct dma_buf_fence {
152 #define DMABUF_IOCTL_BASE 'F'
153 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
155 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
156 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
157 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
/* tgl key reserved for the global (all-bo) cache-flush counter. */
160 #define GLOBAL_KEY ((unsigned int)(-1))
/* Cache maintenance request bits, combined into flush requests below. */
162 #define TBM_SPRD_CACHE_INV 0x01 /**< cache invalidate */
163 #define TBM_SPRD_CACHE_CLN 0x02 /**< cache clean */
164 #define TBM_SPRD_CACHE_ALL 0x10 /**< cache all */
165 #define TBM_SPRD_CACHE_FLUSH (TBM_SPRD_CACHE_INV|TBM_SPRD_CACHE_CLN) /**< cache flush */
166 #define TBM_SPRD_CACHE_FLUSH_ALL (TBM_SPRD_CACHE_FLUSH|TBM_SPRD_CACHE_ALL) /**< cache flush all */
/* Device classes used by the cache-state machine in _bo_set_cache_state():
 * CPU-side accessors are cache aware, DMA-side accessors cache oblivious. */
170 DEVICE_CA, /* cache aware device */
171 DEVICE_CO /* cache oblivious device */
/* Per-bo cache bookkeeping packed into a single word so it can be stored
 * in and fetched from the tgl driver as one value (cache_state.val). */
174 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
176 union _tbm_bo_cache_state {
179 unsigned int cntFlush:16; /*Flush all index for sync */
180 unsigned int isCached:1;
181 unsigned int isDirtied:2;
185 typedef struct _tbm_bufmgr_sprd *tbm_bufmgr_sprd;
186 typedef struct _tbm_bo_sprd *tbm_bo_sprd;
/* Hash-table payload: refcount + bo private, keyed by flink name. */
188 typedef struct _sprd_private {
190 struct _tbm_bo_sprd *bo_priv;
193 /* tbm buffor object for sprd */
194 struct _tbm_bo_sprd {
197 unsigned int name; /* FLINK ID */
199 unsigned int gem; /* GEM Handle */
201 unsigned int dmabuf; /* fd for dmabuf */
203 void *pBase; /* virtual address */
/* Allocation flags in both vocabularies (SPRD_BO_* / TBM_BO_*). */
207 unsigned int flags_sprd;
208 unsigned int flags_tbm;
/* mutex guards the dma_fence[] list below. */
212 pthread_mutex_t mutex;
213 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
217 tbm_bo_cache_state cache_state;
218 unsigned int map_cnt;
221 /* tbm bufmgr private for sprd */
222 struct _tbm_bufmgr_sprd {
/* Human-readable device names indexed by TBM_DEVICE_*. */
234 char *STR_DEVICE[] = {
/* Formats advertised by tbm_sprd_surface_supported_format(). */
250 uint32_t tbm_sprd_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
258 _tgl_get_version(int fd)
260 struct tgl_ver_data data;
263 err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
265 TBM_SPRD_ERROR("error(%s) %s:%d\n", strerror(errno));
269 TBM_SPRD_DEBUG("tgl version is (%u, %u).\n", data.major, data.minor);
/* Register 'key' with the tgl driver so it can later be locked/unlocked.
 * Returns 1 on success, 0 on ioctl failure (lines elided here). */
276 _tgl_init(int fd, unsigned int key)
278 struct tgl_reg_data data;
/* 1-second timeout guards against a lock holder that never releases. */
282 data.timeout_ms = 1000;
284 err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
286 TBM_SPRD_ERROR("error(%s) key:%d\n", strerror(errno), key);
/* Unregister 'key' from the tgl driver (inverse of _tgl_init). */
294 _tgl_destroy(int fd, unsigned int key)
296 struct tgl_reg_data data;
300 err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
302 TBM_SPRD_ERROR("error(%s) key:%d\n", strerror(errno), key);
310 _tgl_lock(int fd, unsigned int key, int opt)
312 struct tgl_lock_data data;
313 enum tgl_type_data tgl_type;
317 case TBM_OPTION_READ:
318 tgl_type = TGL_TYPE_READ;
320 case TBM_OPTION_WRITE:
321 tgl_type = TGL_TYPE_WRITE;
324 tgl_type = TGL_TYPE_NONE;
329 data.type = tgl_type;
331 err = ioctl(fd, TGL_IOCTL_LOCK, data);
333 TBM_SPRD_ERROR("error(%s) key:%d opt:%d\n",
334 strerror(errno), key, opt);
342 _tgl_unlock(int fd, unsigned int key)
344 struct tgl_lock_data data;
348 data.type = TGL_TYPE_NONE;
350 err = ioctl(fd, TGL_IOCTL_UNLOCK, data);
352 TBM_SPRD_ERROR("error(%s) key:%d\n",
353 strerror(errno), key);
/* Store the 32-bit user value 'val' under 'key' in the tgl driver
 * (used to persist a bo's packed cache_state and the global flush count). */
362 _tgl_set_data(int fd, unsigned int key, unsigned int val)
364 struct tgl_usr_data data;
370 err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
372 TBM_SPRD_ERROR("error(%s) key:%d\n",
373 strerror(errno), key);
/* Fetch the 32-bit user value stored under 'key'; when 'locked' is
 * non-NULL also report the driver's lock status through it. */
380 static inline unsigned int
381 _tgl_get_data(int fd, unsigned int key, unsigned int *locked)
383 struct tgl_usr_data data = { 0, };
388 err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
390 TBM_SPRD_ERROR("error(%s) key:%d\n",
391 strerror(errno), key);
396 *locked = (unsigned int)data.status;
/* Open the sprd DRM device.  First tries drmOpen("sprd"); on failure,
 * falls back to enumerating drm "card*" devices via udev and picking the
 * one whose parent sysname is "sprd-drm".  Returns the open fd, or a
 * negative value on failure. */
403 _tbm_sprd_open_drm(void)
406 struct udev_device *drm_device = NULL;
407 struct udev_list_entry *entry = NULL;
408 struct udev_enumerate *e;
409 const char *filepath;
415 fd = drmOpen(SPRD_DRM_NAME, NULL);
420 TBM_SPRD_DEBUG("warning fail to open drm. search drm-device by udev\n");
424 TBM_SPRD_ERROR("udev_new() failed.\n");
428 e = udev_enumerate_new(udev);
429 udev_enumerate_add_match_subsystem(e, "drm");
430 udev_enumerate_add_match_sysname(e, "card[0-9]*");
431 udev_enumerate_scan_devices(e);
433 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
434 struct udev_device *device, *device_parent;
436 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
437 udev_list_entry_get_name(entry));
438 device_parent = udev_device_get_parent(device);
439 /* Not need unref device_parent. device_parent and device have same refcnt */
441 if (strcmp(udev_device_get_sysname(device_parent), "sprd-drm") == 0) {
443 TBM_SPRD_DEBUG("Found render device: '%s' (%s)\n",
444 udev_device_get_syspath(drm_device),
445 udev_device_get_sysname(device_parent));
449 udev_device_unref(device);
452 udev_enumerate_unref(e);
454 /* Get device file path. */
455 filepath = udev_device_get_devnode(drm_device);
457 TBM_SPRD_ERROR("udev_device_get_devnode() failed.\n");
458 udev_device_unref(drm_device);
463 udev_device_unref(drm_device);
466 /* Open DRM device file and check validity. */
467 fd = open(filepath, O_RDWR | O_CLOEXEC);
/* NOTE(review): the next two error messages contain a %s conversion but
 * pass no matching argument — undefined behavior in the varargs call.
 * They should pass filepath / strerror(errno) respectively. */
469 TBM_SPRD_ERROR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
474 TBM_SPRD_ERROR("fstat() failed %s.\n");
/* NOTE(review): trailing comma — the argument list continues on an
 * elided line; verify the format string has a matching conversion. */
479 TBM_SPRD_ERROR("warning fail to open drm\n",
/* Issue a CPU-cache maintenance operation (invalidate/clean/flush, ranged
 * or all) for 'bo_sprd' via the DRM_SPRD_GEM_CACHE_OP ioctl.  A NULL
 * bo_sprd upgrades the request to a flush-all.  The whole body is
 * compiled out unless the kernel advertises cache-op support. */
488 _sprd_bo_cache_flush(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int flags)
490 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
492 /* cache flush is managed by kernel side when using dma-fence. */
493 if (bufmgr_sprd->use_dma_fence)
495 // TODO: The tm1 kernel does not support ioctl for cache flush right now.
496 // The drm in tm1 kernel has to support cache_flush to turn on this feature(TBM_SRPD_CACHE_FLUSH).
/* NOTE(review): "SRPD" looks like a transposition of "SPRD"; harmless as
 * long as the feature stays off, but confirm before enabling it. */
497 #if TBM_SRPD_CACHE_FLUSH
498 struct drm_sprd_gem_cache_op cache_op = {0, };
501 /* if bo_sprd is null, do cache_flush_all */
504 cache_op.usr_addr = (uint64_t)((uint32_t)bo_sprd->pBase);
505 cache_op.size = bo_sprd->size;
507 flags = TBM_SPRD_CACHE_FLUSH_ALL;
509 cache_op.usr_addr = 0;
513 if (flags & TBM_SPRD_CACHE_INV) {
514 if (flags & TBM_SPRD_CACHE_ALL)
515 cache_op.flags |= SPRD_DRM_CACHE_INV_ALL;
517 cache_op.flags |= SPRD_DRM_CACHE_INV_RANGE;
520 if (flags & TBM_SPRD_CACHE_CLN) {
521 if (flags & TBM_SPRD_CACHE_ALL)
522 cache_op.flags |= SPRD_DRM_CACHE_CLN_ALL;
524 cache_op.flags |= SPRD_DRM_CACHE_CLN_RANGE;
527 if (flags & TBM_SPRD_CACHE_ALL)
528 cache_op.flags |= SPRD_DRM_ALL_CACHES_CORES;
530 ret = drmCommandWriteRead(bufmgr_sprd->fd, DRM_SPRD_GEM_CACHE_OP, &cache_op,
533 TBM_SPRD_ERROR("error fail to flush the cache.\n");
/* Register the bo's flink name with the tgl driver and (for newly created
 * bos) seed its packed cache state to "clean, uncached, unflushed".
 * No-op when dma-fence manages coherency. */
542 _bo_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int import)
544 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
545 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
547 if (bufmgr_sprd->use_dma_fence)
550 _tgl_init(bufmgr_sprd->tgl_fd, bo_sprd->name);
553 tbm_bo_cache_state cache_state;
556 cache_state.data.isDirtied = DEVICE_NONE;
557 cache_state.data.isCached = 0;
558 cache_state.data.cntFlush = 0;
/* Persist the packed state under the bo's flink name. */
560 _tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, cache_state.val);
/* Update the bo's cache state for a new mapping on 'device' with access
 * 'opt', and issue the cache invalidate/clean the transition requires
 * (CA = cache-aware CPU side, CO = cache-oblivious DMA side). */
568 _bo_set_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int device, int opt)
571 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
572 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
575 unsigned short cntFlush = 0;
577 if (bufmgr_sprd->use_dma_fence)
/* Non-cachable bos never need maintenance. */
580 if (bo_sprd->flags_sprd & SPRD_BO_NONCACHABLE)
583 /* get cache state of a bo */
584 bo_sprd->cache_state.val = _tgl_get_data(bufmgr_sprd->tgl_fd, bo_sprd->name, NULL);
586 /* get global cache flush count */
587 cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
/* NOTE(review): 'opt' (a TBM_OPTION_* access flag) is compared against
 * TBM_DEVICE_CPU (a device id).  The else-branch logic mirrors the CPU
 * branch for DMA devices, so this almost certainly should read
 * 'device == TBM_DEVICE_CPU' — confirm against callers. */
589 if (opt == TBM_DEVICE_CPU) {
590 if (bo_sprd->cache_state.data.isDirtied == DEVICE_CO &&
591 bo_sprd->cache_state.data.isCached)
592 need_flush = TBM_SPRD_CACHE_INV;
594 bo_sprd->cache_state.data.isCached = 1;
595 if (opt & TBM_OPTION_WRITE)
596 bo_sprd->cache_state.data.isDirtied = DEVICE_CA;
598 if (bo_sprd->cache_state.data.isDirtied != DEVICE_CA)
599 bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
/* DMA-side access: clean dirty CPU caches before the device reads. */
602 if (bo_sprd->cache_state.data.isDirtied == DEVICE_CA &&
603 bo_sprd->cache_state.data.isCached &&
604 bo_sprd->cache_state.data.cntFlush == cntFlush)
605 need_flush = TBM_SPRD_CACHE_CLN | TBM_SPRD_CACHE_ALL;
607 if (opt & TBM_OPTION_WRITE)
608 bo_sprd->cache_state.data.isDirtied = DEVICE_CO;
610 if (bo_sprd->cache_state.data.isDirtied != DEVICE_CO)
611 bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
/* A flush-all bumps the global counter so other bos can skip theirs. */
616 if (need_flush & TBM_SPRD_CACHE_ALL)
617 _tgl_set_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush))
619 /* call cache flush */
620 _sprd_bo_cache_flush(bufmgr_sprd, bo_sprd, need_flush);
622 TBM_SPRD_DEBUG("\tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
623 bo_sprd->cache_state.data.isCached,
624 bo_sprd->cache_state.data.isDirtied,
/* Persist the bo's in-memory cache state (with the current global flush
 * count folded in) back into the tgl driver.  No-op under dma-fence. */
634 _bo_save_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
637 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
638 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
640 if (bufmgr_sprd->use_dma_fence)
643 unsigned short cntFlush = 0;
645 /* get global cache flush count */
646 cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
648 /* save global cache flush count */
649 bo_sprd->cache_state.data.cntFlush = cntFlush;
650 _tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, bo_sprd->cache_state.val);
/* Drop the bo's tgl registration when the bo is destroyed.
 * No-op under dma-fence. */
657 _bo_destroy_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
659 SPRD_RETURN_IF_FAIL(bo_sprd != NULL);
660 SPRD_RETURN_IF_FAIL(bufmgr_sprd != NULL);
662 if (bufmgr_sprd->use_dma_fence)
665 _tgl_destroy(bufmgr_sprd->tgl_fd, bo_sprd->name);
/* Open the tgl device (primary path, then fallback path), verify its
 * version, and register the GLOBAL_KEY slot that holds the process-wide
 * cache-flush counter.  No-op under dma-fence. */
669 _bufmgr_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd)
671 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
673 if (bufmgr_sprd->use_dma_fence)
676 /* open tgl fd for saving cache flush data */
677 bufmgr_sprd->tgl_fd = open(tgl_devfile, O_RDWR);
679 if (bufmgr_sprd->tgl_fd < 0) {
680 bufmgr_sprd->tgl_fd = open(tgl_devfile1, O_RDWR);
681 if (bufmgr_sprd->tgl_fd < 0) {
682 TBM_SPRD_ERROR("fail to open global_lock:%s\n",
688 if (!_tgl_get_version(bufmgr_sprd->tgl_fd)) {
689 TBM_SPRD_ERROR("fail to get tgl_version. tgl init failed.\n");
690 close(bufmgr_sprd->tgl_fd);
695 if (!_tgl_init(bufmgr_sprd->tgl_fd, GLOBAL_KEY)) {
696 TBM_SPRD_ERROR("fail to initialize the tgl\n");
697 close(bufmgr_sprd->tgl_fd);
/* Close the tgl fd opened by _bufmgr_init_cache_state.
 * No-op under dma-fence. */
706 _bufmgr_deinit_cache_state(tbm_bufmgr_sprd bufmgr_sprd)
708 SPRD_RETURN_IF_FAIL(bufmgr_sprd != NULL);
710 if (bufmgr_sprd->use_dma_fence)
713 if (bufmgr_sprd->tgl_fd >= 0)
714 close(bufmgr_sprd->tgl_fd);
717 #ifndef USE_CONTIG_ONLY
/* Translate TBM_BO_* allocation flags into SPRD_BO_* flags:
 * scanout buffers must be physically contiguous, everything else is
 * non-contiguous system memory; cacheability flags map 1:1. */
719 _get_sprd_flag_from_tbm(unsigned int ftbm)
721 unsigned int flags = 0;
724 * TBM_BO_DEFAULT => ION_HEAP_ID_MASK_SYSTEM
725 * TBM_BO_SCANOUT => ION_HEAP_ID_MASK_MM
726 * TBM_BO_VENDOR => ION_HEAP_ID_MASK_OVERLAY
727 * To be updated appropriately once DRM-GEM supports different heap id masks.
730 if (ftbm & TBM_BO_SCANOUT)
731 flags = SPRD_BO_CONTIG;
733 flags = SPRD_BO_NONCONTIG | SPRD_BO_DEV_SYSTEM;
735 if (ftbm & TBM_BO_WC)
737 else if (ftbm & TBM_BO_NONCACHABLE)
738 flags |= SPRD_BO_NONCACHABLE;
/* Inverse of _get_sprd_flag_from_tbm: recover TBM_BO_* flags from the
 * SPRD_BO_* flags reported by the kernel for an imported bo. */
744 _get_tbm_flag_from_sprd(unsigned int fsprd)
746 unsigned int flags = 0;
748 if (fsprd & SPRD_BO_NONCONTIG)
749 flags |= TBM_BO_DEFAULT;
751 flags |= TBM_BO_SCANOUT;
753 if (fsprd & SPRD_BO_WC)
755 else if (fsprd & SPRD_BO_CACHABLE)
756 flags |= TBM_BO_DEFAULT;
758 flags |= TBM_BO_NONCACHABLE;
/* Resolve a GEM handle to its global flink name via DRM_IOCTL_GEM_FLINK.
 * Returns the name, or (on the elided error path) a failure value. */
765 _get_name(int fd, unsigned int gem)
767 struct drm_gem_flink arg = {0,};
770 if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
771 TBM_SPRD_ERROR("error fail to get flink gem=%d\n", gem);
775 return (unsigned int)arg.name;
779 _sprd_bo_handle(tbm_bo_sprd bo_sprd, int device)
781 tbm_bo_handle bo_handle;
782 memset(&bo_handle, 0x0, sizeof(uint64_t));
785 case TBM_DEVICE_DEFAULT:
787 bo_handle.u32 = (uint32_t)bo_sprd->gem;
790 if (!bo_sprd->pBase) {
791 struct drm_sprd_gem_mmap arg = {0,};
793 arg.handle = bo_sprd->gem;
794 arg.size = bo_sprd->size;
795 if (drmCommandWriteRead(bo_sprd->fd, DRM_SPRD_GEM_MMAP, &arg, sizeof(arg))) {
796 TBM_SPRD_ERROR("error Cannot usrptr gem=%d\n", bo_sprd->gem);
797 return (tbm_bo_handle) NULL;
799 bo_sprd->pBase = (void *)((uint32_t)arg.mapped);
802 bo_handle.ptr = (void *)bo_sprd->pBase;
806 if (!bo_sprd->dmabuf) {
807 struct drm_prime_handle arg = {0, };
808 arg.handle = bo_sprd->gem;
809 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
810 TBM_SPRD_ERROR("error Cannot dmabuf=%d\n", bo_sprd->gem);
811 return (tbm_bo_handle) NULL;
813 bo_sprd->dmabuf = arg.fd;
816 bo_handle.u32 = (uint32_t)bo_sprd->dmabuf;
823 //TODO : Add ioctl for GSP MAP once available.
824 TBM_SPRD_DEBUG("%s In case TBM_DEVICE_MM: \n", __FUNCTION_);
826 if (!bo_sprd->dmabuf) {
827 struct drm_prime_handle arg = {0, };
829 arg.handle = bo_sprd->gem;
830 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
831 TBM_SPRD_ERROR("error Cannot dmabuf=%d\n", bo_sprd->gem);
832 return (tbm_bo_handle) NULL;
834 bo_sprd->dmabuf = arg.fd;
837 bo_handle.u32 = (uint32_t)bo_sprd->dmabuf;
841 bo_handle.ptr = (void *) NULL;
/* Backend callback: report the allocated size (bytes) of 'bo'. */
849 tbm_sprd_bo_size(tbm_bo bo)
851 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
855 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
856 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
858 return bo_sprd->size;
/* Backend callback: allocate a GEM bo of 'size' bytes with TBM flags
 * 'flags', set up its flink name, cache state and (under dma-fence) its
 * dma-buf fd, then register it in the flink-name hash.  Returns the bo
 * private on success. */
862 tbm_sprd_bo_alloc(tbm_bo bo, int size, int flags)
864 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
866 tbm_bufmgr_sprd bufmgr_sprd;
867 unsigned int sprd_flags;
870 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
871 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
873 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
875 TBM_SPRD_ERROR("error fail to allocate the bo private\n");
879 #ifdef USE_CONTIG_ONLY
880 flags = TBM_BO_SCANOUT;
881 sprd_flags = SPRD_BO_CONTIG;
883 sprd_flags = _get_sprd_flag_from_tbm(flags);
/* Tiny scanout buffers need not be contiguous. */
884 if ((flags & TBM_BO_SCANOUT) && (size <= 4 * 1024))
885 sprd_flags |= SPRD_BO_NONCONTIG;
886 #endif // USE_CONTIG_ONLY
888 struct drm_sprd_gem_create arg = {0, };
889 arg.size = (uint64_t)size;
890 arg.flags = sprd_flags;
891 if (drmCommandWriteRead(bufmgr_sprd->fd, DRM_SPRD_GEM_CREATE, &arg,
893 TBM_SPRD_ERROR("error Cannot create bo(flag:%x, size:%d)\n",
894 arg.flags, (unsigned int)arg.size);
899 bo_sprd->fd = bufmgr_sprd->fd;
900 bo_sprd->gem = arg.handle;
901 bo_sprd->size = size;
902 bo_sprd->flags_tbm = flags;
903 bo_sprd->flags_sprd = sprd_flags;
904 bo_sprd->name = _get_name(bo_sprd->fd, bo_sprd->gem);
906 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 0)) {
907 TBM_SPRD_ERROR("error fail init cache state(%d)\n", bo_sprd->name);
912 pthread_mutex_init(&bo_sprd->mutex, NULL);
/* dma-fence locking needs the dma-buf fd up front. */
914 if (bufmgr_sprd->use_dma_fence
915 && !bo_sprd->dmabuf) {
916 struct drm_prime_handle arg = {0, };
918 arg.handle = bo_sprd->gem;
919 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
920 TBM_SPRD_ERROR("error Cannot dmabuf=%d\n", bo_sprd->gem);
924 bo_sprd->dmabuf = arg.fd;
/* Register in the flink-name hash so imports can share this bo. */
928 PrivGem *privGem = calloc(1, sizeof(PrivGem));
930 TBM_SPRD_ERROR("error Fail to calloc PrivGem\n");
935 privGem->ref_count = 1;
936 privGem->bo_priv = bo_sprd;
937 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)privGem) < 0)
938 TBM_SPRD_ERROR("error Cannot insert bo to Hash(%d)\n", bo_sprd->name);
940 TBM_SPRD_DEBUG("%s size:%d, gem:%d(%d), flags:%d(%d)\n",
941 __FUNCTION__, bo_sprd->size,
942 bo_sprd->gem, bo_sprd->name,
945 return (void *)bo_sprd;
/* Backend callback: tear down a bo — unmap, close its dma-buf fd, drop
 * its hash refcount (removing the entry at zero), destroy its cache
 * state, and close the GEM handle. */
949 tbm_sprd_bo_free(tbm_bo bo)
952 tbm_bufmgr_sprd bufmgr_sprd;
957 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
958 SPRD_RETURN_IF_FAIL(bufmgr_sprd != NULL);
960 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
961 SPRD_RETURN_IF_FAIL(bo_sprd != NULL);
963 TBM_SPRD_DEBUG("size:%d, gem:%d(%d)\n",
964 bo_sprd->size, bo_sprd->gem, bo_sprd->name);
966 if (bo_sprd->pBase) {
967 if (munmap(bo_sprd->pBase, bo_sprd->size) == -1)
968 TBM_SPRD_ERROR("error fail to munmap.\n");
972 if (bo_sprd->dmabuf) {
973 close(bo_sprd->dmabuf);
977 /* delete bo from hash */
978 PrivGem *privGem = NULL;
981 ret = drmHashLookup(bufmgr_sprd->hashBos, bo_sprd->name, (void **)&privGem);
983 privGem->ref_count--;
984 if (privGem->ref_count == 0) {
985 drmHashDelete(bufmgr_sprd->hashBos, bo_sprd->name);
990 TBM_SPRD_DEBUG("warning Cannot find bo to Hash(%d), ret=%d\n", bo_sprd->name, ret);
993 _bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
995 /* Free gem handle */
996 struct drm_gem_close arg = {0, };
997 memset(&arg, 0, sizeof(arg));
998 arg.handle = bo_sprd->gem;
999 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_GEM_CLOSE, &arg))
1000 TBM_SPRD_ERROR("error fail to DRM_IOCTL_GEM_CLOSE\n");
/* Backend callback: import a bo by flink name.  Returns the cached
 * private (bumped elsewhere) if the name is already hashed; otherwise
 * opens the GEM object, queries its flags, initializes cache state and
 * dma-buf fd, and registers a fresh hash entry.  Error paths unwind in
 * reverse order via gotos. */
1007 tbm_sprd_bo_import(tbm_bo bo, unsigned int key)
1009 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, NULL);
1011 tbm_bufmgr_sprd bufmgr_sprd;
1012 tbm_bo_sprd bo_sprd;
1013 PrivGem *privGem = NULL;
1016 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1017 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, NULL);
/* Fast path: already imported into this process. */
1019 ret = drmHashLookup(bufmgr_sprd->hashBos, key, (void **)&privGem);
1021 return privGem->bo_priv;
1023 struct drm_sprd_gem_info info = {0, };
1024 struct drm_gem_open arg = {0, };
1027 if (drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1028 TBM_SPRD_ERROR("error Cannot open gem name=%d\n", key);
1032 info.handle = arg.handle;
1033 if (drmCommandWriteRead(bufmgr_sprd->fd,
1036 sizeof(struct drm_sprd_gem_info))) {
1037 TBM_SPRD_ERROR("error Cannot get gem info=%d\n", key);
1041 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1043 TBM_SPRD_ERROR("error fail to allocate the bo private\n");
1047 bo_sprd->fd = bufmgr_sprd->fd;
1048 bo_sprd->gem = arg.handle;
1049 bo_sprd->size = arg.size;
1050 bo_sprd->flags_sprd = info.flags;
1051 bo_sprd->name = key;
1052 #ifdef USE_CONTIG_ONLY
1053 bo_sprd->flags_sprd = SPRD_BO_CONTIG;
1054 bo_sprd->flags_tbm |= TBM_BO_SCANOUT;
1056 bo_sprd->flags_tbm = _get_tbm_flag_from_sprd(bo_sprd->flags_sprd);
1059 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 1)) {
1060 TBM_SPRD_ERROR("error fail init cache state(%d)\n", bo_sprd->name);
1061 goto fail_init_cache;
1064 if (!bo_sprd->dmabuf) {
1065 struct drm_prime_handle arg = {0, };
1067 arg.handle = bo_sprd->gem;
1068 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1069 TBM_SPRD_ERROR("error Cannot dmabuf=%d\n", bo_sprd->gem);
1070 goto fail_prime_handle_to_fd;
1072 bo_sprd->dmabuf = arg.fd;
1075 /* add bo to hash */
1076 privGem = calloc(1, sizeof(PrivGem));
1078 TBM_SPRD_ERROR("error Fail to alloc\n");
1079 goto fail_alloc_gem_priv;
1082 privGem->ref_count = 1;
1083 privGem->bo_priv = bo_sprd;
1084 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)privGem) < 0)
1085 TBM_SPRD_ERROR("error Cannot insert bo to Hash(%d)\n", bo_sprd->name);
1087 TBM_SPRD_DEBUG("size:%d, gem:%d(%d), flags:%d(%d)\n",
1088 bo_sprd->size, bo_sprd->gem, bo_sprd->name,
1089 bo_sprd->flags_tbm, bo_sprd->flags_sprd);
1091 return (void *)bo_sprd;
/* Unwind: close dmabuf, drop cache state, close the opened GEM handle. */
1093 fail_alloc_gem_priv:
1094 if (bo_sprd->dmabuf)
1095 close(bo_sprd->dmabuf);
1096 fail_prime_handle_to_fd:
1097 _bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
1103 struct drm_gem_close gem_close_arg = {arg.handle, 0};
1104 drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_GEM_CLOSE, &gem_close_arg);
/* Backend callback: import a bo from a dma-buf fd.  Converts the fd to a
 * GEM handle via PRIME, resolves its flink name, and reuses the hashed
 * private when the same underlying object is already imported.  The bo
 * size is taken from lseek(fd, 0, SEEK_END) when the kernel supports it,
 * falling back to the GEM info size. */
1110 tbm_sprd_bo_import_fd(tbm_bo bo, tbm_fd key)
1112 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, NULL);
1114 tbm_bufmgr_sprd bufmgr_sprd;
1115 tbm_bo_sprd bo_sprd;
1119 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1120 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, NULL);
1122 //getting handle from fd
1123 struct drm_prime_handle arg = {0, };
1127 if (drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1128 TBM_SPRD_ERROR("error bo:%p Cannot get gem handle from fd:%d (%s)\n",
1129 bo, arg.fd, strerror(errno));
1134 name = _get_name(bufmgr_sprd->fd, gem);
1136 TBM_SPRD_ERROR("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1137 bo, gem, key, strerror(errno));
/* Same flink name + same handle => same object; share the private. */
1141 if (!drmHashLookup(bufmgr_sprd->hashBos, name, (void **)&privGem)) {
1142 if (gem == privGem->bo_priv->gem)
1143 return privGem->bo_priv;
/* NOTE(review): real_size is unsigned int but receives lseek's off_t
 * result; failure (-1) only matches the == -1 test below via unsigned
 * wraparound, and sizes > UINT_MAX would truncate.  Consider off_t. */
1146 unsigned int real_size;
1147 struct drm_sprd_gem_info info = {0, };
1149 /* Determine size of bo. The fd-to-handle ioctl really should
1150 * return the size, but it doesn't. If we have kernel 3.12 or
1151 * later, we can lseek on the prime fd to get the size. Older
1152 * kernels will just fail, in which case we fall back to the
1153 * provided (estimated or guess size). */
1154 real_size = lseek(key, 0, SEEK_END);
1157 if (drmCommandWriteRead(bufmgr_sprd->fd,
1160 sizeof(struct drm_sprd_gem_info))) {
1161 TBM_SPRD_ERROR("error bo:%p Cannot get gem info from gem:%d, fd:%d (%s)\n",
1162 bo, gem, key, strerror(errno));
1166 if (real_size == -1)
1167 real_size = info.size;
1169 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1171 TBM_SPRD_ERROR("error bo:%p fail to allocate the bo private\n", bo);
1175 bo_sprd->fd = bufmgr_sprd->fd;
1177 bo_sprd->size = real_size;
1178 bo_sprd->flags_sprd = info.flags;
1179 bo_sprd->flags_tbm = _get_tbm_flag_from_sprd(bo_sprd->flags_sprd);
1181 bo_sprd->name = name;
1182 if (!bo_sprd->name) {
1183 TBM_SPRD_ERROR("error bo:%p Cannot get name from gem:%d, fd:%d (%s)\n",
1184 bo, gem, key, strerror(errno));
1185 goto fail_check_name;
1188 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 1)) {
1189 TBM_SPRD_ERROR("error fail init cache state(%d)\n", bo_sprd->name);
1190 goto fail_init_cache;
1193 /* add bo to hash */
1194 privGem = calloc(1, sizeof(PrivGem));
1196 TBM_SPRD_ERROR("error Fail to callocprivGem\n");
1197 goto fail_alloc_gem_priv;
1200 privGem->ref_count = 1;
1201 privGem->bo_priv = bo_sprd;
1202 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)privGem) < 0) {
1203 TBM_SPRD_ERROR("error bo:%p Cannot insert bo to Hash(%d) from gem:%d, fd:%d\n",
1204 bo, bo_sprd->name, gem, key);
1207 TBM_SPRD_DEBUG("bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1209 bo_sprd->gem, bo_sprd->name,
1212 bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1215 return (void *)bo_sprd;
1217 fail_alloc_gem_priv:
1218 _bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
/* Backend callback: export the bo as a flink name, flinking lazily if
 * the bo was never named.  Returns 0 on failure (elided path). */
1226 tbm_sprd_bo_export(tbm_bo bo)
1228 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1230 tbm_bo_sprd bo_sprd;
1232 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1233 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1235 if (!bo_sprd->name) {
1236 bo_sprd->name = _get_name(bo_sprd->fd, bo_sprd->gem);
1237 if (!bo_sprd->name) {
1238 TBM_SPRD_ERROR("error Cannot get name\n");
1243 TBM_SPRD_DEBUG("size:%d, gem:%d(%d), flags:%d(%d)\n",
1244 bo_sprd->size, bo_sprd->gem, bo_sprd->name,
1245 bo_sprd->flags_tbm, bo_sprd->flags_sprd);
1247 return (unsigned int)bo_sprd->name;
/* Backend callback: export the bo as a dma-buf fd via PRIME.
 * Ownership of the returned fd passes to the caller. */
1251 tbm_sprd_bo_export_fd(tbm_bo bo)
1253 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, -1);
1255 tbm_bo_sprd bo_sprd;
1258 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1259 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, -1);
1261 struct drm_prime_handle arg = {0, };
1263 arg.handle = bo_sprd->gem;
1264 ret = drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
1266 TBM_SPRD_ERROR("error bo:%p Cannot dmabuf=%d (%s)\n",
1267 bo, bo_sprd->gem, strerror(errno));
/* Propagate drmIoctl's negative return as the error fd. */
1268 return (tbm_fd) ret;
1271 TBM_SPRD_DEBUG("bo:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1273 bo_sprd->gem, bo_sprd->name,
1276 bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1279 return (tbm_fd)arg.fd;
/* Backend callback: return the device-specific handle for 'bo' without
 * touching cache state (see tbm_sprd_bo_map for the cached variant). */
1283 static tbm_bo_handle
1284 tbm_sprd_bo_get_handle(tbm_bo bo, int device)
1286 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1288 tbm_bo_handle bo_handle;
1289 tbm_bo_sprd bo_sprd;
1291 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1292 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, (tbm_bo_handle) NULL);
1294 if (!bo_sprd->gem) {
1295 TBM_SPRD_ERROR("error Cannot map gem=%d\n", bo_sprd->gem);
1296 return (tbm_bo_handle) NULL;
1299 TBM_SPRD_DEBUG("gem:%d(%d), %s\n",
1300 bo_sprd->gem, bo_sprd->name, STR_DEVICE[device]);
1302 /*Get mapped bo_handle*/
1303 bo_handle = _sprd_bo_handle(bo_sprd, device);
1304 if (bo_handle.ptr == NULL) {
1305 TBM_SPRD_ERROR("error Cannot get handle: gem:%d, device:%d\n",
1306 bo_sprd->gem, device);
1307 return (tbm_bo_handle) NULL;
/* Backend callback: map 'bo' for 'device' with access 'opt'.  On the
 * first concurrent mapping (map_cnt == 0) runs the cache-state machine
 * so CPU/DMA transitions get the required cache maintenance. */
1313 static tbm_bo_handle
1314 tbm_sprd_bo_map(tbm_bo bo, int device, int opt)
1316 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, (tbm_bo_handle) NULL);
1318 tbm_bo_handle bo_handle;
1319 tbm_bo_sprd bo_sprd;
1320 tbm_bufmgr_sprd bufmgr_sprd;
1322 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1323 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, (tbm_bo_handle) NULL);
1325 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1326 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, (tbm_bo_handle) NULL);
1328 if (!bo_sprd->gem) {
1329 TBM_SPRD_ERROR("error Cannot map gem=%d\n", bo_sprd->gem);
1330 return (tbm_bo_handle) NULL;
1333 TBM_SPRD_DEBUG("%s gem:%d(%d), %s, %s\n",
1334 __FUNCTION__, bo_sprd->gem, bo_sprd->name, STR_DEVICE[device], STR_OPT[opt]);
1336 /*Get mapped bo_handle*/
1337 bo_handle = _sprd_bo_handle(bo_sprd, device);
1338 if (bo_handle.ptr == NULL) {
1339 TBM_SPRD_ERROR("error Cannot get handle: gem:%d, device:%d, opt:%d\n",
1340 bo_sprd->gem, device, opt);
1341 return (tbm_bo_handle) NULL;
/* Only the outermost map triggers cache maintenance. */
1344 if (bo_sprd->map_cnt == 0)
1345 _bo_set_cache_state(bufmgr_sprd, bo_sprd, device, opt);
/* Backend callback: unmap 'bo'.  When the last mapping goes away
 * (map_cnt back to 0) persist the cache state into the tgl driver. */
1353 tbm_sprd_bo_unmap(tbm_bo bo)
1355 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1357 tbm_bufmgr_sprd bufmgr_sprd;
1358 tbm_bo_sprd bo_sprd;
1360 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1361 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
1363 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1364 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1371 if (bo_sprd->map_cnt == 0)
1372 _bo_save_cache_state(bufmgr_sprd, bo_sprd);
1374 TBM_SPRD_DEBUG("gem:%d(%d) \n", bo_sprd->gem, bo_sprd->name);
/* Backend callback: lock 'bo' for 'device'/'opt'.  Under dma-fence,
 * acquires a kernel fence (DMABUF_IOCTL_GET_FENCE) and queues it in the
 * per-bo fence list; otherwise takes the tgl lock keyed by flink name. */
1380 tbm_sprd_bo_lock(tbm_bo bo, int device, int opt)
1382 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1384 tbm_bufmgr_sprd bufmgr_sprd;
1385 tbm_bo_sprd bo_sprd;
1387 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1388 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1390 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1391 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
1393 #if USE_BACKEND_LOCK
1396 if (bufmgr_sprd->use_dma_fence) {
1397 struct dma_buf_fence fence;
1399 memset(&fence, 0, sizeof(struct dma_buf_fence));
1401 /* Check if the given type is valid or not. */
1402 if (opt & TBM_OPTION_WRITE) {
1403 if (device == TBM_DEVICE_CPU)
1404 fence.type = DMA_BUF_ACCESS_WRITE;
1405 else if (device == TBM_DEVICE_3D)
1406 fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
1408 TBM_SPRD_DEBUG("GET_FENCE is ignored(device type is not 3D/CPU),\n");
1411 } else if (opt & TBM_OPTION_READ) {
1412 if (device == TBM_DEVICE_CPU)
1413 fence.type = DMA_BUF_ACCESS_READ;
1414 else if (device == TBM_DEVICE_3D)
1415 fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
1417 TBM_SPRD_DEBUG("GET_FENCE is ignored(device type is not 3D/CPU),\n");
1421 TBM_SPRD_ERROR("error Invalid argument\n");
1425 ret = ioctl(bo_sprd->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
1427 TBM_SPRD_ERROR("error Can not set GET FENCE(%s)\n", strerror(errno));
/* Record the fence in the first free slot, under the bo mutex. */
1431 pthread_mutex_lock(&bo_sprd->mutex);
1433 for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
1434 if (bo_sprd->dma_fence[i].ctx == 0) {
1435 bo_sprd->dma_fence[i].type = fence.type;
1436 bo_sprd->dma_fence[i].ctx = fence.ctx;
1440 if (i == DMA_FENCE_LIST_MAX) {
1441 //TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim
1442 TBM_SPRD_ERROR("error fence list is full\n");
1444 pthread_mutex_unlock(&bo_sprd->mutex);
1446 TBM_SPRD_DEBUG("DMABUF_IOCTL_GET_FENCE! flink_id=%d dmabuf=%d\n",
1447 bo_sprd->name, bo_sprd->dmabuf);
1449 ret = _tgl_lock(bufmgr_sprd->tgl_fd, bo_sprd->name, opt);
1451 TBM_SPRD_DEBUG("lock tgl flink_id:%d\n", bo_sprd->name);
1461 tbm_sprd_bo_unlock(tbm_bo bo)
1463 SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);
1465 tbm_bufmgr_sprd bufmgr_sprd;
1466 tbm_bo_sprd bo_sprd;
1468 bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
1469 SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
1471 bufmgr_sprd = (tbm_bufmgr_sprd)tbm_backend_get_bufmgr_priv(bo);
1472 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
1474 #if USE_BACKEND_LOCK
1477 if (bufmgr_sprd->use_dma_fence) {
1478 struct dma_buf_fence fence;
1480 if (!bo_sprd->dma_fence[0].ctx) {
1481 TBM_SPRD_DEBUG("FENCE not support or ignored,\n";
1485 if (!bo_sprd->dma_fence[0].type) {
1486 TBM_SPRD_DEBUG("device type is not 3D/CPU,\n");
1490 pthread_mutex_lock(&bo_sprd->mutex);
1491 fence.type = bo_sprd->dma_fence[0].type;
1492 fence.ctx = bo_sprd->dma_fence[0].ctx;
1494 for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
1495 bo_sprd->dma_fence[i - 1].type = bo_sprd->dma_fence[i].type;
1496 bo_sprd->dma_fence[i - 1].ctx = bo_sprd->dma_fence[i].ctx;
1498 bo_sprd->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
1499 bo_sprd->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
1500 pthread_mutex_unlock(&bo_sprd->mutex);
1502 ret = ioctl(bo_sprd->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
1504 TBM_SPRD_ERROR("error Can not set PUT FENCE(%s)\n", strerror(errno));
1508 TBM_SPRD_DEBUG("DMABUF_IOCTL_PUT_FENCE! flink_id=%d dmabuf=%d\n",
1509 bo_sprd->name, bo_sprd->dmabuf);
1511 ret = _tgl_unlock(bufmgr_sprd->tgl_fd, bo_sprd->name);
1513 TBM_SPRD_DEBUG("unlock tgl flink_id:%d\n", bo_sprd->name);
/*
 * Tear down the sprd backend private data created by init_tbm_bufmgr_priv().
 * Order matters: drain the bo hash first (bos reference the bufmgr), then
 * undo display-server state, then release cache state and the DRM fd.
 */
tbm_sprd_bufmgr_deinit(void *priv)
	SPRD_RETURN_IF_FAIL(priv != NULL);

	tbm_bufmgr_sprd bufmgr_sprd;

	bufmgr_sprd = (tbm_bufmgr_sprd)priv;

	/* drain and destroy the flink-name -> bo hash table */
	if (bufmgr_sprd->hashBos) {
		while (drmHashFirst(bufmgr_sprd->hashBos, &key, &value) > 0) {
			drmHashDelete(bufmgr_sprd->hashBos, key);

		drmHashDestroy(bufmgr_sprd->hashBos);
		bufmgr_sprd->hashBos = NULL;

	/* undo the wayland auth server set up by bind_native_display */
	if (bufmgr_sprd->bind_display)
		tbm_drm_helper_wl_auth_server_deinit();

	/* only the display server registered a master fd at init time */
	if (tbm_backend_is_display_server())
		tbm_drm_helper_unset_tbm_master_fd();

	/* device_name came from drmGetDeviceNameFromFd / get_auth_info (malloc'd) */
	if (bufmgr_sprd->device_name)
		free(bufmgr_sprd->device_name);

	_bufmgr_deinit_cache_state(bufmgr_sprd);

	/* fd was opened (or received via auth) in init; close it last */
	close(bufmgr_sprd->fd);
/*
 * Report the color formats this backend supports.
 * Allocates a TBM_COLOR_FORMAT_COUNT-entry array, copies the static
 * tbm_sprd_color_format_list into it, and hands ownership to the caller
 * (libtbm frees it).  *num receives the entry count.
 */
tbm_sprd_surface_supported_format(uint32_t **formats, uint32_t *num)
	uint32_t *color_formats;

	/* caller-owned copy of the static format table */
	color_formats = (uint32_t *)calloc(1,
			sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
	if (color_formats == NULL)

	memcpy(color_formats, tbm_sprd_color_format_list,
	       sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);

	*formats = color_formats;
	*num = TBM_COLOR_FORMAT_COUNT;
1581 * @brief get the plane data of the surface.
1582 * @param[in] width : the width of the surface
1583 * @param[in] height : the height of the surface
1584 * @param[in] format : the format of the surface
 * @param[in] plane_idx : the plane index of the surface
1586 * @param[out] size : the size of the plane
1587 * @param[out] offset : the offset of the plane
1588 * @param[out] pitch : the pitch of the plane
 * @param[out] bo_idx : the index of the bo that backs the plane
1590 * @return 1 if this function succeeds, otherwise 0.
/*
 * Compute the per-plane layout (size/offset/pitch) and backing-bo index for
 * one plane of a surface, keyed on the tbm_format.  RGB pitches align to
 * TBM_SURFACE_ALIGNMENT_PITCH_RGB, YUV pitches to
 * TBM_SURFACE_ALIGNMENT_PITCH_YUV, and every plane size rounds up to
 * TBM_SURFACE_ALIGNMENT_PLANE.
 */
tbm_sprd_surface_get_plane_data(int width, int height,
	tbm_format format, int plane_idx, uint32_t *size, uint32_t *offset,
	uint32_t *pitch, int *bo_idx)
	/* chroma-plane height after vertical alignment (YUV semi-planar only) */
	int _align_height = 0;

	/* ---- packed 16bpp RGB: single plane ---- */
	case TBM_FORMAT_XRGB4444:
	case TBM_FORMAT_XBGR4444:
	case TBM_FORMAT_RGBX4444:
	case TBM_FORMAT_BGRX4444:
	case TBM_FORMAT_ARGB4444:
	case TBM_FORMAT_ABGR4444:
	case TBM_FORMAT_RGBA4444:
	case TBM_FORMAT_BGRA4444:
	case TBM_FORMAT_XRGB1555:
	case TBM_FORMAT_XBGR1555:
	case TBM_FORMAT_RGBX5551:
	case TBM_FORMAT_BGRX5551:
	case TBM_FORMAT_ARGB1555:
	case TBM_FORMAT_ABGR1555:
	case TBM_FORMAT_RGBA5551:
	case TBM_FORMAT_BGRA5551:
	case TBM_FORMAT_RGB565:
		/* (width * bpp) >> 3 converts bits-per-row to bytes-per-row */
		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

	/* ---- packed 24bpp RGB: single plane ---- */
	case TBM_FORMAT_RGB888:
	case TBM_FORMAT_BGR888:
		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

	/* ---- packed 32bpp RGB: single plane ---- */
	case TBM_FORMAT_XRGB8888:
	case TBM_FORMAT_XBGR8888:
	case TBM_FORMAT_RGBX8888:
	case TBM_FORMAT_BGRX8888:
	case TBM_FORMAT_ARGB8888:
	case TBM_FORMAT_ABGR8888:
	case TBM_FORMAT_RGBA8888:
	case TBM_FORMAT_BGRA8888:
		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

	/* ---- packed YUV (4:2:2 interleaved / AYUV): single plane ---- */
	case TBM_FORMAT_YUYV:
	case TBM_FORMAT_YVYU:
	case TBM_FORMAT_UYVY:
	case TBM_FORMAT_VYUY:
	case TBM_FORMAT_AYUV:
		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

	/*
	 * index 0 = Y plane, [7:0] Y
	 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
	 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
	 */
	case TBM_FORMAT_NV12:
	case TBM_FORMAT_NV21:
		/* plane 0: full-resolution Y; plane 1: interleaved CbCr at half height */
		if (plane_idx == 0) {
			_pitch = SIZE_ALIGN(width , TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
		} else if (plane_idx == 1) {
			/* chroma plane starts right after the unaligned luma area */
			_offset = width * height;
			_pitch = SIZE_ALIGN(width , TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
			_align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);

	/* ---- NV16/NV61: semi-planar 4:2:2, chroma plane is full height ---- */
	case TBM_FORMAT_NV16:
	case TBM_FORMAT_NV61:
		//if(plane_idx == 0)
			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

		//else if( plane_idx ==1 )
			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

	/*
	 * index 0: Y plane, [7:0] Y
	 * index 1: Cb plane, [7:0] Cb
	 * index 2: Cr plane, [7:0] Cr
	 * index 1: Cr plane, [7:0] Cr
	 * index 2: Cb plane, [7:0] Cb
	 *   NATIVE_BUFFER_FORMAT_YV12
	 *   NATIVE_BUFFER_FORMAT_I420
	 */
	case TBM_FORMAT_YUV410:
	case TBM_FORMAT_YVU410:
	case TBM_FORMAT_YUV411:
	case TBM_FORMAT_YVU411:
	case TBM_FORMAT_YUV420:
	case TBM_FORMAT_YVU420:
		/* 3-plane: Y at full res, each chroma plane at half width/height */
		//if(plane_idx == 0)
			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

		//else if( plane_idx == 1 )
			_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
			_size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);

		//else if (plane_idx == 2 )
			_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
			_size = SIZE_ALIGN(_pitch * (height / 2), TBM_SURFACE_ALIGNMENT_PLANE);

	/* ---- 3-plane 4:2:2: chroma half width, full height ---- */
	case TBM_FORMAT_YUV422:
	case TBM_FORMAT_YVU422:
		//if(plane_idx == 0)
			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

		//else if( plane_idx == 1 )
			_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
			_size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);

		//else if (plane_idx == 2 )
			_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
			_size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);

	/* ---- 3-plane 4:4:4: all planes at full resolution ---- */
	case TBM_FORMAT_YUV444:
	case TBM_FORMAT_YVU444:
		//if(plane_idx == 0)
			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

		//else if( plane_idx == 1 )
			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);

		//else if (plane_idx == 2 )
			_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
			_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* Return the TBM memory flags (flags_tbm) the bo was created with. */
tbm_sprd_bo_get_flags(tbm_bo bo)
	SPRD_RETURN_VAL_IF_FAIL(bo != NULL, 0);

	tbm_bo_sprd bo_sprd;

	bo_sprd = (tbm_bo_sprd)tbm_backend_get_bo_priv(bo);
	SPRD_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);

	return bo_sprd->flags_tbm;
1847 tbm_sprd_bufmgr_bind_native_display(tbm_bufmgr bufmgr, void *NativeDisplay)
1849 tbm_bufmgr_sprd bufmgr_sprd;
1851 bufmgr_sprd = tbm_backend_get_priv_from_bufmgr(bufmgr);
1852 SPRD_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
1854 if (!tbm_drm_helper_wl_auth_server_init(NativeDisplay, bufmgr_sprd->fd,
1855 bufmgr_sprd->device_name, 0)) {
1856 TBM_SPRD_ERROR("fail to tbm_drm_helper_wl_server_init\n");
1860 bufmgr_sprd->bind_display = NativeDisplay;
/* Prototype for the backend entry point libtbm invokes after dlopen. */
MODULEINITPPROTO(init_tbm_bufmgr_priv);

/* Module version record (vendor/version fields follow, not shown here). */
static TBMModuleVersionInfo SprdVersRec = {

/* Exported symbol libtbm looks up to discover this backend. */
TBMModuleData tbmModuleData = { &SprdVersRec, init_tbm_bufmgr_priv};
1876 init_tbm_bufmgr_priv(tbm_bufmgr bufmgr, int fd)
1878 tbm_bufmgr_backend bufmgr_backend;
1879 tbm_bufmgr_sprd bufmgr_sprd;
1885 bufmgr_sprd = calloc(1, sizeof(struct _tbm_bufmgr_sprd));
1887 TBM_SPRD_ERROR("fail to alloc bufmgr_sprd!\n");
1891 if (tbm_backend_is_display_server()) {
1892 bufmgr_sprd->fd = tbm_drm_helper_get_master_fd();
1893 if (bufmgr_sprd->fd < 0) {
1894 bufmgr_sprd->fd = _tbm_sprd_open_drm();
1895 if (bufmgr_sprd->fd < 0) {
1896 TBM_SPRD_ERROR("fail to open drm!\n");
1901 tbm_drm_helper_set_tbm_master_fd(bufmgr_sprd->fd);
1903 bufmgr_sprd->device_name = drmGetDeviceNameFromFd(bufmgr_sprd->fd);
1904 if (!bufmgr_sprd->device_name) {
1905 TBM_SPRD_ERROR("fail to get device name!\n");
1906 tbm_drm_helper_unset_tbm_master_fd();
1907 goto fail_get_device_name;
1910 if (!tbm_drm_helper_get_auth_info(&(bufmgr_sprd->fd), &(bufmgr_sprd->device_name), NULL)) {
1911 TBM_SPRD_ERROR("fail to get auth drm info!\n");
1912 goto fail_get_auth_info;
1917 bufmgr_sprd->hashBos = drmHashCreate();
1919 //Check if the tbm manager supports dma fence or not.
1920 fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
1923 int length = read(fp, buf, 1);
1925 if (length == 1 && buf[0] == '1')
1926 bufmgr_sprd->use_dma_fence = 1;
1931 if (!_bufmgr_init_cache_state(bufmgr_sprd)) {
1932 TBM_SPRD_ERROR("fail to init bufmgr cache state\n");
1933 goto fail_init_cache_state;
1936 bufmgr_backend = tbm_backend_alloc();
1937 if (!bufmgr_backend) {
1938 TBM_SPRD_ERROR("fail to alloc backend!\n");
1939 goto fail_alloc_backend;
1942 bufmgr_backend->priv = (void *)bufmgr_sprd;
1943 bufmgr_backend->bufmgr_deinit = tbm_sprd_bufmgr_deinit;
1944 bufmgr_backend->bo_size = tbm_sprd_bo_size;
1945 bufmgr_backend->bo_alloc = tbm_sprd_bo_alloc;
1946 bufmgr_backend->bo_free = tbm_sprd_bo_free;
1947 bufmgr_backend->bo_import = tbm_sprd_bo_import;
1948 bufmgr_backend->bo_import_fd = tbm_sprd_bo_import_fd;
1949 bufmgr_backend->bo_export = tbm_sprd_bo_export;
1950 bufmgr_backend->bo_export_fd = tbm_sprd_bo_export_fd;
1951 bufmgr_backend->bo_get_handle = tbm_sprd_bo_get_handle;
1952 bufmgr_backend->bo_map = tbm_sprd_bo_map;
1953 bufmgr_backend->bo_unmap = tbm_sprd_bo_unmap;
1954 bufmgr_backend->surface_get_plane_data = tbm_sprd_surface_get_plane_data;
1955 bufmgr_backend->surface_supported_format = tbm_sprd_surface_supported_format;
1956 bufmgr_backend->bo_get_flags = tbm_sprd_bo_get_flags;
1957 bufmgr_backend->bo_lock = tbm_sprd_bo_lock;
1958 bufmgr_backend->bo_unlock = tbm_sprd_bo_unlock;
1959 bufmgr_backend->bufmgr_bind_native_display = tbm_sprd_bufmgr_bind_native_display;
1961 if (!tbm_backend_init(bufmgr, bufmgr_backend)) {
1962 TBM_SPRD_ERROR("fail to init backend!\n");
1963 goto fail_init_backend;
1970 env = getenv("TBM_SPRD_DEBUG");
1973 TBM_SPRD_ERROR("TBM_SPRD_DEBUG=%s\n", env);
1979 TBM_SPRD_DEBUG("DMABUF FENCE is %s\n",
1980 bufmgr_sprd->use_dma_fence ? "supported!" : "NOT supported!");
1981 TBM_SPRD_DEBUG("fd:%d\n", bufmgr_sprd->fd);
1986 tbm_backend_free(bufmgr_backend);
1988 _bufmgr_deinit_cache_state(bufmgr_sprd);
1989 fail_init_cache_state:
1990 if (bufmgr_sprd->hashBos)
1991 drmHashDestroy(bufmgr_sprd->hashBos);
1992 if (tbm_backend_is_display_server())
1993 tbm_drm_helper_unset_tbm_master_fd();
1994 fail_get_device_name:
1995 close(bufmgr_sprd->fd);