1 /**************************************************************************
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
43 #include <sys/ioctl.h>
44 #include <sys/types.h>
51 #include <drm/sprd_drm.h>
53 #include <tbm_backend.h>
54 #include <tbm_drm_helper.h>
57 //#define USE_CONTIG_ONLY
62 #include "tbm_bufmgr_tgl.h"
65 #define TBM_COLOR_FORMAT_COUNT 4
67 #define SPRD_DRM_NAME "sprd"
69 #define STRERR_BUFSIZE 128
71 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
73 #define TBM_SURFACE_ALIGNMENT_PLANE (64)
74 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (128)
75 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
79 unsigned int fence_supported;
83 #define DMA_BUF_ACCESS_READ 0x1
84 #define DMA_BUF_ACCESS_WRITE 0x2
85 #define DMA_BUF_ACCESS_DMA 0x4
86 #define DMA_BUF_ACCESS_MAX 0x8
88 #define DMA_FENCE_LIST_MAX 5
90 struct dma_buf_fence {
95 #define DMABUF_IOCTL_BASE 'F'
96 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
98 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
99 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
100 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
103 #define GLOBAL_KEY ((unsigned int)(-1))
105 #define TBM_SPRD_CACHE_INV 0x01 /**< cache invalidate */
106 #define TBM_SPRD_CACHE_CLN 0x02 /**< cache clean */
107 #define TBM_SPRD_CACHE_ALL 0x10 /**< cache all */
108 #define TBM_SPRD_CACHE_FLUSH (TBM_SPRD_CACHE_INV|TBM_SPRD_CACHE_CLN) /**< cache flush */
109 #define TBM_SPRD_CACHE_FLUSH_ALL (TBM_SPRD_CACHE_FLUSH|TBM_SPRD_CACHE_ALL) /**< cache flush all */
113 DEVICE_CA, /* cache aware device */
114 DEVICE_CO /* cache oblivious device */
117 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
119 union _tbm_bo_cache_state {
122 unsigned int cntFlush:16; /*Flush all index for sync */
123 unsigned int isCached:1;
124 unsigned int isDirtied:2;
128 typedef struct _tbm_bufmgr_sprd *tbm_bufmgr_sprd;
129 typedef struct _tbm_bo_sprd *tbm_bo_sprd;
131 /* tbm buffor object for sprd */
132 struct _tbm_bo_sprd {
135 unsigned int name; /* FLINK ID */
137 unsigned int gem; /* GEM Handle */
139 unsigned int dmabuf; /* fd for dmabuf */
141 void *pBase; /* virtual address */
145 unsigned int flags_sprd;
146 unsigned int flags_tbm;
148 pthread_mutex_t mutex;
149 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
153 tbm_bo_cache_state cache_state;
154 unsigned int map_cnt;
156 tbm_bufmgr_sprd bufmgr_sprd;
159 /* tbm bufmgr private for sprd */
160 struct _tbm_bufmgr_sprd {
171 tbm_backend_bufmgr_func *bufmgr_func;
172 tbm_backend_bo_func *bo_func;
177 char *STR_DEVICE[] = {
193 uint32_t tbm_sprd_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
/* Query the tgl (Tizen global lock) kernel module version via
 * TGL_IOCTL_GET_VERSION and log it; on ioctl failure logs errno text.
 * NOTE(review): the error format has more specifiers than visible
 * arguments — presumably TBM_ERR supplies func/line itself, or lines are
 * elided in this extract; confirm against the macro definition. */
202 _tgl_get_version(int fd)
204 	struct tgl_ver_data data;
206 	char buf[STRERR_BUFSIZE];
208 	err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
210 		TBM_ERR("error(%s) %s:%d\n",
211 			strerror_r(errno, buf, STRERR_BUFSIZE));
215 	TBM_DBG("tgl version is (%u, %u).\n", data.major, data.minor);
/* Register a lock entry for `key` with the tgl module (TGL_IOCTL_REGISTER),
 * using a 1000 ms lock timeout. Returns nonzero on success (elided). */
221 _tgl_init(int fd, unsigned int key)
223 	struct tgl_reg_data data;
225 	char buf[STRERR_BUFSIZE];
228 	data.timeout_ms = 1000;
230 	err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
232 		TBM_ERR("error(%s) key:%d\n",
233 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Unregister the tgl lock entry for `key` (TGL_IOCTL_UNREGISTER);
 * counterpart of _tgl_init(). */
241 _tgl_destroy(int fd, unsigned int key)
243 	struct tgl_reg_data data;
245 	char buf[STRERR_BUFSIZE];
248 	err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
250 		TBM_ERR("error(%s) key:%d\n",
251 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Acquire the tgl lock for `key`. Maps TBM access options to tgl lock
 * types (READ->TGL_TYPE_READ, WRITE->TGL_TYPE_WRITE, anything else ->
 * TGL_TYPE_NONE) and issues TGL_IOCTL_LOCK. */
259 _tgl_lock(int fd, unsigned int key, int opt)
261 	struct tgl_lock_data data;
263 	char buf[STRERR_BUFSIZE];
264 	enum tgl_type_data tgl_type;
267 	case TBM_OPTION_READ:
268 		tgl_type = TGL_TYPE_READ;
270 	case TBM_OPTION_WRITE:
271 		tgl_type = TGL_TYPE_WRITE;
274 		tgl_type = TGL_TYPE_NONE;
279 	data.type = tgl_type;
281 	err = ioctl(fd, TGL_IOCTL_LOCK, &data);
283 		TBM_ERR("error(%s) key:%d opt:%d\n",
284 			strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
/* Release the tgl lock for `key` (TGL_IOCTL_UNLOCK, type NONE). */
292 _tgl_unlock(int fd, unsigned int key)
294 	struct tgl_lock_data data;
296 	char buf[STRERR_BUFSIZE];
299 	data.type = TGL_TYPE_NONE;
301 	err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
303 		TBM_ERR("error(%s) key:%d\n",
304 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Store the 32-bit user value `val` under `key` in the tgl module
 * (TGL_IOCTL_SET_DATA). Used to persist per-bo cache state and the
 * global flush counter across processes. */
312 _tgl_set_data(int fd, unsigned int key, unsigned int val)
314 	struct tgl_usr_data data;
316 	char buf[STRERR_BUFSIZE];
321 	err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
323 		TBM_ERR("error(%s) key:%d\n",
324 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
/* Fetch the 32-bit user value stored under `key` (TGL_IOCTL_GET_DATA).
 * When `locked` is non-NULL, also reports the entry's lock status
 * through it. Returns the stored value (return path elided). */
331 static inline unsigned int
332 _tgl_get_data(int fd, unsigned int key, unsigned int *locked)
334 	struct tgl_usr_data data = { 0, };
336 	char buf[STRERR_BUFSIZE];
340 	err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
342 		TBM_ERR("error(%s) key:%d\n",
343 			strerror_r(errno, buf, STRERR_BUFSIZE), key);
348 		*locked = (unsigned int)data.status;
355 _tbm_sprd_open_drm(void)
358 struct udev_device *drm_device = NULL;
359 struct udev_list_entry *entry = NULL;
360 struct udev_enumerate *e;
361 const char *filepath;
367 fd = drmOpen(SPRD_DRM_NAME, NULL);
372 TBM_DBG("warning fail to open drm. search drm-device by udev\n");
376 TBM_ERR("udev_new() failed.\n");
380 e = udev_enumerate_new(udev);
381 udev_enumerate_add_match_subsystem(e, "drm");
382 udev_enumerate_add_match_sysname(e, "card[0-9]*");
383 udev_enumerate_scan_devices(e);
385 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
386 struct udev_device *device, *device_parent;
388 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
389 udev_list_entry_get_name(entry));
390 device_parent = udev_device_get_parent(device);
391 /* Not need unref device_parent. device_parent and device have same refcnt */
393 if (strcmp(udev_device_get_sysname(device_parent), "sprd-drm") == 0) {
395 TBM_DBG("Found render device: '%s' (%s)\n",
396 udev_device_get_syspath(drm_device),
397 udev_device_get_sysname(device_parent));
401 udev_device_unref(device);
404 udev_enumerate_unref(e);
406 /* Get device file path. */
407 filepath = udev_device_get_devnode(drm_device);
409 TBM_ERR("udev_device_get_devnode() failed.\n");
410 udev_device_unref(drm_device);
415 udev_device_unref(drm_device);
418 /* Open DRM device file and check validity. */
419 fd = open(filepath, O_RDWR | O_CLOEXEC);
421 TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
426 TBM_ERR("fstat() failed %s.\n");
/* Perform CPU-cache maintenance for a bo via DRM_SPRD_GEM_CACHE_OP.
 * A NULL bo_sprd means "flush everything" (usr_addr 0, FLUSH_ALL flags).
 * No-op when dma-fence is used: the kernel manages coherency then.
 * NOTE(review): the #if gate below spells "SRPD" while the defined
 * macros spell "SPRD" (TBM_SPRD_CACHE_FLUSH), so this whole body
 * compiles out — apparently intentional per the TODO, but verify. */
437 _sprd_bo_cache_flush(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int flags)
439 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
441 /* cache flush is managed by kernel side when using dma-fence. */
442 if (bufmgr_sprd->use_dma_fence)
444 // TODO: The tm1 kernel does not support ioctl for cache flush right now.
445 // The drm in tm1 kernel has to support cache_flush to turn on this feature(TBM_SRPD_CACHE_FLUSH).
446 #if TBM_SRPD_CACHE_FLUSH
447 struct drm_sprd_gem_cache_op cache_op = {0, };
450 /* if bo_sprd is null, do cache_flush_all */
453 cache_op.usr_addr = (uint64_t)((uint32_t)bo_sprd->pBase);
454 cache_op.size = bo_sprd->size;
456 flags = TBM_SPRD_CACHE_FLUSH_ALL;
458 cache_op.usr_addr = 0;
/* Translate TBM cache flags into the kernel's INV/CLN range-or-all ops. */
462 if (flags & TBM_SPRD_CACHE_INV) {
463 if (flags & TBM_SPRD_CACHE_ALL)
464 cache_op.flags |= SPRD_DRM_CACHE_INV_ALL;
466 cache_op.flags |= SPRD_DRM_CACHE_INV_RANGE;
469 if (flags & TBM_SPRD_CACHE_CLN) {
470 if (flags & TBM_SPRD_CACHE_ALL)
471 cache_op.flags |= SPRD_DRM_CACHE_CLN_ALL;
473 cache_op.flags |= SPRD_DRM_CACHE_CLN_RANGE;
476 if (flags & TBM_SPRD_CACHE_ALL)
477 cache_op.flags |= SPRD_DRM_ALL_CACHES_CORES;
479 ret = drmCommandWriteRead(bufmgr_sprd->fd, DRM_SPRD_GEM_CACHE_OP, &cache_op,
482 TBM_ERR("error fail to flush the cache.\n");
/* Create the per-bo tgl entry (keyed by the bo's flink name) and, for a
 * freshly allocated bo, initialize its shared cache state to "clean":
 * not dirtied, not cached, flush count 0. `import` distinguishes
 * imported bos (state presumably left as-is for those — elided; verify). */
492 _bo_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int import)
495 	TBM_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
496 	TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
498 	if (bufmgr_sprd->use_dma_fence)
501 	_tgl_init(bufmgr_sprd->tgl_fd, bo_sprd->name);
503 	tbm_bo_cache_state cache_state;
506 		cache_state.data.isDirtied = DEVICE_NONE;
507 		cache_state.data.isCached = 0;
508 		cache_state.data.cntFlush = 0;
510 		_tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, cache_state.val);
518 _bo_set_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int device, int opt)
521 TBM_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
522 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
525 unsigned short cntFlush = 0;
527 if (bufmgr_sprd->use_dma_fence)
530 if (bo_sprd->flags_sprd & SPRD_BO_NONCACHABLE)
533 /* get cache state of a bo_sprd */
534 bo_sprd->cache_state.val = _tgl_get_data(bufmgr_sprd->tgl_fd, bo_sprd->name, NULL);
536 /* get global cache flush count */
537 cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
539 if (opt == TBM_DEVICE_CPU) {
540 if (bo_sprd->cache_state.data.isDirtied == DEVICE_CO &&
541 bo_sprd->cache_state.data.isCached)
542 need_flush = TBM_SPRD_CACHE_INV;
544 bo_sprd->cache_state.data.isCached = 1;
545 if (opt & TBM_OPTION_WRITE)
546 bo_sprd->cache_state.data.isDirtied = DEVICE_CA;
548 if (bo_sprd->cache_state.data.isDirtied != DEVICE_CA)
549 bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
552 if (bo_sprd->cache_state.data.isDirtied == DEVICE_CA &&
553 bo_sprd->cache_state.data.isCached &&
554 bo_sprd->cache_state.data.cntFlush == cntFlush)
555 need_flush = TBM_SPRD_CACHE_CLN | TBM_SPRD_CACHE_ALL;
557 if (opt & TBM_OPTION_WRITE)
558 bo_sprd->cache_state.data.isDirtied = DEVICE_CO;
560 if (bo_sprd->cache_state.data.isDirtied != DEVICE_CO)
561 bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
566 if (need_flush & TBM_SPRD_CACHE_ALL)
567 _tgl_set_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
569 /* call cache flush */
570 _sprd_bo_cache_flush(bufmgr_sprd, bo_sprd, need_flush);
572 TBM_DBG("\tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
573 bo_sprd->cache_state.data.isCached,
574 bo_sprd->cache_state.data.isDirtied,
/* Persist the bo's in-memory cache state back to tgl on unmap, stamping
 * it with the current global flush count so a later mapper can tell
 * whether a flush-all has intervened. */
584 _bo_save_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
587 	TBM_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
588 	TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
590 	if (bufmgr_sprd->use_dma_fence)
593 	unsigned short cntFlush = 0;
595 	/* get global cache flush count */
596 	cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
598 	/* save global cache flush count */
599 	bo_sprd->cache_state.data.cntFlush = cntFlush;
600 	_tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, bo_sprd->cache_state.val);
/* Drop the per-bo tgl entry when the bo is destroyed; no-op under
 * dma-fence. Counterpart of _bo_init_cache_state(). */
607 _bo_destroy_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
610 	TBM_RETURN_IF_FAIL(bo_sprd != NULL);
611 	TBM_RETURN_IF_FAIL(bufmgr_sprd != NULL);
613 	if (bufmgr_sprd->use_dma_fence)
616 	_tgl_destroy(bufmgr_sprd->tgl_fd, bo_sprd->name);
/* Open the tgl device (primary path, then fallback path), verify the
 * module version, and register the GLOBAL_KEY entry that holds the
 * bufmgr-wide cache flush counter. Skipped entirely under dma-fence. */
621 _bufmgr_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd)
624 	TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
626 	if (bufmgr_sprd->use_dma_fence)
629 	/* open tgl fd for saving cache flush data */
630 	bufmgr_sprd->tgl_fd = open(tgl_devfile, O_RDWR);
632 	if (bufmgr_sprd->tgl_fd < 0) {
633 		bufmgr_sprd->tgl_fd = open(tgl_devfile1, O_RDWR);
634 		if (bufmgr_sprd->tgl_fd < 0) {
635 			TBM_ERR("fail to open global_lock:%s\n",
641 	if (!_tgl_get_version(bufmgr_sprd->tgl_fd)) {
642 		TBM_ERR("fail to get tgl_version. tgl init failed.\n");
643 		close(bufmgr_sprd->tgl_fd);
647 	if (!_tgl_init(bufmgr_sprd->tgl_fd, GLOBAL_KEY)) {
648 		TBM_ERR("fail to initialize the tgl\n");
649 		close(bufmgr_sprd->tgl_fd);
/* Close the tgl fd on bufmgr teardown; no-op under dma-fence. */
658 _bufmgr_deinit_cache_state(tbm_bufmgr_sprd bufmgr_sprd)
661 	TBM_RETURN_IF_FAIL(bufmgr_sprd != NULL);
663 	if (bufmgr_sprd->use_dma_fence)
666 	if (bufmgr_sprd->tgl_fd >= 0)
667 		close(bufmgr_sprd->tgl_fd);
#ifndef USE_CONTIG_ONLY
/* Translate TBM memory-type flags into SPRD GEM allocation flags:
 * SCANOUT buffers must be physically contiguous; everything else is
 * non-contiguous system memory. WC/NONCACHABLE map onto the matching
 * SPRD caching attributes (the WC branch body is elided here). */
673 _get_sprd_flag_from_tbm(unsigned int ftbm)
675 	unsigned int flags = 0;
678 	 * TBM_BO_DEFAULT => ION_HEAP_ID_MASK_SYSTEM
679 	 * TBM_BO_SCANOUT => ION_HEAP_ID_MASK_MM
680 	 * TBM_BO_VENDOR => ION_HEAP_ID_MASK_OVERLAY
681 	 * To be updated appropriately once DRM-GEM supports different heap id masks.
684 	if (ftbm & TBM_BO_SCANOUT)
685 		flags = SPRD_BO_CONTIG;
687 		flags = SPRD_BO_NONCONTIG | SPRD_BO_DEV_SYSTEM;
689 	if (ftbm & TBM_BO_WC)
691 	else if (ftbm & TBM_BO_NONCACHABLE)
692 		flags |= SPRD_BO_NONCACHABLE;
/* Inverse of _get_sprd_flag_from_tbm(): recover TBM memory-type flags
 * from SPRD GEM flags (NONCONTIG->DEFAULT, contiguous->SCANOUT,
 * WC/CACHABLE/otherwise->WC/DEFAULT/NONCACHABLE; WC branch elided). */
698 _get_tbm_flag_from_sprd(unsigned int fsprd)
700 	unsigned int flags = 0;
702 	if (fsprd & SPRD_BO_NONCONTIG)
703 		flags |= TBM_BO_DEFAULT;
705 		flags |= TBM_BO_SCANOUT;
707 	if (fsprd & SPRD_BO_WC)
709 	else if (fsprd & SPRD_BO_CACHABLE)
710 		flags |= TBM_BO_DEFAULT;
712 		flags |= TBM_BO_NONCACHABLE;
/* Obtain the global flink name for a GEM handle via
 * DRM_IOCTL_GEM_FLINK; returns 0 on failure (elided). The name is the
 * cross-process key used for the bo hash and the tgl entries. */
719 _get_name(int fd, unsigned int gem)
721 	struct drm_gem_flink arg = {0,};
724 	if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
725 		TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
729 	return (unsigned int)arg.name;
733 _sprd_bo_handle(tbm_bo_sprd bo_sprd, int device)
735 tbm_bo_handle bo_handle;
737 memset(&bo_handle, 0x0, sizeof(uint64_t));
740 case TBM_DEVICE_DEFAULT:
742 bo_handle.u32 = (uint32_t)bo_sprd->gem;
745 if (!bo_sprd->pBase) {
746 struct drm_sprd_gem_mmap arg = {0,};
748 arg.handle = bo_sprd->gem;
749 arg.size = bo_sprd->size;
750 if (drmCommandWriteRead(bo_sprd->fd, DRM_SPRD_GEM_MMAP, &arg, sizeof(arg))) {
751 TBM_ERR("error Cannot usrptr gem=%d\n", bo_sprd->gem);
752 return (tbm_bo_handle) NULL;
754 bo_sprd->pBase = (void *)((uint32_t)arg.mapped);
757 bo_handle.ptr = (void *)bo_sprd->pBase;
761 if (!bo_sprd->dmabuf) {
762 struct drm_prime_handle arg = {0, };
763 arg.handle = bo_sprd->gem;
764 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
765 TBM_ERR("error Cannot dmabuf=%d\n", bo_sprd->gem);
766 return (tbm_bo_handle) NULL;
768 bo_sprd->dmabuf = arg.fd;
771 bo_handle.u32 = (uint32_t)bo_sprd->dmabuf;
778 //TODO : Add ioctl for GSP MAP once available.
779 TBM_DBG("%s In case TBM_DEVICE_MM: \n", __FUNCTION_);
781 if (!bo_sprd->dmabuf) {
782 struct drm_prime_handle arg = {0, };
784 arg.handle = bo_sprd->gem;
785 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
786 TBM_ERR("error Cannot dmabuf=%d\n", bo_sprd->gem);
787 return (tbm_bo_handle) NULL;
789 bo_sprd->dmabuf = arg.fd;
792 bo_handle.u32 = (uint32_t)bo_sprd->dmabuf;
796 TBM_ERR("Not supported device:%d\n", device);
797 bo_handle.ptr = (void *) NULL;
/* Backend capability query: this backend supports sharing bos both by
 * flink key and by dma-buf fd. Never fails. */
804 static tbm_bufmgr_capability
805 tbm_sprd_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error)
807 	tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE;
809 	capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD;
812 	*error = TBM_ERROR_NONE;
/* Bind a native (Wayland) display: start the drm-auth server so clients
 * can authenticate against this backend's DRM fd, then remember the
 * bound display on the bufmgr. */
818 tbm_sprd_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display)
820 	tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
821 	TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, TBM_ERROR_INVALID_PARAMETER);
823 	if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_sprd->fd,
824 						bufmgr_sprd->device_name, 0)) {
825 		TBM_ERR("fail to tbm_drm_helper_wl_server_init\n");
826 		return TBM_ERROR_OPERATION_FAILED;
829 	bufmgr_sprd->bind_display = native_display;
831 	return TBM_ERROR_NONE;
/* Return a heap-allocated copy of the backend's supported color-format
 * table (TBM_COLOR_FORMAT_COUNT entries). Ownership of *formats
 * transfers to the caller, who must free() it. */
835 tbm_sprd_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data,
836 					uint32_t **formats, uint32_t *num)
838 	tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
839 	uint32_t *color_formats;
841 	TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, TBM_ERROR_INVALID_PARAMETER);
843 	color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
844 	if (color_formats == NULL)
845 		return TBM_ERROR_OUT_OF_MEMORY;
847 	memcpy(color_formats, tbm_sprd_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
849 	*formats = color_formats;
850 	*num = TBM_COLOR_FORMAT_COUNT;
852 	TBM_DBG("supported format count = %d\n", *num);
854 	return TBM_ERROR_NONE;
/* Compute per-plane layout (size, offset, pitch, backing-bo index) for
 * a given format and width/height. RGB pitches are aligned to
 * TBM_SURFACE_ALIGNMENT_PITCH_RGB (128), YUV to
 * TBM_SURFACE_ALIGNMENT_PITCH_YUV (16, halved for chroma planes), and
 * every plane size to TBM_SURFACE_ALIGNMENT_PLANE (64). The offset and
 * bo_idx assignments per plane_idx branch are elided in this extract. */
858 tbm_sprd_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data,
859 				tbm_format format, int plane_idx, int width,
860 				int height, uint32_t *size, uint32_t *offset,
861 				uint32_t *pitch, int *bo_idx)
863 	tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
869 	int _align_height = 0;
871 	TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, TBM_ERROR_INVALID_PARAMETER);
/* 16bpp single-plane RGB formats */
875 	case TBM_FORMAT_XRGB4444:
876 	case TBM_FORMAT_XBGR4444:
877 	case TBM_FORMAT_RGBX4444:
878 	case TBM_FORMAT_BGRX4444:
879 	case TBM_FORMAT_ARGB4444:
880 	case TBM_FORMAT_ABGR4444:
881 	case TBM_FORMAT_RGBA4444:
882 	case TBM_FORMAT_BGRA4444:
883 	case TBM_FORMAT_XRGB1555:
884 	case TBM_FORMAT_XBGR1555:
885 	case TBM_FORMAT_RGBX5551:
886 	case TBM_FORMAT_BGRX5551:
887 	case TBM_FORMAT_ARGB1555:
888 	case TBM_FORMAT_ABGR1555:
889 	case TBM_FORMAT_RGBA5551:
890 	case TBM_FORMAT_BGRA5551:
891 	case TBM_FORMAT_RGB565:
894 		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
895 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 24bpp single-plane RGB */
899 	case TBM_FORMAT_RGB888:
900 	case TBM_FORMAT_BGR888:
903 		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
904 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 32bpp single-plane RGB */
908 	case TBM_FORMAT_XRGB8888:
909 	case TBM_FORMAT_XBGR8888:
910 	case TBM_FORMAT_RGBX8888:
911 	case TBM_FORMAT_BGRX8888:
912 	case TBM_FORMAT_ARGB8888:
913 	case TBM_FORMAT_ABGR8888:
914 	case TBM_FORMAT_RGBA8888:
915 	case TBM_FORMAT_BGRA8888:
918 		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
919 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* packed YUV, single plane */
924 	case TBM_FORMAT_YUYV:
925 	case TBM_FORMAT_YVYU:
926 	case TBM_FORMAT_UYVY:
927 	case TBM_FORMAT_VYUY:
928 	case TBM_FORMAT_AYUV:
931 		_pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
932 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 2-plane YUV 4:2:0 (NV12/NV21): full-res Y, half-height interleaved chroma */
938 	 * index 0 = Y plane, [7:0] Y
939 	 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
941 	 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
943 	case TBM_FORMAT_NV12:
944 	case TBM_FORMAT_NV21:
946 		// if (plane_idx == 0)
949 		_pitch = SIZE_ALIGN(width , TBM_SURFACE_ALIGNMENT_PITCH_YUV);
950 		_align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
951 		_size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
956 		// else if (plane_idx == 1)
959 		_pitch = SIZE_ALIGN(width , TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
960 		_align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
961 		_size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 2-plane YUV 4:2:2 (NV16/NV61): full-height chroma */
966 	case TBM_FORMAT_NV16:
967 	case TBM_FORMAT_NV61:
972 		_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
973 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
978 		//else if( plane_idx ==1 )
981 		_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
982 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 3-plane YUV (planar): Y, then Cb, then Cr (or reversed) */
989 	 * index 0: Y plane, [7:0] Y
990 	 * index 1: Cb plane, [7:0] Cb
991 	 * index 2: Cr plane, [7:0] Cr
993 	 * index 1: Cr plane, [7:0] Cr
994 	 * index 2: Cb plane, [7:0] Cb
997 	 NATIVE_BUFFER_FORMAT_YV12
998 	 NATIVE_BUFFER_FORMAT_I420
1000 	case TBM_FORMAT_YUV410:
1001 	case TBM_FORMAT_YVU410:
1004 	case TBM_FORMAT_YUV411:
1005 	case TBM_FORMAT_YVU411:
1006 	case TBM_FORMAT_YUV420:
1007 	case TBM_FORMAT_YVU420:
1009 		//if(plane_idx == 0)
1012 		_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1013 		_align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1014 		_size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
1019 		//else if( plane_idx == 1 )
1022 		_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1023 		_align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1024 		_size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
1029 		//else if (plane_idx == 2 )
1032 		_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1033 		_align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1034 		_size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
/* 3-plane YUV 4:2:2: half-width, full-height chroma */
1038 	case TBM_FORMAT_YUV422:
1039 	case TBM_FORMAT_YVU422:
1041 		//if(plane_idx == 0)
1044 		_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1045 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1050 		//else if( plane_idx == 1 )
1053 		_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1054 		_size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1059 		//else if (plane_idx == 2 )
1062 		_pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1063 		_size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
/* 3-plane YUV 4:4:4: full-res chroma */
1067 	case TBM_FORMAT_YUV444:
1068 	case TBM_FORMAT_YVU444:
1070 		//if(plane_idx == 0)
1073 		_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1074 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1079 		//else if( plane_idx == 1 )
1082 		_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1083 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1088 		//else if (plane_idx == 2 )
1091 		_pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1092 		_size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1106 	return TBM_ERROR_NONE;
/* Allocate a new GEM buffer object of `size` bytes with the requested
 * TBM memory-type flags. Steps: translate flags (contiguous for
 * SCANOUT unless tiny), DRM_SPRD_GEM_CREATE, flink-name lookup,
 * cache-state registration, optional eager dma-buf export when
 * dma-fence is in use, and insertion into the name->bo hash.
 * On failure sets *error and returns NULL (error unwinds elided). */
1109 static tbm_backend_bo_data *
1110 tbm_sprd_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size,
1111 			tbm_bo_memory_type flags, tbm_error_e *error)
1113 	tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
1114 	tbm_bo_sprd bo_sprd;
1115 	unsigned int sprd_flags;
1117 	if (bufmgr_sprd == NULL) {
1118 		TBM_ERR("bufmgr_data is null\n");
1120 		*error = TBM_ERROR_INVALID_PARAMETER;
1124 	bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1126 		TBM_ERR("error fail to allocate the bo_sprd\n");
1128 		*error = TBM_ERROR_OUT_OF_MEMORY;
1131 	bo_sprd->bufmgr_sprd = bufmgr_sprd;
1133 #ifdef USE_CONTIG_ONLY
1134 	flags = TBM_BO_SCANOUT;
1135 	sprd_flags = SPRD_BO_CONTIG;
1137 	sprd_flags = _get_sprd_flag_from_tbm(flags);
/* small scanout buffers (<= 4 KiB) need not be contiguous */
1138 	if ((flags & TBM_BO_SCANOUT) && (size <= 4 * 1024))
1139 		sprd_flags |= SPRD_BO_NONCONTIG;
1140 #endif // USE_CONTIG_ONLY
1142 	struct drm_sprd_gem_create arg = {0, };
1144 	arg.size = (uint64_t)size;
1145 	arg.flags = sprd_flags;
1146 	if (drmCommandWriteRead(bufmgr_sprd->fd, DRM_SPRD_GEM_CREATE, &arg,
1148 		TBM_ERR("error Cannot create bo_sprd(flag:%x, size:%d)\n",
1149 			arg.flags, (unsigned int)arg.size);
1152 		*error = TBM_ERROR_OPERATION_FAILED;
1156 	bo_sprd->fd = bufmgr_sprd->fd;
1157 	bo_sprd->gem = arg.handle;
1158 	bo_sprd->size = size;
1159 	bo_sprd->flags_tbm = flags;
1160 	bo_sprd->flags_sprd = sprd_flags;
1161 	bo_sprd->name = _get_name(bo_sprd->fd, bo_sprd->gem);
1163 	if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 0)) {
1164 		TBM_ERR("error fail init cache state(%d)\n", bo_sprd->name);
1167 		*error = TBM_ERROR_OPERATION_FAILED;
1171 	pthread_mutex_init(&bo_sprd->mutex, NULL);
/* with dma-fence, export the dma-buf fd eagerly for kernel-side sync */
1173 	if (bufmgr_sprd->use_dma_fence && !bo_sprd->dmabuf) {
1174 		struct drm_prime_handle arg = {0, };
1176 		arg.handle = bo_sprd->gem;
1177 		if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1178 			TBM_ERR("error Cannot dmabuf=%d\n", bo_sprd->gem);
1181 			*error = TBM_ERROR_OPERATION_FAILED;
1184 		bo_sprd->dmabuf = arg.fd;
1187 	/* add bo_sprd to hash */
1188 	if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)bo_sprd) < 0)
1189 		TBM_ERR("Cannot insert bo_sprd to Hash(%d)\n", bo_sprd->name);
1191 	TBM_DBG("%s size:%d, gem:%d(%d), flags:%d(%d)\n",
1192 		__FUNCTION__, bo_sprd->size,
1193 		bo_sprd->gem, bo_sprd->name,
1197 	*error = TBM_ERROR_NONE;
1199 	return (tbm_backend_bo_data *)bo_sprd;
/* Import a bo from a dma-buf fd. Converts the fd to a GEM handle
 * (PRIME_FD_TO_HANDLE), resolves the flink name, and returns the
 * already-tracked bo when the name is found in the hash with a matching
 * gem handle. Otherwise determines the real size (lseek on the prime fd
 * when the kernel supports it, else DRM_SPRD_GEM_GET info), builds a
 * new bo record, registers cache state, and inserts it into the hash. */
1202 static tbm_backend_bo_data *
1203 tbm_sprd_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error)
1205 	tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
1206 	tbm_bo_sprd bo_sprd;
1210 	char buf[STRERR_BUFSIZE];
1212 	if (bufmgr_sprd == NULL) {
1213 		TBM_ERR("bufmgr_data is null\n");
1215 		*error = TBM_ERROR_INVALID_PARAMETER;
1219 	/*getting handle from fd*/
1220 	struct drm_prime_handle arg = {0, };
1223 	if (drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1224 		TBM_ERR("Cannot get gem handle from fd:%d (%s)\n",
1225 			arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
1227 		*error = TBM_ERROR_OPERATION_FAILED;
1232 	name = _get_name(bufmgr_sprd->fd, gem);
1234 		TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
1235 			gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1237 		*error = TBM_ERROR_OPERATION_FAILED;
/* fast path: the same underlying buffer was already imported/allocated */
1241 	ret = drmHashLookup(bufmgr_sprd->hashBos, name, (void **)&bo_sprd);
1243 		if (gem == bo_sprd->gem) {
1245 			*error = TBM_ERROR_NONE;
1250 	/* Determine size of bo_sprd. The fd-to-handle ioctl really should
1251 	 * return the size, but it doesn't. If we have kernel 3.12 or
1252 	 * later, we can lseek on the prime fd to get the size. Older
1253 	 * kernels will just fail, in which case we fall back to the
1254 	 * provided (estimated or guess size).
1257 	unsigned int real_size;
1258 	struct drm_sprd_gem_info info = {0, };
1260 	real_size = lseek(key, 0, SEEK_END);
1263 	if (drmCommandWriteRead(bufmgr_sprd->fd,
1266 				sizeof(struct drm_sprd_gem_info))) {
1267 		TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
1268 			gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1270 		*error = TBM_ERROR_OPERATION_FAILED;
/* lseek failed (old kernel): fall back to the kernel-reported size */
1274 	if (real_size == -1)
1275 		real_size = info.size;
1277 	bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1279 		TBM_ERR("error bo_sprd:%p fail to allocate the bo_sprd\n", bo_sprd);
1281 		*error = TBM_ERROR_OUT_OF_MEMORY;
1284 	bo_sprd->bufmgr_sprd = bufmgr_sprd;
1286 	bo_sprd->fd = bufmgr_sprd->fd;
1288 	bo_sprd->size = real_size;
1289 	bo_sprd->flags_sprd = info.flags;
1290 	bo_sprd->flags_tbm = _get_tbm_flag_from_sprd(bo_sprd->flags_sprd);
1291 	bo_sprd->name = name;
1293 	if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 1)) {
1294 		TBM_ERR("error fail init cache state(%d)\n", bo_sprd->name);
1296 		*error = TBM_ERROR_OPERATION_FAILED;
1297 		goto fail_init_cache;
1300 	/* add bo_sprd to hash */
1301 	if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)bo_sprd) < 0)
1302 		TBM_ERR("bo_sprd:%p Cannot insert bo_sprd to Hash(%d) from gem:%d, fd:%d\n",
1303 			bo_sprd, bo_sprd->name, gem, key);
1305 	TBM_DBG("bo_sprd:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1307 		bo_sprd->gem, bo_sprd->name,
1310 		bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1314 	*error = TBM_ERROR_NONE;
1316 	return (tbm_backend_bo_data *)bo_sprd;
/* Import a bo by flink key. Returns the cached record on a hash hit;
 * otherwise opens the gem (DRM_IOCTL_GEM_OPEN), queries its SPRD flags
 * (DRM_SPRD_GEM_GET), builds a new bo record, registers cache state,
 * exports a dma-buf fd, and inserts into the hash. Error paths close
 * the opened gem handle to avoid leaking a reference. */
1323 static tbm_backend_bo_data *
1324 tbm_sprd_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error)
1326 	tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
1327 	tbm_bo_sprd bo_sprd;
1330 	if (bufmgr_sprd == NULL) {
1331 		TBM_ERR("bufmgr_data is null\n");
1333 		*error = TBM_ERROR_INVALID_PARAMETER;
1337 	ret = drmHashLookup(bufmgr_sprd->hashBos, key, (void **)&bo_sprd);
1340 		*error = TBM_ERROR_NONE;
1341 		return (tbm_backend_bo_data *)bo_sprd;
1344 	struct drm_gem_open arg = {0, };
1345 	struct drm_sprd_gem_info info = {0, };
1348 	if (drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1349 		TBM_ERR("error Cannot open gem name=%d\n", key);
1351 		*error = TBM_ERROR_OPERATION_FAILED;
1355 	info.handle = arg.handle;
1356 	if (drmCommandWriteRead(bufmgr_sprd->fd,
1359 				sizeof(struct drm_sprd_gem_info))) {
1360 		TBM_ERR("error Cannot get gem info=%d\n", key);
1362 		*error = TBM_ERROR_OPERATION_FAILED;
1366 	bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1368 		TBM_ERR("error fail to allocate the bo_sprd\n");
1370 		*error = TBM_ERROR_OUT_OF_MEMORY;
1373 	bo_sprd->bufmgr_sprd = bufmgr_sprd;
1375 	bo_sprd->fd = bufmgr_sprd->fd;
1376 	bo_sprd->gem = arg.handle;
1377 	bo_sprd->size = arg.size;
1378 	bo_sprd->flags_sprd = info.flags;
1379 	bo_sprd->name = key;
1380 #ifdef USE_CONTIG_ONLY
1381 	bo_sprd->flags_sprd = SPRD_BO_CONTIG;
1382 	bo_sprd->flags_tbm |= TBM_BO_SCANOUT;
1384 	bo_sprd->flags_tbm = _get_tbm_flag_from_sprd(bo_sprd->flags_sprd);
1387 	if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 1)) {
1388 		TBM_ERR("error fail init cache state(%d)\n", bo_sprd->name);
1390 		*error = TBM_ERROR_OPERATION_FAILED;
1391 		goto fail_init_cache;
1394 	if (!bo_sprd->dmabuf) {
1395 		struct drm_prime_handle arg = {0, };
1397 		arg.handle = bo_sprd->gem;
1398 		if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1399 			TBM_ERR("error Cannot dmabuf=%d\n", bo_sprd->gem);
1401 			*error = TBM_ERROR_OPERATION_FAILED;
1402 			goto fail_prime_handle_to_fd;
1404 		bo_sprd->dmabuf = arg.fd;
1407 	/* add bo_sprd to hash */
1408 	if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)bo_sprd) < 0)
1409 		TBM_ERR("Cannot insert bo_sprd to Hash(%d)\n", bo_sprd->name);
1411 	TBM_DBG(" bo_sprd:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1413 		bo_sprd->gem, bo_sprd->name,
1415 		bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1419 	*error = TBM_ERROR_NONE;
1421 	return (tbm_backend_bo_data *)bo_sprd;
/* unwind: tear down cache state, then close the gem handle opened above */
1423 fail_prime_handle_to_fd:
1424 	_bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
1430 	struct drm_gem_close gem_close_arg = {arg.handle, 0};
1431 	drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_GEM_CLOSE, &gem_close_arg);
/* Destroy a bo: unmap its CPU mapping (if any), close its dma-buf fd,
 * remove it from the name hash (warning if a different record was
 * registered under the same name), tear down its tgl cache-state entry,
 * and finally close the GEM handle. Freeing of the struct itself is in
 * elided lines. */
1437 tbm_sprd_bo_free(tbm_backend_bo_data *bo_data)
1439 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1441 	tbm_bufmgr_sprd bufmgr_sprd;
1442 	char buf[STRERR_BUFSIZE];
1448 	bufmgr_sprd = bo_sprd->bufmgr_sprd;
1452 	TBM_DBG(" bo_sprd:%p, gem:%d(%d), fd:%d, size:%d\n",
1454 		bo_sprd->gem, bo_sprd->name,
1458 	if (bo_sprd->pBase) {
1459 		if (munmap(bo_sprd->pBase, bo_sprd->size) == -1) {
1460 			TBM_ERR("bo_sprd:%p fail to munmap(%s)\n",
1461 				bo_sprd, strerror_r(errno, buf, STRERR_BUFSIZE));
1466 	if (bo_sprd->dmabuf) {
1467 		close(bo_sprd->dmabuf);
1468 		bo_sprd->dmabuf = 0;
1471 	/* delete bo from hash */
1472 	ret = drmHashLookup(bufmgr_sprd->hashBos, bo_sprd->name,
1475 		drmHashDelete(bufmgr_sprd->hashBos, bo_sprd->name);
1477 		TBM_ERR("Cannot find bo_sprd to Hash(%d), ret=%d\n", bo_sprd->name, ret);
1479 	if (temp != bo_sprd)
1480 		TBM_ERR("hashBos probably has several BOs with same name!!!\n");
1482 	_bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
1484 	/* Free gem handle */
1485 	struct drm_gem_close arg = {0, };
1487 	memset(&arg, 0, sizeof(arg));
1488 	arg.handle = bo_sprd->gem;
1489 	if (drmIoctl(bo_sprd->fd, DRM_IOCTL_GEM_CLOSE, &arg))
1490 		TBM_ERR("bo_sprd:%p fail to gem close.(%s)\n",
1491 			bo_sprd, strerror_r(errno, buf, STRERR_BUFSIZE));
/* Accessor: return the bo's allocation size in bytes; sets *error to
 * INVALID_PARAMETER (returning 0 — elided) on a NULL bo. */
1496 tbm_sprd_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1498 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1502 		*error = TBM_ERROR_INVALID_PARAMETER;
1507 	*error = TBM_ERROR_NONE;
1509 	return bo_sprd->size;
/* Accessor: return the bo's TBM memory-type flags (as captured at
 * alloc/import time); TBM_BO_DEFAULT with INVALID_PARAMETER on NULL. */
1512 static tbm_bo_memory_type
1513 tbm_sprd_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1515 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1519 		*error = TBM_ERROR_INVALID_PARAMETER;
1520 		return TBM_BO_DEFAULT;
1524 	*error = TBM_ERROR_NONE;
1526 	return bo_sprd->flags_tbm;
/* Return a device-specific handle for the bo via _sprd_bo_handle()
 * without touching cache state or the map count (unlike bo_map).
 * NOTE(review): failure is detected by bo_handle.ptr == NULL, which for
 * non-CPU devices aliases the u32 member of the handle union — a valid
 * fd/handle of 0 would be misreported; inherited behavior, verify. */
1529 static tbm_bo_handle
1530 tbm_sprd_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error)
1532 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1533 	tbm_bo_handle bo_handle;
1537 		*error = TBM_ERROR_INVALID_PARAMETER;
1538 		return (tbm_bo_handle) NULL;
1541 	if (!bo_sprd->gem) {
1542 		TBM_ERR("Cannot map gem=%d\n", bo_sprd->gem);
1544 		*error = TBM_ERROR_INVALID_PARAMETER;
1545 		return (tbm_bo_handle) NULL;
1548 	TBM_DBG("bo_sprd:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1550 		bo_sprd->gem, bo_sprd->name,
1552 		bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1554 		STR_DEVICE[device]);
1556 	/*Get mapped bo_handle*/
1557 	bo_handle = _sprd_bo_handle(bo_sprd, device);
1558 	if (bo_handle.ptr == NULL) {
1559 		TBM_ERR("Cannot get handle: gem:%d, device:%d\n",
1560 			bo_sprd->gem, device);
1562 		*error = TBM_ERROR_OPERATION_FAILED;
1563 		return (tbm_bo_handle) NULL;
1567 	*error = TBM_ERROR_NONE;
/* bo_map backend entry: maps the BO for the given device/access option
 * and returns the handle from _sprd_bo_handle().  On the first mapping
 * (map_cnt == 0) it transitions the cache state via _bo_set_cache_state()
 * before use.  Error reporting:
 *   - invalid bo_data/bufmgr or missing GEM handle -> TBM_ERROR_INVALID_PARAMETER
 *   - _sprd_bo_handle() failure                    -> TBM_ERROR_OPERATION_FAILED
 * NOTE(review): original line numbers are non-contiguous; guards, the
 * map_cnt increment and closing braces are elided in this chunk. */
1572 static tbm_bo_handle
1573 tbm_sprd_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1574 tbm_bo_access_option opt, tbm_error_e *error)
1576 tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1577 tbm_bo_handle bo_handle;
1578 tbm_bufmgr_sprd bufmgr_sprd;
/* invalid bo_data path (condition not visible here) */
1582 *error = TBM_ERROR_INVALID_PARAMETER;
1583 return (tbm_bo_handle) NULL;
1586 bufmgr_sprd = bo_sprd->bufmgr_sprd;
/* invalid bufmgr path (condition not visible here) */
1589 *error = TBM_ERROR_INVALID_PARAMETER;
1590 return (tbm_bo_handle) NULL;
/* a BO without a GEM handle cannot be mapped */
1593 if (!bo_sprd->gem) {
1594 TBM_ERR("Cannot map gem=%d\n", bo_sprd->gem);
1596 *error = TBM_ERROR_INVALID_PARAMETER;
1597 return (tbm_bo_handle) NULL;
1600 TBM_DBG(" bo_sprd:%p, gem:%d(%d), fd:%d, %s, %s\n",
1602 bo_sprd->gem, bo_sprd->name,
1607 /*Get mapped bo_handle*/
1608 bo_handle = _sprd_bo_handle(bo_sprd, device);
1609 if (bo_handle.ptr == NULL) {
1610 TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1611 bo_sprd->gem, device, opt);
1613 *error = TBM_ERROR_OPERATION_FAILED;
1614 return (tbm_bo_handle) NULL;
/* first map: set cache state for this device/access combination */
1617 if (bo_sprd->map_cnt == 0)
1618 _bo_set_cache_state(bufmgr_sprd, bo_sprd, device, opt);
1623 *error = TBM_ERROR_NONE;
/* bo_unmap backend entry: balances tbm_sprd_bo_map.  When the map count
 * drops to zero the cache state is saved via _bo_save_cache_state().
 * Returns TBM_ERROR_NONE on success, TBM_ERROR_INVALID_PARAMETER for a
 * bad bo_data/bufmgr or an invalid state.
 * NOTE(review): original line numbers are non-contiguous; the guard
 * conditions and the map_cnt decrement are elided in this chunk. */
1629 tbm_sprd_bo_unmap(tbm_backend_bo_data *bo_data)
1631 tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1632 tbm_bufmgr_sprd bufmgr_sprd;
1635 return TBM_ERROR_INVALID_PARAMETER;
1637 bufmgr_sprd = bo_sprd->bufmgr_sprd;
1639 return TBM_ERROR_INVALID_PARAMETER;
1642 return TBM_ERROR_INVALID_PARAMETER;
/* last unmap: flush/save cache state for this BO */
1646 if (bo_sprd->map_cnt == 0)
1647 _bo_save_cache_state(bufmgr_sprd, bo_sprd);
1649 TBM_DBG(" bo_sprd:%p, gem:%d(%d), fd:%d\n",
1651 bo_sprd->gem, bo_sprd->name,
1654 return TBM_ERROR_NONE;
/* bo_export_fd backend entry: exports the BO's GEM handle as a dma-buf
 * file descriptor with DRM_IOCTL_PRIME_HANDLE_TO_FD and returns it.
 * Ownership of the returned fd passes to the caller.  On ioctl failure
 * sets TBM_ERROR_OPERATION_FAILED; for a bad bo_data sets
 * TBM_ERROR_INVALID_PARAMETER.
 * NOTE(review): original line numbers are non-contiguous; the guard
 * conditions and some declarations are elided in this chunk. */
1658 tbm_sprd_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1660 tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1661 struct drm_prime_handle arg = {0, };
1663 char buf[STRERR_BUFSIZE];
/* invalid bo_data path (condition not visible here) */
1667 *error = TBM_ERROR_INVALID_PARAMETER;
1671 arg.handle = bo_sprd->gem;
1672 ret = drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
/* ioctl failed: ret is the drmIoctl error result (condition elided) */
1674 TBM_ERR("bo_sprd:%p Cannot dmabuf=%d (%s)\n",
1675 bo_sprd, bo_sprd->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
1677 *error = TBM_ERROR_OPERATION_FAILED;
1678 return (tbm_fd) ret;
1681 TBM_DBG("bo_sprd:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1683 bo_sprd->gem, bo_sprd->name,
1686 bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1690 *error = TBM_ERROR_NONE;
/* caller now owns the exported dma-buf fd */
1692 return (tbm_fd)arg.fd;
/* bo_export_key backend entry: returns the BO's global GEM flink name as
 * a tbm_key, lazily resolving it through _get_name() on first use.  Sets
 * TBM_ERROR_INVALID_PARAMETER for a bad bo_data or when the name cannot
 * be obtained.
 * NOTE(review): original line numbers are non-contiguous; guard
 * conditions and the failure return value are elided in this chunk. */
1696 tbm_sprd_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1698 tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
/* invalid bo_data path (condition not visible here) */
1702 *error = TBM_ERROR_INVALID_PARAMETER;
/* lazily fetch the flink name for this GEM handle */
1706 if (!bo_sprd->name) {
1707 bo_sprd->name = _get_name(bo_sprd->fd, bo_sprd->gem);
1708 if (!bo_sprd->name) {
1709 TBM_ERR("error Cannot get name\n");
1711 *error = TBM_ERROR_INVALID_PARAMETER;
1716 TBM_DBG(" bo_sprd:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1718 bo_sprd->gem, bo_sprd->name,
1720 bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1724 *error = TBM_ERROR_NONE;
1726 return (tbm_key)bo_sprd->name;
/* Backend deinit: tears down everything tbm_sprd_init created, in
 * reverse order — frees the registered bufmgr/bo function tables, drains
 * and destroys the BO hash table, deinitializes wayland auth if a native
 * display was bound, releases the master/render fd registration with the
 * drm helper, frees the device name, drops cache state, and closes the
 * DRM fd.
 * NOTE(review): original line numbers are non-contiguous; some local
 * declarations, loop bodies and the final free are elided in this chunk. */
1730 tbm_sprd_deinit(tbm_backend_bufmgr_data *bufmgr_data)
1732 tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
1738 TBM_RETURN_IF_FAIL(bufmgr_sprd != NULL);
1740 bufmgr = bufmgr_sprd->bufmgr;
1742 tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_sprd->bufmgr_func);
1743 tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_sprd->bo_func);
/* drain all remaining entries before destroying the hash table */
1745 if (bufmgr_sprd->hashBos) {
1746 while (drmHashFirst(bufmgr_sprd->hashBos, &key, &value) > 0) {
1748 drmHashDelete(bufmgr_sprd->hashBos, key);
1751 drmHashDestroy(bufmgr_sprd->hashBos);
1752 bufmgr_sprd->hashBos = NULL;
1755 if (bufmgr_sprd->bind_display)
1756 tbm_drm_helper_wl_auth_server_deinit();
/* display server owns the master fd; clients only unset the render fd */
1758 if (tbm_backend_bufmgr_query_display_server(bufmgr, &error))
1759 tbm_drm_helper_unset_tbm_master_fd();
1761 tbm_drm_helper_unset_fd();
1763 if (bufmgr_sprd->device_name)
1764 free(bufmgr_sprd->device_name);
1766 _bufmgr_deinit_cache_state(bufmgr_sprd);
1768 close(bufmgr_sprd->fd);
/* Backend init: allocates and wires up the SPRD bufmgr private data.
 * Flow (visible in this chunk):
 *   1. allocate the tbm_bufmgr_sprd struct;
 *   2. display server: reuse/open the DRM master fd and publish it via
 *      tbm_drm_helper_set_tbm_master_fd(); client: get an authenticated
 *      fd + device name from tbm_drm_helper_get_auth_info();
 *   3. probe /sys/module/dmabuf_sync/parameters/enabled for dma-fence
 *      support;
 *   4. init cache state, create the BO hash table;
 *   5. allocate + fill + register the bufmgr and bo function tables.
 * Errors unwind through the goto labels at the bottom, releasing
 * resources in reverse acquisition order.
 * NOTE(review): original line numbers are non-contiguous; several guard
 * conditions, closing braces and the tail of the error unwind (including
 * the presumable free(bufmgr_sprd)) are elided in this chunk. */
1773 static tbm_backend_bufmgr_data *
1774 tbm_sprd_init(tbm_bufmgr bufmgr, tbm_error_e *error)
1776 tbm_bufmgr_sprd bufmgr_sprd = NULL;
1777 tbm_backend_bufmgr_func *bufmgr_func = NULL;
1778 tbm_backend_bo_func *bo_func = NULL;
1783 TBM_ERR("bufmgr is null.\n");
1785 *error = TBM_ERROR_INVALID_PARAMETER;
1789 bufmgr_sprd = calloc(1, sizeof(struct _tbm_bufmgr_sprd));
1791 TBM_ERR("fail to alloc bufmgr_sprd!\n");
1793 *error = TBM_ERROR_OUT_OF_MEMORY;
/* display-server process: own (or open) the DRM master fd */
1797 if (tbm_backend_bufmgr_query_display_server(bufmgr, &err)) {
1798 bufmgr_sprd->fd = tbm_drm_helper_get_master_fd();
1799 if (bufmgr_sprd->fd < 0) {
1800 bufmgr_sprd->fd = _tbm_sprd_open_drm();
1801 if (bufmgr_sprd->fd < 0) {
1802 TBM_ERR("fail to open drm!\n");
1804 *error = TBM_ERROR_OPERATION_FAILED;
1809 tbm_drm_helper_set_tbm_master_fd(bufmgr_sprd->fd);
1811 bufmgr_sprd->device_name = drmGetDeviceNameFromFd(bufmgr_sprd->fd);
1812 if (!bufmgr_sprd->device_name) {
1813 TBM_ERR("fail to get device name!\n");
1814 tbm_drm_helper_unset_tbm_master_fd();
1816 *error = TBM_ERROR_OPERATION_FAILED;
1817 goto fail_get_device_name;
/* client process: obtain an authenticated fd from the display server */
1820 if (!tbm_drm_helper_get_auth_info(&(bufmgr_sprd->fd), &(bufmgr_sprd->device_name), NULL)) {
1821 TBM_ERR("fail to get auth drm info!\n");
1823 *error = TBM_ERROR_OPERATION_FAILED;
1824 goto fail_get_auth_info;
1828 tbm_drm_helper_set_fd(bufmgr_sprd->fd);
1830 //Check if the tbm manager supports dma fence or not.
1831 fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
1834 int length = read(fp, buf, 1);
1836 if (length == 1 && buf[0] == '1')
1837 bufmgr_sprd->use_dma_fence = 1;
1842 if (!_bufmgr_init_cache_state(bufmgr_sprd)) {
1843 TBM_ERR("fail to init bufmgr cache state\n");
1845 *error = TBM_ERROR_OPERATION_FAILED;
1846 goto fail_init_cache_state;
1849 /*Create Hash Table*/
1850 bufmgr_sprd->hashBos = drmHashCreate();
1852 /* alloc and register bufmgr_funcs */
1853 bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err);
1855 TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err);
1857 *error = TBM_ERROR_OUT_OF_MEMORY;
1858 goto fail_alloc_bufmgr_func;
1861 bufmgr_func->bufmgr_get_capabilities = tbm_sprd_bufmgr_get_capabilities;
1862 bufmgr_func->bufmgr_bind_native_display = tbm_sprd_bufmgr_bind_native_display;
1863 bufmgr_func->bufmgr_get_supported_formats = tbm_sprd_bufmgr_get_supported_formats;
1864 bufmgr_func->bufmgr_get_plane_data = tbm_sprd_bufmgr_get_plane_data;
1865 bufmgr_func->bufmgr_alloc_bo = tbm_sprd_bufmgr_alloc_bo;
1866 bufmgr_func->bufmgr_alloc_bo_with_format = NULL;
1867 bufmgr_func->bufmgr_import_fd = tbm_sprd_bufmgr_import_fd;
1868 bufmgr_func->bufmgr_import_key = tbm_sprd_bufmgr_import_key;
1870 err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func);
1871 if (err != TBM_ERROR_NONE) {
1872 TBM_ERR("fail to register bufmgr_func! err(%d)\n", err);
1874 *error = TBM_ERROR_OPERATION_FAILED;
1875 goto fail_register_bufmgr_func;
1877 bufmgr_sprd->bufmgr_func = bufmgr_func;
1879 /* alloc and register bo_funcs */
1880 bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err);
1882 TBM_ERR("fail to alloc bo_func! err(%d)\n", err);
1884 *error = TBM_ERROR_OUT_OF_MEMORY;
1885 goto fail_alloc_bo_func;
1888 bo_func->bo_free = tbm_sprd_bo_free;
1889 bo_func->bo_get_size = tbm_sprd_bo_get_size;
1890 bo_func->bo_get_memory_types = tbm_sprd_bo_get_memory_type;
1891 bo_func->bo_get_handle = tbm_sprd_bo_get_handle;
1892 bo_func->bo_map = tbm_sprd_bo_map;
1893 bo_func->bo_unmap = tbm_sprd_bo_unmap;
1894 bo_func->bo_lock = NULL;
1895 bo_func->bo_unlock = NULL;
1896 bo_func->bo_export_fd = tbm_sprd_bo_export_fd;
1897 bo_func->bo_export_key = tbm_sprd_bo_export_key;
1899 err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func);
1900 if (err != TBM_ERROR_NONE) {
1901 TBM_ERR("fail to register bo_func! err(%d)\n", err);
1903 *error = TBM_ERROR_OPERATION_FAILED;
1904 goto fail_register_bo_func;
1906 bufmgr_sprd->bo_func = bo_func;
1908 TBM_DBG("DMABUF FENCE is %s\n",
1909 bufmgr_sprd->use_dma_fence ? "supported!" : "NOT supported!");
1910 TBM_DBG("fd:%d\n", bufmgr_sprd->fd);
1913 *error = TBM_ERROR_NONE;
1915 bufmgr_sprd->bufmgr = bufmgr;
1917 return (tbm_backend_bufmgr_data *)bufmgr_sprd;
/* error unwind: labels release resources in reverse acquisition order */
1919 fail_register_bo_func:
1920 tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func);
1922 fail_register_bufmgr_func:
1923 tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func);
1924 fail_alloc_bufmgr_func:
1925 _bufmgr_deinit_cache_state(bufmgr_sprd);
1926 if (bufmgr_sprd->hashBos)
1927 drmHashDestroy(bufmgr_sprd->hashBos);
1928 fail_init_cache_state:
1929 if (tbm_backend_bufmgr_query_display_server(bufmgr, &err))
1930 tbm_drm_helper_unset_tbm_master_fd();
1931 tbm_drm_helper_unset_fd();
1932 if (bufmgr_sprd->device_name)
1933 free(bufmgr_sprd->device_name);
1934 fail_get_device_name:
1935 close(bufmgr_sprd->fd);
1942 tbm_backend_module tbm_backend_module_data = {
1945 TBM_BACKEND_ABI_VERSION_2_0,