1 /**************************************************************************
5 Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
7 Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
9 Permission is hereby granted, free of charge, to any person obtaining a
10 copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sub license, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial portions of the Software.
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24 IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
25 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 **************************************************************************/
43 #include <sys/ioctl.h>
44 #include <sys/types.h>
51 #include <drm/sprd_drm.h>
53 #include <tbm_backend.h>
54 #include <tbm_drm_helper.h>
57 //#define USE_CONTIG_ONLY
62 #include "tbm_bufmgr_tgl.h"
65 #define TBM_COLOR_FORMAT_COUNT 4
67 #define SPRD_DRM_NAME "sprd"
69 #define STRERR_BUFSIZE 128
71 #define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
73 #define TBM_SURFACE_ALIGNMENT_PLANE (64)
74 #define TBM_SURFACE_ALIGNMENT_PITCH_RGB (128)
75 #define TBM_SURFACE_ALIGNMENT_PITCH_YUV (16)
79 unsigned int fence_supported;
83 #define DMA_BUF_ACCESS_READ 0x1
84 #define DMA_BUF_ACCESS_WRITE 0x2
85 #define DMA_BUF_ACCESS_DMA 0x4
86 #define DMA_BUF_ACCESS_MAX 0x8
88 #define DMA_FENCE_LIST_MAX 5
90 struct dma_buf_fence {
95 #define DMABUF_IOCTL_BASE 'F'
96 #define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
98 #define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
99 #define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
100 #define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
103 #define GLOBAL_KEY ((unsigned int)(-1))
105 #define TBM_SPRD_CACHE_INV 0x01 /**< cache invalidate */
106 #define TBM_SPRD_CACHE_CLN 0x02 /**< cache clean */
107 #define TBM_SPRD_CACHE_ALL 0x10 /**< cache all */
108 #define TBM_SPRD_CACHE_FLUSH (TBM_SPRD_CACHE_INV|TBM_SPRD_CACHE_CLN) /**< cache flush */
109 #define TBM_SPRD_CACHE_FLUSH_ALL (TBM_SPRD_CACHE_FLUSH|TBM_SPRD_CACHE_ALL) /**< cache flush all */
113 DEVICE_CA, /* cache aware device */
114 DEVICE_CO /* cache oblivious device */
117 typedef union _tbm_bo_cache_state tbm_bo_cache_state;
119 union _tbm_bo_cache_state {
122 unsigned int cntFlush:16; /*Flush all index for sync */
123 unsigned int isCached:1;
124 unsigned int isDirtied:2;
128 typedef struct _tbm_bufmgr_sprd *tbm_bufmgr_sprd;
129 typedef struct _tbm_bo_sprd *tbm_bo_sprd;
131 /* tbm buffor object for sprd */
132 struct _tbm_bo_sprd {
135 unsigned int name; /* FLINK ID */
137 unsigned int gem; /* GEM Handle */
139 unsigned int dmabuf; /* fd for dmabuf */
141 void *pBase; /* virtual address */
145 unsigned int flags_sprd;
146 unsigned int flags_tbm;
148 pthread_mutex_t mutex;
149 struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
153 tbm_bo_cache_state cache_state;
154 unsigned int map_cnt;
156 tbm_bufmgr_sprd bufmgr_sprd;
159 /* tbm bufmgr private for sprd */
160 struct _tbm_bufmgr_sprd {
171 tbm_backend_bufmgr_func *bufmgr_func;
172 tbm_backend_bo_func *bo_func;
177 char *STR_DEVICE[] = {
193 uint32_t tbm_sprd_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
202 _tgl_get_version(int fd)
204 struct tgl_ver_data data;
206 char buf[STRERR_BUFSIZE];
208 err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
210 TBM_ERR("error(%s) %s:%d\n",
211 strerror_r(errno, buf, STRERR_BUFSIZE));
215 TBM_DBG("tgl version is (%u, %u).\n", data.major, data.minor);
221 _tgl_init(int fd, unsigned int key)
223 struct tgl_reg_data data;
225 char buf[STRERR_BUFSIZE];
228 data.timeout_ms = 1000;
230 err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
232 TBM_ERR("error(%s) key:%d\n",
233 strerror_r(errno, buf, STRERR_BUFSIZE), key);
241 _tgl_destroy(int fd, unsigned int key)
243 struct tgl_reg_data data;
245 char buf[STRERR_BUFSIZE];
248 err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
250 TBM_ERR("error(%s) key:%d\n",
251 strerror_r(errno, buf, STRERR_BUFSIZE), key);
259 _tgl_lock(int fd, unsigned int key, int opt)
261 struct tgl_lock_data data;
263 char buf[STRERR_BUFSIZE];
264 enum tgl_type_data tgl_type;
267 case TBM_OPTION_READ:
268 tgl_type = TGL_TYPE_READ;
270 case TBM_OPTION_WRITE:
271 tgl_type = TGL_TYPE_WRITE;
274 tgl_type = TGL_TYPE_NONE;
279 data.type = tgl_type;
281 err = ioctl(fd, TGL_IOCTL_LOCK, &data);
283 TBM_ERR("error(%s) key:%d opt:%d\n",
284 strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
292 _tgl_unlock(int fd, unsigned int key)
294 struct tgl_lock_data data;
296 char buf[STRERR_BUFSIZE];
299 data.type = TGL_TYPE_NONE;
301 err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
303 TBM_ERR("error(%s) key:%d\n",
304 strerror_r(errno, buf, STRERR_BUFSIZE), key);
312 _tgl_set_data(int fd, unsigned int key, unsigned int val)
314 struct tgl_usr_data data;
316 char buf[STRERR_BUFSIZE];
321 err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
323 TBM_ERR("error(%s) key:%d\n",
324 strerror_r(errno, buf, STRERR_BUFSIZE), key);
331 static inline unsigned int
332 _tgl_get_data(int fd, unsigned int key, unsigned int *locked)
334 struct tgl_usr_data data = { 0, };
336 char buf[STRERR_BUFSIZE];
340 err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
342 TBM_ERR("error(%s) key:%d\n",
343 strerror_r(errno, buf, STRERR_BUFSIZE), key);
348 *locked = (unsigned int)data.status;
355 _tbm_sprd_open_drm(void)
358 struct udev_device *drm_device = NULL;
359 struct udev_list_entry *entry = NULL;
360 struct udev_enumerate *e;
361 const char *filepath;
367 fd = drmOpen(SPRD_DRM_NAME, NULL);
372 TBM_DBG("warning fail to open drm. search drm-device by udev\n");
376 TBM_ERR("udev_new() failed.\n");
380 e = udev_enumerate_new(udev);
381 udev_enumerate_add_match_subsystem(e, "drm");
382 udev_enumerate_add_match_sysname(e, "card[0-9]*");
383 udev_enumerate_scan_devices(e);
385 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
386 struct udev_device *device, *device_parent;
388 device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
389 udev_list_entry_get_name(entry));
390 device_parent = udev_device_get_parent(device);
391 /* Not need unref device_parent. device_parent and device have same refcnt */
393 if (strcmp(udev_device_get_sysname(device_parent), "sprd-drm") == 0) {
395 TBM_DBG("Found render device: '%s' (%s)\n",
396 udev_device_get_syspath(drm_device),
397 udev_device_get_sysname(device_parent));
401 udev_device_unref(device);
404 udev_enumerate_unref(e);
406 /* Get device file path. */
407 filepath = udev_device_get_devnode(drm_device);
409 TBM_ERR("udev_device_get_devnode() failed.\n");
410 udev_device_unref(drm_device);
415 udev_device_unref(drm_device);
418 /* Open DRM device file and check validity. */
419 fd = open(filepath, O_RDWR | O_CLOEXEC);
421 TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
426 TBM_ERR("fstat() failed %s.\n");
437 _sprd_bo_cache_flush(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int flags)
439 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
441 /* cache flush is managed by kernel side when using dma-fence. */
442 if (bufmgr_sprd->use_dma_fence)
444 // TODO: The tm1 kernel does not support ioctl for cache flush right now.
445 // The drm in tm1 kernel has to support cache_flush to turn on this feature(TBM_SRPD_CACHE_FLUSH).
446 #if TBM_SRPD_CACHE_FLUSH
447 struct drm_sprd_gem_cache_op cache_op = {0, };
450 /* if bo_sprd is null, do cache_flush_all */
453 cache_op.usr_addr = (uint64_t)((uint32_t)bo_sprd->pBase);
454 cache_op.size = bo_sprd->size;
456 flags = TBM_SPRD_CACHE_FLUSH_ALL;
458 cache_op.usr_addr = 0;
462 if (flags & TBM_SPRD_CACHE_INV) {
463 if (flags & TBM_SPRD_CACHE_ALL)
464 cache_op.flags |= SPRD_DRM_CACHE_INV_ALL;
466 cache_op.flags |= SPRD_DRM_CACHE_INV_RANGE;
469 if (flags & TBM_SPRD_CACHE_CLN) {
470 if (flags & TBM_SPRD_CACHE_ALL)
471 cache_op.flags |= SPRD_DRM_CACHE_CLN_ALL;
473 cache_op.flags |= SPRD_DRM_CACHE_CLN_RANGE;
476 if (flags & TBM_SPRD_CACHE_ALL)
477 cache_op.flags |= SPRD_DRM_ALL_CACHES_CORES;
479 ret = drmCommandWriteRead(bufmgr_sprd->fd, DRM_SPRD_GEM_CACHE_OP, &cache_op,
482 TBM_ERR("error fail to flush the cache.\n");
492 _bo_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int import)
495 TBM_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
496 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
498 if (bufmgr_sprd->use_dma_fence)
501 _tgl_init(bufmgr_sprd->tgl_fd, bo_sprd->name);
503 tbm_bo_cache_state cache_state;
506 cache_state.data.isDirtied = DEVICE_NONE;
507 cache_state.data.isCached = 0;
508 cache_state.data.cntFlush = 0;
510 _tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, cache_state.val);
518 _bo_set_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int device, int opt)
521 TBM_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
522 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
525 unsigned short cntFlush = 0;
527 if (bufmgr_sprd->use_dma_fence)
530 if (bo_sprd->flags_sprd & SPRD_BO_NONCACHABLE)
533 /* get cache state of a bo_sprd */
534 bo_sprd->cache_state.val = _tgl_get_data(bufmgr_sprd->tgl_fd, bo_sprd->name, NULL);
536 /* get global cache flush count */
537 cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
539 if (opt == TBM_DEVICE_CPU) {
540 if (bo_sprd->cache_state.data.isDirtied == DEVICE_CO &&
541 bo_sprd->cache_state.data.isCached)
542 need_flush = TBM_SPRD_CACHE_INV;
544 bo_sprd->cache_state.data.isCached = 1;
545 if (opt & TBM_OPTION_WRITE)
546 bo_sprd->cache_state.data.isDirtied = DEVICE_CA;
548 if (bo_sprd->cache_state.data.isDirtied != DEVICE_CA)
549 bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
552 if (bo_sprd->cache_state.data.isDirtied == DEVICE_CA &&
553 bo_sprd->cache_state.data.isCached &&
554 bo_sprd->cache_state.data.cntFlush == cntFlush)
555 need_flush = TBM_SPRD_CACHE_CLN | TBM_SPRD_CACHE_ALL;
557 if (opt & TBM_OPTION_WRITE)
558 bo_sprd->cache_state.data.isDirtied = DEVICE_CO;
560 if (bo_sprd->cache_state.data.isDirtied != DEVICE_CO)
561 bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
566 if (need_flush & TBM_SPRD_CACHE_ALL)
567 _tgl_set_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
569 /* call cache flush */
570 _sprd_bo_cache_flush(bufmgr_sprd, bo_sprd, need_flush);
572 TBM_DBG("\tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
573 bo_sprd->cache_state.data.isCached,
574 bo_sprd->cache_state.data.isDirtied,
584 _bo_save_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
587 TBM_RETURN_VAL_IF_FAIL(bo_sprd != NULL, 0);
588 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
590 if (bufmgr_sprd->use_dma_fence)
593 unsigned short cntFlush = 0;
595 /* get global cache flush count */
596 cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
598 /* save global cache flush count */
599 bo_sprd->cache_state.data.cntFlush = cntFlush;
600 _tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, bo_sprd->cache_state.val);
607 _bo_destroy_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
610 TBM_RETURN_IF_FAIL(bo_sprd != NULL);
611 TBM_RETURN_IF_FAIL(bufmgr_sprd != NULL);
613 if (bufmgr_sprd->use_dma_fence)
616 _tgl_destroy(bufmgr_sprd->tgl_fd, bo_sprd->name);
621 _bufmgr_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd)
624 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, 0);
626 if (bufmgr_sprd->use_dma_fence)
629 /* open tgl fd for saving cache flush data */
630 bufmgr_sprd->tgl_fd = open(tgl_devfile, O_RDWR);
632 if (bufmgr_sprd->tgl_fd < 0) {
633 bufmgr_sprd->tgl_fd = open(tgl_devfile1, O_RDWR);
634 if (bufmgr_sprd->tgl_fd < 0) {
635 TBM_ERR("fail to open global_lock:%s\n",
641 if (!_tgl_get_version(bufmgr_sprd->tgl_fd)) {
642 TBM_ERR("fail to get tgl_version. tgl init failed.\n");
643 close(bufmgr_sprd->tgl_fd);
647 if (!_tgl_init(bufmgr_sprd->tgl_fd, GLOBAL_KEY)) {
648 TBM_ERR("fail to initialize the tgl\n");
649 close(bufmgr_sprd->tgl_fd);
658 _bufmgr_deinit_cache_state(tbm_bufmgr_sprd bufmgr_sprd)
661 TBM_RETURN_IF_FAIL(bufmgr_sprd != NULL);
663 if (bufmgr_sprd->use_dma_fence)
666 if (bufmgr_sprd->tgl_fd >= 0)
667 close(bufmgr_sprd->tgl_fd);
671 #ifndef USE_CONTIG_ONLY
673 _get_sprd_flag_from_tbm(unsigned int ftbm)
675 unsigned int flags = 0;
678 * TBM_BO_DEFAULT => ION_HEAP_ID_MASK_SYSTEM
679 * TBM_BO_SCANOUT => ION_HEAP_ID_MASK_MM
680 * TBM_BO_VENDOR => ION_HEAP_ID_MASK_OVERLAY
681 * To be updated appropriately once DRM-GEM supports different heap id masks.
684 if (ftbm & TBM_BO_SCANOUT)
685 flags = SPRD_BO_CONTIG;
687 flags = SPRD_BO_NONCONTIG | SPRD_BO_DEV_SYSTEM;
689 if (ftbm & TBM_BO_WC)
691 else if (ftbm & TBM_BO_NONCACHABLE)
692 flags |= SPRD_BO_NONCACHABLE;
698 _get_tbm_flag_from_sprd(unsigned int fsprd)
700 unsigned int flags = 0;
702 if (fsprd & SPRD_BO_NONCONTIG)
703 flags |= TBM_BO_DEFAULT;
705 flags |= TBM_BO_SCANOUT;
707 if (fsprd & SPRD_BO_WC)
709 else if (fsprd & SPRD_BO_CACHABLE)
710 flags |= TBM_BO_DEFAULT;
712 flags |= TBM_BO_NONCACHABLE;
719 _get_name(int fd, unsigned int gem)
721 struct drm_gem_flink arg = {0,};
724 if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
725 TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
729 return (unsigned int)arg.name;
733 _sprd_bo_handle(tbm_bo_sprd bo_sprd, int device)
735 tbm_bo_handle bo_handle;
737 memset(&bo_handle, 0x0, sizeof(uint64_t));
740 case TBM_DEVICE_DEFAULT:
742 bo_handle.u32 = (uint32_t)bo_sprd->gem;
745 if (!bo_sprd->pBase) {
746 struct drm_sprd_gem_mmap arg = {0,};
748 arg.handle = bo_sprd->gem;
749 arg.size = bo_sprd->size;
750 if (drmCommandWriteRead(bo_sprd->fd, DRM_SPRD_GEM_MMAP, &arg, sizeof(arg))) {
751 TBM_ERR("error Cannot usrptr gem=%d\n", bo_sprd->gem);
752 return (tbm_bo_handle) NULL;
754 bo_sprd->pBase = (void *)((uint32_t)arg.mapped);
757 bo_handle.ptr = (void *)bo_sprd->pBase;
761 if (!bo_sprd->dmabuf) {
762 struct drm_prime_handle arg = {0, };
763 arg.handle = bo_sprd->gem;
764 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
765 TBM_ERR("error Cannot dmabuf=%d\n", bo_sprd->gem);
766 return (tbm_bo_handle) NULL;
768 bo_sprd->dmabuf = arg.fd;
771 bo_handle.u32 = (uint32_t)bo_sprd->dmabuf;
778 //TODO : Add ioctl for GSP MAP once available.
779 TBM_DBG("%s In case TBM_DEVICE_MM: \n", __FUNCTION_);
781 if (!bo_sprd->dmabuf) {
782 struct drm_prime_handle arg = {0, };
784 arg.handle = bo_sprd->gem;
785 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
786 TBM_ERR("error Cannot dmabuf=%d\n", bo_sprd->gem);
787 return (tbm_bo_handle) NULL;
789 bo_sprd->dmabuf = arg.fd;
792 bo_handle.u32 = (uint32_t)bo_sprd->dmabuf;
796 TBM_ERR("Not supported device:%d\n", device);
797 bo_handle.ptr = (void *) NULL;
804 static tbm_bufmgr_capability
805 tbm_sprd_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error)
807 tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE;
809 capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD;
812 *error = TBM_ERROR_NONE;
818 tbm_sprd_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display)
820 tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
821 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, TBM_ERROR_INVALID_PARAMETER);
823 if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_sprd->fd,
824 bufmgr_sprd->device_name, 0)) {
825 TBM_ERR("fail to tbm_drm_helper_wl_server_init\n");
826 return TBM_ERROR_OPERATION_FAILED;
829 bufmgr_sprd->bind_display = native_display;
831 return TBM_ERROR_NONE;
835 tbm_sprd_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data,
836 uint32_t **formats, uint32_t *num)
838 tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
839 uint32_t *color_formats;
841 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, TBM_ERROR_INVALID_PARAMETER);
843 color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
844 if (color_formats == NULL)
845 return TBM_ERROR_OUT_OF_MEMORY;
847 memcpy(color_formats, tbm_sprd_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
849 *formats = color_formats;
850 *num = TBM_COLOR_FORMAT_COUNT;
852 TBM_DBG("supported format count = %d\n", *num);
854 return TBM_ERROR_NONE;
858 tbm_sprd_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data,
859 tbm_format format, int plane_idx, int width,
860 int height, uint32_t *size, uint32_t *offset,
861 uint32_t *pitch, int *bo_idx)
863 tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
869 int _align_height = 0;
871 TBM_RETURN_VAL_IF_FAIL(bufmgr_sprd != NULL, TBM_ERROR_INVALID_PARAMETER);
875 case TBM_FORMAT_XRGB4444:
876 case TBM_FORMAT_XBGR4444:
877 case TBM_FORMAT_RGBX4444:
878 case TBM_FORMAT_BGRX4444:
879 case TBM_FORMAT_ARGB4444:
880 case TBM_FORMAT_ABGR4444:
881 case TBM_FORMAT_RGBA4444:
882 case TBM_FORMAT_BGRA4444:
883 case TBM_FORMAT_XRGB1555:
884 case TBM_FORMAT_XBGR1555:
885 case TBM_FORMAT_RGBX5551:
886 case TBM_FORMAT_BGRX5551:
887 case TBM_FORMAT_ARGB1555:
888 case TBM_FORMAT_ABGR1555:
889 case TBM_FORMAT_RGBA5551:
890 case TBM_FORMAT_BGRA5551:
891 case TBM_FORMAT_RGB565:
894 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
895 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
899 case TBM_FORMAT_RGB888:
900 case TBM_FORMAT_BGR888:
903 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
904 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
908 case TBM_FORMAT_XRGB8888:
909 case TBM_FORMAT_XBGR8888:
910 case TBM_FORMAT_RGBX8888:
911 case TBM_FORMAT_BGRX8888:
912 case TBM_FORMAT_ARGB8888:
913 case TBM_FORMAT_ABGR8888:
914 case TBM_FORMAT_RGBA8888:
915 case TBM_FORMAT_BGRA8888:
918 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
919 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
924 case TBM_FORMAT_YUYV:
925 case TBM_FORMAT_YVYU:
926 case TBM_FORMAT_UYVY:
927 case TBM_FORMAT_VYUY:
928 case TBM_FORMAT_AYUV:
931 _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
932 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
938 * index 0 = Y plane, [7:0] Y
939 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
941 * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
943 case TBM_FORMAT_NV12:
944 case TBM_FORMAT_NV21:
946 // if (plane_idx == 0)
949 _pitch = SIZE_ALIGN(width , TBM_SURFACE_ALIGNMENT_PITCH_YUV);
950 _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
951 _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
956 // else if (plane_idx == 1)
959 _pitch = SIZE_ALIGN(width , TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
960 _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
961 _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
966 case TBM_FORMAT_NV16:
967 case TBM_FORMAT_NV61:
972 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
973 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
978 //else if( plane_idx ==1 )
981 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
982 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
989 * index 0: Y plane, [7:0] Y
990 * index 1: Cb plane, [7:0] Cb
991 * index 2: Cr plane, [7:0] Cr
993 * index 1: Cr plane, [7:0] Cr
994 * index 2: Cb plane, [7:0] Cb
997 NATIVE_BUFFER_FORMAT_YV12
998 NATIVE_BUFFER_FORMAT_I420
1000 case TBM_FORMAT_YUV410:
1001 case TBM_FORMAT_YVU410:
1004 case TBM_FORMAT_YUV411:
1005 case TBM_FORMAT_YVU411:
1006 case TBM_FORMAT_YUV420:
1007 case TBM_FORMAT_YVU420:
1009 //if(plane_idx == 0)
1012 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1013 _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1014 _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
1019 //else if( plane_idx == 1 )
1022 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1023 _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1024 _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
1029 //else if (plane_idx == 2 )
1032 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1033 _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1034 _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
1038 case TBM_FORMAT_YUV422:
1039 case TBM_FORMAT_YVU422:
1041 //if(plane_idx == 0)
1044 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1045 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1050 //else if( plane_idx == 1 )
1053 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1054 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1059 //else if (plane_idx == 2 )
1062 _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
1063 _size = SIZE_ALIGN(_pitch * (height), TBM_SURFACE_ALIGNMENT_PLANE);
1067 case TBM_FORMAT_YUV444:
1068 case TBM_FORMAT_YVU444:
1070 //if(plane_idx == 0)
1073 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1074 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1079 //else if( plane_idx == 1 )
1082 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1083 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1088 //else if (plane_idx == 2 )
1091 _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
1092 _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
1106 return TBM_ERROR_NONE;
1109 static tbm_backend_bo_data *
1110 tbm_sprd_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, int size, tbm_bo_memory_type flags, tbm_error_e *error)
1112 tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
1113 tbm_bo_sprd bo_sprd;
1114 unsigned int sprd_flags;
1116 if (bufmgr_sprd == NULL) {
1117 TBM_ERR("bufmgr_data is null\n");
1119 *error = TBM_ERROR_INVALID_PARAMETER;
1123 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1125 TBM_ERR("error fail to allocate the bo_sprd\n");
1127 *error = TBM_ERROR_OUT_OF_MEMORY;
1130 bo_sprd->bufmgr_sprd = bufmgr_sprd;
1132 #ifdef USE_CONTIG_ONLY
1133 flags = TBM_BO_SCANOUT;
1134 sprd_flags = SPRD_BO_CONTIG;
1136 sprd_flags = _get_sprd_flag_from_tbm(flags);
1137 if ((flags & TBM_BO_SCANOUT) && (size <= 4 * 1024))
1138 sprd_flags |= SPRD_BO_NONCONTIG;
1139 #endif // USE_CONTIG_ONLY
1141 struct drm_sprd_gem_create arg = {0, };
1143 arg.size = (uint64_t)size;
1144 arg.flags = sprd_flags;
1145 if (drmCommandWriteRead(bufmgr_sprd->fd, DRM_SPRD_GEM_CREATE, &arg,
1147 TBM_ERR("error Cannot create bo_sprd(flag:%x, size:%d)\n",
1148 arg.flags, (unsigned int)arg.size);
1151 *error = TBM_ERROR_OPERATION_FAILED;
1155 bo_sprd->fd = bufmgr_sprd->fd;
1156 bo_sprd->gem = arg.handle;
1157 bo_sprd->size = size;
1158 bo_sprd->flags_tbm = flags;
1159 bo_sprd->flags_sprd = sprd_flags;
1160 bo_sprd->name = _get_name(bo_sprd->fd, bo_sprd->gem);
1162 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 0)) {
1163 TBM_ERR("error fail init cache state(%d)\n", bo_sprd->name);
1166 *error = TBM_ERROR_OPERATION_FAILED;
1170 pthread_mutex_init(&bo_sprd->mutex, NULL);
1172 if (bufmgr_sprd->use_dma_fence && !bo_sprd->dmabuf) {
1173 struct drm_prime_handle arg = {0, };
1175 arg.handle = bo_sprd->gem;
1176 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1177 TBM_ERR("error Cannot dmabuf=%d\n", bo_sprd->gem);
1180 *error = TBM_ERROR_OPERATION_FAILED;
1183 bo_sprd->dmabuf = arg.fd;
1186 /* add bo_sprd to hash */
1187 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)bo_sprd) < 0)
1188 TBM_ERR("Cannot insert bo_sprd to Hash(%d)\n", bo_sprd->name);
1190 TBM_DBG("%s size:%d, gem:%d(%d), flags:%d(%d)\n",
1191 __FUNCTION__, bo_sprd->size,
1192 bo_sprd->gem, bo_sprd->name,
1196 *error = TBM_ERROR_NONE;
1198 return (tbm_backend_bo_data *)bo_sprd;
1201 static tbm_backend_bo_data *
1202 tbm_sprd_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error)
1204 tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
1205 tbm_bo_sprd bo_sprd;
1209 char buf[STRERR_BUFSIZE];
1211 if (bufmgr_sprd == NULL) {
1212 TBM_ERR("bufmgr_data is null\n");
1214 *error = TBM_ERROR_INVALID_PARAMETER;
1218 /*getting handle from fd*/
1219 struct drm_prime_handle arg = {0, };
1222 if (drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
1223 TBM_ERR("Cannot get gem handle from fd:%d (%s)\n",
1224 arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
1226 *error = TBM_ERROR_OPERATION_FAILED;
1231 name = _get_name(bufmgr_sprd->fd, gem);
1233 TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
1234 gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1236 *error = TBM_ERROR_OPERATION_FAILED;
1240 ret = drmHashLookup(bufmgr_sprd->hashBos, name, (void **)&bo_sprd);
1242 if (gem == bo_sprd->gem) {
1244 *error = TBM_ERROR_NONE;
1249 /* Determine size of bo_sprd. The fd-to-handle ioctl really should
1250 * return the size, but it doesn't. If we have kernel 3.12 or
1251 * later, we can lseek on the prime fd to get the size. Older
1252 * kernels will just fail, in which case we fall back to the
1253 * provided (estimated or guess size).
1256 unsigned int real_size;
1257 struct drm_sprd_gem_info info = {0, };
1259 real_size = lseek(key, 0, SEEK_END);
1262 if (drmCommandWriteRead(bufmgr_sprd->fd,
1265 sizeof(struct drm_sprd_gem_info))) {
1266 TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
1267 gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
1269 *error = TBM_ERROR_OPERATION_FAILED;
1273 if (real_size == -1)
1274 real_size = info.size;
1276 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1278 TBM_ERR("error bo_sprd:%p fail to allocate the bo_sprd\n", bo_sprd);
1280 *error = TBM_ERROR_OUT_OF_MEMORY;
1283 bo_sprd->bufmgr_sprd = bufmgr_sprd;
1285 bo_sprd->fd = bufmgr_sprd->fd;
1287 bo_sprd->size = real_size;
1288 bo_sprd->flags_sprd = info.flags;
1289 bo_sprd->flags_tbm = _get_tbm_flag_from_sprd(bo_sprd->flags_sprd);
1290 bo_sprd->name = name;
1292 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 1)) {
1293 TBM_ERR("error fail init cache state(%d)\n", bo_sprd->name);
1295 *error = TBM_ERROR_OPERATION_FAILED;
1296 goto fail_init_cache;
1299 /* add bo_sprd to hash */
1300 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)bo_sprd) < 0)
1301 TBM_ERR("bo_sprd:%p Cannot insert bo_sprd to Hash(%d) from gem:%d, fd:%d\n",
1302 bo_sprd, bo_sprd->name, gem, key);
1304 TBM_DBG("bo_sprd:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1306 bo_sprd->gem, bo_sprd->name,
1309 bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1313 *error = TBM_ERROR_NONE;
1315 return (tbm_backend_bo_data *)bo_sprd;
1322 static tbm_backend_bo_data *
1323 tbm_sprd_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error)
1325 tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
1326 tbm_bo_sprd bo_sprd;
1329 if (bufmgr_sprd == NULL) {
1330 TBM_ERR("bufmgr_data is null\n");
1332 *error = TBM_ERROR_INVALID_PARAMETER;
1336 ret = drmHashLookup(bufmgr_sprd->hashBos, key, (void **)&bo_sprd);
1339 *error = TBM_ERROR_NONE;
1340 return (tbm_backend_bo_data *)bo_sprd;
1343 struct drm_gem_open arg = {0, };
1344 struct drm_sprd_gem_info info = {0, };
1347 if (drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
1348 TBM_ERR("error Cannot open gem name=%d\n", key);
1350 *error = TBM_ERROR_OPERATION_FAILED;
1354 info.handle = arg.handle;
1355 if (drmCommandWriteRead(bufmgr_sprd->fd,
1358 sizeof(struct drm_sprd_gem_info))) {
1359 TBM_ERR("error Cannot get gem info=%d\n", key);
1361 *error = TBM_ERROR_OPERATION_FAILED;
1365 bo_sprd = calloc(1, sizeof(struct _tbm_bo_sprd));
1367 TBM_ERR("error fail to allocate the bo_sprd\n");
1369 *error = TBM_ERROR_OUT_OF_MEMORY;
1372 bo_sprd->bufmgr_sprd = bufmgr_sprd;
1374 bo_sprd->fd = bufmgr_sprd->fd;
1375 bo_sprd->gem = arg.handle;
1376 bo_sprd->size = arg.size;
1377 bo_sprd->flags_sprd = info.flags;
1378 bo_sprd->name = key;
1379 #ifdef USE_CONTIG_ONLY
1380 bo_sprd->flags_sprd = SPRD_BO_CONTIG;
1381 bo_sprd->flags_tbm |= TBM_BO_SCANOUT;
1383 bo_sprd->flags_tbm = _get_tbm_flag_from_sprd(bo_sprd->flags_sprd);
1386 if (!_bo_init_cache_state(bufmgr_sprd, bo_sprd, 1)) {
1387 TBM_ERR("error fail init cache state(%d)\n", bo_sprd->name);
1389 *error = TBM_ERROR_OPERATION_FAILED;
1390 goto fail_init_cache;
1393 if (!bo_sprd->dmabuf) {
1394 struct drm_prime_handle arg = {0, };
1396 arg.handle = bo_sprd->gem;
1397 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
1398 TBM_ERR("error Cannot dmabuf=%d\n", bo_sprd->gem);
1400 *error = TBM_ERROR_OPERATION_FAILED;
1401 goto fail_prime_handle_to_fd;
1403 bo_sprd->dmabuf = arg.fd;
1406 /* add bo_sprd to hash */
1407 if (drmHashInsert(bufmgr_sprd->hashBos, bo_sprd->name, (void *)bo_sprd) < 0)
1408 TBM_ERR("Cannot insert bo_sprd to Hash(%d)\n", bo_sprd->name);
1410 TBM_DBG(" bo_sprd:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1412 bo_sprd->gem, bo_sprd->name,
1414 bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1418 *error = TBM_ERROR_NONE;
1420 return (tbm_backend_bo_data *)bo_sprd;
1422 fail_prime_handle_to_fd:
1423 _bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
1429 struct drm_gem_close gem_close_arg = {arg.handle, 0};
1430 drmIoctl(bufmgr_sprd->fd, DRM_IOCTL_GEM_CLOSE, &gem_close_arg);
1436 tbm_sprd_bo_free(tbm_backend_bo_data *bo_data)
1438 tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1440 tbm_bufmgr_sprd bufmgr_sprd;
1441 char buf[STRERR_BUFSIZE];
1447 bufmgr_sprd = bo_sprd->bufmgr_sprd;
1451 TBM_DBG(" bo_sprd:%p, gem:%d(%d), fd:%d, size:%d\n",
1453 bo_sprd->gem, bo_sprd->name,
1457 if (bo_sprd->pBase) {
1458 if (munmap(bo_sprd->pBase, bo_sprd->size) == -1) {
1459 TBM_ERR("bo_sprd:%p fail to munmap(%s)\n",
1460 bo_sprd, strerror_r(errno, buf, STRERR_BUFSIZE));
1465 if (bo_sprd->dmabuf) {
1466 close(bo_sprd->dmabuf);
1467 bo_sprd->dmabuf = 0;
1470 /* delete bo from hash */
1471 ret = drmHashLookup(bufmgr_sprd->hashBos, bo_sprd->name,
1474 drmHashDelete(bufmgr_sprd->hashBos, bo_sprd->name);
1476 TBM_ERR("Cannot find bo_sprd to Hash(%d), ret=%d\n", bo_sprd->name, ret);
1478 if (temp != bo_sprd)
1479 TBM_ERR("hashBos probably has several BOs with same name!!!\n");
1481 _bo_destroy_cache_state(bufmgr_sprd, bo_sprd);
1483 /* Free gem handle */
1484 struct drm_gem_close arg = {0, };
1486 memset(&arg, 0, sizeof(arg));
1487 arg.handle = bo_sprd->gem;
1488 if (drmIoctl(bo_sprd->fd, DRM_IOCTL_GEM_CLOSE, &arg))
1489 TBM_ERR("bo_sprd:%p fail to gem close.(%s)\n",
1490 bo_sprd, strerror_r(errno, buf, STRERR_BUFSIZE));
1495 tbm_sprd_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1497 tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1501 *error = TBM_ERROR_INVALID_PARAMETER;
1506 *error = TBM_ERROR_NONE;
1508 return bo_sprd->size;
1511 static tbm_bo_memory_type
1512 tbm_sprd_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1514 tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1518 *error = TBM_ERROR_INVALID_PARAMETER;
1519 return TBM_BO_DEFAULT;
1523 *error = TBM_ERROR_NONE;
1525 return bo_sprd->flags_tbm;
/*
 * tbm_sprd_bo_get_handle - get a device-specific handle for an already
 * allocated bo, without performing a map (contrast tbm_sprd_bo_map below,
 * which also updates cache state).
 * @bo_data: backend bo handle (cast to tbm_bo_sprd).
 * @device:  target device type (CPU/2D/3D/MM), used to pick the handle kind.
 * @error:   out: error status; TBM_ERROR_NONE on success.
 * Returns a tbm_bo_handle, or a NULL handle on failure.
 */
1528 static tbm_bo_handle
1529 tbm_sprd_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error)
1531 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1532 	tbm_bo_handle bo_handle;
/* invalid bo parameter (guard line elided in this view) */
1536 		*error = TBM_ERROR_INVALID_PARAMETER;
1537 		return (tbm_bo_handle) NULL;
/* a bo with no GEM handle cannot be mapped to any device */
1540 	if (!bo_sprd->gem) {
1541 		TBM_ERR("Cannot map gem=%d\n", bo_sprd->gem);
1543 		*error = TBM_ERROR_INVALID_PARAMETER;
1544 		return (tbm_bo_handle) NULL;
1547 	TBM_DBG("bo_sprd:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d, %s\n",
1549 		bo_sprd->gem, bo_sprd->name,
1551 		bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1553 		STR_DEVICE[device]);
1555 	/*Get mapped bo_handle*/
1556 	bo_handle = _sprd_bo_handle(bo_sprd, device);
1557 	if (bo_handle.ptr == NULL) {
1558 		TBM_ERR("Cannot get handle: gem:%d, device:%d\n",
1559 			bo_sprd->gem, device);
1561 		*error = TBM_ERROR_OPERATION_FAILED;
1562 		return (tbm_bo_handle) NULL;
/* success: return of bo_handle is elided in this view */
1566 	*error = TBM_ERROR_NONE;
/*
 * tbm_sprd_bo_map - map a bo for the given device and access option.
 * On the first map (map_cnt == 0) the cache state is updated via
 * _bo_set_cache_state() so CPU/device coherency is tracked; the map
 * counter increment is elided in this view.
 * @bo_data: backend bo handle (cast to tbm_bo_sprd).
 * @device:  target device type for the mapping.
 * @opt:     read/write access option, forwarded to cache-state tracking.
 * @error:   out: error status; TBM_ERROR_NONE on success.
 * Returns a tbm_bo_handle, or a NULL handle on failure.
 */
1571 static tbm_bo_handle
1572 tbm_sprd_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
1573 		tbm_bo_access_option opt, tbm_error_e *error)
1575 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1576 	tbm_bo_handle bo_handle;
1577 	tbm_bufmgr_sprd bufmgr_sprd;
/* invalid bo parameter (guard line elided in this view) */
1581 		*error = TBM_ERROR_INVALID_PARAMETER;
1582 		return (tbm_bo_handle) NULL;
1585 	bufmgr_sprd = bo_sprd->bufmgr_sprd;
/* invalid bufmgr back-pointer (guard line elided in this view) */
1588 		*error = TBM_ERROR_INVALID_PARAMETER;
1589 		return (tbm_bo_handle) NULL;
/* a bo with no GEM handle cannot be mapped */
1592 	if (!bo_sprd->gem) {
1593 		TBM_ERR("Cannot map gem=%d\n", bo_sprd->gem);
1595 		*error = TBM_ERROR_INVALID_PARAMETER;
1596 		return (tbm_bo_handle) NULL;
1599 	TBM_DBG("      bo_sprd:%p, gem:%d(%d), fd:%d, %s, %s\n",
1601 		bo_sprd->gem, bo_sprd->name,
1606 	/*Get mapped bo_handle*/
1607 	bo_handle = _sprd_bo_handle(bo_sprd, device);
1608 	if (bo_handle.ptr == NULL) {
1609 		TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
1610 			bo_sprd->gem, device, opt);
1612 		*error = TBM_ERROR_OPERATION_FAILED;
1613 		return (tbm_bo_handle) NULL;
/* first mapping: record cache state before handing memory to the caller */
1616 	if (bo_sprd->map_cnt == 0)
1617 		_bo_set_cache_state(bufmgr_sprd, bo_sprd, device, opt);
/* map_cnt increment and return of bo_handle are elided in this view */
1622 	*error = TBM_ERROR_NONE;
/*
 * tbm_sprd_bo_unmap - unmap a previously mapped bo.
 * Decrements the map counter (decrement line elided in this view); when it
 * reaches zero the cache state is saved via _bo_save_cache_state() so a later
 * map can restore coherency.
 * @bo_data: backend bo handle (cast to tbm_bo_sprd).
 * Returns TBM_ERROR_NONE on success, TBM_ERROR_INVALID_PARAMETER on a bad
 * bo/bufmgr/GEM handle.
 */
1628 tbm_sprd_bo_unmap(tbm_backend_bo_data *bo_data)
1630 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1631 	tbm_bufmgr_sprd bufmgr_sprd;
/* invalid bo parameter (guard line elided in this view) */
1634 		return TBM_ERROR_INVALID_PARAMETER;
1636 	bufmgr_sprd = bo_sprd->bufmgr_sprd;
/* invalid bufmgr back-pointer (guard line elided) */
1638 		return TBM_ERROR_INVALID_PARAMETER;
/* presumably a !bo_sprd->gem guard — elided; confirm against the full file */
1641 		return TBM_ERROR_INVALID_PARAMETER;
/* last unmap: persist cache state for the next mapping cycle */
1645 	if (bo_sprd->map_cnt == 0)
1646 		_bo_save_cache_state(bufmgr_sprd, bo_sprd);
1648 	TBM_DBG("     bo_sprd:%p, gem:%d(%d), fd:%d\n",
1650 		bo_sprd->gem, bo_sprd->name,
1653 	return TBM_ERROR_NONE;
/*
 * tbm_sprd_bo_export_fd - export the bo's GEM handle as a PRIME dma-buf fd
 * so it can be shared across processes (DRM_IOCTL_PRIME_HANDLE_TO_FD).
 * @bo_data: backend bo handle (cast to tbm_bo_sprd).
 * @error:   out: error status; TBM_ERROR_NONE on success.
 * Returns the new dma-buf fd; ownership transfers to the caller (caller must
 * close it). On ioctl failure returns the negative ioctl result as a tbm_fd.
 */
1657 tbm_sprd_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1659 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
1660 	struct drm_prime_handle arg = {0, };
1662 	char buf[STRERR_BUFSIZE];
/* invalid bo parameter (guard line elided in this view) */
1666 		*error = TBM_ERROR_INVALID_PARAMETER;
1670 	arg.handle = bo_sprd->gem;
1671 	ret = drmIoctl(bo_sprd->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
/* failure branch: condition line elided; log uses thread-safe strerror_r */
1673 		TBM_ERR("bo_sprd:%p Cannot dmabuf=%d (%s)\n",
1674 			bo_sprd, bo_sprd->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
1676 		*error = TBM_ERROR_OPERATION_FAILED;
1677 		return (tbm_fd) ret;
1680 	TBM_DBG("bo_sprd:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d(%d), size:%d\n",
1682 		bo_sprd->gem, bo_sprd->name,
1685 		bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1689 	*error = TBM_ERROR_NONE;
1691 	return (tbm_fd)arg.fd;
/*
 * tbm_sprd_bo_export_key - export the bo as a global GEM flink name (tbm_key)
 * for cross-process sharing. The name is fetched lazily via _get_name() on
 * first export and cached in bo_sprd->name.
 * @bo_data: backend bo handle (cast to tbm_bo_sprd).
 * @error:   out: error status; TBM_ERROR_NONE on success.
 * Returns the flink name as a tbm_key, or an error value on failure
 * (failure return line elided in this view).
 */
1695 tbm_sprd_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error)
1697 	tbm_bo_sprd bo_sprd = (tbm_bo_sprd)bo_data;
/* invalid bo parameter (guard line elided in this view) */
1701 		*error = TBM_ERROR_INVALID_PARAMETER;
/* lazy flink: only ask the kernel for a global name once per bo */
1705 	if (!bo_sprd->name) {
1706 		bo_sprd->name = _get_name(bo_sprd->fd, bo_sprd->gem);
1707 		if (!bo_sprd->name) {
1708 			TBM_ERR("error Cannot get name\n");
1710 			*error = TBM_ERROR_INVALID_PARAMETER;
1715 	TBM_DBG("    bo_sprd:%p, gem:%d(%d), fd:%d, flags:%d(%d), size:%d\n",
1717 		bo_sprd->gem, bo_sprd->name,
1719 		bo_sprd->flags_tbm, bo_sprd->flags_sprd,
1723 	*error = TBM_ERROR_NONE;
1725 	return (tbm_key)bo_sprd->name;
/*
 * tbm_sprd_deinit - tear down the SPRD backend, releasing everything
 * tbm_sprd_init() acquired, in reverse order: backend func tables, the
 * bo hash table, wayland auth server state, master/render fd registration,
 * device name, cache state, and finally the DRM fd itself.
 * @bufmgr_data: backend data created by tbm_sprd_init (cast to tbm_bufmgr_sprd).
 * NOTE(review): the final free(bufmgr_sprd) is elided in this view — confirm
 * against the full file.
 */
1729 tbm_sprd_deinit(tbm_backend_bufmgr_data *bufmgr_data)
1731 	tbm_bufmgr_sprd bufmgr_sprd = (tbm_bufmgr_sprd)bufmgr_data;
1737 	TBM_RETURN_IF_FAIL(bufmgr_sprd != NULL);
1739 	bufmgr = bufmgr_sprd->bufmgr;
/* release the function tables registered with the frontend */
1741 	tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_sprd->bufmgr_func);
1742 	tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_sprd->bo_func);
/* drain and destroy the name->bo hash; bos themselves are keyed by flink name */
1744 	if (bufmgr_sprd->hashBos) {
1745 		while (drmHashFirst(bufmgr_sprd->hashBos, &key, &value) > 0) {
1747 			drmHashDelete(bufmgr_sprd->hashBos, key);
1750 		drmHashDestroy(bufmgr_sprd->hashBos);
1751 		bufmgr_sprd->hashBos = NULL;
1754 	if (bufmgr_sprd->bind_display)
1755 		tbm_drm_helper_wl_auth_server_deinit();
/* only the display-server process registered a master fd; others set a plain fd */
1757 	if (tbm_backend_bufmgr_query_display_server(bufmgr, &error))
1758 		tbm_drm_helper_unset_tbm_master_fd();
1760 	tbm_drm_helper_unset_fd();
1762 	if (bufmgr_sprd->device_name)
1763 		free(bufmgr_sprd->device_name);
1765 	_bufmgr_deinit_cache_state(bufmgr_sprd);
1767 	close(bufmgr_sprd->fd);
/*
 * tbm_sprd_init - backend entry point: create and register the SPRD bufmgr.
 *
 * Phases (each failure unwinds via the goto ladder at the bottom):
 *   1. open/acquire the DRM fd — as master when running in the display
 *      server, otherwise via the drm-helper auth channel;
 *   2. probe dmabuf-sync support from sysfs;
 *   3. init cache state and the flink-name -> bo hash table;
 *   4. allocate + register the bufmgr and bo function tables.
 *
 * @bufmgr: frontend bufmgr this backend attaches to.
 * @error:  out: error status; TBM_ERROR_NONE on success.
 * Returns the backend data (tbm_bufmgr_sprd) or NULL on failure.
 * NOTE(review): several lines (guards, some labels such as
 * fail_get_auth_info/fail_alloc_bo_func, free(bufmgr_sprd)) are elided in
 * this view — confirm the full unwind ladder against the complete file.
 */
1772 static tbm_backend_bufmgr_data *
1773 tbm_sprd_init(tbm_bufmgr bufmgr, tbm_error_e *error)
1775 	tbm_bufmgr_sprd bufmgr_sprd = NULL;
1776 	tbm_backend_bufmgr_func *bufmgr_func = NULL;
1777 	tbm_backend_bo_func *bo_func = NULL;
/* guard: frontend bufmgr must exist (condition line elided) */
1782 		TBM_ERR("bufmgr is null.\n");
1784 		*error = TBM_ERROR_INVALID_PARAMETER;
1788 	bufmgr_sprd = calloc(1, sizeof(struct _tbm_bufmgr_sprd));
/* calloc failure branch (condition line elided) */
1790 		TBM_ERR("fail to alloc bufmgr_sprd!\n");
1792 		*error = TBM_ERROR_OUT_OF_MEMORY;
/* Phase 1: display server owns the DRM master fd; clients authenticate */
1796 	if (tbm_backend_bufmgr_query_display_server(bufmgr, &err)) {
1797 		bufmgr_sprd->fd = tbm_drm_helper_get_master_fd();
1798 		if (bufmgr_sprd->fd < 0) {
1799 			bufmgr_sprd->fd = _tbm_sprd_open_drm();
1800 			if (bufmgr_sprd->fd < 0) {
1801 				TBM_ERR("fail to open drm!\n");
1803 				*error = TBM_ERROR_OPERATION_FAILED;
/* advertise the master fd so other helpers/processes can find it */
1808 		tbm_drm_helper_set_tbm_master_fd(bufmgr_sprd->fd);
1810 		bufmgr_sprd->device_name = drmGetDeviceNameFromFd(bufmgr_sprd->fd);
1811 		if (!bufmgr_sprd->device_name) {
1812 			TBM_ERR("fail to get device name!\n");
1813 			tbm_drm_helper_unset_tbm_master_fd();
1815 			*error = TBM_ERROR_OPERATION_FAILED;
1816 			goto fail_get_device_name;
/* client path: obtain an authenticated fd + device name from the server */
1819 		if (!tbm_drm_helper_get_auth_info(&(bufmgr_sprd->fd), &(bufmgr_sprd->device_name), NULL)) {
1820 			TBM_ERR("fail to get auth drm info!\n");
1822 			*error = TBM_ERROR_OPERATION_FAILED;
1823 			goto fail_get_auth_info;
1827 	tbm_drm_helper_set_fd(bufmgr_sprd->fd);
/* Phase 2: sysfs knob tells us if the kernel has dmabuf fence sync */
1829 	//Check if the tbm manager supports dma fence or not.
1830 	fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
1833 		int length = read(fp, buf, 1);
1835 		if (length == 1 && buf[0] == '1')
1836 			bufmgr_sprd->use_dma_fence = 1;
/* Phase 3: cache-state bookkeeping + bo hash table */
1841 	if (!_bufmgr_init_cache_state(bufmgr_sprd)) {
1842 		TBM_ERR("fail to init bufmgr cache state\n");
1844 		*error = TBM_ERROR_OPERATION_FAILED;
1845 		goto fail_init_cache_state;
1848 	/*Create Hash Table*/
1849 	bufmgr_sprd->hashBos = drmHashCreate();
/* Phase 4a: bufmgr-level callbacks */
1851 	/* alloc and register bufmgr_funcs */
1852 	bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err);
/* alloc failure branch (condition line elided) */
1854 		TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err);
1856 		*error = TBM_ERROR_OUT_OF_MEMORY;
1857 		goto fail_alloc_bufmgr_func;
1860 	bufmgr_func->bufmgr_get_capabilities = tbm_sprd_bufmgr_get_capabilities;
1861 	bufmgr_func->bufmgr_bind_native_display = tbm_sprd_bufmgr_bind_native_display;
1862 	bufmgr_func->bufmgr_get_supported_formats = tbm_sprd_bufmgr_get_supported_formats;
1863 	bufmgr_func->bufmgr_get_plane_data = tbm_sprd_bufmgr_get_plane_data;
1864 	bufmgr_func->bufmgr_alloc_bo = tbm_sprd_bufmgr_alloc_bo;
1865 	bufmgr_func->bufmgr_alloc_bo_with_format = NULL;
1866 	bufmgr_func->bufmgr_import_fd = tbm_sprd_bufmgr_import_fd;
1867 	bufmgr_func->bufmgr_import_key = tbm_sprd_bufmgr_import_key;
1869 	err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func);
1870 	if (err != TBM_ERROR_NONE) {
1871 		TBM_ERR("fail to register bufmgr_func! err(%d)\n", err);
1873 		*error = TBM_ERROR_OPERATION_FAILED;
1874 		goto fail_register_bufmgr_func;
1876 	bufmgr_sprd->bufmgr_func = bufmgr_func;
/* Phase 4b: per-bo callbacks */
1878 	/* alloc and register bo_funcs */
1879 	bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err);
/* alloc failure branch (condition line elided) */
1881 		TBM_ERR("fail to alloc bo_func! err(%d)\n", err);
1883 		*error = TBM_ERROR_OUT_OF_MEMORY;
1884 		goto fail_alloc_bo_func;
1887 	bo_func->bo_free = tbm_sprd_bo_free;
1888 	bo_func->bo_get_size = tbm_sprd_bo_get_size;
1889 	bo_func->bo_get_memory_types = tbm_sprd_bo_get_memory_type;
1890 	bo_func->bo_get_handle = tbm_sprd_bo_get_handle;
1891 	bo_func->bo_map = tbm_sprd_bo_map;
1892 	bo_func->bo_unmap = tbm_sprd_bo_unmap;
1893 	bo_func->bo_lock = NULL;
1894 	bo_func->bo_unlock = NULL;
1895 	bo_func->bo_export_fd = tbm_sprd_bo_export_fd;
1896 	bo_func->bo_export_key = tbm_sprd_bo_export_key;
1898 	err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func);
1899 	if (err != TBM_ERROR_NONE) {
1900 		TBM_ERR("fail to register bo_func! err(%d)\n", err);
1902 		*error = TBM_ERROR_OPERATION_FAILED;
1903 		goto fail_register_bo_func;
1905 	bufmgr_sprd->bo_func = bo_func;
1907 	TBM_DBG("DMABUF FENCE is %s\n",
1908 		bufmgr_sprd->use_dma_fence ? "supported!" : "NOT supported!");
1909 	TBM_DBG("fd:%d\n", bufmgr_sprd->fd);
1912 	*error = TBM_ERROR_NONE;
1914 	bufmgr_sprd->bufmgr = bufmgr;
1916 	return (tbm_backend_bufmgr_data *)bufmgr_sprd;
/* --- failure unwind: each label releases what was acquired after it --- */
1918 fail_register_bo_func:
1919 	tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func);
1921 fail_register_bufmgr_func:
1922 	tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func);
1923 fail_alloc_bufmgr_func:
1924 	_bufmgr_deinit_cache_state(bufmgr_sprd);
1925 	if (bufmgr_sprd->hashBos)
1926 		drmHashDestroy(bufmgr_sprd->hashBos);
1927 fail_init_cache_state:
1928 	if (tbm_backend_bufmgr_query_display_server(bufmgr, &err))
1929 		tbm_drm_helper_unset_tbm_master_fd();
1930 	tbm_drm_helper_unset_fd();
1931 	if (bufmgr_sprd->device_name)
1932 		free(bufmgr_sprd->device_name);
1933 fail_get_device_name:
1934 	close(bufmgr_sprd->fd);
1941 tbm_backend_module tbm_backend_module_data = {
1944 TBM_BACKEND_ABI_VERSION_2_0,