+ TBM_FORMAT_BGRA8888,
+ TBM_FORMAT_RGBX8888,
+ TBM_FORMAT_RGB888,
+ TBM_FORMAT_NV12,
+ TBM_FORMAT_NV21,
+ TBM_FORMAT_YUV420,
+ TBM_FORMAT_YVU420
+ };
+
+/* Create the userspace tgl lock bound to key (1 second timeout).
+ * Returns 1 on success, 0 when the TGL_IOC_INIT_LOCK ioctl fails. */
+static inline int
+_tgl_init(int fd, unsigned int key)
+{
+	struct tgl_attribute attr;
+
+	attr.key = key;
+	attr.timeout_ms = 1000;
+
+	if (ioctl(fd, TGL_IOC_INIT_LOCK, &attr) == 0)
+		return 1;
+
+	TBM_SPRD_LOG("[libtbm-sprd:%d] error(%s) %s:%d key:%d\n",
+		     getpid(), strerror(errno), __func__, __LINE__, key);
+	return 0;
+}
+
+/* Tear down the tgl lock bound to key.
+ * Returns 1 on success, 0 when the TGL_IOC_DESTROY_LOCK ioctl fails. */
+static inline int
+_tgl_destroy(int fd, unsigned int key)
+{
+	if (ioctl(fd, TGL_IOC_DESTROY_LOCK, key) == 0)
+		return 1;
+
+	TBM_SPRD_LOG("[libtbm-sprd:%d] "
+		     "error(%s) %s:%d key:%d\n",
+		     getpid(), strerror(errno), __func__, __LINE__, key);
+	return 0;
+}
+
+/* Acquire the tgl lock bound to key (blocks up to the configured timeout).
+ * Returns 1 on success, 0 when the TGL_IOC_LOCK_LOCK ioctl fails. */
+static inline int
+_tgl_lock(int fd, unsigned int key)
+{
+	if (ioctl(fd, TGL_IOC_LOCK_LOCK, key) == 0)
+		return 1;
+
+	TBM_SPRD_LOG("[libtbm-sprd:%d] "
+		     "error(%s) %s:%d key:%d\n",
+		     getpid(), strerror(errno), __func__, __LINE__, key);
+	return 0;
+}
+
+/* Release the tgl lock bound to key.
+ * Returns 1 on success, 0 when the TGL_IOC_UNLOCK_LOCK ioctl fails. */
+static inline int
+_tgl_unlock(int fd, unsigned int key)
+{
+	if (ioctl(fd, TGL_IOC_UNLOCK_LOCK, key) == 0)
+		return 1;
+
+	TBM_SPRD_LOG("[libtbm-sprd:%d] "
+		     "error(%s) %s:%d key:%d\n",
+		     getpid(), strerror(errno), __func__, __LINE__, key);
+	return 0;
+}
+
+#ifdef USE_CACHE
+/* Store val as the user data (data1) attached to the lock for key.
+ * Returns 1 on success, 0 when the TGL_IOC_SET_DATA ioctl fails. */
+static inline int
+_tgl_set_data(int fd, unsigned int key, unsigned int val)
+{
+	struct tgl_user_data arg;
+
+	arg.key = key;
+	arg.data1 = val;
+
+	if (ioctl(fd, TGL_IOC_SET_DATA, &arg) == 0)
+		return 1;
+
+	TBM_SPRD_LOG("[libtbm-sprd:%d] "
+		     "error(%s) %s:%d key:%d\n",
+		     getpid(), strerror(errno), __func__, __LINE__, key);
+	return 0;
+}
+
+/* Read back the user data (data1) attached to the lock for key; when
+ * locked is non-NULL the current lock state is reported through it.
+ * Returns 0 on ioctl failure (indistinguishable from a stored 0). */
+static inline unsigned int
+_tgl_get_data(int fd, unsigned int key, unsigned int *locked)
+{
+	struct tgl_user_data arg = { 0, };
+
+	arg.key = key;
+	if (ioctl(fd, TGL_IOC_GET_DATA, &arg) != 0) {
+		TBM_SPRD_LOG("[libtbm-sprd:%d] "
+			     "error(%s) %s:%d key:%d\n",
+			     getpid(), strerror(errno), __func__, __LINE__, key);
+		return 0;
+	}
+
+	if (locked != NULL)
+		*locked = arg.locked;
+
+	return arg.data1;
+}
+#endif
+
+/* Open the SPRD DRM device node.
+ *
+ * Tries drmOpen() first; when that fails and udev support is compiled
+ * in, scans the udev "drm" subsystem for a card whose parent device is
+ * "sprd-drm" and opens its devnode directly.
+ *
+ * Returns an open file descriptor (owned by the caller) or -1 on error.
+ *
+ * BUGFIX: the udev branch previously declared a second `int fd = -1;`
+ * that shadowed the outer fd, so a successful open() was discarded and
+ * the function always returned -1 on this path. Also: the open()/fstat()
+ * log calls had a %s specifier with no matching argument (undefined
+ * behavior), drm_device could be dereferenced while NULL when no card
+ * matched, and the fd leaked when fstat() failed.
+ */
+static int
+_tbm_sprd_open_drm(void)
+{
+	int fd = -1;
+
+	fd = drmOpen(SPRD_DRM_NAME, NULL);
+	if (fd < 0) {
+		TBM_SPRD_LOG ("[libtbm-sprd:%d] "
+			"warning %s:%d fail to open drm\n",
+			getpid(), __FUNCTION__, __LINE__);
+	}
+
+#ifdef HAVE_UDEV
+	if (fd < 0) {
+		struct udev *udev = NULL;
+		struct udev_enumerate *e = NULL;
+		struct udev_list_entry *entry = NULL;
+		struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+		const char *filepath;
+		struct stat s;
+		int ret;
+		/* NOTE: no local `fd` here -- the outer fd must receive the
+		 * open() result so it is actually returned to the caller. */
+
+		TBM_SPRD_LOG ("[libtbm-sprd:%d] "
+			"%s:%d search drm-device by udev\n",
+			getpid(), __FUNCTION__, __LINE__);
+
+		udev = udev_new();
+		if (!udev) {
+			TBM_SPRD_LOG("udev_new() failed.\n");
+			return -1;
+		}
+
+		e = udev_enumerate_new(udev);
+		udev_enumerate_add_match_subsystem(e, "drm");
+		udev_enumerate_add_match_sysname(e, "card[0-9]*");
+		udev_enumerate_scan_devices(e);
+
+		udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+			device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+							udev_list_entry_get_name(entry));
+			device_parent = udev_device_get_parent(device);
+			/* Not need unref device_parent. device_parent and device have same refcnt */
+			if (device_parent) {
+				if (strcmp(udev_device_get_sysname(device_parent), "sprd-drm") == 0) {
+					drm_device = device;
+					DBG("[%s] Found render device: '%s' (%s)\n",
+					    target_name(),
+					    udev_device_get_syspath(drm_device),
+					    udev_device_get_sysname(device_parent));
+					break;
+				}
+			}
+			udev_device_unref(device);
+		}
+
+		udev_enumerate_unref(e);
+
+		/* No matching card: drm_device is NULL and must not be used. */
+		if (!drm_device) {
+			TBM_SPRD_LOG("no sprd-drm device found by udev.\n");
+			udev_unref(udev);
+			return -1;
+		}
+
+		/* Get device file path. */
+		filepath = udev_device_get_devnode(drm_device);
+		if (!filepath) {
+			TBM_SPRD_LOG("udev_device_get_devnode() failed.\n");
+			udev_device_unref(drm_device);
+			udev_unref(udev);
+			return -1;
+		}
+
+		/* Open DRM device file and check validity. */
+		fd = open(filepath, O_RDWR | O_CLOEXEC);
+		if (fd < 0) {
+			TBM_SPRD_LOG("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
+			udev_device_unref(drm_device);
+			udev_unref(udev);
+			return -1;
+		}
+
+		ret = fstat(fd, &s);
+		if (ret) {
+			TBM_SPRD_LOG("fstat(%s) failed: %s.\n", filepath, strerror(errno));
+			close(fd);	/* don't leak the descriptor on failure */
+			udev_device_unref(drm_device);
+			udev_unref(udev);
+			return -1;
+		}
+
+		udev_device_unref(drm_device);
+		udev_unref(udev);
+	}
+#endif
+
+	return fd;
+}
+
+#ifdef USE_CACHE
+/* Flush/invalidate CPU caches for a bo via DRM_SPRD_GEM_CACHE_OP.
+ *
+ * flags is a bitwise OR of TBM_SPRD_CACHE_* values selecting invalidate
+ * and/or clean, range or whole-cache. Returns 1 on success, 0 on error.
+ *
+ * BUGFIX: the user address was cast through uint32_t before widening to
+ * uint64_t, truncating the pointer on 64-bit builds; now goes through
+ * uintptr_t. The old "if bo_sprd is NULL do cache_flush_all" branch was
+ * unreachable (the guard at the top already rejects NULL) and has been
+ * removed together with its misleading comment.
+ */
+static int
+_sprd_bo_cache_flush (tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int flags)
+{
+	SPRD_RETURN_VAL_IF_FAIL (bufmgr_sprd != NULL, 0);
+	SPRD_RETURN_VAL_IF_FAIL (bo_sprd != NULL, 0);
+
+	/* cache flush is managed by kernel side when using dma-fence. */
+	if (bufmgr_sprd->use_dma_fence)
+		return 1;
+
+	struct drm_sprd_gem_cache_op cache_op = {0, };
+	int ret;
+
+	cache_op.flags = 0;
+	/* widen via uintptr_t so the pointer is not truncated to 32 bits */
+	cache_op.usr_addr = (uint64_t)(uintptr_t)bo_sprd->pBase;
+	cache_op.size = bo_sprd->size;
+
+	if (flags & TBM_SPRD_CACHE_INV) {
+		if (flags & TBM_SPRD_CACHE_ALL)
+			cache_op.flags |= SPRD_DRM_CACHE_INV_ALL;
+		else
+			cache_op.flags |= SPRD_DRM_CACHE_INV_RANGE;
+	}
+
+	if (flags & TBM_SPRD_CACHE_CLN) {
+		if (flags & TBM_SPRD_CACHE_ALL)
+			cache_op.flags |= SPRD_DRM_CACHE_CLN_ALL;
+		else
+			cache_op.flags |= SPRD_DRM_CACHE_CLN_RANGE;
+	}
+
+	if (flags & TBM_SPRD_CACHE_ALL)
+		cache_op.flags |= SPRD_DRM_ALL_CACHES_CORES;
+
+	ret = drmCommandWriteRead (bufmgr_sprd->fd, DRM_SPRD_GEM_CACHE_OP, &cache_op,
+				   sizeof(cache_op));
+	if (ret) {
+		TBM_SPRD_LOG ("[libtbm-sprd:%d] "
+			"error %s:%d fail to flush the cache.\n",
+			getpid(), __FUNCTION__, __LINE__);
+		return 0;
+	}
+
+	return 1;
+}
+#endif
+
+/* Prepare per-bo cache-state bookkeeping: create the tgl lock keyed by
+ * the bo's global name and, for newly allocated (non-imported) bos,
+ * store a clean initial cache state in the lock's user data.
+ * Returns 1 on success / when no bookkeeping is needed, 0 on bad args. */
+static int
+_bo_init_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int import)
+{
+	SPRD_RETURN_VAL_IF_FAIL (bo_sprd != NULL, 0);
+	SPRD_RETURN_VAL_IF_FAIL (bufmgr_sprd != NULL, 0);
+
+	/* kernel manages cache coherency itself when dma-fence is in use */
+	if (bufmgr_sprd->use_dma_fence)
+		return 1;
+
+	/* NOTE(review): return value deliberately(?) ignored -- for an
+	 * imported bo the lock may already exist and init could fail
+	 * benignly; confirm against the tgl driver semantics. */
+	_tgl_init(bufmgr_sprd->tgl_fd, bo_sprd->name);
+
+#ifdef USE_CACHE
+	tbm_bo_cache_state cache_state;
+
+	if (import == 0) {
+		/* fresh allocation: not cached, no device has dirtied it,
+		 * and it has seen no global flush generations yet */
+		cache_state.data.isDirtied = DEVICE_NONE;
+		cache_state.data.isCached = 0;
+		cache_state.data.cntFlush = 0;
+
+		_tgl_set_data(bufmgr_sprd->tgl_fd, bo_sprd->name, cache_state.val);
+	}
+#endif
+
+	return 1;
+}
+
+/* Update the shared cache state of a bo before the given device accesses
+ * it, cleaning/invalidating CPU caches when the previous accessor left
+ * them stale.
+ *
+ * device: TBM_DEVICE_* about to access the bo.
+ * opt:    TBM_OPTION_* access flags (read/write).
+ * Returns 1 on success (including when no bookkeeping is required).
+ *
+ * BUGFIX: the CPU/device decision previously tested `opt ==
+ * TBM_DEVICE_CPU`, comparing the option bitmask against a device enum
+ * and leaving the `device` parameter unused; it now tests `device`.
+ */
+static int
+_bo_set_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd, int device, int opt)
+{
+#ifdef USE_CACHE
+	SPRD_RETURN_VAL_IF_FAIL (bo_sprd != NULL, 0);
+	SPRD_RETURN_VAL_IF_FAIL (bufmgr_sprd != NULL, 0);
+
+	char need_flush = 0;
+	unsigned short cntFlush = 0;
+
+	if (bufmgr_sprd->use_dma_fence)
+		return 1;
+
+	if (bo_sprd->flags_sprd & SPRD_BO_NONCACHABLE)
+		return 1;
+
+	/* get cache state of a bo */
+	bo_sprd->cache_state.val = _tgl_get_data(bufmgr_sprd->tgl_fd, bo_sprd->name, NULL);
+
+	/* get global cache flush count */
+	cntFlush = (unsigned short)_tgl_get_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, NULL);
+
+	if (device == TBM_DEVICE_CPU) {
+		/* CPU reads after a cache-oblivious device wrote: invalidate */
+		if (bo_sprd->cache_state.data.isDirtied == DEVICE_CO &&
+		    bo_sprd->cache_state.data.isCached)
+			need_flush = TBM_SPRD_CACHE_INV;
+
+		bo_sprd->cache_state.data.isCached = 1;
+		if (opt & TBM_OPTION_WRITE)
+			bo_sprd->cache_state.data.isDirtied = DEVICE_CA;
+		else {
+			if (bo_sprd->cache_state.data.isDirtied != DEVICE_CA)
+				bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
+		}
+	} else {
+		/* device access after dirty CPU writes: clean the whole
+		 * cache, at most once per global flush generation */
+		if (bo_sprd->cache_state.data.isDirtied == DEVICE_CA &&
+		    bo_sprd->cache_state.data.isCached &&
+		    bo_sprd->cache_state.data.cntFlush == cntFlush)
+			need_flush = TBM_SPRD_CACHE_CLN | TBM_SPRD_CACHE_ALL;
+
+		if (opt & TBM_OPTION_WRITE)
+			bo_sprd->cache_state.data.isDirtied = DEVICE_CO;
+		else {
+			if (bo_sprd->cache_state.data.isDirtied != DEVICE_CO)
+				bo_sprd->cache_state.data.isDirtied = DEVICE_NONE;
+		}
+	}
+
+	if (need_flush) {
+		/* a whole-cache flush advances the global flush counter */
+		if (need_flush & TBM_SPRD_CACHE_ALL)
+			_tgl_set_data(bufmgr_sprd->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
+
+		/* call cache flush */
+		_sprd_bo_cache_flush (bufmgr_sprd, bo_sprd, need_flush);
+
+		DBG("[libtbm:%d] \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
+		    getpid(),
+		    bo_sprd->cache_state.data.isCached,
+		    bo_sprd->cache_state.data.isDirtied,
+		    need_flush,
+		    cntFlush);
+	}
+#endif
+
+	return 1;
+}
+
+static int
+_bo_save_cache_state(tbm_bufmgr_sprd bufmgr_sprd, tbm_bo_sprd bo_sprd)
+{
+#ifdef USE_CACHE
+ SPRD_RETURN_VAL_IF_FAIL (bo_sprd != NULL, 0);
+ SPRD_RETURN_VAL_IF_FAIL (bufmgr_sprd != NULL, 0);
+
+ if (bufmgr_sprd->use_dma_fence)
+ return 1;
+
+ unsigned short cntFlush = 0;