# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.60)
-AC_INIT(libtbm-vc4, 0.1.0)
+AC_INIT(hal-backend-tbm-vc4, 0.1.0)
AC_USE_SYSTEM_EXTENSIONS
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])
PKG_CHECK_MODULES(LIBDRM, libdrm)
PKG_CHECK_MODULES(LIBDRM_VC4, libdrm_vc4)
-PKG_CHECK_MODULES(LIBTBM, libtbm)
PKG_CHECK_MODULES(HAL_API_COMMON, hal-api-common)
PKG_CHECK_MODULES(HAL_API_TBM, hal-api-tbm)
PKG_CHECK_MODULES(DLOG, dlog)
AC_DEFINE(ALWAYS_BACKEND_CTRL, 1, [Enable always backend ctrl])
fi
-# for libtbm-vc4
-LIBTBM_VC4_CFLAGS="$LIBDRM_CFLAGS $LIBTBM_CFLAGS $DLOG_CFLAGS $LIBUDEV_CFLAGS $LIBDRM_VC4_CFLAGS"
-LIBTBM_VC4_LIBS="$LIBDRM_LIBS $LIBTBM_LIBS $DLOG_LIBS $LIBUDEV_LIBS $LIBDRM_VC4_LIBS"
-AC_SUBST(LIBTBM_VC4_CFLAGS)
-AC_SUBST(LIBTBM_VC4_LIBS)
-
-bufmgr_dir=${libdir#*/}
-AC_SUBST(bufmgr_dir)
-
-# for libhal-backend-tbm-vc4
LIBHAL_BACKEND_TBM_VC4_CFLAGS="$HAL_API_COMMON_CFLAGS $HAL_API_TBM_CFLAGS $LIBDRM_CFLAGS $LIBDRM_VC4_CFLAGS $DLOG_CFLAGS $LIBUDEV_CFLAGS"
LIBHAL_BACKEND_TBM_VC4_LIBS="$HAL_API_COMMON_LIBS $HAL_API_TBM_LIBS $LIBDRM_LIBS $LIBDRM_VC4_LIBS $DLOG_LIBS $LIBUDEV_LIBS"
+
AC_SUBST(LIBHAL_BACKEND_TBM_VC4_CFLAGS)
AC_SUBST(LIBHAL_BACKEND_TBM_VC4_LIBS)
AC_OUTPUT([
Makefile
- src/libtbm-vc4/Makefile
- src/libhal-backend-tbm-vc4/Makefile
src/Makefile])
echo ""
echo "CFLAGS : $CFLAGS"
echo "LDFLAGS : $LDFLAGS"
-echo "LIBTBM_VC4_CFLAGS : $LIBTBM_VC4_CFLAGS"
-echo "LIBTBM_VC4_LIBS : $LIBTBM_VC4_LIBS"
-echo "bufmgr_dir : $bufmgr_dir"
echo "LIBHAL_BACKEND_TBM_VC4_CFLAGS : $LIBHAL_BACKEND_TBM_VC4_CFLAGS"
echo "LIBHAL_BACKEND_TBM_VC4_LIBS : $LIBHAL_BACKEND_TBM_VC4_LIBS"
-echo "hal-libdir : $HAL_LIBDIR"
+echo "HAL_LIBDIR : $HAL_LIBDIR"
echo ""
--- /dev/null
+<manifest>
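+ <!-- Request the default ("_") Smack security domain for this package. -->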
+ <request>
+ <domain name="_"/>
+ </request>
+</manifest>
--- /dev/null
+Name: hal-backend-tbm-vc4
+Version: 3.0.5
+Release: 1
+License: MIT
+Summary: hal-backend-tbm module for vc4
+Group: System/Libraries
+Source0: %{name}-%{version}.tar.gz
+Source1001: %{name}.manifest
+
+BuildRequires: pkgconfig(libdrm)
+BuildRequires: pkgconfig(libdrm_vc4)
+BuildRequires: pkgconfig(hal-api-common)
+BuildRequires: pkgconfig(hal-api-tbm)
+BuildRequires: pkgconfig(dlog)
+BuildRequires: pkgconfig(libudev)
+ExclusiveArch: %{arm} aarch64
+
+%description
+description: hal-backend-tbm module for vc4
+
+%prep
+%setup -q
+cp %{SOURCE1001} .
+
+%build
+
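+# --with-hal-libdir sets @HAL_LIBDIR@, which the build uses as the install
+# directory for the backend library.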
+%reconfigure --prefix=%{_prefix} --libdir=%{_libdir} \
+ --with-hal-libdir=%{_hal_libdir} \
+ --disable-align-eight \
+ --disable-cachectrl \
+ CFLAGS="${CFLAGS} -Wall -Werror" LDFLAGS="${LDFLAGS} -Wl,--hash-style=both -Wl,--as-needed"
+
+make %{?_smp_mflags}
+
+%install
+rm -rf %{buildroot}
+%make_install
+
+# install the udev rule and license files
+mkdir -p %{buildroot}%{_hal_libdir}/udev/rules.d/
+cp -af rules/99-libhal-backend-tbm-vc4.rules %{buildroot}%{_hal_libdir}/udev/rules.d/
+mkdir -p %{buildroot}%{_hal_licensedir}/libhal-backend-tbm-vc4
+cp -af COPYING %{buildroot}%{_hal_licensedir}/libhal-backend-tbm-vc4
+
+%post
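+# Replace any stale symlink so the generic libhal-backend-tbm.so name resolves
+# to the vc4 backend library.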
+if [ -f %{_hal_libdir}/libhal-backend-tbm.so ]; then
+ rm -rf %{_hal_libdir}/libhal-backend-tbm.so
+fi
+ln -s libhal-backend-tbm-vc4.so %{_hal_libdir}/libhal-backend-tbm.so
+
+%postun -p /sbin/ldconfig
+
+%files
+%manifest %{name}.manifest
+%{_hal_licensedir}/libhal-backend-tbm-vc4/COPYING
+%{_hal_libdir}/libhal-backend-*.so*
+%{_hal_libdir}/udev/rules.d/99-libhal-backend-tbm-vc4.rules
+++ /dev/null
-<manifest>
- <request>
- <domain name="_"/>
- </request>
-</manifest>
+++ /dev/null
-<manifest>
- <request>
- <domain name="_"/>
- </request>
-</manifest>
+++ /dev/null
-Name: libtbm-vc4
-Version: 3.0.5
-Release: 1
-License: MIT
-Summary: Tizen Buffer Manager - vc4 backend
-Group: System/Libraries
-Source0: %{name}-%{version}.tar.gz
-Source1001: %{name}.manifest
-Source1002: libhal-backend-tbm-vc4.manifest
-
-BuildRequires: pkgconfig(libdrm)
-BuildRequires: pkgconfig(libdrm_vc4)
-BuildRequires: pkgconfig(libtbm)
-BuildRequires: pkgconfig(hal-api-common)
-BuildRequires: pkgconfig(hal-api-tbm)
-BuildRequires: pkgconfig(dlog)
-BuildRequires: pkgconfig(libudev)
-ExclusiveArch: %{arm} aarch64
-
-%description
-descriptionion: Tizen Buffer manager backend module for vc4
-
-%package -n hal-backend-tbm-vc4
-Summary: hal-backend-tbm module for vc4
-Group: System/Libraries
-Requires: hal-api-tbm
-Requires: hal-api-common
-
-%description -n hal-backend-tbm-vc4
-descriptionion: hal tbm backend module for vc4
-
-%prep
-%setup -q
-cp %{SOURCE1001} .
-cp %{SOURCE1002} .
-
-%build
-
-%reconfigure --prefix=%{_prefix} --libdir=%{_libdir}/bufmgr \
- --with-hal-libdir=%{_hal_libdir} \
- --disable-align-eight \
- --disable-cachectrl \
- CFLAGS="${CFLAGS} -Wall -Werror" LDFLAGS="${LDFLAGS} -Wl,--hash-style=both -Wl,--as-needed"
-
-make %{?_smp_mflags}
-
-%install
-rm -rf %{buildroot}
-%make_install
-
-# make rule for tgl
-mkdir -p %{buildroot}%{_libdir}/udev/rules.d/
-cp -af rules/99-libtbm-vc4.rules %{buildroot}%{_libdir}/udev/rules.d/
-
-# make rule and license files
-mkdir -p %{buildroot}%{_hal_libdir}/udev/rules.d/
-cp -af rules/99-libhal-backend-tbm-vc4.rules %{buildroot}%{_hal_libdir}/udev/rules.d/
-mkdir -p %{buildroot}%{_hal_licensedir}/libhal-backend-tbm-vc4
-cp -af COPYING %{buildroot}%{_hal_licensedir}/libhal-backend-tbm-vc4
-
-%post
-if [ -f %{_libdir}/bufmgr/libtbm_default.so ]; then
- rm -rf %{_libdir}/bufmgr/libtbm_default.so
-fi
-ln -s libtbm-vc4.so %{_libdir}/bufmgr/libtbm_default.so
-
-%postun -p /sbin/ldconfig
-
-%post -n hal-backend-tbm-vc4
-if [ -f %{_hal_libdir}/libhal-backend-tbm.so ]; then
- rm -rf %{_hal_libdir}/libhal-backend-tbm.so
-fi
-ln -s libhal-backend-tbm-vc4.so %{_hal_libdir}/libhal-backend-tbm.so
-
-%postun -n hal-backend-tbm-vc4 -p /sbin/ldconfig
-
-%files
-%manifest %{name}.manifest
-%license COPYING
-%{_libdir}/bufmgr/libtbm-*.so*
-%{_libdir}/udev/rules.d/99-libtbm-vc4.rules
-
-%files -n hal-backend-tbm-vc4
-%manifest libhal-backend-tbm-vc4.manifest
-%{_hal_licensedir}/libhal-backend-tbm-vc4/COPYING
-%{_hal_libdir}/libhal-backend-*.so*
-%{_hal_libdir}/udev/rules.d/99-libhal-backend-tbm-vc4.rules
+++ /dev/null
-KERNEL=="tgl", MODE="0666", GROUP="display", SECLABEL{smack}="*"
-KERNEL=="slp_global_lock*", MODE="0666", GROUP="display", SECLABEL{smack}="*"
-SUBDIRS = libtbm-vc4 libhal-backend-tbm-vc4
\ No newline at end of file
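+# Build libhal-backend-tbm-vc4.la from the backend sources below and install it
+# into @HAL_LIBDIR@.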
+AM_CFLAGS = \
+ @LIBHAL_BACKEND_TBM_VC4_CFLAGS@ \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/src
+
+libhal_backend_tbm_vc4_la_LTLIBRARIES = libhal-backend-tbm-vc4.la
+libhal_backend_tbm_vc4_ladir = @HAL_LIBDIR@
+libhal_backend_tbm_vc4_la_LIBADD = @LIBHAL_BACKEND_TBM_VC4_LIBS@
+
+libhal_backend_tbm_vc4_la_SOURCES = \
+ tbm_backend_log.c \
+ tbm_backend_vc4.c
+++ /dev/null
-AM_CFLAGS = \
- @LIBHAL_BACKEND_TBM_VC4_CFLAGS@ \
- -I$(top_srcdir) \
- -I$(top_srcdir)/src/libhal-backend-tbm-vc4
-
-libhal_backend_tbm_vc4_la_LTLIBRARIES = libhal-backend-tbm-vc4.la
-libhal_backend_tbm_vc4_ladir = @HAL_LIBDIR@
-libhal_backend_tbm_vc4_la_LIBADD = @LIBHAL_BACKEND_TBM_VC4_LIBS@
-
-libhal_backend_tbm_vc4_la_SOURCES = \
- tbm_backend_log.c \
- tbm_backend_vc4.c
+++ /dev/null
-/*
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-
-/**
- * \file
- * List macros heavily inspired by the Linux kernel
- * list handling. No list looping yet.
- *
- * Is not threadsafe, so common operations need to
- * be protected using an external mutex.
- */
-#ifndef _U_DOUBLE_LIST_H_
-#define _U_DOUBLE_LIST_H_
-
-#include <stddef.h>
-
-struct list_head {
- struct list_head *prev;
- struct list_head *next;
-};
-
-static inline void list_inithead(struct list_head *item)
-{
- item->prev = item;
- item->next = item;
-}
-
-static inline void list_add(struct list_head *item, struct list_head *list)
-{
- item->prev = list;
- item->next = list->next;
- list->next->prev = item;
- list->next = item;
-}
-
-static inline void list_addtail(struct list_head *item, struct list_head *list)
-{
- item->next = list;
- item->prev = list->prev;
- list->prev->next = item;
- list->prev = item;
-}
-
-static inline void list_replace(struct list_head *from, struct list_head *to)
-{
- to->prev = from->prev;
- to->next = from->next;
- from->next->prev = to;
- from->prev->next = to;
-}
-
-static inline void list_del(struct list_head *item)
-{
- item->prev->next = item->next;
- item->next->prev = item->prev;
-}
-
-static inline void list_delinit(struct list_head *item)
-{
- item->prev->next = item->next;
- item->next->prev = item->prev;
- item->next = item;
- item->prev = item;
-}
-
-static inline int list_length(struct list_head *item)
-{
- struct list_head *next;
- int length = 0;
-
- next = item->next;
- while (next != item) {
- length++;
- next = next->next;
- }
-
- return length;
-}
-
-#define LIST_INITHEAD(__item) list_inithead(__item)
-#define LIST_ADD(__item, __list) list_add(__item, __list)
-#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
-#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
-#define LIST_DEL(__item) list_del(__item)
-#define LIST_DELINIT(__item) list_delinit(__item)
-#define LIST_LENGTH(__item) list_length(__item)
-
-#define LIST_ENTRY(__type, __item, __field) \
- ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
-
-#define LIST_FIRST_ENTRY(__ptr, __type, __field) \
- LIST_ENTRY(__type, (__ptr)->next, __field)
-
-#define LIST_LAST_ENTRY(__ptr, __type, __field) \
- LIST_ENTRY(__type, (__ptr)->prev, __field)
-
-#define LIST_IS_EMPTY(__list) \
- ((__list)->next == (__list))
-
-#ifndef container_of
-#define container_of(ptr, sample, member) \
- (void *)((char *)(ptr) \
- - ((char *)&(sample)->member - (char *)(sample)))
-#endif
-
-#define LIST_FOR_EACH_ENTRY(pos, head, member) \
- for (pos = container_of((head)->next, pos, member); \
- &pos->member != (head); \
- pos = container_of(pos->member.next, pos, member))
-
-#define LIST_FOR_EACH_ENTRY_REV(pos, head, member) \
- for (pos = container_of((head)->prev, pos, member); \
- &pos->member != (head); \
- pos = container_of(pos->member.prev, pos, member))
-
-#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
- for (pos = container_of((head)->next, pos, member), \
- storage = container_of(pos->member.next, pos, member); \
- &pos->member != (head); \
- pos = storage, storage = container_of(storage->member.next, storage, member))
-
-#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
- for (pos = container_of((head)->prev, pos, member), \
- storage = container_of(pos->member.prev, pos, member); \
- &pos->member != (head); \
- pos = storage, storage = container_of(storage->member.prev, storage, member))
-
-#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
- for (pos = container_of((start), pos, member); \
- &pos->member != (head); \
- pos = container_of(pos->member.next, pos, member))
-
-#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
- for (pos = container_of((start), pos, member); \
- &pos->member != (head); \
- pos = container_of(pos->member.prev, pos, member))
-
-#define LIST_FIND_ITEM(item, head, type, member, found) \
- do { \
- type *pos = NULL; \
- found = NULL; \
- LIST_FOR_EACH_ENTRY(pos, head, member) \
- if (pos == item) { found = item; break; } \
- } while (0)
-
-#endif /*_U_DOUBLE_LIST_H_*/
+++ /dev/null
-/**************************************************************************
-
-libtbm_vc4
-
-Copyright 2021 Samsung Electronics co., Ltd. All Rights Reserved.
-
-Contact: SooChan Lim <sc1.lim@samsung.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#include "tbm_backend_log.h"
-
-#undef LOG_TAG
-#define LOG_TAG "TBM_BACKEND"
-
-unsigned int tbm_log_debug_level = TBM_BACKEND_LOG_LEVEL_INFO;
-
-static void
-_tbm_backend_log_dlog_print(int level, const char *fmt, va_list arg)
-{
- log_priority dlog_prio;
-
- switch (level) {
- case TBM_BACKEND_LOG_LEVEL_ERR:
- dlog_prio = DLOG_ERROR;
- break;
- case TBM_BACKEND_LOG_LEVEL_WRN:
- dlog_prio = DLOG_WARN;
- break;
- case TBM_BACKEND_LOG_LEVEL_INFO:
- dlog_prio = DLOG_INFO;
- break;
- case TBM_BACKEND_LOG_LEVEL_DBG:
- dlog_prio = DLOG_DEBUG;
- break;
- default:
- return;
- }
- __dlog_vprint(LOG_ID_SYSTEM, dlog_prio, LOG_TAG, fmt, arg);
-}
-
-void
-tbm_backend_log_print(int level, const char *fmt, ...)
-{
- va_list arg;
-
- if (level > tbm_log_debug_level)
- return;
-
- va_start(arg, fmt);
- _tbm_backend_log_dlog_print(level, fmt, arg);
- va_end(arg);
-}
-
+++ /dev/null
-/**************************************************************************
-
-libtbm_vc4
-
-Copyright 2021 Samsung Electronics co., Ltd. All Rights Reserved.
-
-Contact: SooChan Lim <sc1.lim@samsung.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#ifndef __TBM_BACKEND_LOG_H__
-#define __TBM_BACKEND_LOG_H__
-
-#include <sys/syscall.h>
-#include <time.h>
-#include <dlog.h>
-
-enum {
- TBM_BACKEND_LOG_LEVEL_NONE,
- TBM_BACKEND_LOG_LEVEL_ERR,
- TBM_BACKEND_LOG_LEVEL_WRN,
- TBM_BACKEND_LOG_LEVEL_INFO,
- TBM_BACKEND_LOG_LEVEL_DBG,
-};
-
-
-/* log level */
-void tbm_backend_log_print(int level, const char *fmt, ...);
-
-#define TBM_BACKEND_DBG(fmt, args...) \
- do { \
- struct timespec ts; \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_DBG, "[%5d.%06d][%d][%s %d]"fmt, \
- (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
- (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
- } while (0)
-
-#define TBM_BACKEND_INFO(fmt, args...) \
- do { \
- struct timespec ts; \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_INFO, "[%5d.%06d][%d][%s %d]"fmt, \
- (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
- (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
- } while (0)
-
-#define TBM_BACKEND_WRN(fmt, args...) \
- do { \
- struct timespec ts; \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_WRN, "[%5d.%06d][%d][%s %d]"fmt, \
- (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
- (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
- } while (0)
-
-#define TBM_BACKEND_ERR(fmt, args...) \
- do { \
- struct timespec ts; \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_ERR, "[%5d.%06d][%d][%s %d]"fmt, \
- (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
- (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
- } while (0)
-
-#define TBM_BACKEND_RETURN_IF_FAIL(cond) {\
- if (!(cond)) {\
- TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
- return;\
- } \
-}
-#define TBM_BACKEND_RETURN_VAL_IF_FAIL(cond, val) {\
- if (!(cond)) {\
- TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
- return val;\
- } \
-}
-#define TBM_BACKEND_GOTO_VAL_IF_FAIL(cond, val) {\
- if (!(cond)) {\
- TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
- goto val;\
- } \
-}
-
-#endif /* __TBM_BACKEND_LOG_H__ */
\ No newline at end of file
+++ /dev/null
-/**************************************************************************
-
-libtbm_vc4
-
-Copyright 2017 Samsung Electronics co., Ltd. All Rights Reserved.
-
-Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <libudev.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <xf86drm.h>
-#include <vc4_drm.h>
-#include <pthread.h>
-#include <hal-common.h>
-#include <hal-tbm-types.h>
-#include <hal-tbm-interface.h>
-#include "tbm_bufmgr_tgl.h"
-#include "tbm_backend_log.h"
-#include "tbm_backend_list.h"
-
-#define VC4_DRM_NAME "vc4"
-
-#define TBM_COLOR_FORMAT_COUNT 4
-#define STRERR_BUFSIZE 128
-#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
-#ifdef ALIGN_EIGHT
-#define TBM_SURFACE_ALIGNMENT_PLANE (8)
-#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
-#else
-#define TBM_SURFACE_ALIGNMENT_PLANE (16)
-#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (16)
-#endif
-
-#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
-#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (32)
-#define TBM_SURFACE_ALIGNMENT_HEIGHT_YUV (16)
-
-//#define VC4_TILED_FORMAT 1
-
-struct dma_buf_info {
- unsigned long size;
- unsigned int fence_supported;
- unsigned int padding;
-};
-
-#define DMA_BUF_ACCESS_READ 0x1
-#define DMA_BUF_ACCESS_WRITE 0x2
-#define DMA_BUF_ACCESS_DMA 0x4
-#define DMA_BUF_ACCESS_MAX 0x8
-
-#define DMA_FENCE_LIST_MAX 5
-
-struct dma_buf_fence {
- unsigned long ctx;
- unsigned int type;
-};
-
-#define DMABUF_IOCTL_BASE 'F'
-#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
-
-#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
-#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
-#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
-
-/* tgl key values */
-#define GLOBAL_KEY ((unsigned int)(-1))
-/* TBM_CACHE */
-#define TBM_VC4_CACHE_INV 0x01 /**< cache invalidate */
-#define TBM_VC4_CACHE_CLN 0x02 /**< cache clean */
-#define TBM_VC4_CACHE_ALL 0x10 /**< cache all */
-#define TBM_VC4_CACHE_FLUSH (TBM_VC4_CACHE_INV|TBM_VC4_CACHE_CLN) /**< cache flush */
-#define TBM_VC4_CACHE_FLUSH_ALL (TBM_VC4_CACHE_FLUSH|TBM_VC4_CACHE_ALL) /**< cache flush all */
-
-enum {
- DEVICE_NONE = 0,
- DEVICE_CA, /* cache aware device */
- DEVICE_CO /* cache oblivious device */
-};
-
-typedef union _tbm_bo_cache_state tbm_bo_cache_state;
-
-union _tbm_bo_cache_state {
- unsigned int val;
- struct {
- unsigned int cntFlush:16; /*Flush all index for sync */
- unsigned int isCached:1;
- unsigned int isDirtied:2;
- } data;
-};
-
-typedef struct _tbm_vc4_bufmgr tbm_vc4_bufmgr;
-typedef struct _tbm_vc4_surface tbm_vc4_surface;
-typedef struct _tbm_vc4_bo tbm_vc4_bo;
-
-/* tbm surface object for vc4 */
-struct _tbm_vc4_surface {
- struct list_head link;
- uint32_t refcnt;
-
- uint32_t width;
- uint32_t height;
- hal_tbm_format format;
- tbm_vc4_bufmgr *bufmgr_data;
- int num_bos;
- tbm_vc4_bo *bo_data;
-};
-
-/* tbm buffor object for vc4 */
-struct _tbm_vc4_bo {
- int fd;
-
- unsigned int name; /* FLINK ID */
-
- unsigned int gem; /* GEM Handle */
-
- unsigned int dmabuf; /* fd for dmabuf */
-
- void *pBase; /* virtual address */
-
- unsigned int size;
-
- unsigned int flags_tbm; /*not used now*//*currently no values for the flags,but it may be used in future extension*/
-
- pthread_mutex_t mutex;
- struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
- int device;
- int opt;
-
- tbm_bo_cache_state cache_state;
- unsigned int map_cnt;
- int last_map_device;
-
- tbm_vc4_bufmgr *bufmgr_data;
-};
-
-/* tbm bufmgr private for vc4 */
-struct _tbm_vc4_bufmgr {
- int fd;
- int isLocal;
- void *hashBos;
-
- struct list_head surface_data_list;
-
- int use_dma_fence;
-
- int tgl_fd;
-};
-
-static char *STR_DEVICE[] = {
- "DEF",
- "CPU",
- "2D",
- "3D",
- "MM"
-};
-
-static char *STR_OPT[] = {
- "NONE",
- "RD",
- "WR",
- "RDWR"
-};
-
-
-static uint32_t tbm_vc4_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
- HAL_TBM_FORMAT_ARGB8888,
- HAL_TBM_FORMAT_XRGB8888,
- HAL_TBM_FORMAT_NV12,
- HAL_TBM_FORMAT_YUV420
- };
-
-static hal_tbm_bo *tbm_vc4_bufmgr_alloc_bo(hal_tbm_bufmgr *bufmgr, unsigned int size,
- hal_tbm_bo_memory_type flags, hal_tbm_error *error);
-static void tbm_vc4_bo_free(hal_tbm_bo *bo);
-static hal_tbm_bo *tbm_vc4_bufmgr_import_fd(hal_tbm_bufmgr *bufmgr, hal_tbm_fd key, hal_tbm_error *error);
-static hal_tbm_fd tbm_vc4_bo_export_fd(hal_tbm_bo *bo, hal_tbm_error *error);
-
-#undef ENABLE_CACHECRTL
-#ifdef ENABLE_CACHECRTL
-#ifdef TGL_GET_VERSION
-static inline int
-_tgl_get_version(int fd)
-{
- struct tgl_ver_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) %s:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE));
- return 0;
- }
-
- TBM_BACKEND_DBG("tgl version is (%u, %u).\n", data.major, data.minor);
-
- return 1;
-}
-#endif
-
-static inline int
-_tgl_init(int fd, unsigned int key)
-{
- struct tgl_reg_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.timeout_ms = 1000;
-
- err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_destroy(int fd, unsigned int key)
-{
- struct tgl_reg_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_lock(int fd, unsigned int key, int opt)
-{
- struct tgl_lock_data data;
- enum tgl_type_data tgl_type;
- int err;
- char buf[STRERR_BUFSIZE];
-
- switch (opt) {
- case TBM_OPTION_READ:
- tgl_type = TGL_TYPE_READ;
- break;
- case TBM_OPTION_WRITE:
- tgl_type = TGL_TYPE_WRITE;
- break;
- default:
- tgl_type = TGL_TYPE_NONE;
- break;
- }
-
- data.key = key;
- data.type = tgl_type;
-
- err = ioctl(fd, TGL_IOCTL_LOCK, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d opt:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_unlock(int fd, unsigned int key)
-{
- struct tgl_lock_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.type = TGL_TYPE_NONE;
-
- err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_set_data(int fd, unsigned int key, unsigned int val)
-{
- struct tgl_usr_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.data1 = val;
-
- err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline unsigned int
-_tgl_get_data(int fd, unsigned int key)
-{
- struct tgl_usr_data data = { 0, };
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
-
- err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
- if (err) {
- TBM_BACKEND_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return data.data1;
-}
-
-static int
-_vc4_cache_flush(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int flags)
-{
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
-
- /* cache flush is managed by kernel side when using dma-fence. */
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- struct drm_vc4_gem_cache_op cache_op = {0, };
- int ret;
-
- /* if bo_data is null, do cache_flush_all */
- if (bo_data) {
- cache_op.flags = 0;
- cache_op.usr_addr = (uint64_t)((uint32_t)bo_data->pBase);
- cache_op.size = bo_data->size;
- } else {
- flags = TBM_VC4_CACHE_FLUSH_ALL;
- cache_op.flags = 0;
- cache_op.usr_addr = 0;
- cache_op.size = 0;
- }
-
- if (flags & TBM_VC4_CACHE_INV) {
- if (flags & TBM_VC4_CACHE_ALL)
- cache_op.flags |= VC4_DRM_CACHE_INV_ALL;
- else
- cache_op.flags |= VC4_DRM_CACHE_INV_RANGE;
- }
-
- if (flags & TBM_VC4_CACHE_CLN) {
- if (flags & TBM_VC4_CACHE_ALL)
- cache_op.flags |= VC4_DRM_CACHE_CLN_ALL;
- else
- cache_op.flags |= VC4_DRM_CACHE_CLN_RANGE;
- }
-
- if (flags & TBM_VC4_CACHE_ALL)
- cache_op.flags |= VC4_DRM_ALL_CACHES_CORES;
-
- ret = drmCommandWriteRead(bufmgr_data->fd, DRM_VC4_GEM_CACHE_OP, &cache_op,
- sizeof(cache_op));
- if (ret) {
- TBM_BACKEND_ERR("fail to flush the cache.\n");
- return 0;
- }
-
- return 1;
-}
-#endif
-
-static int
-_bo_init_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int import)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
-
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- _tgl_init(bufmgr_data->tgl_fd, bo_data->name);
-
- tbm_bo_cache_state cache_state;
-
- if (import == 0) {
- cache_state.data.isDirtied = DEVICE_NONE;
- cache_state.data.isCached = 0;
- cache_state.data.cntFlush = 0;
-
- _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name, cache_state.val);
- }
-#endif
-
- return 1;
-}
-
-static int
-_bo_set_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int device, int opt)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
-
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- char need_flush = 0;
- unsigned short cntFlush = 0;
-
- /* get cache state of a bo_data */
- bo_data->cache_state.val = _tgl_get_data(bufmgr_data->tgl_fd,
- bo_data->name);
-
- /* get global cache flush count */
- cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
-
- if (device == HAL_TBM_DEVICE_CPU) {
- if (bo_data->cache_state.data.isDirtied == DEVICE_CO &&
- bo_data->cache_state.data.isCached)
- need_flush = TBM_VC4_CACHE_INV;
-
- bo_data->cache_state.data.isCached = 1;
- if (opt & TBM_OPTION_WRITE)
- bo_data->cache_state.data.isDirtied = DEVICE_CA;
- else {
- if (bo_data->cache_state.data.isDirtied != DEVICE_CA)
- bo_data->cache_state.data.isDirtied = DEVICE_NONE;
- }
- } else {
- if (bo_data->cache_state.data.isDirtied == DEVICE_CA &&
- bo_data->cache_state.data.isCached &&
- bo_data->cache_state.data.cntFlush == cntFlush)
- need_flush = TBM_VC4_CACHE_CLN | TBM_VC4_CACHE_ALL;
-
- if (opt & TBM_OPTION_WRITE)
- bo_data->cache_state.data.isDirtied = DEVICE_CO;
- else {
- if (bo_data->cache_state.data.isDirtied != DEVICE_CO)
- bo_data->cache_state.data.isDirtied = DEVICE_NONE;
- }
- }
-
- if (need_flush) {
- if (need_flush & TBM_VC4_CACHE_ALL)
- _tgl_set_data(bufmgr_data->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
-
- /* call cache flush */
- _vc4_cache_flush(bufmgr_data, bo_data, need_flush);
-
- TBM_BACKEND_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
- bo_data->cache_state.data.isCached,
- bo_data->cache_state.data.isDirtied,
- need_flush,
- cntFlush);
- }
-#endif
-
- return 1;
-}
-
-static int
-_bo_save_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
-
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- unsigned short cntFlush = 0;
-
- /* get global cache flush count */
- cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
-
- /* save global cache flush count */
- bo_data->cache_state.data.cntFlush = cntFlush;
- _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name,
- bo_data->cache_state.val);
-#endif
-
- return 1;
-}
-
-static void
-_bo_destroy_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
- TBM_BACKEND_RETURN_IF_FAIL(bo_data != NULL);
-
- if (bufmgr_data->use_dma_fence)
- return ;
-
- _tgl_destroy(bufmgr_data->tgl_fd, bo_data->name);
-#endif
-}
-
-static int
-_bufmgr_init_cache_state(tbm_vc4_bufmgr *bufmgr_data)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
-
- if (bufmgr_data->use_dma_fence)
- return 1;
-
- /* open tgl fd for saving cache flush data */
- bufmgr_data->tgl_fd = open(tgl_devfile, O_RDWR);
-
- if (bufmgr_data->tgl_fd < 0) {
- bufmgr_data->tgl_fd = open(tgl_devfile1, O_RDWR);
- if (bufmgr_data->tgl_fd < 0) {
- TBM_BACKEND_ERR("fail to open global_lock:%s\n",
- tgl_devfile1);
- return 0;
- }
- }
-
-#ifdef TGL_GET_VERSION
- if (!_tgl_get_version(bufmgr_data->tgl_fd)) {
- TBM_BACKEND_ERR("fail to get tgl_version. tgl init failed.\n");
- close(bufmgr_data->tgl_fd);
- return 0;
- }
-#endif
-
- if (!_tgl_init(bufmgr_data->tgl_fd, GLOBAL_KEY)) {
- TBM_BACKEND_ERR("fail to initialize the tgl\n");
- close(bufmgr_data->tgl_fd);
- return 0;
- }
-#endif
-
- return 1;
-}
-
-static void
-_bufmgr_deinit_cache_state(tbm_vc4_bufmgr *bufmgr_data)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
-
- if (bufmgr_data->use_dma_fence)
- return;
-
- if (bufmgr_data->tgl_fd >= 0)
- close(bufmgr_data->tgl_fd);
-#endif
-}
-
-static int
-_tbm_vc4_open_drm()
-{
- int fd = -1;
-
- fd = drmOpen(VC4_DRM_NAME, NULL);
- if (fd < 0) {
- TBM_BACKEND_ERR("fail to open drm.(%s)\n", VC4_DRM_NAME);
- }
-
- if (fd < 0) {
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
- const char *filepath;
- struct stat s;
- int ret;
-
- TBM_BACKEND_DBG("search drm-device by udev\n");
-
- udev = udev_new();
- if (!udev) {
- TBM_BACKEND_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- udev_enumerate_add_match_sysname(e, "card[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
- /* Not need unref device_parent. device_parent and device have same refcnt */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
- drm_device = device;
- TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
-
- /* Get device file path. */
- filepath = udev_device_get_devnode(drm_device);
- if (!filepath) {
- TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- /* Open DRM device file and check validity. */
- fd = open(filepath, O_RDWR | O_CLOEXEC);
- if (fd < 0) {
- TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- ret = fstat(fd, &s);
- if (ret) {
- TBM_BACKEND_ERR("fstat() failed %s.\n");
- close(fd);
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- udev_device_unref(drm_device);
- udev_unref(udev);
- }
-
- return fd;
-}
-
-#if 0 // render node functions.
-static int
-_check_render_node(void)
-{
-#ifndef USE_RENDER_NODE
- return 0;
-#else
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
-
- udev = udev_new();
- if (!udev) {
- TBM_BACKEND_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
- /* Not need unref device_parent. device_parent and device have same refcnt */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
- drm_device = device;
- TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
- udev_unref(udev);
-
- if (!drm_device) {
- udev_device_unref(drm_device);
- return 0;
- }
-
- udev_device_unref(drm_device);
- return 1;
-#endif
-}
-
-static int
-_get_render_node(void)
-{
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
- const char *filepath;
- struct stat s;
- int fd = -1;
- int ret;
-
- udev = udev_new();
- if (!udev) {
- TBM_BACKEND_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
- /* Not need unref device_parent. device_parent and device have same refcnt */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
- drm_device = device;
- TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
-
- /* Get device file path. */
- filepath = udev_device_get_devnode(drm_device);
- if (!filepath) {
- TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- /* Open DRM device file and check validity. */
- fd = open(filepath, O_RDWR | O_CLOEXEC);
- if (fd < 0) {
- TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- ret = fstat(fd, &s);
- if (ret) {
- TBM_BACKEND_ERR("fstat() failed %s.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- close(fd);
- return -1;
- }
-
- udev_device_unref(drm_device);
- udev_unref(udev);
-
- return fd;
-}
-#endif
-
-static unsigned int
-_get_name(int fd, unsigned int gem)
-{
- struct drm_gem_flink arg = {0,};
-
- arg.handle = gem;
- if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
- TBM_BACKEND_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
- return 0;
- }
-
- return (unsigned int)arg.name;
-}
-
-static hal_tbm_bo_handle
-_vc4_bo_handle(tbm_vc4_bo *bo_data, int device)
-{
- hal_tbm_bo_handle bo_handle;
-
- memset(&bo_handle, 0x0, sizeof(uint64_t));
-
- switch (device) {
- case HAL_TBM_DEVICE_DEFAULT:
- case HAL_TBM_DEVICE_2D:
- bo_handle.u32 = (uint32_t)bo_data->gem;
- break;
- case HAL_TBM_DEVICE_CPU:
- if (!bo_data->pBase) {
- struct drm_vc4_mmap_bo arg = {0, };
- void *map = NULL;
-
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_VC4_MMAP_BO, &arg)) {
- TBM_BACKEND_ERR("Cannot map_vc4 gem=%d\n", bo_data->gem);
- return (hal_tbm_bo_handle) NULL;
- }
-
- map = mmap(NULL, bo_data->size, PROT_READ | PROT_WRITE, MAP_SHARED,
- bo_data->fd, arg.offset);
- if (map == MAP_FAILED) {
- TBM_BACKEND_ERR("Cannot usrptr gem=%d\n", bo_data->gem);
- return (hal_tbm_bo_handle) NULL;
- }
- bo_data->pBase = map;
- }
- bo_handle.ptr = (void *)bo_data->pBase;
- break;
- case HAL_TBM_DEVICE_3D:
- case HAL_TBM_DEVICE_MM:
- if (!bo_data->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
- return (hal_tbm_bo_handle) NULL;
- }
- bo_data->dmabuf = arg.fd;
- }
-
- bo_handle.u32 = (uint32_t)bo_data->dmabuf;
- break;
- default:
- TBM_BACKEND_ERR("Not supported device:%d\n", device);
- bo_handle.ptr = (void *) NULL;
- break;
- }
-
- return bo_handle;
-}
-
-static hal_tbm_bufmgr_capability
-tbm_vc4_bufmgr_get_capabilities(hal_tbm_bufmgr *bufmgr, hal_tbm_error *error)
-{
- hal_tbm_bufmgr_capability capabilities = HAL_TBM_BUFMGR_CAPABILITY_NONE;
-
-#ifdef VC4_TILED_FORMAT
- capabilities = HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY|HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD|HAL_TBM_BUFMGR_CAPABILITY_TILED_MEMORY;
-#else
- capabilities = HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY|HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD;
-#endif
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return capabilities;
-}
-
-static hal_tbm_error
-tbm_vc4_bufmgr_get_supported_formats(hal_tbm_bufmgr *bufmgr,
- uint32_t **formats, uint32_t *num)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
- uint32_t *color_formats;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
-
- color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
- if (color_formats == NULL)
- return HAL_TBM_ERROR_OUT_OF_MEMORY;
-
- memcpy(color_formats, tbm_vc4_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
-
- *formats = color_formats;
- *num = TBM_COLOR_FORMAT_COUNT;
-
- TBM_BACKEND_DBG("supported format count = %d\n", *num);
-
- return HAL_TBM_ERROR_NONE;
-}
-
-
-#ifdef VC4_TILED_FORMAT
-#include <drm_fourcc.h>
-static inline uint32_t
-vc4_utile_width(int cpp)
-{
- switch (cpp) {
- case 1:
- case 2:
- return 8;
- case 4:
- return 4;
- case 8:
- return 2;
- default:
- return 4;
- }
-}
-
-static inline uint32_t
-vc4_utile_height(int cpp)
-{
- switch (cpp) {
- case 1:
- return 8;
- case 2:
- case 4:
- case 8:
- return 4;
- default:
- return 4;
- }
-}
-
-static inline bool
-vc4_size_is_lt(uint32_t width, uint32_t height, int cpp)
-{
- return (width <= 4 * vc4_utile_width(cpp) ||
- height <= 4 * vc4_utile_height(cpp));
-}
-
-static hal_tbm_bo *
-tbm_vc4_bufmgr_alloc_bo_with_tiled_format(hal_tbm_bufmgr *bufmgr, int width, int height,
- int cpp, int format, hal_tbm_bo_memory_type flags, int bo_idx, hal_tbm_error *err)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
- tbm_vc4_bo *bo_data;
- uint32_t utile_w = vc4_utile_width(cpp);
- uint32_t utile_h = vc4_utile_height(cpp);
- uint32_t level_width, level_height;
- int size;
- uint32_t stride;
-
-
- level_width = width;
- level_height = height;
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr is null\n");
- return NULL;
- }
-
- if (vc4_size_is_lt(level_width, level_height, cpp)) {
- level_width = SIZE_ALIGN(level_width, utile_w);
- level_height = SIZE_ALIGN(level_height, utile_h);
- } else {
- level_width = SIZE_ALIGN(level_width,
- 4 * 2 * utile_w);
- level_height = SIZE_ALIGN(level_height,
- 4 * 2 * utile_h);
- }
-
- stride = level_width * cpp;
-
- size = level_height * stride;
- size = SIZE_ALIGN(size, 4096);
-
-
- bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
- if (!bo_data) {
- TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
- return NULL;
- }
- bo_data->bufmgr_data = bufmgr_data;
-
- struct drm_vc4_create_bo arg = {0, };
-
- arg.size = (__u32)size;
- arg.flags = flags;/*currently no values for the flags,but it may be used in future extension*/
- if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
- TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
- (unsigned int)arg.size);
- free(bo_data);
- return NULL;
- }
-
- bo_data->fd = bufmgr_data->fd;
- bo_data->gem = (unsigned int)arg.handle;
- bo_data->size = size;
- bo_data->flags_tbm = flags;
- bo_data->name = _get_name(bo_data->fd, bo_data->gem);
-
- if (!_bo_init_cache_state(bufmgr_data, bo_data, 0)) {
- TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
- free(bo_data);
- return NULL;
- }
-
- pthread_mutex_init(&bo_data->mutex, NULL);
-
- if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
- free(bo_data);
- return NULL;
- }
- bo_data->dmabuf = arg.fd;
- }
-
- //set modifier
- uint64_t modifier;
- modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
- struct drm_vc4_set_tiling set_tiling = {
- .handle = bo_data->gem,
- .modifier = modifier,
- };
- drmIoctl(bo_data->fd, DRM_IOCTL_VC4_SET_TILING, &set_tiling);
-
-
- /* add bo_data to hash */
- if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
- TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->flags_tbm,
- bo_data->size);
-
- return (hal_tbm_bo *)bo_data;
-}
-#endif
-
-static int
-_tbm_vc4_bufmgr_get_num_planes(hal_tbm_format format)
-{
- int num_planes = 0;
-
- switch (format) {
- case HAL_TBM_FORMAT_C8:
- case HAL_TBM_FORMAT_RGB332:
- case HAL_TBM_FORMAT_BGR233:
- case HAL_TBM_FORMAT_XRGB4444:
- case HAL_TBM_FORMAT_XBGR4444:
- case HAL_TBM_FORMAT_RGBX4444:
- case HAL_TBM_FORMAT_BGRX4444:
- case HAL_TBM_FORMAT_ARGB4444:
- case HAL_TBM_FORMAT_ABGR4444:
- case HAL_TBM_FORMAT_RGBA4444:
- case HAL_TBM_FORMAT_BGRA4444:
- case HAL_TBM_FORMAT_XRGB1555:
- case HAL_TBM_FORMAT_XBGR1555:
- case HAL_TBM_FORMAT_RGBX5551:
- case HAL_TBM_FORMAT_BGRX5551:
- case HAL_TBM_FORMAT_ARGB1555:
- case HAL_TBM_FORMAT_ABGR1555:
- case HAL_TBM_FORMAT_RGBA5551:
- case HAL_TBM_FORMAT_BGRA5551:
- case HAL_TBM_FORMAT_RGB565:
- case HAL_TBM_FORMAT_BGR565:
- case HAL_TBM_FORMAT_RGB888:
- case HAL_TBM_FORMAT_BGR888:
- case HAL_TBM_FORMAT_XRGB8888:
- case HAL_TBM_FORMAT_XBGR8888:
- case HAL_TBM_FORMAT_RGBX8888:
- case HAL_TBM_FORMAT_BGRX8888:
- case HAL_TBM_FORMAT_ARGB8888:
- case HAL_TBM_FORMAT_ABGR8888:
- case HAL_TBM_FORMAT_RGBA8888:
- case HAL_TBM_FORMAT_BGRA8888:
- case HAL_TBM_FORMAT_XRGB2101010:
- case HAL_TBM_FORMAT_XBGR2101010:
- case HAL_TBM_FORMAT_RGBX1010102:
- case HAL_TBM_FORMAT_BGRX1010102:
- case HAL_TBM_FORMAT_ARGB2101010:
- case HAL_TBM_FORMAT_ABGR2101010:
- case HAL_TBM_FORMAT_RGBA1010102:
- case HAL_TBM_FORMAT_BGRA1010102:
- case HAL_TBM_FORMAT_YUYV:
- case HAL_TBM_FORMAT_YVYU:
- case HAL_TBM_FORMAT_UYVY:
- case HAL_TBM_FORMAT_VYUY:
- case HAL_TBM_FORMAT_AYUV:
- num_planes = 1;
- break;
- case HAL_TBM_FORMAT_NV12:
- case HAL_TBM_FORMAT_NV12MT:
- case HAL_TBM_FORMAT_NV21:
- case HAL_TBM_FORMAT_NV16:
- case HAL_TBM_FORMAT_NV61:
- num_planes = 2;
- break;
- case HAL_TBM_FORMAT_YUV410:
- case HAL_TBM_FORMAT_YVU410:
- case HAL_TBM_FORMAT_YUV411:
- case HAL_TBM_FORMAT_YVU411:
- case HAL_TBM_FORMAT_YUV420:
- case HAL_TBM_FORMAT_YVU420:
- case HAL_TBM_FORMAT_YUV422:
- case HAL_TBM_FORMAT_YVU422:
- case HAL_TBM_FORMAT_YUV444:
- case HAL_TBM_FORMAT_YVU444:
- num_planes = 3;
- break;
-
- default:
- num_planes = 0;
- TBM_BACKEND_ERR("Invalid format : %d", format);
- break;
- }
-
- return num_planes;
-}
-
-static hal_tbm_error
-tbm_vc4_bufmgr_get_plane_data(hal_tbm_bufmgr *bufmgr,
- hal_tbm_format format, int plane_idx, int width,
- int height, uint32_t *size, uint32_t *offset,
- uint32_t *pitch, int *bo_idx)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
- int bpp;
- int _offset = 0;
- int _pitch = 0;
- int _size = 0;
- int _bo_idx = 0;
- int _align_height = 0;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
-
- switch (format) {
- /* 16 bpp RGB */
- case HAL_TBM_FORMAT_XRGB4444:
- case HAL_TBM_FORMAT_XBGR4444:
- case HAL_TBM_FORMAT_RGBX4444:
- case HAL_TBM_FORMAT_BGRX4444:
- case HAL_TBM_FORMAT_ARGB4444:
- case HAL_TBM_FORMAT_ABGR4444:
- case HAL_TBM_FORMAT_RGBA4444:
- case HAL_TBM_FORMAT_BGRA4444:
- case HAL_TBM_FORMAT_XRGB1555:
- case HAL_TBM_FORMAT_XBGR1555:
- case HAL_TBM_FORMAT_RGBX5551:
- case HAL_TBM_FORMAT_BGRX5551:
- case HAL_TBM_FORMAT_ARGB1555:
- case HAL_TBM_FORMAT_ABGR1555:
- case HAL_TBM_FORMAT_RGBA5551:
- case HAL_TBM_FORMAT_BGRA5551:
- case HAL_TBM_FORMAT_RGB565:
- bpp = 16;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
- _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- break;
- /* 24 bpp RGB */
- case HAL_TBM_FORMAT_RGB888:
- case HAL_TBM_FORMAT_BGR888:
- bpp = 24;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
- _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- break;
- /* 32 bpp RGB */
- case HAL_TBM_FORMAT_XRGB8888:
- case HAL_TBM_FORMAT_XBGR8888:
- case HAL_TBM_FORMAT_RGBX8888:
- case HAL_TBM_FORMAT_BGRX8888:
- case HAL_TBM_FORMAT_ARGB8888:
- case HAL_TBM_FORMAT_ABGR8888:
- case HAL_TBM_FORMAT_RGBA8888:
- case HAL_TBM_FORMAT_BGRA8888:
- bpp = 32;
- _offset = 0;
-#ifdef VC4_TILED_FORMAT
- if (vc4_size_is_lt(width, height, 4)) {
- width = SIZE_ALIGN(width, vc4_utile_width(4));
- height = SIZE_ALIGN(height, vc4_utile_height(4));
-
- } else {
- width = SIZE_ALIGN(width, 32);
- uint32_t utile_h = vc4_utile_height(bpp);
- height = SIZE_ALIGN(height, 8*utile_h);
- }
-#endif
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
- _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- break;
-
- /* packed YCbCr */
- case HAL_TBM_FORMAT_YUYV:
- case HAL_TBM_FORMAT_YVYU:
- case HAL_TBM_FORMAT_UYVY:
- case HAL_TBM_FORMAT_VYUY:
- case HAL_TBM_FORMAT_AYUV:
- bpp = 32;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- break;
-
- /*
- * 2 plane YCbCr
- * index 0 = Y plane, [7:0] Y
- * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
- * or
- * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
- */
- case HAL_TBM_FORMAT_NV12:
- case HAL_TBM_FORMAT_NV21:
- bpp = 12;
- /*if (plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if (plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- case HAL_TBM_FORMAT_NV16:
- case HAL_TBM_FORMAT_NV61:
- bpp = 16;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if( plane_idx ==1 )*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
-
- /*
- * 3 plane YCbCr
- * index 0: Y plane, [7:0] Y
- * index 1: Cb plane, [7:0] Cb
- * index 2: Cr plane, [7:0] Cr
- * or
- * index 1: Cr plane, [7:0] Cr
- * index 2: Cb plane, [7:0] Cb
- */
-
- /*
- * NATIVE_BUFFER_FORMAT_YV12
- * NATIVE_BUFFER_FORMAT_I420
- */
- case HAL_TBM_FORMAT_YUV410:
- case HAL_TBM_FORMAT_YVU410:
- bpp = 9;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
- _align_height = SIZE_ALIGN(height / 4, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
- _align_height = SIZE_ALIGN(height / 4, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- case HAL_TBM_FORMAT_YUV411:
- case HAL_TBM_FORMAT_YVU411:
- case HAL_TBM_FORMAT_YUV420:
- case HAL_TBM_FORMAT_YVU420:
- bpp = 12;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV / 2);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV / 2);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- case HAL_TBM_FORMAT_YUV422:
- case HAL_TBM_FORMAT_YVU422:
- bpp = 16;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- case HAL_TBM_FORMAT_YUV444:
- case HAL_TBM_FORMAT_YVU444:
- bpp = 24;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- default:
- bpp = 0;
- break;
- }
-
- *size = _size;
- *offset = _offset;
- *pitch = _pitch;
- *bo_idx = _bo_idx;
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static void
-_tbm_vc4_surface_data_set_bo_data(tbm_vc4_surface *surface_data, tbm_vc4_bo *bo_data)
-{
- surface_data->bo_data = bo_data;
- surface_data->num_bos++;
-}
-
-static void
-_tbm_vc4_surface_data_destroy(tbm_vc4_surface *surface_data)
-{
- surface_data->refcnt--;
- if (surface_data->refcnt > 0) {
- TBM_BACKEND_INFO("surface_data->refcnt:%d", surface_data->refcnt);
- return;
- }
-
- TBM_BACKEND_INFO("surface_data destroy");
-
- if (surface_data->bo_data) {
- tbm_vc4_bo_free((hal_tbm_bo *)surface_data->bo_data);
- surface_data->bo_data = NULL;
- }
-
- LIST_DEL(&surface_data->link);
- free(surface_data);
-}
-
-static tbm_vc4_surface *
-_tbm_vc4_surface_data_create(tbm_vc4_bufmgr *bufmgr_data, uint32_t width, uint32_t height, hal_tbm_format format, hal_tbm_error *error)
-{
- tbm_vc4_surface *surface_data;
-
- surface_data = calloc(1, sizeof(struct _tbm_vc4_surface));
- if (!surface_data) {
- TBM_BACKEND_ERR("fail to allocate the surface_data");
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- surface_data->width = width;
- surface_data->height = height;
- surface_data->format = format;
- surface_data->bufmgr_data = bufmgr_data;
- surface_data->refcnt = 1;
-
- LIST_ADDTAIL(&surface_data->link, &bufmgr_data->surface_data_list);
-
- return surface_data;
-}
-
-static hal_tbm_surface *
-tbm_vc4_bufmgr_alloc_surface(hal_tbm_bufmgr *bufmgr, uint32_t width, uint32_t height, hal_tbm_format format,
- hal_tbm_bo_memory_type mem_types, uint64_t *modifiers, uint32_t num_modifiers, hal_tbm_error *error)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
- tbm_vc4_surface *surface_data;
- tbm_vc4_bo *bo_data;
- uint32_t size = 0, offset = 0, pitch = 0;
- int bo_idx = 0, bo_size = 0;
-	int i, num_planes;
-	hal_tbm_error ret;
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr is null");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- surface_data = _tbm_vc4_surface_data_create(bufmgr_data, width, height, format, error);
- if (!surface_data) {
- TBM_BACKEND_ERR("fail to create surface_data");
- return NULL;
- }
-
- num_planes = _tbm_vc4_bufmgr_get_num_planes(format);
- if (num_planes == 0) {
- TBM_BACKEND_ERR("fail to get num_planes");
- _tbm_vc4_surface_data_destroy(surface_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
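-	/* A vc4 surface is backed by a single bo: sum the per-plane sizes to get the total allocation size. */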
-	for (i = 0; i < num_planes; i++) {
-		ret = tbm_vc4_bufmgr_get_plane_data(bufmgr, format, i, (int)width, (int)height, &size, &offset, &pitch, &bo_idx);
-		if (ret != HAL_TBM_ERROR_NONE) {
-			_tbm_vc4_surface_data_destroy(surface_data);
-			TBM_BACKEND_ERR("fail to get plane_data");
-			if (error)
-				*error = ret;
-			return NULL;
-		}
-		bo_size += size;
-	}
-
- bo_data = tbm_vc4_bufmgr_alloc_bo(bufmgr, bo_size, mem_types, error);
- if (!bo_data) {
- TBM_BACKEND_ERR("fail to allocate the bo_data");
- _tbm_vc4_surface_data_destroy(surface_data);
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- _tbm_vc4_surface_data_set_bo_data(surface_data, bo_data);
-
- return (hal_tbm_surface *)surface_data;
-}
-
-static tbm_vc4_surface *
-_tbm_vc4_surface_find_same_surface(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data)
-{
- tbm_vc4_surface *s = NULL;
-
- LIST_FOR_EACH_ENTRY(s, &bufmgr_data->surface_data_list, link) {
- if (s->bo_data == bo_data) {
- return s;
- }
- }
-
- return NULL;
-}
-
-static hal_tbm_surface *
-tbm_vc4_bufmgr_import_surface(hal_tbm_bufmgr *bufmgr, uint32_t width, uint32_t height, hal_tbm_format format,
- hal_tbm_surface_buffer_data *buffer_data, hal_tbm_error *error)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
- tbm_vc4_surface *surface_data, *surface_data1;
- tbm_vc4_bo *bo_data;
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr is null");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- if (buffer_data == NULL) {
- TBM_BACKEND_ERR("buffer_data is null");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
-	// The vc4 backend only supports importing a surface with a single dmabuf fd.
- if (buffer_data->num_fds != 1) {
- TBM_BACKEND_ERR("buffer_data->num_fds MUST BE 1.");
- TBM_BACKEND_ERR("vc4 backend can only import surface with just one dmabuf-fd.");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- if (buffer_data->fds == NULL) {
- TBM_BACKEND_ERR("buffer_data->fds is null");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- surface_data = _tbm_vc4_surface_data_create(bufmgr_data, width, height, format, error);
- if (!surface_data) {
- TBM_BACKEND_ERR("fail to create surface_data");
- return NULL;
- }
-
- bo_data = tbm_vc4_bufmgr_import_fd(bufmgr, buffer_data->fds[0], error);
- if (!bo_data) {
- TBM_BACKEND_ERR("fail to import the bo_data");
- _tbm_vc4_surface_data_destroy(surface_data);
- return NULL;
- }
-
- // reuse the surface_data when there is a surface_data which already has the same bo_data.
- surface_data1 = _tbm_vc4_surface_find_same_surface(bufmgr_data, bo_data);
- if (surface_data1) {
- _tbm_vc4_surface_data_destroy(surface_data);
- surface_data = surface_data1;
- surface_data->refcnt++;
- } else {
- _tbm_vc4_surface_data_set_bo_data(surface_data, bo_data);
- }
-
- return (hal_tbm_surface *)surface_data;
-}
-
-static hal_tbm_bo *
-tbm_vc4_bufmgr_alloc_bo(hal_tbm_bufmgr *bufmgr, unsigned int size,
- hal_tbm_bo_memory_type flags, hal_tbm_error *error)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
- tbm_vc4_bo *bo_data;
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr is null\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
- if (!bo_data) {
- TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_data->bufmgr_data = bufmgr_data;
-
- struct drm_vc4_create_bo arg = {0, };
-
- arg.size = (__u32)size;
-	arg.flags = flags; /* currently no values are defined for the flags, but it may be used in a future extension */
- if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
- TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
- (unsigned int)arg.size);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- bo_data->fd = bufmgr_data->fd;
- bo_data->gem = (unsigned int)arg.handle;
- bo_data->size = size;
- bo_data->flags_tbm = flags;
- bo_data->name = _get_name(bo_data->fd, bo_data->gem);
-
- if (!_bo_init_cache_state(bufmgr_data, bo_data, 0)) {
- TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- pthread_mutex_init(&bo_data->mutex, NULL);
-
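-	/* When dma-fence is supported, export a dmabuf fd up front; bo_lock()/bo_unlock() use it for DMABUF_IOCTL_GET_FENCE/PUT_FENCE. */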
- if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
- bo_data->dmabuf = arg.fd;
- }
-
- /* add bo_data to hash */
- if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
- TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d, size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->flags_tbm,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_bo *)bo_data;
-}
-
-static hal_tbm_bo *
-tbm_vc4_bufmgr_import_fd(hal_tbm_bufmgr *bufmgr, hal_tbm_fd key, hal_tbm_error *error)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
- tbm_vc4_bo *bo_data;
- unsigned int gem = 0;
- unsigned int name;
- int ret;
- char buf[STRERR_BUFSIZE];
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr is null\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- /*getting handle from fd*/
- struct drm_prime_handle arg = {0, };
-
- arg.fd = key;
- arg.flags = 0;
- if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
- TBM_BACKEND_ERR("Cannot get gem handle from fd:%d (%s)\n",
- arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
- gem = arg.handle;
-
- name = _get_name(bufmgr_data->fd, gem);
- if (!name) {
- TBM_BACKEND_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
- gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- ret = drmHashLookup(bufmgr_data->hashBos, name, (void **)&bo_data);
- if (ret == 0) {
- if (gem == bo_data->gem) {
- if (error)
- *error = HAL_TBM_ERROR_NONE;
- return bo_data;
- }
- }
-
-	/* Determine the size of bo_data. The fd-to-handle ioctl really should
-	 * return the size, but it doesn't. On kernel 3.12 or later we can
-	 * lseek() on the prime fd to get the size. Older kernels will just
-	 * fail, in which case we fall back to the size reported by
-	 * DRM_IOCTL_GEM_OPEN below.
-	 */
- unsigned int real_size = -1;
- struct drm_gem_open open_arg = {0, };
-
- real_size = lseek(key, 0, SEEK_END);
-
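-	/* DRM_IOCTL_GEM_OPEN is used here only to query the object size as a fallback; the temporary handle is closed again right below. */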
- open_arg.name = name;
- if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
- TBM_BACKEND_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
- gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- /* Free gem handle to avoid a memory leak*/
- struct drm_gem_close close_arg = {0, };
- memset(&close_arg, 0, sizeof(close_arg));
- close_arg.handle = open_arg.handle;
- if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_CLOSE, &close_arg)) {
-		TBM_BACKEND_ERR("Cannot close gem_handle.(%s)\n",
-			strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- if (real_size == -1)
- real_size = open_arg.size;
-
- bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
- if (!bo_data) {
- TBM_BACKEND_ERR("bo_data:%p fail to allocate the bo_data\n", bo_data);
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_data->bufmgr_data = bufmgr_data;
-
- bo_data->fd = bufmgr_data->fd;
- bo_data->gem = gem;
- bo_data->size = real_size;
- bo_data->name = name;
- bo_data->flags_tbm = 0;
-
-#ifdef VC4_TILED_FORMAT
- struct drm_vc4_get_tiling get_tiling = {
- .handle = bo_data->gem,
- };
- drmIoctl(bo_data->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
-
- if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
- bo_data->flags_tbm |= HAL_TBM_BO_TILED;
-#endif
-
- if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
- TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- /* add bo_data to hash */
- if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
- TBM_BACKEND_ERR("bo_data:%p Cannot insert bo_data to Hash(%d) from gem:%d, fd:%d\n",
- bo_data, bo_data->name, gem, key);
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- key,
- bo_data->flags_tbm,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_bo *)bo_data;
-}
-
-static hal_tbm_bo *
-tbm_vc4_bufmgr_import_key(hal_tbm_bufmgr *bufmgr, hal_tbm_key key, hal_tbm_error *error)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
- tbm_vc4_bo *bo_data;
- int ret;
-
- if (bufmgr_data == NULL) {
- TBM_BACKEND_ERR("bufmgr is null\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- ret = drmHashLookup(bufmgr_data->hashBos, key, (void **)&bo_data);
- if (ret == 0) {
- if (error)
- *error = HAL_TBM_ERROR_NONE;
- return (hal_tbm_bo *)bo_data;
- }
-
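-	/* Not found in the hash: open the GEM object by its flink name (key). */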
- struct drm_gem_open arg = {0, };
-
- arg.name = key;
- if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
- TBM_BACKEND_ERR("Cannot open gem name=%d\n", key);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
- if (!bo_data) {
- TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_data->bufmgr_data = bufmgr_data;
-
- bo_data->fd = bufmgr_data->fd;
- bo_data->gem = arg.handle;
- bo_data->size = arg.size;
- bo_data->name = key;
- bo_data->flags_tbm = 0;
-
-#ifdef VC4_TILED_FORMAT
- struct drm_vc4_get_tiling get_tiling = {
- .handle = bo_data->gem,
- };
- drmIoctl(bo_data->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
-
- if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
- bo_data->flags_tbm |= HAL_TBM_BO_TILED;
-#endif
-
- if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
- TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
- free(bo_data);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- if (!bo_data->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_BACKEND_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_data->gem);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- free(bo_data);
- return NULL;
- }
- bo_data->dmabuf = arg.fd;
- }
-
- /* add bo_data to hash */
- if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
- TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- bo_data->flags_tbm,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_bo *)bo_data;
-}
-
-static void
-tbm_vc4_surface_free(hal_tbm_surface *surface)
-{
- tbm_vc4_surface *surface_data = (tbm_vc4_surface *)surface;
-
- if (!surface_data)
- return;
-
- _tbm_vc4_surface_data_destroy(surface_data);
-}
-
-static hal_tbm_bo **
-tbm_vc4_surface_get_bos(hal_tbm_surface *surface, int *num_bos, hal_tbm_error *error)
-{
- tbm_vc4_surface *surface_data = (tbm_vc4_surface *)surface;
-	hal_tbm_bo **bos;
-
- if (surface_data == NULL) {
- TBM_BACKEND_ERR("surface_data is null\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- if (num_bos == NULL) {
- TBM_BACKEND_ERR("num_bos is null\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- *num_bos = surface_data->num_bos;
-
- /* will be freed in frontend */
-	bos = calloc(*num_bos, sizeof(hal_tbm_bo *));
-	if (!bos) {
-		TBM_BACKEND_ERR("failed: alloc bos");
-		if (error)
-			*error = HAL_TBM_ERROR_OUT_OF_MEMORY;
-		return NULL;
-	}
-	bos[0] = (hal_tbm_bo *)surface_data->bo_data;
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bos;
-}
-
-static hal_tbm_error
-tbm_vc4_surface_get_plane_data(hal_tbm_surface *surface, int plane_idx, uint32_t *size, uint32_t *offset, uint32_t *pitch, int *bo_idx)
-{
- tbm_vc4_surface *surface_data = (tbm_vc4_surface *)surface;
- hal_tbm_error error;
- tbm_vc4_bufmgr *bufmgr;
- uint32_t width, height;
- hal_tbm_format format;
-
- if (surface_data == NULL) {
- TBM_BACKEND_ERR("surface_data is null");
- return HAL_TBM_ERROR_INVALID_PARAMETER;
- }
-
- bufmgr = surface_data->bufmgr_data;
- width = surface_data->width;
- height = surface_data->height;
- format = surface_data->format;
-
- error = tbm_vc4_bufmgr_get_plane_data((tbm_vc4_bufmgr *)bufmgr, format, plane_idx,
- (int)width, (int)height, size, offset, pitch, bo_idx);
- if (error != HAL_TBM_ERROR_NONE) {
- TBM_BACKEND_ERR("fail to get plane_data");
- return HAL_TBM_ERROR_INVALID_PARAMETER;
- }
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static hal_tbm_surface_buffer_data *
-tbm_vc4_surface_export(hal_tbm_surface *surface, hal_tbm_error *error)
-{
- tbm_vc4_surface *surface_data = (tbm_vc4_surface *)surface;
- hal_tbm_surface_buffer_data *buffer_data;
-
- if (surface == NULL) {
- TBM_BACKEND_ERR("surface is null");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- buffer_data = calloc(1, sizeof(struct _hal_tbm_surface_buffer_data));
- if (!buffer_data) {
- TBM_BACKEND_ERR("fail to allocate a buffer_data");
- if (error)
- *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- buffer_data->num_fds = surface_data->num_bos;
-
- /* will be freed in frontend */
- buffer_data->fds = calloc(buffer_data->num_fds, sizeof(int));
-	if (!buffer_data->fds) {
-		TBM_BACKEND_ERR("failed: alloc fds");
-		if (error)
-			*error = HAL_TBM_ERROR_OUT_OF_MEMORY;
- free(buffer_data);
- return NULL;
- }
-
- buffer_data->fds[0] = tbm_vc4_bo_export_fd(surface_data->bo_data, error);
- if (buffer_data->fds[0] < 0) {
- TBM_BACKEND_ERR("fail to export bo_data");
- free(buffer_data->fds);
- free(buffer_data);
- return NULL;
- }
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return buffer_data;
-}
-
-static void
-tbm_vc4_bo_free(hal_tbm_bo *bo)
-{
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
- tbm_vc4_bo *temp;
- tbm_vc4_bufmgr *bufmgr_data;
- char buf[STRERR_BUFSIZE];
- int ret;
-
- if (!bo_data)
- return;
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data)
- return;
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- bo_data->size);
-
- if (bo_data->pBase) {
- if (munmap(bo_data->pBase, bo_data->size) == -1) {
- TBM_BACKEND_ERR("bo_data:%p fail to munmap(%s)\n",
- bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
- }
- }
-
- /* close dmabuf */
- if (bo_data->dmabuf) {
- close(bo_data->dmabuf);
- bo_data->dmabuf = 0;
- }
-
- /* delete bo_data from hash */
- ret = drmHashLookup(bufmgr_data->hashBos, bo_data->name, (void **)&temp);
- if (ret == 0)
- drmHashDelete(bufmgr_data->hashBos, bo_data->name);
- else
- TBM_BACKEND_ERR("Cannot find bo_data to Hash(%d), ret=%d\n", bo_data->name, ret);
-
- if (temp != bo_data)
- TBM_BACKEND_ERR("hashBos probably has several BOs with same name!!!\n");
-
- _bo_destroy_cache_state(bufmgr_data, bo_data);
-
- /* Free gem handle */
- struct drm_gem_close arg = {0, };
-
- memset(&arg, 0, sizeof(arg));
- arg.handle = bo_data->gem;
- if (drmIoctl(bo_data->fd, DRM_IOCTL_GEM_CLOSE, &arg))
- TBM_BACKEND_ERR("bo_data:%p fail to gem close.(%s)\n",
- bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
-
- free(bo_data);
-}
-
-static int
-tbm_vc4_bo_get_size(hal_tbm_bo *bo, hal_tbm_error *error)
-{
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bo_data->size;
-}
-
-static hal_tbm_bo_memory_type
-tbm_vc4_bo_get_memory_type(hal_tbm_bo *bo, hal_tbm_error *error)
-{
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return HAL_TBM_BO_DEFAULT;
- }
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bo_data->flags_tbm;
-}
-
-static hal_tbm_bo_handle
-tbm_vc4_bo_get_handle(hal_tbm_bo *bo, hal_tbm_bo_device_type device, hal_tbm_error *error)
-{
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
- hal_tbm_bo_handle bo_handle;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- if (!bo_data->gem) {
- TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- TBM_BACKEND_DBG("bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- bo_data->flags_tbm,
- bo_data->size,
- STR_DEVICE[device]);
-
- /*Get mapped bo_handle*/
- bo_handle = _vc4_bo_handle(bo_data, device);
- if (bo_handle.ptr == NULL) {
- TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d\n",
- bo_data->gem, device);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return (hal_tbm_bo_handle) NULL;
- }
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bo_handle;
-}
-
-static hal_tbm_bo_handle
-tbm_vc4_bo_map(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
- hal_tbm_bo_access_option opt, hal_tbm_error *error)
-{
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
- hal_tbm_bo_handle bo_handle;
- tbm_vc4_bufmgr *bufmgr_data;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- if (!bo_data->gem) {
- TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return (hal_tbm_bo_handle) NULL;
- }
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, %s, %s\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- STR_DEVICE[device],
- STR_OPT[opt]);
-
- /*Get mapped bo_handle*/
- bo_handle = _vc4_bo_handle(bo_data, device);
- if (bo_handle.ptr == NULL) {
- TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
- bo_data->gem, device, opt);
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return (hal_tbm_bo_handle) NULL;
- }
-
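-	/* Only the first mapping (re)evaluates the cache state; nested mappings reuse it. */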
- if (bo_data->map_cnt == 0)
- _bo_set_cache_state(bufmgr_data, bo_data, device, opt);
-
- bo_data->last_map_device = device;
-
- bo_data->map_cnt++;
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return bo_handle;
-}
-
-static hal_tbm_error
-tbm_vc4_bo_unmap(hal_tbm_bo *bo)
-{
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
- tbm_vc4_bufmgr *bufmgr_data;
-
- if (!bo_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- if (!bo_data->gem)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- bo_data->map_cnt--;
-
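-	/* Save the cache state back when the last mapping is released. */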
- if (bo_data->map_cnt == 0)
- _bo_save_cache_state(bufmgr_data, bo_data);
-
-#ifdef ENABLE_CACHECRTL
- if (bo_data->last_map_device == HAL_TBM_DEVICE_CPU)
- _vc4_cache_flush(bufmgr_data, bo_data, TBM_VC4_CACHE_FLUSH_ALL);
-#endif
-
- bo_data->last_map_device = -1;
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf);
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static hal_tbm_error
-tbm_vc4_bo_lock(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
- hal_tbm_bo_access_option opt)
-{
-#ifndef ALWAYS_BACKEND_CTRL
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
- tbm_vc4_bufmgr *bufmgr_data;
- struct dma_buf_fence fence;
- struct flock filelock;
- int ret = 0;
- char buf[STRERR_BUFSIZE];
-
- if (!bo_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- bufmgr_data = bo_data->bufmgr_data;
- if (!bufmgr_data)
- return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- if (device != HAL_TBM_DEVICE_3D && device != HAL_TBM_DEVICE_CPU) {
- TBM_BACKEND_DBG("Not support device type,\n");
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- memset(&fence, 0, sizeof(struct dma_buf_fence));
-
- /* Check if the given type is valid or not. */
- if (opt & TBM_OPTION_WRITE) {
- if (device == HAL_TBM_DEVICE_3D)
- fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
- } else if (opt & TBM_OPTION_READ) {
- if (device == HAL_TBM_DEVICE_3D)
- fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
- } else {
- TBM_BACKEND_ERR("Invalid argument\n");
- return HAL_TBM_ERROR_INVALID_PARAMETER;
- }
-
- /* Check if the tbm manager supports dma fence or not. */
- if (!bufmgr_data->use_dma_fence) {
- TBM_BACKEND_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return HAL_TBM_ERROR_INVALID_OPERATION;
-
- }
-
- if (device == HAL_TBM_DEVICE_3D) {
- ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
- if (ret < 0) {
- TBM_BACKEND_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
- } else {
- if (opt & TBM_OPTION_WRITE)
- filelock.l_type = F_WRLCK;
- else
- filelock.l_type = F_RDLCK;
-
- filelock.l_whence = SEEK_CUR;
- filelock.l_start = 0;
- filelock.l_len = 0;
-
- if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- pthread_mutex_lock(&bo_data->mutex);
-
- if (device == HAL_TBM_DEVICE_3D) {
- int i;
-
- for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
- if (bo_data->dma_fence[i].ctx == 0) {
- bo_data->dma_fence[i].type = fence.type;
- bo_data->dma_fence[i].ctx = fence.ctx;
- break;
- }
- }
-
- if (i == DMA_FENCE_LIST_MAX) {
- /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
- TBM_BACKEND_ERR("fence list is full\n");
- }
- }
-
- pthread_mutex_unlock(&bo_data->mutex);
-
-	TBM_BACKEND_DBG("DMABUF_IOCTL_GET_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf);
-#endif /* ALWAYS_BACKEND_CTRL */
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static hal_tbm_error
-tbm_vc4_bo_unlock(hal_tbm_bo *bo)
-{
-#ifndef ALWAYS_BACKEND_CTRL
-	tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
-	tbm_vc4_bufmgr *bufmgr_data;
-	struct dma_buf_fence fence;
-	struct flock filelock;
-	unsigned int dma_type = 0;
-	int ret = 0;
-	char buf[STRERR_BUFSIZE];
-
-	if (!bo_data)
-		return HAL_TBM_ERROR_INVALID_PARAMETER;
-
-	bufmgr_data = bo_data->bufmgr_data;
-	if (!bufmgr_data)
-		return HAL_TBM_ERROR_INVALID_PARAMETER;
-
- if (bo_data->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
- dma_type = 1;
-
- if (!bo_data->dma_fence[0].ctx && dma_type) {
- TBM_BACKEND_DBG("FENCE not support or ignored,\n");
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- if (!bo_data->dma_fence[0].ctx && dma_type) {
- TBM_BACKEND_DBG("device type is not 3D/CPU,\n");
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
- pthread_mutex_lock(&bo_data->mutex);
-
- if (dma_type) {
- fence.type = bo_data->dma_fence[0].type;
- fence.ctx = bo_data->dma_fence[0].ctx;
- int i;
-
- for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
- bo_data->dma_fence[i - 1].type = bo_data->dma_fence[i].type;
- bo_data->dma_fence[i - 1].ctx = bo_data->dma_fence[i].ctx;
- }
- bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
- bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
- }
-
- pthread_mutex_unlock(&bo_data->mutex);
-
- if (dma_type) {
- ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
- if (ret < 0) {
- TBM_BACKEND_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
- } else {
- filelock.l_type = F_UNLCK;
- filelock.l_whence = SEEK_CUR;
- filelock.l_start = 0;
- filelock.l_len = 0;
-
- if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
- return HAL_TBM_ERROR_INVALID_OPERATION;
- }
-
-	TBM_BACKEND_DBG("DMABUF_IOCTL_PUT_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf);
-#endif /* ALWAYS_BACKEND_CTRL */
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static hal_tbm_fd
-tbm_vc4_bo_export_fd(hal_tbm_bo *bo, hal_tbm_error *error)
-{
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
- int ret;
- char buf[STRERR_BUFSIZE];
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return -1;
- }
-
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_data->gem;
- ret = drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
- if (ret) {
- TBM_BACKEND_ERR("bo_data:%p Cannot dmabuf=%d (%s)\n",
- bo_data, bo_data->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = HAL_TBM_ERROR_INVALID_OPERATION;
- return (hal_tbm_fd) ret;
- }
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- arg.fd,
- bo_data->flags_tbm,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_fd)arg.fd;
-}
-
-static hal_tbm_key
-tbm_vc4_bo_export_key(hal_tbm_bo *bo, hal_tbm_error *error)
-{
- tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
-
- if (!bo_data) {
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
-
- if (!bo_data->name) {
- bo_data->name = _get_name(bo_data->fd, bo_data->gem);
- if (!bo_data->name) {
- TBM_BACKEND_ERR("error Cannot get name\n");
- if (error)
- *error = HAL_TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
- }
-
- TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
- bo_data,
- bo_data->gem, bo_data->name,
- bo_data->dmabuf,
- bo_data->flags_tbm,
- bo_data->size);
-
- if (error)
- *error = HAL_TBM_ERROR_NONE;
-
- return (hal_tbm_key)bo_data->name;
-}
-
-static hal_tbm_error
-_tbm_vc4_authenticated_drm_fd_handler(hal_tbm_fd auth_fd, void *user_data)
-{
- tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *) user_data;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
-
- bufmgr_data->fd = auth_fd;
- TBM_BACKEND_INFO("Get the authenticated drm_fd(%d)!\n", bufmgr_data->fd);
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static int
-hal_backend_tbm_vc4_exit(void *data)
-{
- hal_tbm_backend_data *backend_data = (hal_tbm_backend_data *)data;
- tbm_vc4_bufmgr *bufmgr_data;
- tbm_vc4_surface *s = NULL, *ss = NULL;
- unsigned long key;
- void *value;
-
- TBM_BACKEND_RETURN_VAL_IF_FAIL(backend_data != NULL, -1);
-
- bufmgr_data = (tbm_vc4_bufmgr *)backend_data->bufmgr;
- TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, -1);
-
- if (backend_data->bo_funcs)
- free(backend_data->bo_funcs);
- if (backend_data->surface_funcs)
- free(backend_data->surface_funcs);
- if (backend_data->bufmgr_funcs)
- free(backend_data->bufmgr_funcs);
-
- if (!LIST_IS_EMPTY(&bufmgr_data->surface_data_list)) {
- LIST_FOR_EACH_ENTRY_SAFE(s, ss, &bufmgr_data->surface_data_list, link) {
- LIST_DEL(&s->link);
- tbm_vc4_surface_free(s);
- }
- }
-
- if (bufmgr_data->hashBos) {
- while (drmHashFirst(bufmgr_data->hashBos, &key, &value) > 0) {
- drmHashDelete(bufmgr_data->hashBos, key);
- free(value);
- }
- drmHashDestroy(bufmgr_data->hashBos);
- }
-
- _bufmgr_deinit_cache_state(bufmgr_data);
-
- close(bufmgr_data->fd);
-
- free(backend_data->bufmgr);
- free(backend_data);
-
- return HAL_TBM_ERROR_NONE;
-}
-
-static int
-hal_backend_tbm_vc4_init(void **data)
-{
- hal_tbm_backend_data *backend_data = NULL;
- hal_tbm_bufmgr_funcs *bufmgr_funcs = NULL;
- hal_tbm_surface_funcs *surface_funcs = NULL;
- hal_tbm_bo_funcs *bo_funcs = NULL;
- tbm_vc4_bufmgr *bufmgr_data = NULL;
- int drm_fd = -1;
- int fp;
-
- /* allocate a hal_tbm_backend_data */
- backend_data = calloc(1, sizeof(struct _hal_tbm_backend_data));
- if (!backend_data) {
- TBM_BACKEND_ERR("fail to alloc backend_data!\n");
- *data = NULL;
- return -1;
- }
- *data = backend_data;
-
- /* allocate a hal_tbm_bufmgr */
- bufmgr_data = calloc(1, sizeof(struct _tbm_vc4_bufmgr));
- if (!bufmgr_data) {
- TBM_BACKEND_ERR("fail to alloc bufmgr_data!\n");
- goto fail_alloc_bufmgr_data;
- }
- backend_data->bufmgr = (hal_tbm_bufmgr *)bufmgr_data;
-
- // open drm_fd
- drm_fd = _tbm_vc4_open_drm();
- if (drm_fd < 0) {
- TBM_BACKEND_ERR("fail to open drm!\n");
- goto fail_open_drm;
- }
-
- // set true when backend has a drm_device.
- backend_data->has_drm_device = 1;
-
- // check if drm_fd is master_drm_fd.
- if (drmIsMaster(drm_fd)) {
- // drm_fd is a master_drm_fd.
- backend_data->drm_info.drm_fd = drm_fd;
- backend_data->drm_info.is_master = 1;
-
- bufmgr_data->fd = drm_fd;
- TBM_BACKEND_INFO("Get the master drm_fd(%d)!\n", bufmgr_data->fd);
- } else {
- // drm_fd is not a master_drm_fd.
- // request authenticated fd
- close(drm_fd);
- backend_data->drm_info.drm_fd = -1;
- backend_data->drm_info.is_master = 0;
- backend_data->drm_info.auth_drm_fd_func = _tbm_vc4_authenticated_drm_fd_handler;
- backend_data->drm_info.user_data = bufmgr_data;
-
- TBM_BACKEND_INFO("A backend requests an authenticated drm_fd.\n");
- }
-
- //Check if the tbm manager supports dma fence or not.
- fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
- if (fp != -1) {
- char buf[1];
- int length = read(fp, buf, 1);
-
- if (length == 1 && buf[0] == '1')
- bufmgr_data->use_dma_fence = 1;
-
- close(fp);
- }
-
- if (!_bufmgr_init_cache_state(bufmgr_data)) {
- TBM_BACKEND_ERR("fail to init bufmgr cache state\n");
- goto fail_init_cache_state;
- }
-
- /*Create Hash Table*/
- bufmgr_data->hashBos = drmHashCreate();
-
- // initialize the surface_data list
- LIST_INITHEAD(&bufmgr_data->surface_data_list);
-
- /* alloc and register bufmgr_funcs */
- bufmgr_funcs = calloc(1, sizeof(struct _hal_tbm_bufmgr_funcs));
- if (!bufmgr_funcs) {
- TBM_BACKEND_ERR("fail to alloc bufmgr_funcs!\n");
- goto fail_alloc_bufmgr_funcs;
- }
- backend_data->bufmgr_funcs = bufmgr_funcs;
-
- bufmgr_funcs->bufmgr_get_capabilities = tbm_vc4_bufmgr_get_capabilities;
- bufmgr_funcs->bufmgr_get_supported_formats = tbm_vc4_bufmgr_get_supported_formats;
- bufmgr_funcs->bufmgr_get_plane_data = tbm_vc4_bufmgr_get_plane_data;
- bufmgr_funcs->bufmgr_alloc_surface = tbm_vc4_bufmgr_alloc_surface;
- bufmgr_funcs->bufmgr_import_surface = tbm_vc4_bufmgr_import_surface;
- bufmgr_funcs->bufmgr_alloc_bo = tbm_vc4_bufmgr_alloc_bo;
-#ifdef VC4_TILED_FORMAT
- bufmgr_funcs->bufmgr_alloc_bo_with_format = tbm_vc4_bufmgr_alloc_bo_with_tiled_format;
-#else
- bufmgr_funcs->bufmgr_alloc_bo_with_format = NULL;
-#endif
- bufmgr_funcs->bufmgr_import_fd = tbm_vc4_bufmgr_import_fd;
- bufmgr_funcs->bufmgr_import_key = tbm_vc4_bufmgr_import_key;
-
- /* alloc and register surface_funcs */
- surface_funcs = calloc(1, sizeof(struct _hal_tbm_surface_funcs));
- if (!surface_funcs) {
- TBM_BACKEND_ERR("fail to alloc surface_funcs!\n");
- goto fail_alloc_surface_funcs;
- }
- backend_data->surface_funcs = surface_funcs;
-
- surface_funcs->surface_free = tbm_vc4_surface_free;
- surface_funcs->surface_get_bos = tbm_vc4_surface_get_bos;
- surface_funcs->surface_get_plane_data = tbm_vc4_surface_get_plane_data;
- surface_funcs->surface_export = tbm_vc4_surface_export;
-
- /* alloc and register bo_funcs */
- bo_funcs = calloc(1, sizeof(struct _hal_tbm_bo_funcs));
- if (!bo_funcs) {
- TBM_BACKEND_ERR("fail to alloc bo_funcs!\n");
- goto fail_alloc_bo_funcs;
- }
- backend_data->bo_funcs = bo_funcs;
-
- bo_funcs->bo_free = tbm_vc4_bo_free;
- bo_funcs->bo_get_size = tbm_vc4_bo_get_size;
- bo_funcs->bo_get_memory_types = tbm_vc4_bo_get_memory_type;
- bo_funcs->bo_get_handle = tbm_vc4_bo_get_handle;
- bo_funcs->bo_map = tbm_vc4_bo_map;
- bo_funcs->bo_unmap = tbm_vc4_bo_unmap;
- bo_funcs->bo_lock = tbm_vc4_bo_lock;
- bo_funcs->bo_unlock = tbm_vc4_bo_unlock;
- bo_funcs->bo_export_fd = tbm_vc4_bo_export_fd;
- bo_funcs->bo_export_key = tbm_vc4_bo_export_key;
-
- TBM_BACKEND_DBG("drm_fd:%d\n", bufmgr_data->fd);
-
- return HAL_TBM_ERROR_NONE;
-
-fail_alloc_bo_funcs:
- free(surface_funcs);
-fail_alloc_surface_funcs:
- free(bufmgr_funcs);
-fail_alloc_bufmgr_funcs:
- _bufmgr_deinit_cache_state(bufmgr_data);
- if (bufmgr_data->hashBos)
- drmHashDestroy(bufmgr_data->hashBos);
-fail_init_cache_state:
- close(bufmgr_data->fd);
-fail_open_drm:
- free(bufmgr_data);
-fail_alloc_bufmgr_data:
- free(backend_data);
-
- *data = NULL;
-
- return -1;
-}
-
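-/* Backend module data: backend name, vendor, HAL ABI version, and the init/exit entry points. */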
-hal_backend hal_backend_tbm_data = {
- "vc4",
- "Samsung",
- HAL_ABI_VERSION_TIZEN_6_5,
- hal_backend_tbm_vc4_init,
- hal_backend_tbm_vc4_exit
-};
+++ /dev/null
-/**************************************************************************
- *
- * libtbm
- *
- * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
- *
- * Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
- * Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * **************************************************************************/
-
-#ifndef __TBM_BUFMGR_TGL_H__
-#define __TBM_BUFMGR_TGL_H__
-
-#include <linux/ioctl.h>
-
-#ifdef ENABLE_CACHECRTL
-static char tgl_devfile[] = "/dev/slp_global_lock";
-static char tgl_devfile1[] = "/dev/tgl";
-#endif
-
-#define TGL_IOCTL_BASE 0x32
-#define TGL_IO(nr) _IO(TGL_IOCTL_BASE, nr)
-#define TGL_IOR(nr, type) _IOR(TGL_IOCTL_BASE, nr, type)
-#define TGL_IOW(nr, type) _IOW(TGL_IOCTL_BASE, nr, type)
-#define TGL_IOWR(nr, type) _IOWR(TGL_IOCTL_BASE, nr, type)
-
-/**
- * struct tgl_ver_data - tgl version data structure
- * @major: major version
- * @minor: minor version
- */
-struct tgl_ver_data {
- unsigned int major;
- unsigned int minor;
-};
-
-/**
- * struct tgl_reg_data - tgl data structure
- * @key: lookup key
- * @timeout_ms: timeout value for waiting event
- */
-struct tgl_reg_data {
- unsigned int key;
- unsigned int timeout_ms;
-};
-
-enum tgl_type_data {
- TGL_TYPE_NONE = 0,
- TGL_TYPE_READ = (1 << 0),
- TGL_TYPE_WRITE = (1 << 1),
-};
-
-/**
- * struct tgl_lock_data - tgl lock data structure
- * @key: lookup key
- * @type: lock type that is in tgl_type_data
- */
-struct tgl_lock_data {
- unsigned int key;
- enum tgl_type_data type;
-};
-
-enum tgl_status_data {
- TGL_STATUS_UNLOCKED,
- TGL_STATUS_LOCKED,
-};
-
-/**
- * struct tgl_usr_data - tgl user data structure
- * @key: lookup key
- * @data1: user data 1
- * @data2: user data 2
- * @status: lock status that is in tgl_status_data
- */
-struct tgl_usr_data {
- unsigned int key;
- unsigned int data1;
- unsigned int data2;
- enum tgl_status_data status;
-};
-
-enum {
- _TGL_GET_VERSION,
- _TGL_REGISTER,
- _TGL_UNREGISTER,
- _TGL_LOCK,
- _TGL_UNLOCK,
- _TGL_SET_DATA,
- _TGL_GET_DATA,
-};
-
-/* get version information */
-#define TGL_IOCTL_GET_VERSION TGL_IOR(_TGL_GET_VERSION, struct tgl_ver_data)
-/* register key */
-#define TGL_IOCTL_REGISTER TGL_IOW(_TGL_REGISTER, struct tgl_reg_data)
-/* unregister key */
-#define TGL_IOCTL_UNREGISTER TGL_IOW(_TGL_UNREGISTER, struct tgl_reg_data)
-/* lock with key */
-#define TGL_IOCTL_LOCK TGL_IOW(_TGL_LOCK, struct tgl_lock_data)
-/* unlock with key */
-#define TGL_IOCTL_UNLOCK TGL_IOW(_TGL_UNLOCK, struct tgl_lock_data)
-/* set user data with key */
-#define TGL_IOCTL_SET_DATA TGL_IOW(_TGL_SET_DATA, struct tgl_usr_data)
-/* get user data with key */
-#define TGL_IOCTL_GET_DATA TGL_IOR(_TGL_GET_DATA, struct tgl_usr_data)
-
-#ifdef ENABLE_CACHECRTL
-/* indicate cache units. */
-enum e_drm_vc4_gem_cache_sel {
- VC4_DRM_L1_CACHE = 1 << 0,
- VC4_DRM_L2_CACHE = 1 << 1,
- VC4_DRM_ALL_CORES = 1 << 2,
- VC4_DRM_ALL_CACHES = VC4_DRM_L1_CACHE |
- VC4_DRM_L2_CACHE,
- VC4_DRM_ALL_CACHES_CORES = VC4_DRM_L1_CACHE |
- VC4_DRM_L2_CACHE |
- VC4_DRM_ALL_CORES,
- VC4_DRM_CACHE_SEL_MASK = VC4_DRM_ALL_CACHES_CORES
-};
-
-/* indicate cache operation types. */
-enum e_drm_vc4_gem_cache_op {
- VC4_DRM_CACHE_INV_ALL = 1 << 3,
- VC4_DRM_CACHE_INV_RANGE = 1 << 4,
- VC4_DRM_CACHE_CLN_ALL = 1 << 5,
- VC4_DRM_CACHE_CLN_RANGE = 1 << 6,
- VC4_DRM_CACHE_FSH_ALL = VC4_DRM_CACHE_INV_ALL |
- VC4_DRM_CACHE_CLN_ALL,
- VC4_DRM_CACHE_FSH_RANGE = VC4_DRM_CACHE_INV_RANGE |
- VC4_DRM_CACHE_CLN_RANGE,
- VC4_DRM_CACHE_OP_MASK = VC4_DRM_CACHE_FSH_ALL |
- VC4_DRM_CACHE_FSH_RANGE
-};
-
-/**
- * A structure for cache operation.
- *
- * @usr_addr: user space address.
- * P.S. it SHOULD BE user space.
- * @size: buffer size for cache operation.
- * @flags: select cache unit and cache operation.
- * @gem_handle: a handle to a gem object.
- * this gem handle is needed for cache range operation to L2 cache.
- */
-struct drm_vc4_gem_cache_op {
- uint64_t usr_addr;
- unsigned int size;
- unsigned int flags;
- unsigned int gem_handle;
-};
-
-#define DRM_VC4_GEM_CACHE_OP 0x12
-
-#define DRM_IOCTL_VC4_GEM_CACHE_OP DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_VC4_GEM_CACHE_OP, struct drm_vc4_gem_cache_op)
-
-#endif
-
-#endif /* __TBM_BUFMGR_TGL_H__ */
+++ /dev/null
-AM_CFLAGS = \
- @LIBTBM_VC4_CFLAGS@ \
- -I$(top_srcdir) \
- -I$(top_srcdir)/src/libtbm-vc4
-
-libtbm_vc4_la_LTLIBRARIES = libtbm-vc4.la
-libtbm_vc4_ladir = /${bufmgr_dir}
-libtbm_vc4_la_LIBADD = @LIBTBM_VC4_LIBS@
-
-libtbm_vc4_la_SOURCES = \
- tbm_bufmgr_vc4.c
+++ /dev/null
-/**************************************************************************
- *
- * libtbm
- *
- * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
- *
- * Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
- * Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * **************************************************************************/
-
-#ifndef __TBM_BUFMGR_TGL_H__
-#define __TBM_BUFMGR_TGL_H__
-
-#include <linux/ioctl.h>
-
-#ifdef ENABLE_CACHECRTL
-static char tgl_devfile[] = "/dev/slp_global_lock";
-static char tgl_devfile1[] = "/dev/tgl";
-#endif
-
-#define TGL_IOCTL_BASE 0x32
-#define TGL_IO(nr) _IO(TGL_IOCTL_BASE, nr)
-#define TGL_IOR(nr, type) _IOR(TGL_IOCTL_BASE, nr, type)
-#define TGL_IOW(nr, type) _IOW(TGL_IOCTL_BASE, nr, type)
-#define TGL_IOWR(nr, type) _IOWR(TGL_IOCTL_BASE, nr, type)
-
-/**
- * struct tgl_ver_data - tgl version data structure
- * @major: major version
- * @minor: minor version
- */
-struct tgl_ver_data {
- unsigned int major;
- unsigned int minor;
-};
-
-/**
- * struct tgl_reg_data - tgl data structure
- * @key: lookup key
- * @timeout_ms: timeout value for waiting event
- */
-struct tgl_reg_data {
- unsigned int key;
- unsigned int timeout_ms;
-};
-
-enum tgl_type_data {
- TGL_TYPE_NONE = 0,
- TGL_TYPE_READ = (1 << 0),
- TGL_TYPE_WRITE = (1 << 1),
-};
-
-/**
- * struct tgl_lock_data - tgl lock data structure
- * @key: lookup key
- * @type: lock type that is in tgl_type_data
- */
-struct tgl_lock_data {
- unsigned int key;
- enum tgl_type_data type;
-};
-
-enum tgl_status_data {
- TGL_STATUS_UNLOCKED,
- TGL_STATUS_LOCKED,
-};
-
-/**
- * struct tgl_usr_data - tgl user data structure
- * @key: lookup key
- * @data1: user data 1
- * @data2: user data 2
- * @status: lock status that is in tgl_status_data
- */
-struct tgl_usr_data {
- unsigned int key;
- unsigned int data1;
- unsigned int data2;
- enum tgl_status_data status;
-};
-
-enum {
- _TGL_GET_VERSION,
- _TGL_REGISTER,
- _TGL_UNREGISTER,
- _TGL_LOCK,
- _TGL_UNLOCK,
- _TGL_SET_DATA,
- _TGL_GET_DATA,
-};
-
-/* get version information */
-#define TGL_IOCTL_GET_VERSION TGL_IOR(_TGL_GET_VERSION, struct tgl_ver_data)
-/* register key */
-#define TGL_IOCTL_REGISTER TGL_IOW(_TGL_REGISTER, struct tgl_reg_data)
-/* unregister key */
-#define TGL_IOCTL_UNREGISTER TGL_IOW(_TGL_UNREGISTER, struct tgl_reg_data)
-/* lock with key */
-#define TGL_IOCTL_LOCK TGL_IOW(_TGL_LOCK, struct tgl_lock_data)
-/* unlock with key */
-#define TGL_IOCTL_UNLOCK TGL_IOW(_TGL_UNLOCK, struct tgl_lock_data)
-/* set user data with key */
-#define TGL_IOCTL_SET_DATA TGL_IOW(_TGL_SET_DATA, struct tgl_usr_data)
-/* get user data with key */
-#define TGL_IOCTL_GET_DATA TGL_IOR(_TGL_GET_DATA, struct tgl_usr_data)
-
-#ifdef ENABLE_CACHECRTL
-/* indicate cache units. */
-enum e_drm_vc4_gem_cache_sel {
- VC4_DRM_L1_CACHE = 1 << 0,
- VC4_DRM_L2_CACHE = 1 << 1,
- VC4_DRM_ALL_CORES = 1 << 2,
- VC4_DRM_ALL_CACHES = VC4_DRM_L1_CACHE |
- VC4_DRM_L2_CACHE,
- VC4_DRM_ALL_CACHES_CORES = VC4_DRM_L1_CACHE |
- VC4_DRM_L2_CACHE |
- VC4_DRM_ALL_CORES,
- VC4_DRM_CACHE_SEL_MASK = VC4_DRM_ALL_CACHES_CORES
-};
-
-/* indicate cache operation types. */
-enum e_drm_vc4_gem_cache_op {
- VC4_DRM_CACHE_INV_ALL = 1 << 3,
- VC4_DRM_CACHE_INV_RANGE = 1 << 4,
- VC4_DRM_CACHE_CLN_ALL = 1 << 5,
- VC4_DRM_CACHE_CLN_RANGE = 1 << 6,
- VC4_DRM_CACHE_FSH_ALL = VC4_DRM_CACHE_INV_ALL |
- VC4_DRM_CACHE_CLN_ALL,
- VC4_DRM_CACHE_FSH_RANGE = VC4_DRM_CACHE_INV_RANGE |
- VC4_DRM_CACHE_CLN_RANGE,
- VC4_DRM_CACHE_OP_MASK = VC4_DRM_CACHE_FSH_ALL |
- VC4_DRM_CACHE_FSH_RANGE
-};
-
-/**
- * A structure for cache operation.
- *
- * @usr_addr: user space address.
- * P.S. it SHOULD BE user space.
- * @size: buffer size for cache operation.
- * @flags: select cache unit and cache operation.
- * @gem_handle: a handle to a gem object.
- * this gem handle is needed for cache range operation to L2 cache.
- */
-struct drm_vc4_gem_cache_op {
- uint64_t usr_addr;
- unsigned int size;
- unsigned int flags;
- unsigned int gem_handle;
-};
-
-#define DRM_VC4_GEM_CACHE_OP 0x12
-
-#define DRM_IOCTL_VC4_GEM_CACHE_OP DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_VC4_GEM_CACHE_OP, struct drm_vc4_gem_cache_op)
-
-#endif
-
-#endif /* __TBM_BUFMGR_TGL_H__ */
+++ /dev/null
-/**************************************************************************
-
-libtbm_vc4
-
-Copyright 2017 Samsung Electronics co., Ltd. All Rights Reserved.
-
-Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include <libudev.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <xf86drm.h>
-#include <vc4_drm.h>
-#include <pthread.h>
-#include <tbm_backend.h>
-#include <tbm_drm_helper.h>
-#include <tbm_log.h>
-#include "tbm_bufmgr_tgl.h"
-
-#define TBM_COLOR_FORMAT_COUNT 4
-
-#define VC4_DRM_NAME "vc4"
-
-#define STRERR_BUFSIZE 128
-
-#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
-#ifdef ALIGN_EIGHT
-#define TBM_SURFACE_ALIGNMENT_PLANE (8)
-#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
-#else
-#define TBM_SURFACE_ALIGNMENT_PLANE (16)
-#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (16)
-#endif
-
-#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
-#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (32)
-#define TBM_SURFACE_ALIGNMENT_HEIGHT_YUV (16)
-
-//#define VC4_TILED_FORMAT 1
-
-struct dma_buf_info {
- unsigned long size;
- unsigned int fence_supported;
- unsigned int padding;
-};
-
-#define DMA_BUF_ACCESS_READ 0x1
-#define DMA_BUF_ACCESS_WRITE 0x2
-#define DMA_BUF_ACCESS_DMA 0x4
-#define DMA_BUF_ACCESS_MAX 0x8
-
-#define DMA_FENCE_LIST_MAX 5
-
-struct dma_buf_fence {
- unsigned long ctx;
- unsigned int type;
-};
-
-#define DMABUF_IOCTL_BASE 'F'
-#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
-
-#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
-#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
-#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
-
-/* tgl key values */
-#define GLOBAL_KEY ((unsigned int)(-1))
-/* TBM_CACHE */
-#define TBM_VC4_CACHE_INV 0x01 /**< cache invalidate */
-#define TBM_VC4_CACHE_CLN 0x02 /**< cache clean */
-#define TBM_VC4_CACHE_ALL 0x10 /**< cache all */
-#define TBM_VC4_CACHE_FLUSH (TBM_VC4_CACHE_INV|TBM_VC4_CACHE_CLN) /**< cache flush */
-#define TBM_VC4_CACHE_FLUSH_ALL (TBM_VC4_CACHE_FLUSH|TBM_VC4_CACHE_ALL) /**< cache flush all */
-
-enum {
- DEVICE_NONE = 0,
- DEVICE_CA, /* cache aware device */
- DEVICE_CO /* cache oblivious device */
-};
-
-typedef union _tbm_bo_cache_state tbm_bo_cache_state;
-
-union _tbm_bo_cache_state {
- unsigned int val;
- struct {
- unsigned int cntFlush:16; /*Flush all index for sync */
- unsigned int isCached:1;
- unsigned int isDirtied:2;
- } data;
-};
-
-typedef struct _tbm_bufmgr_vc4 *tbm_bufmgr_vc4;
-typedef struct _tbm_bo_vc4 *tbm_bo_vc4;
-
-/* tbm buffer object for vc4 */
-struct _tbm_bo_vc4 {
- int fd;
-
- unsigned int name; /* FLINK ID */
-
- unsigned int gem; /* GEM Handle */
-
- unsigned int dmabuf; /* fd for dmabuf */
-
- void *pBase; /* virtual address */
-
- unsigned int size;
-
-	unsigned int flags_tbm; /* currently unused; no values are defined for the flags, but it may be used in a future extension */
-
- pthread_mutex_t mutex;
- struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
- int device;
- int opt;
-
- tbm_bo_cache_state cache_state;
- unsigned int map_cnt;
- int last_map_device;
-
- tbm_bufmgr_vc4 bufmgr_vc4;
-};
-
-/* tbm bufmgr private for vc4 */
-struct _tbm_bufmgr_vc4 {
- int fd;
- int isLocal;
- void *hashBos;
-
- int use_dma_fence;
-
- int tgl_fd;
-
- char *device_name;
- void *bind_display;
-
- tbm_backend_bufmgr_func *bufmgr_func;
- tbm_backend_bo_func *bo_func;
-
- tbm_bufmgr bufmgr;
-};
-
-static char *STR_DEVICE[] = {
- "DEF",
- "CPU",
- "2D",
- "3D",
- "MM"
-};
-
-static char *STR_OPT[] = {
- "NONE",
- "RD",
- "WR",
- "RDWR"
-};
-
-
-static uint32_t tbm_vc4_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
- TBM_FORMAT_ARGB8888,
- TBM_FORMAT_XRGB8888,
- TBM_FORMAT_NV12,
- TBM_FORMAT_YUV420
- };
-#undef ENABLE_CACHECRTL
-#ifdef ENABLE_CACHECRTL
-#ifdef TGL_GET_VERSION
-static inline int
-_tgl_get_version(int fd)
-{
- struct tgl_ver_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
- if (err) {
-		TBM_ERR("error(%s)\n",
-			strerror_r(errno, buf, STRERR_BUFSIZE));
- return 0;
- }
-
- TBM_DBG("tgl version is (%u, %u).\n", data.major, data.minor);
-
- return 1;
-}
-#endif
-
-static inline int
-_tgl_init(int fd, unsigned int key)
-{
- struct tgl_reg_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.timeout_ms = 1000;
-
- err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_destroy(int fd, unsigned int key)
-{
- struct tgl_reg_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_lock(int fd, unsigned int key, int opt)
-{
- struct tgl_lock_data data;
- enum tgl_type_data tgl_type;
- int err;
- char buf[STRERR_BUFSIZE];
-
- switch (opt) {
- case TBM_OPTION_READ:
- tgl_type = TGL_TYPE_READ;
- break;
- case TBM_OPTION_WRITE:
- tgl_type = TGL_TYPE_WRITE;
- break;
- default:
- tgl_type = TGL_TYPE_NONE;
- break;
- }
-
- data.key = key;
- data.type = tgl_type;
-
- err = ioctl(fd, TGL_IOCTL_LOCK, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d opt:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_unlock(int fd, unsigned int key)
-{
- struct tgl_lock_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.type = TGL_TYPE_NONE;
-
- err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline int
-_tgl_set_data(int fd, unsigned int key, unsigned int val)
-{
- struct tgl_usr_data data;
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
- data.data1 = val;
-
- err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return 1;
-}
-
-static inline unsigned int
-_tgl_get_data(int fd, unsigned int key)
-{
- struct tgl_usr_data data = { 0, };
- int err;
- char buf[STRERR_BUFSIZE];
-
- data.key = key;
-
- err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
- if (err) {
- TBM_ERR("error(%s) key:%d\n",
- strerror_r(errno, buf, STRERR_BUFSIZE), key);
- return 0;
- }
-
- return data.data1;
-}
-
-static int
-_vc4_cache_flush(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int flags)
-{
- TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
-
- /* cache flush is managed by kernel side when using dma-fence. */
- if (bufmgr_vc4->use_dma_fence)
- return 1;
-
- struct drm_vc4_gem_cache_op cache_op = {0, };
- int ret;
-
- /* if bo_vc4 is null, do cache_flush_all */
- if (bo_vc4) {
- cache_op.flags = 0;
- cache_op.usr_addr = (uint64_t)((uint32_t)bo_vc4->pBase);
- cache_op.size = bo_vc4->size;
- } else {
- flags = TBM_VC4_CACHE_FLUSH_ALL;
- cache_op.flags = 0;
- cache_op.usr_addr = 0;
- cache_op.size = 0;
- }
-
- if (flags & TBM_VC4_CACHE_INV) {
- if (flags & TBM_VC4_CACHE_ALL)
- cache_op.flags |= VC4_DRM_CACHE_INV_ALL;
- else
- cache_op.flags |= VC4_DRM_CACHE_INV_RANGE;
- }
-
- if (flags & TBM_VC4_CACHE_CLN) {
- if (flags & TBM_VC4_CACHE_ALL)
- cache_op.flags |= VC4_DRM_CACHE_CLN_ALL;
- else
- cache_op.flags |= VC4_DRM_CACHE_CLN_RANGE;
- }
-
- if (flags & TBM_VC4_CACHE_ALL)
- cache_op.flags |= VC4_DRM_ALL_CACHES_CORES;
-
- ret = drmCommandWriteRead(bufmgr_vc4->fd, DRM_VC4_GEM_CACHE_OP, &cache_op,
- sizeof(cache_op));
- if (ret) {
- TBM_ERR("fail to flush the cache.\n");
- return 0;
- }
-
- return 1;
-}
-#endif
-
-static int
-_bo_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int import)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
- TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
-
- if (bufmgr_vc4->use_dma_fence)
- return 1;
-
- _tgl_init(bufmgr_vc4->tgl_fd, bo_vc4->name);
-
- tbm_bo_cache_state cache_state;
-
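-	/* Only freshly allocated bos (import == 0) start with a reset cache state; imported bos keep the state already stored in tgl. */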
- if (import == 0) {
- cache_state.data.isDirtied = DEVICE_NONE;
- cache_state.data.isCached = 0;
- cache_state.data.cntFlush = 0;
-
- _tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name, cache_state.val);
- }
-#endif
-
- return 1;
-}
-
-static int
-_bo_set_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4, int device, int opt)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
- TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
-
- if (bufmgr_vc4->use_dma_fence)
- return 1;
-
- char need_flush = 0;
- unsigned short cntFlush = 0;
-
- /* get cache state of a bo_vc4 */
- bo_vc4->cache_state.val = _tgl_get_data(bufmgr_vc4->tgl_fd,
- bo_vc4->name);
-
- /* get global cache flush count */
- cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);
-
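-	/* CPU access may need a cache invalidate if a cache-oblivious device dirtied the bo; device access may need a clean if the CPU dirtied it. */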
- if (device == TBM_DEVICE_CPU) {
- if (bo_vc4->cache_state.data.isDirtied == DEVICE_CO &&
- bo_vc4->cache_state.data.isCached)
- need_flush = TBM_VC4_CACHE_INV;
-
- bo_vc4->cache_state.data.isCached = 1;
- if (opt & TBM_OPTION_WRITE)
- bo_vc4->cache_state.data.isDirtied = DEVICE_CA;
- else {
- if (bo_vc4->cache_state.data.isDirtied != DEVICE_CA)
- bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
- }
- } else {
- if (bo_vc4->cache_state.data.isDirtied == DEVICE_CA &&
- bo_vc4->cache_state.data.isCached &&
- bo_vc4->cache_state.data.cntFlush == cntFlush)
- need_flush = TBM_VC4_CACHE_CLN | TBM_VC4_CACHE_ALL;
-
- if (opt & TBM_OPTION_WRITE)
- bo_vc4->cache_state.data.isDirtied = DEVICE_CO;
- else {
- if (bo_vc4->cache_state.data.isDirtied != DEVICE_CO)
- bo_vc4->cache_state.data.isDirtied = DEVICE_NONE;
- }
- }
-
- if (need_flush) {
- if (need_flush & TBM_VC4_CACHE_ALL)
- _tgl_set_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
-
- /* call cache flush */
- _vc4_cache_flush(bufmgr_vc4, bo_vc4, need_flush);
-
- TBM_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
- bo_vc4->cache_state.data.isCached,
- bo_vc4->cache_state.data.isDirtied,
- need_flush,
- cntFlush);
- }
-#endif
-
- return 1;
-}
-
-static int
-_bo_save_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
- TBM_RETURN_VAL_IF_FAIL(bo_vc4 != NULL, 0);
-
- if (bufmgr_vc4->use_dma_fence)
- return 1;
-
- unsigned short cntFlush = 0;
-
- /* get global cache flush count */
- cntFlush = (unsigned short)_tgl_get_data(bufmgr_vc4->tgl_fd, GLOBAL_KEY);
-
- /* save global cache flush count */
- bo_vc4->cache_state.data.cntFlush = cntFlush;
- _tgl_set_data(bufmgr_vc4->tgl_fd, bo_vc4->name,
- bo_vc4->cache_state.val);
-#endif
-
- return 1;
-}
-
-static void
-_bo_destroy_cache_state(tbm_bufmgr_vc4 bufmgr_vc4, tbm_bo_vc4 bo_vc4)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
- TBM_RETURN_IF_FAIL(bo_vc4 != NULL);
-
- if (bufmgr_vc4->use_dma_fence)
- return ;
-
- _tgl_destroy(bufmgr_vc4->tgl_fd, bo_vc4->name);
-#endif
-}
-
-static int
-_bufmgr_init_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, 0);
-
- if (bufmgr_vc4->use_dma_fence)
- return 1;
-
- /* open tgl fd for saving cache flush data */
- bufmgr_vc4->tgl_fd = open(tgl_devfile, O_RDWR);
-
- if (bufmgr_vc4->tgl_fd < 0) {
- bufmgr_vc4->tgl_fd = open(tgl_devfile1, O_RDWR);
- if (bufmgr_vc4->tgl_fd < 0) {
- TBM_ERR("fail to open global_lock:%s\n",
- tgl_devfile1);
- return 0;
- }
- }
-
-#ifdef TGL_GET_VERSION
- if (!_tgl_get_version(bufmgr_vc4->tgl_fd)) {
- TBM_ERR("fail to get tgl_version. tgl init failed.\n");
- close(bufmgr_vc4->tgl_fd);
- return 0;
- }
-#endif
-
- if (!_tgl_init(bufmgr_vc4->tgl_fd, GLOBAL_KEY)) {
- TBM_ERR("fail to initialize the tgl\n");
- close(bufmgr_vc4->tgl_fd);
- return 0;
- }
-#endif
-
- return 1;
-}
-
-static void
-_bufmgr_deinit_cache_state(tbm_bufmgr_vc4 bufmgr_vc4)
-{
-#ifdef ENABLE_CACHECRTL
- TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
-
- if (bufmgr_vc4->use_dma_fence)
- return;
-
- if (bufmgr_vc4->tgl_fd >= 0)
- close(bufmgr_vc4->tgl_fd);
-#endif
-}
-
-static int
-_tbm_vc4_open_drm()
-{
- int fd = -1;
-
- fd = drmOpen(VC4_DRM_NAME, NULL);
- if (fd < 0) {
- TBM_ERR("fail to open drm.(%s)\n", VC4_DRM_NAME);
- }
-
- if (fd < 0) {
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
- const char *filepath;
- struct stat s;
- int ret;
-
- TBM_DBG("search drm-device by udev\n");
-
- udev = udev_new();
- if (!udev) {
- TBM_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- udev_enumerate_add_match_sysname(e, "card[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
-			/* No need to unref device_parent; it shares the same refcount as device. */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
- drm_device = device;
- TBM_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
-
- /* Get device file path. */
- filepath = udev_device_get_devnode(drm_device);
- if (!filepath) {
- TBM_ERR("udev_device_get_devnode() failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- /* Open DRM device file and check validity. */
- fd = open(filepath, O_RDWR | O_CLOEXEC);
- if (fd < 0) {
-			TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- ret = fstat(fd, &s);
- if (ret) {
-			TBM_ERR("fstat() failed.\n");
- close(fd);
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- udev_device_unref(drm_device);
- udev_unref(udev);
- }
-
- return fd;
-}
-
-static int
-_check_render_node(void)
-{
-#ifndef USE_RENDER_NODE
- return 0;
-#else
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
-
- udev = udev_new();
- if (!udev) {
- TBM_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
-		/* No need to unref device_parent; it shares the same refcount as device. */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
- drm_device = device;
- TBM_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
- udev_unref(udev);
-
- if (!drm_device) {
- udev_device_unref(drm_device);
- return 0;
- }
-
- udev_device_unref(drm_device);
- return 1;
-#endif
-}
-
-static int
-_get_render_node(void)
-{
- struct udev *udev = NULL;
- struct udev_enumerate *e = NULL;
- struct udev_list_entry *entry = NULL;
- struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
- const char *filepath;
- struct stat s;
- int fd = -1;
- int ret;
-
- udev = udev_new();
- if (!udev) {
- TBM_ERR("udev_new() failed.\n");
- return -1;
- }
-
- e = udev_enumerate_new(udev);
- udev_enumerate_add_match_subsystem(e, "drm");
- udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
- udev_enumerate_scan_devices(e);
-
- udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
- device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
- udev_list_entry_get_name(entry));
- device_parent = udev_device_get_parent(device);
-		/* No need to unref device_parent; it shares the same refcount as device. */
- if (device_parent) {
- if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
- drm_device = device;
- TBM_DBG("Found render device: '%s' (%s)\n",
- udev_device_get_syspath(drm_device),
- udev_device_get_sysname(device_parent));
- break;
- }
- }
- udev_device_unref(device);
- }
-
- udev_enumerate_unref(e);
-
- /* Get device file path. */
- filepath = udev_device_get_devnode(drm_device);
- if (!filepath) {
- TBM_ERR("udev_device_get_devnode() failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- /* Open DRM device file and check validity. */
- fd = open(filepath, O_RDWR | O_CLOEXEC);
- if (fd < 0) {
-		TBM_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
- udev_device_unref(drm_device);
- udev_unref(udev);
- return -1;
- }
-
- ret = fstat(fd, &s);
- if (ret) {
-		TBM_ERR("fstat() failed.\n");
- udev_device_unref(drm_device);
- udev_unref(udev);
- close(fd);
- return -1;
- }
-
- udev_device_unref(drm_device);
- udev_unref(udev);
-
- return fd;
-}
-
-static unsigned int
-_get_name(int fd, unsigned int gem)
-{
- struct drm_gem_flink arg = {0,};
-
- arg.handle = gem;
- if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
- TBM_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
- return 0;
- }
-
- return (unsigned int)arg.name;
-}
-
-static tbm_bo_handle
-_vc4_bo_handle(tbm_bo_vc4 bo_vc4, int device)
-{
- tbm_bo_handle bo_handle;
-
-	memset(&bo_handle, 0x0, sizeof(bo_handle));
-
- switch (device) {
- case TBM_DEVICE_DEFAULT:
- case TBM_DEVICE_2D:
- bo_handle.u32 = (uint32_t)bo_vc4->gem;
- break;
- case TBM_DEVICE_CPU:
- if (!bo_vc4->pBase) {
- struct drm_vc4_mmap_bo arg = {0, };
- void *map = NULL;
-
- arg.handle = bo_vc4->gem;
- if (drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_MMAP_BO, &arg)) {
- TBM_ERR("Cannot map_vc4 gem=%d\n", bo_vc4->gem);
- return (tbm_bo_handle) NULL;
- }
-
- map = mmap(NULL, bo_vc4->size, PROT_READ | PROT_WRITE, MAP_SHARED,
- bo_vc4->fd, arg.offset);
- if (map == MAP_FAILED) {
- TBM_ERR("Cannot usrptr gem=%d\n", bo_vc4->gem);
- return (tbm_bo_handle) NULL;
- }
- bo_vc4->pBase = map;
- }
- bo_handle.ptr = (void *)bo_vc4->pBase;
- break;
- case TBM_DEVICE_3D:
- case TBM_DEVICE_MM:
- if (!bo_vc4->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_vc4->gem;
- if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
- return (tbm_bo_handle) NULL;
- }
- bo_vc4->dmabuf = arg.fd;
- }
-
- bo_handle.u32 = (uint32_t)bo_vc4->dmabuf;
- break;
- default:
- TBM_ERR("Not supported device:%d\n", device);
- bo_handle.ptr = (void *) NULL;
- break;
- }
-
- return bo_handle;
-}
-
-static tbm_bufmgr_capability
-tbm_vc4_bufmgr_get_capabilities(tbm_backend_bufmgr_data *bufmgr_data, tbm_error_e *error)
-{
- tbm_bufmgr_capability capabilities = TBM_BUFMGR_CAPABILITY_NONE;
-
-#ifdef VC4_TILED_FORMAT
- capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD|TBM_BUFMGR_CAPABILITY_TILED_MEMORY;
-#else
- capabilities = TBM_BUFMGR_CAPABILITY_SHARE_KEY|TBM_BUFMGR_CAPABILITY_SHARE_FD;
-#endif
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return capabilities;
-}
-
-static tbm_error_e
-tbm_vc4_bufmgr_bind_native_display(tbm_backend_bufmgr_data *bufmgr_data, tbm_native_display *native_display)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
- TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
-
- if (!tbm_drm_helper_wl_auth_server_init(native_display, bufmgr_vc4->fd,
- bufmgr_vc4->device_name, 0)) {
- TBM_ERR("fail to tbm_drm_helper_wl_server_init\n");
- return TBM_ERROR_INVALID_OPERATION;
- }
-
- bufmgr_vc4->bind_display = native_display;
-
- return TBM_ERROR_NONE;
-}
-static tbm_error_e
-tbm_vc4_bufmgr_get_supported_formats(tbm_backend_bufmgr_data *bufmgr_data,
- uint32_t **formats, uint32_t *num)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
- uint32_t *color_formats;
-
- TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
-
- color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
- if (color_formats == NULL)
- return TBM_ERROR_OUT_OF_MEMORY;
-
- memcpy(color_formats, tbm_vc4_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
-
- *formats = color_formats;
- *num = TBM_COLOR_FORMAT_COUNT;
-
- TBM_DBG("supported format count = %d\n", *num);
-
- return TBM_ERROR_NONE;
-}
-
-
-#ifdef VC4_TILED_FORMAT
-#include <drm_fourcc.h>
-static inline uint32_t
-vc4_utile_width(int cpp)
-{
- switch (cpp) {
- case 1:
- case 2:
- return 8;
- case 4:
- return 4;
- case 8:
- return 2;
- default:
- return 4;
- }
-}
-
-static inline uint32_t
-vc4_utile_height(int cpp)
-{
- switch (cpp) {
- case 1:
- return 8;
- case 2:
- case 4:
- case 8:
- return 4;
- default:
- return 4;
- }
-}
-
-static inline bool
-vc4_size_is_lt(uint32_t width, uint32_t height, int cpp)
-{
- return (width <= 4 * vc4_utile_width(cpp) ||
- height <= 4 * vc4_utile_height(cpp));
-}
-
-static tbm_backend_bo_data *
-tbm_vc4_bufmgr_alloc_bo_with_tiled_format(tbm_backend_bufmgr_data *bufmgr_data, int width, int height,
- int cpp, int format, tbm_bo_memory_type flags, int bo_idx, tbm_error_e *err)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
- tbm_bo_vc4 bo_vc4;
- uint32_t utile_w = vc4_utile_width(cpp);
- uint32_t utile_h = vc4_utile_height(cpp);
- uint32_t level_width, level_height;
- int size;
- uint32_t stride;
-
-
- level_width = width;
- level_height = height;
-
- if (bufmgr_vc4 == NULL) {
- TBM_ERR("bufmgr_data is null\n");
- return NULL;
- }
-
- if (vc4_size_is_lt(level_width, level_height, cpp)) {
- level_width = SIZE_ALIGN(level_width, utile_w);
- level_height = SIZE_ALIGN(level_height, utile_h);
- } else {
- level_width = SIZE_ALIGN(level_width,
- 4 * 2 * utile_w);
- level_height = SIZE_ALIGN(level_height,
- 4 * 2 * utile_h);
- }
-
- stride = level_width * cpp;
-
- size = level_height * stride;
- size = SIZE_ALIGN(size, 4096);
-
-
- bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
- if (!bo_vc4) {
- TBM_ERR("fail to allocate the bo_vc4 private\n");
- return NULL;
- }
- bo_vc4->bufmgr_vc4 = bufmgr_vc4;
-
- struct drm_vc4_create_bo arg = {0, };
-
- arg.size = (__u32)size;
-	arg.flags = flags; /* currently there are no values for the flags, but they may be used in a future extension */
- if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
- TBM_ERR("Cannot create bo_vc4(flag:%x, size:%d)\n", arg.flags,
- (unsigned int)arg.size);
- free(bo_vc4);
- return NULL;
- }
-
- bo_vc4->fd = bufmgr_vc4->fd;
- bo_vc4->gem = (unsigned int)arg.handle;
- bo_vc4->size = size;
- bo_vc4->flags_tbm = flags;
- bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
-
- if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 0)) {
- TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
- free(bo_vc4);
- return NULL;
- }
-
- pthread_mutex_init(&bo_vc4->mutex, NULL);
-
- if (bufmgr_vc4->use_dma_fence && !bo_vc4->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_vc4->gem;
- if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
- free(bo_vc4);
- return NULL;
- }
- bo_vc4->dmabuf = arg.fd;
- }
-
- //set modifier
- uint64_t modifier;
- modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
- struct drm_vc4_set_tiling set_tiling = {
- .handle = bo_vc4->gem,
- .modifier = modifier,
- };
- drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_SET_TILING, &set_tiling);
-
-
- /* add bo_vc4 to hash */
- if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
- TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->flags_tbm,
- bo_vc4->size);
-
- return (tbm_backend_bo_data *)bo_vc4;
-}
-#endif
-
-static tbm_error_e
-tbm_vc4_bufmgr_get_plane_data(tbm_backend_bufmgr_data *bufmgr_data,
- tbm_format format, int plane_idx, int width,
- int height, uint32_t *size, uint32_t *offset,
- uint32_t *pitch, int *bo_idx)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
- int bpp;
- int _offset = 0;
- int _pitch = 0;
- int _size = 0;
- int _bo_idx = 0;
- int _align_height = 0;
-
- TBM_RETURN_VAL_IF_FAIL(bufmgr_vc4 != NULL, TBM_ERROR_INVALID_PARAMETER);
-
- switch (format) {
- /* 16 bpp RGB */
- case TBM_FORMAT_XRGB4444:
- case TBM_FORMAT_XBGR4444:
- case TBM_FORMAT_RGBX4444:
- case TBM_FORMAT_BGRX4444:
- case TBM_FORMAT_ARGB4444:
- case TBM_FORMAT_ABGR4444:
- case TBM_FORMAT_RGBA4444:
- case TBM_FORMAT_BGRA4444:
- case TBM_FORMAT_XRGB1555:
- case TBM_FORMAT_XBGR1555:
- case TBM_FORMAT_RGBX5551:
- case TBM_FORMAT_BGRX5551:
- case TBM_FORMAT_ARGB1555:
- case TBM_FORMAT_ABGR1555:
- case TBM_FORMAT_RGBA5551:
- case TBM_FORMAT_BGRA5551:
- case TBM_FORMAT_RGB565:
- bpp = 16;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
- _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- break;
- /* 24 bpp RGB */
- case TBM_FORMAT_RGB888:
- case TBM_FORMAT_BGR888:
- bpp = 24;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
- _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- break;
- /* 32 bpp RGB */
- case TBM_FORMAT_XRGB8888:
- case TBM_FORMAT_XBGR8888:
- case TBM_FORMAT_RGBX8888:
- case TBM_FORMAT_BGRX8888:
- case TBM_FORMAT_ARGB8888:
- case TBM_FORMAT_ABGR8888:
- case TBM_FORMAT_RGBA8888:
- case TBM_FORMAT_BGRA8888:
- bpp = 32;
- _offset = 0;
-#ifdef VC4_TILED_FORMAT
- if (vc4_size_is_lt(width, height, 4)) {
- width = SIZE_ALIGN(width, vc4_utile_width(4));
- height = SIZE_ALIGN(height, vc4_utile_height(4));
-
- } else {
- width = SIZE_ALIGN(width, 32);
- uint32_t utile_h = vc4_utile_height(bpp);
- height = SIZE_ALIGN(height, 8*utile_h);
- }
-#endif
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
- _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- break;
-
- /* packed YCbCr */
- case TBM_FORMAT_YUYV:
- case TBM_FORMAT_YVYU:
- case TBM_FORMAT_UYVY:
- case TBM_FORMAT_VYUY:
- case TBM_FORMAT_AYUV:
- bpp = 32;
- _offset = 0;
- _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- break;
-
- /*
- * 2 plane YCbCr
- * index 0 = Y plane, [7:0] Y
- * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
- * or
- * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
- */
- case TBM_FORMAT_NV12:
- case TBM_FORMAT_NV21:
- bpp = 12;
- /*if (plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if (plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- case TBM_FORMAT_NV16:
- case TBM_FORMAT_NV61:
- bpp = 16;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if( plane_idx ==1 )*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
-
- /*
- * 3 plane YCbCr
- * index 0: Y plane, [7:0] Y
- * index 1: Cb plane, [7:0] Cb
- * index 2: Cr plane, [7:0] Cr
- * or
- * index 1: Cr plane, [7:0] Cr
- * index 2: Cb plane, [7:0] Cb
- */
-
- /*
- * NATIVE_BUFFER_FORMAT_YV12
- * NATIVE_BUFFER_FORMAT_I420
- */
- case TBM_FORMAT_YUV410:
- case TBM_FORMAT_YVU410:
- bpp = 9;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
- _align_height = SIZE_ALIGN(height / 4, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
- _align_height = SIZE_ALIGN(height / 4, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- case TBM_FORMAT_YUV411:
- case TBM_FORMAT_YVU411:
- case TBM_FORMAT_YUV420:
- case TBM_FORMAT_YVU420:
- bpp = 12;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV / 2);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV / 2);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- case TBM_FORMAT_YUV422:
- case TBM_FORMAT_YVU422:
- bpp = 16;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- case TBM_FORMAT_YUV444:
- case TBM_FORMAT_YVU444:
- bpp = 24;
- /*if(plane_idx == 0)*/
- {
- _offset = 0;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 0)
- break;
- }
- /*else if(plane_idx == 1)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- if (plane_idx == 1)
- break;
- }
- /*else if (plane_idx == 2)*/
- {
- _offset += _size;
- _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
- _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
- _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
- _bo_idx = 0;
- }
- break;
- default:
- bpp = 0;
- break;
- }
-
- *size = _size;
- *offset = _offset;
- *pitch = _pitch;
- *bo_idx = _bo_idx;
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_backend_bo_data *
-tbm_vc4_bufmgr_alloc_bo(tbm_backend_bufmgr_data *bufmgr_data, unsigned int size,
- tbm_bo_memory_type flags, tbm_error_e *error)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
- tbm_bo_vc4 bo_vc4;
-
- if (bufmgr_vc4 == NULL) {
- TBM_ERR("bufmgr_data is null\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
- if (!bo_vc4) {
- TBM_ERR("fail to allocate the bo_vc4 private\n");
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_vc4->bufmgr_vc4 = bufmgr_vc4;
-
- struct drm_vc4_create_bo arg = {0, };
-
- arg.size = (__u32)size;
-	arg.flags = flags; /* currently there are no values for the flags, but they may be used in a future extension */
- if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
- TBM_ERR("Cannot create bo_vc4(flag:%x, size:%d)\n", arg.flags,
- (unsigned int)arg.size);
- free(bo_vc4);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- bo_vc4->fd = bufmgr_vc4->fd;
- bo_vc4->gem = (unsigned int)arg.handle;
- bo_vc4->size = size;
- bo_vc4->flags_tbm = flags;
- bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
-
- if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 0)) {
- TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
- free(bo_vc4);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- pthread_mutex_init(&bo_vc4->mutex, NULL);
-
- if (bufmgr_vc4->use_dma_fence && !bo_vc4->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_vc4->gem;
- if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_ERR("Cannot dmabuf=%d\n", bo_vc4->gem);
- free(bo_vc4);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
- bo_vc4->dmabuf = arg.fd;
- }
-
- /* add bo_vc4 to hash */
- if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
- TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), flags:%d, size:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->flags_tbm,
- bo_vc4->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_backend_bo_data *)bo_vc4;
-}
-
-static tbm_backend_bo_data *
-tbm_vc4_bufmgr_import_fd(tbm_backend_bufmgr_data *bufmgr_data, tbm_fd key, tbm_error_e *error)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
- tbm_bo_vc4 bo_vc4;
- unsigned int gem = 0;
- unsigned int name;
- int ret;
- char buf[STRERR_BUFSIZE];
-
- if (bufmgr_vc4 == NULL) {
- TBM_ERR("bufmgr_data is null\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- /*getting handle from fd*/
- struct drm_prime_handle arg = {0, };
-
- arg.fd = key;
- arg.flags = 0;
- if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
- TBM_ERR("Cannot get gem handle from fd:%d (%s)\n",
- arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
- gem = arg.handle;
-
- name = _get_name(bufmgr_vc4->fd, gem);
- if (!name) {
- TBM_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
- gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- ret = drmHashLookup(bufmgr_vc4->hashBos, name, (void **)&bo_vc4);
- if (ret == 0) {
- if (gem == bo_vc4->gem) {
- if (error)
- *error = TBM_ERROR_NONE;
- return bo_vc4;
- }
- }
-
-	/* Determine size of bo_vc4. The fd-to-handle ioctl really should
-	 * return the size, but it doesn't. On kernel 3.12 or later we can
-	 * lseek on the prime fd to get the size. Older kernels will just
-	 * fail, in which case we fall back to the size reported by
-	 * DRM_IOCTL_GEM_OPEN.
-	 */
- unsigned int real_size = -1;
- struct drm_gem_open open_arg = {0, };
-
- real_size = lseek(key, 0, SEEK_END);
-
- open_arg.name = name;
- if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
- TBM_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
- gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- /* Free gem handle to avoid a memory leak*/
- struct drm_gem_close close_arg = {0, };
- memset(&close_arg, 0, sizeof(close_arg));
- close_arg.handle = open_arg.handle;
- if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_CLOSE, &close_arg)) {
-		TBM_ERR("Cannot close gem_handle. (%s)\n",
- strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- if (real_size == -1)
- real_size = open_arg.size;
-
- bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
- if (!bo_vc4) {
- TBM_ERR("bo_vc4:%p fail to allocate the bo_vc4\n", bo_vc4);
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_vc4->bufmgr_vc4 = bufmgr_vc4;
-
- bo_vc4->fd = bufmgr_vc4->fd;
- bo_vc4->gem = gem;
- bo_vc4->size = real_size;
- bo_vc4->name = name;
- bo_vc4->flags_tbm = 0;
-
-#ifdef VC4_TILED_FORMAT
- struct drm_vc4_get_tiling get_tiling = {
- .handle = bo_vc4->gem,
- };
- drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
-
- if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
- bo_vc4->flags_tbm |= TBM_BO_TILED;
-#endif
-
- if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
- TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
- free(bo_vc4);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- /* add bo_vc4 to hash */
- if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
- TBM_ERR("bo_vc4:%p Cannot insert bo_vc4 to Hash(%d) from gem:%d, fd:%d\n",
- bo_vc4, bo_vc4->name, gem, key);
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf,
- key,
- bo_vc4->flags_tbm,
- bo_vc4->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_backend_bo_data *)bo_vc4;
-}
-
-static tbm_backend_bo_data *
-tbm_vc4_bufmgr_import_key(tbm_backend_bufmgr_data *bufmgr_data, tbm_key key, tbm_error_e *error)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
- tbm_bo_vc4 bo_vc4;
- int ret;
-
- if (bufmgr_vc4 == NULL) {
- TBM_ERR("bufmgr_data is null\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- ret = drmHashLookup(bufmgr_vc4->hashBos, key, (void **)&bo_vc4);
- if (ret == 0) {
- if (error)
- *error = TBM_ERROR_NONE;
- return (tbm_backend_bo_data *)bo_vc4;
- }
-
- struct drm_gem_open arg = {0, };
-
- arg.name = key;
- if (drmIoctl(bufmgr_vc4->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
- TBM_ERR("Cannot open gem name=%d\n", key);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- bo_vc4 = calloc(1, sizeof(struct _tbm_bo_vc4));
- if (!bo_vc4) {
- TBM_ERR("fail to allocate the bo_vc4 private\n");
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
- bo_vc4->bufmgr_vc4 = bufmgr_vc4;
-
- bo_vc4->fd = bufmgr_vc4->fd;
- bo_vc4->gem = arg.handle;
- bo_vc4->size = arg.size;
- bo_vc4->name = key;
- bo_vc4->flags_tbm = 0;
-
-#ifdef VC4_TILED_FORMAT
- struct drm_vc4_get_tiling get_tiling = {
- .handle = bo_vc4->gem,
- };
- drmIoctl(bo_vc4->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
-
- if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
- bo_vc4->flags_tbm |= TBM_BO_TILED;
-#endif
-
- if (!_bo_init_cache_state(bufmgr_vc4, bo_vc4, 1)) {
- TBM_ERR("fail init cache state(%d)\n", bo_vc4->name);
- free(bo_vc4);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return NULL;
- }
-
- if (!bo_vc4->dmabuf) {
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_vc4->gem;
- if (drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
- TBM_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_vc4->gem);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- free(bo_vc4);
- return NULL;
- }
- bo_vc4->dmabuf = arg.fd;
- }
-
- /* add bo_vc4 to hash */
- if (drmHashInsert(bufmgr_vc4->hashBos, bo_vc4->name, (void *)bo_vc4) < 0)
- TBM_ERR("Cannot insert bo_vc4 to Hash(%d)\n", bo_vc4->name);
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf,
- bo_vc4->flags_tbm,
- bo_vc4->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_backend_bo_data *)bo_vc4;
-}
-
-static void
-tbm_vc4_bo_free(tbm_backend_bo_data *bo_data)
-{
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
- tbm_bo_vc4 temp;
- tbm_bufmgr_vc4 bufmgr_vc4;
- char buf[STRERR_BUFSIZE];
- int ret;
-
- if (!bo_data)
- return;
-
- bufmgr_vc4 = bo_vc4->bufmgr_vc4;
- if (!bufmgr_vc4)
- return;
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, size:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf,
- bo_vc4->size);
-
- if (bo_vc4->pBase) {
- if (munmap(bo_vc4->pBase, bo_vc4->size) == -1) {
- TBM_ERR("bo_vc4:%p fail to munmap(%s)\n",
- bo_vc4, strerror_r(errno, buf, STRERR_BUFSIZE));
- }
- }
-
- /* close dmabuf */
- if (bo_vc4->dmabuf) {
- close(bo_vc4->dmabuf);
- bo_vc4->dmabuf = 0;
- }
-
- /* delete bo_vc4 from hash */
- ret = drmHashLookup(bufmgr_vc4->hashBos, bo_vc4->name, (void **)&temp);
- if (ret == 0)
- drmHashDelete(bufmgr_vc4->hashBos, bo_vc4->name);
- else
- TBM_ERR("Cannot find bo_vc4 to Hash(%d), ret=%d\n", bo_vc4->name, ret);
-
- if (temp != bo_vc4)
- TBM_ERR("hashBos probably has several BOs with same name!!!\n");
-
- _bo_destroy_cache_state(bufmgr_vc4, bo_vc4);
-
- /* Free gem handle */
- struct drm_gem_close arg = {0, };
-
- memset(&arg, 0, sizeof(arg));
- arg.handle = bo_vc4->gem;
- if (drmIoctl(bo_vc4->fd, DRM_IOCTL_GEM_CLOSE, &arg))
- TBM_ERR("bo_vc4:%p fail to gem close.(%s)\n",
- bo_vc4, strerror_r(errno, buf, STRERR_BUFSIZE));
-
- free(bo_vc4);
-}
-
-static int
-tbm_vc4_bo_get_size(tbm_backend_bo_data *bo_data, tbm_error_e *error)
-{
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
-
- if (!bo_vc4) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return bo_vc4->size;
-}
-
-static tbm_bo_memory_type
-tbm_vc4_bo_get_memory_type(tbm_backend_bo_data *bo_data, tbm_error_e *error)
-{
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
-
- if (!bo_vc4) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return TBM_BO_DEFAULT;
- }
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return bo_vc4->flags_tbm;
-}
-
-static tbm_bo_handle
-tbm_vc4_bo_get_handle(tbm_backend_bo_data *bo_data, tbm_bo_device_type device, tbm_error_e *error)
-{
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
- tbm_bo_handle bo_handle;
-
- if (!bo_vc4) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- if (!bo_vc4->gem) {
- TBM_ERR("Cannot map gem=%d\n", bo_vc4->gem);
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- TBM_DBG("bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf,
- bo_vc4->flags_tbm,
- bo_vc4->size,
- STR_DEVICE[device]);
-
- /*Get mapped bo_handle*/
- bo_handle = _vc4_bo_handle(bo_vc4, device);
- if (bo_handle.ptr == NULL) {
- TBM_ERR("Cannot get handle: gem:%d, device:%d\n",
- bo_vc4->gem, device);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return (tbm_bo_handle) NULL;
- }
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return bo_handle;
-}
-
-static tbm_bo_handle
-tbm_vc4_bo_map(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
- tbm_bo_access_option opt, tbm_error_e *error)
-{
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
- tbm_bo_handle bo_handle;
- tbm_bufmgr_vc4 bufmgr_vc4;
-
- if (!bo_vc4) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- bufmgr_vc4 = bo_vc4->bufmgr_vc4;
- if (!bufmgr_vc4) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- if (!bo_vc4->gem) {
- TBM_ERR("Cannot map gem=%d\n", bo_vc4->gem);
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return (tbm_bo_handle) NULL;
- }
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, %s, %s\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf,
- STR_DEVICE[device],
- STR_OPT[opt]);
-
- /*Get mapped bo_handle*/
- bo_handle = _vc4_bo_handle(bo_vc4, device);
- if (bo_handle.ptr == NULL) {
- TBM_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
- bo_vc4->gem, device, opt);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return (tbm_bo_handle) NULL;
- }
-
- if (bo_vc4->map_cnt == 0)
- _bo_set_cache_state(bufmgr_vc4, bo_vc4, device, opt);
-
- bo_vc4->last_map_device = device;
-
- bo_vc4->map_cnt++;
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return bo_handle;
-}
-
-static tbm_error_e
-tbm_vc4_bo_unmap(tbm_backend_bo_data *bo_data)
-{
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
- tbm_bufmgr_vc4 bufmgr_vc4;
-
- if (!bo_vc4)
- return TBM_ERROR_INVALID_PARAMETER;
-
- bufmgr_vc4 = bo_vc4->bufmgr_vc4;
- if (!bufmgr_vc4)
- return TBM_ERROR_INVALID_PARAMETER;
-
- if (!bo_vc4->gem)
- return TBM_ERROR_INVALID_PARAMETER;
-
- bo_vc4->map_cnt--;
-
- if (bo_vc4->map_cnt == 0)
- _bo_save_cache_state(bufmgr_vc4, bo_vc4);
-
-#ifdef ENABLE_CACHECRTL
- if (bo_vc4->last_map_device == TBM_DEVICE_CPU)
- _vc4_cache_flush(bufmgr_vc4, bo_vc4, TBM_VC4_CACHE_FLUSH_ALL);
-#endif
-
- bo_vc4->last_map_device = -1;
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf);
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_error_e
-tbm_vc4_bo_lock(tbm_backend_bo_data *bo_data, tbm_bo_device_type device,
- tbm_bo_access_option opt)
-{
-#ifndef ALWAYS_BACKEND_CTRL
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
- tbm_bufmgr_vc4 bufmgr_vc4;
- struct dma_buf_fence fence;
- struct flock filelock;
- int ret = 0;
- char buf[STRERR_BUFSIZE];
-
- if (!bo_vc4)
- return TBM_ERROR_INVALID_PARAMETER;
-
- bufmgr_vc4 = bo_vc4->bufmgr_vc4;
- if (!bufmgr_vc4)
- return TBM_ERROR_INVALID_PARAMETER;
-
- if (device != TBM_DEVICE_3D && device != TBM_DEVICE_CPU) {
-		TBM_DBG("Unsupported device type.\n");
- return TBM_ERROR_INVALID_OPERATION;
- }
-
- memset(&fence, 0, sizeof(struct dma_buf_fence));
-
- /* Check if the given type is valid or not. */
- if (opt & TBM_OPTION_WRITE) {
- if (device == TBM_DEVICE_3D)
- fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
- } else if (opt & TBM_OPTION_READ) {
- if (device == TBM_DEVICE_3D)
- fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
- } else {
- TBM_ERR("Invalid argument\n");
- return TBM_ERROR_INVALID_PARAMETER;
- }
-
- /* Check if the tbm manager supports dma fence or not. */
- if (!bufmgr_vc4->use_dma_fence) {
- TBM_ERR("Not support DMA FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return TBM_ERROR_INVALID_OPERATION;
-
- }
-
- if (device == TBM_DEVICE_3D) {
- ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
- if (ret < 0) {
- TBM_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return TBM_ERROR_INVALID_OPERATION;
- }
- } else {
- if (opt & TBM_OPTION_WRITE)
- filelock.l_type = F_WRLCK;
- else
- filelock.l_type = F_RDLCK;
-
- filelock.l_whence = SEEK_CUR;
- filelock.l_start = 0;
- filelock.l_len = 0;
-
- if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
- return TBM_ERROR_INVALID_OPERATION;
- }
-
- pthread_mutex_lock(&bo_vc4->mutex);
-
- if (device == TBM_DEVICE_3D) {
- int i;
-
- for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
- if (bo_vc4->dma_fence[i].ctx == 0) {
- bo_vc4->dma_fence[i].type = fence.type;
- bo_vc4->dma_fence[i].ctx = fence.ctx;
- break;
- }
- }
-
- if (i == DMA_FENCE_LIST_MAX) {
- /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
- TBM_ERR("fence list is full\n");
- }
- }
-
- pthread_mutex_unlock(&bo_vc4->mutex);
-
-	TBM_DBG("DMABUF_IOCTL_GET_FENCE! bo_vc4:%p, gem:%d(%d), fd:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf);
-#endif /* ALWAYS_BACKEND_CTRL */
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_error_e
-tbm_vc4_bo_unlock(tbm_backend_bo_data *bo_data)
-{
-#ifndef ALWAYS_BACKEND_CTRL
-	tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
-	tbm_bufmgr_vc4 bufmgr_vc4;
-	struct dma_buf_fence fence;
-	struct flock filelock;
-	unsigned int dma_type = 0;
-	int ret = 0;
-	char buf[STRERR_BUFSIZE];
-
-	if (!bo_vc4)
-		return TBM_ERROR_INVALID_PARAMETER;
-
-	bufmgr_vc4 = bo_vc4->bufmgr_vc4;
-	if (!bufmgr_vc4)
-		return TBM_ERROR_INVALID_PARAMETER;
-
- if (bo_vc4->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
- dma_type = 1;
-
-	if (!bo_vc4->dma_fence[0].ctx && dma_type) {
-		TBM_DBG("FENCE not supported or ignored.\n");
-		return TBM_ERROR_INVALID_OPERATION;
-	}
-
-	if (!bo_vc4->dma_fence[0].ctx && !dma_type) {
-		TBM_DBG("device type is not 3D/CPU.\n");
-		return TBM_ERROR_INVALID_OPERATION;
-	}
-
- pthread_mutex_lock(&bo_vc4->mutex);
-
- if (dma_type) {
- fence.type = bo_vc4->dma_fence[0].type;
- fence.ctx = bo_vc4->dma_fence[0].ctx;
- int i;
-
- for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
- bo_vc4->dma_fence[i - 1].type = bo_vc4->dma_fence[i].type;
- bo_vc4->dma_fence[i - 1].ctx = bo_vc4->dma_fence[i].ctx;
- }
- bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
- bo_vc4->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
- }
-
- pthread_mutex_unlock(&bo_vc4->mutex);
-
- if (dma_type) {
- ret = ioctl(bo_vc4->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
- if (ret < 0) {
- TBM_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
- return TBM_ERROR_INVALID_OPERATION;
- }
- } else {
- filelock.l_type = F_UNLCK;
- filelock.l_whence = SEEK_CUR;
- filelock.l_start = 0;
- filelock.l_len = 0;
-
- if (-1 == fcntl(bo_vc4->dmabuf, F_SETLKW, &filelock))
- return TBM_ERROR_INVALID_OPERATION;
- }
-
-	TBM_DBG("DMABUF_IOCTL_PUT_FENCE! bo_vc4:%p, gem:%d(%d), fd:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf);
-#endif /* ALWAYS_BACKEND_CTRL */
-
- return TBM_ERROR_NONE;
-}
-
-static tbm_fd
-tbm_vc4_bo_export_fd(tbm_backend_bo_data *bo_data, tbm_error_e *error)
-{
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
- int ret;
- char buf[STRERR_BUFSIZE];
-
- if (!bo_vc4) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return -1;
- }
-
- struct drm_prime_handle arg = {0, };
-
- arg.handle = bo_vc4->gem;
- ret = drmIoctl(bo_vc4->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
- if (ret) {
- TBM_ERR("bo_vc4:%p Cannot dmabuf=%d (%s)\n",
- bo_vc4, bo_vc4->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- return (tbm_fd) ret;
- }
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf,
- arg.fd,
- bo_vc4->flags_tbm,
- bo_vc4->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_fd)arg.fd;
-}
-
-static tbm_key
-tbm_vc4_bo_export_key(tbm_backend_bo_data *bo_data, tbm_error_e *error)
-{
- tbm_bo_vc4 bo_vc4 = (tbm_bo_vc4)bo_data;
-
- if (!bo_vc4) {
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
-
- if (!bo_vc4->name) {
- bo_vc4->name = _get_name(bo_vc4->fd, bo_vc4->gem);
- if (!bo_vc4->name) {
- TBM_ERR("error Cannot get name\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return 0;
- }
- }
-
- TBM_DBG(" bo_vc4:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
- bo_vc4,
- bo_vc4->gem, bo_vc4->name,
- bo_vc4->dmabuf,
- bo_vc4->flags_tbm,
- bo_vc4->size);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- return (tbm_key)bo_vc4->name;
-}
-
-static void
-tbm_vc4_deinit(tbm_backend_bufmgr_data *bufmgr_data)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = (tbm_bufmgr_vc4)bufmgr_data;
- tbm_bufmgr bufmgr;
- tbm_error_e error;
- unsigned long key;
- void *value;
-
- TBM_RETURN_IF_FAIL(bufmgr_vc4 != NULL);
-
- bufmgr = bufmgr_vc4->bufmgr;
-
- tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_vc4->bufmgr_func);
- tbm_backend_bufmgr_free_bo_func(bufmgr, bufmgr_vc4->bo_func);
-
- if (bufmgr_vc4->hashBos) {
- while (drmHashFirst(bufmgr_vc4->hashBos, &key, &value) > 0) {
- free(value);
- drmHashDelete(bufmgr_vc4->hashBos, key);
- }
-
- drmHashDestroy(bufmgr_vc4->hashBos);
- bufmgr_vc4->hashBos = NULL;
- }
-
- _bufmgr_deinit_cache_state(bufmgr_vc4);
-
- if (bufmgr_vc4->bind_display)
- tbm_drm_helper_wl_auth_server_deinit();
-
- if (bufmgr_vc4->device_name)
- free(bufmgr_vc4->device_name);
-
- if (tbm_backend_bufmgr_query_display_server(bufmgr, &error))
- tbm_drm_helper_unset_tbm_master_fd();
- else
- tbm_drm_helper_unset_fd();
-
- close(bufmgr_vc4->fd);
-
- free(bufmgr_vc4);
-}
-
-static tbm_backend_bufmgr_data *
-tbm_vc4_init(tbm_bufmgr bufmgr, tbm_error_e *error)
-{
- tbm_bufmgr_vc4 bufmgr_vc4 = NULL;
- tbm_backend_bufmgr_func *bufmgr_func = NULL;
- tbm_backend_bo_func *bo_func = NULL;
- int fp;
- tbm_error_e err;
- int set_master = 0;
-
- if (!bufmgr) {
- TBM_ERR("bufmgr is null.\n");
- if (error)
- *error = TBM_ERROR_INVALID_PARAMETER;
- return NULL;
- }
-
- bufmgr_vc4 = calloc(1, sizeof(struct _tbm_bufmgr_vc4));
- if (!bufmgr_vc4) {
- TBM_ERR("fail to alloc bufmgr_vc4!\n");
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- return NULL;
- }
-
- /* check the master_fd which already had opened */
- bufmgr_vc4->fd = tbm_drm_helper_get_master_fd();
- if (bufmgr_vc4->fd < 0) {
- bufmgr_vc4->fd = _tbm_vc4_open_drm();
- if (bufmgr_vc4->fd < 0) {
- TBM_ERR("fail to open drm!\n");
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_open_drm;
- }
- if (drmIsMaster(bufmgr_vc4->fd)) {
- tbm_drm_helper_set_tbm_master_fd(bufmgr_vc4->fd);
- set_master = 1;
-
- bufmgr_vc4->device_name = drmGetDeviceNameFromFd(bufmgr_vc4->fd);
- if (!bufmgr_vc4->device_name) {
- TBM_ERR("fail to get device name!\n");
- tbm_drm_helper_unset_tbm_master_fd();
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_get_device_name;
- }
- TBM_INFO("This is Master FD(%d) from open_drm.", bufmgr_vc4->fd);
- } else {
- /* close the fd and get the authenticated fd from the master fd */
- close(bufmgr_vc4->fd);
- bufmgr_vc4->fd = -1;
-
- if (_check_render_node()) {
- bufmgr_vc4->fd = _get_render_node();//TODO
- if (bufmgr_vc4->fd < 0) {
- TBM_ERR("fail to get render node\n");
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_get_render_node;
- }
- TBM_INFO("Use render node:%d\n", bufmgr_vc4->fd);
- } else {
- /* get the authenticated drm fd from the master fd */
- if (!tbm_drm_helper_get_auth_info(&(bufmgr_vc4->fd), &(bufmgr_vc4->device_name), NULL)) {
- TBM_ERR("fail to get auth drm info!\n");
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_get_auth_info;
- }
- TBM_INFO("This is Authenticated FD(%d)", bufmgr_vc4->fd);
- }
- }
- } else {
- bufmgr_vc4->device_name = drmGetDeviceNameFromFd(bufmgr_vc4->fd);
- if (!bufmgr_vc4->device_name) {
- TBM_ERR("fail to get device name!\n");
- tbm_drm_helper_unset_tbm_master_fd();
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_get_device_name;
- }
- TBM_INFO("This is Master FD from tbm_drm_helper_get_master_fd(%d)", bufmgr_vc4->fd);
- }
- tbm_drm_helper_set_fd(bufmgr_vc4->fd);
-
- //Check if the tbm manager supports dma fence or not.
- fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
- if (fp != -1) {
- char buf[1];
- int length = read(fp, buf, 1);
-
- if (length == 1 && buf[0] == '1')
- bufmgr_vc4->use_dma_fence = 1;
-
- close(fp);
- }
-
- if (!_bufmgr_init_cache_state(bufmgr_vc4)) {
- TBM_ERR("fail to init bufmgr cache state\n");
- goto fail_init_cache_state;
- }
-
- /*Create Hash Table*/
- bufmgr_vc4->hashBos = drmHashCreate();
-
- /* alloc and register bufmgr_funcs */
- bufmgr_func = tbm_backend_bufmgr_alloc_bufmgr_func(bufmgr, &err);
- if (!bufmgr_func) {
- TBM_ERR("fail to alloc bufmgr_func! err(%d)\n", err);
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- goto fail_alloc_bufmgr_func;
- }
-
- bufmgr_func->bufmgr_get_capabilities = tbm_vc4_bufmgr_get_capabilities;
- //if (tbm_backend_bufmgr_query_display_server(bufmgr, &err) && !_check_render_node())
- bufmgr_func->bufmgr_bind_native_display = tbm_vc4_bufmgr_bind_native_display;
- bufmgr_func->bufmgr_get_supported_formats = tbm_vc4_bufmgr_get_supported_formats;
- bufmgr_func->bufmgr_get_plane_data = tbm_vc4_bufmgr_get_plane_data;
- bufmgr_func->bufmgr_alloc_bo = tbm_vc4_bufmgr_alloc_bo;
- bufmgr_func->bufmgr_alloc_bo_with_format = NULL;
- bufmgr_func->bufmgr_import_fd = tbm_vc4_bufmgr_import_fd;
- bufmgr_func->bufmgr_import_key = tbm_vc4_bufmgr_import_key;
-#ifdef VC4_TILED_FORMAT
- bufmgr_func->bufmgr_alloc_bo_with_tiled_format = tbm_vc4_bufmgr_alloc_bo_with_tiled_format;
-#else
- bufmgr_func->bufmgr_alloc_bo_with_tiled_format = NULL;
-#endif
- err = tbm_backend_bufmgr_register_bufmgr_func(bufmgr, bufmgr_func);
- if (err != TBM_ERROR_NONE) {
- TBM_ERR("fail to register bufmgr_func! err(%d)\n", err);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_register_bufmgr_func;
- }
- bufmgr_vc4->bufmgr_func = bufmgr_func;
-
- /* alloc and register bo_funcs */
- bo_func = tbm_backend_bufmgr_alloc_bo_func(bufmgr, &err);
- if (!bo_func) {
- TBM_ERR("fail to alloc bo_func! err(%d)\n", err);
- if (error)
- *error = TBM_ERROR_OUT_OF_MEMORY;
- goto fail_alloc_bo_func;
- }
-
- bo_func->bo_free = tbm_vc4_bo_free;
- bo_func->bo_get_size = tbm_vc4_bo_get_size;
- bo_func->bo_get_memory_types = tbm_vc4_bo_get_memory_type;
- bo_func->bo_get_handle = tbm_vc4_bo_get_handle;
- bo_func->bo_map = tbm_vc4_bo_map;
- bo_func->bo_unmap = tbm_vc4_bo_unmap;
- bo_func->bo_lock = tbm_vc4_bo_lock;
- bo_func->bo_unlock = tbm_vc4_bo_unlock;
- bo_func->bo_export_fd = tbm_vc4_bo_export_fd;
- bo_func->bo_export_key = tbm_vc4_bo_export_key;
-
- err = tbm_backend_bufmgr_register_bo_func(bufmgr, bo_func);
- if (err != TBM_ERROR_NONE) {
- TBM_ERR("fail to register bo_func! err(%d)\n", err);
- if (error)
- *error = TBM_ERROR_INVALID_OPERATION;
- goto fail_register_bo_func;
- }
- bufmgr_vc4->bo_func = bo_func;
-
- TBM_DBG("drm_fd:%d\n", bufmgr_vc4->fd);
-
- if (error)
- *error = TBM_ERROR_NONE;
-
- bufmgr_vc4->bufmgr = bufmgr;
-
- return (tbm_backend_bufmgr_data *)bufmgr_vc4;
-
-fail_register_bo_func:
- tbm_backend_bufmgr_free_bo_func(bufmgr, bo_func);
-fail_alloc_bo_func:
-fail_register_bufmgr_func:
- tbm_backend_bufmgr_free_bufmgr_func(bufmgr, bufmgr_func);
-fail_alloc_bufmgr_func:
- _bufmgr_deinit_cache_state(bufmgr_vc4);
- if (bufmgr_vc4->hashBos)
- drmHashDestroy(bufmgr_vc4->hashBos);
-fail_init_cache_state:
- if (set_master)
- tbm_drm_helper_unset_tbm_master_fd();
- tbm_drm_helper_unset_fd();
-fail_get_device_name:
- close(bufmgr_vc4->fd);
-fail_get_auth_info:
-fail_get_render_node:
-fail_open_drm:
- free(bufmgr_vc4);
- return NULL;
-}
-
-tbm_backend_module tbm_backend_module_data = {
- "vc4",
- "Samsung",
- TBM_BACKEND_ABI_VERSION_3_0,
- tbm_vc4_init,
- tbm_vc4_deinit
-};
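For reference, the import path removed above determines a buffer's size by seeking on its PRIME (dma-buf) fd, which Linux 3.12 and later support. A minimal, self-contained sketch of that probe (not part of this patch; the function name is illustrative and the fd is assumed to be a dma-buf fd obtained elsewhere):

#include <sys/types.h>
#include <unistd.h>

/* Probe the size of a dma-buf (PRIME) fd by seeking to its end.
 * Returns the size in bytes, or -1 when the kernel predates dma-buf
 * llseek support (pre-3.12) or the fd is invalid. */
static off_t dmabuf_probe_size(int dmabuf_fd)
{
	off_t size = lseek(dmabuf_fd, 0, SEEK_END);

	if (size < 0)
		return -1;

	/* Rewind so later users of the fd see the expected offset. */
	if (lseek(dmabuf_fd, 0, SEEK_SET) < 0)
		return -1;

	return size;
}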
--- /dev/null
+/*
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+/**
+ * \file
+ * List macros heavily inspired by the Linux kernel
+ * list handling, including iteration helpers.
+ *
+ * Not thread-safe, so concurrent operations need to
+ * be protected by an external mutex.
+ */
+#ifndef _U_DOUBLE_LIST_H_
+#define _U_DOUBLE_LIST_H_
+
+#include <stddef.h>
+
+struct list_head {
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+static inline void list_inithead(struct list_head *item)
+{
+ item->prev = item;
+ item->next = item;
+}
+
+static inline void list_add(struct list_head *item, struct list_head *list)
+{
+ item->prev = list;
+ item->next = list->next;
+ list->next->prev = item;
+ list->next = item;
+}
+
+static inline void list_addtail(struct list_head *item, struct list_head *list)
+{
+ item->next = list;
+ item->prev = list->prev;
+ list->prev->next = item;
+ list->prev = item;
+}
+
+static inline void list_replace(struct list_head *from, struct list_head *to)
+{
+ to->prev = from->prev;
+ to->next = from->next;
+ from->next->prev = to;
+ from->prev->next = to;
+}
+
+static inline void list_del(struct list_head *item)
+{
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
+}
+
+static inline void list_delinit(struct list_head *item)
+{
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
+ item->next = item;
+ item->prev = item;
+}
+
+static inline int list_length(struct list_head *item)
+{
+ struct list_head *next;
+ int length = 0;
+
+ next = item->next;
+ while (next != item) {
+ length++;
+ next = next->next;
+ }
+
+ return length;
+}
+
+#define LIST_INITHEAD(__item) list_inithead(__item)
+#define LIST_ADD(__item, __list) list_add(__item, __list)
+#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
+#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
+#define LIST_DEL(__item) list_del(__item)
+#define LIST_DELINIT(__item) list_delinit(__item)
+#define LIST_LENGTH(__item) list_length(__item)
+
+#define LIST_ENTRY(__type, __item, __field) \
+ ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
+
+#define LIST_FIRST_ENTRY(__ptr, __type, __field) \
+ LIST_ENTRY(__type, (__ptr)->next, __field)
+
+#define LIST_LAST_ENTRY(__ptr, __type, __field) \
+ LIST_ENTRY(__type, (__ptr)->prev, __field)
+
+#define LIST_IS_EMPTY(__list) \
+ ((__list)->next == (__list))
+
+#ifndef container_of
+#define container_of(ptr, sample, member) \
+ (void *)((char *)(ptr) \
+ - ((char *)&(sample)->member - (char *)(sample)))
+#endif
+
+#define LIST_FOR_EACH_ENTRY(pos, head, member) \
+ for (pos = container_of((head)->next, pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.next, pos, member))
+
+#define LIST_FOR_EACH_ENTRY_REV(pos, head, member) \
+ for (pos = container_of((head)->prev, pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.prev, pos, member))
+
+#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
+ for (pos = container_of((head)->next, pos, member), \
+ storage = container_of(pos->member.next, pos, member); \
+ &pos->member != (head); \
+ pos = storage, storage = container_of(storage->member.next, storage, member))
+
+#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
+ for (pos = container_of((head)->prev, pos, member), \
+ storage = container_of(pos->member.prev, pos, member); \
+ &pos->member != (head); \
+ pos = storage, storage = container_of(storage->member.prev, storage, member))
+
+#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
+ for (pos = container_of((start), pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.next, pos, member))
+
+#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
+ for (pos = container_of((start), pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.prev, pos, member))
+
+#define LIST_FIND_ITEM(item, head, type, member, found) \
+ do { \
+ type *pos = NULL; \
+ found = NULL; \
+ LIST_FOR_EACH_ENTRY(pos, head, member) \
+ if (pos == item) { found = item; break; } \
+ } while (0)
+
+#endif /*_U_DOUBLE_LIST_H_*/
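A short usage sketch of the list macros above (illustrative only and not part of this patch; the struct name, field names, and include path are assumptions):

#include <stdio.h>
#include "list.h"	/* the header introduced above; the actual path may differ */

struct node {
	int value;
	struct list_head link;	/* embedded list hook */
};

static void list_demo(void)
{
	struct list_head head;
	struct node a = { .value = 1 }, b = { .value = 2 };
	struct node *pos, *tmp;

	LIST_INITHEAD(&head);
	LIST_ADDTAIL(&a.link, &head);	/* head -> a */
	LIST_ADDTAIL(&b.link, &head);	/* head -> a -> b */

	LIST_FOR_EACH_ENTRY(pos, &head, link)
		printf("%d\n", pos->value);	/* prints 1, then 2 */

	/* The SAFE variant tolerates unlinking the current entry. */
	LIST_FOR_EACH_ENTRY_SAFE(pos, tmp, &head, link)
		LIST_DELINIT(&pos->link);
}

Because the hook is embedded in the element, no per-node allocation is needed; LIST_ENTRY/container_of recovers the enclosing struct from the hook pointer.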
--- /dev/null
+/**************************************************************************
+
+libtbm_vc4
+
+Copyright 2021 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: SooChan Lim <sc1.lim@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "tbm_backend_log.h"
+
+#undef LOG_TAG
+#define LOG_TAG "TBM_BACKEND"
+
+unsigned int tbm_log_debug_level = TBM_BACKEND_LOG_LEVEL_INFO;
+
+static void
+_tbm_backend_log_dlog_print(int level, const char *fmt, va_list arg)
+{
+ log_priority dlog_prio;
+
+ switch (level) {
+ case TBM_BACKEND_LOG_LEVEL_ERR:
+ dlog_prio = DLOG_ERROR;
+ break;
+ case TBM_BACKEND_LOG_LEVEL_WRN:
+ dlog_prio = DLOG_WARN;
+ break;
+ case TBM_BACKEND_LOG_LEVEL_INFO:
+ dlog_prio = DLOG_INFO;
+ break;
+ case TBM_BACKEND_LOG_LEVEL_DBG:
+ dlog_prio = DLOG_DEBUG;
+ break;
+ default:
+ return;
+ }
+ __dlog_vprint(LOG_ID_SYSTEM, dlog_prio, LOG_TAG, fmt, arg);
+}
+
+void
+tbm_backend_log_print(int level, const char *fmt, ...)
+{
+ va_list arg;
+
+ if (level > tbm_log_debug_level)
+ return;
+
+ va_start(arg, fmt);
+ _tbm_backend_log_dlog_print(level, fmt, arg);
+ va_end(arg);
+}
+
--- /dev/null
+/**************************************************************************
+
+libtbm_vc4
+
+Copyright 2021 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: SooChan Lim <sc1.lim@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifndef __TBM_BACKEND_LOG_H__
+#define __TBM_BACKEND_LOG_H__
+
+#include <sys/syscall.h>
+#include <time.h>
+#include <dlog.h>
+
+enum {
+ TBM_BACKEND_LOG_LEVEL_NONE,
+ TBM_BACKEND_LOG_LEVEL_ERR,
+ TBM_BACKEND_LOG_LEVEL_WRN,
+ TBM_BACKEND_LOG_LEVEL_INFO,
+ TBM_BACKEND_LOG_LEVEL_DBG,
+};
+
+
+/* log level */
+void tbm_backend_log_print(int level, const char *fmt, ...);
+
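+/*
+ * Each macro below prefixes the message with a monotonic "[sec.usec]"
+ * timestamp, the calling thread id and the function name/line before
+ * handing it to tbm_backend_log_print() at the matching level.
+ */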
+#define TBM_BACKEND_DBG(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_DBG, "[%5d.%06d][%d][%s %d]"fmt, \
+ (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
+ (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
+ } while (0)
+
+#define TBM_BACKEND_INFO(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_INFO, "[%5d.%06d][%d][%s %d]"fmt, \
+ (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
+ (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
+ } while (0)
+
+#define TBM_BACKEND_WRN(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_WRN, "[%5d.%06d][%d][%s %d]"fmt, \
+ (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
+ (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
+ } while (0)
+
+#define TBM_BACKEND_ERR(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ tbm_backend_log_print(TBM_BACKEND_LOG_LEVEL_ERR, "[%5d.%06d][%d][%s %d]"fmt, \
+ (int)ts.tv_sec, (int)ts.tv_nsec / 1000, \
+ (int)syscall(SYS_gettid), __FUNCTION__, __LINE__, ##args); \
+ } while (0)
+
+#define TBM_BACKEND_RETURN_IF_FAIL(cond) {\
+ if (!(cond)) {\
+ TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
+ return;\
+ } \
+}
+#define TBM_BACKEND_RETURN_VAL_IF_FAIL(cond, val) {\
+ if (!(cond)) {\
+ TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
+ return val;\
+ } \
+}
+#define TBM_BACKEND_GOTO_VAL_IF_FAIL(cond, val) {\
+ if (!(cond)) {\
+ TBM_BACKEND_ERR("'%s' failed.\n", #cond);\
+ goto val;\
+ } \
+}
+
+#endif /* __TBM_BACKEND_LOG_H__ */
\ No newline at end of file
--- /dev/null
+/**************************************************************************
+
+libtbm_vc4
+
+Copyright 2017 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <libudev.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <xf86drm.h>
+#include <vc4_drm.h>
+#include <pthread.h>
+#include <hal-common.h>
+#include <hal-tbm-types.h>
+#include <hal-tbm-interface.h>
+#include "tbm_bufmgr_tgl.h"
+#include "tbm_backend_log.h"
+#include "tbm_backend_list.h"
+
+#define VC4_DRM_NAME "vc4"
+
+#define TBM_COLOR_FORMAT_COUNT 4
+#define STRERR_BUFSIZE 128
+#define SIZE_ALIGN(value, base) (((value) + ((base) - 1)) & ~((base) - 1))
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#ifdef ALIGN_EIGHT
+#define TBM_SURFACE_ALIGNMENT_PLANE (8)
+#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (8)
+#else
+#define TBM_SURFACE_ALIGNMENT_PLANE (16)
+#define TBM_SURFACE_ALIGNMENT_PITCH_RGB (16)
+#endif
+
+#define TBM_SURFACE_ALIGNMENT_PLANE_NV12 (4096)
+#define TBM_SURFACE_ALIGNMENT_PITCH_YUV (32)
+#define TBM_SURFACE_ALIGNMENT_HEIGHT_YUV (16)
+
+//#define VC4_TILED_FORMAT 1
+
+struct dma_buf_info {
+ unsigned long size;
+ unsigned int fence_supported;
+ unsigned int padding;
+};
+
+#define DMA_BUF_ACCESS_READ 0x1
+#define DMA_BUF_ACCESS_WRITE 0x2
+#define DMA_BUF_ACCESS_DMA 0x4
+#define DMA_BUF_ACCESS_MAX 0x8
+
+#define DMA_FENCE_LIST_MAX 5
+
+struct dma_buf_fence {
+ unsigned long ctx;
+ unsigned int type;
+};
+
+#define DMABUF_IOCTL_BASE 'F'
+#define DMABUF_IOWR(nr, type) _IOWR(DMABUF_IOCTL_BASE, nr, type)
+
+#define DMABUF_IOCTL_GET_INFO DMABUF_IOWR(0x00, struct dma_buf_info)
+#define DMABUF_IOCTL_GET_FENCE DMABUF_IOWR(0x01, struct dma_buf_fence)
+#define DMABUF_IOCTL_PUT_FENCE DMABUF_IOWR(0x02, struct dma_buf_fence)
+
+/* tgl key values */
+#define GLOBAL_KEY ((unsigned int)(-1))
+/* TBM_CACHE */
+#define TBM_VC4_CACHE_INV 0x01 /**< cache invalidate */
+#define TBM_VC4_CACHE_CLN 0x02 /**< cache clean */
+#define TBM_VC4_CACHE_ALL 0x10 /**< cache all */
+#define TBM_VC4_CACHE_FLUSH (TBM_VC4_CACHE_INV|TBM_VC4_CACHE_CLN) /**< cache flush */
+#define TBM_VC4_CACHE_FLUSH_ALL (TBM_VC4_CACHE_FLUSH|TBM_VC4_CACHE_ALL) /**< cache flush all */
+
+enum {
+ DEVICE_NONE = 0,
+ DEVICE_CA, /* cache aware device */
+ DEVICE_CO /* cache oblivious device */
+};
+
+typedef union _tbm_bo_cache_state tbm_bo_cache_state;
+
+union _tbm_bo_cache_state {
+ unsigned int val;
+ struct {
+		unsigned int cntFlush:16;	/* snapshot of the global cache flush count for sync */
+ unsigned int isCached:1;
+ unsigned int isDirtied:2;
+ } data;
+};
+
+typedef struct _tbm_vc4_bufmgr tbm_vc4_bufmgr;
+typedef struct _tbm_vc4_surface tbm_vc4_surface;
+typedef struct _tbm_vc4_bo tbm_vc4_bo;
+
+/* tbm surface object for vc4 */
+struct _tbm_vc4_surface {
+ struct list_head link;
+ uint32_t refcnt;
+
+ uint32_t width;
+ uint32_t height;
+ hal_tbm_format format;
+ tbm_vc4_bufmgr *bufmgr_data;
+ int num_bos;
+ tbm_vc4_bo *bo_data;
+};
+
+/* tbm buffer object for vc4 */
+struct _tbm_vc4_bo {
+ int fd;
+
+ unsigned int name; /* FLINK ID */
+
+ unsigned int gem; /* GEM Handle */
+
+ unsigned int dmabuf; /* fd for dmabuf */
+
+ void *pBase; /* virtual address */
+
+ unsigned int size;
+
+	unsigned int flags_tbm;		/* currently unused; reserved for future extension */
+
+ pthread_mutex_t mutex;
+ struct dma_buf_fence dma_fence[DMA_FENCE_LIST_MAX];
+ int device;
+ int opt;
+
+ tbm_bo_cache_state cache_state;
+ unsigned int map_cnt;
+ int last_map_device;
+
+ tbm_vc4_bufmgr *bufmgr_data;
+};
+
+/* tbm bufmgr private for vc4 */
+struct _tbm_vc4_bufmgr {
+ int fd;
+ int isLocal;
+ void *hashBos;
+
+ struct list_head surface_data_list;
+
+ int use_dma_fence;
+
+ int tgl_fd;
+};
+
+static char *STR_DEVICE[] = {
+ "DEF",
+ "CPU",
+ "2D",
+ "3D",
+ "MM"
+};
+
+static char *STR_OPT[] = {
+ "NONE",
+ "RD",
+ "WR",
+ "RDWR"
+};
+
+
+static uint32_t tbm_vc4_color_format_list[TBM_COLOR_FORMAT_COUNT] = {
+ HAL_TBM_FORMAT_ARGB8888,
+ HAL_TBM_FORMAT_XRGB8888,
+ HAL_TBM_FORMAT_NV12,
+ HAL_TBM_FORMAT_YUV420
+ };
+
+static hal_tbm_bo *tbm_vc4_bufmgr_alloc_bo(hal_tbm_bufmgr *bufmgr, unsigned int size,
+ hal_tbm_bo_memory_type flags, hal_tbm_error *error);
+static void tbm_vc4_bo_free(hal_tbm_bo *bo);
+static hal_tbm_bo *tbm_vc4_bufmgr_import_fd(hal_tbm_bufmgr *bufmgr, hal_tbm_fd key, hal_tbm_error *error);
+static hal_tbm_fd tbm_vc4_bo_export_fd(hal_tbm_bo *bo, hal_tbm_error *error);
+
+#undef ENABLE_CACHECRTL
+#ifdef ENABLE_CACHECRTL
+#ifdef TGL_GET_VERSION
+static inline int
+_tgl_get_version(int fd)
+{
+ struct tgl_ver_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ err = ioctl(fd, TGL_IOCTL_GET_VERSION, &data);
+ if (err) {
+		TBM_BACKEND_ERR("error(%s)\n",
+				strerror_r(errno, buf, STRERR_BUFSIZE));
+ return 0;
+ }
+
+ TBM_BACKEND_DBG("tgl version is (%u, %u).\n", data.major, data.minor);
+
+ return 1;
+}
+#endif
+
+static inline int
+_tgl_init(int fd, unsigned int key)
+{
+ struct tgl_reg_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.timeout_ms = 1000;
+
+ err = ioctl(fd, TGL_IOCTL_REGISTER, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_destroy(int fd, unsigned int key)
+{
+ struct tgl_reg_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ err = ioctl(fd, TGL_IOCTL_UNREGISTER, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_lock(int fd, unsigned int key, int opt)
+{
+ struct tgl_lock_data data;
+ enum tgl_type_data tgl_type;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ switch (opt) {
+ case TBM_OPTION_READ:
+ tgl_type = TGL_TYPE_READ;
+ break;
+ case TBM_OPTION_WRITE:
+ tgl_type = TGL_TYPE_WRITE;
+ break;
+ default:
+ tgl_type = TGL_TYPE_NONE;
+ break;
+ }
+
+ data.key = key;
+ data.type = tgl_type;
+
+ err = ioctl(fd, TGL_IOCTL_LOCK, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d opt:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key, opt);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_unlock(int fd, unsigned int key)
+{
+ struct tgl_lock_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.type = TGL_TYPE_NONE;
+
+ err = ioctl(fd, TGL_IOCTL_UNLOCK, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int
+_tgl_set_data(int fd, unsigned int key, unsigned int val)
+{
+ struct tgl_usr_data data;
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+ data.data1 = val;
+
+ err = ioctl(fd, TGL_IOCTL_SET_DATA, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline unsigned int
+_tgl_get_data(int fd, unsigned int key)
+{
+ struct tgl_usr_data data = { 0, };
+ int err;
+ char buf[STRERR_BUFSIZE];
+
+ data.key = key;
+
+ err = ioctl(fd, TGL_IOCTL_GET_DATA, &data);
+ if (err) {
+ TBM_BACKEND_ERR("error(%s) key:%d\n",
+ strerror_r(errno, buf, STRERR_BUFSIZE), key);
+ return 0;
+ }
+
+ return data.data1;
+}
+
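+/* Flush and/or invalidate CPU caches for one bo through the
+ * DRM_VC4_GEM_CACHE_OP ioctl; a NULL bo_data requests a full cache flush.
+ * Skipped when dma-fence handles cache maintenance. */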
+static int
+_vc4_cache_flush(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int flags)
+{
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+
+	/* cache flushes are handled by the kernel when dma-fence is in use. */
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ struct drm_vc4_gem_cache_op cache_op = {0, };
+ int ret;
+
+ /* if bo_data is null, do cache_flush_all */
+ if (bo_data) {
+ cache_op.flags = 0;
+		cache_op.usr_addr = (uint64_t)(uintptr_t)bo_data->pBase;
+ cache_op.size = bo_data->size;
+ } else {
+ flags = TBM_VC4_CACHE_FLUSH_ALL;
+ cache_op.flags = 0;
+ cache_op.usr_addr = 0;
+ cache_op.size = 0;
+ }
+
+ if (flags & TBM_VC4_CACHE_INV) {
+ if (flags & TBM_VC4_CACHE_ALL)
+ cache_op.flags |= VC4_DRM_CACHE_INV_ALL;
+ else
+ cache_op.flags |= VC4_DRM_CACHE_INV_RANGE;
+ }
+
+ if (flags & TBM_VC4_CACHE_CLN) {
+ if (flags & TBM_VC4_CACHE_ALL)
+ cache_op.flags |= VC4_DRM_CACHE_CLN_ALL;
+ else
+ cache_op.flags |= VC4_DRM_CACHE_CLN_RANGE;
+ }
+
+ if (flags & TBM_VC4_CACHE_ALL)
+ cache_op.flags |= VC4_DRM_ALL_CACHES_CORES;
+
+ ret = drmCommandWriteRead(bufmgr_data->fd, DRM_VC4_GEM_CACHE_OP, &cache_op,
+ sizeof(cache_op));
+ if (ret) {
+ TBM_BACKEND_ERR("fail to flush the cache.\n");
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
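+/* Register the bo name with the tgl lock device; for freshly allocated
+ * (non-imported) bos the cache-state word is also reset. No-op unless
+ * ENABLE_CACHECRTL is defined. */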
+static int
+_bo_init_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int import)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ _tgl_init(bufmgr_data->tgl_fd, bo_data->name);
+
+ tbm_bo_cache_state cache_state;
+
+ if (import == 0) {
+ cache_state.data.isDirtied = DEVICE_NONE;
+ cache_state.data.isCached = 0;
+ cache_state.data.cntFlush = 0;
+
+ _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name, cache_state.val);
+ }
+#endif
+
+ return 1;
+}
+
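+/* Update the cached/dirtied flags of a bo for a map on the given device
+ * and access option, flushing or invalidating CPU caches when ownership
+ * moves between the cache-aware (CPU) and cache-oblivious (non-CPU) sides.
+ * No-op unless ENABLE_CACHECRTL is defined. */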
+static int
+_bo_set_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data, int device, int opt)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ char need_flush = 0;
+ unsigned short cntFlush = 0;
+
+ /* get cache state of a bo_data */
+ bo_data->cache_state.val = _tgl_get_data(bufmgr_data->tgl_fd,
+ bo_data->name);
+
+ /* get global cache flush count */
+ cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
+
+ if (device == HAL_TBM_DEVICE_CPU) {
+ if (bo_data->cache_state.data.isDirtied == DEVICE_CO &&
+ bo_data->cache_state.data.isCached)
+ need_flush = TBM_VC4_CACHE_INV;
+
+ bo_data->cache_state.data.isCached = 1;
+ if (opt & TBM_OPTION_WRITE)
+ bo_data->cache_state.data.isDirtied = DEVICE_CA;
+ else {
+ if (bo_data->cache_state.data.isDirtied != DEVICE_CA)
+ bo_data->cache_state.data.isDirtied = DEVICE_NONE;
+ }
+ } else {
+ if (bo_data->cache_state.data.isDirtied == DEVICE_CA &&
+ bo_data->cache_state.data.isCached &&
+ bo_data->cache_state.data.cntFlush == cntFlush)
+ need_flush = TBM_VC4_CACHE_CLN | TBM_VC4_CACHE_ALL;
+
+ if (opt & TBM_OPTION_WRITE)
+ bo_data->cache_state.data.isDirtied = DEVICE_CO;
+ else {
+ if (bo_data->cache_state.data.isDirtied != DEVICE_CO)
+ bo_data->cache_state.data.isDirtied = DEVICE_NONE;
+ }
+ }
+
+ if (need_flush) {
+ if (need_flush & TBM_VC4_CACHE_ALL)
+ _tgl_set_data(bufmgr_data->tgl_fd, GLOBAL_KEY, (unsigned int)(++cntFlush));
+
+ /* call cache flush */
+ _vc4_cache_flush(bufmgr_data, bo_data, need_flush);
+
+ TBM_BACKEND_DBG(" \tcache(%d,%d)....flush:0x%x, cntFlush(%d)\n",
+ bo_data->cache_state.data.isCached,
+ bo_data->cache_state.data.isDirtied,
+ need_flush,
+ cntFlush);
+ }
+#endif
+
+ return 1;
+}
+
+static int
+_bo_save_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bo_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ unsigned short cntFlush = 0;
+
+ /* get global cache flush count */
+ cntFlush = (unsigned short)_tgl_get_data(bufmgr_data->tgl_fd, GLOBAL_KEY);
+
+ /* save global cache flush count */
+ bo_data->cache_state.data.cntFlush = cntFlush;
+ _tgl_set_data(bufmgr_data->tgl_fd, bo_data->name,
+ bo_data->cache_state.val);
+#endif
+
+ return 1;
+}
+
+static void
+_bo_destroy_cache_state(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
+ TBM_BACKEND_RETURN_IF_FAIL(bo_data != NULL);
+
+ if (bufmgr_data->use_dma_fence)
+ return ;
+
+ _tgl_destroy(bufmgr_data->tgl_fd, bo_data->name);
+#endif
+}
+
+static int
+_bufmgr_init_cache_state(tbm_vc4_bufmgr *bufmgr_data)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, 0);
+
+ if (bufmgr_data->use_dma_fence)
+ return 1;
+
+ /* open tgl fd for saving cache flush data */
+ bufmgr_data->tgl_fd = open(tgl_devfile, O_RDWR);
+
+ if (bufmgr_data->tgl_fd < 0) {
+ bufmgr_data->tgl_fd = open(tgl_devfile1, O_RDWR);
+ if (bufmgr_data->tgl_fd < 0) {
+ TBM_BACKEND_ERR("fail to open global_lock:%s\n",
+ tgl_devfile1);
+ return 0;
+ }
+ }
+
+#ifdef TGL_GET_VERSION
+ if (!_tgl_get_version(bufmgr_data->tgl_fd)) {
+ TBM_BACKEND_ERR("fail to get tgl_version. tgl init failed.\n");
+ close(bufmgr_data->tgl_fd);
+ return 0;
+ }
+#endif
+
+ if (!_tgl_init(bufmgr_data->tgl_fd, GLOBAL_KEY)) {
+ TBM_BACKEND_ERR("fail to initialize the tgl\n");
+ close(bufmgr_data->tgl_fd);
+ return 0;
+ }
+#endif
+
+ return 1;
+}
+
+static void
+_bufmgr_deinit_cache_state(tbm_vc4_bufmgr *bufmgr_data)
+{
+#ifdef ENABLE_CACHECRTL
+ TBM_BACKEND_RETURN_IF_FAIL(bufmgr_data != NULL);
+
+ if (bufmgr_data->use_dma_fence)
+ return;
+
+ if (bufmgr_data->tgl_fd >= 0)
+ close(bufmgr_data->tgl_fd);
+#endif
+}
+
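+/* Open the vc4 DRM device. Try drmOpen() first and fall back to scanning
+ * udev for a card node whose parent is "vc4-drm". Returns the fd or -1. */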
+static int
+_tbm_vc4_open_drm()
+{
+ int fd = -1;
+
+ fd = drmOpen(VC4_DRM_NAME, NULL);
+ if (fd < 0) {
+ TBM_BACKEND_ERR("fail to open drm.(%s)\n", VC4_DRM_NAME);
+ }
+
+ if (fd < 0) {
+ struct udev *udev = NULL;
+ struct udev_enumerate *e = NULL;
+ struct udev_list_entry *entry = NULL;
+ struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+ const char *filepath;
+ struct stat s;
+ int ret;
+
+ TBM_BACKEND_DBG("search drm-device by udev\n");
+
+ udev = udev_new();
+ if (!udev) {
+ TBM_BACKEND_ERR("udev_new() failed.\n");
+ return -1;
+ }
+
+ e = udev_enumerate_new(udev);
+ udev_enumerate_add_match_subsystem(e, "drm");
+ udev_enumerate_add_match_sysname(e, "card[0-9]*");
+ udev_enumerate_scan_devices(e);
+
+ udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+ device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+ udev_list_entry_get_name(entry));
+ device_parent = udev_device_get_parent(device);
+			/* No need to unref device_parent; it shares a refcount with device. */
+ if (device_parent) {
+ if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
+ drm_device = device;
+ TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
+ udev_device_get_syspath(drm_device),
+ udev_device_get_sysname(device_parent));
+ break;
+ }
+ }
+ udev_device_unref(device);
+ }
+
+ udev_enumerate_unref(e);
+
+		/* Bail out if no matching vc4 drm device was found. */
+		if (!drm_device) {
+			TBM_BACKEND_ERR("fail to find the vc4 drm device.\n");
+			udev_unref(udev);
+			return -1;
+		}
+
+		/* Get device file path. */
+		filepath = udev_device_get_devnode(drm_device);
+ if (!filepath) {
+ TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ /* Open DRM device file and check validity. */
+ fd = open(filepath, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+			TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ ret = fstat(fd, &s);
+ if (ret) {
+			TBM_BACKEND_ERR("fstat() failed.\n");
+ close(fd);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ }
+
+ return fd;
+}
+
+#if 0 // render node functions.
+static int
+_check_render_node(void)
+{
+#ifndef USE_RENDER_NODE
+ return 0;
+#else
+ struct udev *udev = NULL;
+ struct udev_enumerate *e = NULL;
+ struct udev_list_entry *entry = NULL;
+ struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+
+ udev = udev_new();
+ if (!udev) {
+ TBM_BACKEND_ERR("udev_new() failed.\n");
+ return -1;
+ }
+
+ e = udev_enumerate_new(udev);
+ udev_enumerate_add_match_subsystem(e, "drm");
+ udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
+ udev_enumerate_scan_devices(e);
+
+ udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+ device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+ udev_list_entry_get_name(entry));
+ device_parent = udev_device_get_parent(device);
+		/* No need to unref device_parent; it shares a refcount with device. */
+ if (device_parent) {
+ if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
+ drm_device = device;
+ TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
+ udev_device_get_syspath(drm_device),
+ udev_device_get_sysname(device_parent));
+ break;
+ }
+ }
+ udev_device_unref(device);
+ }
+
+ udev_enumerate_unref(e);
+ udev_unref(udev);
+
+ if (!drm_device) {
+ udev_device_unref(drm_device);
+ return 0;
+ }
+
+ udev_device_unref(drm_device);
+ return 1;
+#endif
+}
+
+static int
+_get_render_node(void)
+{
+ struct udev *udev = NULL;
+ struct udev_enumerate *e = NULL;
+ struct udev_list_entry *entry = NULL;
+ struct udev_device *device = NULL, *drm_device = NULL, *device_parent = NULL;
+ const char *filepath;
+ struct stat s;
+ int fd = -1;
+ int ret;
+
+ udev = udev_new();
+ if (!udev) {
+ TBM_BACKEND_ERR("udev_new() failed.\n");
+ return -1;
+ }
+
+ e = udev_enumerate_new(udev);
+ udev_enumerate_add_match_subsystem(e, "drm");
+ udev_enumerate_add_match_sysname(e, "renderD[0-9]*");
+ udev_enumerate_scan_devices(e);
+
+ udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
+ device = udev_device_new_from_syspath(udev_enumerate_get_udev(e),
+ udev_list_entry_get_name(entry));
+ device_parent = udev_device_get_parent(device);
+		/* No need to unref device_parent; it shares a refcount with device. */
+ if (device_parent) {
+ if (strcmp(udev_device_get_sysname(device_parent), "vc4-drm") == 0) {
+ drm_device = device;
+ TBM_BACKEND_DBG("Found render device: '%s' (%s)\n",
+ udev_device_get_syspath(drm_device),
+ udev_device_get_sysname(device_parent));
+ break;
+ }
+ }
+ udev_device_unref(device);
+ }
+
+ udev_enumerate_unref(e);
+
+ /* Get device file path. */
+ filepath = udev_device_get_devnode(drm_device);
+ if (!filepath) {
+ TBM_BACKEND_ERR("udev_device_get_devnode() failed.\n");
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ /* Open DRM device file and check validity. */
+ fd = open(filepath, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+		TBM_BACKEND_ERR("open(%s, O_RDWR | O_CLOEXEC) failed.\n", filepath);
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ return -1;
+ }
+
+ ret = fstat(fd, &s);
+ if (ret) {
+		TBM_BACKEND_ERR("fstat() failed.\n");
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+ close(fd);
+ return -1;
+ }
+
+ udev_device_unref(drm_device);
+ udev_unref(udev);
+
+ return fd;
+}
+#endif
+
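+/* Get the global FLINK name for a gem handle. Returns 0 on failure. */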
+static unsigned int
+_get_name(int fd, unsigned int gem)
+{
+ struct drm_gem_flink arg = {0,};
+
+ arg.handle = gem;
+ if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &arg)) {
+ TBM_BACKEND_ERR("fail to DRM_IOCTL_GEM_FLINK gem:%d", gem);
+ return 0;
+ }
+
+ return (unsigned int)arg.name;
+}
+
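+/* Return the handle of a bo for the given device: the gem handle for
+ * DEFAULT/2D, a lazily mmap()ed virtual address for CPU, and a lazily
+ * exported dmabuf fd for 3D/MM. */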
+static hal_tbm_bo_handle
+_vc4_bo_handle(tbm_vc4_bo *bo_data, int device)
+{
+ hal_tbm_bo_handle bo_handle;
+
+	memset(&bo_handle, 0x0, sizeof(bo_handle));
+
+ switch (device) {
+ case HAL_TBM_DEVICE_DEFAULT:
+ case HAL_TBM_DEVICE_2D:
+ bo_handle.u32 = (uint32_t)bo_data->gem;
+ break;
+ case HAL_TBM_DEVICE_CPU:
+ if (!bo_data->pBase) {
+ struct drm_vc4_mmap_bo arg = {0, };
+ void *map = NULL;
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_VC4_MMAP_BO, &arg)) {
+ TBM_BACKEND_ERR("Cannot map_vc4 gem=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ map = mmap(NULL, bo_data->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ bo_data->fd, arg.offset);
+ if (map == MAP_FAILED) {
+ TBM_BACKEND_ERR("Cannot usrptr gem=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+ bo_data->pBase = map;
+ }
+ bo_handle.ptr = (void *)bo_data->pBase;
+ break;
+ case HAL_TBM_DEVICE_3D:
+ case HAL_TBM_DEVICE_MM:
+ if (!bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+ return (hal_tbm_bo_handle) NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ bo_handle.u32 = (uint32_t)bo_data->dmabuf;
+ break;
+ default:
+ TBM_BACKEND_ERR("Not supported device:%d\n", device);
+ bo_handle.ptr = (void *) NULL;
+ break;
+ }
+
+ return bo_handle;
+}
+
+static hal_tbm_bufmgr_capability
+tbm_vc4_bufmgr_get_capabilities(hal_tbm_bufmgr *bufmgr, hal_tbm_error *error)
+{
+ hal_tbm_bufmgr_capability capabilities = HAL_TBM_BUFMGR_CAPABILITY_NONE;
+
+#ifdef VC4_TILED_FORMAT
+ capabilities = HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY|HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD|HAL_TBM_BUFMGR_CAPABILITY_TILED_MEMORY;
+#else
+ capabilities = HAL_TBM_BUFMGR_CAPABILITY_SHARE_KEY|HAL_TBM_BUFMGR_CAPABILITY_SHARE_FD;
+#endif
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return capabilities;
+}
+
+static hal_tbm_error
+tbm_vc4_bufmgr_get_supported_formats(hal_tbm_bufmgr *bufmgr,
+ uint32_t **formats, uint32_t *num)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ uint32_t *color_formats;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ color_formats = (uint32_t *)calloc(1, sizeof(uint32_t) * TBM_COLOR_FORMAT_COUNT);
+ if (color_formats == NULL)
+ return HAL_TBM_ERROR_OUT_OF_MEMORY;
+
+ memcpy(color_formats, tbm_vc4_color_format_list, sizeof(uint32_t)*TBM_COLOR_FORMAT_COUNT);
+
+ *formats = color_formats;
+ *num = TBM_COLOR_FORMAT_COUNT;
+
+ TBM_BACKEND_DBG("supported format count = %d\n", *num);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+
+#ifdef VC4_TILED_FORMAT
+#include <drm_fourcc.h>
+static inline uint32_t
+vc4_utile_width(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ case 2:
+ return 8;
+ case 4:
+ return 4;
+ case 8:
+ return 2;
+ default:
+ return 4;
+ }
+}
+
+static inline uint32_t
+vc4_utile_height(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ return 8;
+ case 2:
+ case 4:
+ case 8:
+ return 4;
+ default:
+ return 4;
+ }
+}
+
+static inline bool
+vc4_size_is_lt(uint32_t width, uint32_t height, int cpp)
+{
+ return (width <= 4 * vc4_utile_width(cpp) ||
+ height <= 4 * vc4_utile_height(cpp));
+}
+
+static hal_tbm_bo *
+tbm_vc4_bufmgr_alloc_bo_with_tiled_format(hal_tbm_bufmgr *bufmgr, int width, int height,
+ int cpp, int format, hal_tbm_bo_memory_type flags, int bo_idx, hal_tbm_error *err)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_bo *bo_data;
+ uint32_t utile_w = vc4_utile_width(cpp);
+ uint32_t utile_h = vc4_utile_height(cpp);
+ uint32_t level_width, level_height;
+ int size;
+ uint32_t stride;
+
+
+ level_width = width;
+ level_height = height;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null\n");
+ return NULL;
+ }
+
+ if (vc4_size_is_lt(level_width, level_height, cpp)) {
+ level_width = SIZE_ALIGN(level_width, utile_w);
+ level_height = SIZE_ALIGN(level_height, utile_h);
+ } else {
+ level_width = SIZE_ALIGN(level_width,
+ 4 * 2 * utile_w);
+ level_height = SIZE_ALIGN(level_height,
+ 4 * 2 * utile_h);
+ }
+
+ stride = level_width * cpp;
+
+ size = level_height * stride;
+ size = SIZE_ALIGN(size, 4096);
+
+
+ bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ struct drm_vc4_create_bo arg = {0, };
+
+ arg.size = (__u32)size;
+	arg.flags = flags; /* currently no meaningful flag values; reserved for future extension */
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
+ TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
+ (unsigned int)arg.size);
+ free(bo_data);
+ return NULL;
+ }
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = (unsigned int)arg.handle;
+ bo_data->size = size;
+ bo_data->flags_tbm = flags;
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 0)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ return NULL;
+ }
+
+ pthread_mutex_init(&bo_data->mutex, NULL);
+
+ if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+ free(bo_data);
+ return NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+	/* set the T-tiled modifier on the bo */
+	uint64_t modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
+ struct drm_vc4_set_tiling set_tiling = {
+ .handle = bo_data->gem,
+ .modifier = modifier,
+ };
+ drmIoctl(bo_data->fd, DRM_IOCTL_VC4_SET_TILING, &set_tiling);
+
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d(%d), size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ return (hal_tbm_bo *)bo_data;
+}
+#endif
+
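+/* Number of planes used by a hal_tbm_format: 1 for RGB and packed YUV,
+ * 2 for NV formats, 3 for planar YUV. Returns 0 for unknown formats. */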
+static int
+_tbm_vc4_bufmgr_get_num_planes(hal_tbm_format format)
+{
+ int num_planes = 0;
+
+ switch (format) {
+ case HAL_TBM_FORMAT_C8:
+ case HAL_TBM_FORMAT_RGB332:
+ case HAL_TBM_FORMAT_BGR233:
+ case HAL_TBM_FORMAT_XRGB4444:
+ case HAL_TBM_FORMAT_XBGR4444:
+ case HAL_TBM_FORMAT_RGBX4444:
+ case HAL_TBM_FORMAT_BGRX4444:
+ case HAL_TBM_FORMAT_ARGB4444:
+ case HAL_TBM_FORMAT_ABGR4444:
+ case HAL_TBM_FORMAT_RGBA4444:
+ case HAL_TBM_FORMAT_BGRA4444:
+ case HAL_TBM_FORMAT_XRGB1555:
+ case HAL_TBM_FORMAT_XBGR1555:
+ case HAL_TBM_FORMAT_RGBX5551:
+ case HAL_TBM_FORMAT_BGRX5551:
+ case HAL_TBM_FORMAT_ARGB1555:
+ case HAL_TBM_FORMAT_ABGR1555:
+ case HAL_TBM_FORMAT_RGBA5551:
+ case HAL_TBM_FORMAT_BGRA5551:
+ case HAL_TBM_FORMAT_RGB565:
+ case HAL_TBM_FORMAT_BGR565:
+ case HAL_TBM_FORMAT_RGB888:
+ case HAL_TBM_FORMAT_BGR888:
+ case HAL_TBM_FORMAT_XRGB8888:
+ case HAL_TBM_FORMAT_XBGR8888:
+ case HAL_TBM_FORMAT_RGBX8888:
+ case HAL_TBM_FORMAT_BGRX8888:
+ case HAL_TBM_FORMAT_ARGB8888:
+ case HAL_TBM_FORMAT_ABGR8888:
+ case HAL_TBM_FORMAT_RGBA8888:
+ case HAL_TBM_FORMAT_BGRA8888:
+ case HAL_TBM_FORMAT_XRGB2101010:
+ case HAL_TBM_FORMAT_XBGR2101010:
+ case HAL_TBM_FORMAT_RGBX1010102:
+ case HAL_TBM_FORMAT_BGRX1010102:
+ case HAL_TBM_FORMAT_ARGB2101010:
+ case HAL_TBM_FORMAT_ABGR2101010:
+ case HAL_TBM_FORMAT_RGBA1010102:
+ case HAL_TBM_FORMAT_BGRA1010102:
+ case HAL_TBM_FORMAT_YUYV:
+ case HAL_TBM_FORMAT_YVYU:
+ case HAL_TBM_FORMAT_UYVY:
+ case HAL_TBM_FORMAT_VYUY:
+ case HAL_TBM_FORMAT_AYUV:
+ num_planes = 1;
+ break;
+ case HAL_TBM_FORMAT_NV12:
+ case HAL_TBM_FORMAT_NV12MT:
+ case HAL_TBM_FORMAT_NV21:
+ case HAL_TBM_FORMAT_NV16:
+ case HAL_TBM_FORMAT_NV61:
+ num_planes = 2;
+ break;
+ case HAL_TBM_FORMAT_YUV410:
+ case HAL_TBM_FORMAT_YVU410:
+ case HAL_TBM_FORMAT_YUV411:
+ case HAL_TBM_FORMAT_YVU411:
+ case HAL_TBM_FORMAT_YUV420:
+ case HAL_TBM_FORMAT_YVU420:
+ case HAL_TBM_FORMAT_YUV422:
+ case HAL_TBM_FORMAT_YVU422:
+ case HAL_TBM_FORMAT_YUV444:
+ case HAL_TBM_FORMAT_YVU444:
+ num_planes = 3;
+ break;
+
+ default:
+ num_planes = 0;
+ TBM_BACKEND_ERR("Invalid format : %d", format);
+ break;
+ }
+
+ return num_planes;
+}
+
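+/* Compute size, offset, pitch and bo index of one plane of a surface,
+ * applying the RGB/YUV pitch and height alignments defined above. All
+ * planes of a surface share bo index 0 in this backend. */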
+static hal_tbm_error
+tbm_vc4_bufmgr_get_plane_data(hal_tbm_bufmgr *bufmgr,
+ hal_tbm_format format, int plane_idx, int width,
+ int height, uint32_t *size, uint32_t *offset,
+ uint32_t *pitch, int *bo_idx)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ int bpp;
+ int _offset = 0;
+ int _pitch = 0;
+ int _size = 0;
+ int _bo_idx = 0;
+ int _align_height = 0;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ switch (format) {
+ /* 16 bpp RGB */
+ case HAL_TBM_FORMAT_XRGB4444:
+ case HAL_TBM_FORMAT_XBGR4444:
+ case HAL_TBM_FORMAT_RGBX4444:
+ case HAL_TBM_FORMAT_BGRX4444:
+ case HAL_TBM_FORMAT_ARGB4444:
+ case HAL_TBM_FORMAT_ABGR4444:
+ case HAL_TBM_FORMAT_RGBA4444:
+ case HAL_TBM_FORMAT_BGRA4444:
+ case HAL_TBM_FORMAT_XRGB1555:
+ case HAL_TBM_FORMAT_XBGR1555:
+ case HAL_TBM_FORMAT_RGBX5551:
+ case HAL_TBM_FORMAT_BGRX5551:
+ case HAL_TBM_FORMAT_ARGB1555:
+ case HAL_TBM_FORMAT_ABGR1555:
+ case HAL_TBM_FORMAT_RGBA5551:
+ case HAL_TBM_FORMAT_BGRA5551:
+ case HAL_TBM_FORMAT_RGB565:
+ bpp = 16;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+ _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ break;
+ /* 24 bpp RGB */
+ case HAL_TBM_FORMAT_RGB888:
+ case HAL_TBM_FORMAT_BGR888:
+ bpp = 24;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+ _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ break;
+ /* 32 bpp RGB */
+ case HAL_TBM_FORMAT_XRGB8888:
+ case HAL_TBM_FORMAT_XBGR8888:
+ case HAL_TBM_FORMAT_RGBX8888:
+ case HAL_TBM_FORMAT_BGRX8888:
+ case HAL_TBM_FORMAT_ARGB8888:
+ case HAL_TBM_FORMAT_ABGR8888:
+ case HAL_TBM_FORMAT_RGBA8888:
+ case HAL_TBM_FORMAT_BGRA8888:
+ bpp = 32;
+ _offset = 0;
+#ifdef VC4_TILED_FORMAT
+ if (vc4_size_is_lt(width, height, 4)) {
+ width = SIZE_ALIGN(width, vc4_utile_width(4));
+ height = SIZE_ALIGN(height, vc4_utile_height(4));
+
+ } else {
+ width = SIZE_ALIGN(width, 32);
+ uint32_t utile_h = vc4_utile_height(bpp);
+ height = SIZE_ALIGN(height, 8*utile_h);
+ }
+#endif
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_RGB);
+ _size = SIZE_ALIGN(_pitch * height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ break;
+
+ /* packed YCbCr */
+ case HAL_TBM_FORMAT_YUYV:
+ case HAL_TBM_FORMAT_YVYU:
+ case HAL_TBM_FORMAT_UYVY:
+ case HAL_TBM_FORMAT_VYUY:
+ case HAL_TBM_FORMAT_AYUV:
+ bpp = 32;
+ _offset = 0;
+ _pitch = SIZE_ALIGN((width * bpp) >> 3, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ break;
+
+ /*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+ case HAL_TBM_FORMAT_NV12:
+ case HAL_TBM_FORMAT_NV21:
+ bpp = 12;
+ /*if (plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if (plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_NV16:
+ case HAL_TBM_FORMAT_NV61:
+ bpp = 16;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if( plane_idx ==1 )*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+
+ /*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+
+ /*
+ * NATIVE_BUFFER_FORMAT_YV12
+ * NATIVE_BUFFER_FORMAT_I420
+ */
+ case HAL_TBM_FORMAT_YUV410:
+ case HAL_TBM_FORMAT_YVU410:
+ bpp = 9;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
+ _align_height = SIZE_ALIGN(height / 4, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 4, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 4);
+ _align_height = SIZE_ALIGN(height / 4, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV411:
+ case HAL_TBM_FORMAT_YVU411:
+ case HAL_TBM_FORMAT_YUV420:
+ case HAL_TBM_FORMAT_YVU420:
+ bpp = 12;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV / 2);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _align_height = SIZE_ALIGN(height / 2, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV / 2);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV422:
+ case HAL_TBM_FORMAT_YVU422:
+ bpp = 16;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width / 2, TBM_SURFACE_ALIGNMENT_PITCH_YUV / 2);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ case HAL_TBM_FORMAT_YUV444:
+ case HAL_TBM_FORMAT_YVU444:
+ bpp = 24;
+ /*if(plane_idx == 0)*/
+ {
+ _offset = 0;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 0)
+ break;
+ }
+ /*else if(plane_idx == 1)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ if (plane_idx == 1)
+ break;
+ }
+ /*else if (plane_idx == 2)*/
+ {
+ _offset += _size;
+ _pitch = SIZE_ALIGN(width, TBM_SURFACE_ALIGNMENT_PITCH_YUV);
+ _align_height = SIZE_ALIGN(height, TBM_SURFACE_ALIGNMENT_HEIGHT_YUV);
+ _size = SIZE_ALIGN(_pitch * _align_height, TBM_SURFACE_ALIGNMENT_PLANE);
+ _bo_idx = 0;
+ }
+ break;
+ default:
+ bpp = 0;
+ break;
+ }
+
+ *size = _size;
+ *offset = _offset;
+ *pitch = _pitch;
+ *bo_idx = _bo_idx;
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static void
+_tbm_vc4_surface_data_set_bo_data(tbm_vc4_surface *surface_data, tbm_vc4_bo *bo_data)
+{
+ surface_data->bo_data = bo_data;
+ surface_data->num_bos++;
+}
+
+static void
+_tbm_vc4_surface_data_destroy(tbm_vc4_surface *surface_data)
+{
+ surface_data->refcnt--;
+ if (surface_data->refcnt > 0) {
+ TBM_BACKEND_INFO("surface_data->refcnt:%d", surface_data->refcnt);
+ return;
+ }
+
+ TBM_BACKEND_INFO("surface_data destroy");
+
+ if (surface_data->bo_data) {
+ tbm_vc4_bo_free((hal_tbm_bo *)surface_data->bo_data);
+ surface_data->bo_data = NULL;
+ }
+
+ LIST_DEL(&surface_data->link);
+ free(surface_data);
+}
+
+static tbm_vc4_surface *
+_tbm_vc4_surface_data_create(tbm_vc4_bufmgr *bufmgr_data, uint32_t width, uint32_t height, hal_tbm_format format, hal_tbm_error *error)
+{
+ tbm_vc4_surface *surface_data;
+
+ surface_data = calloc(1, sizeof(struct _tbm_vc4_surface));
+ if (!surface_data) {
+ TBM_BACKEND_ERR("fail to allocate the surface_data");
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ surface_data->width = width;
+ surface_data->height = height;
+ surface_data->format = format;
+ surface_data->bufmgr_data = bufmgr_data;
+ surface_data->refcnt = 1;
+
+ LIST_ADDTAIL(&surface_data->link, &bufmgr_data->surface_data_list);
+
+ return surface_data;
+}
+
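+/* Allocate a surface: sum the plane sizes reported by
+ * tbm_vc4_bufmgr_get_plane_data() and back them with a single bo. */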
+static hal_tbm_surface *
+tbm_vc4_bufmgr_alloc_surface(hal_tbm_bufmgr *bufmgr, uint32_t width, uint32_t height, hal_tbm_format format,
+ hal_tbm_bo_memory_type mem_types, uint64_t *modifiers, uint32_t num_modifiers, hal_tbm_error *error)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_surface *surface_data;
+ tbm_vc4_bo *bo_data;
+ uint32_t size = 0, offset = 0, pitch = 0;
+ int bo_idx = 0, bo_size = 0;
+ int i, num_planes;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ surface_data = _tbm_vc4_surface_data_create(bufmgr_data, width, height, format, error);
+ if (!surface_data) {
+ TBM_BACKEND_ERR("fail to create surface_data");
+ return NULL;
+ }
+
+ num_planes = _tbm_vc4_bufmgr_get_num_planes(format);
+ if (num_planes == 0) {
+ TBM_BACKEND_ERR("fail to get num_planes");
+ _tbm_vc4_surface_data_destroy(surface_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+	for (i = 0; i < num_planes; i++) {
+		hal_tbm_error err;
+
+		err = tbm_vc4_bufmgr_get_plane_data(bufmgr, format, i, (int)width, (int)height, &size, &offset, &pitch, &bo_idx);
+		if (err != HAL_TBM_ERROR_NONE) {
+			TBM_BACKEND_ERR("fail to get plane_data");
+			_tbm_vc4_surface_data_destroy(surface_data);
+			if (error)
+				*error = err;
+			return NULL;
+		}
+		bo_size += size;
+	}
+
+ bo_data = tbm_vc4_bufmgr_alloc_bo(bufmgr, bo_size, mem_types, error);
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to allocate the bo_data");
+ _tbm_vc4_surface_data_destroy(surface_data);
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ _tbm_vc4_surface_data_set_bo_data(surface_data, bo_data);
+
+ return (hal_tbm_surface *)surface_data;
+}
+
+static tbm_vc4_surface *
+_tbm_vc4_surface_find_same_surface(tbm_vc4_bufmgr *bufmgr_data, tbm_vc4_bo *bo_data)
+{
+ tbm_vc4_surface *s = NULL;
+
+ LIST_FOR_EACH_ENTRY(s, &bufmgr_data->surface_data_list, link) {
+ if (s->bo_data == bo_data) {
+ return s;
+ }
+ }
+
+ return NULL;
+}
+
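+/* Import a surface from exactly one dmabuf fd. If another surface already
+ * wraps the same bo, that surface is reused with its refcount bumped. */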
+static hal_tbm_surface *
+tbm_vc4_bufmgr_import_surface(hal_tbm_bufmgr *bufmgr, uint32_t width, uint32_t height, hal_tbm_format format,
+ hal_tbm_surface_buffer_data *buffer_data, hal_tbm_error *error)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_surface *surface_data, *surface_data1;
+ tbm_vc4_bo *bo_data;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ if (buffer_data == NULL) {
+ TBM_BACKEND_ERR("buffer_data is null");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+	// The vc4 backend can import a surface backed by exactly one dmabuf fd.
+	if (buffer_data->num_fds != 1) {
+		TBM_BACKEND_ERR("buffer_data->num_fds must be 1.");
+		TBM_BACKEND_ERR("the vc4 backend can only import a surface with a single dmabuf fd.");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ if (buffer_data->fds == NULL) {
+ TBM_BACKEND_ERR("buffer_data->fds is null");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ surface_data = _tbm_vc4_surface_data_create(bufmgr_data, width, height, format, error);
+ if (!surface_data) {
+ TBM_BACKEND_ERR("fail to create surface_data");
+ return NULL;
+ }
+
+ bo_data = tbm_vc4_bufmgr_import_fd(bufmgr, buffer_data->fds[0], error);
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to import the bo_data");
+ _tbm_vc4_surface_data_destroy(surface_data);
+ return NULL;
+ }
+
+	// Reuse an existing surface_data that already wraps the same bo_data.
+ surface_data1 = _tbm_vc4_surface_find_same_surface(bufmgr_data, bo_data);
+ if (surface_data1) {
+ _tbm_vc4_surface_data_destroy(surface_data);
+ surface_data = surface_data1;
+ surface_data->refcnt++;
+ } else {
+ _tbm_vc4_surface_data_set_bo_data(surface_data, bo_data);
+ }
+
+ return (hal_tbm_surface *)surface_data;
+}
+
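+/* Allocate a gem bo with DRM_IOCTL_VC4_CREATE_BO, set up its cache state
+ * and FLINK name, and register it in the bufmgr hash. */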
+static hal_tbm_bo *
+tbm_vc4_bufmgr_alloc_bo(hal_tbm_bufmgr *bufmgr, unsigned int size,
+ hal_tbm_bo_memory_type flags, hal_tbm_error *error)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_bo *bo_data;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ struct drm_vc4_create_bo arg = {0, };
+
+ arg.size = (__u32)size;
+	arg.flags = flags; /* currently no meaningful flag values; reserved for future extension */
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_VC4_CREATE_BO, &arg)) {
+ TBM_BACKEND_ERR("Cannot create bo_data(flag:%x, size:%d)\n", arg.flags,
+ (unsigned int)arg.size);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = (unsigned int)arg.handle;
+ bo_data->size = size;
+ bo_data->flags_tbm = flags;
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 0)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ pthread_mutex_init(&bo_data->mutex, NULL);
+
+ if (bufmgr_data->use_dma_fence && !bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("Cannot dmabuf=%d\n", bo_data->gem);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
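+/* Import a bo from a dmabuf fd. The FLINK name is used to find an already
+ * imported bo in the hash; otherwise the bo size is taken from lseek() on
+ * the fd (or from DRM_IOCTL_GEM_OPEN on older kernels) and a new bo is
+ * created and added to the hash. */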
+static hal_tbm_bo *
+tbm_vc4_bufmgr_import_fd(hal_tbm_bufmgr *bufmgr, hal_tbm_fd key, hal_tbm_error *error)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_bo *bo_data;
+ unsigned int gem = 0;
+ unsigned int name;
+ int ret;
+ char buf[STRERR_BUFSIZE];
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ /*getting handle from fd*/
+ struct drm_prime_handle arg = {0, };
+
+ arg.fd = key;
+ arg.flags = 0;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &arg)) {
+ TBM_BACKEND_ERR("Cannot get gem handle from fd:%d (%s)\n",
+ arg.fd, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+ gem = arg.handle;
+
+ name = _get_name(bufmgr_data->fd, gem);
+ if (!name) {
+ TBM_BACKEND_ERR("Cannot get name from gem:%d, fd:%d (%s)\n",
+ gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ ret = drmHashLookup(bufmgr_data->hashBos, name, (void **)&bo_data);
+ if (ret == 0) {
+ if (gem == bo_data->gem) {
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+ return bo_data;
+ }
+ }
+
+	/* Determine the size of bo_data. The fd-to-handle ioctl really should
+	 * return the size, but it doesn't. On kernel 3.12 or later we can
+	 * lseek() on the prime fd to get the size; on older kernels the lseek
+	 * fails and we fall back to the size reported by DRM_IOCTL_GEM_OPEN.
+	 */
+ unsigned int real_size = -1;
+ struct drm_gem_open open_arg = {0, };
+
+ real_size = lseek(key, 0, SEEK_END);
+
+ open_arg.name = name;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
+ TBM_BACKEND_ERR("Cannot get gem info from gem:%d, fd:%d (%s)\n",
+ gem, key, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ /* Free gem handle to avoid a memory leak*/
+ struct drm_gem_close close_arg = {0, };
+ memset(&close_arg, 0, sizeof(close_arg));
+ close_arg.handle = open_arg.handle;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_CLOSE, &close_arg)) {
+		TBM_BACKEND_ERR("Cannot close gem handle. (%s)\n",
+				strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ if (real_size == -1)
+ real_size = open_arg.size;
+
+ bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("bo_data:%p fail to allocate the bo_data\n", bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = gem;
+ bo_data->size = real_size;
+ bo_data->name = name;
+ bo_data->flags_tbm = 0;
+
+#ifdef VC4_TILED_FORMAT
+ struct drm_vc4_get_tiling get_tiling = {
+ .handle = bo_data->gem,
+ };
+ drmIoctl(bo_data->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
+
+ if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
+ bo_data->flags_tbm |= HAL_TBM_BO_TILED;
+#endif
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("bo_data:%p Cannot insert bo_data to Hash(%d) from gem:%d, fd:%d\n",
+ bo_data, bo_data->name, gem, key);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ key,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
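+/* Import a bo from a global FLINK key via DRM_IOCTL_GEM_OPEN, reusing the
+ * hash entry when the key has been imported before. */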
+static hal_tbm_bo *
+tbm_vc4_bufmgr_import_key(hal_tbm_bufmgr *bufmgr, hal_tbm_key key, hal_tbm_error *error)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *)bufmgr;
+ tbm_vc4_bo *bo_data;
+ int ret;
+
+ if (bufmgr_data == NULL) {
+ TBM_BACKEND_ERR("bufmgr is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ ret = drmHashLookup(bufmgr_data->hashBos, key, (void **)&bo_data);
+ if (ret == 0) {
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+ return (hal_tbm_bo *)bo_data;
+ }
+
+ struct drm_gem_open arg = {0, };
+
+ arg.name = key;
+ if (drmIoctl(bufmgr_data->fd, DRM_IOCTL_GEM_OPEN, &arg)) {
+ TBM_BACKEND_ERR("Cannot open gem name=%d\n", key);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ bo_data = calloc(1, sizeof(struct _tbm_vc4_bo));
+ if (!bo_data) {
+ TBM_BACKEND_ERR("fail to allocate the bo_data private\n");
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bo_data->bufmgr_data = bufmgr_data;
+
+ bo_data->fd = bufmgr_data->fd;
+ bo_data->gem = arg.handle;
+ bo_data->size = arg.size;
+ bo_data->name = key;
+ bo_data->flags_tbm = 0;
+
+#ifdef VC4_TILED_FORMAT
+ struct drm_vc4_get_tiling get_tiling = {
+ .handle = bo_data->gem,
+ };
+ drmIoctl(bo_data->fd, DRM_IOCTL_VC4_GET_TILING, &get_tiling);
+
+ if (get_tiling.modifier == DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED)
+ bo_data->flags_tbm |= HAL_TBM_BO_TILED;
+#endif
+
+ if (!_bo_init_cache_state(bufmgr_data, bo_data, 1)) {
+ TBM_BACKEND_ERR("fail init cache state(%d)\n", bo_data->name);
+ free(bo_data);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return NULL;
+ }
+
+ if (!bo_data->dmabuf) {
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg)) {
+ TBM_BACKEND_ERR("fail to DRM_IOCTL_PRIME_HANDLE_TO_FD gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ free(bo_data);
+ return NULL;
+ }
+ bo_data->dmabuf = arg.fd;
+ }
+
+ /* add bo_data to hash */
+ if (drmHashInsert(bufmgr_data->hashBos, bo_data->name, (void *)bo_data) < 0)
+ TBM_BACKEND_ERR("Cannot insert bo_data to Hash(%d)\n", bo_data->name);
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_bo *)bo_data;
+}
+
+static void
+tbm_vc4_surface_free(hal_tbm_surface *surface)
+{
+ tbm_vc4_surface *surface_data = (tbm_vc4_surface *)surface;
+
+ if (!surface_data)
+ return;
+
+ _tbm_vc4_surface_data_destroy(surface_data);
+}
+
+static hal_tbm_bo **
+tbm_vc4_surface_get_bos(hal_tbm_surface *surface, int *num_bos, hal_tbm_error *error)
+{
+ tbm_vc4_surface *surface_data = (tbm_vc4_surface *)surface;
+	hal_tbm_bo **bos;
+
+ if (surface_data == NULL) {
+ TBM_BACKEND_ERR("surface_data is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ if (num_bos == NULL) {
+ TBM_BACKEND_ERR("num_bos is null\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ *num_bos = surface_data->num_bos;
+
+ /* will be freed in frontend */
+ bos = calloc(*num_bos, sizeof(tbm_vc4_bo *));
+ if (!bos) {
+		TBM_BACKEND_ERR("fail to allocate the bos array");
+		if (error)
+			*error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ bos[0] = surface_data->bo_data;
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bos;
+}
+
+static hal_tbm_error
+tbm_vc4_surface_get_plane_data(hal_tbm_surface *surface, int plane_idx, uint32_t *size, uint32_t *offset, uint32_t *pitch, int *bo_idx)
+{
+ tbm_vc4_surface *surface_data = (tbm_vc4_surface *)surface;
+ hal_tbm_error error;
+ tbm_vc4_bufmgr *bufmgr;
+ uint32_t width, height;
+ hal_tbm_format format;
+
+ if (surface_data == NULL) {
+ TBM_BACKEND_ERR("surface_data is null");
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+ }
+
+ bufmgr = surface_data->bufmgr_data;
+ width = surface_data->width;
+ height = surface_data->height;
+ format = surface_data->format;
+
+ error = tbm_vc4_bufmgr_get_plane_data((tbm_vc4_bufmgr *)bufmgr, format, plane_idx,
+ (int)width, (int)height, size, offset, pitch, bo_idx);
+ if (error != HAL_TBM_ERROR_NONE) {
+ TBM_BACKEND_ERR("fail to get plane_data");
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+ }
+
+ return HAL_TBM_ERROR_NONE;
+}
+
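+/* Export a surface as a hal_tbm_surface_buffer_data holding one dmabuf fd;
+ * the frontend takes ownership of the returned allocations. */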
+static hal_tbm_surface_buffer_data *
+tbm_vc4_surface_export(hal_tbm_surface *surface, hal_tbm_error *error)
+{
+ tbm_vc4_surface *surface_data = (tbm_vc4_surface *)surface;
+ hal_tbm_surface_buffer_data *buffer_data;
+
+ if (surface == NULL) {
+ TBM_BACKEND_ERR("surface is null");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ buffer_data = calloc(1, sizeof(struct _hal_tbm_surface_buffer_data));
+ if (!buffer_data) {
+ TBM_BACKEND_ERR("fail to allocate a buffer_data");
+ if (error)
+ *error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+ buffer_data->num_fds = surface_data->num_bos;
+
+ /* will be freed in frontend */
+ buffer_data->fds = calloc(buffer_data->num_fds, sizeof(int));
+ if (!buffer_data->fds) {
+		TBM_BACKEND_ERR("fail to allocate the fds array");
+		if (error)
+			*error = HAL_TBM_ERROR_OUT_OF_MEMORY;
+ free(buffer_data);
+ return NULL;
+ }
+
+ buffer_data->fds[0] = tbm_vc4_bo_export_fd(surface_data->bo_data, error);
+ if (buffer_data->fds[0] < 0) {
+ TBM_BACKEND_ERR("fail to export bo_data");
+ free(buffer_data->fds);
+ free(buffer_data);
+ return NULL;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return buffer_data;
+}
+
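+/* Release a bo: unmap it, close its dmabuf fd, drop it from the hash,
+ * destroy its cache state and close the gem handle. */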
+static void
+tbm_vc4_bo_free(hal_tbm_bo *bo)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ tbm_vc4_bo *temp;
+ tbm_vc4_bufmgr *bufmgr_data;
+ char buf[STRERR_BUFSIZE];
+ int ret;
+
+ if (!bo_data)
+ return;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return;
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->size);
+
+ if (bo_data->pBase) {
+ if (munmap(bo_data->pBase, bo_data->size) == -1) {
+ TBM_BACKEND_ERR("bo_data:%p fail to munmap(%s)\n",
+ bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
+ }
+ }
+
+ /* close dmabuf */
+ if (bo_data->dmabuf) {
+ close(bo_data->dmabuf);
+ bo_data->dmabuf = 0;
+ }
+
+	/* delete bo_data from hash */
+	ret = drmHashLookup(bufmgr_data->hashBos, bo_data->name, (void **)&temp);
+	if (ret == 0) {
+		if (temp != bo_data)
+			TBM_BACKEND_ERR("hashBos probably has several BOs with the same name!\n");
+		drmHashDelete(bufmgr_data->hashBos, bo_data->name);
+	} else {
+		TBM_BACKEND_ERR("Cannot find bo_data in hash(%d), ret=%d\n", bo_data->name, ret);
+	}
+
+ _bo_destroy_cache_state(bufmgr_data, bo_data);
+
+ /* Free gem handle */
+	struct drm_gem_close arg = {0, };
+
+	arg.handle = bo_data->gem;
+ if (drmIoctl(bo_data->fd, DRM_IOCTL_GEM_CLOSE, &arg))
+ TBM_BACKEND_ERR("bo_data:%p fail to gem close.(%s)\n",
+ bo_data, strerror_r(errno, buf, STRERR_BUFSIZE));
+
+ free(bo_data);
+}
+
+static int
+tbm_vc4_bo_get_size(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_data->size;
+}
+
+static hal_tbm_bo_memory_type
+tbm_vc4_bo_get_memory_type(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return HAL_TBM_BO_DEFAULT;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_data->flags_tbm;
+}
+
+static hal_tbm_bo_handle
+tbm_vc4_bo_get_handle(hal_tbm_bo *bo, hal_tbm_bo_device_type device, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ hal_tbm_bo_handle bo_handle;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (!bo_data->gem) {
+ TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ TBM_BACKEND_DBG("bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d, %s\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm,
+ bo_data->size,
+ STR_DEVICE[device]);
+
+ /*Get mapped bo_handle*/
+ bo_handle = _vc4_bo_handle(bo_data, device);
+ if (bo_handle.ptr == NULL) {
+ TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d\n",
+ bo_data->gem, device);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_handle;
+}
+
+static hal_tbm_bo_handle
+tbm_vc4_bo_map(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
+ hal_tbm_bo_access_option opt, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ hal_tbm_bo_handle bo_handle;
+ tbm_vc4_bufmgr *bufmgr_data;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (!bo_data->gem) {
+ TBM_BACKEND_ERR("Cannot map gem=%d\n", bo_data->gem);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, %s, %s\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ STR_DEVICE[device],
+ STR_OPT[opt]);
+
+ /*Get mapped bo_handle*/
+ bo_handle = _vc4_bo_handle(bo_data, device);
+ if (bo_handle.ptr == NULL) {
+ TBM_BACKEND_ERR("Cannot get handle: gem:%d, device:%d, opt:%d\n",
+ bo_data->gem, device, opt);
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_bo_handle) NULL;
+ }
+
+ if (bo_data->map_cnt == 0)
+ _bo_set_cache_state(bufmgr_data, bo_data, device, opt);
+
+ bo_data->last_map_device = device;
+
+ bo_data->map_cnt++;
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return bo_handle;
+}
+
+static hal_tbm_error
+tbm_vc4_bo_unmap(hal_tbm_bo *bo)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ tbm_vc4_bufmgr *bufmgr_data;
+
+ if (!bo_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ if (!bo_data->gem)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bo_data->map_cnt--;
+
+ if (bo_data->map_cnt == 0)
+ _bo_save_cache_state(bufmgr_data, bo_data);
+
+#ifdef ENABLE_CACHECRTL
+ if (bo_data->last_map_device == HAL_TBM_DEVICE_CPU)
+ _vc4_cache_flush(bufmgr_data, bo_data, TBM_VC4_CACHE_FLUSH_ALL);
+#endif
+
+ bo_data->last_map_device = -1;
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_error
+tbm_vc4_bo_lock(hal_tbm_bo *bo, hal_tbm_bo_device_type device,
+ hal_tbm_bo_access_option opt)
+{
+#ifndef ALWAYS_BACKEND_CTRL
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ tbm_vc4_bufmgr *bufmgr_data;
+ struct dma_buf_fence fence;
+ struct flock filelock;
+ int ret = 0;
+ char buf[STRERR_BUFSIZE];
+
+ if (!bo_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ bufmgr_data = bo_data->bufmgr_data;
+ if (!bufmgr_data)
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ if (device != HAL_TBM_DEVICE_3D && device != HAL_TBM_DEVICE_CPU) {
+		TBM_BACKEND_DBG("Unsupported device type\n");
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+ memset(&fence, 0, sizeof(struct dma_buf_fence));
+
+ /* Check if the given type is valid or not. */
+ if (opt & TBM_OPTION_WRITE) {
+ if (device == HAL_TBM_DEVICE_3D)
+ fence.type = DMA_BUF_ACCESS_WRITE | DMA_BUF_ACCESS_DMA;
+ } else if (opt & TBM_OPTION_READ) {
+ if (device == HAL_TBM_DEVICE_3D)
+ fence.type = DMA_BUF_ACCESS_READ | DMA_BUF_ACCESS_DMA;
+ } else {
+ TBM_BACKEND_ERR("Invalid argument\n");
+ return HAL_TBM_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Check if the tbm manager supports dma fence or not. */
+ if (!bufmgr_data->use_dma_fence) {
+		TBM_BACKEND_ERR("DMA FENCE is not supported(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+		return HAL_TBM_ERROR_INVALID_OPERATION;
+	}
+
+ if (device == HAL_TBM_DEVICE_3D) {
+ ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_GET_FENCE, &fence);
+ if (ret < 0) {
+ TBM_BACKEND_ERR("Cannot set GET FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+ } else {
+ if (opt & TBM_OPTION_WRITE)
+ filelock.l_type = F_WRLCK;
+ else
+ filelock.l_type = F_RDLCK;
+
+ filelock.l_whence = SEEK_CUR;
+ filelock.l_start = 0;
+ filelock.l_len = 0;
+
+ if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+ pthread_mutex_lock(&bo_data->mutex);
+
+ if (device == HAL_TBM_DEVICE_3D) {
+ int i;
+
+ for (i = 0; i < DMA_FENCE_LIST_MAX; i++) {
+ if (bo_data->dma_fence[i].ctx == 0) {
+ bo_data->dma_fence[i].type = fence.type;
+ bo_data->dma_fence[i].ctx = fence.ctx;
+ break;
+ }
+ }
+
+ if (i == DMA_FENCE_LIST_MAX) {
+ /*TODO: if dma_fence list is full, it needs realloc. I will fix this. by minseok3.kim*/
+ TBM_BACKEND_ERR("fence list is full\n");
+ }
+ }
+
+ pthread_mutex_unlock(&bo_data->mutex);
+
+	TBM_BACKEND_DBG("DMABUF_IOCTL_GET_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+#endif /* ALWAYS_BACKEND_CTRL */
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_error
+tbm_vc4_bo_unlock(hal_tbm_bo *bo)
+{
+#ifndef ALWAYS_BACKEND_CTRL
+	tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+	struct dma_buf_fence fence;
+	struct flock filelock;
+	unsigned int dma_type = 0;
+	int ret = 0;
+	char buf[STRERR_BUFSIZE];
+
+	if (!bo_data || !bo_data->bufmgr_data)
+		return HAL_TBM_ERROR_INVALID_PARAMETER;
+
+ if (bo_data->dma_fence[0].type & DMA_BUF_ACCESS_DMA)
+ dma_type = 1;
+
+ if (!bo_data->dma_fence[0].ctx && dma_type) {
+		TBM_BACKEND_DBG("FENCE not supported or ignored\n");
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+ if (!bo_data->dma_fence[0].ctx && dma_type) {
+		TBM_BACKEND_DBG("device type is not 3D/CPU\n");
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+ pthread_mutex_lock(&bo_data->mutex);
+
+ if (dma_type) {
+ fence.type = bo_data->dma_fence[0].type;
+ fence.ctx = bo_data->dma_fence[0].ctx;
+ int i;
+
+ for (i = 1; i < DMA_FENCE_LIST_MAX; i++) {
+ bo_data->dma_fence[i - 1].type = bo_data->dma_fence[i].type;
+ bo_data->dma_fence[i - 1].ctx = bo_data->dma_fence[i].ctx;
+ }
+ bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].type = 0;
+ bo_data->dma_fence[DMA_FENCE_LIST_MAX - 1].ctx = 0;
+ }
+
+ pthread_mutex_unlock(&bo_data->mutex);
+
+ if (dma_type) {
+ ret = ioctl(bo_data->dmabuf, DMABUF_IOCTL_PUT_FENCE, &fence);
+ if (ret < 0) {
+ TBM_BACKEND_ERR("Can not set PUT FENCE(%s)\n", strerror_r(errno, buf, STRERR_BUFSIZE));
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+ } else {
+ filelock.l_type = F_UNLCK;
+ filelock.l_whence = SEEK_CUR;
+ filelock.l_start = 0;
+ filelock.l_len = 0;
+
+ if (-1 == fcntl(bo_data->dmabuf, F_SETLKW, &filelock))
+ return HAL_TBM_ERROR_INVALID_OPERATION;
+ }
+
+	TBM_BACKEND_DBG("DMABUF_IOCTL_PUT_FENCE! bo_data:%p, gem:%d(%d), fd:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf);
+#endif /* ALWAYS_BACKEND_CTRL */
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static hal_tbm_fd
+tbm_vc4_bo_export_fd(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+ int ret;
+ char buf[STRERR_BUFSIZE];
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return -1;
+ }
+
+ struct drm_prime_handle arg = {0, };
+
+ arg.handle = bo_data->gem;
+ ret = drmIoctl(bo_data->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &arg);
+ if (ret) {
+		TBM_BACKEND_ERR("bo_data:%p Cannot export gem:%d to dmabuf fd(%s)\n",
+				bo_data, bo_data->gem, strerror_r(errno, buf, STRERR_BUFSIZE));
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_OPERATION;
+ return (hal_tbm_fd) ret;
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, key_fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ arg.fd,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_fd)arg.fd;
+}
+
+static hal_tbm_key
+tbm_vc4_bo_export_key(hal_tbm_bo *bo, hal_tbm_error *error)
+{
+ tbm_vc4_bo *bo_data = (tbm_vc4_bo *)bo;
+
+ if (!bo_data) {
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+
+ if (!bo_data->name) {
+ bo_data->name = _get_name(bo_data->fd, bo_data->gem);
+ if (!bo_data->name) {
+			TBM_BACKEND_ERR("Cannot get name\n");
+ if (error)
+ *error = HAL_TBM_ERROR_INVALID_PARAMETER;
+ return 0;
+ }
+ }
+
+ TBM_BACKEND_DBG(" bo_data:%p, gem:%d(%d), fd:%d, flags:%d, size:%d\n",
+ bo_data,
+ bo_data->gem, bo_data->name,
+ bo_data->dmabuf,
+ bo_data->flags_tbm,
+ bo_data->size);
+
+ if (error)
+ *error = HAL_TBM_ERROR_NONE;
+
+ return (hal_tbm_key)bo_data->name;
+}
+
+static hal_tbm_error
+_tbm_vc4_authenticated_drm_fd_handler(hal_tbm_fd auth_fd, void *user_data)
+{
+ tbm_vc4_bufmgr *bufmgr_data = (tbm_vc4_bufmgr *) user_data;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, HAL_TBM_ERROR_INVALID_PARAMETER);
+
+ bufmgr_data->fd = auth_fd;
+ TBM_BACKEND_INFO("Get the authenticated drm_fd(%d)!\n", bufmgr_data->fd);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static int
+hal_backend_tbm_vc4_exit(void *data)
+{
+ hal_tbm_backend_data *backend_data = (hal_tbm_backend_data *)data;
+ tbm_vc4_bufmgr *bufmgr_data;
+ tbm_vc4_surface *s = NULL, *ss = NULL;
+ unsigned long key;
+ void *value;
+
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(backend_data != NULL, -1);
+
+ bufmgr_data = (tbm_vc4_bufmgr *)backend_data->bufmgr;
+ TBM_BACKEND_RETURN_VAL_IF_FAIL(bufmgr_data != NULL, -1);
+
+ if (backend_data->bo_funcs)
+ free(backend_data->bo_funcs);
+ if (backend_data->surface_funcs)
+ free(backend_data->surface_funcs);
+ if (backend_data->bufmgr_funcs)
+ free(backend_data->bufmgr_funcs);
+
+ if (!LIST_IS_EMPTY(&bufmgr_data->surface_data_list)) {
+ LIST_FOR_EACH_ENTRY_SAFE(s, ss, &bufmgr_data->surface_data_list, link) {
+ LIST_DEL(&s->link);
+			tbm_vc4_surface_free((hal_tbm_surface *)s);
+ }
+ }
+
+ if (bufmgr_data->hashBos) {
+ while (drmHashFirst(bufmgr_data->hashBos, &key, &value) > 0) {
+ drmHashDelete(bufmgr_data->hashBos, key);
+ free(value);
+ }
+ drmHashDestroy(bufmgr_data->hashBos);
+ }
+
+ _bufmgr_deinit_cache_state(bufmgr_data);
+
+ close(bufmgr_data->fd);
+
+ free(backend_data->bufmgr);
+ free(backend_data);
+
+ return HAL_TBM_ERROR_NONE;
+}
+
+static int
+hal_backend_tbm_vc4_init(void **data)
+{
+ hal_tbm_backend_data *backend_data = NULL;
+ hal_tbm_bufmgr_funcs *bufmgr_funcs = NULL;
+ hal_tbm_surface_funcs *surface_funcs = NULL;
+ hal_tbm_bo_funcs *bo_funcs = NULL;
+ tbm_vc4_bufmgr *bufmgr_data = NULL;
+ int drm_fd = -1;
+ int fp;
+
+ /* allocate a hal_tbm_backend_data */
+ backend_data = calloc(1, sizeof(struct _hal_tbm_backend_data));
+ if (!backend_data) {
+ TBM_BACKEND_ERR("fail to alloc backend_data!\n");
+ *data = NULL;
+ return -1;
+ }
+ *data = backend_data;
+
+ /* allocate a hal_tbm_bufmgr */
+ bufmgr_data = calloc(1, sizeof(struct _tbm_vc4_bufmgr));
+ if (!bufmgr_data) {
+ TBM_BACKEND_ERR("fail to alloc bufmgr_data!\n");
+ goto fail_alloc_bufmgr_data;
+ }
+ backend_data->bufmgr = (hal_tbm_bufmgr *)bufmgr_data;
+
+ // open drm_fd
+ drm_fd = _tbm_vc4_open_drm();
+ if (drm_fd < 0) {
+ TBM_BACKEND_ERR("fail to open drm!\n");
+ goto fail_open_drm;
+ }
+
+ // set true when backend has a drm_device.
+ backend_data->has_drm_device = 1;
+
+ // check if drm_fd is master_drm_fd.
+ if (drmIsMaster(drm_fd)) {
+ // drm_fd is a master_drm_fd.
+ backend_data->drm_info.drm_fd = drm_fd;
+ backend_data->drm_info.is_master = 1;
+
+ bufmgr_data->fd = drm_fd;
+ TBM_BACKEND_INFO("Get the master drm_fd(%d)!\n", bufmgr_data->fd);
+ } else {
+ // drm_fd is not a master_drm_fd.
+ // request authenticated fd
+ close(drm_fd);
+ backend_data->drm_info.drm_fd = -1;
+ backend_data->drm_info.is_master = 0;
+ backend_data->drm_info.auth_drm_fd_func = _tbm_vc4_authenticated_drm_fd_handler;
+ backend_data->drm_info.user_data = bufmgr_data;
+
+ TBM_BACKEND_INFO("A backend requests an authenticated drm_fd.\n");
+ }
+
+ //Check if the tbm manager supports dma fence or not.
+ fp = open("/sys/module/dmabuf_sync/parameters/enabled", O_RDONLY);
+ if (fp != -1) {
+ char buf[1];
+ int length = read(fp, buf, 1);
+
+ if (length == 1 && buf[0] == '1')
+ bufmgr_data->use_dma_fence = 1;
+
+ close(fp);
+ }
+
+ if (!_bufmgr_init_cache_state(bufmgr_data)) {
+ TBM_BACKEND_ERR("fail to init bufmgr cache state\n");
+ goto fail_init_cache_state;
+ }
+
+ /*Create Hash Table*/
+ bufmgr_data->hashBos = drmHashCreate();
+
+ // initialize the surface_data list
+ LIST_INITHEAD(&bufmgr_data->surface_data_list);
+
+ /* alloc and register bufmgr_funcs */
+ bufmgr_funcs = calloc(1, sizeof(struct _hal_tbm_bufmgr_funcs));
+ if (!bufmgr_funcs) {
+ TBM_BACKEND_ERR("fail to alloc bufmgr_funcs!\n");
+ goto fail_alloc_bufmgr_funcs;
+ }
+ backend_data->bufmgr_funcs = bufmgr_funcs;
+
+ bufmgr_funcs->bufmgr_get_capabilities = tbm_vc4_bufmgr_get_capabilities;
+ bufmgr_funcs->bufmgr_get_supported_formats = tbm_vc4_bufmgr_get_supported_formats;
+ bufmgr_funcs->bufmgr_get_plane_data = tbm_vc4_bufmgr_get_plane_data;
+ bufmgr_funcs->bufmgr_alloc_surface = tbm_vc4_bufmgr_alloc_surface;
+ bufmgr_funcs->bufmgr_import_surface = tbm_vc4_bufmgr_import_surface;
+ bufmgr_funcs->bufmgr_alloc_bo = tbm_vc4_bufmgr_alloc_bo;
+#ifdef VC4_TILED_FORMAT
+ bufmgr_funcs->bufmgr_alloc_bo_with_format = tbm_vc4_bufmgr_alloc_bo_with_tiled_format;
+#else
+ bufmgr_funcs->bufmgr_alloc_bo_with_format = NULL;
+#endif
+ bufmgr_funcs->bufmgr_import_fd = tbm_vc4_bufmgr_import_fd;
+ bufmgr_funcs->bufmgr_import_key = tbm_vc4_bufmgr_import_key;
+
+ /* alloc and register surface_funcs */
+ surface_funcs = calloc(1, sizeof(struct _hal_tbm_surface_funcs));
+ if (!surface_funcs) {
+ TBM_BACKEND_ERR("fail to alloc surface_funcs!\n");
+ goto fail_alloc_surface_funcs;
+ }
+ backend_data->surface_funcs = surface_funcs;
+
+ surface_funcs->surface_free = tbm_vc4_surface_free;
+ surface_funcs->surface_get_bos = tbm_vc4_surface_get_bos;
+ surface_funcs->surface_get_plane_data = tbm_vc4_surface_get_plane_data;
+ surface_funcs->surface_export = tbm_vc4_surface_export;
+
+ /* alloc and register bo_funcs */
+ bo_funcs = calloc(1, sizeof(struct _hal_tbm_bo_funcs));
+ if (!bo_funcs) {
+ TBM_BACKEND_ERR("fail to alloc bo_funcs!\n");
+ goto fail_alloc_bo_funcs;
+ }
+ backend_data->bo_funcs = bo_funcs;
+
+ bo_funcs->bo_free = tbm_vc4_bo_free;
+ bo_funcs->bo_get_size = tbm_vc4_bo_get_size;
+ bo_funcs->bo_get_memory_types = tbm_vc4_bo_get_memory_type;
+ bo_funcs->bo_get_handle = tbm_vc4_bo_get_handle;
+ bo_funcs->bo_map = tbm_vc4_bo_map;
+ bo_funcs->bo_unmap = tbm_vc4_bo_unmap;
+ bo_funcs->bo_lock = tbm_vc4_bo_lock;
+ bo_funcs->bo_unlock = tbm_vc4_bo_unlock;
+ bo_funcs->bo_export_fd = tbm_vc4_bo_export_fd;
+ bo_funcs->bo_export_key = tbm_vc4_bo_export_key;
+
+ TBM_BACKEND_DBG("drm_fd:%d\n", bufmgr_data->fd);
+
+ return HAL_TBM_ERROR_NONE;
+
+fail_alloc_bo_funcs:
+ free(surface_funcs);
+fail_alloc_surface_funcs:
+ free(bufmgr_funcs);
+fail_alloc_bufmgr_funcs:
+ _bufmgr_deinit_cache_state(bufmgr_data);
+ if (bufmgr_data->hashBos)
+ drmHashDestroy(bufmgr_data->hashBos);
+fail_init_cache_state:
+ close(bufmgr_data->fd);
+fail_open_drm:
+ free(bufmgr_data);
+fail_alloc_bufmgr_data:
+ free(backend_data);
+
+ *data = NULL;
+
+ return -1;
+}
+
+hal_backend hal_backend_tbm_data = {
+ "vc4",
+ "Samsung",
+ HAL_ABI_VERSION_TIZEN_6_5,
+ hal_backend_tbm_vc4_init,
+ hal_backend_tbm_vc4_exit
+};
--- /dev/null
+/**************************************************************************
+ *
+ * libtbm
+ *
+ * Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
+ *
+ * Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
+ * Boram Park <boram1288.park@samsung.com>, Changyeon Lee <cyeon.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * **************************************************************************/
+
+#ifndef __TBM_BUFMGR_TGL_H__
+#define __TBM_BUFMGR_TGL_H__
+
+#include <linux/ioctl.h>
+
+#ifdef ENABLE_CACHECRTL
+static char tgl_devfile[] = "/dev/slp_global_lock";
+static char tgl_devfile1[] = "/dev/tgl";
+#endif
+
+#define TGL_IOCTL_BASE 0x32
+#define TGL_IO(nr) _IO(TGL_IOCTL_BASE, nr)
+#define TGL_IOR(nr, type) _IOR(TGL_IOCTL_BASE, nr, type)
+#define TGL_IOW(nr, type) _IOW(TGL_IOCTL_BASE, nr, type)
+#define TGL_IOWR(nr, type) _IOWR(TGL_IOCTL_BASE, nr, type)
+
+/**
+ * struct tgl_ver_data - tgl version data structure
+ * @major: major version
+ * @minor: minor version
+ */
+struct tgl_ver_data {
+ unsigned int major;
+ unsigned int minor;
+};
+
+/**
+ * struct tgl_reg_data - tgl data structure
+ * @key: lookup key
+ * @timeout_ms: timeout value for waiting event
+ */
+struct tgl_reg_data {
+ unsigned int key;
+ unsigned int timeout_ms;
+};
+
+enum tgl_type_data {
+ TGL_TYPE_NONE = 0,
+ TGL_TYPE_READ = (1 << 0),
+ TGL_TYPE_WRITE = (1 << 1),
+};
+
+/**
+ * struct tgl_lock_data - tgl lock data structure
+ * @key: lookup key
+ * @type: lock type that is in tgl_type_data
+ */
+struct tgl_lock_data {
+ unsigned int key;
+ enum tgl_type_data type;
+};
+
+enum tgl_status_data {
+ TGL_STATUS_UNLOCKED,
+ TGL_STATUS_LOCKED,
+};
+
+/**
+ * struct tgl_usr_data - tgl user data structure
+ * @key: lookup key
+ * @data1: user data 1
+ * @data2: user data 2
+ * @status: lock status that is in tgl_status_data
+ */
+struct tgl_usr_data {
+ unsigned int key;
+ unsigned int data1;
+ unsigned int data2;
+ enum tgl_status_data status;
+};
+
+enum {
+ _TGL_GET_VERSION,
+ _TGL_REGISTER,
+ _TGL_UNREGISTER,
+ _TGL_LOCK,
+ _TGL_UNLOCK,
+ _TGL_SET_DATA,
+ _TGL_GET_DATA,
+};
+
+/* get version information */
+#define TGL_IOCTL_GET_VERSION TGL_IOR(_TGL_GET_VERSION, struct tgl_ver_data)
+/* register key */
+#define TGL_IOCTL_REGISTER TGL_IOW(_TGL_REGISTER, struct tgl_reg_data)
+/* unregister key */
+#define TGL_IOCTL_UNREGISTER TGL_IOW(_TGL_UNREGISTER, struct tgl_reg_data)
+/* lock with key */
+#define TGL_IOCTL_LOCK TGL_IOW(_TGL_LOCK, struct tgl_lock_data)
+/* unlock with key */
+#define TGL_IOCTL_UNLOCK TGL_IOW(_TGL_UNLOCK, struct tgl_lock_data)
+/* set user data with key */
+#define TGL_IOCTL_SET_DATA TGL_IOW(_TGL_SET_DATA, struct tgl_usr_data)
+/* get user data with key */
+#define TGL_IOCTL_GET_DATA TGL_IOR(_TGL_GET_DATA, struct tgl_usr_data)
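+
+/*
+ * Minimal usage sketch of the lock interface above (illustrative only; this
+ * backend does not call it): one plausible sequence, based on the ioctl names,
+ * is to register a key, take and release a write lock, then unregister the
+ * key. The helper name and the zero timeout are placeholders chosen for this
+ * example; <sys/ioctl.h> is included here only so the sketch is self-contained.
+ */
+#ifdef ENABLE_CACHECRTL
+#include <sys/ioctl.h>
+
+static inline int
+_tgl_lock_sketch(int tgl_fd, unsigned int key)
+{
+	struct tgl_reg_data reg = { .key = key, .timeout_ms = 0 };
+	struct tgl_lock_data lock = { .key = key, .type = TGL_TYPE_WRITE };
+	int ret = -1;
+
+	/* make the key known to the tgl driver */
+	if (ioctl(tgl_fd, TGL_IOCTL_REGISTER, &reg) < 0)
+		return -1;
+
+	/* take and release the global write lock for this key */
+	if (ioctl(tgl_fd, TGL_IOCTL_LOCK, &lock) == 0) {
+		/* ... access the resource guarded by 'key' here ... */
+		ret = ioctl(tgl_fd, TGL_IOCTL_UNLOCK, &lock);
+	}
+
+	/* drop the key again */
+	ioctl(tgl_fd, TGL_IOCTL_UNREGISTER, &reg);
+
+	return ret;
+}
+#endif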
+
+#ifdef ENABLE_CACHECRTL
+/* indicate cache units. */
+enum e_drm_vc4_gem_cache_sel {
+ VC4_DRM_L1_CACHE = 1 << 0,
+ VC4_DRM_L2_CACHE = 1 << 1,
+ VC4_DRM_ALL_CORES = 1 << 2,
+ VC4_DRM_ALL_CACHES = VC4_DRM_L1_CACHE |
+ VC4_DRM_L2_CACHE,
+ VC4_DRM_ALL_CACHES_CORES = VC4_DRM_L1_CACHE |
+ VC4_DRM_L2_CACHE |
+ VC4_DRM_ALL_CORES,
+ VC4_DRM_CACHE_SEL_MASK = VC4_DRM_ALL_CACHES_CORES
+};
+
+/* indicate cache operation types. */
+enum e_drm_vc4_gem_cache_op {
+ VC4_DRM_CACHE_INV_ALL = 1 << 3,
+ VC4_DRM_CACHE_INV_RANGE = 1 << 4,
+ VC4_DRM_CACHE_CLN_ALL = 1 << 5,
+ VC4_DRM_CACHE_CLN_RANGE = 1 << 6,
+ VC4_DRM_CACHE_FSH_ALL = VC4_DRM_CACHE_INV_ALL |
+ VC4_DRM_CACHE_CLN_ALL,
+ VC4_DRM_CACHE_FSH_RANGE = VC4_DRM_CACHE_INV_RANGE |
+ VC4_DRM_CACHE_CLN_RANGE,
+ VC4_DRM_CACHE_OP_MASK = VC4_DRM_CACHE_FSH_ALL |
+ VC4_DRM_CACHE_FSH_RANGE
+};
+
+/**
+ * A structure for cache operations.
+ *
+ * @usr_addr: address of the buffer; it must be a user-space address.
+ * @size: buffer size for the cache operation.
+ * @flags: selects the cache unit and the cache operation.
+ * @gem_handle: a handle to a gem object;
+ *	needed for a cache range operation on the L2 cache.
+ */
+struct drm_vc4_gem_cache_op {
+ uint64_t usr_addr;
+ unsigned int size;
+ unsigned int flags;
+ unsigned int gem_handle;
+};
+
+#define DRM_VC4_GEM_CACHE_OP 0x12
+
+#define DRM_IOCTL_VC4_GEM_CACHE_OP DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_VC4_GEM_CACHE_OP, struct drm_vc4_gem_cache_op)
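+
+/*
+ * Minimal usage sketch (illustrative only; not part of the backend API):
+ * flush all caches for a mapped bo via DRM_IOCTL_VC4_GEM_CACHE_OP, which is
+ * presumably the mechanism the backend's _vc4_cache_flush() helper relies on.
+ * The helper name and parameters are placeholders for this example; it
+ * assumes <stdint.h> and <xf86drm.h> (for drmIoctl) are included by the
+ * translation unit that includes this header.
+ */
+static inline int
+_vc4_gem_cache_flush_all_sketch(int drm_fd, unsigned int gem_handle,
+				void *mapped, unsigned int size)
+{
+	struct drm_vc4_gem_cache_op op = {0, };
+
+	op.usr_addr = (uint64_t)(uintptr_t)mapped;	/* must be a user-space address */
+	op.size = size;
+	op.flags = VC4_DRM_CACHE_FSH_ALL | VC4_DRM_ALL_CACHES_CORES;
+	op.gem_handle = gem_handle;			/* needed for L2 range operations */
+
+	/* drmIoctl() retries on EINTR; returns 0 on success, -1 (errno set) on failure */
+	return drmIoctl(drm_fd, DRM_IOCTL_VC4_GEM_CACHE_OP, &op);
+}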
+
+#endif
+
+#endif /* __TBM_BUFMGR_TGL_H__ */