--- /dev/null
+1. atomic data type
+ AFFECTED FILE(S): tpl_utils.h
+ DESCRIPTION:
+ Both ARM Mali Midgard and Utgard use unsigned int as atomic int
+   type. Currently, tpl_util_osu_atomic is typedef'ed from
+ unsigned int. However, this could change in future releases of
+ respective DDKs, and appropriate typedef must be made or
+ memory corruption will occur.
+2. TPL initialization
+ AFFECTED FILE(S): tpl.c
+ DESCRIPTION:
+ Upon starting EGL (usually, eglGetDisplay()), tpl_egl_funcs_t
+ must be created in the DDK's EGL porting layer, and its pointer
+ must be passed as argument to tpl_set_egl_funcs(). Failure to
+ do so will result in a segmentation fault.
#$(call is-feature-enabled,featurename)
#returns non-empty string if enabled, empty if not
define is-feature-enabled
-$(findstring -$1-,-$(VARIANT)-)
+$(findstring -$1-,-$(TPL_OPTIONS)-)
endef
SRC_DIR = ./src
CFLAGS += -Wall -fPIC -I$(SRC_DIR)
LDFLAGS +=
-CFLAGS += $(pkg-config --cflags gles20 libdrm libtbm)
-LDFLAGS += $(pkg-config --libs gles20 libdrm libtbm)
+CFLAGS += `pkg-config --cflags gles20 libdrm libtbm`
+LDFLAGS += `pkg-config --libs gles20 libdrm libtbm`
ifneq ($(call is-feature-enabled,winsys_dri2),)
CFLAGS += -DWINSYS_DRI2
%define ENABLE_TTRACE 0
+################################################################################
+
Name: libtpl
Summary: Tizen Porting Layer for ARM Mali EGL
%if "%{WINSYS_DRI2}" == "1"
License: MIT
Source: %{name}-%{version}.tar.gz
-BuildRequires: pkgconfig(glesv2)
+BuildRequires: pkg-config
+BuildRequires: pkgconfig(gles20)
BuildRequires: pkgconfig(libdrm)
BuildRequires: pkgconfig(libtbm)
%endif
%if "%{WINSYS_DRI2}" == "1"
-export TPL_OPTIONS=%{TPL_OPTIONS}-winsys_dri2
+export TPL_OPTIONS=${TPL_OPTIONS}-winsys_dri2
%endif
%if "%{WINSYS_DRI3}" == "1"
-export TPL_OPTIONS=%{TPL_OPTIONS}-winsys_dri3
+export TPL_OPTIONS=${TPL_OPTIONS}-winsys_dri3
%endif
%if "%{WINSYS_WL}" == "1"
-export TPL_OPTIONS=%{TPL_OPTIONS}-winsys_wl
+export TPL_OPTIONS=${TPL_OPTIONS}-winsys_wl
%endif
export TPL_VER_MAJOR=%{TPL_VER_MAJOR}
export TPL_VER_MINOR=%{TPL_VER_MINOR}
%if "%{ENABLE_TTRACE}" == "1"
-export TPL_OPTIONS=%{TPL_OPTIONS}-ttrace
+export TPL_OPTIONS=${TPL_OPTIONS}-ttrace
%endif
+export TPL_OPTIONS=${TPL_OPTIONS}-
+
make all
%install
mkdir -p %{buildroot}%{_libdir}
+mkdir -p %{buildroot}%{_includedir}
mkdir -p %{buildroot}%{_libdir}/pkgconfig
cp -a libtpl.so.%{TPL_VERSION} %{buildroot}%{_libdir}/
-ln -sf libtpl.so.%{TPL_VER_MAJOR} %{buildroot}%{_libdir}/libtpl.so.%{TPL_VERSION}
-ln -sf libtpl.so %{buildroot}%{_libdir}/libtpl.so.%{TPL_VER_MAJOR}
+ln -sf libtpl.so.%{TPL_VERSION} %{buildroot}%{_libdir}/libtpl.so.%{TPL_VER_MAJOR}
+ln -sf libtpl.so.%{TPL_VER_MAJOR} %{buildroot}%{_libdir}/libtpl.so
cp -a src/tpl.h %{buildroot}%{_includedir}/
cp -a pkgconfig/tpl.pc %{buildroot}%{_libdir}/pkgconfig/
--- /dev/null
+#include "tpl_internal.h"
+
+unsigned int tpl_log_lvl;
+
+struct _tpl_runtime
+{
+ tpl_egl_funcs_t *egl_funcs;
+ tpl_utils_ptrdict displays[TPL_BACKEND_COUNT];
+};
+
+static tpl_runtime_t *runtime = NULL;
+static pthread_mutex_t runtime_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void
+__tpl_runtime_init()
+{
+ if (runtime == NULL)
+ {
+ runtime = (tpl_runtime_t *)calloc(1, sizeof(tpl_runtime_t));
+ TPL_ASSERT(runtime != NULL);
+ }
+}
+
+static void __attribute__((destructor))
+__tpl_runtime_fini()
+{
+ if (runtime != NULL)
+ {
+ int i;
+
+ for (i = 0; i < TPL_BACKEND_COUNT; i++)
+ {
+ if (runtime->displays[i] != NULL)
+ tpl_utils_ptrdict_free(runtime->displays[i]);
+ }
+
+ free(runtime);
+ runtime = NULL;
+ }
+}
+
+void tpl_set_egl_funcs(tpl_egl_funcs_t *eglfuncs)
+{
+ __tpl_runtime_init();
+
+ runtime->egl_funcs = eglfuncs;
+}
+
+/* Begin: DDK dependent types and function definition */
+void tpl_util_sys_yield(void)
+{
+ int status;
+ status = sched_yield();
+ if (0 != status)
+ {
+ printf("sched_yield failed, ret=%.8x\n", status);
+ }
+}
+
+int tpl_util_clz(int input)
+{
+ return runtime->egl_funcs->clz(input);
+}
+
+int tpl_util_osu_atomic_get(const tpl_util_osu_atomic * const atom)
+{
+ return runtime->egl_funcs->atomic_get(atom);
+}
+
+void tpl_util_osu_atomic_set(tpl_util_osu_atomic * const atom, int val)
+{
+ runtime->egl_funcs->atomic_set(atom, val);
+}
+
+int tpl_util_osu_atomic_inc( tpl_util_osu_atomic * const atom )
+{
+ return runtime->egl_funcs->atomic_inc(atom);
+}
+int tpl_util_osu_atomic_dec( tpl_util_osu_atomic * const atom )
+{
+ return runtime->egl_funcs->atomic_dec(atom);
+}
+
+
+
+tpl_utils_ptrdict tpl_utils_ptrdict_allocate(void (*freefunc)(void *))
+{
+ tpl_utils_ptrdict d;
+ d = malloc(runtime->egl_funcs->ptrdict_size);
+
+ if (!d)
+ return NULL;
+
+ runtime->egl_funcs->ptrdict_init(d, NULL, NULL, freefunc);
+ return d;
+}
+
+tpl_bool_t tpl_utils_ptrdict_insert(tpl_utils_ptrdict d, void *name, void *data)
+{
+ return (tpl_bool_t) runtime->egl_funcs->ptrdict_insert(d, name, data);
+}
+
+
+void *tpl_utils_ptrdict_get(tpl_utils_ptrdict d, void *name)
+{
+ void *ret;
+ runtime->egl_funcs->ptrdict_lookup_key(d, name, &ret);
+ return ret;
+}
+
+void tpl_utils_ptrdict_free(tpl_utils_ptrdict d)
+{
+ runtime->egl_funcs->ptrdict_term(d);
+}
+
+void tpl_utils_ptrdict_remove(tpl_utils_ptrdict d, void *name)
+{
+ runtime->egl_funcs->ptrdict_remove(d, name);
+}
+
+void tpl_utils_ptrdict_iterate_init(tpl_utils_ptrdict d, tpl_utils_ptrdict_iter it)
+{
+ runtime->egl_funcs->ptrdict_iter_init(it, d);
+}
+
+void *tpl_utils_ptrdict_next( tpl_utils_ptrdict_iter it, void **value )
+{
+ return runtime->egl_funcs->ptrdict_next(it, value);
+}
+/* End: DDK dependent types and function definition */
+
+tpl_display_t *
+__tpl_runtime_find_display(tpl_backend_type_t type, tpl_handle_t native_display)
+{
+ tpl_display_t *display = NULL;
+
+ if (runtime == NULL)
+ return NULL;
+
+ pthread_mutex_lock(&runtime_mutex);
+
+ if (type != TPL_BACKEND_UNKNOWN)
+ {
+ if (runtime->displays[type] != NULL)
+ {
+ display = (tpl_display_t *) tpl_utils_ptrdict_get(runtime->displays[type],
+ (void *) native_display);
+ }
+ }
+ else
+ {
+ int i;
+
+ for (i = 0; i < TPL_BACKEND_COUNT; i++)
+ {
+ if (runtime->displays[i] != NULL)
+ {
+ display = (tpl_display_t *) tpl_utils_ptrdict_get(runtime->displays[i],
+ (void *) native_display);
+ }
+ if (display != NULL) break;
+ }
+ }
+
+ pthread_mutex_unlock(&runtime_mutex);
+
+ return display;
+}
+
+void
+__tpl_runtime_add_display(tpl_display_t *display)
+{
+ tpl_bool_t ret;
+ tpl_handle_t handle = display->native_handle;
+ tpl_backend_type_t type = display->backend.type;
+
+ pthread_mutex_lock(&runtime_mutex);
+ __tpl_runtime_init();
+
+ if (type != TPL_BACKEND_UNKNOWN)
+ {
+ if (runtime->displays[type] == NULL)
+ runtime->displays[type] = tpl_utils_ptrdict_allocate(NULL);
+
+ ret = tpl_utils_ptrdict_insert(runtime->displays[type], (void *) handle, (void *)display);
+ TPL_ASSERT(ret == TPL_TRUE);
+ }
+
+ pthread_mutex_unlock(&runtime_mutex);
+}
+
+void
+__tpl_runtime_remove_display(tpl_display_t *display)
+{
+ tpl_handle_t handle = display->native_handle;
+ tpl_backend_type_t type = display->backend.type;
+
+ pthread_mutex_lock(&runtime_mutex);
+
+ if (type != TPL_BACKEND_UNKNOWN)
+ {
+ if (runtime != NULL && runtime->displays[type] != NULL)
+ tpl_utils_ptrdict_remove(runtime->displays[type], (void *) handle);
+ }
+
+ pthread_mutex_unlock(&runtime_mutex);
+}
+
+void
+__tpl_runtime_flush_all_display()
+{
+ int i;
+
+ if (runtime == NULL)
+ return;
+
+ pthread_mutex_lock(&runtime_mutex);
+
+ for (i = 0; i < TPL_BACKEND_COUNT; i++)
+ {
+ if (runtime->displays[i] != NULL)
+ {
+ tpl_utils_ptrdict_iter iterator;
+ tpl_display_t *display;
+
+ tpl_utils_ptrdict_iterate_init(runtime->displays[i], &iterator);
+
+ while (tpl_utils_ptrdict_next( &iterator, (void **)(&display)))
+ {
+ TPL_OBJECT_LOCK(display);
+ __tpl_display_flush(display);
+ TPL_OBJECT_UNLOCK(display);
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&runtime_mutex);
+}
+
+tpl_backend_type_t
+__tpl_display_choose_backend(tpl_handle_t native_dpy)
+{
+#if TPL_WITH_WAYLAND == 1
+ if (__tpl_display_choose_backend_wayland(native_dpy) == TPL_TRUE)
+ return TPL_BACKEND_WAYLAND;
+#endif
+#if TPL_WITH_X11_DRI2 == 1
+ if (__tpl_display_choose_backend_x11_dri2(native_dpy) == TPL_TRUE)
+ return TPL_BACKEND_X11_DRI2;
+#endif
+#if TPL_WITH_X11_DRI3 == 1
+ if (__tpl_display_choose_backend_x11_dri3(native_dpy) == TPL_TRUE)
+ return TPL_BACKEND_X11_DRI3;
+#endif
+ return TPL_BACKEND_UNKNOWN;
+}
+
+void
+__tpl_display_init_backend(tpl_display_t *display, tpl_backend_type_t type)
+{
+ switch (type)
+ {
+#if TPL_WITH_WAYLAND == 1
+ case TPL_BACKEND_WAYLAND:
+ __tpl_display_init_backend_wayland(&display->backend);
+ break;
+#endif
+#if TPL_WITH_X11_DRI2 == 1
+ case TPL_BACKEND_X11_DRI2:
+ __tpl_display_init_backend_x11_dri2(&display->backend);
+ break;
+#endif
+#if TPL_WITH_X11_DRI3 == 1
+ case TPL_BACKEND_X11_DRI3:
+ __tpl_display_init_backend_x11_dri3(&display->backend);
+ break;
+#endif
+ default:
+ TPL_ASSERT(TPL_FALSE);
+ break;
+ }
+}
+
+void
+__tpl_surface_init_backend(tpl_surface_t *surface, tpl_backend_type_t type)
+{
+ switch (type)
+ {
+#if TPL_WITH_WAYLAND == 1
+ case TPL_BACKEND_WAYLAND:
+ __tpl_surface_init_backend_wayland(&surface->backend);
+ break;
+#endif
+#if TPL_WITH_X11_DRI2 == 1
+ case TPL_BACKEND_X11_DRI2:
+ __tpl_surface_init_backend_x11_dri2(&surface->backend);
+ break;
+#endif
+#if TPL_WITH_X11_DRI3 == 1
+ case TPL_BACKEND_X11_DRI3:
+ __tpl_surface_init_backend_x11_dri3(&surface->backend);
+ break;
+#endif
+ default:
+ TPL_ASSERT(TPL_FALSE);
+ break;
+ }
+}
+
+void
+__tpl_buffer_init_backend(tpl_buffer_t *buffer, tpl_backend_type_t type)
+{
+ switch (type)
+ {
+#if TPL_WITH_WAYLAND == 1
+ case TPL_BACKEND_WAYLAND:
+ __tpl_buffer_init_backend_wayland(&buffer->backend);
+ break;
+#endif
+#if TPL_WITH_X11_DRI2 == 1
+ case TPL_BACKEND_X11_DRI2:
+ __tpl_buffer_init_backend_x11_dri2(&buffer->backend);
+ break;
+#endif
+#if TPL_WITH_X11_DRI3 == 1
+ case TPL_BACKEND_X11_DRI3:
+ __tpl_buffer_init_backend_x11_dri3(&buffer->backend);
+ break;
+#endif
+ default:
+ TPL_ASSERT(TPL_FALSE);
+ break;
+ }
+}
+
+tpl_bool_t
+tpl_get_native_window_info(tpl_display_t *display, tpl_handle_t window,
+ int *width, int *height, tpl_format_t *format, int depth, int a_size)
+{
+ return display->backend.get_window_info(display, window, width, height, format, depth, a_size);
+}
+
+tpl_bool_t
+tpl_get_native_pixmap_info(tpl_display_t *display, tpl_handle_t pixmap,
+ int *width, int *height, tpl_format_t *format)
+{
+ return display->backend.get_pixmap_info(display, pixmap, width, height, format);
+}
--- /dev/null
+#ifndef TPL_H
+#define TPL_H
+
+/**
+ * @file tpl.h
+ * @brief TPL API header file.
+ *
+ * TPL is an abstraction layer for surface & buffer management on Tizen
+ * platform aimed to implement the EGL porting layer of ARM Mali OpenGLES
+ * driver over various display protocols.
+ *
+ * TPL provides object-oriented interfaces. Every TPL object can be represented
+ * as generic tpl_object_t which is referenced-counted and provides common
+ * functions. Currently, following types of objects are provided.
+ *
+ * Display
+ * Surface
+ * Buffer
+ *
+ * Display, like a normal display, represents a display system which is usually
+ * used for connection to the server, scope for other objects.
+ *
+ * Surface corresponds to a native surface like X drawable or wl_surface.
+ * A surface might be configured to use N-buffers. (usually double-buffered or
+ * triple-buffered).
+ *
+ * Buffer is actually something we can render on it, usually a set of pixels or
+ * block of memory.
+ *
+ * Here is a simple example
+ *
+ * dpy = tpl_display_get(NULL);
+ * sfc = tpl_surface_create(dpy, ...);
+ *
+ * while (1)
+ * {
+ * tpl_surface_begin_frame(sfc);
+ * buf = tpl_surface_get_buffer(sfc);
+ *
+ * draw something...
+ *
+ * tpl_surface_end_frame(sfc);
+ * tpl_surface_post(sfc);
+ * }
+ *
+ * In Mali driver, "draw something..." part is what Mali frame builder does.
+ *
+ * TPL exposes native platform buffer identifiers and managers so that the
+ * buffer can be used in other modules. Currently, dma_buf/DRM is supported for
+ * such kind of purposes.
+ *
+ * EGL porting layer just calls TPL functions to do what it is requested, and
+ * give the result to Mali driver. TPL does all the protocol dependent actions.
+ * Such protocol dependent part can be well-separated into TPL backends.
+ *
+ * Also, TPL backend can be configured at runtime. Users can specify which type
+ * of backend to use when initializing a display object.
+ *
+ * For detailed API semantics, please refer to the API documentations.
+ */
+
+#include <EGL/eglplatform.h>
+
+#define TPL_TRUE 1
+#define TPL_FALSE 0
+
+#define TPL_DONT_CARE -1
+
+/**
+ * Boolean variable type.
+ *
+ * TPL_TRUE or TPL_FALSE
+ */
+typedef unsigned int tpl_bool_t;
+
+/**
+ * Handle to native objects.
+ *
+ * Represent a handle to a native object like pixmap, window, wl_display and
+ * etc.
+ */
+typedef void * tpl_handle_t;
+
+/**
+ * Structure containing function pointers to DDK's EGL layer.
+ *
+ * TPL needs to call DDK specific functions such as atomic operations and hash
+ * tables. The necessary function pointers are registered in this structure.
+ */
+typedef struct _tpl_egl_funcs tpl_egl_funcs_t;
+
+/**
+ * A structure representing generic TPL object.
+ *
+ * Generic base class type for various TPL objects.
+ */
+typedef struct _tpl_object tpl_object_t;
+
+/**
+ * A structure representing TPL display object.
+ *
+ * TPL display is an object representing a system which is used to display
+ * things. This is similar in concept with native displays such as X Display
+ * and wl_display. TPL display is used for following items.
+ *
+ * 1. Communication channel with native display servers.
+ * 2. name space for other TPL objects.
+ */
+typedef struct _tpl_display tpl_display_t;
+
+/**
+ * A structure representing TPL surface object.
+ *
+ * TPL surface is an object representing an image which can be displayed by the
+ * display system. This corresponds to a native surface like X Drawable or
+ * wl_surface. A TPL surface might have several TPL buffers on which we can
+ * render.
+ */
+typedef struct _tpl_surface tpl_surface_t;
+
+/**
+ * A structure representing TPL buffer object.
+ *
+ * TPL buffer is an object representing a set of pixels which is usually a
+ * block of memories.
+ */
+typedef struct _tpl_buffer tpl_buffer_t;
+
+/**
+ * Function type used for freeing some data.
+ */
+typedef void (*tpl_free_func_t)(void *data);
+
+/**
+ * Object types.
+ *
+ * @see tpl_object_get_type()
+ */
+typedef enum
+{
+ TPL_OBJECT_DISPLAY,
+ TPL_OBJECT_SURFACE,
+ TPL_OBJECT_BUFFER
+} tpl_object_type_t;
+
+/**
+ * Surface types.
+ *
+ * On some display system, there're several types of native surfaces. (ex. X11
+ * pixmap and window). Users might want to know what kind of native surface
+ * type a TPL surface was made from.
+ *
+ * @see tpl_surface_create()
+ * @see tpl_surface_get_type()
+ */
+typedef enum
+{
+ TPL_SURFACE_TYPE_WINDOW, /**< surface gets displayed by the display server. */
+ TPL_SURFACE_TYPE_PIXMAP /**< surface is an offscreen pixmap. */
+} tpl_surface_type_t;
+
+/**
+ * Format types.
+ *
+ * TPL specifies several supportable formats. This can be used to specify the
+ * format of a new surface. Each format explains its own depth, color sequence
+ * and color bits. and they are enough attributes to convert to the other
+ * formats (such as GPU drivers, Windowing systems, Buffer managers).
+ *
+ * @see tpl_surface_create()
+ */
+typedef enum
+{
+ TPL_FORMAT_INVALID = 0,
+ TPL_FORMAT_NOT_YET_SPECIFIED = 1,
+ TPL_FORMAT_BGR565,
+ TPL_FORMAT_RGB565 = 16 + (0 << 8),
+ TPL_FORMAT_XBGR8888,
+ TPL_FORMAT_XRGB8888 = 32 + (0 << 8),
+ TPL_FORMAT_ABGR8888,
+ TPL_FORMAT_ARGB8888 = 32 + (1 << 8),
+ TPL_FORMAT_BGRA8888,
+ TPL_FORMAT_RGBA8888,
+ TPL_FORMAT_BGRX8888,
+ TPL_FORMAT_RGBX8888,
+ TPL_FORMAT_BGR888,
+ TPL_FORMAT_RGB888,
+ TPL_FORMAT_ABGR4444,
+ TPL_FORMAT_ARGB4444,
+ TPL_FORMAT_BGRA4444,
+ TPL_FORMAT_RGBA4444,
+ TPL_FORMAT_ABGR1555,
+ TPL_FORMAT_ARGB1555,
+ TPL_FORMAT_BGRA5551,
+ TPL_FORMAT_RGBA5551
+} tpl_format_t;
+
+#define TPL_FORMAT_GET_DEPTH(format) (int)((format) & 0xFF)
+
+/**
+ * Lock usage types.
+ *
+ * TPL provides buffer locks which are used for synchronization. This usage
+ * indicate that what kind of purpose a locking is used for. Depending on the
+ * system, multiple read locks might be allowed. Cache might be flushed when
+ * CPU access is engaged.
+ *
+ * @see tpl_buffer_lock()
+ */
+typedef enum
+{
+ TPL_LOCK_USAGE_INVALID = 0,
+ TPL_LOCK_USAGE_GPU_READ,
+ TPL_LOCK_USAGE_GPU_WRITE,
+ TPL_LOCK_USAGE_CPU_READ,
+ TPL_LOCK_USAGE_CPU_WRITE
+} tpl_lock_usage_t;
+
+/**
+ * Types of TPL backend.
+ *
+ * TPL provides platform independent APIs by implementing platform dependent
+ * things in a backend. These types represent types of such backends. One of
+ * these types should be specified when creating a TPL display object when
+ * calling tpl_display_get().
+ *
+ * @see tpl_display_get()
+ * @see tpl_display_get_backend_type()
+ */
+typedef enum
+{
+ TPL_BACKEND_WAYLAND,
+ TPL_BACKEND_X11_DRI2,
+ TPL_BACKEND_X11_DRI3,
+ TPL_BACKEND_COUNT,
+ TPL_BACKEND_UNKNOWN
+} tpl_backend_type_t;
+
+/**
+ * Initialize TPL with EGL function pointers
+ *
+ * Must be called prior to using TPL.
+ *
+ */
+void tpl_set_egl_funcs(tpl_egl_funcs_t *eglfuncs);
+
+/**
+ * Increase reference count of a TPL object.
+ *
+ * All TPL objects are reference-counted. They have reference count 1 on
+ * creation. When the reference count drops to 0, the object will be freed.
+ *
+ * @param object object which will be referenced.
+ * @return reference count after reference.
+ *
+ * @see tpl_object_unreference()
+ * @see tpl_object_get_reference()
+ */
+int tpl_object_reference(tpl_object_t *object);
+
+/**
+ * Decrease reference count of a TPL object.
+ *
+ * @param object object which will be unreferenced.
+ * @return reference count after unreference.
+ *
+ * @see tpl_object_reference()
+ * @see tpl_object_get_reference()
+ */
+int tpl_object_unreference(tpl_object_t *object);
+
+/**
+ * Get reference count of a TPL object.
+ *
+ * @param object object to get reference count.
+ * @return reference count.
+ *
+ * @see tpl_object_reference()
+ * @see tpl_object_get_reference()
+ */
+int tpl_object_get_reference(tpl_object_t *object);
+
+/**
+ * Get the type of a TPL object.
+ *
+ * @param object object to get type.
+ * @return type of the given object. one of tpl_object_type_t
+ */
+tpl_object_type_t tpl_object_get_type(tpl_object_t *object);
+
+/**
+ * Set user data to a TPL object.
+ *
+ * Users want to relate some data with a TPL object. This function provides
+ * registering a pointer to such data which can be retrieved later using
+ * tpl_object_get_user_data().
+ *
+ * @param object object to set user data to.
+ * @param data pointer to the user data.
+ * @param free_func free function which is used for freeing the user data when the object is destroyed.
+ *
+ * @see tpl_object_get_user_data()
+ */
+void tpl_object_set_user_data(tpl_object_t *object,
+ void *data,
+ tpl_free_func_t free_func);
+
+/**
+ * Get registered user data of a TPL object.
+ *
+ * @param object object to get user data.
+ * @return pointer to the registered user data.
+ *
+ * @see tpl_object_set_user_data()
+ */
+void * tpl_object_get_user_data(tpl_object_t *object);
+
+/**
+ * Create or get TPL display object for the given native display.
+ *
+ * Create a TPL display if there's no already existing TPL display for the
+ * given native display. If given NULL for native_dpy, this function will
+ * return default display.
+ *
+ * @param type backend type of the given native display.
+ * @param native_dpy handle to the native display.
+ * @return pointer to the display on success, NULL on failure.
+ */
+tpl_display_t * tpl_display_get(tpl_backend_type_t type,
+ tpl_handle_t native_dpy);
+
+/**
+ * Bind a client connection(display handle) to the existed TPL display.
+ *
+ * After bound, The TPL display knows a handle of client connection display and
+ * it can recognize client objects (e.g. pixmap surfaces from client
+ * application) which were contained by the client connection. So this function
+ * must be called by the server process (such as compositor) before using
+ * client buffers.
+ *
+ * @param display display to bind a client connection.
+ * @param native_dpy handle of the native client display connection.
+ *
+ * @see tpl_display_unbind_client_display_handle()
+ */
+tpl_bool_t tpl_display_bind_client_display_handle(tpl_display_t *display,
+ tpl_handle_t native_dpy);
+
+/**
+ * Unbind a client connection(display handle) from the existed TPL display.
+ *
+ * After being unbound, the TPL display no longer knows about client
+ * connection, and all resources from the connection can be unreferenced. If
+ * the specified connection was not a bound handle, error occurs.
+ *
+ * @param display display to unbind a client connection.
+ * @param native_dpy handle of the native client display connection.
+ *
+ * @see tpl_display_bind_client_display_handle()
+ */
+tpl_bool_t tpl_display_unbind_client_display_handle(tpl_display_t *display,
+ tpl_handle_t native_dpy);
+
+/**
+ * Get the backend type of a TPL display.
+ *
+ * @param display display to get type.
+ * @return backend type of the given display.
+ *
+ * @see tpl_display_get()
+ */
+tpl_backend_type_t tpl_display_get_backend_type(tpl_display_t *display);
+
+/**
+ * Get file descriptor of the buffer manager for the given TPL display.
+ *
+ * There might be native buffer manager device (ex. DRM). This function exports
+ * such native buffer manager for users to be able to access buffers using the
+ * buffer manager. How returned buffer manager fd is used is fully dependent on
+ * native platform implementation.
+ *
+ * @param display display to get buffer manager fd.
+ * @return file descriptor handle for the buffer manager.
+ */
+int tpl_display_get_bufmgr_fd(tpl_display_t *display);
+
+/**
+ * Get the native display handle which the given TPL display is created for.
+ *
+ * @param display display to get native handle.
+ * @return Handle to the native display.
+ *
+ * @see tpl_display_get()
+ */
+tpl_handle_t tpl_display_get_native_handle(tpl_display_t *display);
+
+/**
+ * Query supported pixel formats for the given TPL display.
+ *
+ * Users might want to know what pixel formats are available on the given
+ * display. This function is used to query such available pixel formats. Give
+ * TPL_DONT_CARE to parameters for size values if any values are acceptable.
+ *
+ * @param display display to query pixel formats.
+ * @param surface_type surface type to query for.
+ * @param red_size Size of the red component in bits.
+ * @param green_size Size of the green component in bits.
+ * @param blue_size Size of the blue component in bits.
+ * @param alpha_size Size of the alpha component in bits.
+ * @param depth_size Size of a pixel in bits (Color depth).
+ * @param native_visual_id Pointer to receive native visual id.
+ * @param is_slow Pointer to receive whether the given config is slow.
+ * @return TPL_TRUE if the given config is supported, TPL_FALSE otherwise.
+ */
+tpl_bool_t tpl_display_query_config(tpl_display_t *display,
+ tpl_surface_type_t surface_type,
+ int red_size,
+ int green_size,
+ int blue_size,
+ int alpha_size,
+ int depth_size,
+ int *native_visual_id,
+ tpl_bool_t *is_slow);
+
+/**
+ * Filter config according to given TPL display.
+ *
+ * This function modifies current config specific to the current given TPL
+ * display.
+ *
+ * @param display display to query pixel formats.
+ * @param visual_id Pointer to receive native visual id.
+ * @param alpha_size Size of the alpha component in bits.
+ * @return TPL_TRUE if the given config has been modified, TPL_FALSE otherwise.
+ */
+tpl_bool_t tpl_display_filter_config(tpl_display_t *display,
+ int *visual_id,
+ int alpha_size);
+
+/**
+ * Flush the TPL display.
+ *
+ * @param display display to flush.
+ *
+ * There might be pending operations on the given TPL display such as X11
+ * native rendering. Flushing TPL display ensures that those pending operations
+ * are done.
+ */
+void tpl_display_flush(tpl_display_t *display);
+
+/**
+ * Create a TPL surface for the given native surface.
+ *
+ * @param display display used for surface creation.
+ * @param handle Handle to the native surface.
+ * @param type Type of the surface (Window or Pixmap).
+ * @param format Pixel format of the surface.
+ * @return Created surface on success, NULL otherwise.
+ */
+tpl_surface_t * tpl_surface_create(tpl_display_t *display,
+ tpl_handle_t handle,
+ tpl_surface_type_t type,
+ tpl_format_t format);
+
+/**
+ * Get the TPL display where the given TPL surface was created from.
+ *
+ * @param surface surface to get display.
+ * @return display of the given surface.
+ *
+ * @see tpl_surface_create()
+ */
+tpl_display_t * tpl_surface_get_display(tpl_surface_t *surface);
+
+/**
+ * Get the native surface handle of the given TPL surface.
+ *
+ * @param surface surface to get native handle.
+ * @return handle to the native surface.
+ *
+ * @see tpl_surface_create()
+ */
+tpl_handle_t tpl_surface_get_native_handle(tpl_surface_t *surface);
+
+/**
+ * Get the type of the given TPL surface.
+ *
+ * @param surface surface to get type.
+ * @return type of the surface.
+ *
+ * @see tpl_surface_create()
+ */
+tpl_surface_type_t tpl_surface_get_type(tpl_surface_t *surface);
+
+/**
+ * Get the current size of the given TPL surface.
+ *
+ * Size of a surface might change when a user resizes window or server resizes
+ * it. TPL updates such size information every time when a buffer is queried
+ * using tpl_surface_get_buffer(). Users have to consider that there might
+ * still be a mismatch between the actual surface size and the cached one.
+ *
+ * @param surface surface to get size.
+ * @param width pointer to receive width value.
+ * @param height pointer to receive height value.
+ */
+void tpl_surface_get_size(tpl_surface_t *surface,
+ int *width,
+ int *height);
+
+/**
+ * Begin a new frame of the given TPL surface.
+ *
+ * TPL forces users to process frames in begin/end manner. Information for a
+ * frame such as current buffer can be queried after the frame has begun via
+ * tpl_surface_begin_frame(). When a frame ends by calling
+ * tpl_surface_end_frame(), the frame is queued. Such queued frames are
+ * consumed one by one in FIFO order when tpl_surface_post() is called.
+ *
+ * If previous frame was not ended, this function will implicitly end previous
+ * one.
+ *
+ * This function has no effect if the surface type is not window.
+ *
+ * @param surface surface to begin a frame.
+ *
+ * @see tpl_surface_end_frame()
+ * @see tpl_surface_validate_frame()
+ * @see tpl_surface_post()
+ * @see tpl_surface_get_buffer()
+ */
+void tpl_surface_begin_frame(tpl_surface_t *surface);
+
+/**
+ * End the current frame of the given TPL surface.
+ *
+ * The current frame will be queued in the "frame queue". Frame interval and
+ * damage region for the frame is copied from surface's current states.
+ *
+ * This function has no effect if the surface type is not window.
+ *
+ * @param surface surface to end a frame.
+ *
+ * @see tpl_surface_begin_frame()
+ * @see tpl_surface_validate_frame()
+ * @see tpl_surface_post()
+ * @see tpl_surface_set_post_interval()
+ * @see tpl_surface_set_damage()
+ */
+void tpl_surface_end_frame(tpl_surface_t *surface);
+
+/**
+ * Validate current frame of the given TPL surface.
+ *
+ * Users should call this function before getting actual final render target
+ * buffer. Calling tpl_surface_get_buffer() after calling this function might
+ * give different output with previous one. Buffer returned after calling this
+ * function is guaranteed to be not changing. This is somewhat weird but
+ * necessary to fully utilize CPU/GPU in a parallel way on tizen DRI2 protocol
+ * implementation (lazy swap).
+ *
+ * @param surface surface to validate its current frame.
+ * @return TPL_TRUE if current buffer is changed due to this validation, TPL_FALSE otherwise.
+ *
+ * @see tpl_surface_begin_frame()
+ * @see tpl_surface_end_frame()
+ * @see tpl_surface_get_buffer()
+ */
+tpl_bool_t tpl_surface_validate_frame(tpl_surface_t *surface);
+
+/**
+ * Get the buffer of the current frame for the given TPL surface.
+ *
+ * This function returns buffer of the current frame. Depending on backend,
+ * communication with the server might be required. Returned buffers are used
+ * for render target to draw current frame.
+ *
+ * Returned buffers are valid until next tpl_surface_get_buffer() returns
+ * TPL_FALSE on reset_buffers parameter. If TPL_TRUE is returned on
+ * reset_buffers, all previously returned buffers should no longer be used.
+ *
+ * @param surface surface to get buffer for the current frame.
+ * @param reset_buffers pointer to receive whether previously returned buffers should be reset or not.
+ * @return buffer for the current frame.
+ *
+ * Calling this function multiple times within a single frame is not guaranteed
+ * to return a same buffer.
+ *
+ * @see tpl_surface_begin_frame()
+ * @see tpl_surface_end_frame()
+ */
+tpl_buffer_t * tpl_surface_get_buffer(tpl_surface_t *surface,
+ tpl_bool_t *reset_buffers);
+
+/**
+ * Post a frame from the frame queue of the given surface.
+ *
+ * This function requests the display server to post a frame. This is the only
+ * function which can dequeue a frame from the frame queue.
+ *
+ * Make sure this function is called exactly once for a frame. Depending on
+ * backend, other TPL functions might wait for this function to be called.
+ * Scheduling post calls on a separate thread is recommended.
+ *
+ * If tpl_surface_end_frame() was not called for the current frame, this
+ * function might implicitly end the current frame.
+ *
+ * @param surface surface to post a frame.
+ *
+ * @see tpl_surface_begin_frame()
+ * @see tpl_surface_end_frame()
+ */
+void tpl_surface_post(tpl_surface_t *surface);
+
+/**
+ * Set frame interval of the given TPL surface.
+ *
+ * Frame interval ensures that only a single frame is posted within the
+ * specified vsync intervals. When a frame ends, the frame's interval is set to
+ * the surface's current interval.
+ *
+ * @param surface surface to set frame interval.
+ * @param interval minimum number of vsync between frames.
+ *
+ * @see tpl_surface_get_post_interval()
+ */
+void tpl_surface_set_post_interval(tpl_surface_t *surface,
+ int interval);
+
+/**
+ * Get frame interval of the given TPL surface.
+ *
+ * @param surface surface to get frame interval.
+ * @return frame interval.
+ *
+ * @see tpl_surface_set_post_interval()
+ */
+int tpl_surface_get_post_interval(tpl_surface_t *surface);
+
+/**
+ * Set damaged region of the given TPL surface.
+ *
+ * Damage information is used for reducing number of pixels composited in the
+ * compositor. When a frame ends, the frame's damage area is copied from the
+ * surface's current damage region. Setting num_rects to 0 or rects to NULL
+ * means entire area is damaged.
+ *
+ * @param surface surface to set damage region.
+ * @param num_rects number of rectangles of the damage region.
+ * @param rects pointer to coordinates of rectangles. x0, y0, w0, h0, x1, y1, w1, h1...
+ *
+ * @see tpl_surface_get_damage()
+ */
+void tpl_surface_set_damage(tpl_surface_t *surface,
+ int num_rects,
+ const int *rects);
+
+/**
+ * Get damaged region of the given TPL surface.
+ *
+ * @param surface surface to get damage region.
+ * @param num_rects Pointer to receive the number of rectangles.
+ * @param rects Pointer to receive the pointer to rectangle coordinate array.
+ *
+ * @see tpl_surface_set_damage()
+ */
+void tpl_surface_get_damage(tpl_surface_t *surface,
+ int *num_rects,
+ const int **rects);
+
+/**
+ * Map the given buffer to the user space address.
+ *
+ * Users can do CPU access to the buffer memory by using this function. It is
+ * recommended to lock the buffer first with appropriate lock usage to avoid
+ * cache coherency problems.
+ *
+ * @param buffer buffer to map.
+ * @param size Size of the area to be mapped. Give 0 for entire buffer.
+ * @return Pointer to the mapped buffer memory.
+ *
+ * @see tpl_buffer_unmap()
+ */
+void * tpl_buffer_map(tpl_buffer_t *buffer,
+ int size);
+
+/**
+ * Unmap the given buffer from the user space address.
+ *
+ * @param buffer buffer to unmap
+ * @param ptr Pointer to the mapped memory. Give NULL if the entire buffer was mapped.
+ * @param size Size of the mapped memory. Give 0 if the entire buffer was mapped.
+ *
+ * @see tpl_buffer_map()
+ */
+void tpl_buffer_unmap(tpl_buffer_t *buffer,
+ void *ptr,
+ int size);
+
+/**
+ * Lock the given buffer.
+ *
+ * Buffer lock is used for synchronizations. The locking is actually done to
+ * the low-level buffer object like dma_buf. So, it is possible that locking
+ * call is blocked although no locking is ever called for the TPL buffer. Other
+ * TPL buffer pointing to the same low-level buffer might be locked or other
+ * process might be holding the lock for the same low-level buffer.
+ *
+ * The lock might work as R/W lock depending on backend.
+ *
+ * @param buffer buffer to lock.
+ * @param usage purpose of the lock.
+ * @return TPL_TRUE on success, TPL_FALSE otherwise.
+ *
+ * @see tpl_buffer_unlock()
+ */
+tpl_bool_t tpl_buffer_lock(tpl_buffer_t *buffer,
+ tpl_lock_usage_t usage);
+
+/**
+ * Unlock the given buffer.
+ *
+ * @param buffer buffer to unlock.
+ *
+ * @see tpl_buffer_lock()
+ */
+void tpl_buffer_unlock(tpl_buffer_t *buffer);
+
+/**
+ * Create a native buffer of the given TPL buffer.
+ *
+ * This function can be used to export a TPL buffer by the returned native
+ * buffer. Some windowing systems need an extra buffer export mechanism between
+ * compositor and application.
+ *
+ * @param buffer buffer to export.
+ * @return A native buffer from the buffer. NULL on error.
+ */
+void *tpl_buffer_create_native_buffer(tpl_buffer_t *buffer);
+
+/**
+ * Get the low-level buffer key of the given TPL buffer.
+ *
+ * It is a common method representing buffers with 32bits or 64bits key. A TPL
+ * buffer internally indicate a platform dependent low-level buffer like
+ * dma_buf. This function retrieves such key to the low-level buffer.
+ *
+ * @param buffer buffer to retrieve the key.
+ * @return Key to the low-level buffer.
+ *
+ * @see tpl_buffer_get_fd()
+ */
+unsigned int tpl_buffer_get_key(tpl_buffer_t *buffer);
+
+/**
+ * Get the low-level buffer fd of the given TPL buffer.
+ *
+ * It is also a common method accessing a buffer via file system. This function
+ * returns file descriptor for the low-level buffer.
+ *
+ * @param buffer buffer to retrieve fd.
+ * @return file descriptor of the low-level buffer.
+ *
+ * @see tpl_buffer_get_key()
+ */
+int tpl_buffer_get_fd(tpl_buffer_t *buffer);
+
+/**
+ * Get the age of the given TPL buffer.
+ *
+ * Buffer age gives us information on content which is already rendered on the
+ * buffer. It is used to do partial update which is an optimization technique
+ * that renders only different area between current frame and previously
+ * rendered buffer content.
+ *
+ * @param buffer buffer to get age.
+ * @return age of the buffer.
+ */
+int tpl_buffer_get_age(tpl_buffer_t *buffer);
+
+/**
+ * Get the TPL surface where the given TPL buffer belongs to.
+ *
+ * @param buffer buffer to get the belonging surface.
+ * @return surface where the given buffer belongs to.
+ *
+ * @see tpl_surface_get_buffer()
+ */
+tpl_surface_t * tpl_buffer_get_surface(tpl_buffer_t *buffer);
+
+/**
+ * Get the size of the given TPL buffer.
+ *
+ * @param buffer buffer to get the size.
+ * @param width pointer to receive the width value.
+ * @param height pointer to receive the height value.
+ */
+void tpl_buffer_get_size(tpl_buffer_t *buffer,
+ int *width,
+ int *height);
+
+/**
+ * Get the color depth of the given TPL buffer.
+ *
+ * @param buffer buffer to get the color depth.
+ * @return color depth of the given buffer.
+ */
+int tpl_buffer_get_depth(tpl_buffer_t *buffer);
+
+/**
+ * Get the pitch value (in bytes) of the given TPL buffer.
+ *
+ * @param buffer buffer to get the pitch.
+ * @return pitch of the given buffer in bytes.
+ */
+int tpl_buffer_get_pitch(tpl_buffer_t *buffer);
+
+/**
+ * Get the ID of the given TPL buffer.
+ *
+ * @param buffer buffer to get the id.
+ * @return id of the given buffer.
+ */
+unsigned int tpl_buffer_get_id(tpl_buffer_t *buffer);
+
+/**
+ * Query information on the given native window.
+ *
+ * @param display display used for query.
+ * @param window handle to the native window.
+ * @param width pointer to receive width of the window.
+ * @param height pointer to receive height of the window.
+ * @param format pointer to receive format of the window.
+ * @param depth color depth to match against the window (backend-specific semantics — confirm with backend).
+ * @param a_size alpha channel size to match against the window (backend-specific semantics — confirm with backend).
+ * @return TPL_TRUE if the window is valid, TPL_FALSE otherwise.
+ */
+tpl_bool_t tpl_get_native_window_info(tpl_display_t *display,
+ tpl_handle_t window,
+ int *width,
+ int *height,
+ tpl_format_t *format,
+ int depth,
+ int a_size);
+
+/**
+ * Query information on the given native pixmap.
+ *
+ * @param display display used for query.
+ * @param pixmap handle to the native pixmap.
+ * @param width pointer to receive width of the pixmap.
+ * @param height pointer to receive height of the pixmap.
+ * @param format pointer to receive format of the pixmap.
+ * @return TPL_TRUE if the pixmap is valid, TPL_FALSE otherwise.
+ */
+tpl_bool_t tpl_get_native_pixmap_info(tpl_display_t *display,
+ tpl_handle_t pixmap,
+ int *width,
+ int *height,
+ tpl_format_t *format);
+
+void tpl_display_wait_native(tpl_display_t *dpy);
+
+#endif /* TPL_H */
\ No newline at end of file
--- /dev/null
+#include "tpl_internal.h"
+
+/* Tear down backend-specific state of the buffer. */
+static void
+__tpl_buffer_fini(tpl_buffer_t *buffer)
+{
+	buffer->backend.fini(buffer);
+}
+
+/* Free callback installed via __tpl_object_init(); runs when the
+ * reference count of the buffer object drops to zero. */
+static void
+__tpl_buffer_free(void *buffer)
+{
+	TPL_LOG(3, "buffer(%p)", buffer);
+	__tpl_buffer_fini((tpl_buffer_t *)buffer);
+	free(buffer);
+}
+
+/* Allocate and initialize a TPL buffer object bound to the given surface.
+ *
+ * The new object starts with reference count 1 and __tpl_buffer_free as its
+ * destructor. Age starts at -1 (unknown). Returns NULL if the backend init
+ * hook fails; the partially built object is released via unreference.
+ *
+ * Fix: key is unsigned int, so log it with %u instead of %d. */
+tpl_buffer_t *
+__tpl_buffer_alloc(tpl_surface_t *surface, unsigned int key, int fd, int width, int height,
+		   int depth, int pitch)
+{
+	tpl_display_t *display;
+	tpl_buffer_t *buffer;
+
+	TPL_ASSERT(surface != NULL);
+
+	buffer = (tpl_buffer_t *)calloc(1, sizeof(tpl_buffer_t));
+	TPL_ASSERT(buffer != NULL);
+
+	__tpl_object_init(&buffer->base, TPL_OBJECT_BUFFER, __tpl_buffer_free);
+
+	buffer->surface = surface;
+	buffer->key = key;
+	buffer->fd = fd;
+	buffer->age = -1;
+
+	buffer->width = width;
+	buffer->height = height;
+	buffer->depth = depth;
+	buffer->pitch = pitch;
+
+	/* Backend initialization. */
+	__tpl_buffer_init_backend(buffer, surface->display->backend.type);
+
+	if (!buffer->backend.init(buffer))
+	{
+		TPL_ERR("backend init");
+		tpl_object_unreference((tpl_object_t *)buffer);
+		return NULL;
+	}
+
+	TPL_LOG(3, "buffer(%p) surface(%p, %p) key:%u fd:%d %dx%d", (void *) buffer, surface, surface->native_handle, key, fd, width, height);
+	return buffer;
+}
+
+/* Re-bind the buffer to another surface (internal use only). */
+void
+__tpl_buffer_set_surface(tpl_buffer_t *buffer, tpl_surface_t *surface)
+{
+	buffer->surface = surface;
+}
+
+/* Map the buffer into user space via the backend, under the object lock.
+ * size: number of bytes to map; 0 maps the entire buffer (see tpl.h). */
+void *
+tpl_buffer_map(tpl_buffer_t *buffer, int size)
+{
+	void *ptr;
+
+	TPL_OBJECT_LOCK(buffer);
+	ptr = buffer->backend.map(buffer, size);
+	TPL_OBJECT_UNLOCK(buffer);
+
+	return ptr;
+}
+
+/* Unmap a previously mapped region; ptr/size of 0 mean the whole buffer. */
+void
+tpl_buffer_unmap(tpl_buffer_t *buffer, void *ptr, int size)
+{
+	TPL_OBJECT_LOCK(buffer);
+	buffer->backend.unmap(buffer, ptr, size);
+	TPL_OBJECT_UNLOCK(buffer);
+}
+
+/* Lock the underlying low-level buffer for the given usage.
+ * NOTE(review): the backend lock may block even if this TPL buffer was never
+ * locked before — another handle to the same low-level buffer may hold it. */
+tpl_bool_t
+tpl_buffer_lock(tpl_buffer_t *buffer, tpl_lock_usage_t usage)
+{
+	tpl_bool_t result;
+
+	TPL_OBJECT_LOCK(buffer);
+	result = buffer->backend.lock(buffer, usage);
+	TPL_OBJECT_UNLOCK(buffer);
+
+	return result;
+}
+
+/* Release a lock taken with tpl_buffer_lock(). */
+void
+tpl_buffer_unlock(tpl_buffer_t *buffer)
+{
+	TPL_OBJECT_LOCK(buffer);
+	buffer->backend.unlock(buffer);
+	TPL_OBJECT_UNLOCK(buffer);
+}
+
+/* Return the low-level buffer key assigned at allocation time. */
+unsigned int
+tpl_buffer_get_key(tpl_buffer_t *buffer)
+{
+	return buffer->key;
+}
+
+/* Return the file descriptor of the low-level buffer. */
+int
+tpl_buffer_get_fd(tpl_buffer_t *buffer)
+{
+	return buffer->fd;
+}
+
+/* Query whether the backend handed back a previously used buffer.
+ * Fix: guard the optional backend hook against NULL, matching the style of
+ * tpl_buffer_create_native_buffer()/tpl_buffer_get_age(); backends are
+ * calloc-initialized, so unimplemented hooks are NULL. */
+tpl_bool_t tpl_buffer_get_reused(tpl_buffer_t *buffer)
+{
+	if (buffer->backend.get_reused_flag != NULL)
+		return buffer->backend.get_reused_flag(buffer);
+	return TPL_FALSE;
+}
+
+/* Return the buffer age (used for partial-update optimizations).
+ * Prefers the backend query when available, otherwise falls back to the
+ * cached value set at allocation/post time.
+ *
+ * Fix: removed the dead store `age = buffer->age;` that was unconditionally
+ * overwritten by the if/else below, and the redundant casts in the lock
+ * macros (TPL_OBJECT_LOCK already casts to tpl_object_t *). */
+int
+tpl_buffer_get_age(tpl_buffer_t *buffer)
+{
+	int age;
+
+	TPL_OBJECT_LOCK(buffer);
+
+	/* Get buffer age from TPL */
+	if (buffer->backend.get_buffer_age != NULL)
+		age = buffer->backend.get_buffer_age(buffer);
+	else
+		age = buffer->age;
+
+	TPL_OBJECT_UNLOCK(buffer);
+
+	return age;
+}
+
+/* Return the surface this buffer was allocated for. */
+tpl_surface_t *
+tpl_buffer_get_surface(tpl_buffer_t *buffer)
+{
+	return buffer->surface;
+}
+
+/* Report buffer dimensions; either out-pointer may be NULL to skip it. */
+void
+tpl_buffer_get_size(tpl_buffer_t *buffer, int *width, int *height)
+{
+	if (width)
+		*width = buffer->width;
+
+	if (height)
+		*height = buffer->height;
+}
+
+/* Return the color depth recorded at allocation time. */
+int
+tpl_buffer_get_depth(tpl_buffer_t *buffer)
+{
+	return buffer->depth;
+}
+
+/* Return the row pitch in bytes recorded at allocation time. */
+int
+tpl_buffer_get_pitch(tpl_buffer_t *buffer)
+{
+	return buffer->pitch;
+}
+
+/* Export a native (windowing-system) buffer for this TPL buffer.
+ * Optional backend hook; returns NULL when the backend does not support it. */
+void *
+tpl_buffer_create_native_buffer(tpl_buffer_t *buffer)
+{
+	if (buffer->backend.create_native_buffer != NULL)
+		return buffer->backend.create_native_buffer(buffer);
+	return NULL;
+}
\ No newline at end of file
--- /dev/null
+#include "tpl_internal.h"
+
+/* Flush one display via its backend; caller must hold the object lock. */
+void
+__tpl_display_flush(tpl_display_t *display)
+{
+	display->backend.flush(display);
+}
+
+/* Tear down backend state and detach the display from the runtime list. */
+static void
+__tpl_display_fini(tpl_display_t *display)
+{
+	display->backend.fini(display);
+	__tpl_runtime_remove_display(display);
+}
+
+/* Free callback installed via __tpl_object_init(); runs at refcount zero. */
+static void
+__tpl_display_free(void *display)
+{
+	__tpl_display_fini((tpl_display_t *)display);
+	free(display);
+}
+
+/* Get (or lazily create) the TPL display for a native display handle.
+ *
+ * Returns an existing connected display when one matches; otherwise chooses
+ * a backend (when type is TPL_BACKEND_UNKNOWN), builds a new display object
+ * with refcount 1, runs backend init, and registers it with the runtime.
+ * Returns NULL if no backend can be chosen or backend init fails. */
+tpl_display_t *
+tpl_display_get(tpl_backend_type_t type, tpl_handle_t native_dpy)
+{
+	tpl_display_t *display;
+
+	/* Search for an already connected display for the given native display. */
+	display = __tpl_runtime_find_display(type, native_dpy);
+
+	if (display != NULL)
+		return display;
+
+	if (type == TPL_BACKEND_UNKNOWN)
+		type = __tpl_display_choose_backend(native_dpy);
+
+	if (type == TPL_BACKEND_UNKNOWN)
+		return NULL;
+
+	display = (tpl_display_t *)calloc(1, sizeof(tpl_display_t));
+	TPL_ASSERT(display != NULL);
+
+	/* Initialize object base class. */
+	__tpl_object_init(&display->base, TPL_OBJECT_DISPLAY, __tpl_display_free);
+
+	/* Initialize display object. */
+	display->native_handle = native_dpy;
+
+	/* Initialize backend. */
+	__tpl_display_init_backend(display, type);
+
+	if (!display->backend.init(display))
+	{
+		tpl_object_unreference((tpl_object_t *)display);
+		return NULL;
+	}
+
+	/* Add it to the runtime. */
+	__tpl_runtime_add_display(display);
+
+	return display;
+}
+
+/* Bind a client-side native display handle; optional backend hook,
+ * returns TPL_FALSE when the backend does not implement it. */
+tpl_bool_t
+tpl_display_bind_client_display_handle(tpl_display_t *display, tpl_handle_t native_dpy)
+{
+	if (display->backend.bind_client_display_handle != NULL)
+		return display->backend.bind_client_display_handle(display, native_dpy);
+	return TPL_FALSE;
+}
+
+/* Inverse of the bind above; same optional-hook convention. */
+tpl_bool_t
+tpl_display_unbind_client_display_handle(tpl_display_t *display, tpl_handle_t native_dpy)
+{
+	if (display->backend.unbind_client_display_handle != NULL)
+		return display->backend.unbind_client_display_handle(display, native_dpy);
+	return TPL_FALSE;
+}
+
+/* Return which backend (DRI2/DRI3/Wayland/...) drives this display. */
+tpl_backend_type_t
+tpl_display_get_backend_type(tpl_display_t *display)
+{
+	TPL_ASSERT(__tpl_object_is_valid(&display->base));
+	return display->backend.type;
+}
+
+/* Return the buffer-manager file descriptor stored on the display. */
+int
+tpl_display_get_bufmgr_fd(tpl_display_t *display)
+{
+	TPL_ASSERT(__tpl_object_is_valid(&display->base));
+	return display->bufmgr_fd;
+}
+
+/* Return the native display handle this object wraps. */
+tpl_handle_t
+tpl_display_get_native_handle(tpl_display_t *display)
+{
+	TPL_ASSERT(__tpl_object_is_valid(&display->base));
+	return display->native_handle;
+}
+
+/* Ask the backend whether a config with the given channel sizes is
+ * supported for the surface type; fills native_visual_id / is_slow. */
+tpl_bool_t
+tpl_display_query_config(tpl_display_t *display,
+			 tpl_surface_type_t surface_type,
+			 int red_size,
+			 int green_size,
+			 int blue_size,
+			 int alpha_size,
+			 int depth_size,
+			 int *native_visual_id,
+			 tpl_bool_t *is_slow)
+{
+	TPL_ASSERT(__tpl_object_is_valid(&display->base));
+	return display->backend.query_config(display,
+					     surface_type,
+					     red_size, green_size, blue_size, alpha_size,
+					     depth_size, native_visual_id, is_slow);
+}
+
+/* Let the backend adjust/reject a visual id for the given alpha size.
+ * Optional hook; returns TPL_FALSE when unimplemented. */
+tpl_bool_t
+tpl_display_filter_config(tpl_display_t *display,
+			  int *visual_id,
+			  int alpha_size)
+{
+	TPL_ASSERT(__tpl_object_is_valid(&display->base));
+	if(display->backend.filter_config != NULL)
+		return display->backend.filter_config(display, visual_id, alpha_size);
+
+	return TPL_FALSE;
+}
+
+/* Flush pending work on one display, or on every registered display
+ * when called with NULL. */
+void
+tpl_display_flush(tpl_display_t *display)
+{
+	if (display == NULL)
+		__tpl_runtime_flush_all_display();
+	else
+	{
+		TPL_OBJECT_LOCK(display);
+		__tpl_display_flush(display);
+		TPL_OBJECT_UNLOCK(display);
+	}
+}
+
+/* Block until the native display has processed outstanding requests.
+ * NOTE(review): wait_native is called unguarded — unlike the optional
+ * hooks above, every backend is expected to provide it; confirm. */
+void
+tpl_display_wait_native(tpl_display_t *dpy)
+{
+	TPL_ASSERT(dpy != NULL);
+
+	dpy->backend.wait_native(dpy);
+}
+
--- /dev/null
+#include "tpl_internal.h"
+
+/* Allocate a zero-initialized frame object.
+ * Fix: declare the empty parameter list as (void) — in C, () declares an
+ * unspecified parameter list, not "no parameters". */
+tpl_frame_t *
+__tpl_frame_alloc(void)
+{
+	tpl_frame_t *frame;
+
+	frame = (tpl_frame_t *)calloc(1, sizeof(tpl_frame_t));
+	TPL_ASSERT(frame != NULL);
+
+	return frame;
+}
+
+/* Release a frame: drop its buffer reference (if any), release the
+ * damage region's heap storage, then free the frame itself. */
+void
+__tpl_frame_free(tpl_frame_t *frame)
+{
+	if (frame->buffer)
+		tpl_object_unreference((tpl_object_t *)frame->buffer);
+
+	tpl_region_fini(&frame->damage);
+	free(frame);
+}
+
+/* Replace the frame's buffer reference.
+ * Fix: take the reference on the new buffer BEFORE dropping the old one.
+ * The original unreferenced first, so re-setting the same buffer while the
+ * frame held the last reference would free it and then reference freed
+ * memory. Ref-before-unref makes self-assignment safe. */
+void
+__tpl_frame_set_buffer(tpl_frame_t *frame, tpl_buffer_t *buffer)
+{
+	tpl_object_reference((tpl_object_t *)buffer);
+
+	if (frame->buffer)
+		tpl_object_unreference((tpl_object_t *)frame->buffer);
+
+	frame->buffer = buffer;
+}
--- /dev/null
+#ifndef TPL_INTERNAL_H
+#define TPL_INTERNAL_H
+
+#include "tpl.h"
+#include <stdlib.h>
+#include <pthread.h>
+
+#if defined(WINSYS_DRI2) || defined(WINSYS_DRI3)
+#include <xcb/xcb.h>
+#endif
+
+#include "tpl_utils.h"
+
+#define TPL_OBJECT_LOCK(object) __tpl_object_lock((tpl_object_t *)(object))
+#define TPL_OBJECT_UNLOCK(object) __tpl_object_unlock((tpl_object_t *)(object))
+
+typedef struct _tpl_runtime tpl_runtime_t;
+typedef struct _tpl_display_backend tpl_display_backend_t;
+typedef struct _tpl_surface_backend tpl_surface_backend_t;
+typedef struct _tpl_buffer_backend tpl_buffer_backend_t;
+typedef struct _tpl_frame tpl_frame_t;
+
+/* NOTE: ARM Mali Midgard/Utgard DDKs currently use unsigned int as their
+ * atomic integer type. If a future DDK release changes that type, this
+ * typedef must be changed to match or memory corruption will occur
+ * (see the porting notes at the top of the tree). */
+typedef volatile unsigned int tpl_util_osu_atomic;
+/* Opaque handles to the DDK-provided pointer-dictionary implementation. */
+typedef void * tpl_utils_ptrdict;
+typedef void * tpl_utils_ptrdict_iter;
+
+/* Lifecycle of a frame: READY (begun, being rendered) -> QUEUED (enqueued
+ * on the surface's frame queue) -> POSTED (presented by the backend). */
+typedef enum
+{
+	TPL_FRAME_STATE_INVALID,
+	TPL_FRAME_STATE_READY,
+	TPL_FRAME_STATE_QUEUED,
+	TPL_FRAME_STATE_POSTED
+} tpl_frame_state_t;
+
+/* One frame queued for presentation on a surface. */
+struct _tpl_frame
+{
+	tpl_buffer_t *buffer;	/* referenced; dropped in __tpl_frame_free() */
+	int interval;		/* vsync interval copied from the surface at enqueue */
+	tpl_region_t damage;	/* damage copied from the surface at enqueue */
+	tpl_frame_state_t state;
+};
+
+/* Function table supplied by the DDK's EGL porting layer via
+ * tpl_set_egl_funcs(); must be registered before TPL is used (see porting
+ * notes — failure to do so results in a segmentation fault). */
+struct _tpl_egl_funcs
+{
+	/* Sizes let TPL verify its typedefs against the DDK's actual types. */
+	int atomic_int_size;
+	int ptrdict_size;
+	int ptrdict_iter_size;
+
+	void (*yield)();
+	int (*clz)(int input);
+
+	int (*atomic_get)(const tpl_util_osu_atomic * const atom);
+	void (*atomic_set)(tpl_util_osu_atomic * const atom, int val);
+	int (*atomic_inc)(tpl_util_osu_atomic * const atom);
+	int (*atomic_dec)(tpl_util_osu_atomic * const atom);
+
+	tpl_utils_ptrdict (*ptrdict_init)(tpl_utils_ptrdict d, void *mem_alloc_context, void (*allocfunc)(void *, size_t), void (*freefunc)(void *));
+	tpl_bool_t (*ptrdict_insert)(tpl_utils_ptrdict d, void *name, void *data);
+	void * (*ptrdict_lookup_key)(tpl_utils_ptrdict d, void *key, void **value);
+	void (*ptrdict_term)(tpl_utils_ptrdict d);
+	void (*ptrdict_remove)(tpl_utils_ptrdict d, void *name);
+	void (*ptrdict_iter_init)(tpl_utils_ptrdict_iter *it, tpl_utils_ptrdict d);
+	void * (*ptrdict_next)(tpl_utils_ptrdict_iter *it, void **value);
+};
+
+/* Per-backend vtable for display operations. Hooks checked for NULL by
+ * callers (bind/unbind, filter_config) are optional; the rest are expected
+ * to be set by the backend's init function. */
+struct _tpl_display_backend
+{
+	tpl_backend_type_t type;
+	void *data;	/* backend-private state */
+
+	tpl_bool_t (*init)(tpl_display_t *display);
+	void (*fini)(tpl_display_t *display);
+
+	tpl_bool_t (*bind_client_display_handle)(tpl_display_t *display, tpl_handle_t native_dpy);
+	tpl_bool_t (*unbind_client_display_handle)(tpl_display_t *display, tpl_handle_t native_dpy);
+
+	tpl_bool_t (*query_config)(tpl_display_t *display,
+				   tpl_surface_type_t surface_type, int red_bits,
+				   int green_bits, int blue_bits, int alpha_bits,
+				   int color_depth, int *native_visual_id, tpl_bool_t *is_slow);
+	tpl_bool_t (*filter_config)(tpl_display_t *display, int *visual_id, int alpha_bits);
+
+	tpl_bool_t (*get_window_info)(tpl_display_t *display, tpl_handle_t window,
+				      int *width, int *height, tpl_format_t *format, int depth,int a_size);
+	tpl_bool_t (*get_pixmap_info)(tpl_display_t *display, tpl_handle_t pixmap,
+				      int *width, int *height, tpl_format_t *format);
+
+	void (*flush)(tpl_display_t *display);
+	void (*wait_native)(tpl_display_t *display);
+
+};
+
+/* Per-backend vtable for surface operations (frame lifecycle + post). */
+struct _tpl_surface_backend
+{
+	tpl_backend_type_t type;
+	void *data;	/* backend-private state */
+
+	tpl_bool_t (*init)(tpl_surface_t *surface);
+	void (*fini)(tpl_surface_t *surface);
+
+	void (*begin_frame)(tpl_surface_t *surface);
+	void (*end_frame)(tpl_surface_t *surface);
+	tpl_bool_t (*validate_frame)(tpl_surface_t *surface);
+
+	tpl_buffer_t * (*get_buffer)(tpl_surface_t *surface, tpl_bool_t *reset_buffers);
+	void (*post)(tpl_surface_t *surface, tpl_frame_t *frame);
+};
+
+/* Per-backend vtable for buffer operations. create_native_buffer,
+ * get_buffer_age and get_reused_flag are optional (NULL-checked by callers).
+ *
+ * Fix: added the get_reused_flag member — tpl_buffer.c's
+ * tpl_buffer_get_reused() calls buffer->backend.get_reused_flag, but the
+ * member was missing from this struct. */
+struct _tpl_buffer_backend
+{
+	tpl_backend_type_t type;
+	void *data;	/* backend-private state */
+	unsigned int flags;
+
+	tpl_bool_t (*init)(tpl_buffer_t *buffer);
+	void (*fini)(tpl_buffer_t *buffer);
+
+	void * (*map)(tpl_buffer_t *buffer, int size);
+	void (*unmap)(tpl_buffer_t *buffer, void *ptr, int size);
+
+	tpl_bool_t (*lock)(tpl_buffer_t *buffer, tpl_lock_usage_t usage);
+	void (*unlock)(tpl_buffer_t *buffer);
+
+	void * (*create_native_buffer)(tpl_buffer_t *buffer);
+	int (*get_buffer_age)(tpl_buffer_t *buffer);
+	tpl_bool_t (*get_reused_flag)(tpl_buffer_t *buffer);
+};
+
+/* Common base of all TPL objects: atomically reference-counted, with a
+ * per-object mutex (used by TPL_OBJECT_LOCK/UNLOCK) and an optional
+ * user-data slot with its own free function. */
+struct _tpl_object
+{
+	tpl_object_type_t type;
+	tpl_util_osu_atomic reference;	/* starts at 1; object freed when it hits 0 */
+	tpl_free_func_t free;		/* destructor installed by __tpl_object_init() */
+	pthread_mutex_t mutex;
+
+	struct {
+		void *data;
+		tpl_free_func_t free;	/* called on data in __tpl_object_fini() */
+	} user_data;
+};
+
+/* A connection to a native display, driven by one backend. */
+struct _tpl_display
+{
+	tpl_object_t base;
+
+	tpl_handle_t native_handle;
+
+	int bufmgr_fd;
+	tpl_display_backend_t backend;
+#if defined(WINSYS_DRI2) || defined(WINSYS_DRI3)
+	xcb_connection_t *xcb_connection;
+#endif
+};
+
+/* A renderable surface (window/pixmap) on a display. */
+struct _tpl_surface
+{
+	tpl_object_t base;
+
+	tpl_display_t *display;
+	tpl_handle_t native_handle;
+	tpl_surface_type_t type;
+	tpl_format_t format;
+	int width, height;	/* updated from the buffer in tpl_surface_get_buffer() */
+
+	tpl_frame_t *frame;	/* frame currently being built; NULL between frames */
+	int post_interval;	/* copied into each frame at enqueue time */
+	tpl_region_t damage;	/* copied into each frame at enqueue time */
+	tpl_list_t frame_queue;	/* frames enqueued but not yet posted */
+
+	tpl_surface_backend_t backend;
+};
+
+/* A renderable buffer belonging to a surface. */
+struct _tpl_buffer
+{
+	tpl_object_t base;
+
+	tpl_surface_t *surface;
+	unsigned int key;	/* low-level buffer key */
+	int fd;			/* low-level buffer file descriptor */
+	int age;		/* -1 until known */
+
+	int width;
+	int height;
+	int depth;
+	int pitch;
+
+	tpl_buffer_backend_t backend;
+};
+
+/* Object functions. */
+tpl_bool_t __tpl_object_is_valid(tpl_object_t *object);
+void __tpl_object_init(tpl_object_t *object, tpl_object_type_t type, tpl_free_func_t free_func);
+void __tpl_object_lock(tpl_object_t *object);
+void __tpl_object_unlock(tpl_object_t *object);
+
+/* Frame functions. */
+tpl_frame_t * __tpl_frame_alloc();
+void __tpl_frame_free(tpl_frame_t *frame);
+
+void __tpl_frame_set_buffer(tpl_frame_t *frame, tpl_buffer_t *buffer);
+
+/* Display functions. */
+tpl_handle_t __tpl_display_get_native_handle(tpl_display_t *display);
+void __tpl_display_flush(tpl_display_t *display);
+
+/* Surface functions. */
+tpl_frame_t * __tpl_surface_get_latest_frame(tpl_surface_t *surface);
+void __tpl_surface_wait_all_frames(tpl_surface_t *surface);
+
+void __tpl_surface_set_backend_data(tpl_surface_t *surface, void *data);
+void * __tpl_surface_get_backend_data(tpl_surface_t *surface);
+
+/* Buffer functions. */
+tpl_buffer_t * __tpl_buffer_alloc(tpl_surface_t *surface, unsigned int key, int fd, int width, int height, int depth, int pitch);
+void __tpl_buffer_set_surface(tpl_buffer_t *buffer, tpl_surface_t *surface);
+
+/* Runtime functions. */
+tpl_display_t * __tpl_runtime_find_display(tpl_backend_type_t type, tpl_handle_t native_display);
+void __tpl_runtime_add_display(tpl_display_t *display);
+void __tpl_runtime_remove_display(tpl_display_t *display);
+void __tpl_runtime_flush_all_display();
+
+/* Backend initialization functions. */
+tpl_backend_type_t __tpl_display_choose_backend(tpl_handle_t native_dpy);
+
+tpl_bool_t __tpl_display_choose_backend_wayland(tpl_handle_t native_dpy);
+tpl_bool_t __tpl_display_choose_backend_x11_dri2(tpl_handle_t native_dpy);
+tpl_bool_t __tpl_display_choose_backend_x11_dri3(tpl_handle_t native_dpy);
+
+void __tpl_display_init_backend(tpl_display_t *display, tpl_backend_type_t type);
+void __tpl_surface_init_backend(tpl_surface_t *surface, tpl_backend_type_t type);
+void __tpl_buffer_init_backend(tpl_buffer_t *buffer, tpl_backend_type_t type);
+
+void __tpl_display_init_backend_wayland(tpl_display_backend_t *backend);
+void __tpl_display_init_backend_x11_dri2(tpl_display_backend_t *backend);
+void __tpl_display_init_backend_x11_dri3(tpl_display_backend_t *backend);
+
+void __tpl_surface_init_backend_wayland(tpl_surface_backend_t *backend);
+void __tpl_surface_init_backend_x11_dri2(tpl_surface_backend_t *backend);
+void __tpl_surface_init_backend_x11_dri3(tpl_surface_backend_t *backend);
+
+void __tpl_buffer_init_backend_wayland(tpl_buffer_backend_t *backend);
+void __tpl_buffer_init_backend_x11_dri2(tpl_buffer_backend_t *backend);
+void __tpl_buffer_init_backend_x11_dri3(tpl_buffer_backend_t *backend);
+
+/* DDK dependent functions */
+void tpl_util_sys_yield(void);
+int tpl_util_clz(int input);
+int tpl_util_osu_atomic_get(const tpl_util_osu_atomic * const atom);
+void tpl_util_osu_atomic_set(tpl_util_osu_atomic * const atom, int val);
+int tpl_util_osu_atomic_inc( tpl_util_osu_atomic * const atom );
+int tpl_util_osu_atomic_dec( tpl_util_osu_atomic * const atom );
+tpl_utils_ptrdict tpl_utils_ptrdict_allocate(void (*freefunc)(void *));
+tpl_bool_t tpl_utils_ptrdict_insert(tpl_utils_ptrdict d, void *name, void *data);
+void *tpl_utils_ptrdict_get(tpl_utils_ptrdict d, void *name);
+void tpl_utils_ptrdict_free(tpl_utils_ptrdict d);
+void tpl_utils_ptrdict_remove(tpl_utils_ptrdict d, void *name);
+void tpl_utils_ptrdict_iterate_init(tpl_utils_ptrdict d, tpl_utils_ptrdict_iter it);
+void *tpl_utils_ptrdict_next( tpl_utils_ptrdict_iter it, void **value );
+
+#endif /* TPL_INTERNAL_H */
--- /dev/null
+#include "tpl_internal.h"
+#include <pthread.h>
+
+/* An object is valid while its reference count is non-zero. */
+tpl_bool_t
+__tpl_object_is_valid(tpl_object_t *object)
+{
+	return tpl_util_osu_atomic_get(&object->reference) != 0;
+}
+
+/* Initialize the common object base: type, destructor, refcount=1, mutex. */
+void
+__tpl_object_init(tpl_object_t *object, tpl_object_type_t type, tpl_free_func_t free_func)
+{
+	object->type = type;
+	object->free = free_func;
+	tpl_util_osu_atomic_set(&object->reference, 1);
+	pthread_mutex_init(&object->mutex, NULL);
+}
+
+/* Tear down the object base; also releases user data via its free func. */
+void
+__tpl_object_fini(tpl_object_t *object)
+{
+	pthread_mutex_destroy(&object->mutex);
+
+	if (object->user_data.free)
+		object->user_data.free(object->user_data.data);
+}
+
+/* Acquire the per-object mutex (see TPL_OBJECT_LOCK). */
+void
+__tpl_object_lock(tpl_object_t *object)
+{
+	TPL_ASSERT(__tpl_object_is_valid(object));
+	pthread_mutex_lock(&object->mutex);
+}
+
+/* Release the per-object mutex (see TPL_OBJECT_UNLOCK). */
+void
+__tpl_object_unlock(tpl_object_t *object)
+{
+	TPL_ASSERT(__tpl_object_is_valid(object));
+	pthread_mutex_unlock(&object->mutex);
+}
+
+/* Take a reference; returns the value reported by the DDK atomic_inc.
+ * NOTE(review): whether that is the pre- or post-increment value depends
+ * on the DDK's tpl_util_osu_atomic_inc — not visible here. */
+int
+tpl_object_reference(tpl_object_t *object)
+{
+	TPL_ASSERT(__tpl_object_is_valid(object));
+	return (int) tpl_util_osu_atomic_inc(&object->reference);
+}
+
+/* Drop a reference; when the count reaches zero, run the base fini and
+ * the destructor installed at init time. Returns the remaining count. */
+int
+tpl_object_unreference(tpl_object_t *object)
+{
+	int reference;
+
+	TPL_ASSERT(__tpl_object_is_valid(object));
+
+	reference = (int)tpl_util_osu_atomic_dec(&object->reference);
+
+	if (reference == 0)
+	{
+		__tpl_object_fini(object);
+		object->free(object);
+	}
+
+	return reference;
+}
+
+/* Return the current reference count. */
+int
+tpl_object_get_reference(tpl_object_t *object)
+{
+	TPL_ASSERT(__tpl_object_is_valid(object));
+	return (int)tpl_util_osu_atomic_get(&object->reference);
+}
+
+/* Return the object's type tag (display/surface/buffer). */
+tpl_object_type_t
+tpl_object_get_type(tpl_object_t *object)
+{
+	TPL_ASSERT(__tpl_object_is_valid(object));
+	return object->type;
+}
+
+/* Attach opaque user data and a free function (invoked on object fini).
+ * NOTE(review): replacing existing user data does not free the old value. */
+void
+tpl_object_set_user_data(tpl_object_t *object, void *data, tpl_free_func_t free_func)
+{
+	TPL_ASSERT(__tpl_object_is_valid(object));
+
+	__tpl_object_lock(object);
+	object->user_data.data = data;
+	object->user_data.free = free_func;
+	__tpl_object_unlock(object);
+}
+
+/* Return the user data previously attached, under the object lock. */
+void *
+tpl_object_get_user_data(tpl_object_t *object)
+{
+	void *data;
+
+	TPL_ASSERT(__tpl_object_is_valid(object));
+	__tpl_object_lock(object);
+	data = object->user_data.data;
+	__tpl_object_unlock(object);
+
+	return data;
+}
--- /dev/null
+#include "tpl_utils.h"
+#include <string.h>
+
+/* Initialize a region in place: zero rects, backed by the inline static
+ * array so small regions never touch the heap.
+ * Fix: repaired mis-encoded "&reg" sequences that had been mangled into
+ * the '®' character ("&region" -> "®ion"), which does not compile. */
+void
+tpl_region_init(tpl_region_t *region)
+{
+	region->num_rects = 0;
+
+	/* tpl_region_t will initially provide TPL_MIN_REGION_RECTS number of
+	   storage space for rects after which heap memory will be allocated
+	   if the number of required space exceeds TPL_MIN_REGION_RECTS
+	 */
+	region->rects = &region->rects_static[0];
+	region->num_rects_allocated = TPL_MIN_REGION_RECTS;
+
+	TPL_LOG(3, "region:%p {%d, %p, %p, %d}", region, region->num_rects, region->rects,
+		&region->rects_static[0], region->num_rects_allocated);
+}
+
+/* Release heap storage held by a region; safe to call on a region that
+ * still points at its inline static array.
+ * Fix: repaired mis-encoded "®ion" back to "&region" (encoding mangling). */
+void
+tpl_region_fini(tpl_region_t *region)
+{
+	TPL_LOG(3, "region:%p {%d, %p, %p, %d}", region, region->num_rects, region->rects,
+		&region->rects_static[0], region->num_rects_allocated);
+
+	if (region != NULL && region->rects != NULL &&
+	    region->rects != &region->rects_static[0])
+	{
+		free(region->rects);
+	}
+}
+
+/* Heap-allocate and initialize a new empty region; NULL on OOM. */
+tpl_region_t *
+tpl_region_alloc()
+{
+	tpl_region_t *region;
+
+	region = (tpl_region_t *)calloc(1, sizeof(tpl_region_t));
+
+	if (region == NULL)
+	{
+		TPL_ASSERT(TPL_FALSE);
+		return NULL;
+	}
+
+	tpl_region_init(region);
+	return region;
+}
+
+/* Destroy a region allocated with tpl_region_alloc(); NULL-safe, and
+ * nulls the caller's pointer to guard against reuse. */
+void
+tpl_region_free(tpl_region_t **region)
+{
+	if (*region == NULL)
+		return;
+
+	tpl_region_fini(*region);
+	free(*region);
+	*region = NULL;
+}
+
+/* True when the region contains no rectangles.
+ * Fix: repaired mis-encoded "®ion" back to "&region" (encoding mangling). */
+tpl_bool_t
+tpl_region_is_empty(const tpl_region_t *region)
+{
+	TPL_LOG(3, "region:%p {%d, %p, %p, %d}\n", region, region->num_rects,
+		region->rects, &region->rects_static[0], region->num_rects_allocated);
+
+	return (region->num_rects == 0);
+}
+
+/* Copy src's rectangles into dst (dst grows its storage as needed). */
+void
+tpl_region_copy(tpl_region_t *dst, const tpl_region_t *src)
+{
+	tpl_region_set_rects(dst, src->num_rects, src->rects);
+}
+
+/* Replace the region's rectangles: num_rects quadruples (x, y, w, h).
+ *
+ * Fixes:
+ * - repaired mis-encoded "®ion" back to "&region" (encoding mangling);
+ * - on malloc failure the region previously kept a dangling/NULL rects
+ *   pointer and a stale num_rects_allocated; now it falls back to the
+ *   inline static storage and an empty rect list so it stays usable. */
+void
+tpl_region_set_rects(tpl_region_t *region, int num_rects, const int *rects)
+{
+	TPL_LOG(3, "region:%p {%d, %p, %p, %d}, num_rects:%d, rects:%p\n", region,
+		region->num_rects, region->rects, &region->rects_static[0],
+		region->num_rects_allocated, num_rects, rects);
+
+	/* allocate memory if the number of rects exceed the allocated memory */
+	if (num_rects > region->num_rects_allocated)
+	{
+		if (region->rects != &region->rects_static[0])
+			free(region->rects);
+
+		region->rects = (int *)malloc(num_rects * 4 * sizeof(int));
+
+		if (region->rects == NULL)
+		{
+			/* Keep the region consistent on OOM. */
+			region->rects = &region->rects_static[0];
+			region->num_rects_allocated = TPL_MIN_REGION_RECTS;
+			region->num_rects = 0;
+			TPL_ASSERT(TPL_FALSE);
+			return;
+		}
+
+		region->num_rects_allocated = num_rects;
+	}
+
+	region->num_rects = num_rects;
+	memcpy(region->rects, rects, num_rects * 4 * sizeof(int));
+}
--- /dev/null
+#include "tpl_internal.h"
+
+/* Tear down a surface: release damage storage, drain the frame queue
+ * (freeing each queued frame), free a still-open frame, then let the
+ * backend clean up. */
+static void
+__tpl_surface_fini(tpl_surface_t *surface)
+{
+	tpl_region_fini(&surface->damage);
+	tpl_list_fini(&surface->frame_queue, (tpl_free_func_t)__tpl_frame_free);
+
+	if (surface->frame)
+		__tpl_frame_free(surface->frame);
+
+	surface->backend.fini(surface);
+}
+
+/* Free callback installed via __tpl_object_init(); runs at refcount zero. */
+static void
+__tpl_surface_free(void *data)
+{
+	__tpl_surface_fini((tpl_surface_t *)data);
+	free(data);
+}
+
+/* Snapshot the surface's post interval and damage into the current frame,
+ * push it onto the frame queue (state QUEUED), notify the backend via
+ * end_frame, and clear surface->frame. Caller holds the surface lock and
+ * guarantees surface->frame is non-NULL. */
+static void
+__tpl_surface_enqueue_frame(tpl_surface_t *surface)
+{
+	/* Set swap attributes. */
+	surface->frame->interval = surface->post_interval;
+	tpl_region_copy(&surface->frame->damage, &surface->damage);
+
+	/* Enqueue the frame object. */
+	tpl_list_push_back(&surface->frame_queue, surface->frame);
+	surface->frame->state = TPL_FRAME_STATE_QUEUED;
+
+	/* Reset surface frame to NULL. */
+	surface->backend.end_frame(surface);
+	surface->frame = NULL;
+}
+
+/* Return the most recently enqueued frame, or NULL if the queue is empty. */
+tpl_frame_t *
+__tpl_surface_get_latest_frame(tpl_surface_t *surface)
+{
+	if (tpl_list_is_empty(&surface->frame_queue))
+		return NULL;
+
+	return (tpl_frame_t *)tpl_list_get_back(&surface->frame_queue);
+}
+
+/* Busy-wait (yielding) until the frame queue drains. The surface lock is
+ * dropped around each yield so other threads can post frames. */
+void
+__tpl_surface_wait_all_frames(tpl_surface_t *surface)
+{
+	while (!tpl_list_is_empty(&surface->frame_queue))
+	{
+		TPL_OBJECT_UNLOCK(surface);
+		tpl_util_sys_yield();
+		TPL_OBJECT_LOCK(surface);
+	}
+}
+
+/* Create a surface object for a native window/pixmap on the given display.
+ * Starts with refcount 1 and post_interval 1; returns NULL when the
+ * backend's init hook fails (the partial object is unreferenced). */
+tpl_surface_t *
+tpl_surface_create(tpl_display_t *display, tpl_handle_t handle, tpl_surface_type_t type, tpl_format_t format)
+{
+	tpl_surface_t *surface;
+
+	TPL_ASSERT(display != NULL);
+
+	surface = (tpl_surface_t *)calloc(1, sizeof(tpl_surface_t));
+	TPL_ASSERT(surface != NULL);
+
+	/* NOTE(review): this logs damage fields before tpl_region_init() runs;
+	 * they are calloc-zeroed at this point, so the values are all 0/NULL. */
+	TPL_LOG(3, "surface->damage:%p {%d, %p, %p, %d}\n", &surface->damage, surface->damage.num_rects,
+		surface->damage.rects, &surface->damage.rects_static[0], surface->damage.num_rects_allocated);
+
+	__tpl_object_init(&surface->base, TPL_OBJECT_SURFACE, __tpl_surface_free);
+
+	surface->display = display;
+	surface->native_handle = handle;
+	surface->type = type;
+	surface->format = format;
+
+	surface->frame = NULL;
+	surface->post_interval = 1;
+
+	tpl_region_init(&surface->damage);
+	tpl_list_init(&surface->frame_queue);
+
+	/* Intialize backend. */
+	__tpl_surface_init_backend(surface, display->backend.type);
+
+	if (!surface->backend.init(surface))
+	{
+		tpl_object_unreference(&surface->base);
+		return NULL;
+	}
+
+	return surface;
+}
+
+/* Return the display this surface was created on. */
+tpl_display_t *
+tpl_surface_get_display(tpl_surface_t *surface)
+{
+	return surface->display;
+}
+
+/* Return the native window/pixmap handle this surface wraps. */
+tpl_handle_t
+tpl_surface_get_native_handle(tpl_surface_t *surface)
+{
+	return surface->native_handle;
+}
+
+/* Return the surface type (window or pixmap). */
+tpl_surface_type_t
+tpl_surface_get_type(tpl_surface_t *surface)
+{
+	return surface->type;
+}
+
+/* Report the last known surface size; either out-pointer may be NULL.
+ * Size is updated when a buffer is fetched in tpl_surface_get_buffer(). */
+void
+tpl_surface_get_size(tpl_surface_t *surface, int *width, int *height)
+{
+	if (width)
+		*width = surface->width;
+
+	if (height)
+		*height = surface->height;
+}
+
+/* Start a new frame on a window surface: flush any un-queued previous
+ * frame, allocate a fresh frame in READY state, and notify the backend.
+ * No-op for non-window surfaces. */
+void
+tpl_surface_begin_frame(tpl_surface_t *surface)
+{
+	TRACE_BEGIN("TPL:BEGINFRAME");
+
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+	{
+		TRACE_END();
+		return;
+	}
+
+	TPL_OBJECT_LOCK(surface);
+
+	/* Queue previous frame if it has not been queued. */
+	if (surface->frame)
+		__tpl_surface_enqueue_frame(surface);
+
+	/* Allocate a new frame. */
+	surface->frame = __tpl_frame_alloc();
+	TPL_ASSERT(surface->frame != NULL);
+
+	surface->frame->state = TPL_FRAME_STATE_READY;
+
+	/* There might be some frames which is enqueued but not posted. Some backend requires native
+	 * surface to be posted to be able to get the next output buffer (i.e. x11_dri2). Runtime
+	 * just request buffer for the frame and it is totally upto backend if it calls post
+	 * internally or not.
+	 *
+	 * In case of backend calling internal post, backend must mark the frame as posted.
+	 * tpl_surface_post() will skip calling backend post if the frame is marked as posted and
+	 * it will be just removed from the queue. */
+
+	/* Let backend handle the new frame event. */
+	surface->backend.begin_frame(surface);
+
+	TPL_OBJECT_UNLOCK(surface);
+	TRACE_END();
+}
+
+/* Finish the current frame (if any) and enqueue it for posting.
+ * No-op for non-window surfaces.
+ *
+ * Fix: the non-window early return now calls TRACE_END() so the
+ * TRACE_BEGIN is balanced, matching tpl_surface_begin_frame(),
+ * tpl_surface_validate_frame() and tpl_surface_post(). */
+void
+tpl_surface_end_frame(tpl_surface_t *surface)
+{
+	TRACE_BEGIN("TPL:ENDFRAME");
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+	{
+		TRACE_END();
+		return;
+	}
+
+	TPL_OBJECT_LOCK(surface);
+
+	TPL_LOG(3, "surface->frame:%p, surface->damage:%p, surface->frame->damage:%p",
+		surface->frame, &surface->damage, (surface->frame)?(&surface->frame->damage):NULL);
+
+	if (surface->frame)
+		__tpl_surface_enqueue_frame(surface);
+
+	TPL_OBJECT_UNLOCK(surface);
+	TRACE_END();
+}
+
+/* Ask the backend whether the current frame is still valid.
+ * Returns TPL_TRUE for non-window surfaces (nothing to validate).
+ * Requires an open frame (asserted). */
+tpl_bool_t
+tpl_surface_validate_frame(tpl_surface_t *surface)
+{
+	tpl_bool_t was_valid = TPL_TRUE;
+
+	TRACE_BEGIN("TPL:VALIDATEFRAME");
+
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+	{
+		TRACE_END();
+		return was_valid;
+	}
+
+	TPL_OBJECT_LOCK(surface);
+	TPL_ASSERT(surface->frame != NULL);
+
+	if (!surface->backend.validate_frame(surface))
+		was_valid = TPL_FALSE;
+
+	TPL_OBJECT_UNLOCK(surface);
+	TRACE_END();
+	return was_valid;
+}
+
+/* Set the vsync interval applied to frames ended after this call.
+ * No-op for non-window surfaces. */
+void
+tpl_surface_set_post_interval(tpl_surface_t *surface, int interval)
+{
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+		return;
+
+	TPL_OBJECT_LOCK(surface);
+	surface->post_interval = interval;
+	TPL_OBJECT_UNLOCK(surface);
+}
+
+/* Return the current post interval; 0 for non-window surfaces. */
+int
+tpl_surface_get_post_interval(tpl_surface_t *surface)
+{
+	int interval;
+
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+		return 0;
+
+	TPL_OBJECT_LOCK(surface);
+	interval = surface->post_interval;
+	TPL_OBJECT_UNLOCK(surface);
+
+	return interval;
+}
+
+void
+tpl_surface_set_damage(tpl_surface_t *surface, int num_rects, const int *rects)
+{
+	/* Damage tracking applies only to window surfaces. */
+	if (surface->type == TPL_SURFACE_TYPE_WINDOW)
+	{
+		TPL_OBJECT_LOCK(surface);
+		tpl_region_set_rects(&surface->damage, num_rects, rects);
+		TPL_OBJECT_UNLOCK(surface);
+	}
+}
+
+void
+tpl_surface_get_damage(tpl_surface_t *surface, int *num_rects, const int **rects)
+{
+	/* Robustness: tolerate NULL out-parameters, matching the convention of
+	 * the backend query functions (e.g. get_window_info checks each one). */
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+	{
+		if (num_rects != NULL) *num_rects = 0;
+		if (rects != NULL) *rects = NULL;
+		return;
+	}
+
+	TPL_OBJECT_LOCK(surface);
+	if (num_rects != NULL) *num_rects = surface->damage.num_rects;
+	if (rects != NULL) *rects = surface->damage.rects;
+	TPL_OBJECT_UNLOCK(surface);
+}
+
+/* Obtain the buffer to render into from the backend.
+ * On success the surface size is refreshed from the buffer and, if a frame
+ * is in progress, the buffer is bound to that frame. Returns NULL when the
+ * backend cannot provide a buffer.
+ * NOTE(review): unlike the other frame entry points there is no
+ * surface->type guard here — presumably pixmap surfaces may also fetch a
+ * buffer; confirm against callers. */
+tpl_buffer_t *
+tpl_surface_get_buffer(tpl_surface_t *surface, tpl_bool_t *reset_buffers)
+{
+	tpl_buffer_t *buffer = NULL;
+
+	TRACE_BEGIN("TPL:GETBUFFER");
+	TPL_OBJECT_LOCK(surface);
+
+	buffer = surface->backend.get_buffer(surface, reset_buffers);
+
+	if(buffer != NULL)
+	{
+		/* Update size of the surface. */
+		surface->width = buffer->width;
+		surface->height = buffer->height;
+
+		/* Bind the buffer to the in-progress frame, if any. */
+		if (surface->frame)
+			__tpl_frame_set_buffer(surface->frame, buffer);
+	}
+
+	TPL_OBJECT_UNLOCK(surface);
+	TRACE_END();
+
+	return buffer;
+}
+
+/* Present the oldest queued frame. If the queue is empty, the current
+ * (not-yet-enqueued) frame is enqueued first, then dequeued and handed to
+ * the backend unless the backend already posted it internally. */
+void
+tpl_surface_post(tpl_surface_t *surface)
+{
+	tpl_frame_t *frame;
+
+	TRACE_BEGIN("TPL:POST");
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+	{
+		TRACE_END();
+		return;
+	}
+
+	TPL_OBJECT_LOCK(surface);
+
+	TPL_LOG(3, "surface->frame:%p, surface->damage:%p, surface->frame->damage:%p",
+			surface->frame, &surface->damage, (surface->frame)?(&surface->frame->damage):NULL);
+
+	if (tpl_list_is_empty(&surface->frame_queue))
+	{
+		/* Queue is empty and swap is called.
+		 * This means that current frame is not enqueued yet
+		 * and there's no pending frames.
+		 * So, this post is for the current frame. */
+		TPL_ASSERT(surface->frame != NULL);
+
+		__tpl_surface_enqueue_frame(surface);
+	}
+
+	/* Dequeue a frame from the queue. */
+	frame = (tpl_frame_t *)tpl_list_pop_front(&surface->frame_queue, NULL);
+
+	/* Fix: guard against a NULL frame. Under NDEBUG the assert above
+	 * compiles out, and if no frame was ever begun the pop returns NULL,
+	 * which was previously dereferenced unconditionally. */
+	if (frame != NULL)
+	{
+		/* Call backend post if it has not been called for the frame. */
+		if (frame->state != TPL_FRAME_STATE_POSTED)
+			surface->backend.post(surface, frame);
+
+		__tpl_frame_free(frame);
+	}
+
+	TPL_OBJECT_UNLOCK(surface);
+	TRACE_END();
+}
\ No newline at end of file
--- /dev/null
+#ifndef TPL_UTILS_H
+#define TPL_UTILS_H
+
+#include "tpl.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+/* Number of rects a tpl_region_t can hold without heap allocation. */
+#define TPL_MIN_REGION_RECTS 16
+
+#define TPL_ASSERT(expr) assert(expr)
+#define TPL_INLINE __inline__
+#define TPL_IGNORE(x) (void)x
+
+/* NOTE(review): the spec file defines ENABLE_TTRACE while this guard tests
+ * TTRACE_ENABLE, and callers use TRACE_BEGIN/TRACE_END rather than the
+ * TRACEBEGIN/TRACEEND defined here — confirm which names the build passes
+ * and where TRACE_BEGIN/TRACE_END come from. */
+#if (TTRACE_ENABLE)
+#include <ttrace.h>
+#define DDK_TAG TTRACE_TAG_GRAPHICS
+#define TRACEBEGIN(fmt,...) traceBegin(DDK_TAG, fmt, ##__VA_ARGS__)
+#define TRACEEND() traceEnd(DDK_TAG)
+#else
+#define TRACEBEGIN(fmt,...)
+#define TRACEEND()
+#endif
+
+#define TPL_DEBUG 1
+#if TPL_DEBUG
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <signal.h>
+
+/* 0:uninitialized, 1:initialized,no log, 2:user log */
+extern unsigned int tpl_log_lvl;
+
+/* Leveled logging. On first use, tpl_log_lvl is initialized from the
+ * TPL_LOG_LEVEL environment variable (defaulting to 1 = silent). A message
+ * is printed only when tpl_log_lvl > 1 && tpl_log_lvl <= lvl. */
+#define TPL_LOG(lvl, f, x...) \
+	{ \
+		if (tpl_log_lvl == 1) \
+		{} \
+		else if (tpl_log_lvl > 1) \
+		{ \
+			if (tpl_log_lvl <= lvl) \
+				TPL_LOG_PRINT(f, ##x) \
+		} \
+		else \
+		{ \
+			char *env = getenv("TPL_LOG_LEVEL"); \
+			if (env == NULL) \
+				tpl_log_lvl = 1; \
+			else \
+				tpl_log_lvl = atoi(env); \
+			\
+			if (tpl_log_lvl > 1 && tpl_log_lvl <= lvl) \
+				TPL_LOG_PRINT(f, ##x) \
+		} \
+	}
+
+/* Print one log line with pid, tid, function and line (cyan tag). */
+#define TPL_LOG_PRINT(fmt, args...) \
+	{ \
+		printf("[\x1b[36mTPL\x1b[0m|%d:%d|\x1b[36m%s\x1b[0m|%d] " fmt "\n", \
+		       getpid(), (int) syscall(SYS_gettid), __func__, __LINE__, \
+		       ##args); \
+	}
+
+/* Unconditional error log (red tag). */
+#define TPL_ERR(f, x...) \
+	{ \
+		printf("[\x1b[31mTPL_ERR %d:%d|%s|%d\x1b[0m] " f "\n", \
+		       getpid(), (int) syscall(SYS_gettid), __func__, \
+		       __LINE__, ##x); \
+	}
+
+/* Unconditional warning log (yellow tag). */
+#define TPL_WARN(f, x...) \
+	{ \
+		printf("[\x1b[33mTPL_WARN %d:%d|%s|%d\x1b[0m] " f "\n", \
+		       getpid(), (int) syscall(SYS_gettid), __func__, \
+		       __LINE__, ##x); \
+	}
+
+#else
+#define TPL_LOG(lvl, f, x...)
+#define TPL_ERR(f, x...)
+#define TPL_WARN(f, x...)
+#endif
+
+/* Parameter-validation helpers. Each evaluates its condition, logs the
+ * failed expression via TPL_ERR and then either returns, returns 'val',
+ * or jumps to 'label'. The do { } while (0) wrapper makes every macro a
+ * single statement, safe in unbraced if/else bodies. */
+#define TPL_CHECK_ON_NULL_RETURN(exp) \
+	do \
+	{ \
+		if ((exp) == NULL) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " == NULL"); \
+			return; \
+		} \
+	} \
+	while (0)
+
+#define TPL_CHECK_ON_NULL_RETURN_VAL(exp, val) \
+	do \
+	{ \
+		if ((exp) == NULL) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " == NULL"); \
+			return (val); \
+		} \
+	} \
+	while (0)
+
+#define TPL_CHECK_ON_NULL_GOTO(exp, label) \
+	do \
+	{ \
+		if ((exp) == NULL) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " == NULL"); \
+			goto label; \
+		} \
+	} \
+	while (0)
+
+#define TPL_CHECK_ON_TRUE_RETURN(exp) \
+	do \
+	{ \
+		if (exp) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " is true"); \
+			return; \
+		} \
+	} \
+	while (0)
+
+#define TPL_CHECK_ON_TRUE_RETURN_VAL(exp, val) \
+	do \
+	{ \
+		if (exp) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " is true"); \
+			return val; \
+		} \
+	} \
+	while (0)
+
+#define TPL_CHECK_ON_TRUE_GOTO(exp, label) \
+	do \
+	{ \
+		if (exp) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " is true"); \
+			goto label; \
+		} \
+	} \
+	while (0)
+
+#define TPL_CHECK_ON_FALSE_RETURN(exp) \
+	do \
+	{ \
+		if (!(exp)) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " is false"); \
+			return; \
+		} \
+	} \
+	while (0)
+
+#define TPL_CHECK_ON_FALSE_RETURN_VAL(exp, val) \
+	do \
+	{ \
+		if (!(exp)) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " is false"); \
+			return val; \
+		} \
+	} \
+	while (0)
+
+#define TPL_CHECK_ON_FALSE_GOTO(exp, label) \
+	do \
+	{ \
+		if (!(exp)) \
+		{ \
+			TPL_ERR("%s", "check failed: " # exp " is false"); \
+			goto label; \
+		} \
+	} \
+	while (0)
+
+typedef struct _tpl_list_node tpl_list_node_t;
+typedef struct _tpl_list tpl_list_t;
+typedef struct _tpl_region tpl_region_t;
+
+/* Which matching occurrence(s) tpl_list_remove_data() removes.
+ * (Identifier spelling "occurance" kept for source compatibility.) */
+enum _tpl_occurance
+{
+	TPL_FIRST,
+	TPL_LAST,
+	TPL_ALL
+};
+
+/* Doubly-linked list node. 'list' points back to the owning list so a node
+ * by itself is enough to reach the sentinels. */
+struct _tpl_list_node
+{
+	tpl_list_node_t *prev;
+	tpl_list_node_t *next;
+	void *data;
+	tpl_list_t *list;
+};
+
+/* Doubly-linked list with embedded head/tail sentinel nodes and a cached
+ * element count. */
+struct _tpl_list
+{
+	tpl_list_node_t head;
+	tpl_list_node_t tail;
+	int count;
+};
+
+/**
+* num_rects: number of rects.
+* rects: collection of rects where each rect is specified by 4 integers which
+* are upper left (x, y) and lower right (x, y) coordinates.
+* rects_static: initial storage space for rects. will be replaced by heap
+* memory if num_rects exceeds TPL_MIN_REGION_RECTS.
+* num_rects_allocated: number of rects currently allocated. minimum is
+* TPL_MIN_REGION_RECTS (initial value).
+*/
+struct _tpl_region
+{
+	int num_rects;
+	int *rects;
+	int rects_static[TPL_MIN_REGION_RECTS * 4];
+	int num_rects_allocated;
+};
+
+static TPL_INLINE int
+tpl_list_get_count(const tpl_list_t *list)
+{
+	/* Cached number of elements currently stored in the list. */
+	return list->count;
+}
+
+static TPL_INLINE tpl_bool_t
+tpl_list_is_empty(const tpl_list_t *list)
+{
+	/* Empty when no real nodes are linked between the sentinels. */
+	return (list->count == 0);
+}
+
+static TPL_INLINE void
+tpl_list_init(tpl_list_t *list)
+{
+	/* head and tail are sentinels; an empty list links them directly. */
+	list->head.prev = NULL;
+	list->head.next = &list->tail;
+	list->head.list = list;
+
+	list->tail.prev = &list->head;
+	list->tail.next = NULL;
+	list->tail.list = list;
+
+	list->count = 0;
+}
+
+static TPL_INLINE void
+tpl_list_fini(tpl_list_t *list, tpl_free_func_t func)
+{
+	tpl_list_node_t *curr;
+
+	/* Release every node; 'func', when given, destroys each payload. */
+	for (curr = list->head.next; curr != &list->tail; )
+	{
+		tpl_list_node_t *victim = curr;
+
+		curr = curr->next;
+
+		if (func)
+			func(victim->data);
+
+		free(victim);
+	}
+
+	/* Leave the list in a valid empty state. */
+	tpl_list_init(list);
+}
+
+/* Allocate and initialize an empty list on the heap.
+ * Fix: '(void)' gives a real prototype (empty parens declare unspecified
+ * arguments in C); the malloc cast is unnecessary in C and 'sizeof *list'
+ * ties the size to the variable's type. */
+static TPL_INLINE tpl_list_t *
+tpl_list_alloc(void)
+{
+	tpl_list_t *list;
+
+	list = malloc(sizeof *list);
+	/* NOTE(review): under NDEBUG this assert vanishes and an OOM result
+	 * would be dereferenced by tpl_list_init — confirm the OOM policy. */
+	TPL_ASSERT(list != NULL);
+
+	tpl_list_init(list);
+
+	return list;
+}
+
+static TPL_INLINE void
+tpl_list_free(tpl_list_t *list, tpl_free_func_t func)
+{
+	/* Release all nodes first, then the list object itself. */
+	tpl_list_fini(list, func);
+	free(list);
+}
+
+static TPL_INLINE void *
+tpl_list_node_get_data(const tpl_list_node_t *node)
+{
+	/* Opaque payload stored when the node was inserted. */
+	return node->data;
+}
+
+static TPL_INLINE tpl_list_node_t *
+tpl_list_get_front_node(tpl_list_t *list)
+{
+	/* First real node, or NULL when only the sentinels remain. */
+	return tpl_list_is_empty(list) ? NULL : list->head.next;
+}
+
+static TPL_INLINE tpl_list_node_t *
+tpl_list_get_back_node(tpl_list_t *list)
+{
+	/* Last real node, or NULL when only the sentinels remain. */
+	return tpl_list_is_empty(list) ? NULL : list->tail.prev;
+}
+
+static TPL_INLINE tpl_list_node_t *
+tpl_list_node_prev(tpl_list_node_t *node)
+{
+	tpl_list_node_t *prev = node->prev;
+
+	/* The head sentinel marks the start of the list; report NULL there. */
+	return (prev == &node->list->head) ? NULL : prev;
+}
+
+static TPL_INLINE tpl_list_node_t *
+tpl_list_node_next(tpl_list_node_t *node)
+{
+	tpl_list_node_t *next = node->next;
+
+	/* The tail sentinel marks the end of the list; report NULL there. */
+	return (next == &node->list->tail) ? NULL : next;
+}
+
+static TPL_INLINE void *
+tpl_list_get_front(const tpl_list_t *list)
+{
+	/* Payload of the first node, or NULL for an empty list. */
+	return tpl_list_is_empty(list) ? NULL : list->head.next->data;
+}
+
+static TPL_INLINE void *
+tpl_list_get_back(const tpl_list_t *list)
+{
+	/* Payload of the last node, or NULL for an empty list. */
+	return tpl_list_is_empty(list) ? NULL : list->tail.prev->data;
+}
+
+/* Unlink 'node' from its list and free it. When 'func' is non-NULL it is
+ * applied to the node's payload — i.e. the destructor runs exactly once,
+ * here; callers must not invoke it themselves as well. */
+static TPL_INLINE void
+tpl_list_remove(tpl_list_node_t *node, tpl_free_func_t func)
+{
+	/* Unlink from the neighbours (sentinels make this unconditional). */
+	node->prev->next = node->next;
+	node->next->prev = node->prev;
+
+	if (func)
+		func(node->data);
+
+	node->list->count--;
+	free(node);
+}
+
+/* Insert 'data' in a new node placed immediately after 'pos'.
+ * Fix: drop the unnecessary malloc cast and use 'sizeof *node' so the
+ * allocation size follows the variable's type. */
+static TPL_INLINE void
+tpl_list_insert(tpl_list_node_t *pos, void *data)
+{
+	tpl_list_node_t *node = malloc(sizeof *node);
+	/* NOTE(review): under NDEBUG this assert vanishes and a failed
+	 * allocation would be dereferenced below — confirm the OOM policy. */
+	TPL_ASSERT(node != NULL);
+
+	node->data = data;
+	node->list = pos->list;
+
+	/* Splice the node between 'pos' and its old successor. */
+	pos->next->prev = node;
+	node->next = pos->next;
+
+	pos->next = node;
+	node->prev = pos;
+
+	pos->list->count++;
+}
+
+/* Remove node(s) whose payload equals 'data'. 'occurance' selects the
+ * first match, the last match, or every match; 'func' destroys each
+ * removed payload.
+ * Fix: tpl_list_remove() already applies 'func' to the node's data, so the
+ * explicit func(data) call that preceded each removal invoked the
+ * destructor TWICE on the same payload (double free when 'func' frees).
+ * The destructor now runs exactly once, inside tpl_list_remove(). */
+static TPL_INLINE void
+tpl_list_remove_data(tpl_list_t *list, void *data, int occurance, tpl_free_func_t func)
+{
+	tpl_list_node_t *node;
+
+	if (occurance == TPL_FIRST)
+	{
+		node = list->head.next;
+
+		while (node != &list->tail)
+		{
+			tpl_list_node_t *curr = node;
+			node = node->next;
+
+			if (curr->data == data)
+			{
+				tpl_list_remove(curr, func);
+				return;
+			}
+		}
+	}
+	else if (occurance == TPL_LAST)
+	{
+		/* Scan backwards so the last match is found first. */
+		node = list->tail.prev;
+
+		while (node != &list->head)
+		{
+			tpl_list_node_t *curr = node;
+			node = node->prev;
+
+			if (curr->data == data)
+			{
+				tpl_list_remove(curr, func);
+				return;
+			}
+		}
+	}
+	else if (occurance == TPL_ALL)
+	{
+		node = list->head.next;
+
+		while (node != &list->tail)
+		{
+			tpl_list_node_t *curr = node;
+			node = node->next;
+
+			if (curr->data == data)
+				tpl_list_remove(curr, func);
+		}
+	}
+}
+
+static TPL_INLINE void
+tpl_list_push_front(tpl_list_t *list, void *data)
+{
+	/* Insert right after the head sentinel. */
+	tpl_list_insert(&list->head, data);
+}
+
+static TPL_INLINE void
+tpl_list_push_back(tpl_list_t *list, void *data)
+{
+	/* Insert right after the current last node (before the tail sentinel). */
+	tpl_list_insert(list->tail.prev, data);
+}
+
+static TPL_INLINE void *
+tpl_list_pop_front(tpl_list_t *list, tpl_free_func_t func)
+{
+	tpl_list_node_t *front;
+	void *data;
+
+	if (tpl_list_is_empty(list))
+		return NULL;
+
+	front = list->head.next;
+	data = front->data;
+
+	/* NOTE: if 'func' frees the payload the returned pointer dangles;
+	 * callers that keep the data pass NULL here. */
+	tpl_list_remove(front, func);
+
+	return data;
+}
+
+static TPL_INLINE void *
+tpl_list_pop_back(tpl_list_t *list, tpl_free_func_t func)
+{
+	tpl_list_node_t *back;
+	void *data;
+
+	if (tpl_list_is_empty(list))
+		return NULL;
+
+	back = list->tail.prev;
+	data = back->data;
+
+	/* NOTE: if 'func' frees the payload the returned pointer dangles;
+	 * callers that keep the data pass NULL here. */
+	tpl_list_remove(back, func);
+
+	return data;
+}
+
+/* Region helpers (implemented elsewhere in the library).
+ * Fix: tpl_region_alloc() declared with '(void)' — empty parentheses in C
+ * declare a function with unspecified arguments, disabling call checking. */
+void tpl_region_init(tpl_region_t *region);
+void tpl_region_fini(tpl_region_t *region);
+tpl_region_t * tpl_region_alloc(void);
+void tpl_region_free(tpl_region_t **region);
+tpl_bool_t tpl_region_is_empty(const tpl_region_t *region);
+void tpl_region_copy(tpl_region_t *dst, const tpl_region_t *src);
+void tpl_region_set_rects(tpl_region_t *region, int num_rects, const int *rects);
+
+#endif /* TPL_UTILS_H */
--- /dev/null
+#define inline __inline__
+
+#include <EGL/egl.h>
+
+#include <wayland-drm.h>
+
+#include "wayland-egl-priv.h"
+
+#include <wayland-client.h>
+#include <wayland-drm-client-protocol.h>
+
+#include <drm.h>
+#include <tbm_bufmgr.h>
+#include <gbm.h>
+#include "gbm_tbm.h"
+
+#undef inline
+
+#include "tpl_internal.h"
+
+
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+/* In Wayland, the application and the compositor each create their own drawing buffers. The recommended queue size is more than 2. */
+#define TPL_BUFFER_ALLOC_SIZE_APP 3
+#define TPL_BUFFER_ALLOC_SIZE_COMPOSITOR 4
+
+#define TPL_BUFFER_ALLOC_PITCH_ALIGNMENT 64
+#define ALIGN_TO_64BYTE(byte) (((byte) + TPL_BUFFER_ALLOC_PITCH_ALIGNMENT - 1) & ~(TPL_BUFFER_ALLOC_PITCH_ALIGNMENT - 1))
+
+typedef struct _tpl_wayland_display tpl_wayland_display_t;
+typedef struct _tpl_wayland_surface tpl_wayland_surface_t;
+typedef struct _tpl_wayland_buffer tpl_wayland_buffer_t;
+
+/* Per-display backend state. The 'proc' union holds the role-specific
+ * part: 'app' when the process is a client (EGL on a wl_display),
+ * 'comp' when it is the compositor (EGL on a gbm_device). */
+struct _tpl_wayland_display
+{
+	struct wl_drm *wl_drm;
+	tbm_bufmgr bufmgr;
+
+	union
+	{
+		struct
+		{
+			/* Set once DRM authentication completes during init. */
+			tpl_bool_t authenticated;
+			/* Private event queue so TPL does not steal events from
+			 * the application's main queue. */
+			struct wl_event_queue *wl_queue;
+			struct wl_registry *wl_registry;
+		} app;
+		struct
+		{
+			/* Server-side cache of client buffers, keyed by name. */
+			tpl_list_t cached_buffers;
+		} comp;
+	} proc;
+};
+
+/* Per-surface backend state: buffers cycle
+ * able_rendering_queue -> current_rendering_buffer -> done_rendering_queue. */
+struct _tpl_wayland_surface
+{
+	tpl_list_t able_rendering_queue;
+	tpl_buffer_t *current_rendering_buffer;
+	tpl_list_t done_rendering_queue;
+};
+
+/* Per-buffer backend state; 'proc' mirrors the display-side app/comp split. */
+struct _tpl_wayland_buffer
+{
+	tpl_display_t *display;
+	tbm_bo bo;
+	int reused;
+
+	union
+	{
+		struct
+		{
+			struct wl_resource *wl_resource;
+			tpl_bool_t resized;
+		} app;
+		struct
+		{
+			struct gbm_bo *gbm_bo;
+			/* Set by the post path; cleared when the buffer is reused. */
+			tpl_bool_t posted;
+		} comp;
+	} proc;
+};
+
+/* Listener/callback tables and gbm hooks defined later in this file. */
+static const struct wl_registry_listener registry_listener;
+static const struct wl_callback_listener sync_listener;
+static const struct wl_callback_listener frame_listener;
+static const struct wl_buffer_listener buffer_release_listener;
+static struct wayland_drm_callbacks wl_drm_server_listener;
+
+static struct gbm_bo *__cb_server_gbm_surface_lock_front_buffer(struct gbm_surface *gbm_surf);
+static void __cb_server_gbm_surface_release_buffer(struct gbm_surface *gbm_surf, struct gbm_bo *gbm_bo);
+static int __cb_server_gbm_surface_has_free_buffers(struct gbm_surface *gbm_surf);
+
+#define TPL_BUFFER_CACHE_MAX_ENTRIES 40
+/* Add 'buffer' to the bounded server-side cache, evicting (unreferencing)
+ * the oldest entry when TPL_BUFFER_CACHE_MAX_ENTRIES is reached. The cache
+ * holds its own reference on each cached buffer. */
+static TPL_INLINE void
+__tpl_wayland_surface_buffer_cache_add(tpl_list_t *buffer_cache, tpl_buffer_t *buffer)
+{
+	tpl_buffer_t *evict = NULL;
+
+	if (tpl_list_get_count(buffer_cache) >= TPL_BUFFER_CACHE_MAX_ENTRIES)
+	{
+		evict = tpl_list_pop_front(buffer_cache, NULL);
+		tpl_object_unreference(&evict->base);
+	}
+
+	tpl_object_reference(&buffer->base);
+	tpl_list_push_back(buffer_cache, (void *)buffer);
+
+	/* Fix: 'base' is an embedded struct (see &evict->base above); passing
+	 * it by value to a %p varargs slot is undefined behavior — pass its
+	 * address instead. */
+	TPL_LOG(3, "buf:%10p buf->base:%10p evict:%10p", buffer, &buffer->base, evict);
+}
+
+/* Remove the cached buffer whose key equals 'name' (linear scan) and drop
+ * the cache's reference on it. No-op (with a log) when not found. */
+static TPL_INLINE void
+__tpl_wayland_surface_buffer_cache_remove(tpl_list_t *buffer_cache, unsigned int name)
+{
+	tpl_list_node_t *node = tpl_list_get_front_node(buffer_cache);
+
+	while (node)
+	{
+		tpl_buffer_t *buffer = (tpl_buffer_t *)tpl_list_node_get_data(node);
+
+		if (buffer->key == name)
+		{
+			tpl_object_unreference(&buffer->base);
+			tpl_list_remove(node, NULL);
+			/* Fix: 'base' is an embedded struct; passing it by value to a
+			 * %p varargs slot is undefined behavior — pass its address. */
+			TPL_LOG(3, "name:%d buf:%10p buf->base:%10p", name, buffer, &buffer->base);
+			return;
+		}
+
+		node = tpl_list_node_next(node);
+	}
+
+	TPL_LOG(3, "Buffer named %d not found in cache", name);
+}
+
+/* Look up a cached buffer by key. Returns the buffer (still owned by the
+ * cache — no extra reference is taken) or NULL when absent. */
+static TPL_INLINE tpl_buffer_t *
+__tpl_wayland_surface_buffer_cache_find(tpl_list_t *buffer_cache, unsigned int name)
+{
+	tpl_list_node_t *node = tpl_list_get_front_node(buffer_cache);
+
+	while (node)
+	{
+		tpl_buffer_t *buffer = (tpl_buffer_t *)tpl_list_node_get_data(node);
+
+		if (buffer->key == name)
+		{
+			/* Fix: 'base' is an embedded struct; passing it by value to a
+			 * %p varargs slot is undefined behavior — pass its address. */
+			TPL_LOG(3, "name:%d buf:%10p buf->base:%10p", name, buffer, &buffer->base);
+			return buffer;
+		}
+
+		node = tpl_list_node_next(node);
+	}
+
+	TPL_LOG(3, "Buffer named %d not found in cache", name);
+
+	return NULL;
+}
+
+
+static TPL_INLINE tpl_bool_t
+__tpl_wayland_display_is_wl_display(tpl_handle_t native_dpy)
+{
+	/* MAGIC CHECK: a native display handle is a wl_display when its first
+	 * pointer-sized field points at wl_display_interface. */
+	return (*(void **)native_dpy == &wl_display_interface) ? TPL_TRUE : TPL_FALSE;
+}
+
+static TPL_INLINE tpl_bool_t
+__tpl_wayland_display_is_gbm_device(tpl_handle_t native_dpy)
+{
+	/* MAGIC CHECK: a native display handle is a gbm_device when its first
+	 * pointer-sized field holds the address of gbm_create_device(). */
+	return (*(void **)native_dpy == gbm_create_device) ? TPL_TRUE : TPL_FALSE;
+}
+
+static int
+__tpl_wayland_display_roundtrip(tpl_display_t *display)
+{
+	struct wl_display *wl_dpy = (struct wl_display *)display->native_handle;
+	tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)display->backend.data;
+	struct wl_callback *callback;
+	int done = 0;
+	int ret = 0;
+
+	/* Post a sync request on our private queue, then dispatch that queue
+	 * until the compositor answers it (sync_listener sets 'done') or
+	 * dispatching fails with -1. */
+	callback = wl_display_sync(wl_dpy);
+	wl_callback_add_listener(callback, &sync_listener, &done);
+	wl_proxy_set_queue((struct wl_proxy *)callback, wayland_display->proc.app.wl_queue);
+
+	while (!done && ret != -1)
+		ret = wl_display_dispatch_queue(wl_dpy, wayland_display->proc.app.wl_queue);
+
+	return ret;
+}
+
+/* Initialize backend state for a display: either a client-side wl_display
+ * (bind wl_drm, get the DRM fd, authenticate, init TBM) or a server-side
+ * gbm_device (hook the gbm surface callbacks). Returns TPL_FALSE on any
+ * failure. */
+static tpl_bool_t
+__tpl_wayland_display_init(tpl_display_t *display)
+{
+	tpl_wayland_display_t *wayland_display = NULL;
+
+	/* Do not allow default display in wayland. */
+	if (display->native_handle == NULL)
+		return TPL_FALSE;
+
+	wayland_display = (tpl_wayland_display_t *)calloc(1, sizeof(tpl_wayland_display_t));
+	if (wayland_display == NULL)
+		return TPL_FALSE;
+
+	display->backend.data = wayland_display;
+
+	display->bufmgr_fd = -1;
+
+	if (__tpl_wayland_display_is_wl_display(display->native_handle))
+	{
+		struct wl_display *wl_dpy = (struct wl_display *)display->native_handle;
+
+		/* Use a private event queue so TPL does not consume events that
+		 * belong to the application's main queue. */
+		wayland_display->proc.app.wl_queue = wl_display_create_queue(wl_dpy);
+		wayland_display->proc.app.wl_registry = wl_display_get_registry(wl_dpy);
+		wl_proxy_set_queue((struct wl_proxy *)wayland_display->proc.app.wl_registry, wayland_display->proc.app.wl_queue);
+		wl_registry_add_listener(wayland_display->proc.app.wl_registry, &registry_listener, display);
+
+		/* Initialization roundtrip steps: each roundtrip waits for one
+		 * stage to complete — (1) wl_drm bound via the registry listener,
+		 * (2) DRM fd delivered (bufmgr_fd), (3) DRM authentication done. */
+		if (__tpl_wayland_display_roundtrip(display) < 0 || wayland_display->wl_drm == NULL) goto error;
+		if (__tpl_wayland_display_roundtrip(display) < 0 || display->bufmgr_fd == -1) goto error;
+		if (__tpl_wayland_display_roundtrip(display) < 0 || wayland_display->proc.app.authenticated == TPL_FALSE) goto error;
+
+		/* NOTE(review): tbm_bufmgr_init() result is not checked, and the
+		 * error path below does not destroy wl_queue/wl_registry — confirm
+		 * intended cleanup policy. */
+		wayland_display->bufmgr = tbm_bufmgr_init(display->bufmgr_fd);
+	}
+	else if (__tpl_wayland_display_is_gbm_device(display->native_handle))
+	{
+		struct gbm_device *gbm = (struct gbm_device *)display->native_handle;
+		struct gbm_tbm_device *gbm_tbm = (struct gbm_tbm_device *)gbm;
+
+		/* Hook gbm backend callbacks. If the compositor calls gbm APIs to get a buffer,
+		   then we return a suitable buffer to the compositor instead of gbm does. */
+		gbm_tbm_device_set_callback_surface_has_free_buffers(gbm_tbm, __cb_server_gbm_surface_has_free_buffers);
+		gbm_tbm_device_set_callback_surface_lock_front_buffer(gbm_tbm, __cb_server_gbm_surface_lock_front_buffer);
+		gbm_tbm_device_set_callback_surface_release_buffer(gbm_tbm, __cb_server_gbm_surface_release_buffer);
+
+		tpl_list_init(&wayland_display->proc.comp.cached_buffers);
+	}
+	else
+		goto error;
+
+	return TPL_TRUE;
+
+error:
+	if (wayland_display != NULL)
+	{
+		free(wayland_display);
+		display->backend.data = NULL;
+	}
+	return TPL_FALSE;
+}
+
+/* Tear down the per-display backend state created by
+ * __tpl_wayland_display_init(). */
+static void
+__tpl_wayland_display_fini(tpl_display_t *display)
+{
+	tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)display->backend.data;
+
+	if (wayland_display != NULL)
+	{
+		if (__tpl_wayland_display_is_wl_display(display->native_handle))
+		{
+			/* Client side: release the TBM buffer manager and the DRM fd
+			 * obtained during init. */
+			tbm_bufmgr_deinit(wayland_display->bufmgr);
+			close(display->bufmgr_fd);
+		}
+		if (__tpl_wayland_display_is_gbm_device(display->native_handle))
+		{
+			struct gbm_device *gbm = (struct gbm_device *)display->native_handle;
+			struct gbm_tbm_device *gbm_tbm = (struct gbm_tbm_device *)gbm;
+
+			/* Unhook the gbm callbacks installed in init. */
+			gbm_tbm_device_set_callback_surface_has_free_buffers(gbm_tbm, NULL);
+			gbm_tbm_device_set_callback_surface_lock_front_buffer(gbm_tbm, NULL);
+			gbm_tbm_device_set_callback_surface_release_buffer(gbm_tbm, NULL);
+
+			/* Drop the references held by the server-side buffer cache. */
+			tpl_list_fini(&wayland_display->proc.comp.cached_buffers, (tpl_free_func_t) tpl_object_unreference);
+		}
+
+		free(wayland_display);
+	}
+	display->backend.data = NULL;
+}
+
+/* Report whether an 8:8:8(:8|:0) window config is supported and translate
+ * it to the native visual id (WL_DRM_* for clients, GBM_* for the
+ * compositor). Out-pointers may be NULL. */
+static tpl_bool_t
+__tpl_wayland_display_query_config(tpl_display_t *display, tpl_surface_type_t surface_type,
+				   int red_size, int green_size, int blue_size, int alpha_size,
+				   int color_depth, int *native_visual_id, tpl_bool_t *is_slow)
+{
+	int wl_format;
+	int gbm_format;
+
+	/* Only 8:8:8 window configs with 24- or 32-bit depth are supported. */
+	if (surface_type != TPL_SURFACE_TYPE_WINDOW || red_size != 8 ||
+	    green_size != 8 || blue_size != 8 ||
+	    (color_depth != 32 && color_depth != 24))
+		return TPL_FALSE;
+
+	/* Alpha selects between ARGB (8 bits) and XRGB (none). */
+	if (alpha_size == 8)
+	{
+		wl_format = WL_DRM_FORMAT_ARGB8888;
+		gbm_format = GBM_FORMAT_ARGB8888;
+	}
+	else if (alpha_size == 0)
+	{
+		wl_format = WL_DRM_FORMAT_XRGB8888;
+		gbm_format = GBM_FORMAT_XRGB8888;
+	}
+	else
+		return TPL_FALSE;
+
+	if (__tpl_wayland_display_is_wl_display(display->native_handle))
+	{
+		if (native_visual_id != NULL) *native_visual_id = wl_format;
+	}
+	else if (__tpl_wayland_display_is_gbm_device(display->native_handle) &&
+		 gbm_device_is_format_supported((struct gbm_device *)display->native_handle,
+						gbm_format, GBM_BO_USE_RENDERING) == 1)
+	{
+		if (native_visual_id != NULL) *native_visual_id = gbm_format;
+	}
+	else
+		return TPL_FALSE;
+
+	if (is_slow != NULL) *is_slow = TPL_FALSE;
+	return TPL_TRUE;
+}
+
+static tpl_bool_t
+__tpl_wayland_display_filter_config(tpl_display_t *display,
+				    int *visual_id, int alpha_size)
+{
+	TPL_IGNORE(display);
+
+	/* An ARGB visual with no alpha requested degrades to XRGB. */
+	if (alpha_size == 0 && *visual_id == GBM_FORMAT_ARGB8888)
+	{
+		*visual_id = GBM_FORMAT_XRGB8888;
+		return TPL_TRUE;
+	}
+
+	return TPL_FALSE;
+}
+
+/* Query size and pixel format of a native window (wl_egl_window or
+ * gbm_surface). Each out-pointer may be NULL to skip that field. Returns
+ * TPL_FALSE for an unrecognized native display. */
+static tpl_bool_t
+__tpl_wayland_display_get_window_info(tpl_display_t *display, tpl_handle_t window,
+				      int *width, int *height, tpl_format_t *format)
+{
+	if (__tpl_wayland_display_is_wl_display(display->native_handle))
+	{
+		struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)window;
+
+		if (format != NULL)
+		{
+			/* Wayland-egl window doesn't have native format information.
+			   It is fixed from 'EGLconfig' when called eglCreateWindowSurface().
+			   So we use the tpl_surface format instead. */
+			tpl_surface_t *surface = wl_egl_window->private;
+			if (surface != NULL)
+				*format = surface->format;
+			else
+				*format = TPL_FORMAT_ARGB8888;
+		}
+		if (width != NULL) *width = wl_egl_window->width;
+		if (height != NULL) *height = wl_egl_window->height;
+
+		return TPL_TRUE;
+	}
+	else if (__tpl_wayland_display_is_gbm_device(display->native_handle) == TPL_TRUE)
+	{
+		struct gbm_surface *gbm_surface = (struct gbm_surface *)window;
+		struct gbm_tbm_surface *gbm_tbm_surface = (struct gbm_tbm_surface *)gbm_surface;
+
+		/* Map the gbm fourcc format onto the TPL format enum. */
+		if (format != NULL)
+		{
+			switch (gbm_tbm_surface_get_format(gbm_tbm_surface))
+			{
+				case GBM_FORMAT_ARGB8888: *format = TPL_FORMAT_ARGB8888; break;
+				case GBM_FORMAT_XRGB8888: *format = TPL_FORMAT_XRGB8888; break;
+				case GBM_FORMAT_RGB565: *format = TPL_FORMAT_RGB565; break;
+				default: *format = TPL_FORMAT_INVALID; break;
+			}
+		}
+		if (width != NULL) *width = gbm_tbm_surface_get_width(gbm_tbm_surface);
+		if (height != NULL) *height = gbm_tbm_surface_get_height(gbm_tbm_surface);
+
+		return TPL_TRUE;
+	}
+
+	return TPL_FALSE;
+}
+
+/* Query size and pixel format of a native pixmap, resolved through the
+ * server-side wl_drm buffer table. Out-pointers may be NULL. Returns
+ * TPL_FALSE when wl_drm is absent or the resource is unknown. */
+static tpl_bool_t
+__tpl_wayland_display_get_pixmap_info(tpl_display_t *display, tpl_handle_t pixmap,
+				      int *width, int *height, tpl_format_t *format)
+{
+	tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)display->backend.data;
+	struct wl_drm_buffer *drm_buffer = NULL;
+
+	if (wayland_display->wl_drm == NULL)
+		return TPL_FALSE;
+
+	drm_buffer = wayland_drm_buffer_get(wayland_display->wl_drm, (struct wl_resource *)pixmap);
+
+	if (drm_buffer != NULL)
+	{
+		/* Map the wl_drm fourcc format onto the TPL format enum. */
+		if (format != NULL)
+		{
+			switch (drm_buffer->format)
+			{
+				case WL_DRM_FORMAT_ARGB8888: *format = TPL_FORMAT_ARGB8888; break;
+				case WL_DRM_FORMAT_XRGB8888: *format = TPL_FORMAT_XRGB8888; break;
+				case WL_DRM_FORMAT_RGB565: *format = TPL_FORMAT_RGB565; break;
+				default: *format = TPL_FORMAT_INVALID; break;
+			}
+		}
+		if (width != NULL) *width = drm_buffer->width;
+		if (height != NULL) *height = drm_buffer->height;
+
+		return TPL_TRUE;
+	}
+
+	return TPL_FALSE;
+}
+
+static void
+__tpl_wayland_display_flush(tpl_display_t *display)
+{
+	/* Nothing to flush for the Wayland backend. */
+	TPL_IGNORE(display);
+}
+
+/* Initialize per-surface backend state. For window surfaces the renderable
+ * queue is pre-filled with NULL placeholders (presumably meaning "slot
+ * available, buffer allocated lazily" — confirm in the get_buffer path):
+ * 3 slots for a client, 4 for the compositor. */
+static tpl_bool_t
+__tpl_wayland_surface_init(tpl_surface_t *surface)
+{
+	tpl_wayland_surface_t *wayland_surface = NULL;
+	int i;
+
+	wayland_surface = (tpl_wayland_surface_t *)calloc(1, sizeof(tpl_wayland_surface_t));
+	TPL_CHECK_ON_NULL_RETURN_VAL(wayland_surface, TPL_FALSE);
+
+	surface->backend.data = (void *)wayland_surface;
+
+	tpl_list_init(&wayland_surface->able_rendering_queue);
+	tpl_list_init(&wayland_surface->done_rendering_queue);
+
+	if (surface->type == TPL_SURFACE_TYPE_WINDOW)
+	{
+		if (__tpl_wayland_display_is_wl_display(surface->display->native_handle))
+		{
+			/* Let the native window find its tpl_surface back. */
+			struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)surface->native_handle;
+			wl_egl_window->private = surface;
+
+			/* Create renderable buffer queue. Fill with empty(=NULL) buffers. */
+			for (i = 0; i < TPL_BUFFER_ALLOC_SIZE_APP; i++)
+			{
+				tpl_list_push_back(&wayland_surface->able_rendering_queue, NULL);
+			}
+		}
+		if (__tpl_wayland_display_is_gbm_device(surface->display->native_handle))
+		{
+			struct gbm_surface *gbm_surface = surface->native_handle;
+			struct gbm_tbm_surface *gbm_tbm_surface = (struct gbm_tbm_surface *)gbm_surface;
+			gbm_tbm_surface_set_user_data(gbm_tbm_surface, surface);
+
+			/* Create renderable buffer queue. Fill with empty(=NULL) buffers. */
+			for (i = 0; i < TPL_BUFFER_ALLOC_SIZE_COMPOSITOR; i++)
+			{
+				tpl_list_push_back(&wayland_surface->able_rendering_queue, NULL);
+			}
+		}
+
+		/* Cache the current native window size on the surface. */
+		__tpl_wayland_display_get_window_info(surface->display, surface->native_handle,
+						      &surface->width, &surface->height, NULL);
+
+		TPL_LOG(3, "window(%p, %p) %dx%d", surface, surface->native_handle, surface->width, surface->height);
+		return TPL_TRUE;
+	}
+
+	if (surface->type == TPL_SURFACE_TYPE_PIXMAP)
+	{
+		__tpl_wayland_display_get_pixmap_info(surface->display, surface->native_handle,
+						      &surface->width, &surface->height, NULL);
+		return TPL_TRUE;
+	}
+
+	return TPL_FALSE;
+}
+
+static void
+__tpl_wayland_surface_buffer_free(tpl_buffer_t *buffer)
+{
+	TPL_LOG(3, "buffer(%p) key:%d", buffer, buffer?buffer->key:-1);
+
+	if (buffer == NULL)
+		return;
+
+	/* Detach the buffer from its surface before dropping the reference. */
+	__tpl_buffer_set_surface(buffer, NULL);
+	tpl_object_unreference((tpl_object_t *)buffer);
+}
+
+/* Tear down per-surface backend state: wait for queued frames to drain,
+ * release all buffers, and detach from the native window. */
+static void
+__tpl_wayland_surface_fini(tpl_surface_t *surface)
+{
+	tpl_wayland_surface_t *wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+
+	TPL_CHECK_ON_NULL_RETURN(wayland_surface);
+	TPL_LOG(3, "window(%p, %p)", surface, surface->native_handle);
+
+	/* NOTE(review): unbounded busy-wait (yield loop) until all queued
+	 * frames are consumed — confirm who drains frame_queue here. */
+	while (!tpl_list_is_empty(&surface->frame_queue))
+	{
+		tpl_util_sys_yield();
+	}
+
+	/* (Already guaranteed non-NULL by the check above.) */
+	if (wayland_surface != NULL)
+	{
+		TPL_LOG(3, "free buffers able(%d), current:%d, done:%d",
+				tpl_list_get_count(&wayland_surface->able_rendering_queue),
+				wayland_surface->current_rendering_buffer?1:0,
+				tpl_list_get_count(&wayland_surface->done_rendering_queue));
+
+		/* Release every buffer in all three rotation stages. */
+		tpl_list_fini(&wayland_surface->able_rendering_queue, (tpl_free_func_t)__tpl_wayland_surface_buffer_free);
+		__tpl_wayland_surface_buffer_free(wayland_surface->current_rendering_buffer);
+		tpl_list_fini(&wayland_surface->done_rendering_queue, (tpl_free_func_t)__tpl_wayland_surface_buffer_free);
+
+		if (surface->type == TPL_SURFACE_TYPE_WINDOW)
+		{
+			if (__tpl_wayland_display_is_wl_display(surface->display->native_handle))
+			{
+				tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)surface->display->backend.data;
+				struct wl_egl_window *wl_egl_window = (struct wl_egl_window *)surface->native_handle;
+				wl_egl_window->private = NULL;
+
+				/* Detach all pending buffers */
+				if (wl_egl_window->surface && wl_egl_window->width == wl_egl_window->attached_width &&
+					wl_egl_window->height == wl_egl_window->attached_height)
+				{
+					wl_surface_attach(wl_egl_window->surface, NULL, 0, 0);
+					wl_surface_commit(wl_egl_window->surface);
+				}
+
+				/* Push the detach out and wait until the server has
+				 * processed it. */
+				wl_display_flush(surface->display->native_handle);
+				__tpl_wayland_display_roundtrip(surface->display);
+			}
+
+			if (__tpl_wayland_display_is_gbm_device(surface->display->native_handle))
+			{
+				struct gbm_surface *gbm_surface = surface->native_handle;
+				struct gbm_tbm_surface *gbm_tbm_surface = (struct gbm_tbm_surface *)gbm_surface;
+
+				gbm_tbm_surface_set_user_data(gbm_tbm_surface, NULL);
+			}
+		}
+
+		free(wayland_surface);
+	}
+	surface->backend.data = NULL;
+}
+
+/* Present a frame. Client path: attach the frame's buffer, post damage,
+ * register a frame callback, commit and flush. Compositor (gbm) path:
+ * only mark the buffer as posted; scan-out is handled by the server. */
+static void
+__tpl_wayland_surface_post(tpl_surface_t *surface, tpl_frame_t *frame)
+{
+	TPL_ASSERT(frame->buffer != NULL);
+	TPL_LOG(3, "window(%p, %p)", surface, surface->native_handle);
+
+	if (__tpl_wayland_display_is_wl_display(surface->display->native_handle))
+	{
+		tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)surface->display->backend.data;
+		tpl_wayland_buffer_t *wayland_buffer = (tpl_wayland_buffer_t *)frame->buffer->backend.data;
+		struct wl_egl_window *wl_egl_window = NULL;
+		int i;
+
+		TPL_LOG(3, "\t buffer(%p, %p) key:%d", frame->buffer, wayland_buffer->proc.app.wl_resource, frame->buffer->key);
+		wl_egl_window = (struct wl_egl_window *)surface->native_handle;
+		/* Hold a reference while the compositor owns the buffer;
+		 * presumably released when the release/frame callback fires —
+		 * confirm in buffer_release_listener. */
+		tpl_object_reference((tpl_object_t *)frame->buffer);
+		wl_surface_attach(wl_egl_window->surface,
+				  (void *)wayland_buffer->proc.app.wl_resource,
+				  wl_egl_window->dx,
+				  wl_egl_window->dy);
+
+		wl_egl_window->attached_width = wl_egl_window->width;
+		wl_egl_window->attached_height = wl_egl_window->height;
+
+		/* Forward the frame's damage rects; with none given, damage the
+		 * whole window. */
+		for (i = 0; i < frame->damage.num_rects; i++)
+		{
+			wl_surface_damage(wl_egl_window->surface,
+					  frame->damage.rects[i * 4 + 0],
+					  frame->damage.rects[i * 4 + 1],
+					  frame->damage.rects[i * 4 + 2],
+					  frame->damage.rects[i * 4 + 3]);
+		}
+		if (frame->damage.num_rects == 0) {
+			wl_surface_damage(wl_egl_window->surface,
+					  wl_egl_window->dx, wl_egl_window->dy,
+					  wl_egl_window->width, wl_egl_window->height);
+		}
+
+
+		{
+			/* Register a meaningless surface frame callback.
+			   Because the buffer_release callback only be triggered if this callback is registered. */
+			struct wl_callback *frame_callback = NULL;
+			frame_callback = wl_surface_frame(wl_egl_window->surface);
+			wl_callback_add_listener(frame_callback, &frame_listener, frame->buffer);
+			wl_proxy_set_queue((struct wl_proxy *)frame_callback, wayland_display->proc.app.wl_queue);
+		}
+		wl_surface_commit(wl_egl_window->surface);
+
+		wl_display_flush(surface->display->native_handle);
+	}
+	if (__tpl_wayland_display_is_gbm_device(surface->display->native_handle))
+	{
+		tpl_wayland_buffer_t *wayland_buffer = (tpl_wayland_buffer_t *)frame->buffer->backend.data;
+
+		/* Compositor side: flag the buffer so begin/end_frame can
+		 * recycle it. */
+		wayland_buffer->proc.comp.posted = TPL_TRUE;
+	}
+}
+
+/* Start a new frame: block until a renderable buffer slot is available and
+ * move it from the able queue to current_rendering_buffer. The surface
+ * object lock is dropped while waiting on external events. */
+static void
+__tpl_wayland_surface_begin_frame(tpl_surface_t *surface)
+{
+	tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)surface->display->backend.data;
+	tpl_wayland_surface_t *wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+
+	TPL_ASSERT(wayland_surface->current_rendering_buffer == NULL);
+	TPL_LOG(3, "window(%p, %p)", surface, surface->native_handle);
+
+	if (__tpl_wayland_display_is_wl_display(surface->display->native_handle))
+	{
+		/* Drop the lock: dispatching server events may take arbitrarily
+		 * long and may re-enter TPL callbacks. */
+		TPL_OBJECT_UNLOCK(surface);
+
+		__tpl_wayland_display_roundtrip(surface->display);
+
+		while (tpl_list_is_empty(&wayland_surface->able_rendering_queue))
+		{
+			/* Application sent all buffers to the server. Wait for server response. */
+			if (wl_display_dispatch_queue(surface->display->native_handle, wayland_display->proc.app.wl_queue) == -1)
+			{
+				/* Dispatch failed; re-acquire the lock and give up. */
+				TPL_OBJECT_LOCK(surface);
+				return;
+			}
+		}
+
+		TPL_OBJECT_LOCK(surface);
+	}
+	if (__tpl_wayland_display_is_gbm_device(surface->display->native_handle))
+	{
+		/* Spin (with yield) until the front of the able queue is either
+		 * an empty slot (NULL) or a buffer the post path has marked
+		 * posted and can therefore be reused. */
+		while (1)
+		{
+			if (!tpl_list_is_empty(&wayland_surface->able_rendering_queue))
+			{
+				tpl_buffer_t *buffer = NULL;
+				tpl_wayland_buffer_t *wayland_buffer = NULL;
+
+				buffer = tpl_list_get_front(&wayland_surface->able_rendering_queue);
+				if (buffer == NULL) break;
+
+				wayland_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+				if (wayland_buffer->proc.comp.posted) break;
+			}
+			/* Compositor over-drawed all buffers, but no buffer has done yet. Wait for frame post. */
+			TPL_OBJECT_UNLOCK(surface);
+			tpl_util_sys_yield();
+			TPL_OBJECT_LOCK(surface);
+		}
+	}
+	/* MOVE BUFFER : [able queue] --> (current buffer) */
+	wayland_surface->current_rendering_buffer = tpl_list_pop_front(&wayland_surface->able_rendering_queue, NULL);
+	TPL_LOG(3, "set current buffer(%p)", wayland_surface->current_rendering_buffer);
+}
+
+/* Frame validation hook: Wayland frames never become invalid, so this
+ always reports success. */
+static tpl_bool_t
+__tpl_wayland_surface_validate_frame(tpl_surface_t *surface)
+{
+ TPL_IGNORE(surface);
+ return TPL_TRUE;
+}
+
+/* End a frame: retire current_rendering_buffer into the done(rendered)
+ queue. On gbm (server side) additionally recycles the previous done
+ buffer into the able queue and clears the 'posted' flag so
+ lock_front_buffer()/begin_frame() can wait on the new back buffer.
+ Assumes begin_frame() succeeded: current_rendering_buffer must be
+ non-NULL (it is dereferenced without a check on the gbm path). */
+static void
+__tpl_wayland_surface_end_frame(tpl_surface_t *surface)
+{
+ tpl_wayland_surface_t *wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+
+ TPL_LOG(3, "window(%p, %p)", surface, surface->native_handle);
+
+ if (__tpl_wayland_display_is_gbm_device(surface->display->native_handle))
+ {
+ tpl_wayland_buffer_t *wayland_buffer = (tpl_wayland_buffer_t *)wayland_surface->current_rendering_buffer->backend.data;
+
+ /* Current GBM front buffer is moved to back when calling eglSwapBuffers()=end_frame(). */
+ if (!tpl_list_is_empty(&wayland_surface->done_rendering_queue))
+ {
+ /* MOVE BUFFER : [done queue] --> [able queue] */
+ tpl_list_push_back(&wayland_surface->able_rendering_queue, tpl_list_pop_front(&wayland_surface->done_rendering_queue, NULL));
+ }
+
+ /* Prepare to check post for current GBM back buffer. */
+ wayland_buffer->proc.comp.posted = TPL_FALSE;
+ }
+
+ /* MOVE BUFFER : (current buffer) --> [done queue] */
+ tpl_list_push_back(&wayland_surface->done_rendering_queue, wayland_surface->current_rendering_buffer);
+ wayland_surface->current_rendering_buffer = NULL;
+
+}
+
+/* Client-side (wl_egl_window) buffer creation:
+ allocates a fresh tbm bo sized for the current window dimensions,
+ wraps it in a tpl_buffer_t, creates a wl_drm buffer for it and
+ registers the release listener on the app's private event queue.
+ Returns the new buffer, or NULL on allocation/format failure.
+ NOTE(review): __tpl_buffer_alloc() is called here with 7 arguments but
+ with 8 (trailing tbm_bo_export key) in the gbm/wl_drm creators below —
+ confirm against the real prototype; one of the call sites is stale. */
+static tpl_buffer_t *
+__tpl_wayland_surface_create_buffer_from_wl_egl(tpl_surface_t *surface, tpl_bool_t *reset_buffers)
+{
+ /* NOTE(review): wayland_surface and key are unused in this function. */
+ tpl_wayland_surface_t *wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+ tpl_buffer_t *buffer = NULL;
+ tpl_wayland_buffer_t *wayland_buffer = NULL;
+ tbm_bo bo;
+ tbm_bo_handle bo_handle;
+ int width, height, depth, stride;
+ tpl_format_t format;
+ unsigned int key = 0;
+
+ tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)surface->display->backend.data;
+ struct wl_resource *wl_resource = NULL;
+ unsigned int name = -1;
+ uint32_t wl_format = 0;
+
+ __tpl_wayland_display_get_window_info(surface->display, surface->native_handle,
+ &width, &height, &format);
+
+ /* Depth is forced to 32 regardless of the reported format. */
+ depth = 32;//TPL_FORMAT_GET_DEPTH(format);
+
+ stride = ALIGN_TO_8BYTE(width * depth / 8);
+
+ /* Allocate a buffer */
+ bo = tbm_bo_alloc(wayland_display->bufmgr, stride * height, TBM_BO_DEFAULT);
+ TPL_CHECK_ON_NULL_RETURN_VAL(bo, NULL);
+
+ /* Create tpl buffer. */
+ bo_handle = tbm_bo_get_handle(bo, TBM_DEVICE_3D);
+
+
+ name = tbm_bo_export(bo);
+ buffer = __tpl_buffer_alloc(surface, (int)name, (int)bo_handle.u32, width, height, depth, stride);
+ TPL_CHECK_ON_NULL_RETURN_VAL(buffer, NULL);
+
+ wayland_buffer = (tpl_wayland_buffer_t *)calloc(1, sizeof(tpl_wayland_buffer_t));
+ if (wayland_buffer == NULL)
+ {
+ TPL_ERR("wayland_buffer==NULL");
+
+ tbm_bo_unref(bo);
+ tpl_object_unreference((tpl_object_t *)buffer);
+ return NULL;
+ }
+ buffer->backend.data = (void *)wayland_buffer;
+ /* NOTE(review): format is overwritten to ARGB8888 unconditionally, so
+ the switch below can only take its first case — confirm intended. */
+ surface->format = TPL_FORMAT_ARGB8888;
+ /* Post process : Create a wl_drm_buffer and notify the buffer to the server. */
+ switch (surface->format)
+ {
+ case TPL_FORMAT_ARGB8888: wl_format = WL_DRM_FORMAT_ARGB8888; break;
+ case TPL_FORMAT_XRGB8888: wl_format = WL_DRM_FORMAT_XRGB8888; break;
+ case TPL_FORMAT_RGB565: wl_format = WL_DRM_FORMAT_RGB565; break;
+ default:
+ TPL_ERR("surface->format==Unknown");
+
+ tbm_bo_unref(bo);
+ tpl_object_unreference((tpl_object_t *)buffer);
+ return NULL;
+ }
+
+ wl_resource = (struct wl_resource *)wl_drm_create_buffer(wayland_display->wl_drm, (uint32_t)name, width, height,
+ stride, wl_format);
+
+ /* Route release events through the app's private queue and watch for them. */
+ wl_proxy_set_queue((struct wl_proxy *)wl_resource, wayland_display->proc.app.wl_queue);
+ wl_buffer_add_listener((void *)wl_resource, &buffer_release_listener, buffer);
+
+ wl_display_flush((struct wl_display *)surface->display->native_handle);
+
+ wayland_buffer->display = surface->display;
+ wayland_buffer->bo = bo;
+ wayland_buffer->proc.app.wl_resource = wl_resource;
+ wayland_buffer->proc.app.resized = TPL_FALSE;
+
+ if (reset_buffers != NULL)
+ *reset_buffers = TPL_FALSE;
+
+ TPL_LOG(3, "buffer(%p,%p) name:%d, %dx%d", buffer, wl_resource, name, width, height);
+ return buffer;
+}
+
+/* Server-side (gbm_surface) buffer creation:
+ allocates a gbm_bo through the gbm-tbm glue, takes a reference on its
+ underlying tbm bo, and wraps both in a tpl_buffer_t.
+ Returns the new buffer, or NULL on failure (stride mismatch or
+ allocation error). */
+static tpl_buffer_t *
+__tpl_wayland_surface_create_buffer_from_gbm_surface(tpl_surface_t *surface, tpl_bool_t *reset_buffers)
+{
+ /* NOTE(review): wayland_surface, bo_handle-before-alloc, format and key
+ are unused or write-only in this function. */
+ tpl_wayland_surface_t *wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+ tpl_buffer_t *buffer = NULL;
+ tpl_wayland_buffer_t *wayland_buffer = NULL;
+ tbm_bo bo;
+ tbm_bo_handle bo_handle;
+ int width, height, depth, stride;
+ tpl_format_t format;
+ unsigned int key = 0;
+
+ struct gbm_device *gbm = (struct gbm_device *)surface->display->native_handle;
+ struct gbm_surface *gbm_surface = surface->native_handle;
+ struct gbm_tbm_surface *gbm_tbm_surface = (struct gbm_tbm_surface *)gbm_surface;
+ struct gbm_bo *gbm_bo = NULL;
+ struct gbm_tbm_bo *gbm_tbm_bo = NULL;
+
+ __tpl_wayland_display_get_window_info(surface->display, surface->native_handle,
+ &width, &height, &format);
+
+ /* Depth is forced to 32 regardless of the reported format. */
+ depth = 32;//TPL_FORMAT_GET_DEPTH(format);
+
+ stride = ALIGN_TO_8BYTE(width * depth / 8);
+
+ /* gbm does not support stride so we must ensure the width is same as stride. */
+ if (width > 1 && (width * depth / 8 != stride) )
+ {
+ TPL_WARN("Unsupported stride %d", stride);
+ return NULL;
+ }
+
+ /* Allocate a buffer */
+ gbm_bo = gbm_bo_create(gbm, width, height,
+ gbm_tbm_surface_get_format(gbm_tbm_surface),
+ gbm_tbm_surface_get_flags(gbm_tbm_surface));
+
+ if (gbm_bo == NULL)
+ {
+ TPL_WARN("Failed to allocate gbm_bo | gbm:%p %dx%d", gbm, width, height);
+ return NULL;
+ }
+
+ /* The gbm_bo owns the tbm bo; take our own reference so the tpl buffer
+ can outlive (or be torn down independently of) the gbm_bo. */
+ gbm_tbm_bo = (struct gbm_tbm_bo *)(gbm_bo);
+ bo = tbm_bo_ref(gbm_tbm_bo_get_tbm_bo(gbm_tbm_bo));
+
+ /* Create tpl buffer. */
+ bo_handle = tbm_bo_get_handle(bo, TBM_DEVICE_3D);
+
+ buffer = __tpl_buffer_alloc(surface, (int)bo_handle.u32,
+ (int)bo_handle.u32, width, height, depth, stride,
+ tbm_bo_export(bo));
+ if (buffer == NULL)
+ {
+ tbm_bo_unref(bo);
+ TPL_WARN("Failed to allocate tpl buffer | surf:%p bo_hnd:%d WxHxD:%dx%dx%d",
+ surface, (int) bo_handle.u32, width, height, depth);
+ return NULL;
+ }
+
+ wayland_buffer = (tpl_wayland_buffer_t *)calloc(1, sizeof(tpl_wayland_buffer_t));
+ if (wayland_buffer == NULL)
+ {
+ tbm_bo_unref(bo);
+ tpl_object_unreference((tpl_object_t *)buffer);
+ TPL_WARN("Failed to allocate wayland buffer (calloc)");
+ return NULL;
+ }
+ buffer->backend.data = (void *)wayland_buffer;
+
+ /* Post process */
+ wayland_buffer->display = surface->display;
+ wayland_buffer->bo = bo;
+ wayland_buffer->proc.comp.gbm_bo = gbm_bo;
+
+ if (reset_buffers != NULL)
+ *reset_buffers = TPL_FALSE;
+
+ TPL_LOG(3, "buffer:%p gbm_bo:%p bo_hnd:%d, %dx%d", buffer, gbm_bo, (int) bo_handle.u32, width, height);
+
+ return buffer;
+}
+
+/* Pixmap-side buffer creation: wraps a client-submitted wl_drm buffer
+ (surface->native_handle is a wl_resource) in a tpl_buffer_t.
+ Previously wrapped buffers are served from the display's buffer cache;
+ cache misses create a new wrapper and insert it. Returns NULL only on
+ allocation failure. */
+static tpl_buffer_t *
+__tpl_wayland_surface_create_buffer_from_wl_drm(tpl_surface_t *surface, tpl_bool_t *reset_buffers)
+{
+ /* NOTE(review): wayland_surface is unused in this function. */
+ tpl_wayland_surface_t *wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+ tpl_buffer_t *buffer = NULL;
+ tpl_wayland_buffer_t *wayland_buffer = NULL;
+ tbm_bo bo;
+ tbm_bo_handle bo_handle;
+ int width = 0, height = 0, depth, stride;
+ tpl_format_t format = TPL_FORMAT_INVALID;
+ unsigned int key = 0;
+
+ tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)surface->display->backend.data;
+ struct wl_drm_buffer *drm_buffer = NULL;
+
+ TPL_ASSERT(wayland_display->wl_drm != NULL);
+
+ /* Get the allocated buffer */
+ drm_buffer = wayland_drm_buffer_get(wayland_display->wl_drm, (struct wl_resource *)surface->native_handle);
+
+ buffer = __tpl_wayland_surface_buffer_cache_find(&wayland_display->proc.comp.cached_buffers, drm_buffer);
+ if (buffer != NULL)
+ {
+ /* Cache hit: rebind the wrapper to this surface and add a reference
+ for the caller. */
+ __tpl_buffer_set_surface(buffer, surface);
+ tpl_object_reference((tpl_object_t *)buffer);
+ }
+ else
+ {
+ __tpl_wayland_display_get_pixmap_info(surface->display, surface->native_handle,
+ &width, &height, &format);
+
+ /* Depth is forced to 32 regardless of the reported format. */
+ depth = 32;//TPL_FORMAT_GET_DEPTH(format);
+
+ stride = drm_buffer->stride[0];
+
+ bo = tbm_bo_ref((tbm_bo)wayland_drm_buffer_get_buffer(drm_buffer));
+
+ /* Create tpl buffer. */
+ bo_handle = tbm_bo_get_handle(bo, TBM_DEVICE_3D);
+ /* NOTE(review): using the wl_drm_buffer pointer as a 32-bit cache key
+ truncates on LP64 — two buffers could collide; confirm key width. */
+ key = (unsigned int)drm_buffer;
+ buffer = __tpl_buffer_alloc(surface, key,
+ (int)bo_handle.u32, width, height, depth, stride,
+ tbm_bo_export(bo));
+ if (buffer == NULL)
+ {
+ tbm_bo_unref(bo);
+ return NULL;
+ }
+
+ wayland_buffer = (tpl_wayland_buffer_t *)calloc(1, sizeof(tpl_wayland_buffer_t));
+ if (wayland_buffer == NULL)
+ {
+ tbm_bo_unref(bo);
+ tpl_object_unreference((tpl_object_t *)buffer);
+ return NULL;
+ }
+ buffer->backend.data = (void *)wayland_buffer;
+
+ /* Post process */
+ wayland_buffer->display = surface->display;
+ wayland_buffer->bo = bo;
+ buffer->key = key;
+ __tpl_wayland_surface_buffer_cache_add(&wayland_display->proc.comp.cached_buffers, buffer); /* TODO: do we need error handle? */
+ }
+
+ if (reset_buffers != NULL)
+ *reset_buffers = TPL_FALSE;
+
+ return buffer;
+}
+
+/* Return the buffer to render the current frame into, creating one on
+ demand. Dispatches on surface type and display kind:
+ - window + wl_display -> create_buffer_from_wl_egl()
+ - window + gbm_device -> create_buffer_from_gbm_surface()
+ - pixmap -> create_buffer_from_wl_drm()
+ If a current buffer already exists, the wl_egl path additionally checks
+ for a window resize and, if found, invalidates all queued buffers and
+ replaces the current one (*reset_buffers set to TPL_TRUE). */
+static tpl_buffer_t *
+__tpl_wayland_surface_get_buffer(tpl_surface_t *surface, tpl_bool_t *reset_buffers)
+{
+ tpl_wayland_surface_t *wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+ int i;
+
+ if (reset_buffers != NULL)
+ *reset_buffers = TPL_FALSE;
+
+ TPL_LOG(3, "window(%p, %p), current(%p)", surface, surface->native_handle,
+ wayland_surface->current_rendering_buffer);
+
+ if (wayland_surface->current_rendering_buffer == NULL)
+ {
+ if (surface->type == TPL_SURFACE_TYPE_WINDOW)
+ {
+ if (__tpl_wayland_display_is_wl_display(surface->display->native_handle))
+ {
+ wayland_surface->current_rendering_buffer =
+ __tpl_wayland_surface_create_buffer_from_wl_egl(surface, reset_buffers);
+ }
+ if (__tpl_wayland_display_is_gbm_device(surface->display->native_handle))
+ {
+ wayland_surface->current_rendering_buffer =
+ __tpl_wayland_surface_create_buffer_from_gbm_surface(surface, reset_buffers);
+ }
+ }
+ if (surface->type == TPL_SURFACE_TYPE_PIXMAP)
+ {
+ wayland_surface->current_rendering_buffer =
+ __tpl_wayland_surface_create_buffer_from_wl_drm(surface, reset_buffers);
+ }
+ }
+ else
+ {
+ int reused = 1;
+
+ if (surface->type == TPL_SURFACE_TYPE_WINDOW &&
+ __tpl_wayland_display_is_wl_display(surface->display->native_handle))
+ {
+ int width, height;
+
+ __tpl_wayland_display_get_window_info(surface->display, surface->native_handle,
+ &width, &height, NULL);
+
+ /* Check whether the surface was resized by wayland_egl */
+ if (width != wayland_surface->current_rendering_buffer->width ||
+ height != wayland_surface->current_rendering_buffer->height)
+ {
+ int count;
+ TPL_LOG(3, "window(%p, %p) size changed: %dx%d -> %dx%d", surface, surface->native_handle,
+ wayland_surface->current_rendering_buffer->width, wayland_surface->current_rendering_buffer->height,
+ width, height);
+
+ /* Marks 'resized' on done queue. Rendered buffers can be used by server(other process).
+ So these buffers will be destroyed when reuse. */
+ tpl_list_node_t *node = tpl_list_get_front_node(&wayland_surface->done_rendering_queue);
+ while (node != NULL)
+ {
+ tpl_buffer_t *node_buffer = (tpl_buffer_t *)tpl_list_node_get_data(node);
+ tpl_wayland_buffer_t *node_wayland_buffer = (tpl_wayland_buffer_t *)node_buffer->backend.data;
+
+ node_wayland_buffer->proc.app.resized = TPL_TRUE;
+ node = tpl_list_node_next(node);
+ }
+
+ /* Throw away all able queue. (these are completely free buffers.)
+ And reconstruct renderable buffer queue */
+ count = tpl_list_get_count(&wayland_surface->able_rendering_queue);
+ tpl_list_fini(&wayland_surface->able_rendering_queue, (tpl_free_func_t)__tpl_wayland_surface_buffer_free);
+ /* NULL placeholders keep the able-queue length unchanged so
+ begin_frame() still finds "slots"; each NULL pop triggers a
+ fresh allocation. */
+ for (i = 0; i < count; i++)
+ {
+ tpl_list_push_back(&wayland_surface->able_rendering_queue, NULL);
+ }
+
+ /* Replace current current buffer */
+ TPL_LOG(3, "free current buffer(%p)", wayland_surface->current_rendering_buffer);
+ __tpl_wayland_surface_buffer_free(wayland_surface->current_rendering_buffer);
+ wayland_surface->current_rendering_buffer =
+ __tpl_wayland_surface_create_buffer_from_wl_egl(surface, reset_buffers);
+
+ reused = 0;
+ if (reset_buffers != NULL)
+ *reset_buffers = TPL_TRUE;
+
+ }
+ }
+
+ if (surface->type == TPL_SURFACE_TYPE_WINDOW)
+ {
+ tpl_wayland_buffer_t *tpl_wayland_buffer;
+
+ tpl_wayland_buffer = (tpl_wayland_buffer_t *)wayland_surface->current_rendering_buffer->backend.data;
+ tpl_wayland_buffer->reused = reused;
+
+ }
+ }
+
+ TPL_LOG(3, "\t buffer(%p) key:%d", wayland_surface->current_rendering_buffer, wayland_surface->current_rendering_buffer->key);
+ return wayland_surface->current_rendering_buffer;
+}
+
+/* Per-buffer backend init hook: the Wayland backend keeps no extra state
+ at this point, so initialization trivially succeeds. */
+static tpl_bool_t
+__tpl_wayland_buffer_init(tpl_buffer_t *buffer)
+{
+ TPL_IGNORE(buffer);
+ return TPL_TRUE;
+}
+
+/* Tear down the backend state of a tpl buffer: drop the tbm bo reference,
+ destroy the client-side wl_buffer proxy (wl_display case) or the
+ server-side gbm_bo (gbm case), then free the wrapper struct. */
+static void
+__tpl_wayland_buffer_fini(tpl_buffer_t *buffer)
+{
+ TPL_LOG(3, "tpl_buffer(%p) key:%d fd:%d %dx%d", buffer, buffer->key, buffer->fd, buffer->width, buffer->height);
+
+ if (buffer->backend.data)
+ {
+ tpl_wayland_buffer_t *wayland_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+
+ if (wayland_buffer->bo != NULL)
+ {
+ tbm_bo_unref(wayland_buffer->bo);
+ wayland_buffer->bo = NULL;
+ }
+
+ if (__tpl_wayland_display_is_wl_display(wayland_buffer->display->native_handle))
+ {
+ /* Flush first so any pending requests referencing the buffer
+ reach the compositor before the proxy goes away. */
+ wl_display_flush((struct wl_display *)wayland_buffer->display->native_handle);
+
+ if (wayland_buffer->proc.app.wl_resource != NULL)
+ wl_buffer_destroy((void *)wayland_buffer->proc.app.wl_resource);
+ }
+ if (__tpl_wayland_display_is_gbm_device(wayland_buffer->display->native_handle))
+ {
+ if (wayland_buffer->proc.comp.gbm_bo != NULL)
+ {
+ gbm_bo_destroy(wayland_buffer->proc.comp.gbm_bo);
+ }
+ }
+
+ buffer->backend.data = NULL;
+ free(wayland_buffer);
+ }
+}
+
+/* Return a CPU-accessible pointer for the buffer's tbm bo.
+ 'size' is ignored; the whole bo is addressed. Note this uses
+ tbm_bo_get_handle() (no map/unmap pairing), so there is nothing for
+ __tpl_wayland_buffer_unmap() to undo. */
+static void *
+__tpl_wayland_buffer_map(tpl_buffer_t *buffer, int size)
+{
+ tpl_wayland_buffer_t *wayland_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+ tbm_bo_handle handle;
+
+ TPL_ASSERT(wayland_buffer != NULL);
+ TPL_ASSERT(wayland_buffer->bo != NULL);
+
+ handle = tbm_bo_get_handle(wayland_buffer->bo, TBM_DEVICE_CPU);
+ return handle.ptr;
+}
+
+/* Counterpart of __tpl_wayland_buffer_map(): the map path only fetches a
+ persistent CPU handle, so there is nothing to release here. */
+static void
+__tpl_wayland_buffer_unmap(tpl_buffer_t *buffer, void *ptr, int size)
+{
+ TPL_IGNORE(size);
+ TPL_IGNORE(ptr);
+ TPL_IGNORE(buffer);
+}
+
+/* Lock the buffer's tbm bo for the given usage by mapping it on the
+ matching tbm device (MM for GPU access, CPU for host access).
+ Returns TPL_TRUE when the map succeeded, TPL_FALSE otherwise; a
+ successful lock is released by __tpl_wayland_buffer_unlock(). */
+static tpl_bool_t
+__tpl_wayland_buffer_lock(tpl_buffer_t *buffer, tpl_lock_usage_t usage)
+{
+ tpl_wayland_buffer_t *wayland_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+ tbm_bo_handle handle;
+
+ TPL_ASSERT(wayland_buffer != NULL);
+ TPL_ASSERT(wayland_buffer->bo != NULL);
+
+ switch (usage)
+ {
+ case TPL_LOCK_USAGE_GPU_READ:
+ handle = tbm_bo_map(wayland_buffer->bo, TBM_DEVICE_MM, TBM_OPTION_READ);
+ break;
+ case TPL_LOCK_USAGE_GPU_WRITE:
+ handle = tbm_bo_map(wayland_buffer->bo, TBM_DEVICE_MM, TBM_OPTION_WRITE);
+ break;
+ case TPL_LOCK_USAGE_CPU_READ:
+ handle = tbm_bo_map(wayland_buffer->bo, TBM_DEVICE_CPU, TBM_OPTION_READ);
+ break;
+ case TPL_LOCK_USAGE_CPU_WRITE:
+ handle = tbm_bo_map(wayland_buffer->bo, TBM_DEVICE_CPU, TBM_OPTION_WRITE);
+ break;
+ default:
+ TPL_ASSERT(TPL_FALSE);
+ return TPL_FALSE;
+ }
+
+ /* tbm_bo_map() yields a NULL/zero handle on failure. The previous code
+ had this test inverted (`!= 0 || != NULL` -> TPL_FALSE), reporting
+ failure for every successful map and success for failed ones. */
+ if (handle.u32 == 0 && handle.ptr == NULL)
+ return TPL_FALSE;
+
+ return TPL_TRUE;
+}
+
+/* Release the tbm mapping acquired by __tpl_wayland_buffer_lock(). */
+static void
+__tpl_wayland_buffer_unlock(tpl_buffer_t *buffer)
+{
+ tpl_wayland_buffer_t *w_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+
+ TPL_ASSERT(w_buffer != NULL);
+ TPL_ASSERT(w_buffer->bo != NULL);
+
+ tbm_bo_unmap(w_buffer->bo);
+}
+
+/* Export the buffer as a native wl_buffer (wl_drm protocol object) that
+ can be handed to the application, detached from TPL's private queue.
+ Returns the wl_resource pointer, or NULL on failure.
+ NOTE(review): failure paths return TPL_FALSE from a void* function —
+ works only because TPL_FALSE is 0; should be NULL for clarity. */
+static void *
+__tpl_wayland_buffer_create_native_buffer(tpl_buffer_t *buffer)
+{
+ tpl_surface_t *surface = buffer->surface;
+ tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)surface->display->backend.data;
+ tpl_wayland_buffer_t *wayland_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+ struct wl_resource *wl_resource = NULL;
+ uint32_t wl_format = 0;
+ unsigned int name = 0;
+
+ if (wayland_display->wl_drm == NULL)
+ return TPL_FALSE;
+
+ switch (surface->format)
+ {
+ case TPL_FORMAT_ARGB8888: wl_format = WL_DRM_FORMAT_ARGB8888; break;
+ case TPL_FORMAT_XRGB8888: wl_format = WL_DRM_FORMAT_XRGB8888; break;
+ case TPL_FORMAT_RGB565: wl_format = WL_DRM_FORMAT_RGB565; break;
+ default: return TPL_FALSE;
+ }
+
+ name = tbm_bo_export(wayland_buffer->bo);
+
+ wl_resource = (struct wl_resource *)wl_drm_create_buffer(wayland_display->wl_drm, (uint32_t)name,
+ buffer->width, buffer->height, buffer->pitch, wl_format);
+
+ /* Remove from the default queue. */
+ if (wl_resource)
+ wl_proxy_set_queue((struct wl_proxy *)wl_resource, NULL);
+
+ return (void *)wl_resource;
+}
+
+/* Backend selector: the Wayland backend serves both client-side
+ wl_display handles and server-side gbm_device handles. */
+tpl_bool_t
+__tpl_display_choose_backend_wayland(tpl_handle_t native_dpy)
+{
+ if (native_dpy == NULL)
+ return TPL_FALSE;
+
+ return (__tpl_wayland_display_is_wl_display(native_dpy) ||
+ __tpl_wayland_display_is_gbm_device(native_dpy)) ? TPL_TRUE : TPL_FALSE;
+}
+
+/* Report whether get_buffer() handed back a previously-used buffer
+ (as opposed to a freshly allocated one). */
+tpl_bool_t __tpl_wayland_buffer_get_reused_flag(tpl_buffer_t *buffer)
+{
+ tpl_wayland_buffer_t *w_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+
+ return (w_buffer->reused) ? TPL_TRUE : TPL_FALSE;
+}
+
+/* Populate the display backend vtable with the Wayland implementations. */
+void
+__tpl_display_init_backend_wayland(tpl_display_backend_t *backend)
+{
+ backend->type = TPL_BACKEND_WAYLAND;
+ backend->data = NULL;
+
+ backend->init = __tpl_wayland_display_init;
+ backend->fini = __tpl_wayland_display_fini;
+ backend->query_config = __tpl_wayland_display_query_config;
+ backend->get_window_info = __tpl_wayland_display_get_window_info;
+ backend->get_pixmap_info = __tpl_wayland_display_get_pixmap_info;
+ backend->flush = __tpl_wayland_display_flush;
+}
+
+/* Populate the surface backend vtable with the Wayland implementations. */
+void
+__tpl_surface_init_backend_wayland(tpl_surface_backend_t *backend)
+{
+ backend->type = TPL_BACKEND_WAYLAND;
+ backend->data = NULL;
+
+ backend->init = __tpl_wayland_surface_init;
+ backend->fini = __tpl_wayland_surface_fini;
+ backend->begin_frame = __tpl_wayland_surface_begin_frame;
+ backend->end_frame = __tpl_wayland_surface_end_frame;
+ backend->validate_frame = __tpl_wayland_surface_validate_frame;
+ backend->get_buffer = __tpl_wayland_surface_get_buffer;
+ backend->post = __tpl_wayland_surface_post;
+}
+
+/* Populate the buffer backend vtable with the Wayland implementations. */
+void
+__tpl_buffer_init_backend_wayland(tpl_buffer_backend_t *backend)
+{
+ backend->type = TPL_BACKEND_WAYLAND;
+ backend->data = NULL;
+
+ backend->init = __tpl_wayland_buffer_init;
+ backend->fini = __tpl_wayland_buffer_fini;
+ backend->map = __tpl_wayland_buffer_map;
+ backend->unmap = __tpl_wayland_buffer_unmap;
+ backend->lock = __tpl_wayland_buffer_lock;
+ backend->unlock = __tpl_wayland_buffer_unlock;
+ backend->create_native_buffer = __tpl_wayland_buffer_create_native_buffer;
+ backend->get_reused_flag = __tpl_wayland_buffer_get_reused_flag;
+}
+
+/**********************************************************************************/
+
+/* wl_drm "device" event: open the DRM node advertised by the compositor
+ and begin the magic-based authentication handshake.
+ Fix: the previous version passed an unchecked open() result (-1 on
+ failure) straight into drmGetMagic() and ignored its return value. */
+static void
+__cb_client_wayland_drm_handle_device(void *user_data, struct wl_drm *drm, const char *device)
+{
+ tpl_display_t *display = (tpl_display_t *)user_data;
+ tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)display->backend.data;
+ drm_magic_t magic;
+
+ TPL_IGNORE(drm);
+
+ display->bufmgr_fd = open(device, O_RDWR | O_CLOEXEC);
+ if (display->bufmgr_fd == -1)
+ {
+ TPL_ERR("Failed to open DRM device");
+ return;
+ }
+
+ if (drmGetMagic(display->bufmgr_fd, &magic) != 0)
+ {
+ TPL_ERR("drmGetMagic failed");
+ return;
+ }
+
+ wl_drm_authenticate(wayland_display->wl_drm, magic);
+}
+
+/* wl_drm "format" event: advertised pixel formats are not tracked by
+ this backend, so the event is deliberately ignored. */
+static void
+__cb_client_wayland_drm_handle_format(void *data, struct wl_drm *drm, uint32_t format)
+{
+ TPL_IGNORE(format);
+ TPL_IGNORE(drm);
+ TPL_IGNORE(data);
+}
+
+/* wl_drm "authenticated" event: record that the compositor accepted our
+ DRM magic; __tpl_wayland_display_init polls this flag. */
+static void
+__cb_client_wayland_drm_handle_authenticated(void *user_data, struct wl_drm *drm)
+{
+ tpl_display_t *display = (tpl_display_t *)user_data;
+ tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)display->backend.data;
+
+ TPL_IGNORE(drm);
+
+ wayland_display->proc.app.authenticated = TPL_TRUE;
+}
+
+/* wl_drm "capabilities" event: capability bits (e.g. prime) are not used
+ by this backend, so the event is deliberately ignored. */
+static void
+__cb_client_wayland_drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t value)
+{
+ TPL_IGNORE(value);
+ TPL_IGNORE(drm);
+ TPL_IGNORE(data);
+}
+
+/* Client-side wl_drm event handlers (order must match wl_drm_listener). */
+static const struct wl_drm_listener wl_drm_client_listener =
+{
+ __cb_client_wayland_drm_handle_device,
+ __cb_client_wayland_drm_handle_format,
+ __cb_client_wayland_drm_handle_authenticated,
+ __cb_client_wayland_drm_handle_capabilities
+};
+
+/* Registry "global" event: bind the compositor's wl_drm global (capped at
+ interface version 2) and attach our client listener to it. */
+static void
+__cb_client_registry_handle_global(void *user_data, struct wl_registry *registry, uint32_t name,
+ const char *interface, uint32_t version)
+{
+ tpl_display_t *display = (tpl_display_t *)user_data;
+ tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)display->backend.data;
+
+ if (strcmp(interface, "wl_drm") == 0)
+ {
+ wayland_display->wl_drm = wl_registry_bind(registry, name, &wl_drm_interface, (version > 2) ? 2 : version);
+ wl_drm_add_listener(wayland_display->wl_drm, &wl_drm_client_listener, display);
+ }
+}
+
+/* Registry listener: only "global" is handled; "global_remove" is NULL. */
+static const struct wl_registry_listener registry_listener =
+{
+ __cb_client_registry_handle_global,
+ NULL
+};
+
+/* wl_display_sync() completion: flag the waiter's 'done' integer and
+ destroy the one-shot callback proxy. */
+static void
+__cb_client_sync_callback(void *data, struct wl_callback *callback, uint32_t serial)
+{
+ int *done = (int *)data;
+
+ *done = 1;
+ wl_callback_destroy(callback);
+}
+
+/* Listener for roundtrip sync callbacks (see __cb_client_sync_callback). */
+static const struct wl_callback_listener sync_listener =
+{
+ __cb_client_sync_callback
+};
+
+/* Frame-done callback: intentionally a no-op beyond destroying the proxy. */
+static void
+__cb_client_frame_callback(void *data, struct wl_callback *callback, uint32_t time)
+{
+ /* We moved the buffer reclaim logic to buffer_release_callback().
+ buffer_release_callback() is more suitable point to delete or reuse buffer instead of frame_callback().
+ But we remain this callback because buffer_release_callback() works only when frame_callback() is activated.*/
+ TPL_IGNORE(data);
+ TPL_IGNORE(time);
+
+ wl_callback_destroy(callback);
+}
+
+/* Listener for per-frame callbacks (see __cb_client_frame_callback). */
+static const struct wl_callback_listener frame_listener =
+{
+ __cb_client_frame_callback
+};
+
+/* wl_buffer "release" event: the compositor is done with 'buffer'.
+ Under the surface lock, find it in the done queue and either recycle it
+ into the able queue or — if the window was resized while it was out —
+ free it and push a NULL placeholder (begin_frame allocates a fresh
+ buffer when it pops NULL). Finally drop the reference taken when the
+ buffer was posted. */
+static void
+__cb_client_buffer_release_callback(void *data, struct wl_resource *resource)
+{
+ tpl_buffer_t *buffer = (tpl_buffer_t *)data;
+ tpl_surface_t *surface = buffer->surface;
+
+ TPL_LOG(3, "release window(%p, %p), buffer(%p), key:%d",
+ surface, surface?surface->native_handle:NULL,
+ buffer, buffer->key);
+
+ if (surface != NULL)
+ {
+ TPL_OBJECT_LOCK(surface);
+
+ /* MOVE BUFFER : [done queue] --> [able queue] */
+ {
+ tpl_wayland_surface_t *wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+ tpl_list_node_t *node = tpl_list_get_front_node(&wayland_surface->done_rendering_queue);
+
+ while (node != NULL)
+ {
+ tpl_buffer_t *node_buffer = (tpl_buffer_t *)tpl_list_node_get_data(node);
+
+ if (node_buffer == buffer)
+ {
+ tpl_wayland_buffer_t *wayland_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+
+ if (wayland_buffer->proc.app.resized)
+ {
+ /* Delete the buffer that had been created before resized. */
+ __tpl_wayland_surface_buffer_free(buffer);
+
+ tpl_list_push_back(&wayland_surface->able_rendering_queue, NULL);
+ }
+ else
+ {
+ /* Reuse the buffer because the buffer size is not changed and reusable. */
+ tpl_list_push_back(&wayland_surface->able_rendering_queue, buffer);
+ }
+
+ tpl_list_remove(node, NULL);
+ break;
+ }
+
+ node = tpl_list_node_next(node);
+ }
+ /* A released buffer must always come from the done queue. */
+ TPL_ASSERT(node != NULL);
+ }
+ TPL_OBJECT_UNLOCK(surface);
+ }
+
+ tpl_object_unreference((tpl_object_t *)buffer);
+}
+
+/* Listener attached to every client-side wl_buffer we create. */
+static const struct wl_buffer_listener buffer_release_listener = {
+ (void *)__cb_client_buffer_release_callback,
+};
+
+/**********************************************************************************/
+
+/* gbm-tbm hook (compositor side): lock the current front buffer for
+ scan-out. Busy-waits (yield loop, lock dropped while yielding) until
+ the done queue holds a posted buffer, maps its tbm bo for MM access,
+ and returns the associated gbm_bo. Unmapped again in
+ __cb_server_gbm_surface_release_buffer(). */
+static struct gbm_bo *
+__cb_server_gbm_surface_lock_front_buffer(struct gbm_surface *gbm_surf)
+{
+ struct gbm_tbm_surface *gbm_tbm_surf = (struct gbm_tbm_surface *)gbm_surf;
+ tpl_surface_t *surface = (tpl_surface_t *)gbm_tbm_surface_get_user_data(gbm_tbm_surf);
+ tpl_wayland_surface_t *wayland_surface = NULL;
+ tpl_buffer_t *buffer = NULL;
+ tpl_wayland_buffer_t *wayland_buffer = NULL;
+
+ TPL_ASSERT(surface != NULL);
+
+ TPL_OBJECT_LOCK(surface);
+
+ wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+
+ while (1)
+ {
+ /* Wait for posted to prevent locking not-rendered buffer. */
+ if (!tpl_list_is_empty(&wayland_surface->done_rendering_queue))
+ {
+ buffer = tpl_list_get_front(&wayland_surface->done_rendering_queue);
+ wayland_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+ if (wayland_buffer->proc.comp.posted)
+ break;
+ }
+ TPL_OBJECT_UNLOCK(surface);
+ tpl_util_sys_yield();
+ TPL_OBJECT_LOCK(surface);
+ }
+
+ TPL_ASSERT(buffer != NULL);
+
+ wayland_buffer = (tpl_wayland_buffer_t *)buffer->backend.data;
+
+ tbm_bo_map(wayland_buffer->bo, TBM_DEVICE_MM, TBM_OPTION_READ | TBM_OPTION_WRITE);
+
+ TPL_OBJECT_UNLOCK(surface);
+
+ return wayland_buffer->proc.comp.gbm_bo;
+}
+
+/* gbm-tbm hook (compositor side): release a buffer previously returned by
+ lock_front_buffer() by dropping its MM mapping. */
+static void
+__cb_server_gbm_surface_release_buffer(struct gbm_surface *gbm_surf, struct gbm_bo *gbm_bo)
+{
+ tbm_bo bo = gbm_tbm_bo_get_tbm_bo((struct gbm_tbm_bo *)gbm_bo);
+
+ TPL_IGNORE(gbm_surf);
+ TPL_ASSERT(bo);
+
+ tbm_bo_unmap(bo);
+}
+
+/* gbm-tbm hook (compositor side): report whether a front buffer is ready
+ to lock — i.e. the done queue's head buffer has been posted.
+ Returns 1 when available, 0 otherwise. */
+static int
+__cb_server_gbm_surface_has_free_buffers(struct gbm_surface *gbm_surf)
+{
+ struct gbm_tbm_surface *gbm_tbm_surf = (struct gbm_tbm_surface *)gbm_surf;
+ tpl_surface_t *surface = (tpl_surface_t *)gbm_tbm_surface_get_user_data(gbm_tbm_surf);
+ tpl_wayland_surface_t *wayland_surface;
+ int has_free = 0;
+
+ TPL_ASSERT(surface != NULL);
+
+ TPL_OBJECT_LOCK(surface);
+
+ wayland_surface = (tpl_wayland_surface_t *)surface->backend.data;
+
+ if (!tpl_list_is_empty(&wayland_surface->done_rendering_queue))
+ {
+ tpl_buffer_t *front = tpl_list_get_front(&wayland_surface->done_rendering_queue);
+ tpl_wayland_buffer_t *front_wayland_buffer = (tpl_wayland_buffer_t *)front->backend.data;
+
+ if (front_wayland_buffer->proc.comp.posted)
+ has_free = 1;
+ }
+
+ TPL_OBJECT_UNLOCK(surface);
+
+ return has_free;
+}
+
+/* wayland-drm server hook: ask the kernel to validate a client's DRM
+ magic against our master fd. Returns drmAuthMagic()'s result (0 = ok). */
+static int
+__cb_server_wayland_drm_display_authenticate(void *user_data, uint32_t magic)
+{
+ tpl_display_t *display = (tpl_display_t *)user_data;
+
+ return drmAuthMagic(display->bufmgr_fd, magic);
+}
+
+/* wayland-drm server hook: import a client's flink name into our tbm
+ bufmgr and stash the bo on the wl_drm_buffer.
+ NOTE(review): tbm_bo_import() may return NULL; the result is stored
+ unchecked — downstream users must tolerate a NULL driver_buffer. */
+static void
+__cb_server_wayland_drm_reference_buffer(void *user_data, uint32_t name, int fd, struct wl_drm_buffer *buffer)
+{
+ tpl_display_t *display = (tpl_display_t *)user_data;
+ tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)display->backend.data;
+
+ TPL_IGNORE(name);
+ TPL_IGNORE(fd);
+
+ buffer->driver_buffer = tbm_bo_import(wayland_display->bufmgr, name);
+}
+
+/* wayland-drm server hook: drop our tbm reference on a client buffer and
+ evict its wrapper from the display's buffer cache (keyed by the
+ wl_drm_buffer pointer, matching create_buffer_from_wl_drm()). */
+static void
+__cb_server_wayland_drm_unreference_buffer(void *user_data, struct wl_drm_buffer *buffer)
+{
+ tpl_display_t *display = (tpl_display_t *)user_data;
+ tpl_wayland_display_t *wayland_display = (tpl_wayland_display_t *)display->backend.data;
+
+ tbm_bo_unref(buffer->driver_buffer);
+ buffer->driver_buffer = NULL;
+
+ /* TODO: tpl_buffer is NULL, it's not right */
+ __tpl_wayland_surface_buffer_cache_remove(&wayland_display->proc.comp.cached_buffers, (unsigned int)buffer);
+}
+
+/* Server-side wayland-drm callbacks (authenticate / ref / unref). */
+static struct wayland_drm_callbacks wl_drm_server_listener =
+{
+ __cb_server_wayland_drm_display_authenticate,
+ __cb_server_wayland_drm_reference_buffer,
+ __cb_server_wayland_drm_unreference_buffer
+};
+
+#if EGL_BIND_WL_DISPLAY == 1
+
+/* EGL_WL_bind_wayland_display (server side): initialize a tbm bufmgr on a
+ dup of the gbm device fd and publish the wl_drm global on the
+ compositor's wl_display. Returns TPL_TRUE unconditionally.
+ NOTE(review): drmGetDeviceNameFromFd() returns heap memory that is
+ never freed here — confirm whether wayland_drm_init() takes ownership.
+ tbm_bufmgr_init()/wayland_drm_init() results are also unchecked. */
+EGLBoolean __egl_platform_bind_wayland_display(EGLNativeDisplayType display, struct wl_display *wayland_display)
+{
+ tpl_display_t *tpl_display = tpl_display_get(TPL_BACKEND_WAYLAND, display);
+ tpl_wayland_display_t *tpl_wayland_display = (tpl_wayland_display_t *)tpl_display->backend.data;
+ char *device_name = NULL;
+
+ tpl_display->bufmgr_fd = dup(gbm_device_get_fd(tpl_display->native_handle));
+ tpl_wayland_display->bufmgr = tbm_bufmgr_init(tpl_display->bufmgr_fd);
+
+ device_name = drmGetDeviceNameFromFd(tpl_display->bufmgr_fd);
+ tpl_wayland_display->wl_drm = wayland_drm_init((struct wl_display *)wayland_display, device_name, &wl_drm_server_listener, tpl_display, 0);
+
+ return TPL_TRUE;
+}
+
+/* EGL_WL_unbind_wayland_display: tear down the wl_drm global, the tbm
+ bufmgr and the dup'd drm fd created by the bind call above.
+ Returns TPL_FALSE if bind was never (successfully) called. */
+EGLBoolean __egl_platform_unbind_wayland_display(EGLNativeDisplayType display, struct wl_display *wayland_display)
+{
+ tpl_display_t *tpl_display = tpl_display_get(TPL_BACKEND_WAYLAND, display);
+ tpl_wayland_display_t *tpl_wayland_display = (tpl_wayland_display_t *)tpl_display->backend.data;
+
+ if (tpl_wayland_display->wl_drm == NULL)
+ return TPL_FALSE;
+
+ wayland_drm_uninit(tpl_wayland_display->wl_drm);
+ tbm_bufmgr_deinit(tpl_wayland_display->bufmgr);
+ close(tpl_display->bufmgr_fd);
+
+ return TPL_TRUE;
+}
+
+/* EGL_WL_bind_wayland_display query entry point: report texture format,
+ width or height of a client wl_buffer. Returns EGL_FALSE for unknown
+ displays, unqueryable buffers, or unsupported attributes/formats. */
+EGLBoolean __egl_platform_query_wayland_buffer(EGLNativeDisplayType display, struct wl_resource *wayland_buffer,
+ EGLint attribute, EGLint *value)
+{
+ tpl_display_t *tpl_display = tpl_display_get(TPL_BACKEND_WAYLAND, display);
+ int width = 0, height = 0;
+ tpl_format_t format = TPL_FORMAT_INVALID;
+
+
+ if(NULL == tpl_display)
+ {
+ return EGL_FALSE;
+ }
+
+ if (!tpl_get_native_pixmap_info(tpl_display, (tpl_handle_t)wayland_buffer, &width, &height, &format))
+ return EGL_FALSE;
+
+ switch (attribute)
+ {
+ case EGL_TEXTURE_FORMAT:
+ switch (format)
+ {
+ case TPL_FORMAT_ARGB8888:
+ *value = EGL_TEXTURE_RGBA;
+ break;
+ case TPL_FORMAT_XRGB8888:
+ case TPL_FORMAT_RGB565:
+ /* X/565 formats carry no alpha -> plain RGB texture. */
+ *value = EGL_TEXTURE_RGB;
+ break;
+ default:
+ return EGL_FALSE;
+ }
+ break;
+ case EGL_WIDTH:
+ *value = width;
+ break;
+ case EGL_HEIGHT:
+ *value = height;
+ break;
+ default:
+ return EGL_FALSE;
+ }
+
+ return EGL_TRUE;
+}
+#endif
--- /dev/null
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include <X11/Xproto.h>
+
+#include <libdrm/drm.h>
+#include <xf86drm.h>
+
+#include <dri2/dri2.h>
+#include <tbm_bufmgr.h>
+
+#include "tpl_internal.h"
+
+#include "tpl_x11_internal.h"
+
+static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Accessor for the X11 backend's global lock.
+ NOTE(review): this returns the pthread_mutex_t BY VALUE, so every caller
+ locks a private copy — it provides no mutual exclusion at all. The API
+ should return &global_mutex (pthread_mutex_t *); fixing it requires an
+ interface change coordinated with all callers. */
+pthread_mutex_t
+__tpl_x11_get_global_mutex()
+{
+ return global_mutex;
+}
+
+/* Parse a swap-type override string (any strtol base-0 literal) into
+ *type. Leaves *type untouched for NULL input or values that do not name
+ a known swap type. */
+void
+__tpl_x11_swap_str_to_swap_type(char *str, tpl_x11_swap_type_t *type)
+{
+ long parsed;
+
+ if (str == NULL)
+ return;
+
+ parsed = strtol(str, NULL, 0);
+
+ if (parsed == TPL_X11_SWAP_TYPE_SYNC ||
+ parsed == TPL_X11_SWAP_TYPE_ASYNC ||
+ parsed == TPL_X11_SWAP_TYPE_LAZY)
+ {
+ *type = (tpl_x11_swap_type_t)parsed;
+ }
+}
+
+/* Linear scan of the buffer cache for an entry whose key matches 'name'.
+ Returns the cached buffer, or NULL when absent. */
+tpl_buffer_t *
+__tpl_x11_surface_buffer_cache_find(tpl_list_t *buffer_cache, unsigned int name)
+{
+ tpl_list_node_t *node;
+
+ for (node = tpl_list_get_front_node(buffer_cache);
+ node != NULL;
+ node = tpl_list_node_next(node))
+ {
+ tpl_buffer_t *cached = (tpl_buffer_t *)tpl_list_node_get_data(node);
+
+ if (cached->key == name)
+ return cached;
+ }
+
+ return NULL;
+}
+
+/* Remove the first cache entry whose key matches 'name', dropping the
+ cache's reference on it. No-op when the key is not present. */
+void
+__tpl_x11_surface_buffer_cache_remove(tpl_list_t *buffer_cache, unsigned int name)
+{
+ tpl_list_node_t *node;
+
+ for (node = tpl_list_get_front_node(buffer_cache);
+ node != NULL;
+ node = tpl_list_node_next(node))
+ {
+ tpl_buffer_t *cached = (tpl_buffer_t *)tpl_list_node_get_data(node);
+
+ if (cached->key != name)
+ continue;
+
+ tpl_object_unreference(&cached->base);
+ tpl_list_remove(node, NULL);
+ return;
+ }
+}
+
+/* Insert 'buffer' into the cache (taking a reference). When the cache is
+ full, the oldest entry is evicted FIFO-style and unreferenced. */
+void
+__tpl_x11_surface_buffer_cache_add(tpl_list_t *buffer_cache, tpl_buffer_t *buffer)
+{
+ if (tpl_list_get_count(buffer_cache) >= TPL_BUFFER_CACHE_MAX_ENTRIES)
+ {
+ tpl_buffer_t *evict = tpl_list_pop_front(buffer_cache, NULL);
+ tpl_object_unreference(&evict->base);
+ }
+
+ tpl_object_reference(&buffer->base);
+ tpl_list_push_back(buffer_cache, (void *)buffer);
+}
+
+/* Empty the cache, unreferencing every cached buffer. */
+void
+__tpl_x11_surface_buffer_cache_clear(tpl_list_t *buffer_cache)
+{
+ tpl_list_fini(buffer_cache, (tpl_free_func_t)tpl_object_unreference);
+}
+
+
+/* Check whether the X server can back a surface with the requested
+ channel sizes / color depth. For window surfaces a matching visual's id
+ is reported through *native_visual_id; *is_slow is cleared on success.
+ If every constraint is TPL_DONT_CARE the query trivially succeeds.
+ Alpha size is not checked (X visuals do not advertise it).
+ Fix: the original returned from inside the XGetVisualInfo() /
+ XListPixmapFormats() loops, leaking the server-allocated lists on every
+ successful query; results are now recorded and XFree() always runs. */
+tpl_bool_t
+__tpl_x11_display_query_config(tpl_display_t *display,
+ tpl_surface_type_t surface_type, int red_size,
+ int green_size, int blue_size, int alpha_size,
+ int color_depth, int *native_visual_id, tpl_bool_t *is_slow)
+{
+ Display *native_display;
+
+ TPL_IGNORE(alpha_size);
+
+ native_display = (Display *)display->native_handle;
+
+ if (red_size != TPL_DONT_CARE || green_size != TPL_DONT_CARE ||
+ blue_size != TPL_DONT_CARE || color_depth != TPL_DONT_CARE)
+ {
+ if (surface_type == TPL_SURFACE_TYPE_WINDOW)
+ {
+ XVisualInfo *visual_formats;
+ int num_visual_formats;
+ int i;
+ tpl_bool_t found = TPL_FALSE;
+
+ visual_formats = XGetVisualInfo(native_display, 0, NULL,
+ &num_visual_formats);
+ TPL_ASSERT(visual_formats);
+ for (i = 0; i < num_visual_formats && !found; i++)
+ {
+ int clz[3];
+ int col_size[3];
+
+ /* Derive per-channel bit widths from the mask layout
+ (R above G above B in the 32-bit word). */
+ clz[0] = tpl_util_clz(visual_formats[i].red_mask);
+ clz[1] = tpl_util_clz(visual_formats[i].green_mask);
+ clz[2] = tpl_util_clz(visual_formats[i].blue_mask);
+
+ col_size[0] = clz[1] - clz[0];
+ col_size[1] = clz[2] - clz[1];
+ col_size[2] = 32 - clz[2];
+
+ if ((red_size == TPL_DONT_CARE || col_size[0] == red_size) &&
+ (green_size == TPL_DONT_CARE || col_size[1] == green_size) &&
+ (blue_size == TPL_DONT_CARE || col_size[2] == blue_size))
+ {
+ if (native_visual_id != NULL)
+ *native_visual_id = visual_formats[i].visualid;
+
+ if (is_slow != NULL)
+ *is_slow = TPL_FALSE;
+
+ found = TPL_TRUE;
+ }
+ }
+ XFree(visual_formats);
+ visual_formats = NULL;
+
+ if (found)
+ return TPL_TRUE;
+ }
+
+ if (surface_type == TPL_SURFACE_TYPE_PIXMAP)
+ {
+ XPixmapFormatValues *pixmap_formats;
+ int num_pixmap_formats;
+ int i;
+ tpl_bool_t found = TPL_FALSE;
+
+ pixmap_formats = XListPixmapFormats(native_display, &num_pixmap_formats);
+ TPL_ASSERT(pixmap_formats);
+ for (i = 0; i < num_pixmap_formats && !found; i++)
+ {
+ if (color_depth == TPL_DONT_CARE ||
+ pixmap_formats[i].depth == color_depth)
+ {
+ if (is_slow != NULL)
+ *is_slow = TPL_FALSE;
+
+ found = TPL_TRUE;
+ }
+ }
+ XFree(pixmap_formats);
+ pixmap_formats = NULL;
+
+ if (found)
+ return TPL_TRUE;
+ }
+
+ return TPL_FALSE;
+
+ }
+
+ return TPL_TRUE;
+}
+
+static void tpl_handle_and_free_error( Display *dpy, xcb_generic_error_t *error, const char* request_string )
+{
+ char error_txt[256];
+
+ if( error )
+ {
+ int len = sizeof(error_txt)/sizeof(error_txt[0]);
+
+ XGetErrorText( dpy, error->error_code, error_txt, len );
+ error_txt[ len - 1] = '\0';
+ CDBG_PRINT_WARN(CDBG_EGL, "%s failed \"[%d]:%s\"", request_string, error->error_code, error_txt );
+ free(error);
+ }
+ else
+ {
+ CDBG_PRINT_WARN(CDBG_EGL, "%s failed \"Unknown error\"", request_string );
+ }
+}
+
+/* Return TPL_TRUE when the reply is usable; otherwise log (and free)
+ * the error and return TPL_FALSE. */
+static tpl_bool_t tpl_check_reply_for_error(Display *dpy, xcb_generic_reply_t *reply, xcb_generic_error_t *error,
+					    const char *request_string)
+{
+	if (error != NULL || reply == NULL)
+	{
+		tpl_handle_and_free_error( dpy, error, request_string );
+		return TPL_FALSE;
+	}
+
+	return TPL_TRUE;
+}
+/* Return the XVisualInfo matching the given visual id, or NULL when no
+ * visual matches.  The caller owns the result and must XFree() it. */
+static XVisualInfo* tpl_find_visual( Display *dpy, xcb_visualid_t visual_id )
+{
+	XVisualInfo criteria;
+	int matching_count;
+
+	criteria.visualid = visual_id;
+
+	return XGetVisualInfo(dpy, VisualIDMask, &criteria, &matching_count);
+}
+/* Deduce the bit offset of the alpha component from the color component
+ * offsets of a 32bpp pixel: alpha must occupy either bits 0..7 (no color
+ * at offset 0) or bits 24..31 (no color at offset 24).  Returns -1 on an
+ * unsupported layout (after asserting). */
+static int tpl_get_alpha_offset( int offset_r, int offset_g, int offset_b, int bpp )
+{
+	CDBG_ASSERT_MSG( bpp == 32, "alpha only supported for 32bits pixel formats");
+
+	if (offset_r != 0 && offset_g != 0 && offset_b != 0)
+		return 0;
+
+	if (offset_r != 24 && offset_g != 24 && offset_b != 24)
+		return 24;
+
+	CDBG_ASSERT_MSG(MALI_FALSE, "Alpha component has to be at either the offset 0 or 24");
+	return -1;
+}
+/* Return the bit position of the least significant set bit of mask,
+ * scanning at most 'depth' bits; -1 when no bit is set in that range. */
+static int tpl_get_offset( unsigned long mask, int depth )
+{
+	int bit;
+
+	for (bit = 0; bit < depth; bit++, mask >>= 1)
+	{
+		if (mask & 1)
+			return bit;
+	}
+
+	return -1;
+}
+/* Convert the given combination of offsets and bpp into a color buffer format */
+static tpl_format_t tpl_offsets_to_color_buffer_format( int offset_r, int offset_g, int offset_b, int offset_a, int bpp )
+{
+ tpl_format_t retval = TPL_FORMAT_INVALID;
+
+ if ( offset_b == 11 && offset_g == 5 && offset_r == 0 && offset_a == -1 && bpp == 16)
+ {
+ retval = TPL_FORMAT_BGR565;
+ }
+ else if( offset_r == 11 && offset_g == 5 && offset_b == 0 && offset_a == -1 && bpp == 16)
+ {
+ retval = TPL_FORMAT_RGB565;
+ }
+
+ else if( offset_a == 24 && offset_b == 16 && offset_g == 8 && offset_r == 0 && bpp == 32)
+ {
+ retval = TPL_FORMAT_ABGR8888;
+ }
+ else if( offset_a == 24 && offset_r == 16 && offset_g == 8 && offset_b == 0 && bpp == 32)
+ {
+ retval = TPL_FORMAT_ARGB8888;
+ }
+ else if( offset_b == 24 && offset_g == 16 && offset_r == 8 && offset_a == 0 && bpp == 32)
+ {
+ retval = TPL_FORMAT_BGRA8888;
+ }
+ else if( offset_r == 24 && offset_g == 16 && offset_b == 8 && offset_a == 0 && bpp == 32)
+ {
+ retval = TPL_FORMAT_RGBA8888;
+ }
+
+ else if( offset_b == 16 && offset_g == 8 && offset_r == 0 && offset_a == -1 && bpp == 32)
+ {
+ retval = TPL_FORMAT_XBGR8888;
+ }
+ else if( offset_r == 16 && offset_g == 8 && offset_b == 0 && offset_a == -1 && bpp == 32)
+ {
+ retval = TPL_FORMAT_XRGB8888;
+ }
+ else if( offset_b == 24 && offset_g == 16 && offset_r == 8 && offset_a == -1 && bpp == 32)
+ {
+ retval = TPL_FORMAT_BGRX8888;
+ }
+ else if( offset_r == 24 && offset_g == 16 && offset_b == 8 && offset_a == -1 && bpp == 32)
+ {
+ retval = TPL_FORMAT_RGBX8888;
+ }
+
+ else if( offset_b == 16 && offset_g == 8 && offset_r == 0 && offset_a == -1 && bpp == 24)
+ {
+ retval = TPL_FORMAT_BGR888;
+ }
+ else if( offset_r == 16 && offset_g == 8 && offset_b == 0 && offset_a == -1 && bpp == 24)
+ {
+ retval = TPL_FORMAT_RGB888;
+ }
+
+ else if( offset_a == 12 && offset_b == 8 && offset_g == 4 && offset_r == 0 && bpp == 16)
+ {
+ retval = TPL_FORMAT_ABGR4444;
+ }
+ else if( offset_a == 12 && offset_r == 8 && offset_g == 4 && offset_b == 0 && bpp == 16)
+ {
+ retval = TPL_FORMAT_ARGB4444;
+ }
+ else if( offset_b == 12 && offset_g == 8 && offset_r == 4 && offset_a == 0 && bpp == 16)
+ {
+ retval = TPL_FORMAT_BGRA4444;
+ }
+ else if( offset_r == 12 && offset_g == 8 && offset_b == 4 && offset_a == 0 && bpp == 16)
+ {
+ retval = TPL_FORMAT_RGBA4444;
+ }
+
+ else if( offset_a == 15 && offset_b == 10 && offset_g == 5 && offset_r == 0 && bpp == 16)
+ {
+ retval = TPL_FORMAT_ABGR1555;
+ }
+ else if( offset_a == 15 && offset_r == 10 && offset_g == 5 && offset_b == 0 && bpp == 16)
+ {
+ retval = TPL_FORMAT_ARGB1555;
+ }
+ else if( offset_b == 11 && offset_g == 6 && offset_r == 1 && offset_a == 0 && bpp == 16)
+ {
+ retval = TPL_FORMAT_BGRA5551;
+ }
+ else if( offset_r == 11 && offset_g == 6 && offset_b == 1 && offset_a == 0 && bpp == 16)
+ {
+ retval = TPL_FORMAT_RGBA5551;
+ }
+
+ else
+ {
+ CDBG_PRINT_WARN(CDBG_EGL, "Format not supported: offset_r=%d, offset_g=%d, offset_b=%d, offset_a=%d, bpp=%d",
+ offset_r, offset_g, offset_b, offset_a, bpp);
+ }
+
+ return retval;
+}
+
+
+/*
+ * Query an X window's size and, when 'format' is non-NULL, resolve its
+ * TPL color format from the window's visual via XCB.
+ *
+ * Fix: the XVisualInfo returned by tpl_find_visual() was leaked on the
+ * success path (it was only freed under the error label).
+ *
+ * NOTE(review): XGetWindowAttributes() returns zero Status on failure;
+ * comparing against BadWindow only catches one specific code — confirm
+ * whether `!x_res` was intended before changing it.
+ */
+tpl_bool_t
+__tpl_x11_display_get_window_info(tpl_display_t *display, tpl_handle_t window,
+				  int *width, int *height, tpl_format_t *format, int depth, int a_size)
+{
+	Status x_res;
+	XWindowAttributes att;
+
+	x_res = XGetWindowAttributes((Display *)display->native_handle, (Window)window, &att);
+
+	if (x_res == BadWindow)
+		return TPL_FALSE;
+
+	if (width != NULL) *width = att.width;
+	if (height != NULL) *height = att.height;
+
+	/* Size-only query: done. */
+	if (format == NULL)
+		return TPL_TRUE;
+
+	/* Format lookup needs the XCB connection set up at display init. */
+	if (display->xcb_connection == NULL)
+		return TPL_FALSE;
+
+	tpl_bool_t res;
+	int r, g, b, a;
+	int bpp = depth;
+	xcb_generic_error_t *attr_error;
+	xcb_get_window_attributes_reply_t *attr_reply;
+	xcb_get_window_attributes_cookie_t attr_cookie;
+	XVisualInfo *visual_info = NULL;
+
+	attr_cookie = xcb_get_window_attributes( display->xcb_connection, window );
+	attr_reply = xcb_get_window_attributes_reply( display->xcb_connection, attr_cookie, &attr_error );
+
+	res = tpl_check_reply_for_error((Display *)display->native_handle, (xcb_generic_reply_t *)attr_reply, attr_error, "xcb_get_window_attributes");
+	if (res == TPL_FALSE)
+		goto error;
+
+	visual_info = tpl_find_visual( (Display *)display->native_handle, attr_reply->visual );
+	free( attr_reply );
+	if (visual_info == NULL)
+		goto error;
+
+	/* Find the position of each component inside the pixel: (RGB, BGR, etc) */
+	r = tpl_get_offset(visual_info->red_mask, bpp );
+	g = tpl_get_offset(visual_info->green_mask, bpp );
+	b = tpl_get_offset(visual_info->blue_mask, bpp );
+
+	/* Fix: release the visual list on the success path as well. */
+	XFree(visual_info);
+	visual_info = NULL;
+
+	/* [FIXME] Temporary fix to enforce ARGB8888 */
+	if (bpp == 32)
+		a_size = 8;
+
+	if (a_size != 0)
+	{
+		/* What's its offset ? (ARGB, BGRA, etc) */
+		a = tpl_get_alpha_offset( r, g, b, bpp );
+	}
+	else
+	{
+		a = -1;
+	}
+
+	*format = tpl_offsets_to_color_buffer_format( r, g, b, a, bpp );
+	return TPL_TRUE;
+
+error:
+	if (NULL != visual_info)
+		XFree(visual_info);
+	return TPL_FALSE;
+}
+
+tpl_bool_t
+__tpl_x11_display_get_pixmap_info(tpl_display_t *display, tpl_handle_t pixmap,
+ int *width, int *height, tpl_format_t *format)
+{
+ Status x_res;
+ Window root = None;
+ int x, y;
+ unsigned int w, h, bw, d;
+
+ x_res = XGetGeometry((Display *)display->native_handle, (Pixmap)pixmap, &root,
+ &x, &y, &w, &h, &bw, &d);
+
+ if (x_res != BadDrawable)
+ {
+ if (format != NULL)
+ {
+ switch (d)
+ {
+ case 32: *format = TPL_FORMAT_ARGB8888; break;
+ case 24: *format = TPL_FORMAT_XRGB8888; break;
+ case 16: *format = TPL_FORMAT_RGB565; break;
+ default: *format = TPL_FORMAT_INVALID; break;
+ }
+ }
+ if (width != NULL) *width = w;
+ if (height != NULL) *height = h;
+ if (format != NULL)
+ *format = TPL_FORMAT_ARGB8888;/*TODO: temp for argb8888*/
+ return TPL_TRUE;
+ }
+
+ return TPL_FALSE;
+}
+
+/* Flush all pending requests and wait until the X server has processed
+ * them. */
+void
+__tpl_x11_display_flush(tpl_display_t *display)
+{
+	Display *dpy = (Display *)display->native_handle;
+
+	XFlush(dpy);
+	XSync(dpy, False);
+}
+
+
+/* No per-buffer initialization is needed for the X11 backend. */
+tpl_bool_t
+__tpl_x11_buffer_init(tpl_buffer_t *buffer)
+{
+	TPL_IGNORE(buffer);
+
+	return TPL_TRUE;
+}
+
+/* Release the TBM buffer object backing this buffer.
+ * NOTE(review): the map/unmap pair before the final unref looks like a
+ * deliberate trick to fence outstanding 3D reads before releasing the bo
+ * — confirm against libtbm semantics before simplifying. */
+void
+__tpl_x11_buffer_fini(tpl_buffer_t *buffer)
+{
+	if (buffer->backend.data)
+	{
+		tbm_bo_map((tbm_bo)buffer->backend.data, TBM_DEVICE_3D, TBM_OPTION_READ);
+		tbm_bo_unmap((tbm_bo)buffer->backend.data);
+		tbm_bo_unref((tbm_bo)buffer->backend.data);
+		buffer->backend.data = NULL;
+	}
+}
+
+/* Return a CPU-accessible pointer for the buffer.  The size argument is
+ * unused; the pointer comes straight from the bo's CPU handle (no
+ * tbm_bo_map lock is taken here). */
+void *
+__tpl_x11_buffer_map(tpl_buffer_t *buffer, int size)
+{
+	tbm_bo bo = (tbm_bo)buffer->backend.data;
+
+	TPL_ASSERT(bo);
+
+	return tbm_bo_get_handle(bo, TBM_DEVICE_CPU).ptr;
+}
+
+/* Intentionally a no-op: __tpl_x11_buffer_map does not take any lock
+ * that would need releasing here. */
+void
+__tpl_x11_buffer_unmap(tpl_buffer_t *buffer, void *ptr, int size)
+{
+	TPL_IGNORE(buffer);
+	TPL_IGNORE(ptr);
+	TPL_IGNORE(size);
+}
+
+/*
+ * Map the buffer's bo for the requested usage; __tpl_x11_buffer_unlock()
+ * releases the mapping.  Returns TPL_TRUE on success.
+ *
+ * Fix: the success check was inverted — a successful tbm_bo_map()
+ * (non-zero handle) was reported as failure and a zeroed (failed) handle
+ * as success.
+ */
+tpl_bool_t
+__tpl_x11_buffer_lock(tpl_buffer_t *buffer, tpl_lock_usage_t usage)
+{
+	tbm_bo bo;
+	tbm_bo_handle handle;
+
+	bo = (tbm_bo)buffer->backend.data;
+	TPL_ASSERT(bo);
+
+	switch (usage)
+	{
+		case TPL_LOCK_USAGE_GPU_READ:
+			handle = tbm_bo_map(bo, TBM_DEVICE_3D, TBM_OPTION_READ);
+			break;
+		case TPL_LOCK_USAGE_GPU_WRITE:
+			handle = tbm_bo_map(bo, TBM_DEVICE_3D, TBM_OPTION_WRITE);
+			break;
+		case TPL_LOCK_USAGE_CPU_READ:
+			handle = tbm_bo_map(bo, TBM_DEVICE_CPU, TBM_OPTION_READ);
+			break;
+		case TPL_LOCK_USAGE_CPU_WRITE:
+			handle = tbm_bo_map(bo, TBM_DEVICE_CPU, TBM_OPTION_WRITE);
+			break;
+		default:
+			TPL_ASSERT(TPL_FALSE);
+			return TPL_FALSE;
+	}
+
+	/* tbm_bo_map() yields an all-zero handle on failure. */
+	if (handle.u32 == 0 && handle.ptr == NULL)
+		return TPL_FALSE;
+
+	return TPL_TRUE;
+}
+
+/* Release the mapping taken by __tpl_x11_buffer_lock. */
+void
+__tpl_x11_buffer_unlock(tpl_buffer_t *buffer)
+{
+	tbm_bo bo = (tbm_bo)buffer->backend.data;
+
+	TPL_ASSERT(bo);
+
+	tbm_bo_unmap(bo);
+}
+
+/* Report whether the server handed back a previously-used DRI2 buffer. */
+tpl_bool_t __tpl_x11_buffer_get_reused_flag(tpl_buffer_t *buffer)
+{
+	return DRI2_BUFFER_IS_REUSED(buffer->backend.flags) ? TPL_TRUE : TPL_FALSE;
+}
+
+/* Block until the X server has processed all outstanding requests for
+ * this display.  No-op when the native handle is missing. */
+void __tpl_x11_display_wait_native(tpl_display_t *display)
+{
+	Display *xlib_display = (Display *)display->native_handle;
+
+	if (xlib_display == NULL)
+		return;
+
+	/* Leave events in the queue since we only care they have arrived. */
+	XSync(xlib_display, 0);
+}
+
--- /dev/null
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include <X11/Xproto.h>
+
+#include <libdrm/drm.h>
+#include <xf86drm.h>
+
+#include <dri2/dri2.h>
+#include <tbm_bufmgr.h>
+
+#include "tpl_internal.h"
+
+#include "tpl_x11_internal.h"
+
+
+typedef struct _tpl_x11_dri2_surface tpl_x11_dri2_surface_t;
+
+/* Per-surface private data for the DRI2 backend, hung off
+ * tpl_surface_t::backend.data. */
+struct _tpl_x11_dri2_surface
+{
+	int latest_post_interval;	/* last interval sent via DRI2SwapInterval */
+	XserverRegion damage;		/* cached XFixes region reused across posts */
+	tpl_list_t buffer_cache;	/* tpl_buffer_t cache keyed by DRI2 name */
+	tpl_buffer_t *latest_render_target;	/* buffer of the last finished frame */
+};
+
+
+
+/* Process-wide DRI2 state shared by all displays.
+ * Positional initializers — presumably display_count, worker_display,
+ * bufmgr_fd, bufmgr, win_swap_type, fb_swap_type; TODO confirm against
+ * tpl_x11_internal.h. */
+static tpl_x11_global_t global =
+{
+	0,
+	NULL,
+	-1,
+	NULL,
+	TPL_X11_SWAP_TYPE_ASYNC,
+	TPL_X11_SWAP_TYPE_SYNC
+};
+
+/* Return the dummy Display reserved for the worker thread.
+ * NOTE(review): __tpl_x11_get_global_mutex() is received BY VALUE, so
+ * pthread_mutex_lock() below operates on a local copy and provides no
+ * real mutual exclusion — the accessor should hand out a pointer.  TODO
+ * confirm and fix across all callers. */
+static Display *
+__tpl_x11_dri2_get_worker_display(void)
+{
+	Display *display;
+	pthread_mutex_t mutex = __tpl_x11_get_global_mutex();
+
+	pthread_mutex_lock(&mutex);
+	TPL_ASSERT(global.display_count > 0);
+
+	/* Use dummy display for worker thread. :-) */
+	display = global.worker_display;
+
+	pthread_mutex_unlock(&mutex);
+
+	return display;
+}
+
+/*
+ * Present a frame via DRI2, either with a full swap (empty damage) or a
+ * region-limited swap.  Worker threads must use the dedicated worker
+ * Display, never the application's.
+ *
+ * Fixes: the heap-allocated XRectangle array (used when the damage has
+ * more rects than the stack buffer) was leaked, and a failed malloc()
+ * would have been dereferenced — it now falls back to a full swap.
+ */
+static void
+__tpl_x11_dri2_surface_post_internal(tpl_surface_t *surface, tpl_frame_t *frame,
+				     tpl_bool_t is_worker)
+{
+	Display *display;
+	Drawable drawable;
+	CARD64 swap_count;
+	tpl_x11_dri2_surface_t *x11_surface;
+	XRectangle *xrects;
+	XRectangle xrects_stack[TPL_STACK_XRECTANGLE_SIZE];
+	int interval = frame->interval;
+
+	x11_surface = (tpl_x11_dri2_surface_t *)surface->backend.data;
+
+	if (is_worker)
+		display = __tpl_x11_dri2_get_worker_display();
+	else
+		display = surface->display->native_handle;
+
+	drawable = (Drawable)surface->native_handle;
+
+	/* DRI2 rejects a swap interval below 1. */
+	if (interval < 1)
+		interval = 1;
+
+	if (interval != x11_surface->latest_post_interval)
+	{
+		DRI2SwapInterval(display, drawable, interval);
+		x11_surface->latest_post_interval = interval;
+	}
+
+	if (tpl_region_is_empty(&frame->damage))
+	{
+		DRI2SwapBuffers(display, drawable, 0, 0, 0, &swap_count);
+	}
+	else
+	{
+		int i;
+
+		if (frame->damage.num_rects > TPL_STACK_XRECTANGLE_SIZE)
+		{
+			xrects = (XRectangle *)malloc(sizeof(XRectangle) *
+						      frame->damage.num_rects);
+		}
+		else
+		{
+			xrects = &xrects_stack[0];
+		}
+
+		if (xrects == NULL)
+		{
+			/* Allocation failed: degrade to a full-surface swap. */
+			DRI2SwapBuffers(display, drawable, 0, 0, 0, &swap_count);
+		}
+		else
+		{
+			for (i = 0; i < frame->damage.num_rects; i++)
+			{
+				const int *rects = &frame->damage.rects[i * 4];
+
+				/* TPL damage rects are bottom-up; X is top-down. */
+				xrects[i].x = rects[0];
+				xrects[i].y = frame->buffer->height - rects[1] - rects[3];
+				xrects[i].width = rects[2];
+				xrects[i].height = rects[3];
+			}
+
+			if (x11_surface->damage == None)
+			{
+				x11_surface->damage =
+					XFixesCreateRegion(display, xrects, frame->damage.num_rects);
+			}
+			else
+			{
+				XFixesSetRegion(display, x11_surface->damage,
+						xrects, frame->damage.num_rects);
+			}
+
+			DRI2SwapBuffersWithRegion(display, drawable, x11_surface->damage, &swap_count);
+
+			/* Fix: the heap-allocated rectangle array was leaked. */
+			if (xrects != &xrects_stack[0])
+				free(xrects);
+		}
+	}
+
+	frame->state = TPL_FRAME_STATE_POSTED;
+}
+
+/* Display initialization for the DRI2 backend: ensures an Xlib/XCB
+ * connection, and on the first display sets up the worker Display, DRI2
+ * and the TBM buffer manager shared by the whole process.
+ *
+ * NOTE(review): __tpl_x11_get_global_mutex() is received BY VALUE, so the
+ * lock below acts on a local copy and does not actually serialize —
+ * confirm and switch to a pointer-returning accessor.
+ * NOTE(review): open(dev) and drmGetMagic() results are unchecked, and
+ * the drv/dev strings returned by DRI2Connect() appear to be leaked. */
+static tpl_bool_t
+__tpl_x11_dri2_display_init(tpl_display_t *display)
+{
+	pthread_mutex_t mutex = __tpl_x11_get_global_mutex();
+	if (display->native_handle == NULL)
+	{
+		display->native_handle = XOpenDisplay(NULL);
+		TPL_ASSERT(display->native_handle != NULL);
+	}
+	/* The XCB connection is needed later for window format queries. */
+	display->xcb_connection = XGetXCBConnection( (Display*)display->native_handle );
+	if( NULL == display->xcb_connection )
+	{
+		CDBG_PRINT_WARN( CDBG_EGL, "XGetXCBConnection failed");
+	}
+
+	pthread_mutex_lock(&mutex);
+
+	if (global.display_count == 0)
+	{
+		Bool xres = False;
+		char *drv = NULL;
+		char *dev = NULL;
+		int major = -1;
+		int minor = -1;
+		int event_base = -1;
+		int error_base = -1;
+		Window root = 0;
+		drm_magic_t magic;
+
+		/* Open a dummy display connection. */
+		global.worker_display = XOpenDisplay(NULL);
+		TPL_ASSERT(global.worker_display != NULL);
+
+		/* Get default root window. */
+		root = DefaultRootWindow(global.worker_display);
+
+		/* Initialize DRI2. */
+		xres = DRI2QueryExtension(global.worker_display, &event_base, &error_base);
+		TPL_ASSERT(xres == True);
+
+		xres = DRI2QueryVersion(global.worker_display, &major, &minor);
+		TPL_ASSERT(xres == True);
+
+		/* DRI2Connect yields the driver name and DRM device path. */
+		xres = DRI2Connect(global.worker_display, root, &drv, &dev);
+		TPL_ASSERT(xres == True);
+
+		/* Initialize buffer manager. */
+		global.bufmgr_fd = open(dev, O_RDWR);
+		drmGetMagic(global.bufmgr_fd, &magic);
+		global.bufmgr = tbm_bufmgr_init(global.bufmgr_fd);
+
+		/* DRI2 authentication. */
+		xres = DRI2Authenticate(global.worker_display, root, magic);
+		TPL_ASSERT(xres == True);
+
+		/* Initialize swap type configuration. */
+		__tpl_x11_swap_str_to_swap_type(getenv(EGL_X11_WINDOW_SWAP_TYPE_ENV_NAME),
+						&global.win_swap_type);
+
+		__tpl_x11_swap_str_to_swap_type(getenv(EGL_X11_FB_SWAP_TYPE_ENV_NAME),
+						&global.fb_swap_type);
+	}
+
+	global.display_count++;
+	display->bufmgr_fd = global.bufmgr_fd;
+
+	pthread_mutex_unlock(&mutex);
+	return TPL_TRUE;
+}
+
+/* Display teardown: drop one reference on the process-global DRI2 state
+ * and release everything when the last display goes away.
+ * NOTE(review): as in the other callers, the global mutex is copied by
+ * value here, so this lock does not actually serialize — confirm and
+ * fix together with __tpl_x11_get_global_mutex(). */
+static void
+__tpl_x11_dri2_display_fini(tpl_display_t *display)
+{
+
+	pthread_mutex_t mutex = __tpl_x11_get_global_mutex();
+	TPL_IGNORE(display);
+	pthread_mutex_lock(&mutex);
+
+	if (--global.display_count == 0)
+	{
+		tbm_bufmgr_deinit(global.bufmgr);
+		close(global.bufmgr_fd);
+		XCloseDisplay(global.worker_display);
+
+		global.worker_display = NULL;
+		global.bufmgr_fd = -1;
+		global.bufmgr = NULL;
+	}
+
+	pthread_mutex_unlock(&mutex);
+
+}
+
+/*
+ * Per-surface init for the DRI2 backend: query the native geometry,
+ * allocate the private state and register the drawable with DRI2.
+ *
+ * Fix: removed the unused local `env_str_comp`.
+ */
+static tpl_bool_t
+__tpl_x11_dri2_surface_init(tpl_surface_t *surface)
+{
+	Display *display;
+	Drawable drawable;
+	tpl_x11_dri2_surface_t *x11_surface;
+	tpl_format_t format = TPL_FORMAT_INVALID;
+
+	if (surface->type == TPL_SURFACE_TYPE_WINDOW)
+	{
+		if (!__tpl_x11_display_get_window_info(surface->display, surface->native_handle,
+						       &surface->width, &surface->height, NULL, 0, 0))
+			return TPL_FALSE;
+	}
+	else
+	{
+		if (!__tpl_x11_display_get_pixmap_info(surface->display, surface->native_handle,
+						       &surface->width, &surface->height, &format))
+			return TPL_FALSE;
+	}
+
+	x11_surface = (tpl_x11_dri2_surface_t *)calloc(1, sizeof(tpl_x11_dri2_surface_t));
+	if (x11_surface == NULL)
+	{
+		TPL_ASSERT(TPL_FALSE);
+		return TPL_FALSE;
+	}
+
+	/* Force the swap interval to be programmed on the first post. */
+	x11_surface->latest_post_interval = -1;
+	tpl_list_init(&x11_surface->buffer_cache);
+
+	display = (Display *)surface->display->native_handle;
+	drawable = (Drawable)surface->native_handle;
+	DRI2CreateDrawable(display, drawable);
+
+	surface->backend.data = (void *)x11_surface;
+
+	return TPL_TRUE;
+}
+
+/* Tear down per-surface DRI2 state: drop cached buffers, the damage
+ * region and the private struct, then unregister the drawable. */
+static void
+__tpl_x11_dri2_surface_fini(tpl_surface_t *surface)
+{
+	Display *dpy = (Display *)surface->display->native_handle;
+	Drawable xdrawable = (Drawable)surface->native_handle;
+	tpl_x11_dri2_surface_t *priv = (tpl_x11_dri2_surface_t *)surface->backend.data;
+
+	if (priv != NULL)
+	{
+		__tpl_x11_surface_buffer_cache_clear(&priv->buffer_cache);
+
+		if (priv->damage)
+			XFixesDestroyRegion(dpy, priv->damage);
+
+		free(priv);
+	}
+
+	DRI2DestroyDrawable(dpy, xdrawable);
+	surface->backend.data = NULL;
+}
+
+
+/* Backend post hook: always takes the worker-thread path. */
+static void
+__tpl_x11_dri2_surface_post(tpl_surface_t *surface, tpl_frame_t *frame)
+{
+	__tpl_x11_dri2_surface_post_internal(surface, frame, TPL_TRUE);
+}
+
+/* In SYNC swap mode, block at the start of a new frame until every
+ * previously submitted frame has been posted.  Window surfaces only. */
+static void
+__tpl_x11_surface_begin_frame(tpl_surface_t *surface)
+{
+	tpl_frame_t *last;
+	int is_fb, sync;
+
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+		return;
+
+	last = __tpl_surface_get_latest_frame(surface);
+	if (last == NULL || last->state == TPL_FRAME_STATE_POSTED)
+		return;
+
+	/* Framebuffer and window targets have independent swap policies. */
+	is_fb = DRI2_BUFFER_IS_FB(last->buffer->backend.flags);
+	sync = is_fb ? (global.fb_swap_type == TPL_X11_SWAP_TYPE_SYNC)
+		     : (global.win_swap_type == TPL_X11_SWAP_TYPE_SYNC);
+
+	if (sync)
+		__tpl_surface_wait_all_frames(surface);
+}
+
+/* In LAZY swap mode, reject a frame that would render into the same
+ * buffer as the previous frame until all frames have drained.  Returns
+ * TPL_FALSE when the caller must retry. */
+static tpl_bool_t
+__tpl_x11_surface_validate_frame(tpl_surface_t *surface)
+{
+	tpl_x11_dri2_surface_t *priv = (tpl_x11_dri2_surface_t *)surface->backend.data;
+	int is_fb, lazy;
+
+	if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+		return TPL_TRUE;
+
+	if (surface->frame == NULL)
+		return TPL_TRUE;
+
+	/* Framebuffer and window targets have independent swap policies. */
+	is_fb = DRI2_BUFFER_IS_FB(surface->frame->buffer->backend.flags);
+	lazy = is_fb ? (global.fb_swap_type == TPL_X11_SWAP_TYPE_LAZY)
+		     : (global.win_swap_type == TPL_X11_SWAP_TYPE_LAZY);
+
+	if (lazy && priv->latest_render_target == surface->frame->buffer)
+	{
+		__tpl_surface_wait_all_frames(surface);
+		return TPL_FALSE;
+	}
+
+	return TPL_TRUE;
+}
+
+/* Record the frame's render target and, in ASYNC swap mode, post the
+ * frame immediately from the calling thread. */
+static void
+__tpl_x11_surface_end_frame(tpl_surface_t *surface)
+{
+	tpl_frame_t *frame = __tpl_surface_get_latest_frame(surface);
+	tpl_x11_dri2_surface_t *priv = (tpl_x11_dri2_surface_t *)surface->backend.data;
+	int is_fb, async;
+
+	if (frame == NULL)
+		return;
+
+	priv->latest_render_target = frame->buffer;
+
+	/* Framebuffer and window targets have independent swap policies. */
+	is_fb = DRI2_BUFFER_IS_FB(frame->buffer->backend.flags);
+	async = is_fb ? (global.fb_swap_type == TPL_X11_SWAP_TYPE_ASYNC)
+		      : (global.win_swap_type == TPL_X11_SWAP_TYPE_ASYNC);
+
+	if (async)
+		__tpl_x11_dri2_surface_post_internal(surface, frame, TPL_FALSE);
+}
+
+/*
+ * Fetch the surface's current render target via DRI2GetBuffers, reusing
+ * cached tpl_buffer_t objects when the server reports the buffer as
+ * reused.  Sets *reset_buffers when previously handed-out buffers must
+ * be discarded by the caller.
+ *
+ * Fix: the result of __tpl_buffer_alloc() was dereferenced without a
+ * NULL check; on failure the imported bo is now released and NULL is
+ * returned.
+ */
+static tpl_buffer_t *
+__tpl_x11_dri2_surface_get_buffer(tpl_surface_t *surface, tpl_bool_t *reset_buffers)
+{
+	tpl_buffer_t *buffer = NULL;
+	Display *display;
+	Drawable drawable;
+	DRI2Buffer *dri2_buffers;
+	uint32_t attachments[1] = { DRI2BufferBackLeft };
+	tbm_bo bo;
+	tbm_bo_handle bo_handle;
+	int width, height, num_buffers;
+	tpl_x11_dri2_surface_t *x11_surface = (tpl_x11_dri2_surface_t *)surface->backend.data;
+
+	if (surface->type == TPL_SURFACE_TYPE_PIXMAP)
+		attachments[0] = DRI2BufferFrontLeft;
+
+	display = (Display *)surface->display->native_handle;
+	drawable = (Drawable)surface->native_handle;
+
+	/* Get the current buffer via DRI2. */
+	dri2_buffers = DRI2GetBuffers(display, drawable,
+				      &width, &height, attachments, 1, &num_buffers);
+	if (dri2_buffers == NULL)
+		goto err_buffer;
+
+	if (DRI2_BUFFER_IS_REUSED(dri2_buffers[0].flags))
+	{
+		/* Buffer is reused. So it should be in the buffer cache.
+		 * However, sometimes we get a strange result of having reused flag for a newly
+		 * received buffer. I don't know the meaning of such cases but just handle it. */
+		buffer = __tpl_x11_surface_buffer_cache_find(&x11_surface->buffer_cache, dri2_buffers[0].name);
+
+		if (buffer)
+		{
+			/* Need to update buffer flag */
+			buffer->backend.flags = dri2_buffers[0].flags;
+			/* just update the buffer age. */
+#if (TIZEN_FEATURES_ENABLE)
+			buffer->age = DRI2_BUFFER_GET_AGE(dri2_buffers[0].flags);
+#endif
+			goto done;
+		}
+	}
+	else
+	{
+		/* Buffer configuration of the server is changed. We have to reset all previsouly
+		 * received buffers. */
+		__tpl_x11_surface_buffer_cache_clear(&x11_surface->buffer_cache);
+	}
+
+	/* Create a TBM buffer object for the buffer name. */
+	bo = tbm_bo_import(global.bufmgr, dri2_buffers[0].name);
+	if (bo == NULL)
+	{
+		TPL_ASSERT(TPL_FALSE);
+		goto done;
+	}
+
+	bo_handle = tbm_bo_get_handle(bo, TBM_DEVICE_3D);
+
+	/* Create tpl buffer. */
+	buffer = __tpl_buffer_alloc(surface, dri2_buffers[0].name, (int)bo_handle.u32,
+				    width, height, dri2_buffers[0].cpp * 8, dri2_buffers[0].pitch);
+	if (buffer == NULL)
+	{
+		/* Fix: do not dereference a failed allocation; drop the bo. */
+		tbm_bo_unref(bo);
+		goto done;
+	}
+
+#if (TIZEN_FEATURES_ENABLE)
+	buffer->age = DRI2_BUFFER_GET_AGE(dri2_buffers[0].flags);
+#endif
+	buffer->backend.data = (void *)bo;
+	buffer->backend.flags = dri2_buffers[0].flags;
+
+	/* Add the buffer to the buffer cache. The cache will hold a reference to the buffer. */
+	__tpl_x11_surface_buffer_cache_add(&x11_surface->buffer_cache, buffer);
+	tpl_object_unreference(&buffer->base);
+
+done:
+	if (reset_buffers)
+	{
+		/* Users use this output value to check if they have to reset previous buffers. */
+		*reset_buffers = !DRI2_BUFFER_IS_REUSED(dri2_buffers[0].flags) ||
+			width != surface->width || height != surface->height;
+	}
+
+	XFree(dri2_buffers);
+err_buffer:
+	return buffer;
+}
+
+/* Backend selector: accepts any native display handle, so X11/DRI2 acts
+ * as the fallback and all more specific backends must be probed first. */
+tpl_bool_t
+__tpl_display_choose_backend_x11_dri2(tpl_handle_t native_dpy)
+{
+	/* X11 display accepts any type of handle. So other backends must be choosen before this. */
+	return TPL_TRUE;
+}
+
+/* Populate the display backend vtable with the X11/DRI2 entry points. */
+void
+__tpl_display_init_backend_x11_dri2(tpl_display_backend_t *backend)
+{
+	backend->type = TPL_BACKEND_X11_DRI2;
+	backend->data = NULL;
+
+	backend->init = __tpl_x11_dri2_display_init;
+	backend->fini = __tpl_x11_dri2_display_fini;
+	backend->query_config = __tpl_x11_display_query_config;
+	backend->get_window_info = __tpl_x11_display_get_window_info;
+	backend->get_pixmap_info = __tpl_x11_display_get_pixmap_info;
+	backend->flush = __tpl_x11_display_flush;
+	backend->wait_native = __tpl_x11_display_wait_native;
+}
+
+/* Populate the surface backend vtable with the X11/DRI2 entry points. */
+void
+__tpl_surface_init_backend_x11_dri2(tpl_surface_backend_t *backend)
+{
+	backend->type = TPL_BACKEND_X11_DRI2;
+	backend->data = NULL;
+
+	backend->init = __tpl_x11_dri2_surface_init;
+	backend->fini = __tpl_x11_dri2_surface_fini;
+	backend->begin_frame = __tpl_x11_surface_begin_frame;
+	backend->end_frame = __tpl_x11_surface_end_frame;
+	backend->validate_frame = __tpl_x11_surface_validate_frame;
+	backend->get_buffer = __tpl_x11_dri2_surface_get_buffer;
+	backend->post = __tpl_x11_dri2_surface_post;
+}
+
+/* Populate the buffer backend vtable with the X11/DRI2 entry points. */
+void
+__tpl_buffer_init_backend_x11_dri2(tpl_buffer_backend_t *backend)
+{
+	backend->type = TPL_BACKEND_X11_DRI2;
+	backend->data = NULL;
+
+	backend->init = __tpl_x11_buffer_init;
+	backend->fini = __tpl_x11_buffer_fini;
+	backend->map = __tpl_x11_buffer_map;
+	backend->unmap = __tpl_x11_buffer_unmap;
+	backend->lock = __tpl_x11_buffer_lock;
+	backend->unlock = __tpl_x11_buffer_unlock;
+	backend->get_reused_flag = __tpl_x11_buffer_get_reused_flag;
+}
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <dlfcn.h>
+
+#include <EGL/egl.h>
+/*#include <EGL/mali_egl.h>*/
+
+#include <X11/Xlib-xcb.h>
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include <X11/Xproto.h>
+#include <X11/extensions/Xfixes.h>
+
+
+#include <libdrm/drm.h>
+#include <xf86drm.h>
+
+#include <X11/xshmfence.h>
+#include <xcb/xcb.h>
+#include <xcb/dri3.h>
+#include <xcb/xcbext.h>
+#include <xcb/present.h>
+#include <xcb/sync.h>
+
+#include <tbm_bufmgr.h>
+
+
+#include "tpl_internal.h"
+
+#include "tpl_x11_internal.h"
+
+static int dri3_max_back = 0;/*max number of back buffer*/
+#define DRI3_NUM_BUFFERS 20
+#define DRI3_BUFFER_REUSED 0x08
+
+#define MALI_DEBUG_PRINT(string,args) do {} while(0)
+
+typedef struct _tpl_x11_dri3_surface tpl_x11_dri3_surface_t;
+
+/* Per-surface private data for the DRI3 backend. */
+struct _tpl_x11_dri3_surface
+{
+	int latest_post_interval;	/* last swap interval programmed */
+	XserverRegion damage;		/* cached XFixes region for damage posts */
+	tpl_list_t buffer_cache;	/* tpl_buffer_t cache */
+	tpl_buffer_t *latest_render_target;
+
+	void *drawable;			/* associated dri3_drawable state */
+};
+
+/* Whether a dri3_buffer backs the back buffer or the (fake) front. */
+enum dri3_buffer_type
+{
+	dri3_buffer_back = 0,
+	dri3_buffer_front = 1
+};
+
+/* One DRI3/Present buffer: a TBM bo exported as an X pixmap, plus the
+ * xshmfence used to synchronize with the X server. */
+typedef struct _dri3_buffer
+{
+	tbm_bo tbo;
+	uint32_t pixmap;
+	uint32_t sync_fence;	/* XID of X SyncFence object */
+	struct xshmfence *shm_fence;	/* pointer to xshmfence object */
+	uint32_t busy;	/* Set on swap, cleared on IdleNotify */
+	void *driverPrivate;
+
+	/* param of buffer */
+	uint32_t size;
+	uint32_t pitch;
+	uint32_t cpp;
+	uint32_t flags;
+	int32_t width, height, depth;
+	uint64_t last_swap;
+	int32_t own_pixmap;	/* We allocated the pixmap ID, free on destroy */
+	uint32_t dma_buf_fd;	/* fd of dma buffer */
+	/* [BEGIN: 20141125-xuelian.bai] Add old dma fd to save old fd
+	 * before use new fd */
+	uint32_t old_dma_fd;
+	/* [END: 20141125-xuelian.bai] */
+	enum dri3_buffer_type buffer_type;	/* back=0,front=1 */
+} dri3_buffer;
+
+/* Per-drawable DRI3/Present state, shared by all surfaces on the same
+ * X drawable. */
+typedef struct _dri3_drawable
+{
+	Display *dpy;
+	XID xDrawable;
+
+	tbm_bufmgr bufmgr;	/* tbm bufmgr */
+
+	int32_t width, height, depth;
+	int32_t swap_interval;
+	uint8_t have_back;
+	uint8_t have_fake_front;
+	tpl_bool_t is_pixmap;	/* whether the drawable is pixmap */
+	uint8_t flipping;	/* whether the drawable can use pageFlip */
+
+	uint32_t present_capabilities;	/* Present extension capabilities */
+	uint64_t send_sbc;	/* swap buffer counter */
+	uint64_t recv_sbc;
+	uint64_t ust, msc;	/* Last received UST/MSC values */
+	uint32_t send_msc_serial;	/* Serial numbers for tracking wait_for_msc events */
+	uint32_t recv_msc_serial;
+
+	dri3_buffer *buffers[DRI3_NUM_BUFFERS];	/* buffer array of all buffers */
+	int cur_back;
+
+	uint32_t stamp;
+	xcb_present_event_t eid;
+	xcb_special_event_t *special_event;	/* queue for Present events */
+} dri3_drawable;
+
+/* Mapping from an X drawable id to its dri3_drawable state; entries live
+ * in dri3_drawable_list. */
+typedef struct _dri3_drawable_node
+{
+	XID xDrawable;
+	dri3_drawable *drawable;
+} dri3_drawable_node;
+/* Process-wide backend state.  Positional initializers — presumably
+ * display_count, worker_display, bufmgr_fd, bufmgr, win_swap_type,
+ * fb_swap_type; TODO confirm against tpl_x11_internal.h. */
+static tpl_x11_global_t global =
+{
+	0,
+	NULL,
+	-1,
+	NULL,
+	TPL_X11_SWAP_TYPE_ASYNC,
+	TPL_X11_SWAP_TYPE_SYNC
+};
+
+/* All known drawable nodes. */
+static tpl_list_t dri3_drawable_list;
+static void
+dri3_free_render_buffer(dri3_drawable *pdraw, dri3_buffer *buffer);
+
+
+/* Thin wrappers around xshmfence / XCB Sync used to hand fences between
+ * the client and the X server. */
+
+/* Arm the fence (clear its triggered state). */
+static inline void
+dri3_fence_reset(dri3_buffer *buffer)
+{
+	xshmfence_reset(buffer->shm_fence);
+}
+
+/* Signal the fence from the client side. */
+static inline void
+dri3_fence_set(dri3_buffer *buffer)
+{
+	xshmfence_trigger(buffer->shm_fence);
+}
+
+/* Ask the X server to trigger the fence (by its XID). */
+static inline void
+dri3_fence_trigger(xcb_connection_t *c, dri3_buffer *buffer)
+{
+	xcb_sync_trigger_fence(c, buffer->sync_fence);
+}
+
+/* Flush the connection, then block until the fence fires. */
+static inline void
+dri3_fence_await(xcb_connection_t *c, dri3_buffer *buffer)
+{
+	xcb_flush(c);
+	xshmfence_await(buffer->shm_fence);
+}
+
+/* Non-blocking query of the fence state. */
+static inline tpl_bool_t
+dri3_fence_triggered(dri3_buffer *buffer)
+{
+	return xshmfence_query(buffer->shm_fence);
+}
+
+
+/******************************************
+ * dri3_handle_present_event
+ * Process one Present event from the X server and free it.
+ *****************************************/
+void
+dri3_handle_present_event(dri3_drawable *priv, xcb_present_generic_event_t *ge)
+{
+	switch (ge->evtype)
+	{
+		case XCB_PRESENT_CONFIGURE_NOTIFY:
+		{
+			/* Drawable was resized: track the new geometry. */
+			xcb_present_configure_notify_event_t *ce = (void *) ge;
+			MALI_DEBUG_PRINT(0,
+					("%s: XCB_PRESENT_CONFIGURE_NOTIFY\n", __func__));
+			priv->width = ce->width;
+			priv->height = ce->height;
+			break;
+		}
+		case XCB_PRESENT_COMPLETE_NOTIFY:
+		{
+			xcb_present_complete_notify_event_t *ce = (void *) ge;
+			MALI_DEBUG_PRINT(0,
+					("%s: XCB_PRESENT_COMPLETE_NOTIFY\n", __func__));
+			/* Compute the processed SBC number from the received
+			 * 32-bit serial number merged with the upper 32-bits
+			 * of the sent 64-bit serial number while checking for
+			 * wrap
+			 */
+			if (ce->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
+			{
+				priv->recv_sbc =
+					(priv->send_sbc & 0xffffffff00000000LL) |
+					ce->serial;
+				if (priv->recv_sbc > priv->send_sbc)
+					priv->recv_sbc -= 0x100000000;
+				/* Remember whether the server flipped or copied. */
+				switch (ce->mode)
+				{
+					case XCB_PRESENT_COMPLETE_MODE_FLIP:
+						priv->flipping = 1;
+						break;
+					case XCB_PRESENT_COMPLETE_MODE_COPY:
+						priv->flipping = 0;
+						break;
+				}
+			} else
+			{
+				priv->recv_msc_serial = ce->serial;
+			}
+			priv->ust = ce->ust;
+			priv->msc = ce->msc;
+			break;
+		}
+		case XCB_PRESENT_EVENT_IDLE_NOTIFY:
+		{
+			/* Server is done with a pixmap: mark its buffer idle.
+			 * NOTE(review): the tbm_bo_unref here presumably drops a
+			 * reference taken at swap time — confirm the matching ref
+			 * before touching this. */
+			xcb_present_idle_notify_event_t *ie = (void *) ge;
+			uint32_t b;
+
+			for (b = 0; b < sizeof (priv->buffers) / sizeof (priv->buffers[0]); b++)
+			{
+				dri3_buffer *buf = priv->buffers[b];
+
+				if (buf && buf->pixmap == ie->pixmap)
+				{
+					MALI_DEBUG_PRINT(0,
+							("%s: id=%d XCB_PRESENT_EVENT_IDLE_NOTIFY\n", __func__, b));
+					buf->busy = 0;
+					tbm_bo_unref(priv->buffers[b]->tbo);
+					break;
+				}
+			}
+			break;
+		}
+	}
+	free(ge);
+}
+
+ /******************************************************
+ * dri3_flush_present_events
+ *
+ * Process any present events that have been received from the X server
+ * called when get buffer or swap buffer
+ ******************************************************/
+static void
+dri3_flush_present_events(dri3_drawable *priv)
+{
+ xcb_connection_t *c = XGetXCBConnection(priv->dpy);
+
+ /* Check to see if any configuration changes have occurred
+ * since we were last invoked
+ */
+ if (priv->special_event)
+ {
+ xcb_generic_event_t *ev;
+
+ while ((ev = xcb_poll_for_special_event(c, priv->special_event)) != NULL)
+ {
+ xcb_present_generic_event_t *ge = (void *) ev;
+ dri3_handle_present_event(priv, ge);
+ }
+ }
+}
+
+
+/** dri3_update_drawable
+ *
+ * Called the first time we use the drawable and then
+ * after we receive present configure notify events to
+ * track the geometry of the drawable
+ *
+ * On first use this registers for Present special events and queries
+ * geometry/capabilities. If the select-input request fails with
+ * BadWindow the drawable is actually a pixmap: the special event
+ * queue is torn down again and is_pixmap is set.
+ *
+ * Returns EGL_TRUE on success, EGL_FALSE on a hard X error.
+ */
+static int
+dri3_update_drawable(void *loaderPrivate)
+{
+    dri3_drawable *priv = loaderPrivate;
+    xcb_connection_t *c = XGetXCBConnection(priv->dpy);
+    xcb_extension_t xcb_present_id = { "Present", 0 };
+
+    /* First time through, go get the current drawable geometry
+     */ /*TODO*/
+    if (priv->special_event == NULL)
+    {
+        xcb_get_geometry_cookie_t geom_cookie;
+        xcb_get_geometry_reply_t *geom_reply;
+        xcb_void_cookie_t cookie;
+        xcb_generic_error_t *error;
+        xcb_present_query_capabilities_cookie_t present_capabilities_cookie;
+        xcb_present_query_capabilities_reply_t *present_capabilities_reply;
+
+        /* Try to select for input on the window.
+         *
+         * If the drawable is a window, this will get our events
+         * delivered.
+         *
+         * Otherwise, we'll get a BadWindow error back from this
+         * request which will let us know that the drawable is a
+         * pixmap instead.
+         */
+        cookie = xcb_present_select_input_checked(c,
+                                                  (priv->eid = xcb_generate_id(c)),
+                                                  priv->xDrawable,
+                                                  XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY|
+                                                  XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY|
+                                                  XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
+
+        present_capabilities_cookie = xcb_present_query_capabilities(c, priv->xDrawable);
+
+        /* Create an XCB event queue to hold present events outside of the usual
+         * application event queue
+         */
+        priv->special_event = xcb_register_for_special_xge(c,
+                                                           &xcb_present_id,
+                                                           priv->eid,
+                                                           &priv->stamp);
+
+        geom_cookie = xcb_get_geometry(c, priv->xDrawable);
+
+        geom_reply = xcb_get_geometry_reply(c, geom_cookie, NULL);
+        TPL_ASSERT(geom_reply != NULL);
+
+        priv->width = geom_reply->width;
+        priv->height = geom_reply->height;
+        priv->depth = geom_reply->depth;
+        priv->is_pixmap = EGL_FALSE;
+
+        free(geom_reply);
+
+        /* Check to see if our select input call failed. If it failed
+         * with a BadWindow error, then assume the drawable is a pixmap.
+         * Destroy the special event queue created above and mark the
+         * drawable as a pixmap
+         */
+        error = xcb_request_check(c, cookie);
+
+        present_capabilities_reply = xcb_present_query_capabilities_reply(c,
+                                                                          present_capabilities_cookie,
+                                                                          NULL);
+
+        if (present_capabilities_reply)
+        {
+            priv->present_capabilities = present_capabilities_reply->capabilities;
+            free(present_capabilities_reply);
+        } else
+            priv->present_capabilities = 0;
+
+        if (error)
+        {
+            MALI_DEBUG_PRINT(0, ("%s:select input error=%d\n",
+                                 __func__, error->error_code));
+            if (error->error_code != BadWindow)
+            {
+                free(error);
+                return EGL_FALSE;
+            }
+            /* fix: the error object was leaked on the BadWindow path */
+            free(error);
+            priv->is_pixmap = EGL_TRUE;
+            xcb_unregister_for_special_event(c, priv->special_event);
+            priv->special_event = NULL;
+        }
+    }
+    dri3_flush_present_events(priv);
+    return EGL_TRUE;
+}
+
+/** dri3_get_pixmap_buffer
+ *
+ * Get the DRM object for a pixmap from the X server
+ *
+ * The imported buffer is cached in pdraw->buffers[dri3_max_back]
+ * (the front-buffer slot) and reused, with an extra tbo ref, on
+ * subsequent calls. Returns NULL on failure.
+ */
+static dri3_buffer *
+dri3_get_pixmap_buffer(void *loaderPrivate, Pixmap pixmap)/*TODO:format*/
+{
+    dri3_drawable *pdraw = loaderPrivate;
+    dri3_buffer *buffer = pdraw->buffers[dri3_max_back];
+    xcb_dri3_buffer_from_pixmap_cookie_t bp_cookie;
+    xcb_dri3_buffer_from_pixmap_reply_t *bp_reply;
+    int *fds;
+    Display *dpy;
+    xcb_connection_t *c;
+    xcb_sync_fence_t sync_fence;
+    struct xshmfence *shm_fence;
+    int fence_fd;
+    tbm_bo tbo = NULL;
+
+    /* Reuse this pixmap buffer if it already exist */
+    if (buffer)
+    {
+        buffer->flags = DRI3_BUFFER_REUSED;
+        tbm_bo_ref(buffer->tbo);
+        return buffer;
+    }
+    dpy = pdraw->dpy;
+    c = XGetXCBConnection(dpy);
+
+    buffer = calloc(1, sizeof (dri3_buffer));
+    if (!buffer)
+        goto no_buffer;
+
+    /* Create a shared fence the server can use to signal idleness */
+    fence_fd = xshmfence_alloc_shm();
+    if (fence_fd < 0)
+        goto no_fence;
+    shm_fence = xshmfence_map_shm(fence_fd);
+    if (shm_fence == NULL)
+    {
+        close (fence_fd);
+        goto no_fence;
+    }
+
+    xcb_dri3_fence_from_fd(c,
+                           pixmap,
+                           (sync_fence = xcb_generate_id(c)),
+                           EGL_FALSE,
+                           fence_fd);
+
+    /* Get an FD for the pixmap object
+     */
+    bp_cookie = xcb_dri3_buffer_from_pixmap(c, pixmap);
+    bp_reply = xcb_dri3_buffer_from_pixmap_reply(c, bp_cookie, NULL);
+    if (!bp_reply)
+        goto no_image;
+    fds = xcb_dri3_buffer_from_pixmap_reply_fds(c, bp_reply);
+
+    tbo = tbm_bo_import_fd(pdraw->bufmgr,(tbm_fd)(*fds));
+    MALI_DEBUG_PRINT(0, ("imported tbo==%x, FUNC:%s\n",tbo,__func__));
+    if(NULL == tbo)
+    {
+        /* fix: previously only logged, then tbm_bo_ref(NULL) crashed */
+        MALI_DEBUG_PRINT(0, ("error:tbo==NULL, FUNC:%s\n",__func__));
+        free(bp_reply);
+        goto no_image;
+    }
+    tbm_bo_ref(tbo);/* add a ref when created,and unref in dri3_free_render_buffer */
+    buffer->tbo = tbo;
+    buffer->dma_buf_fd = *fds;
+    buffer->pixmap = pixmap;
+    buffer->own_pixmap = EGL_FALSE;
+    buffer->width = bp_reply->width;
+    buffer->height = bp_reply->height;
+    buffer->pitch = bp_reply->width*(bp_reply->bpp/8);
+    buffer->buffer_type = dri3_buffer_front;
+    buffer->shm_fence = shm_fence;
+    buffer->sync_fence = sync_fence;
+
+    free(bp_reply); /* fix: reply was leaked on every call */
+
+    pdraw->buffers[dri3_max_back] = buffer;
+    return buffer;
+
+no_image:
+    MALI_DEBUG_PRINT(0, ("error:no_image,buffer_from_pixmap failed in FUNC:%s",
+                         __func__));
+    xcb_sync_destroy_fence(c, sync_fence);
+    xshmfence_unmap_shm(shm_fence);
+no_fence:
+    free(buffer);
+    MALI_DEBUG_PRINT(0, ("error:no_fence,xshmfence_map_shm failed in FUNC:%s",
+                         __func__));
+no_buffer:
+    return NULL;
+}
+
+/** dri3_find_back
+ *
+ * Find an idle back buffer. If there isn't one, then
+ * wait for a present idle notify event from the X server
+ *
+ * Returns the buffer slot index, or -1 if the special event
+ * queue dies while waiting. NOTE: this can block indefinitely
+ * until the server marks some buffer idle.
+ */
+static int
+dri3_find_back(xcb_connection_t *c, dri3_drawable *priv)
+{
+ int b;
+ xcb_generic_event_t *ev;
+ xcb_present_generic_event_t *ge;
+
+
+ for (;;)
+ {
+ /* Scan the back buffers starting just after the one used last
+ * (round-robin), preferring an unallocated or idle slot. */
+ for (b = 0; b < dri3_max_back; b++)
+ {
+ int id = (b + priv->cur_back + 1) % dri3_max_back;
+ dri3_buffer *buffer = priv->buffers[id];
+
+ MALI_DEBUG_PRINT(0, ("%s id=%d,buffer=%p\n",
+ __func__, id, buffer));
+ if (!buffer || !buffer->busy)
+ {
+ MALI_DEBUG_PRINT(0, ("%s find buffer success:id=%d,buffer=%p\n",
+ __func__, id, buffer));
+ priv->cur_back = id;
+ return id;
+ }
+ }
+ /* All buffers busy: flush pending requests and block until the
+ * server sends another Present event (e.g. IDLE_NOTIFY). */
+ xcb_flush(c);
+ ev = xcb_wait_for_special_event(c, priv->special_event);
+ if (!ev)
+ return -1;
+ ge = (void *) ev;
+ dri3_handle_present_event(priv, ge);
+ }
+}
+
+/** dri3_alloc_render_buffer
+ *
+ * allocate a render buffer and create an X pixmap from that
+ *
+ * Allocate an xshmfence for synchronization
+ *
+ * Returns the new buffer, or NULL on failure with all partially
+ * acquired resources (fence fd, shm mapping, tbo, buffer struct)
+ * released.
+ */
+static dri3_buffer *
+dri3_alloc_render_buffer(dri3_drawable *priv,
+                         int width, int height, int depth, int cpp)
+{
+    Display *dpy = priv->dpy;
+    Drawable draw = priv->xDrawable;
+    dri3_buffer *buffer = NULL;
+    xcb_connection_t *c = XGetXCBConnection(dpy);
+    xcb_pixmap_t pixmap = 0;
+    xcb_sync_fence_t sync_fence;
+    struct xshmfence *shm_fence;
+    int buffer_fd, fence_fd;
+    int size;
+    tbm_bo_handle handle;
+    xcb_void_cookie_t cookie;
+    xcb_generic_error_t *error;
+
+    /* Create an xshmfence object and
+     * prepare to send that to the X server
+     */
+    fence_fd = xshmfence_alloc_shm();
+    if (fence_fd < 0)
+    {
+        MALI_DEBUG_PRINT(0, ("%s:error:xshmfence_alloc_shm failed\n",
+                             __func__));
+        return NULL;
+    }
+
+    shm_fence = xshmfence_map_shm(fence_fd);
+    if (shm_fence == NULL)
+    {
+        MALI_DEBUG_PRINT(0, ("%s:error:xshmfence_map_shm failed\n",
+                             __func__));
+        goto no_shm_fence;
+    }
+
+    /* Allocate the image from the driver
+     */
+    buffer = calloc(1, sizeof (dri3_buffer));
+    if (!buffer)
+    {
+        MALI_DEBUG_PRINT(0, ("%s:error:buffer alloc failed\n",
+                             __func__));
+        goto no_buffer;
+    }
+
+    /* [BEGIN: 20141125-xing.huang] calculate pitch and size
+     * by input parameter cpp */
+    buffer->pitch = width*(cpp/8);
+    size = buffer->pitch*height;
+    /* [END:20141125-xing.huang] */
+
+    buffer->tbo = tbm_bo_alloc(priv->bufmgr, size, TBM_BO_DEFAULT);
+    if (NULL == buffer->tbo)
+    {
+        MALI_DEBUG_PRINT(0, ("%s:error: buffer->tbo==NULL\n",
+                             __func__));
+        /* fix: jumping to no_buffer here leaked the buffer struct */
+        goto no_tbo;
+    }
+
+    /* dup tbo, because X will close it */
+    handle = tbm_bo_get_handle(buffer->tbo, TBM_DEVICE_3D);
+    buffer_fd = dup(handle.u32);
+    buffer->dma_buf_fd = handle.u32;
+    buffer->size = size;
+    cookie = xcb_dri3_pixmap_from_buffer_checked(c,
+                                                 (pixmap = xcb_generate_id(c)),
+                                                 draw,
+                                                 buffer->size,
+                                                 width, height, buffer->pitch,
+                                                 depth, cpp,
+                                                 buffer_fd);
+    error = xcb_request_check( c, cookie);
+    if (error)
+    {
+        MALI_DEBUG_PRINT(0, ("%s: xcb_dri3_pixmap_from_buffer failed, err_code=%d\n",
+                             __func__, error->error_code));
+        free(error); /* fix: error object was leaked */
+        goto no_pixmap;
+    }
+    if (0 == pixmap)
+    {
+        MALI_DEBUG_PRINT(0, ("%s: error:xcb_dri3_pixmap_from_buffer pixmap=0\n",
+                             __func__));
+        goto no_pixmap;
+    }
+    cookie = xcb_dri3_fence_from_fd_checked(c,
+                                            pixmap,
+                                            (sync_fence = xcb_generate_id(c)),
+                                            EGL_FALSE,
+                                            fence_fd);
+    error = xcb_request_check( c, cookie);
+    if (error)
+    {
+        MALI_DEBUG_PRINT(0, ("%s: xcb_dri3_fence_from_fd failed,err_code=%d\n",
+                             __func__, error->error_code));
+        free(error); /* fix: error object was leaked */
+        goto no_pixmap;
+    }
+    buffer->pixmap = pixmap;
+    buffer->own_pixmap = EGL_TRUE;
+    buffer->sync_fence = sync_fence;
+    buffer->shm_fence = shm_fence;
+    buffer->width = width;
+    buffer->height = height;
+    buffer->depth = depth;
+    buffer->cpp = cpp;
+    buffer->flags = 0;
+
+    /* Mark the buffer as idle
+     */
+    dri3_fence_set(buffer);
+
+    return buffer;
+no_pixmap:
+    tbm_bo_unref(buffer->tbo);
+no_tbo:
+    free(buffer);
+no_buffer:
+    xshmfence_unmap_shm(shm_fence);
+no_shm_fence:
+    close(fence_fd);
+    return NULL;
+}
+
+/** dri3_free_render_buffer
+ *
+ * Free everything associated with one render buffer including pixmap, fence
+ * stuff
+ *
+ * Ownership: the pixmap is only destroyed if this code created it
+ * (own_pixmap); pixmaps imported via dri3_get_pixmap_buffer belong
+ * to the application.
+ */
+static void
+dri3_free_render_buffer(dri3_drawable *pdraw, dri3_buffer *buffer)
+{
+ xcb_connection_t *c = XGetXCBConnection(pdraw->dpy);
+
+ MALI_DEBUG_PRINT(0, ("%s buffer=%p\n",__func__, buffer));
+ if (buffer->own_pixmap)
+ xcb_free_pixmap(c, buffer->pixmap);
+ xcb_sync_destroy_fence(c, buffer->sync_fence);
+ xshmfence_unmap_shm(buffer->shm_fence);
+ /* Drop the extra ref taken when the buffer was handed out while
+ * it is still marked busy (server never sent IDLE_NOTIFY). */
+ if (buffer->busy)
+ tbm_bo_unref(buffer->tbo);
+ /* added a ref when created and unref while free, see dri3_get_pixmap_buffer */
+ if(pdraw->is_pixmap)
+ {
+ tbm_bo_unref(buffer->tbo);
+ }
+ free(buffer);
+ /* NOTE(review): this only clears the local copy of the pointer;
+ * the caller's pointer is unchanged. */
+ buffer = NULL;
+}
+
+/** dri3_free_buffers
+ *
+ * Free the front buffer or all of the back buffers. Used
+ * when the application changes which buffers it needs
+ *
+ * buffer_type selects the range: back buffers occupy slots
+ * [0, dri3_max_back), the front buffer slot dri3_max_back.
+ */
+static void
+dri3_free_buffers(enum dri3_buffer_type buffer_type,
+                  void *loaderPrivate)
+{
+    dri3_drawable *priv = loaderPrivate;
+    dri3_buffer *buffer;
+    int first_id;
+    int n_id;
+    int buf_id;
+
+    switch (buffer_type)
+    {
+        case dri3_buffer_back:
+            first_id = 0;
+            n_id = dri3_max_back;
+            break;
+        case dri3_buffer_front:
+            first_id = dri3_max_back;
+            n_id = 1;
+            break;
+        default:
+            /* fix: an unexpected enum value previously left first_id
+             * and n_id uninitialized (undefined behaviour in the loop
+             * below); treat it as nothing to free. */
+            return;
+    }
+
+    for (buf_id = first_id; buf_id < first_id + n_id; buf_id++)
+    {
+        buffer = priv->buffers[buf_id];
+        if (buffer)
+        {
+            dri3_free_render_buffer(priv, buffer);
+            priv->buffers[buf_id] = NULL;
+        }
+    }
+}
+
+/** dri3_get_window_buffer
+ *
+ * Find a front or back buffer, allocating new ones as necessary
+ *
+ * Picks (or waits for) an idle back-buffer slot, reallocating the
+ * buffer when the drawable size changed. Returns NULL only when a
+ * fresh allocation fails. The returned buffer is marked busy and
+ * carries an extra tbo ref for the caller.
+ */
+static dri3_buffer *
+dri3_get_window_buffer(void *loaderPrivate, int cpp)
+{
+ dri3_drawable *priv = loaderPrivate;
+ xcb_connection_t *c = XGetXCBConnection(priv->dpy);
+ dri3_buffer *backbuffer = NULL;
+ int back_buf_id,reuse = 1;
+ uint32_t old_dma_fd = 0;
+ /* NOTE(review): dri3_find_back can return -1 when the special event
+ * queue dies; that index is used unchecked here — verify. */
+ back_buf_id = dri3_find_back(c, priv);
+
+ backbuffer = priv->buffers[back_buf_id];
+
+
+ /* Allocate a new buffer if there isn't an old one, or if that
+ * old one is the wrong size.
+ */
+ if (!backbuffer || backbuffer->width != priv->width ||
+ backbuffer->height != priv->height)
+ {
+ dri3_buffer *new_buffer;
+
+ /* Allocate the new buffers
+ */
+ new_buffer = dri3_alloc_render_buffer(priv,
+ priv->width, priv->height, priv->depth, cpp);
+
+ if (!new_buffer)
+ {
+ MALI_DEBUG_PRINT(0, ("%s:error: alloc new buffer failed\n",
+ __func__));
+ return NULL;
+ }
+ reuse = 0;
+ /* When resizing, copy the contents of the old buffer,
+ * waiting for that copy to complete using our fences
+ * before proceeding
+ */
+ if (backbuffer)
+ {
+ dri3_fence_reset(new_buffer);
+ dri3_fence_await(c, backbuffer);
+ /* [BEGIN: 20141125-xuelian.bai] Size not match,this buffer
+ * must be removed from buffer cache, so we have to save
+ * dma_buf_fd of old buffer.*/
+ old_dma_fd = backbuffer->dma_buf_fd;
+ /* [END: 20141125-xuelian.bai] */
+ dri3_free_render_buffer(priv, backbuffer);
+ }
+ backbuffer = new_buffer;
+ backbuffer->buffer_type = dri3_buffer_back;
+ backbuffer->old_dma_fd = old_dma_fd;/*save dma_buf_fd of old buffer*/
+ priv->buffers[back_buf_id] = backbuffer;
+ goto no_need_wait;/* Skip dri3_fence_await */
+ }
+
+ /* Reused buffer: wait until the server is done with it. */
+ dri3_fence_await(c, backbuffer);
+no_need_wait:
+ backbuffer->flags = DRI2_BUFFER_FB;
+ if (!reuse)
+ {
+ MALI_DEBUG_PRINT(0, ("%s:allocate new buffer\n", __func__));
+ }
+ else
+ {
+ backbuffer->flags |= DRI2_BUFFER_REUSED;
+ MALI_DEBUG_PRINT(0, ("%s:reuse old buffer\n", __func__));
+ }
+ backbuffer->busy = 1;
+ tbm_bo_ref(backbuffer->tbo);
+
+ /* Return the requested buffer */
+ return backbuffer;
+}
+
+/* dri3_get_buffers
+ *
+ * Resolve the requested attachment (front or back) to a concrete
+ * buffer for `drawable`, reporting its dimensions through the out
+ * parameters. Returns NULL on mismatch or allocation failure.
+ */
+static dri3_buffer *dri3_get_buffers(XID drawable, void *loaderPrivate,
+                                     int *width, int *height, unsigned int *attachments,
+                                     int count, int *out_count, int cpp)
+{
+    dri3_drawable *priv = loaderPrivate;
+    dri3_buffer *buffers = NULL;
+
+    MALI_DEBUG_PRINT(0, ("%s:begin\n",__func__));
+
+    if (drawable != priv->xDrawable)
+    {
+        MALI_DEBUG_PRINT(0, ("%s error:drawable mismatch\n", __func__));
+        return NULL;
+    }
+
+    if (!dri3_update_drawable(loaderPrivate))
+    {
+        MALI_DEBUG_PRINT(0, ("%s dri3_update_drawable filed\n", __func__));
+        return NULL;
+    }
+
+    if (*attachments == dri3_buffer_front)
+        buffers = dri3_get_pixmap_buffer(loaderPrivate, priv->xDrawable);
+    else
+        buffers = dri3_get_window_buffer(loaderPrivate, cpp);
+
+    /* fix: both getters can return NULL (allocation failure); the
+     * original dereferenced the result unchecked below. */
+    if (buffers == NULL)
+    {
+        MALI_DEBUG_PRINT(0, ("%s error:get buffer failed\n", __func__));
+        return NULL;
+    }
+
+    *out_count = 1;
+    *width = (int)buffers->width;
+    *height = (int)buffers->height;
+
+    MALI_DEBUG_PRINT(0, ("%s end\n",__func__));
+    return buffers;
+}
+
+/******************************************************
+ * dri3_swap_buffers
+ * swap back buffer with front buffer
+ * Make the current back buffer visible using the present extension
+ * if (region_t==0),swap whole frame, else swap with region
+ ******************************************************/
+static int64_t
+dri3_swap_buffers(Display *dpy, void *priv, int interval, XID region_t)
+{
+
+ int64_t ret = -1;
+ int64_t target_msc = 0;
+ int64_t divisor = 0;
+ int64_t remainder = 0;
+ xcb_connection_t *c = XGetXCBConnection(dpy);
+ dri3_drawable *pDrawable = (dri3_drawable*)priv;
+ dri3_buffer *back = NULL;
+
+ MALI_DEBUG_PRINT(0, ("\n#########%s begin######\n",__func__));
+
+
+ back = pDrawable->buffers[pDrawable->cur_back];
+
+ /* Process any present events that have been received from the X
+ * server
+ */
+ dri3_flush_present_events(pDrawable);
+
+ if ((back == NULL)||(pDrawable == NULL)||(pDrawable->is_pixmap != 0))
+ {
+ MALI_DEBUG_PRINT(0, ("%s error:input error:\n",__func__));
+ MALI_DEBUG_PRINT(0, ("\t back=%p,pDrawable=%p,pDrawable->is_pixmap=%d\n",
+ back,pDrawable,pDrawable->is_pixmap));
+ return ret;
+ }
+
+ dri3_fence_reset(back);
+
+ /* Compute when we want the frame shown by taking the last known
+ * successful MSC and adding in a swap interval for each outstanding
+ * swap request
+ */
+ if (pDrawable->swap_interval != interval)
+ pDrawable->swap_interval = interval;
+
+ ++pDrawable->send_sbc;
+ if (target_msc == 0)
+ target_msc = pDrawable->msc + pDrawable->swap_interval *
+ (pDrawable->send_sbc - pDrawable->recv_sbc);
+
+ /* set busy flag */
+ back->busy = 1;
+ back->last_swap = pDrawable->send_sbc;
+
+ xcb_present_pixmap(c,
+ pDrawable->xDrawable, /* dst */
+ back->pixmap, /* src */
+ (uint32_t) pDrawable->send_sbc,
+ 0, /* valid */
+ region_t, /* update */
+ 0, /* x_off */
+ 0, /* y_off */
+ None, /* target_crtc */
+ None,
+ back->sync_fence,
+ XCB_PRESENT_OPTION_NONE,
+ target_msc,
+ divisor,
+ remainder, 0, NULL);
+
+ ret = (int64_t) pDrawable->send_sbc;
+ if (ret == -1)
+ MALI_DEBUG_PRINT(0, ("%s swap failed\n",__func__));
+ else
+ MALI_DEBUG_PRINT(0, ("######%s finish! send_sbc=%d#######\n\n",
+ __func__, ret));
+
+ xcb_flush(c);
+
+ ++(pDrawable->stamp);
+ return ret;
+}
+
+
+/* Wrapper around xcb_dri3_open
+ *
+ * Asks the X server for a DRM device fd for `root` (provider 0 =
+ * default). Returns the fd (marked close-on-exec) or -1 on failure.
+ * The caller owns the returned fd.
+ */
+static int
+dri3_open(Display *dpy, Window root, CARD32 provider)
+{
+ xcb_dri3_open_cookie_t cookie;
+ xcb_dri3_open_reply_t *reply;
+ xcb_connection_t *c = XGetXCBConnection(dpy);
+ int fd;
+
+ MALI_DEBUG_PRINT(0, ("\n--------%s begin-------\n",__func__));
+ cookie = xcb_dri3_open(c,
+ root,
+ provider);
+
+ reply = xcb_dri3_open_reply(c, cookie, NULL);
+ if (!reply)
+ {
+ MALI_DEBUG_PRINT(0, ("%s xcb_dri3_open failed\n", __func__));
+ return -1;
+ }
+
+ /* The protocol delivers exactly one fd on success. */
+ if (reply->nfd != 1)
+ {
+ MALI_DEBUG_PRINT(0, ("%s xcb_dri3_open reply error\n", __func__));
+ free(reply);
+ return -1;
+ }
+
+ fd = xcb_dri3_open_reply_fds(c, reply)[0];
+ /* Don't leak the DRM fd into child processes. */
+ fcntl(fd, F_SETFD, FD_CLOEXEC);
+ if (0 == fd)
+ MALI_DEBUG_PRINT(0, ("%s error: fd=0\n",__func__));
+ else
+ MALI_DEBUG_PRINT(0, ("%s open successfully fd=%d:\n",__func__, fd));
+
+ free(reply);
+ MALI_DEBUG_PRINT(0, ("\n---------%s end---------\n",__func__));
+ return fd;
+}
+
+/* dri3_create_drawable
+ *
+ * Return the per-drawable DRI3 state for xDrawable, creating it on
+ * first use and registering it in dri3_drawable_list so later calls
+ * for the same XID reuse the same object.
+ */
+static void *
+dri3_create_drawable(Display *dpy, XID xDrawable)
+{
+    dri3_drawable *pdraw = NULL;
+    xcb_connection_t *c = XGetXCBConnection(dpy);
+    xcb_get_geometry_cookie_t geom_cookie;
+    xcb_get_geometry_reply_t *geom_reply;
+    int i;
+    tpl_list_node_t *node;
+    dri3_drawable_node *drawable_node;
+
+
+    MALI_DEBUG_PRINT(0, ("\n--------%s begin-------\n",__func__));
+
+    /* Check drawable list to find that if it has been created*/
+    node = tpl_list_get_front_node(&dri3_drawable_list);
+    while (node)
+    {
+        dri3_drawable_node *drawable = (dri3_drawable_node *)tpl_list_node_get_data(node);
+
+        if (drawable->xDrawable == xDrawable)
+        {
+            pdraw = drawable->drawable;
+            return (void *)pdraw;/* Reuse old drawable */
+        }
+        node = tpl_list_node_next(node);
+    }
+    pdraw = calloc(1, sizeof(*pdraw));
+    TPL_ASSERT(pdraw != NULL);
+
+    geom_cookie = xcb_get_geometry(c, xDrawable);
+    geom_reply = xcb_get_geometry_reply(c, geom_cookie, NULL);
+    TPL_ASSERT(geom_reply != NULL);
+
+    pdraw->bufmgr = global.bufmgr;
+    pdraw->width = geom_reply->width;
+    pdraw->height = geom_reply->height;
+    pdraw->depth = geom_reply->depth;
+    pdraw->is_pixmap = TPL_FALSE;
+
+    free(geom_reply);
+    pdraw->dpy = dpy;
+    pdraw->xDrawable = xDrawable;
+
+    for (i = 0; i < dri3_max_back + 1;i++)
+        pdraw->buffers[i] = NULL;
+
+
+    /* Add new allocated drawable to drawable list */
+    drawable_node = calloc(1, sizeof(dri3_drawable_node));
+    /* fix: the calloc result was dereferenced unchecked (consistent
+     * with the TPL_ASSERT allocation checks above) */
+    TPL_ASSERT(drawable_node != NULL);
+    drawable_node->drawable = pdraw;
+    drawable_node->xDrawable = xDrawable;
+    tpl_list_push_back(&dri3_drawable_list, (void *)drawable_node);
+
+    MALI_DEBUG_PRINT(0, ("\n---------%s end---------\n",__func__));
+    return (void*)pdraw;
+}
+
+/* dri3_display_init
+ *
+ * Verify that the X server supports both the DRI3 and Present
+ * extensions at the versions we were compiled against.
+ * Returns TPL_TRUE when both are usable, TPL_FALSE otherwise.
+ */
+static tpl_bool_t
+dri3_display_init(Display *dpy)
+{
+    /* Initialize DRI3 & DRM */
+    xcb_connection_t *c = XGetXCBConnection(dpy);
+    xcb_dri3_query_version_cookie_t dri3_cookie;
+    xcb_dri3_query_version_reply_t *dri3_reply;
+    xcb_present_query_version_cookie_t present_cookie;
+    xcb_present_query_version_reply_t *present_reply;
+    xcb_generic_error_t *error;
+    const xcb_query_extension_reply_t *extension;
+    xcb_extension_t xcb_dri3_id = { "DRI3", 0 };
+    xcb_extension_t xcb_present_id = { "Present", 0 };
+
+    /* fix: removed unused local `int fd = 0;` */
+
+    MALI_DEBUG_PRINT(0, ("\n---------%s begin---------\n",__func__));
+    /* Issue both prefetches first so the round-trips overlap. */
+    xcb_prefetch_extension_data(c, &xcb_dri3_id);
+    xcb_prefetch_extension_data(c, &xcb_present_id);
+
+    extension = xcb_get_extension_data(c, &xcb_dri3_id);
+    if (!(extension && extension->present))
+    {
+        MALI_DEBUG_PRINT(0, ("%s get dri3 extension failed\n", __func__));
+        return TPL_FALSE;
+    }
+
+    extension = xcb_get_extension_data(c, &xcb_present_id);
+    if (!(extension && extension->present))
+    {
+        MALI_DEBUG_PRINT(0, ("%s get present extension failed\n", __func__));
+        return TPL_FALSE;
+    }
+
+    dri3_cookie = xcb_dri3_query_version(c,
+                                         XCB_DRI3_MAJOR_VERSION,
+                                         XCB_DRI3_MINOR_VERSION);
+    dri3_reply = xcb_dri3_query_version_reply(c, dri3_cookie, &error);
+    if (!dri3_reply)
+    {
+        MALI_DEBUG_PRINT(0, ("%s query dri3 version failed\n", __func__));
+        free(error);
+        return TPL_FALSE;
+    }
+    free(dri3_reply);
+
+    present_cookie = xcb_present_query_version(c,
+                                               XCB_PRESENT_MAJOR_VERSION,
+                                               XCB_PRESENT_MINOR_VERSION);
+    present_reply = xcb_present_query_version_reply(c, present_cookie, &error);
+    if (!present_reply)
+    {
+        MALI_DEBUG_PRINT(0, ("%s query present version failed\n", __func__));
+        free(error);
+        return TPL_FALSE;
+    }
+    free(present_reply);
+    MALI_DEBUG_PRINT(0, ("\n---------%s end---------\n",__func__));
+    return TPL_TRUE;
+}
+
+/* dri3_destroy_drawable
+ *
+ * Tear down the DRI3 state for xDrawable: free all buffers,
+ * unregister the special event queue, and remove the entry from
+ * dri3_drawable_list. Silently returns if the drawable is not
+ * (or no longer) in the list.
+ */
+static void
+dri3_destroy_drawable(Display *dpy, XID xDrawable)
+{
+ dri3_drawable *pdraw;
+ xcb_connection_t *c = XGetXCBConnection(dpy);
+ int i;
+ tpl_list_node_t *node;
+ dri3_drawable_node *drawable;
+ MALI_DEBUG_PRINT(0, ("\n---------%s begin---------\n",__func__));
+
+ /* Remove drawable from list */
+ node = tpl_list_get_front_node(&dri3_drawable_list);
+ while (node)
+ {
+ drawable = (dri3_drawable_node *)tpl_list_node_get_data(node);
+
+ if (drawable->xDrawable== xDrawable)
+ {
+ pdraw = drawable->drawable;
+
+ /* NOTE(review): a node with a NULL drawable is left in the
+ * list here rather than being removed — verify intent. */
+ if (!pdraw)
+ return;
+
+ for (i = 0; i < dri3_max_back + 1; i++)
+ {
+ if (pdraw->buffers[i])
+ dri3_free_render_buffer(pdraw, pdraw->buffers[i]);
+ }
+
+ if (pdraw->special_event)
+ xcb_unregister_for_special_event(c, pdraw->special_event);
+ free(pdraw);
+ pdraw = NULL;
+ tpl_list_remove(node, free);
+ return;
+ }
+
+ node = tpl_list_node_next(node);
+ }
+
+ /* If didn't find the drawable, means it is already free*/
+ MALI_DEBUG_PRINT(0, ("\n---------%s end---------\n",__func__));
+ return;
+}
+
+/* Return the dummy Display reserved for the worker thread.
+ * Must only be called after __tpl_x11_dri3_display_init has run
+ * (asserted via global.display_count).
+ *
+ * NOTE(review): pthread_mutex_t is received here BY VALUE from
+ * __tpl_x11_get_global_mutex(); locking a local copy of a mutex
+ * does not synchronize with other threads unless the type is a
+ * handle — verify against the helper's definition.
+ */
+static Display *
+__tpl_x11_dri3_get_worker_display()
+{
+ Display *display;
+ pthread_mutex_t mutex = __tpl_x11_get_global_mutex();
+
+ pthread_mutex_lock(&mutex);
+ TPL_ASSERT(global.display_count > 0);
+
+ /* Use dummy display for worker thread. :-) */
+ display = global.worker_display;
+
+ pthread_mutex_unlock(&mutex);
+
+ return display;
+}
+
+/* Backend init hook: set up the shared (process-global) DRI3 state
+ * on first call — worker display, DRM fd, tbm buffer manager,
+ * drawable list, swap-type configuration and back-buffer count —
+ * and bump the display refcount on every call.
+ *
+ * NOTE(review): the global mutex is copied by value here (see
+ * __tpl_x11_dri3_get_worker_display) — verify the locking is real.
+ */
+static tpl_bool_t
+__tpl_x11_dri3_display_init(tpl_display_t *display)
+{
+ pthread_mutex_t mutex = __tpl_x11_get_global_mutex();
+ if (display->native_handle == NULL)
+ {
+ display->native_handle = XOpenDisplay(NULL);
+ TPL_ASSERT(display->native_handle != NULL);
+ }
+ display->xcb_connection = XGetXCBConnection( (Display*)display->native_handle );
+ if( NULL == display->xcb_connection )
+ {
+ CDBG_PRINT_WARN( CDBG_EGL, "XGetXCBConnection failed");
+ }
+
+ pthread_mutex_lock(&mutex);
+
+ /* One-time global initialization, guarded by the refcount. */
+ if (global.display_count == 0)
+ {
+ tpl_bool_t xres = TPL_FALSE;
+ Window root = 0;
+ drm_magic_t magic;
+
+ /* Open a dummy display connection. */
+ global.worker_display = XOpenDisplay(NULL);
+ TPL_ASSERT(global.worker_display != NULL);
+
+ /* Get default root window. */
+ root = DefaultRootWindow(global.worker_display);
+
+ /* Initialize DRI3. */
+ xres = dri3_display_init(global.worker_display);
+ TPL_ASSERT(xres == TPL_TRUE);
+
+
+ /* Initialize buffer manager. */
+ global.bufmgr_fd = dri3_open(global.worker_display, root, 0);
+ /* NOTE(review): drmGetMagic's result and `magic` are unused, and
+ * bufmgr_fd/tbm_bufmgr_init are not checked for failure — verify. */
+ drmGetMagic(global.bufmgr_fd, &magic);
+ global.bufmgr = tbm_bufmgr_init(global.bufmgr_fd);
+
+ tpl_list_init(&dri3_drawable_list);
+
+ /* Initialize swap type configuration. */
+ __tpl_x11_swap_str_to_swap_type(getenv(EGL_X11_WINDOW_SWAP_TYPE_ENV_NAME),
+ &global.win_swap_type);
+
+ __tpl_x11_swap_str_to_swap_type(getenv(EGL_X11_FB_SWAP_TYPE_ENV_NAME),
+ &global.fb_swap_type);
+ /* [BEGIN: 20141125-xuelian.bai] Add env for setting number of back buffers*/
+ {
+ const char *backend_env;
+ int count = 0;
+ backend_env = getenv("MALI_EGL_DRI3_BUF_NUM");
+ if (!backend_env || strlen(backend_env) == 0)
+ dri3_max_back = 5; /* Default value is 5*/
+ else
+ {
+ count = atoi(backend_env);
+ if (count == 1)/* one buffer doesn't work,min is 2 */
+ dri3_max_back = 2;
+ else if (count < 20)
+ dri3_max_back = count;
+ else
+ dri3_max_back = 5;
+ }
+ }
+ /* [END: 20141125-xuelian.bai] */
+ }
+
+ global.display_count++;
+ display->bufmgr_fd = global.bufmgr_fd;
+
+ pthread_mutex_unlock(&mutex);
+ return TPL_TRUE;
+}
+
+/* Backend fini hook: drop one display reference; when the last
+ * reference goes away, tear down the process-global state created
+ * in __tpl_x11_dri3_display_init (bufmgr, DRM fd, worker display,
+ * drawable list).
+ */
+static void
+__tpl_x11_dri3_display_fini(tpl_display_t *display)
+{
+ pthread_mutex_t mutex = __tpl_x11_get_global_mutex();
+ TPL_IGNORE(display);
+ pthread_mutex_lock(&mutex);
+
+ if (--global.display_count == 0)
+ {
+ tbm_bufmgr_deinit(global.bufmgr);
+ close(global.bufmgr_fd);
+ XCloseDisplay(global.worker_display);
+
+ global.worker_display = NULL;
+ global.bufmgr_fd = -1;
+ global.bufmgr = NULL;
+
+ tpl_list_fini(&dri3_drawable_list, NULL);
+ }
+
+ pthread_mutex_unlock(&mutex);
+
+}
+
+/* Surface init hook: allocate the per-surface backend state,
+ * create (or reuse) the DRI3 drawable for the native handle, and
+ * cache the surface dimensions. Returns TPL_FALSE only when the
+ * backend-state allocation fails.
+ */
+static tpl_bool_t
+__tpl_x11_dri3_surface_init(tpl_surface_t *surface)
+{
+ Display *display;
+ XID drawable;
+ tpl_x11_dri3_surface_t *x11_surface;
+
+ x11_surface = (tpl_x11_dri3_surface_t *)calloc(1,
+ sizeof(tpl_x11_dri3_surface_t));
+
+ if (x11_surface == NULL)
+ {
+ TPL_ASSERT(TPL_FALSE);
+ return TPL_FALSE;
+ }
+
+ /* -1 marks "no post yet" so the first post always records its interval. */
+ x11_surface->latest_post_interval = -1;
+ tpl_list_init(&x11_surface->buffer_cache);
+
+ display = (Display *)surface->display->native_handle;
+ drawable = (XID)surface->native_handle;
+
+ x11_surface->drawable = dri3_create_drawable(display, drawable);
+
+ surface->backend.data = (void *)x11_surface;
+ MALI_DEBUG_PRINT(0, ("%s surface type:%d\n",__func__, surface->type));
+ if (surface->type == TPL_SURFACE_TYPE_WINDOW)
+ {
+ __tpl_x11_display_get_window_info(surface->display,
+ surface->native_handle,
+ &surface->width, &surface->height, NULL,0,0);
+ }
+ else
+ {
+ __tpl_x11_display_get_pixmap_info(surface->display,
+ surface->native_handle,
+ &surface->width, &surface->height, NULL);
+ }
+
+ return TPL_TRUE;
+}
+
+/* Surface fini hook: destroy the DRI3 drawable, flush the buffer
+ * cache, release the damage region and free the backend state.
+ */
+static void
+__tpl_x11_dri3_surface_fini(tpl_surface_t *surface)
+{
+ Display *display;
+ tpl_x11_dri3_surface_t *x11_surface;
+
+ display = (Display *)surface->display->native_handle;
+ x11_surface = (tpl_x11_dri3_surface_t *)surface->backend.data;
+
+ dri3_destroy_drawable(display, (XID)surface->native_handle);
+
+ if (x11_surface)
+ {
+ __tpl_x11_surface_buffer_cache_clear(&x11_surface->buffer_cache);
+
+
+ if (x11_surface->damage)
+ XFixesDestroyRegion(display, x11_surface->damage);
+
+ free(x11_surface);
+ }
+
+ surface->backend.data = NULL;
+}
+
+/* __tpl_x11_dri3_surface_post_internal
+ *
+ * Present the frame's back buffer. With no damage rects the whole
+ * frame is swapped; otherwise the rects are converted to X
+ * coordinates (Y flipped), packed into an XFixes region and a
+ * region swap is issued. Always marks the frame POSTED.
+ */
+static void
+__tpl_x11_dri3_surface_post_internal(tpl_surface_t *surface, tpl_frame_t *frame,
+                                     tpl_bool_t is_worker)
+{
+    Display *display;
+    tpl_x11_dri3_surface_t *x11_surface;
+    XRectangle *xrects;
+    XRectangle xrects_stack[TPL_STACK_XRECTANGLE_SIZE];
+
+    x11_surface = (tpl_x11_dri3_surface_t *)surface->backend.data;
+
+    /* Worker-thread posts must use the dedicated worker display. */
+    if (is_worker)
+        display = __tpl_x11_dri3_get_worker_display();
+    else
+        display = surface->display->native_handle;
+
+    if (frame->interval != x11_surface->latest_post_interval)
+    {
+        x11_surface->latest_post_interval = frame->interval;/*FIXME:set interval?*/
+    }
+
+    if (tpl_region_is_empty(&frame->damage))
+    {
+        dri3_swap_buffers(display, x11_surface->drawable,0,0);/*TODO*/
+    }
+    else
+    {
+        int i;
+
+        /* Spill to the heap only when the stack array is too small. */
+        if (frame->damage.num_rects > TPL_STACK_XRECTANGLE_SIZE)
+        {
+            xrects = (XRectangle *)malloc(sizeof(XRectangle) *
+                                          frame->damage.num_rects);
+            if (xrects == NULL)
+            {
+                /* fix: malloc was unchecked; fall back to a full swap */
+                dri3_swap_buffers(display, x11_surface->drawable, 0, 0);
+                frame->state = TPL_FRAME_STATE_POSTED;
+                return;
+            }
+        }
+        else
+        {
+            xrects = &xrects_stack[0];
+        }
+
+        for (i = 0; i < frame->damage.num_rects; i++)
+        {
+            const int *rects = &frame->damage.rects[i * 4];
+
+            /* Damage rects are bottom-left origin; X is top-left. */
+            xrects[i].x = rects[0];
+            xrects[i].y = frame->buffer->height - rects[1] -
+                          rects[3];
+            xrects[i].width = rects[2];
+            xrects[i].height = rects[3];
+        }
+
+        if (x11_surface->damage == None)
+        {
+            x11_surface->damage =
+                XFixesCreateRegion(display, xrects,
+                                   frame->damage.num_rects);
+        }
+        else
+        {
+            XFixesSetRegion(display, x11_surface->damage,
+                            xrects, frame->damage.num_rects);
+        }
+
+        dri3_swap_buffers(display, x11_surface->drawable, 0,
+                          x11_surface->damage);
+
+        /* fix: the heap-allocated rect array was leaked */
+        if (xrects != &xrects_stack[0])
+            free(xrects);
+    }
+    frame->state = TPL_FRAME_STATE_POSTED;
+}
+
+
+/* Backend post hook: runs on the worker thread, so delegate with
+ * is_worker = TPL_TRUE (selects the worker display). */
+static void
+__tpl_x11_dri3_surface_post(tpl_surface_t *surface, tpl_frame_t *frame)
+{
+ __tpl_x11_dri3_surface_post_internal(surface, frame, TPL_TRUE);
+}
+
+/* Frame-begin hook: for window surfaces using SYNC swap mode, block
+ * until all previously queued frames are posted before starting a
+ * new one. No-op for pixmap surfaces or other swap modes.
+ */
+static void
+__tpl_x11_dri3_surface_begin_frame(tpl_surface_t *surface)
+{
+ tpl_frame_t *prev_frame;
+
+ if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+ return;
+
+ prev_frame = __tpl_surface_get_latest_frame(surface);
+
+ if (prev_frame && prev_frame->state != TPL_FRAME_STATE_POSTED)
+ {
+ /* Pick the swap mode matching the buffer kind (fb vs window). */
+ if ((DRI2_BUFFER_IS_FB(prev_frame->buffer->backend.flags) &&
+ global.fb_swap_type == TPL_X11_SWAP_TYPE_SYNC) ||
+ (!DRI2_BUFFER_IS_FB(prev_frame->buffer->backend.flags) &&
+ global.win_swap_type == TPL_X11_SWAP_TYPE_SYNC))
+ {
+ __tpl_surface_wait_all_frames(surface);
+ }
+ }
+}
+
+/* Frame-validate hook: in LAZY swap mode, refuse to render into the
+ * same buffer twice in a row — wait for outstanding frames and
+ * report TPL_FALSE so the caller re-acquires a buffer. Returns
+ * TPL_TRUE in every other case.
+ */
+static tpl_bool_t
+__tpl_x11_dri3_surface_validate_frame(tpl_surface_t *surface)
+{
+ tpl_x11_dri3_surface_t *x11_surface = (tpl_x11_dri3_surface_t *)surface->backend.data;
+
+ if (surface->type != TPL_SURFACE_TYPE_WINDOW)
+ return TPL_TRUE;
+
+ if (surface->frame == NULL)
+ return TPL_TRUE;
+
+ if ((DRI2_BUFFER_IS_FB(surface->frame->buffer->backend.flags) &&
+ global.fb_swap_type == TPL_X11_SWAP_TYPE_LAZY) ||
+ (!DRI2_BUFFER_IS_FB(surface->frame->buffer->backend.flags) &&
+ global.win_swap_type == TPL_X11_SWAP_TYPE_LAZY))
+ {
+ if (x11_surface->latest_render_target == surface->frame->buffer)
+ {
+ __tpl_surface_wait_all_frames(surface);
+ return TPL_FALSE;
+ }
+ }
+
+ return TPL_TRUE;
+}
+
+/* Frame-end hook: remember the buffer just rendered (for LAZY-mode
+ * validation) and, in ASYNC swap mode, post the frame immediately
+ * from the calling thread.
+ */
+static void
+__tpl_x11_dri3_surface_end_frame(tpl_surface_t *surface)
+{
+ tpl_frame_t *frame = __tpl_surface_get_latest_frame(surface);
+ tpl_x11_dri3_surface_t *x11_surface = (tpl_x11_dri3_surface_t *)surface->backend.data;
+
+ if (frame)
+ {
+ x11_surface->latest_render_target = frame->buffer;
+
+ if ((DRI2_BUFFER_IS_FB(frame->buffer->backend.flags) &&
+ global.fb_swap_type == TPL_X11_SWAP_TYPE_ASYNC) ||
+ (!DRI2_BUFFER_IS_FB(frame->buffer->backend.flags) &&
+ global.win_swap_type == TPL_X11_SWAP_TYPE_ASYNC))
+ {
+ __tpl_x11_dri3_surface_post_internal(surface, frame, TPL_FALSE);
+ }
+ }
+}
+
+/* __tpl_x11_dri3_surface_get_buffer
+ *
+ * Acquire the current render target for the surface via DRI3 and
+ * wrap it in a (cached) tpl_buffer_t. Reused buffers are looked up
+ * in the per-surface cache by dma_buf_fd; stale entries (including
+ * a resized predecessor identified by old_dma_fd) are evicted.
+ * *reset_buffers tells the caller whether previous buffers are
+ * invalid. Returns NULL when no DRI3 buffer could be obtained.
+ */
+static tpl_buffer_t *
+__tpl_x11_dri3_surface_get_buffer(tpl_surface_t *surface, tpl_bool_t *reset_buffers)
+{
+    tpl_buffer_t *buffer = NULL;
+    Drawable drawable;
+    dri3_buffer *buffers = NULL;
+    uint32_t attachments[1] = { dri3_buffer_back };
+    tbm_bo bo;
+    tbm_bo_handle bo_handle;
+    int width, height, num_buffers;
+    tpl_x11_dri3_surface_t *x11_surface =
+        (tpl_x11_dri3_surface_t *)surface->backend.data;
+    int cpp = 0;
+
+    if (surface->type == TPL_SURFACE_TYPE_PIXMAP)
+    {
+        attachments[0] = dri3_buffer_front;
+    }
+
+    drawable = (Drawable)surface->native_handle;
+
+    /* [BEGIN: 20141125-xing.huang] Get the current buffer via DRI3. */
+    /* fix: repaired a nested comment that swallowed the code note.
+     * _mali_surface_specifier_bpp(&(surface->sformat)) is not used
+     * because the cpp reported by mali is not right. */
+    cpp = 32;
+    /* [END: 20141125-xing.huang] */
+    buffers = dri3_get_buffers(drawable, x11_surface->drawable, &width,
+                               &height, attachments, 1, &num_buffers, cpp);
+
+    /* fix: dri3_get_buffers can return NULL; it was dereferenced
+     * unchecked below (and again at the reset_buffers computation). */
+    if (buffers == NULL)
+    {
+        MALI_DEBUG_PRINT(0, ("%s error:dri3_get_buffers failed\n", __func__));
+        return NULL;
+    }
+
+    if (DRI2_BUFFER_IS_REUSED(buffers->flags))
+    {
+        buffer = __tpl_x11_surface_buffer_cache_find(
+                     &x11_surface->buffer_cache,
+                     buffers->dma_buf_fd);
+
+        if (buffer)
+        {
+            /* If the buffer name is reused and there's a cache
+             * entry for that name, just update the buffer age
+             * and return. */
+            buffer->age = DRI2_BUFFER_GET_AGE(buffers->flags);
+            MALI_DEBUG_PRINT(0, ("%s reuse tplbuffer\n",
+                                 __func__));
+            goto done;
+        }
+
+    }
+    else
+    {
+        /* [BEGIN: 20141125-xuelian.bai] Remove the buffer from the cache. */
+        __tpl_x11_surface_buffer_cache_remove(
+            &x11_surface->buffer_cache,
+            buffers->dma_buf_fd);
+        /* [END: 20141125-xuelian.bai] */
+        /* [BEGIN: 20141125-xuelian.bai] old_dma_fd stands for the find reused
+         * buffer but size not match. It must be removed from the list and
+         * make a unref. */
+        if(buffers->old_dma_fd != 0)
+        {
+            __tpl_x11_surface_buffer_cache_remove(
+                &x11_surface->buffer_cache,
+                buffers->old_dma_fd);
+        }
+        /* [END: 20141125-xuelian.bai] */
+    }
+
+    bo = buffers->tbo;
+
+    if (bo == NULL)
+    {
+        TPL_ASSERT(TPL_FALSE);
+        goto done;
+    }
+
+    bo_handle = tbm_bo_get_handle(bo, TBM_DEVICE_3D);
+    MALI_DEBUG_PRINT(0, ("%s dma_buf_fd=%d,handle=%d\n",
+                         __func__, buffers->dma_buf_fd, bo_handle.u32));
+
+    /* Create tpl buffer. */
+    buffer = __tpl_buffer_alloc(surface, buffers->dma_buf_fd,
+                                (int)bo_handle.u32,
+                                width, height, buffers->depth, buffers->pitch);
+
+    buffer->age = DRI2_BUFFER_GET_AGE(buffers->flags);
+    buffer->backend.data = (void *)bo;
+    buffer->backend.flags = buffers->flags;
+
+    /* The cache holds its own reference; drop ours. */
+    __tpl_x11_surface_buffer_cache_add(&x11_surface->buffer_cache, buffer);
+    tpl_object_unreference(&buffer->base);
+
+done:
+    if (reset_buffers)
+    {
+        /* Users use this output value to check if they have to reset previous buffers. */
+        *reset_buffers = !DRI2_BUFFER_IS_REUSED(buffers->flags) ||
+                         width != surface->width || height != surface->height;
+    }
+    /*XFree(buffers);*/
+    return buffer;
+}
+
+/* Backend probe: decides whether the X11/DRI3 backend can handle the
+ * given native display handle.  Always returns TPL_TRUE, so this
+ * backend acts as the catch-all — see the note below. */
+tpl_bool_t
+__tpl_display_choose_backend_x11_dri3(tpl_handle_t native_dpy)
+{
+	/* X11 display accepts any type of handle. So other backends must be chosen before this. */
+	return TPL_TRUE;
+}
+
+/* Populate the display backend dispatch table for X11/DRI3.
+ * init/fini use the DRI3-specific entry points; the remaining hooks
+ * are the generic __tpl_x11_* display implementations. */
+void
+__tpl_display_init_backend_x11_dri3(tpl_display_backend_t *backend)
+{
+	backend->type = TPL_BACKEND_X11_DRI3;
+	backend->data = NULL;
+
+	backend->init = __tpl_x11_dri3_display_init;
+	backend->fini = __tpl_x11_dri3_display_fini;
+	backend->query_config = __tpl_x11_display_query_config;
+	backend->get_window_info = __tpl_x11_display_get_window_info;
+	backend->get_pixmap_info = __tpl_x11_display_get_pixmap_info;
+	backend->flush = __tpl_x11_display_flush;
+	backend->wait_native = __tpl_x11_display_wait_native;
+}
+
+/* Populate the surface backend dispatch table for X11/DRI3.
+ * All surface hooks are DRI3-specific implementations. */
+void
+__tpl_surface_init_backend_x11_dri3(tpl_surface_backend_t *backend)
+{
+	backend->type = TPL_BACKEND_X11_DRI3;
+	backend->data = NULL;
+
+	backend->init = __tpl_x11_dri3_surface_init;
+	backend->fini = __tpl_x11_dri3_surface_fini;
+	backend->begin_frame = __tpl_x11_dri3_surface_begin_frame;
+	backend->end_frame = __tpl_x11_dri3_surface_end_frame;
+	backend->validate_frame = __tpl_x11_dri3_surface_validate_frame;
+	backend->get_buffer = __tpl_x11_dri3_surface_get_buffer;
+	backend->post = __tpl_x11_dri3_surface_post;
+}
+
+/* Populate the buffer backend dispatch table for X11/DRI3.
+ * Buffer operations are shared with the generic X11 path (no
+ * DRI3-specific buffer hooks are needed). */
+void
+__tpl_buffer_init_backend_x11_dri3(tpl_buffer_backend_t *backend)
+{
+	backend->type = TPL_BACKEND_X11_DRI3;
+	backend->data = NULL;
+
+	backend->init = __tpl_x11_buffer_init;
+	backend->fini = __tpl_x11_buffer_fini;
+	backend->map = __tpl_x11_buffer_map;
+	backend->unmap = __tpl_x11_buffer_unmap;
+	backend->lock = __tpl_x11_buffer_lock;
+	backend->unlock = __tpl_x11_buffer_unlock;
+	backend->get_reused_flag = __tpl_x11_buffer_get_reused_flag;
+}
+
+
--- /dev/null
+#ifndef TPL_X11_INTERNAL_H
+#define TPL_X11_INTERNAL_H
+
+#include "tpl.h"
+#include <stdlib.h>
+#include <pthread.h>
+
+#include "tpl_utils.h"
+
+#define TIZEN_FEATURES_ENABLE 0
+
+/* DRI2 buffer flag bits (the DRI3 path reuses the same encoding). */
+#define DRI2_BUFFER_FB 0x02
+#define DRI2_BUFFER_MAPPED 0x04
+#define DRI2_BUFFER_REUSED 0x08
+#define DRI2_BUFFER_AGE 0x70 /* 01110000 */
+
+/* Fully parenthesize the macro argument so expressions such as
+ * DRI2_BUFFER_IS_FB(a | b) expand with the intended binding. */
+#define DRI2_BUFFER_IS_FB(flag) ((((flag)) & DRI2_BUFFER_FB) ? 1 : 0)
+#define DRI2_BUFFER_IS_REUSED(flag) ((((flag)) & DRI2_BUFFER_REUSED) ? 1 : 0)
+#define DRI2_BUFFER_GET_AGE(flag) ((((flag)) & DRI2_BUFFER_AGE) >> 4)
+
+#define TPL_STACK_XRECTANGLE_SIZE 16
+/* [BEGIN: 20141125-xuelian.bai] DRI3 need lots of buffer cache. or it will get
+ * slow */
+#define TPL_BUFFER_CACHE_MAX_ENTRIES 40
+/* [END: 20141125-xuelian.bai] */
+
+/* Environment variables used to select the swap behaviour at runtime. */
+#define EGL_X11_WINDOW_SWAP_TYPE_ENV_NAME "EGL_X11_SWAP_TYPE_WINDOW"
+#define EGL_X11_FB_SWAP_TYPE_ENV_NAME "EGL_X11_SWAP_TYPE_FB"
+
+typedef struct _tpl_x11_global tpl_x11_global_t;
+
+/* Swap submission policy for window/fb surfaces. */
+typedef enum
+{
+	TPL_X11_SWAP_TYPE_SYNC = 0,
+	TPL_X11_SWAP_TYPE_ASYNC,
+	TPL_X11_SWAP_TYPE_LAZY,
+} tpl_x11_swap_type_t;
+
+/* Process-wide X11 backend state, shared by all displays. */
+struct _tpl_x11_global
+{
+	int display_count;
+
+	Display *worker_display;
+	int bufmgr_fd;
+	tbm_bufmgr bufmgr;
+
+	tpl_x11_swap_type_t win_swap_type;
+	tpl_x11_swap_type_t fb_swap_type;
+};
+
+pthread_mutex_t
+__tpl_x11_get_global_mutex(void);
+
+void
+__tpl_x11_swap_str_to_swap_type(char *str, tpl_x11_swap_type_t *type);
+
+tpl_buffer_t *
+__tpl_x11_surface_buffer_cache_find(tpl_list_t *buffer_cache, unsigned int name);
+void
+__tpl_x11_surface_buffer_cache_remove(tpl_list_t *buffer_cache, unsigned int name);
+void
+__tpl_x11_surface_buffer_cache_add(tpl_list_t *buffer_cache, tpl_buffer_t *buffer);
+void
+__tpl_x11_surface_buffer_cache_clear(tpl_list_t *buffer_cache);
+tpl_bool_t
+__tpl_x11_display_query_config(tpl_display_t *display,
+			       tpl_surface_type_t surface_type, int red_size,
+			       int green_size, int blue_size, int alpha_size,
+			       int color_depth, int *native_visual_id, tpl_bool_t *is_slow);
+tpl_bool_t
+__tpl_x11_display_get_window_info(tpl_display_t *display, tpl_handle_t window,
+				  int *width, int *height, tpl_format_t *format,int depth,int a_size);
+tpl_bool_t
+__tpl_x11_display_get_pixmap_info(tpl_display_t *display, tpl_handle_t pixmap,
+				  int *width, int *height, tpl_format_t *format);
+void
+__tpl_x11_display_flush(tpl_display_t *display);
+tpl_bool_t
+__tpl_x11_buffer_init(tpl_buffer_t *buffer);
+void
+__tpl_x11_buffer_fini(tpl_buffer_t *buffer);
+void *
+__tpl_x11_buffer_map(tpl_buffer_t *buffer, int size);
+void
+__tpl_x11_buffer_unmap(tpl_buffer_t *buffer, void *ptr, int size);
+tpl_bool_t
+__tpl_x11_buffer_lock(tpl_buffer_t *buffer, tpl_lock_usage_t usage);
+void
+__tpl_x11_buffer_unlock(tpl_buffer_t *buffer);
+tpl_bool_t __tpl_x11_buffer_get_reused_flag(tpl_buffer_t *buffer);
+void
+__tpl_x11_display_wait_native(tpl_display_t *display);
+
+#endif /* TPL_X11_INTERNAL_H */
--- /dev/null
+# Standalone Makefile for the TBM backend of GBM (gbm_tbm.so).
+SRC_DIR = .
+BIN_NAME = gbm_tbm.so
+INST_LIBDIR = $(libdir)
+INST_INCDIR = $(includedir)
+
+CC ?= gcc
+
+CFLAGS += -Wall -fPIC -I$(SRC_DIR)
+LDFLAGS +=
+
+# libtbm flags come from pkg-config; libdrm headers and the
+# wayland-drm helper library are added explicitly.
+CFLAGS += `pkg-config --cflags libtbm` -I/usr/include/libdrm
+LDFLAGS += `pkg-config --libs libtbm` -lwayland-drm
+
+HEADERS = \
+	$(SRC_DIR)/gbm_tbm.h \
+	$(SRC_DIR)/gbm_tbmint.h
+
+SRCS = \
+	$(SRC_DIR)/gbm_tbm.c
+
+OBJS = $(SRCS:%.c=%.o)
+
+################################################################################
+all: $(BIN_NAME)
+
+# Link the backend as a shared object.
+$(BIN_NAME): $(OBJS) $(HEADERS)
+	$(CC) -o $@ $(OBJS) -shared $(CFLAGS) $(LDFLAGS)
+
+%.o: %.c
+	$(CC) -c -o $@ $< $(CFLAGS)
+
+clean:
+	find . -name "*.o" -exec rm -vf {} \;
+	find . -name "*~" -exec rm -vf {} \;
+	rm -vf $(BIN_NAME)
+
+# Install the .so under $(INST_LIBDIR)/gbm and provide compatibility
+# symlinks in both locations.
+install: all
+	mkdir -p $(INST_INCDIR)/gbm
+	mkdir -p $(INST_LIBDIR)/gbm
+	cp -va $(BIN_NAME) $(INST_LIBDIR)/gbm/
+	ln -sf gbm/gbm_tbm.so $(INST_LIBDIR)/libgbm_tbm.so
+	ln -sf gbm/gbm_tbm.so $(INST_LIBDIR)/gbm/libgbm_tbm.so
+
+uninstall:
+	rm -f $(INST_LIBDIR)/gbm/$(BIN_NAME)
+	rm -f $(INST_LIBDIR)/libgbm_tbm.so
+	rm -f $(INST_LIBDIR)/gbm/libgbm_tbm.so
--- /dev/null
+/**************************************************************************
+
+Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: Sangjin Lee <lsj119@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#include <xf86drm.h>
+
+#include "gbm_tbmint.h"
+
+#include <wayland-drm.h>
+
+/* Accessor: return the underlying tbm_bo of a gbm_tbm_bo. */
+GBM_EXPORT tbm_bo
+gbm_tbm_bo_get_tbm_bo(struct gbm_tbm_bo *bo)
+{
+	return bo->bo;
+}
+
+/* Accessor: surface width in pixels. */
+GBM_EXPORT uint32_t
+gbm_tbm_surface_get_width(struct gbm_tbm_surface *surf)
+{
+	return surf->base.width;
+}
+
+/* Accessor: surface height in pixels. */
+GBM_EXPORT uint32_t
+gbm_tbm_surface_get_height(struct gbm_tbm_surface *surf)
+{
+	return surf->base.height;
+}
+
+/* Accessor: surface pixel format (GBM_FORMAT_*). */
+GBM_EXPORT uint32_t
+gbm_tbm_surface_get_format(struct gbm_tbm_surface *surf)
+{
+	return surf->base.format;
+}
+
+/* Accessor: surface usage flags (GBM_BO_USE_*). */
+GBM_EXPORT uint32_t
+gbm_tbm_surface_get_flags(struct gbm_tbm_surface *surf)
+{
+	return surf->base.flags;
+}
+
+/* Attach an opaque per-surface pointer for the TBM backend's use.
+ * Ownership stays with the caller; the surface does not free it. */
+GBM_EXPORT void
+gbm_tbm_surface_set_user_data(struct gbm_tbm_surface *surf, void *data)
+{
+	surf->tbm_private = data;
+}
+
+/* Retrieve the pointer stored by gbm_tbm_surface_set_user_data(). */
+GBM_EXPORT void *
+gbm_tbm_surface_get_user_data(struct gbm_tbm_surface *surf)
+{
+	return surf->tbm_private;
+}
+
+/* Install the device-level surface_has_free_buffers callback. */
+GBM_EXPORT void
+gbm_tbm_device_set_callback_surface_has_free_buffers(struct gbm_tbm_device *gbm_tbm, int (*callback)(struct gbm_surface *))
+{
+	gbm_tbm->base.base.surface_has_free_buffers = callback;
+}
+
+/* Install the device-level surface_lock_front_buffer callback. */
+GBM_EXPORT void
+gbm_tbm_device_set_callback_surface_lock_front_buffer(struct gbm_tbm_device *gbm_tbm, struct gbm_bo *(*callback)(struct gbm_surface *))
+{
+	gbm_tbm->base.base.surface_lock_front_buffer = callback;
+}
+
+/* Install the device-level surface_release_buffer callback. */
+GBM_EXPORT void
+gbm_tbm_device_set_callback_surface_release_buffer(struct gbm_tbm_device *gbm_tbm, void (*callback)(struct gbm_surface *, struct gbm_bo *))
+{
+	gbm_tbm->base.base.surface_release_buffer = callback;
+}
+
+/* is_format_supported hook.
+ * XRGB8888 is always accepted; ARGB8888 is rejected for scanout;
+ * any other format is rejected.  A buffer requested as both a 64x64
+ * cursor and a rendering target is also unsupported.
+ * Returns 1 if the combination is usable, 0 otherwise. */
+static int
+__gbm_tbm_is_format_supported(struct gbm_device *gbm,
+			      uint32_t format,
+			      uint32_t usage)
+{
+	switch (format)
+	{
+	case GBM_BO_FORMAT_XRGB8888:
+	case GBM_FORMAT_XRGB8888:
+		break;
+	case GBM_BO_FORMAT_ARGB8888:
+	case GBM_FORMAT_ARGB8888:
+		if (usage & GBM_BO_USE_SCANOUT)
+			return 0;
+		break;
+	default:
+		return 0;
+	}
+
+	if (usage & GBM_BO_USE_CURSOR_64X64 &&
+	    usage & GBM_BO_USE_RENDERING)
+		return 0;
+
+	return 1;
+}
+
+/* bo_write hook: copy `count` bytes from `buf` into the bo's backing
+ * storage through a CPU mapping.
+ * Returns 0 on success, -1 if the bo could not be mapped.
+ * NOTE(review): `count` is trusted to be within the bo's size —
+ * confirm callers never pass more than the allocation. */
+static int
+__gbm_tbm_bo_write(struct gbm_bo *_bo, const void *buf, size_t count)
+{
+	struct gbm_tbm_bo *bo = gbm_tbm_bo(_bo);
+	void *mapped = NULL;
+
+	mapped = (void *)tbm_bo_map(bo->bo, TBM_DEVICE_CPU, TBM_OPTION_WRITE).ptr;
+	if (mapped == NULL)
+	{
+		/* Mapping failed; previously this would memcpy() into NULL. */
+		return -1;
+	}
+
+	memcpy(mapped, buf, count);
+	tbm_bo_unmap(bo->bo);
+
+	return 0;
+}
+
+/* bo_get_fd hook: export the bo as a dma-buf/MM file descriptor.
+ * Returns the fd stored in the TBM_DEVICE_MM handle (s32 field). */
+static int
+__gbm_tbm_bo_get_fd(struct gbm_bo *_bo)
+{
+	struct gbm_tbm_bo *bo = gbm_tbm_bo(_bo);
+	tbm_bo_handle handle;
+
+	handle = tbm_bo_get_handle(bo->bo, TBM_DEVICE_MM);
+
+	return handle.s32;
+}
+
+/* bo_destroy hook: drop our reference on the tbm_bo (if any) and free
+ * the wrapper.  The underlying buffer is released by tbm when its
+ * refcount reaches zero. */
+static void
+__gbm_tbm_bo_destroy(struct gbm_bo *_bo)
+{
+	struct gbm_tbm_bo *bo = gbm_tbm_bo(_bo);
+
+	if(bo->bo)
+	{
+		tbm_bo_unref(bo->bo);
+		bo->bo = NULL;
+	}
+
+	free(bo);
+}
+
+/* bo_import hook: wrap an existing buffer in a gbm_bo.
+ * Only GBM_BO_IMPORT_WL_BUFFER is handled: the wl_resource is resolved
+ * through wayland-drm and its driver_buffer (a tbm_bo) is adopted.
+ * Returns NULL on unsupported type/format or missing wl_drm.
+ * NOTE(review): the adopted tbo is not ref-counted here, while
+ * __gbm_tbm_bo_destroy() unrefs bo->bo unconditionally — confirm the
+ * ownership contract of wayland_drm_buffer_get()'s driver_buffer.
+ * NOTE(review): unlike the other hooks this function is not static —
+ * confirm whether external linkage is intentional. */
+struct gbm_bo *
+__gbm_tbm_bo_import(struct gbm_device *gbm, uint32_t type,
+		    void *buffer, uint32_t usage)
+{
+	struct gbm_tbm_device *dri = gbm_tbm_device(gbm);
+	struct gbm_tbm_bo *bo;
+	struct wl_drm_buffer *drm_buffer;
+	tbm_bo tbo;
+	uint32_t width, height;
+	unsigned int stride, format;
+	tbm_bo_handle handle;
+
+	bo = calloc(1, sizeof *bo);
+	if (bo == NULL)
+		return NULL;
+
+	switch (type)
+	{
+	case GBM_BO_IMPORT_WL_BUFFER:
+	{
+		if (!dri->wl_drm)
+		{
+			free(bo);
+			return NULL;
+		}
+
+		drm_buffer = wayland_drm_buffer_get((struct wl_drm *)dri->wl_drm, (struct wl_resource *)buffer);
+		if (!drm_buffer)
+		{
+			free(bo);
+			return NULL;
+		}
+
+		/* Translate the wayland-drm format to the GBM equivalent. */
+		switch (drm_buffer->format)
+		{
+		case WL_DRM_FORMAT_ARGB8888:
+			format = GBM_FORMAT_ARGB8888;
+			break;
+		case WL_DRM_FORMAT_XRGB8888:
+			format = GBM_FORMAT_XRGB8888;
+			break;
+		default:
+			free(bo);
+			return NULL;
+		}
+
+		width = drm_buffer->width;
+		height = drm_buffer->height;
+		stride = drm_buffer->stride[0];
+
+		tbo = drm_buffer->driver_buffer;
+		break;
+	}
+	default:
+		free(bo);
+		return NULL;
+	}
+
+	bo->base.base.gbm = gbm;
+	bo->base.base.width = width;
+	bo->base.base.height = height;
+	bo->base.base.format = format;
+	bo->base.base.stride = stride;
+
+	bo->format = format;
+	bo->usage = usage;
+
+	bo->bo = tbo;
+	handle = tbm_bo_get_handle(bo->bo, TBM_DEVICE_DEFAULT);
+
+	bo->base.base.handle.u64 = handle.u64;
+	return &bo->base.base;
+}
+
+/* bo_create hook: allocate a fresh tbm_bo-backed gbm_bo.
+ * Stride is derived from the format (2 bytes/px for RGB565, 4 for the
+ * 8888 variants); scanout or cursor usage requests TBM_BO_SCANOUT.
+ * Returns NULL on unsupported format or allocation failure. */
+static struct gbm_bo *
+__gbm_tbm_bo_create(struct gbm_device *gbm,
+		    uint32_t width, uint32_t height,
+		    uint32_t format, uint32_t usage)
+{
+	struct gbm_tbm_device *dri = gbm_tbm_device(gbm);
+	struct gbm_tbm_bo *bo;
+	unsigned int size, stride;
+	int flags = TBM_BO_DEFAULT;
+	tbm_bo_handle handle;
+
+	bo = calloc(1, sizeof *bo);
+	if (bo == NULL)
+		return NULL;
+
+	bo->base.base.gbm = gbm;
+	bo->base.base.width = width;
+	bo->base.base.height = height;
+	bo->base.base.format = format;
+
+	bo->format = format;
+	bo->usage = usage;
+
+	switch (format)
+	{
+	case GBM_FORMAT_RGB565:
+		stride = width * 2;
+		break;
+	case GBM_FORMAT_XRGB8888:
+	case GBM_BO_FORMAT_XRGB8888:
+	case GBM_FORMAT_ARGB8888:
+	case GBM_BO_FORMAT_ARGB8888:
+	case GBM_FORMAT_ABGR8888:
+		stride = width * 4;
+		break;
+	default:
+		free(bo);
+		return NULL;
+	}
+
+	if ((usage & GBM_BO_USE_SCANOUT) || (usage & GBM_BO_USE_CURSOR_64X64))
+	{
+		flags |= TBM_BO_SCANOUT;
+	}
+
+	size = stride * height;
+	bo->bo = tbm_bo_alloc(dri->bufmgr, size, flags);
+	if (bo->bo == NULL)
+	{
+		/* Allocation failed; previously a NULL tbm_bo was passed to
+		 * tbm_bo_get_handle() and returned to the caller. */
+		free(bo);
+		return NULL;
+	}
+	bo->base.base.stride = stride;
+	handle = tbm_bo_get_handle(bo->bo, TBM_DEVICE_DEFAULT);
+
+	bo->base.base.handle.ptr = handle.ptr;
+
+	return &bo->base.base;
+}
+
+/* surface_create hook: allocate the surface wrapper and record its
+ * geometry/format/flags.  No buffers are allocated here; that is left
+ * to the callbacks installed via gbm_tbm_device_set_callback_*().
+ * Returns NULL on allocation failure. */
+static struct gbm_surface *
+__gbm_tbm_surface_create(struct gbm_device *gbm,
+			 uint32_t width, uint32_t height,
+			 uint32_t format, uint32_t flags)
+{
+	struct gbm_tbm_surface *surf;
+
+	surf = calloc(1, sizeof *surf);
+	if (surf == NULL)
+		return NULL;
+
+	surf->base.gbm = gbm;
+	surf->base.width = width;
+	surf->base.height = height;
+	surf->base.format = format;
+	surf->base.flags = flags;
+
+	return &surf->base;
+}
+
+/* surface_destroy hook: free the wrapper.  tbm_private is not freed —
+ * it is owned by whoever called gbm_tbm_surface_set_user_data(). */
+static void
+__gbm_tbm_surface_destroy(struct gbm_surface *_surf)
+{
+	struct gbm_tbm_surface *surf = gbm_tbm_surface(_surf);
+
+	free(surf);
+}
+
+/* Device destroy hook: tear down the tbm buffer manager, release the
+ * driver-name string obtained from drmGetDeviceNameFromFd(), and free
+ * the device.  The drm fd itself is owned by the caller. */
+static void
+__tbm_destroy(struct gbm_device *gbm)
+{
+	struct gbm_tbm_device *dri = gbm_tbm_device(gbm);
+
+	if (dri->bufmgr)
+		tbm_bufmgr_deinit(dri->bufmgr);
+
+	if (dri->base.driver_name)
+		free(dri->base.driver_name);
+
+	free(dri);
+}
+
+/* Backend entry point: create a gbm_device backed by libtbm on the
+ * given drm fd.  Resolves the driver name, initializes the tbm buffer
+ * manager, and wires up every device hook.
+ * Returns NULL on any failure (allocation, drm name lookup, or tbm
+ * init); on failure all partially-acquired resources are released. */
+static struct gbm_device *
+__tbm_device_create(int fd)
+{
+	struct gbm_tbm_device *dri;
+
+	dri = calloc(1, sizeof *dri);
+	if (dri == NULL)
+	{
+		/* Previously dri was dereferenced without a NULL check. */
+		return NULL;
+	}
+
+	dri->base.driver_name = drmGetDeviceNameFromFd(fd);
+	if (dri->base.driver_name == NULL)
+		goto bail;
+
+	dri->bufmgr = tbm_bufmgr_init(fd);
+	if (dri->bufmgr == NULL)
+		goto bail;
+
+	dri->base.base.fd = fd;
+	dri->base.base.bo_create = __gbm_tbm_bo_create;
+	dri->base.base.bo_import = __gbm_tbm_bo_import;
+	dri->base.base.is_format_supported = __gbm_tbm_is_format_supported;
+	dri->base.base.bo_write = __gbm_tbm_bo_write;
+	dri->base.base.bo_get_fd = __gbm_tbm_bo_get_fd;
+	dri->base.base.bo_destroy = __gbm_tbm_bo_destroy;
+	dri->base.base.destroy = __tbm_destroy;
+	dri->base.base.surface_create = __gbm_tbm_surface_create;
+	dri->base.base.surface_destroy = __gbm_tbm_surface_destroy;
+
+	dri->base.type = GBM_DRM_DRIVER_TYPE_DRI;
+	dri->base.base.name = "drm";
+
+	return &dri->base.base;
+
+bail:
+	if (dri->bufmgr)
+		tbm_bufmgr_deinit(dri->bufmgr);
+
+	if (dri->base.driver_name)
+		free(dri->base.driver_name);
+
+	free(dri);
+	return NULL;
+}
+
+/* Exported backend descriptor picked up by the GBM loader. */
+struct gbm_backend gbm_backend = {
+	.backend_name = "tbm",
+	.create_device = __tbm_device_create,
+};
--- /dev/null
+/* Public interface of the TBM backend for GBM: accessors for the
+ * backend-private bo/surface wrappers and setters that let the EGL
+ * layer install the surface buffer-management callbacks. */
+#ifndef _GBM_TBM_H_
+#define _GBM_TBM_H_
+
+#include <gbm.h>
+#include <tbm_bufmgr.h>
+
+/* Opaque backend types; definitions live in gbm_tbmint.h. */
+struct gbm_tbm_device;
+struct gbm_tbm_bo;
+struct gbm_tbm_surface;
+
+
+/* Callback installers: hook surface buffer management into the device. */
+void
+gbm_tbm_device_set_callback_surface_has_free_buffers(struct gbm_tbm_device *gbm_tbm, int (*callback)(struct gbm_surface *));
+
+void
+gbm_tbm_device_set_callback_surface_lock_front_buffer(struct gbm_tbm_device *gbm_tbm, struct gbm_bo *(*callback)(struct gbm_surface *));
+
+void
+gbm_tbm_device_set_callback_surface_release_buffer(struct gbm_tbm_device *gbm_tbm, void (*callback)(struct gbm_surface *, struct gbm_bo *));
+
+/* Surface accessors. */
+uint32_t
+gbm_tbm_surface_get_width(struct gbm_tbm_surface *surf);
+
+uint32_t
+gbm_tbm_surface_get_height(struct gbm_tbm_surface *surf);
+
+uint32_t
+gbm_tbm_surface_get_format(struct gbm_tbm_surface *surf);
+
+uint32_t
+gbm_tbm_surface_get_flags(struct gbm_tbm_surface *surf);
+
+/* Opaque per-surface user data (caller-owned). */
+void
+gbm_tbm_surface_set_user_data(struct gbm_tbm_surface *surf, void *data);
+
+void *
+gbm_tbm_surface_get_user_data(struct gbm_tbm_surface *surf);
+
+/* Underlying tbm_bo of a backend bo. */
+tbm_bo
+gbm_tbm_bo_get_tbm_bo(struct gbm_tbm_bo *bo);
+
+#endif
--- /dev/null
+/**************************************************************************
+
+Copyright 2012 Samsung Electronics co., Ltd. All Rights Reserved.
+
+Contact: Sangjin Lee <lsj119@samsung.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/* Internal definitions of the TBM backend: concrete layouts of the
+ * gbm_tbm_* wrapper structs plus downcast helpers from the generic
+ * GBM types. */
+#ifndef _GBM_TBM_INTERNAL_H_
+#define _GBM_TBM_INTERNAL_H_
+
+#include "gbm_tbm.h"
+#include <gbm/gbmint.h>
+#include <gbm/common_drm.h>
+
+#include <tbm_bufmgr.h>
+
+/* Device wrapper: common drm device plus the tbm buffer manager and
+ * an optional wl_drm instance used for wl_buffer imports. */
+struct gbm_tbm_device {
+	struct gbm_drm_device base;
+
+	tbm_bufmgr bufmgr;
+	void* wl_drm; /* struct wl_drm */
+};
+
+/* Buffer-object wrapper: common drm bo plus the backing tbm_bo. */
+struct gbm_tbm_bo {
+	struct gbm_drm_bo base;
+
+	uint32_t format;
+	uint32_t usage;
+	tbm_bo bo;
+};
+
+/* Surface wrapper: generic surface plus a caller-owned user pointer. */
+struct gbm_tbm_surface {
+	struct gbm_surface base;
+
+	void *tbm_private;
+};
+
+/* Downcasts: safe because every backend object embeds its generic
+ * counterpart as the first member. */
+static inline struct gbm_tbm_device *
+gbm_tbm_device(struct gbm_device *gbm)
+{
+	return (struct gbm_tbm_device *) gbm;
+}
+
+static inline struct gbm_tbm_bo *
+gbm_tbm_bo(struct gbm_bo *bo)
+{
+	return (struct gbm_tbm_bo *) bo;
+}
+
+static inline struct gbm_tbm_surface *
+gbm_tbm_surface(struct gbm_surface *surface)
+{
+	return (struct gbm_tbm_surface *) surface;
+}
+
+#endif