/* Global runtime bookkeeping: per-backend hash lists of live objects,
 * keyed by native handle.  All access is serialized by runtime_mutex
 * (declared elsewhere in this file). */
struct _tpl_runtime {
/* Live displays, one hash list per backend type. */
tpl_hlist_t *displays[TPL_BACKEND_COUNT];
/* Live surfaces, one hash list per backend type; entries are keyed by the
 * surface's native handle (see __tpl_runtime_add_surface below). */
+ tpl_hlist_t *surfaces[TPL_BACKEND_COUNT];
};

/* Lazily-created singleton; allocated by __tpl_runtime_init() under
 * runtime_mutex and never reset to NULL afterwards. */
static tpl_runtime_t *runtime = NULL;
}
}
+tpl_surface_t *
+__tpl_runtime_find_surface(tpl_backend_type_t type, tpl_handle_t native_surface)
+{
+ tpl_surface_t *surface = NULL;
+
+ if (runtime == NULL)
+ return NULL;
+
+ pthread_mutex_lock(&runtime_mutex);
+
+ if (type != TPL_BACKEND_UNKNOWN) {
+ if (runtime->surfaces[type] != NULL) {
+ surface = (tpl_surface_t *) __tpl_hashlist_lookup(runtime->surfaces[type],
+ (size_t) native_surface);
+ }
+ } else {
+ int i;
+
+ for (i = 0; i < TPL_BACKEND_COUNT; i++) {
+ if (runtime->surfaces[i] != NULL) {
+ surface = (tpl_surface_t *) __tpl_hashlist_lookup(runtime->surfaces[i],
+ (size_t) native_surface);
+ }
+ if (surface != NULL) break;
+ }
+ }
+
+ pthread_mutex_unlock(&runtime_mutex);
+
+ return surface;
+}
+
+tpl_result_t
+__tpl_runtime_add_surface(tpl_surface_t *surface)
+{
+ tpl_result_t ret;
+ tpl_handle_t handle;
+ tpl_backend_type_t type;
+
+ TPL_ASSERT(surface);
+
+ handle = surface->native_handle;
+ type = surface->display->backend.type;
+
+ TPL_ASSERT(0 <= type && TPL_BACKEND_COUNT > type);
+
+ if (0 != pthread_mutex_lock(&runtime_mutex)) {
+ TPL_ERR("runtime_mutex pthread_mutex_lock failed.");
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ if (TPL_ERROR_NONE != __tpl_runtime_init()) {
+ TPL_ERR("__tpl_runtime_init() failed.");
+ pthread_mutex_unlock(&runtime_mutex);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ if (NULL == runtime->surfaces[type]) {
+ runtime->surfaces[type] = __tpl_hashlist_create();
+ if (NULL == runtime->surfaces[type]) {
+ TPL_ERR("__tpl_hashlist_create failed.");
+ pthread_mutex_unlock(&runtime_mutex);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+ }
+
+ ret = __tpl_hashlist_insert(runtime->surfaces[type],
+ (size_t) handle, (void *) surface);
+ if (TPL_ERROR_NONE != ret) {
+ TPL_ERR("__tpl_hashlist_insert failed. list(%p), handle(%p), surface(%p)",
+ runtime->surfaces[type], handle, surface);
+ pthread_mutex_unlock(&runtime_mutex);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ pthread_mutex_unlock(&runtime_mutex);
+
+ return TPL_ERROR_NONE;
+}
+
+void
+__tpl_runtime_remove_surface(tpl_surface_t *surface)
+{
+ tpl_handle_t handle = surface->native_handle;
+ tpl_backend_type_t type = surface->backend.type;
+
+ pthread_mutex_lock(&runtime_mutex);
+
+ if (type != TPL_BACKEND_UNKNOWN) {
+ if (runtime != NULL && runtime->surfaces[type] != NULL)
+ __tpl_hashlist_delete(runtime->surfaces[type],
+ (size_t) handle);
+ }
+
+ pthread_mutex_unlock(&runtime_mutex);
+}
+
/* NOTE(review): the name says "init_backend" but the body calls
 * backend.fini() and unregisters the surface from the runtime — this looks
 * like a diff fragment whose hunk context was lost, with the body actually
 * belonging to a fini/teardown routine.  Confirm against the full file
 * before relying on the name. */
void
__tpl_surface_init_backend(tpl_surface_t *surface, tpl_backend_type_t type)
{
TPL_ASSERT(surface);
/* Tear down backend-private state, then drop the runtime's
 * handle->surface mapping that was added at creation time. */
surface->backend.fini(surface);
+ __tpl_runtime_remove_surface(surface);
}
/* NOTE(review): this function is a truncated diff hunk — its closing brace
 * (and, apparently, the field initialization between calloc and
 * __tpl_runtime_add_surface) lies outside this chunk.  Concrete issues to
 * confirm against the full file:
 *   1. Return type reads "static void" yet the body does "return NULL" /
 *      "return surface" — it should almost certainly be tpl_surface_t *
 *      (and tpl_surface_create is normally a public entry point).
 *   2. The find_surface call below passes `type`, which is a
 *      tpl_surface_type_t, where __tpl_runtime_find_surface expects a
 *      tpl_backend_type_t — presumably display->backend.type was intended.
 *   3. __tpl_runtime_add_surface reads surface->display and
 *      surface->native_handle, but no assignment to either is visible
 *      between the calloc and the add — verify the omitted context lines
 *      set them first.
 *   4. The [REUSE] path returns an existing surface without visibly taking
 *      a reference — verify the refcount semantics. */
static void
tpl_surface_create(tpl_display_t *display, tpl_handle_t handle,
tpl_surface_type_t type, tbm_format format)
{
- tpl_surface_t *surface;
+ tpl_surface_t *surface = NULL;
+ tpl_result_t ret = TPL_ERROR_NONE;
if (!display) {
TPL_ERR("Display is NULL!");
return NULL;
}
/* Reuse an already-registered surface for the same native handle. */
+ surface = __tpl_runtime_find_surface(type, handle);
+ if (surface) {
+ TPL_LOG_F("[REUSE] tpl_display_t(%p) tpl_surface_t(%p) native_handle(%p) format(%d)",
+ display, surface, handle, format);
+ return surface;
+ }
+
surface = (tpl_surface_t *) calloc(1, sizeof(tpl_surface_t));
if (!surface) {
TPL_ERR("Failed to allocate memory for surface!");
return NULL;
}
+ /* Add it to the runtime. */
+ ret = __tpl_runtime_add_surface(surface);
+ if (ret != TPL_ERROR_NONE) {
+ TPL_ERR("Failed to add surface to runtime list!");
+ tpl_object_unreference((tpl_object_t *) surface);
+ return NULL;
+ }
+
TPL_LOG_F("tpl_display_t(%p) tpl_surface_t(%p) native_handle(%p) format(%d)",
display, surface, handle, format);
return surface;