tpl_wl_egl_thread: Implement force_flush to recover from can_dequeue_wait_timeout.
author Joonbum Ko <joonbum.ko@samsung.com>
Fri, 28 Dec 2018 07:37:25 +0000 (16:37 +0900)
committer Joonbum Ko <joonbum.ko@samsung.com>
Fri, 28 Dec 2018 09:16:17 +0000 (18:16 +0900)
 - This patch solves a problem where tbm_surface_queue_can_dequeue() blocks
  forever when a buffer release event goes missing because of a wayland
  socket or server logic problem.
 - If TBM_SURFACE_QUEUE_ERROR_TIMEOUT occurs, the queue is force-flushed so
  that new buffers can be allocated and the block is escaped, instead of
  waiting for buffer release events (see the sketch below).
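 - For illustration, a minimal sketch of the recovery flow, where
  dequeue_with_timeout_recovery() and force_flush_queue() are hypothetical
  stand-ins rather than libtpl-egl symbols; only the tbm_surface_queue_*
  calls are the real libtbm API used in the diff:

	#include <tbm_surface.h>
	#include <tbm_surface_queue.h>

	#define CAN_DEQUEUE_TIMEOUT_MS 5000

	/* Hypothetical helper standing in for twe_surface_queue_force_flush()
	 * in the diff below; returns 0 on success. */
	static int force_flush_queue(tbm_surface_queue_h queue);

	static tbm_surface_h
	dequeue_with_timeout_recovery(tbm_surface_queue_h queue)
	{
		tbm_surface_h surface = NULL;
		tbm_surface_queue_error_e tsq_err =
			tbm_surface_queue_can_dequeue_wait_timeout(queue,
					CAN_DEQUEUE_TIMEOUT_MS);

		if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
			/* A release event is assumed lost: reset the queue so
			 * fresh buffers can be allocated instead of blocking. */
			if (force_flush_queue(queue) != 0)
				return NULL;
			tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
		}

		if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
			return NULL;

		if (tbm_surface_queue_dequeue(queue, &surface)
				!= TBM_SURFACE_QUEUE_ERROR_NONE)
			return NULL;

		return surface;
	}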

Change-Id: I883c65b3af3cfa66336f4301628614ede0b421a7
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wayland_egl_thread.c
src/tpl_wayland_egl_thread.h
src/tpl_wl_egl_thread.c

diff --git a/src/tpl_wayland_egl_thread.c b/src/tpl_wayland_egl_thread.c
index ce5f854..3e9a913 100644
--- a/src/tpl_wayland_egl_thread.c
+++ b/src/tpl_wayland_egl_thread.c
@@ -3169,6 +3169,45 @@ twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns)
        return TPL_ERROR_NONE;
 }
 
+tpl_result_t
+twe_surface_queue_force_flush(twe_surface_h twe_surface)
+{
+       twe_wl_surf_source *surf_source = (twe_wl_surf_source *)twe_surface;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       /* Reset the queue so that fresh buffers can be allocated in place of
+        * those stuck waiting for release events that never arrived. */
+       if ((tsq_err = tbm_surface_queue_flush(surf_source->tbm_queue))
+               != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("[TIMEOUT_RESET] Failed to flush tbm_surface_queue(%p) tsq_err(%d)",
+                               surf_source->tbm_queue, tsq_err);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       /* Hand every buffer still tracked as committed back to the queue;
+        * their release events are assumed lost. */
+       if (surf_source->committed_buffers) {
+               while (!__tpl_list_is_empty(surf_source->committed_buffers)) {
+                       tbm_surface_h tbm_surface =
+                               __tpl_list_pop_front(surf_source->committed_buffers,
+                                               (tpl_free_func_t)__cb_buffer_remove_from_list);
+                       TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE] BO(%d)",
+                                       tbm_bo_export(tbm_surface_internal_get_bo(
+                                                       tbm_surface, 0)));
+                       tsq_err = tbm_surface_queue_release(surf_source->tbm_queue, tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                               tbm_surface, tsq_err);
+               }
+       }
+
+       TPL_LOG_T(BACKEND,
+                         "[FORCE_FLUSH] surf_source(%p) tbm_queue(%p)",
+                         surf_source, surf_source->tbm_queue);
+
+       return TPL_ERROR_NONE;
+}
+
 
 tpl_bool_t
 twe_check_native_handle_is_wl_display(tpl_handle_t display)
diff --git a/src/tpl_wayland_egl_thread.h b/src/tpl_wayland_egl_thread.h
index 4f7ba97..dfe6f6f 100644
--- a/src/tpl_wayland_egl_thread.h
+++ b/src/tpl_wayland_egl_thread.h
@@ -98,6 +98,9 @@ twe_surface_create_sync_fd(tbm_surface_h tbm_surface);
 tpl_result_t
 twe_surface_wait_dequeueable(twe_surface_h twe_surface, uint64_t timeout_ns);
 
+tpl_result_t
+twe_surface_queue_force_flush(twe_surface_h twe_surface);
+
 tpl_bool_t
 twe_check_native_handle_is_wl_display(tpl_handle_t display);
 
diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index 6f7f4a4..f819b45 100644
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -634,6 +634,8 @@ __tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
        return TPL_ERROR_NONE;
 }
 
+#define CAN_DEQUEUE_TIMEOUT_MS 5000
+
 static tbm_surface_h
 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
                                                                                 tbm_fd *sync_fence)
@@ -658,11 +660,8 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
                *sync_fence = -1;
 
        TPL_OBJECT_UNLOCK(surface);
-       if (!tbm_surface_queue_can_dequeue(wayland_egl_surface->tbm_queue, 1)) {
-               TPL_ERR("Failed to query can_dequeue. tbm_queue(%p)", wayland_egl_surface->tbm_queue);
-               TPL_OBJECT_LOCK(surface);
-               return NULL;
-       }
+       tsq_err = tbm_surface_queue_can_dequeue_wait_timeout(
+                               wayland_egl_surface->tbm_queue, CAN_DEQUEUE_TIMEOUT_MS);
        TPL_OBJECT_LOCK(surface);
 
        /* After the can dequeue state, call twe_display_lock to prevent other
@@ -670,6 +669,27 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
         * during below dequeue procedure. */
        lock_ret = twe_display_lock(wayland_egl_display->twe_display);
 
+       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_TIMEOUT) {
+               TPL_ERR("[CAN_DEQUEUE_TIMEOUT] queue(%p) will be reset",
+                               wayland_egl_surface->tbm_queue);
+               if (twe_surface_queue_force_flush(wayland_egl_surface->twe_surface)
+                       != TPL_ERROR_NONE) {
+                       TPL_ERR("Failed to timeout reset. tbm_queue(%p)", wayland_egl_surface->tbm_queue);
+                       if (lock_ret == TPL_ERROR_NONE)
+                               twe_display_unlock(wayland_egl_display->twe_display);
+                       return NULL;
+               } else {
+                       tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+               }
+       }
+
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to query can_dequeue. tbm_queue(%p)", wayland_egl_surface->tbm_queue);
+               if (lock_ret == TPL_ERROR_NONE)
+                       twe_display_unlock(wayland_egl_display->twe_display);
+               return NULL;
+       }
+
        /* wayland client can check their states (ACTIVATED or DEACTIVATED) with
         * below function [wayland_tbm_client_queue_check_activate()].
         * This function has to be called before tbm_surface_queue_dequeue()
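
A note on the error paths in the hunk above: twe_display_lock() can itself
fail, so each early return releases the display lock only when lock_ret
reports it was actually taken. A minimal self-contained sketch of that
discipline, where display_lock(), display_unlock(), and try_dequeue() are
hypothetical stand-ins for the twe_* calls, not real libtpl-egl symbols:

	#include <stddef.h>

	typedef enum { RESULT_OK = 0, RESULT_FAIL } result_t;

	/* Hypothetical stand-ins for twe_display_lock()/twe_display_unlock()
	 * and for the dequeue steps performed under the lock. */
	static result_t display_lock(void)   { return RESULT_OK; }
	static void     display_unlock(void) { }
	static void    *try_dequeue(void)    { return NULL; }

	static void *
	dequeue_under_display_lock(void)
	{
		/* Remember whether the lock call itself succeeded... */
		result_t lock_ret = display_lock();
		void *buffer = try_dequeue();

		if (buffer == NULL) {
			/* ...so every early return unlocks only if it was
			 * taken, mirroring the "if (lock_ret ==
			 * TPL_ERROR_NONE)" checks in the hunk above. */
			if (lock_ret == RESULT_OK)
				display_unlock();
			return NULL;
		}

		if (lock_ret == RESULT_OK)
			display_unlock();
		return buffer;
	}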