tpl_wayland_egl: Modified to use tbm_surface_queue with GUARANTEE_CYCLE mode. 31/175431/2
author joonbum.ko <joonbum.ko@samsung.com>
Tue, 10 Apr 2018 06:39:36 +0000 (15:39 +0900)
committer SooChan Lim <sc1.lim@samsung.com>
Fri, 13 Apr 2018 07:47:58 +0000 (07:47 +0000)
 - the reset flag of wayland_egl_buffer is no longer used.

Change-Id: I6d40cce03bd15db043560d57bc1892863b8bc6a5
Signed-off-by: joonbum.ko <joonbum.ko@samsung.com>
src/tpl_wayland_egl.c

index 2a42fda..f7b77b0 100644 (file)
@@ -64,7 +64,6 @@ struct _tpl_wayland_egl_buffer {
        tbm_bo bo;
        tpl_bool_t w_rotated; /* TRUE if need to call wayland_tbm_client_set_buffer_transform */
        tpl_bool_t rotated; /* TRUE if need to call wl_surface_set_buffer_transform */
-       tpl_bool_t reset; /* TRUE if queue reseted by external */
        tpl_bool_t is_new; /* for frontbuffer mode */
        tpl_bool_t need_to_release; /* for checking need release */
        struct wl_proxy *wl_proxy; /* wl_buffer proxy */
@@ -413,27 +412,6 @@ static void
 __cb_client_window_set_frontbuffer_mode(struct wl_egl_window *wl_egl_window,
                                                                                void *private, int set);
 
-static TPL_INLINE void
-__tpl_wayland_egl_buffer_set_reset_flag(tpl_list_t *tracking_list)
-{
-       tpl_list_node_t *node = __tpl_list_get_front_node(tracking_list);
-
-       while (node) {
-               tbm_surface_h tbm_surface =
-                               (tbm_surface_h)__tpl_list_node_get_data(node);
-
-               if (tbm_surface) {
-                       tpl_wayland_egl_buffer_t *wayland_egl_buffer =
-                               __tpl_wayland_egl_get_wayland_buffer_from_tbm_surface(tbm_surface);
-
-                       if (wayland_egl_buffer)
-                               wayland_egl_buffer->reset = TPL_TRUE;
-               }
-
-               node = __tpl_list_node_next(node);
-       }
-}
-
 static void
 __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue,
                                                                          void *data)
@@ -455,12 +433,6 @@ __cb_tbm_surface_queue_reset_callback(tbm_surface_queue_h surface_queue,
 
        wayland_egl_surface->reset = TPL_TRUE;
 
-       /* Set the reset flag of the buffers which attached but not released to TPL_TRUE. */
-       __tpl_wayland_egl_buffer_set_reset_flag(wayland_egl_surface->attached_buffers);
-
-       /* Set the reset flag of the buffers which dequeued but not enqueued to TPL_TRUE. */
-       __tpl_wayland_egl_buffer_set_reset_flag(wayland_egl_surface->dequeued_buffers);
-
        TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
 
        if (surface->reset_cb)
@@ -573,6 +545,14 @@ __tpl_wayland_egl_surface_init(tpl_surface_t *surface)
                goto queue_create_fail;
        }
 
+       if (tbm_surface_queue_set_modes(wayland_egl_surface->tbm_queue,
+                       TBM_SURFACE_QUEUE_MODE_GUARANTEE_CYCLE) !=
+                               TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to set queue mode to tbm_surface_queue(%p)",
+                               wayland_egl_surface->tbm_queue);
+               goto guarantee_mode_set_fail;
+       }
+
        /* Set reset_callback to tbm_queue */
        if (tbm_surface_queue_add_reset_cb(wayland_egl_surface->tbm_queue,
                                   __cb_tbm_surface_queue_reset_callback,
@@ -624,8 +604,10 @@ create_vblank_fail:
        tbm_surface_queue_remove_reset_cb(wayland_egl_surface->tbm_queue,
                                          __cb_tbm_surface_queue_reset_callback,
                                          (void *)surface);
+guarantee_mode_set_fail:
 add_reset_cb_fail:
        tbm_surface_queue_destroy(wayland_egl_surface->tbm_queue);
+       wayland_egl_surface->tbm_queue = NULL;
 queue_create_fail:
        __tpl_list_free(wayland_egl_surface->attached_buffers, NULL);
 alloc_dequeue_buffers_fail:
@@ -680,6 +662,30 @@ __tpl_wayland_egl_surface_fini(tpl_surface_t *surface)
                lock_res = pthread_mutex_lock(&wayland_egl_display->wl_event_mutex);
                wl_display_dispatch_queue_pending(wayland_egl_display->wl_dpy,
                                                                                  wayland_egl_display->wl_tbm_event_queue);
+               /* When surface is destroyed, unreference tbm_surface which tracked by
+                * the list of attached_buffers in order to free the created resources.
+                * (tpl_wayland_egl_buffer_t or wl_buffer) */
+               if (wayland_egl_surface->attached_buffers) {
+                       TPL_OBJECT_LOCK(&wayland_egl_surface->base);
+                       while (!__tpl_list_is_empty(wayland_egl_surface->attached_buffers)) {
+                               tbm_surface_queue_error_e tsq_err;
+                               tbm_surface_h tbm_surface =
+                                       __tpl_list_pop_front(wayland_egl_surface->attached_buffers, NULL);
+                               TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d",
+                                                               tbm_bo_export(tbm_surface_internal_get_bo(
+                                                                       tbm_surface, 0)));
+                               tbm_surface_internal_unref(tbm_surface);
+                               tsq_err = tbm_surface_queue_release(wayland_egl_surface->tbm_queue, tbm_surface);
+                               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                                       TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                                       tbm_surface, tsq_err);
+                       }
+
+                       __tpl_list_free(wayland_egl_surface->attached_buffers, NULL);
+                       wayland_egl_surface->attached_buffers = NULL;
+                       TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
+               }
+
                if (lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->wl_event_mutex);
 
                TPL_LOG_B("WL_EGL",
@@ -689,22 +695,6 @@ __tpl_wayland_egl_surface_fini(tpl_surface_t *surface)
                wayland_egl_surface->tbm_queue = NULL;
        }
 
-       /* When surface is destroyed, unreference tbm_surface which tracked by
-        * the list of attached_buffers in order to free the created resources.
-        * (tpl_wayland_egl_buffer_t or wl_buffer) */
-       if (wayland_egl_surface->attached_buffers) {
-               TPL_OBJECT_LOCK(&wayland_egl_surface->base);
-               while (!__tpl_list_is_empty(wayland_egl_surface->attached_buffers)) {
-                       tbm_surface_h tbm_surface =
-                               __tpl_list_pop_front(wayland_egl_surface->attached_buffers, NULL);
-                       tbm_surface_internal_unref(tbm_surface);
-               }
-
-               __tpl_list_free(wayland_egl_surface->attached_buffers, NULL);
-               wayland_egl_surface->attached_buffers = NULL;
-               TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
-       }
-
        /* the list of dequeued_buffers just does deletion */
        if (wayland_egl_surface->dequeued_buffers) {
                TPL_OBJECT_LOCK(&wayland_egl_surface->base);
@@ -959,73 +949,19 @@ __tpl_wayland_egl_surface_enqueue_buffer(tpl_surface_t *surface,
                TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
        }
 
-       if (wayland_egl_buffer->reset) {
-               /*
-                * When tbm_surface_queue being reset for receiving
-                * scan-out buffer or resized buffer
-                * tbm_surface_queue_enqueue will return error.
-                * This error condition leads to skip frame.
-                *
-                * tbm_surface received from argument this function,
-                * may be rendered done. So this tbm_surface is better to do
-                * commit forcibly without handling queue in order to prevent
-                * frame skipping.
-                */
-               __tpl_wayland_egl_surface_commit(surface, tbm_surface,
-                                                                                num_rects, rects);
-               return TPL_ERROR_NONE;
-       }
-
        tsq_err = tbm_surface_queue_enqueue(wayland_egl_surface->tbm_queue,
                                                                                tbm_surface);
-       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) {
-               /*
-                * If tbm_surface_queue has not been reset, tbm_surface_queue_enqueue
-                * will return ERROR_NONE. Otherwise, queue has been reset
-                * this tbm_surface may have only one ref_count. So we need to
-                * unreference this tbm_surface after getting ERROR_NONE result from
-                * tbm_surface_queue_enqueue in order to prevent destruction.
-                */
-               tbm_surface_internal_unref(tbm_surface);
-       } else {
-               /*
-                * If tbm_surface is valid but it is not tracked by tbm_surface_queue,
-                * tbm_surface_queue_enqueue will return below value.
-                * TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE
-                * It means tbm_surface_queue has been reset before client try
-                * to enqueue this tbm_surface.
-                * We should commit this buffer to display to assure the latest frame.
-                *
-                * In enlightenment(E20) of TIZEN platform, depending on
-                * some situation(Activate, Deactivate), the compositor may or may not
-                * display the last forcibly commited buffer in this way.
-                *
-                * In this situation, the compositor's display policy may vary from
-                * server to server.
-                */
-               if (tsq_err == TBM_SURFACE_QUEUE_ERROR_UNKNOWN_SURFACE) {
-                       __tpl_wayland_egl_surface_commit(surface, tbm_surface,
-                                                                                        num_rects, rects);
-                       return TPL_ERROR_NONE;
-               }
-
-               TPL_ERR("Failed to enqeueue tbm_surface(%p). | tsq_err = %d",
-                               tbm_surface, tsq_err);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to enqueue tbm_surface(%p) to tbm_queue(%p) | tsq_err = %d",
+                               tbm_surface, wayland_egl_surface->tbm_queue, tsq_err);
                return TPL_ERROR_INVALID_OPERATION;
        }
 
-       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE &&
-               wayland_egl_buffer->reset == TPL_TRUE) {
-               wayland_egl_buffer->reset = TPL_FALSE;
-       }
-
        tsq_err = tbm_surface_queue_acquire(wayland_egl_surface->tbm_queue,
                                                                                &tbm_surface);
-       if (tsq_err == TBM_SURFACE_QUEUE_ERROR_NONE) {
-               tbm_surface_internal_ref(tbm_surface);
-       } else {
-               TPL_ERR("Failed to acquire tbm_surface(%p). | tsq_err = %d",
-                               tbm_surface, tsq_err);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to acquire tbm_surface(%p) from tbm_queue(%p). | tsq_err = %d",
+                               tbm_surface, wayland_egl_surface->tbm_queue, tsq_err);
                return TPL_ERROR_INVALID_OPERATION;
        }
 
@@ -1123,6 +1059,7 @@ __tpl_wayland_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
                                                                                                 tbm_surface_h tbm_surface)
 {
        tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
 
        wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
        if (!wayland_egl_surface) {
@@ -1146,6 +1083,14 @@ __tpl_wayland_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
 
        tbm_surface_internal_unref(tbm_surface);
 
+       tsq_err = tbm_surface_queue_cancel_dequeue(wayland_egl_surface->tbm_queue,
+                                                                                          tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to cancel dequeue tbm_surface(%p) to tbm_queue(%p)",
+                               tbm_surface, wayland_egl_surface->tbm_queue);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
        TPL_LOG_B("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
                          surface, tbm_surface);
 
@@ -1282,8 +1227,6 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou
                        wayland_egl_buffer->rotated = TPL_FALSE;
                }
 
-               wayland_egl_buffer->reset = TPL_FALSE;
-
                if (wayland_egl_surface->is_activated != is_activated)
                        wayland_egl_buffer->is_new = TPL_TRUE;
 
@@ -1318,6 +1261,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou
        if (!wayland_egl_buffer) {
                TPL_ERR("Mem alloc for wayland_egl_buffer failed!");
                tbm_surface_internal_unref(tbm_surface);
+               tbm_surface_queue_cancel_dequeue(wayland_egl_surface->tbm_queue, tbm_surface);
                if (lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->wl_event_mutex);
                return NULL;
        }
@@ -1328,6 +1272,7 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou
        if (!wl_proxy) {
                TPL_ERR("Failed to create TBM client buffer!");
                tbm_surface_internal_unref(tbm_surface);
+               tbm_surface_queue_cancel_dequeue(wayland_egl_surface->tbm_queue, tbm_surface);
                free(wayland_egl_buffer);
                if (lock_res == 0) pthread_mutex_unlock(&wayland_egl_display->wl_event_mutex);
                return NULL;
@@ -1362,10 +1307,6 @@ __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeou
                wayland_egl_buffer->rotated = TPL_FALSE;
        }
 
-       /* reset flag is to check whether it is the buffer before
-        * tbm_surface_queue is reset or not. */
-       wayland_egl_buffer->reset = TPL_FALSE;
-
        /* 'is_new' flag is to check wheter it is a new buffer need to commit
         * in frontbuffer mode. */
        wayland_egl_buffer->is_new = TPL_TRUE;
@@ -1747,9 +1688,17 @@ static void __cb_tizen_surface_shm_flusher_flush_callback(void *data,
        if (wayland_egl_surface->attached_buffers) {
                TPL_OBJECT_LOCK(&wayland_egl_surface->base);
                while (!__tpl_list_is_empty(wayland_egl_surface->attached_buffers)) {
+                       tbm_surface_queue_error_e tsq_err;
                        tbm_surface_h tbm_surface =
                                __tpl_list_pop_front(wayland_egl_surface->attached_buffers, NULL);
+                       TRACE_ASYNC_END((int)tbm_surface, "[COMMIT ~ RELEASE_CB] BO_NAME:%d",
+                                                       tbm_bo_export(tbm_surface_internal_get_bo(
+                                                               tbm_surface, 0)));
                        tbm_surface_internal_unref(tbm_surface);
+                       tsq_err = tbm_surface_queue_release(wayland_egl_surface->tbm_queue, tbm_surface);
+                       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                               TPL_ERR("Failed to release. tbm_surface(%p) tsq_err(%d)",
+                                               tbm_surface, tsq_err);
                }
                TPL_OBJECT_UNLOCK(&wayland_egl_surface->base);
        }