tpl: Added a frontend API to cancel dequeued buffer. 01/134601/6
author: joonbum.ko <joonbum.ko@samsung.com>
Mon, 19 Jun 2017 07:41:23 +0000 (16:41 +0900)
committer: SooChan Lim <sc1.lim@samsung.com>
Thu, 22 Jun 2017 09:02:02 +0000 (09:02 +0000)
 - Previously, when a dequeued buffer was canceled, its reference counts were dropped until it
  was destroyed, without calling tbm_surface_queue_release. However, for the normal operation
  of wayland_egl_thread, the DDK needs to cancel a dequeued buffer before using it.
 - In the wayland_egl_thread structure, tbm_surface_queue_release should be called so that the
  tbm_surface can be completely destroyed.
 - The newly added API below calls tbm_surface_queue_release directly to destroy the unused tbm_surface.

    tpl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface)

 - The tpl_surface must be a window-type surface,
  and the tbm_surface must be a buffer in the state before rendering begins.

Change-Id: Ibf4950dc2846f1f6a01998ea39bae99bc8300b4f
Signed-off-by: joonbum.ko <joonbum.ko@samsung.com>
src/tpl.h
src/tpl_internal.h
src/tpl_surface.c
src/tpl_wayland_egl.c
src/tpl_wl_egl_thread.c

index 341afef..8748add 100644 (file)
--- a/src/tpl.h
+++ b/src/tpl.h
@@ -751,6 +751,19 @@ tpl_result_t
 tpl_surface_set_rotation_capability(tpl_surface_t *surface, tpl_bool_t set);
 
 /**
+ * Cancel a dequeued buffer before it is used.
+ *
+ * The canceled buffer will be released once it is unreferenced.
+ *
+ * @param surface surface from which the buffer was dequeued.
+ *  - surface type should be TPL_SURFACE_TYPE_WINDOW.
+ * @param tbm_surface dequeued buffer whose use is being canceled.
+ */
+tpl_result_t
+tpl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+                                                                  tbm_surface_h tbm_surface);
+
+/**
  * Present mode types.
  *
  * @TPL_DISPLAY_MODE_IMMEDIATE_KHR: The presentation engine does not wait for
index 0eee34b..c72fde6 100644 (file)
@@ -86,6 +86,8 @@ struct _tpl_surface_backend {
        tpl_result_t (*init)(tpl_surface_t *surface);
        void (*fini)(tpl_surface_t *surface);
        tpl_bool_t (*validate)(tpl_surface_t *surface);
+       tpl_result_t (*cancel_dequeued_buffer)(tpl_surface_t *surface,
+                                                                                  tbm_surface_h tbm_surface);
        tbm_surface_h (*dequeue_buffer)(tpl_surface_t *surface, uint64_t timeout_ns,
                                                                        tbm_fd *sync_fence);
        tpl_result_t (*enqueue_buffer)(tpl_surface_t *surface,
index 7dd1edc..254da8d 100644 (file)
@@ -192,6 +192,37 @@ tpl_surface_get_post_interval(tpl_surface_t *surface)
        return interval;
 }
 
+
+tpl_result_t
+tpl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+                                                                  tbm_surface_h tbm_surface)
+{
+       tpl_result_t ret = TPL_ERROR_NONE;
+
+       if (!surface || (surface->type != TPL_SURFACE_TYPE_WINDOW)) {
+               TPL_ERR("Invalid surface(%p)", surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!surface->backend.cancel_dequeued_buffer) {
+               TPL_ERR("TPL surface has not been initialized correctly!");
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!tbm_surface) {
+               TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       TPL_LOG_F("tpl_surface_t(%p) tbm_surface(%p)", surface, tbm_surface);
+
+       TPL_OBJECT_LOCK(surface);
+       ret = surface->backend.cancel_dequeued_buffer(surface, tbm_surface);
+       TPL_OBJECT_UNLOCK(surface);
+
+       return ret;
+}
+
 tbm_surface_h
 tpl_surface_dequeue_buffer(tpl_surface_t *surface)
 {
index 27f667b..c5ad096 100644 (file)
@@ -1045,6 +1045,41 @@ __tpl_wayland_egl_surface_wait_dequeuable(tpl_surface_t *surface)
        return ret;
 }
 
+
+static tpl_result_t
+__tpl_wayland_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+                                                                                                tbm_surface_h tbm_surface)
+{
+       tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+       if (!wayland_egl_surface) {
+               TPL_ERR("Invalid backend surface. surface(%p) wayland_egl_surface(%p)",
+                               surface, wayland_egl_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!tbm_surface_internal_is_valid(tbm_surface)) {
+               TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+                       return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       tbm_surface_internal_unref(tbm_surface);
+
+       tsq_err = tbm_surface_queue_release(wayland_egl_surface->tbm_queue,
+                                                                               tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       TPL_LOG_B("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
+                         surface, tbm_surface);
+
+       return TPL_ERROR_NONE;
+}
+
 static tbm_surface_h
 __tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
                                                                                 tbm_fd *sync_fence)
@@ -1336,6 +1371,8 @@ __tpl_surface_init_backend_wayland_egl(tpl_surface_backend_t *backend)
        backend->init = __tpl_wayland_egl_surface_init;
        backend->fini = __tpl_wayland_egl_surface_fini;
        backend->validate = __tpl_wayland_egl_surface_validate;
+       backend->cancel_dequeued_buffer =
+               __tpl_wayland_egl_surface_cancel_dequeued_buffer;
        backend->dequeue_buffer = __tpl_wayland_egl_surface_dequeue_buffer;
        backend->enqueue_buffer = __tpl_wayland_egl_surface_enqueue_buffer;
 }
index dcde2e1..7006a41 100644 (file)
@@ -525,6 +525,40 @@ __tpl_wl_egl_surface_validate(tpl_surface_t *surface)
        return retval;
 }
 
+static tpl_result_t
+__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+                                                                                       tbm_surface_h tbm_surface)
+{
+       tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+       if (!wayland_egl_surface) {
+               TPL_ERR("Invalid backend surface. surface(%p) wayland_egl_surface(%p)",
+                               surface, wayland_egl_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       if (!tbm_surface_internal_is_valid(tbm_surface)) {
+               TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       tbm_surface_internal_unref(tbm_surface);
+
+       tsq_err = tbm_surface_queue_release(wayland_egl_surface->tbm_queue,
+                                                                               tbm_surface);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       TPL_LOG_B("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
+                         surface, tbm_surface);
+
+       return TPL_ERROR_NONE;
+}
+
 static tbm_surface_h
 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
                                                                                 tbm_fd *sync_fence)
@@ -673,6 +707,8 @@ __tpl_surface_init_backend_wl_egl_thread(tpl_surface_backend_t *backend)
        backend->init = __tpl_wl_egl_surface_init;
        backend->fini = __tpl_wl_egl_surface_fini;
        backend->validate = __tpl_wl_egl_surface_validate;
+       backend->cancel_dequeued_buffer =
+               __tpl_wl_egl_surface_cancel_dequeued_buffer;
        backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
        backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
        backend->set_rotation_capability =