- Previously, when a dequeued buffer was canceled, its reference count was dropped
and the buffer was destroyed without calling tbm_surface_queue_release. However, for the
normal operation of wayland_egl_thread, the DDK needs to cancel a dequeued buffer before using it.
- In the wayland_egl_thread structure, tbm_surface_queue_release should be called so that the
tbm_surface can be completely destroyed.
- The newly added API below calls tbm_surface_queue_release directly to destroy an unused tbm_surface.
tpl_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface)
- The tpl_surface must be a window surface,
and the tbm_surface must be a buffer that is still in its pre-rendering state.
Change-Id: Ibf4950dc2846f1f6a01998ea39bae99bc8300b4f
Signed-off-by: joonbum.ko <joonbum.ko@samsung.com>
tpl_surface_set_rotation_capability(tpl_surface_t *surface, tpl_bool_t set);
/**
+ * Cancels a dequeued buffer before it is used.
+ *
+ * The canceled buffer will be released once it is unreferenced.
+ *
+ * @param surface surface used to dequeue.
+ * - surface type should be TPL_SURFACE_TYPE_WINDOW.
+ * @param tbm_surface buffer to cancel use.
+ */
+tpl_result_t
+tpl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface);
+
+/**
* Present mode types.
*
* @TPL_DISPLAY_MODE_IMMEDIATE_KHR: The presentation engine does not wait for
tpl_result_t (*init)(tpl_surface_t *surface);
void (*fini)(tpl_surface_t *surface);
tpl_bool_t (*validate)(tpl_surface_t *surface);
+ tpl_result_t (*cancel_dequeued_buffer)(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface);
tbm_surface_h (*dequeue_buffer)(tpl_surface_t *surface, uint64_t timeout_ns,
tbm_fd *sync_fence);
tpl_result_t (*enqueue_buffer)(tpl_surface_t *surface,
return interval;
}
+
+tpl_result_t
+tpl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface)
+{
+ tpl_result_t ret = TPL_ERROR_NONE;
+
+ if (!surface || (surface->type != TPL_SURFACE_TYPE_WINDOW)) {
+ TPL_ERR("Invalid surface(%p)", surface);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!surface->backend.cancel_dequeued_buffer) {
+ TPL_ERR("TPL surface has not been initialized correctly!");
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!tbm_surface) {
+ TPL_ERR("Invalid parameter. tbm_surface(%p)", tbm_surface);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ TPL_LOG_F("tpl_surface_t(%p) tbm_surface(%p)", surface, tbm_surface);
+
+ TPL_OBJECT_LOCK(surface);
+ ret = surface->backend.cancel_dequeued_buffer(surface, tbm_surface);
+ TPL_OBJECT_UNLOCK(surface);
+
+ return ret;
+}
+
tbm_surface_h
tpl_surface_dequeue_buffer(tpl_surface_t *surface)
{
return ret;
}
+
+static tpl_result_t
+__tpl_wayland_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface)
+{
+ tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+ wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+ if (!wayland_egl_surface) {
+ TPL_ERR("Invalid backend surface. surface(%p) wayland_egl_surface(%p)",
+ surface, wayland_egl_surface);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!tbm_surface_internal_is_valid(tbm_surface)) {
+ TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ tbm_surface_internal_unref(tbm_surface);
+
+ tsq_err = tbm_surface_queue_release(wayland_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ TPL_LOG_B("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
+ surface, tbm_surface);
+
+ return TPL_ERROR_NONE;
+}
+
static tbm_surface_h
__tpl_wayland_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
tbm_fd *sync_fence)
backend->init = __tpl_wayland_egl_surface_init;
backend->fini = __tpl_wayland_egl_surface_fini;
backend->validate = __tpl_wayland_egl_surface_validate;
+ backend->cancel_dequeued_buffer =
+ __tpl_wayland_egl_surface_cancel_dequeued_buffer;
backend->dequeue_buffer = __tpl_wayland_egl_surface_dequeue_buffer;
backend->enqueue_buffer = __tpl_wayland_egl_surface_enqueue_buffer;
}
return retval;
}
+static tpl_result_t
+__tpl_wl_egl_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
+ tbm_surface_h tbm_surface)
+{
+ tpl_wayland_egl_surface_t *wayland_egl_surface = NULL;
+ tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+ wayland_egl_surface = (tpl_wayland_egl_surface_t *)surface->backend.data;
+ if (!wayland_egl_surface) {
+ TPL_ERR("Invalid backend surface. surface(%p) wayland_egl_surface(%p)",
+ surface, wayland_egl_surface);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!tbm_surface_internal_is_valid(tbm_surface)) {
+ TPL_ERR("Invalid buffer. tbm_surface(%p)", tbm_surface);
+ return TPL_ERROR_INVALID_PARAMETER;
+ }
+
+ tbm_surface_internal_unref(tbm_surface);
+
+ tsq_err = tbm_surface_queue_release(wayland_egl_surface->tbm_queue,
+ tbm_surface);
+ if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+ TPL_ERR("Failed to release tbm_surface(%p)", tbm_surface);
+ return TPL_ERROR_INVALID_OPERATION;
+ }
+
+ TPL_LOG_B("WL_EGL", "[CANCEL BUFFER] tpl_surface(%p) tbm_surface(%p)",
+ surface, tbm_surface);
+
+ return TPL_ERROR_NONE;
+}
+
static tbm_surface_h
__tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
tbm_fd *sync_fence)
backend->init = __tpl_wl_egl_surface_init;
backend->fini = __tpl_wl_egl_surface_fini;
backend->validate = __tpl_wl_egl_surface_validate;
+ backend->cancel_dequeued_buffer =
+ __tpl_wl_egl_surface_cancel_dequeued_buffer;
backend->dequeue_buffer = __tpl_wl_egl_surface_dequeue_buffer;
backend->enqueue_buffer = __tpl_wl_egl_surface_enqueue_buffer;
backend->set_rotation_capability =