wl_egl: queue resize will be run in thread 04/304504/4
authorJoonbum Ko <joonbum.ko@samsung.com>
Wed, 17 Jan 2024 04:40:11 +0000 (13:40 +0900)
committerJoonbum Ko <joonbum.ko@samsung.com>
Thu, 18 Jan 2024 11:08:10 +0000 (20:08 +0900)
 AS-IS
  - In an arbitrary thread where wl_egl_window_resize is called,
  the resize callback is invoked inside it
  and resets the tbm_surface_queue to the requested size.
 PROBLEMS
  - It is difficult to protect against all cases of resizing at
  unpredictable times.
 TO-BE
  - All resets of the tbm_surface_queue are performed only in
  wl-egl-thread.

Change-Id: Idaa6609594e47321c3c28715d46001ff8269711a
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_wl_egl_thread.c

index cc431b6b1e1e1d42108a088384a061c8ffbb8c36..14025d94d518e223b52db4dbb26d16693d2f4133 100755 (executable)
@@ -91,12 +91,14 @@ typedef enum surf_message {
        INIT_SURFACE = 1,
        ACQUIRABLE = 2,
        FORCE_FLUSH = 4,
+       QUEUE_RESIZE = 8,
 } surf_message;
 
 struct _tpl_wl_egl_surface {
        tpl_gsource                  *surf_source;
 
        tbm_surface_queue_h           tbm_queue;
+       tpl_result_t                  reset_result;
        int                           num_buffers;
 
        struct wl_egl_window         *wl_egl_window;
@@ -270,7 +272,7 @@ _get_tbm_surface_bo_name(tbm_surface_h tbm_surface);
 static void
 _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface);
 static tpl_bool_t
-_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface);
+_check_tbm_surface_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface);
 static void
 __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer);
 static tpl_wl_egl_buffer_t *
@@ -286,6 +288,8 @@ _thread_surface_queue_acquire(tpl_wl_egl_surface_t *wl_egl_surface);
 static void
 _thread_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface);
 static void
+_thread_tbm_queue_resize(tpl_wl_egl_surface_t *wl_egl_surface);
+static void
 _thread_wl_surface_commit(tpl_wl_egl_surface_t *wl_egl_surface,
                                                  tpl_wl_egl_buffer_t *wl_egl_buffer);
 static void
@@ -1253,7 +1257,7 @@ __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
 
        struct tizen_private *tizen_private = (struct tizen_private *)private;
        tpl_wl_egl_surface_t *wl_egl_surface = (tpl_wl_egl_surface_t *)tizen_private->data;
-       int cur_w, cur_h, req_w, req_h, format;
+       int cur_w, cur_h, req_w, req_h;
 
        if (!wl_egl_surface) {
                TPL_ERR("Invalid wl_egl_window(%p) tizen_private->data is null.",
@@ -1261,21 +1265,34 @@ __cb_resize_callback(struct wl_egl_window *wl_egl_window, void *private)
                return;
        }
 
-       format = wl_egl_surface->format;
+       tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
+
        cur_w = wl_egl_surface->width;
        cur_h = wl_egl_surface->height;
        req_w = wl_egl_window->width;
        req_h = wl_egl_window->height;
 
+       if (cur_w == req_w && cur_h == req_h) {
+               TPL_INFO("[RESIZE_IGNORED]", "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d)",
+                                wl_egl_surface, wl_egl_window, cur_w, cur_h);
+               tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
+               return;
+       }
+
        TPL_INFO("[WINDOW_RESIZE]",
                         "wl_egl_surface(%p) wl_egl_window(%p) (%dx%d) -> (%dx%d)",
                         wl_egl_surface, wl_egl_window, cur_w, cur_h, req_w, req_h);
 
-       if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
-                       != TBM_SURFACE_QUEUE_ERROR_NONE) {
-               TPL_ERR("Failed to reset tbm_surface_queue(%p)", wl_egl_surface->tbm_queue);
-               return;
-       }
+       tpl_gsource_send_message(wl_egl_surface->surf_source,
+                                                        QUEUE_RESIZE);
+       tpl_bool_t resize_done;
+       do {
+               tpl_gcond_wait(&wl_egl_surface->surf_cond, &wl_egl_surface->surf_mutex);
+               resize_done = (req_w == tbm_surface_queue_get_width(wl_egl_surface->tbm_queue) &&
+                                          req_h == tbm_surface_queue_get_height(wl_egl_surface->tbm_queue));
+       } while (!resize_done && wl_egl_surface->reset_result == TPL_ERROR_NONE);
+
+       tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
 }
 /* -- END -- wl_egl_window callback functions */
 
@@ -1698,6 +1715,13 @@ __thread_func_surf_dispatch(tpl_gsource *gsource, uint64_t message)
                tpl_gcond_signal(&wl_egl_surface->surf_cond);
        }
 
+       if (message & QUEUE_RESIZE) {
+               TPL_LOG_D("[MSG_RECEIVED]", "wl_egl_surface(%p) queue resize message received!",
+                                 wl_egl_surface);
+               _thread_tbm_queue_resize(wl_egl_surface);
+               tpl_gcond_signal(&wl_egl_surface->surf_cond);
+       }
+
        wl_egl_surface->sent_message = NONE_MESSAGE;
 
        tpl_gmutex_unlock(&wl_egl_surface->surf_mutex);
@@ -1785,6 +1809,8 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface)
        wl_egl_surface->initialized_in_thread  = TPL_FALSE;
        wl_egl_surface->frontbuffer_activated  = TPL_FALSE;
 
+       wl_egl_surface->reset_result           = TPL_ERROR_NONE;
+
        wl_egl_surface->latest_transform       = -1;
        wl_egl_surface->serial                 = 0;
 
@@ -2458,6 +2484,26 @@ __tpl_wl_egl_surface_fence_sync_is_available(tpl_surface_t *surface)
        return !wl_egl_surface->frontbuffer_activated;
 }
 
+static void
+_thread_tbm_queue_resize(tpl_wl_egl_surface_t *wl_egl_surface)
+{
+       int req_w = wl_egl_surface->wl_egl_window->width;
+       int req_h = wl_egl_surface->wl_egl_window->height;
+       int format = wl_egl_surface->format;
+
+       TPL_INFO("[QUEUE_RESIZE]", "wl_egl_surface(%p) tbm_queue(%p)",
+                        wl_egl_surface, wl_egl_surface->tbm_queue);
+
+       if (tbm_surface_queue_reset(wl_egl_surface->tbm_queue, req_w, req_h, format)
+                       != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to reset tbm_surface_queue(%p)",
+                               wl_egl_surface->tbm_queue);
+               wl_egl_surface->reset_result = TPL_ERROR_INVALID_OPERATION;
+       } else {
+               wl_egl_surface->reset_result = TPL_ERROR_NONE;
+       }
+}
+
 void
 _thread_tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 {
@@ -2629,8 +2675,7 @@ __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
        TPL_OBJECT_UNLOCK(surface);
        tpl_gmutex_lock(&wl_egl_surface->surf_mutex);
        if (wl_egl_surface->reset == TPL_TRUE) {
-               if (_check_buffer_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer) &&
-                       tbm_surface_internal_is_valid(wl_egl_surface->last_enq_buffer)) {
+               if (_check_tbm_surface_validate(wl_egl_surface, wl_egl_surface->last_enq_buffer)) {
                        tbm_surface_h last_enq_buffer = wl_egl_surface->last_enq_buffer;
                        tpl_wl_egl_buffer_t *enqueued_buffer =
                                _get_wl_egl_buffer(last_enq_buffer);
@@ -3841,7 +3886,7 @@ _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
 }
 
 static tpl_bool_t
-_check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
+_check_tbm_surface_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_surface)
 {
        tpl_list_node_t *node = NULL;
        tpl_bool_t ret = TPL_FALSE;
@@ -3850,6 +3895,9 @@ _check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_s
        if (!wl_egl_surface || !tbm_surface)
                return ret;
 
+       if (!tbm_surface_internal_is_valid(tbm_surface))
+               return ret;
+
        tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
        node = __tpl_list_get_front_node(wl_egl_surface->buffers);
        do {