From: Joonbum Ko
Date: Fri, 12 Jan 2024 11:38:41 +0000 (+0900)
Subject: wl_egl: change buffers_mutex to tpl_gmutex_rec from tpl_gmutex
X-Git-Tag: accepted/tizen/unified/20240122.175410~6
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=refs%2Fchanges%2F01%2F304501%2F4;p=platform%2Fcore%2Fuifw%2Flibtpl-egl.git

wl_egl: change buffers_mutex to tpl_gmutex_rec from tpl_gmutex

- When buffers are released while the buffer list is being traversed,
  both the traversal and the deletion from the list in wl_egl_buffer_free
  must be protected by buffers_mutex.
- Since the same thread may then need to lock buffers_mutex again while
  already holding it, buffers_mutex was changed to use GRecMutex.

(A standalone sketch illustrating this locking pattern follows the diff.)

Change-Id: I22ee38bfb19f2cef84b5a0b681ed9f37e20a54e3
Signed-off-by: Joonbum Ko
---

diff --git a/src/tpl_wl_egl_thread.c b/src/tpl_wl_egl_thread.c
index cfe5ef6..ebca071 100755
--- a/src/tpl_wl_egl_thread.c
+++ b/src/tpl_wl_egl_thread.c
@@ -127,7 +127,7 @@ struct _tpl_wl_egl_surface {
 	/* wl_egl_buffer list for buffer tracing */
 	tpl_list_t *buffers;
 	int buffer_cnt; /* the number of using wl_egl_buffers */
-	tpl_gmutex buffers_mutex;
+	tpl_gmutex_rec buffers_mutex;
 	tbm_surface_h last_enq_buffer;
 
 	tpl_list_t *presentation_feedbacks; /* for tracing presentation feedbacks */
@@ -1833,7 +1833,7 @@ __tpl_wl_egl_surface_init(tpl_surface_t *surface)
 
 	tpl_gmutex_init(&wl_egl_surface->commit_sync.mutex);
 	tpl_gmutex_init(&wl_egl_surface->presentation_sync.mutex);
-	tpl_gmutex_init(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_init(&wl_egl_surface->buffers_mutex);
 	tpl_gmutex_init(&wl_egl_surface->surf_mutex);
 
 	tpl_gcond_init(&wl_egl_surface->surf_cond);
@@ -2107,6 +2107,8 @@ _buffers_force_release(tpl_wl_egl_surface_t *wl_egl_surface)
 	tpl_list_node_t *node = NULL;
 
 	TPL_INFO("[BUFFER_FORCE_RELEASE_BEGIN]", "wl_egl_surface(%p)", wl_egl_surface);
+
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	node = __tpl_list_get_front_node(wl_egl_surface->buffers);
 	do {
 		if (!node) break;
@@ -2153,6 +2155,7 @@ _buffers_force_release(tpl_wl_egl_surface_t *wl_egl_surface)
 
 		tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
 	} while ((node = __tpl_list_node_next(node)));
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 
 	wl_egl_surface->need_force_release = TPL_FALSE;
 	TPL_INFO("[BUFFER_FORCE_RELEASE_END]", "wl_egl_surface(%p)", wl_egl_surface);
@@ -2175,8 +2178,7 @@ __idle_cb_buffers_finalize(void *data)
 		return TPL_FALSE;
 	}
 
-	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
-
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	node = __tpl_list_get_front_node(wl_egl_surface->buffers);
 	do {
 		if (!node) break;
@@ -2244,7 +2246,7 @@ __idle_cb_buffers_finalize(void *data)
 
 	} while ((node = __tpl_list_node_next(node)));
 
-	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 
 	if (pending_cnt == 0) {
 		wl_egl_surface->buffers_finalize_done = TPL_TRUE;
@@ -2353,11 +2355,11 @@ __tpl_wl_egl_surface_fini(tpl_surface_t *surface)
 	wl_egl_surface->wl_egl_display = NULL;
 	wl_egl_surface->tpl_surface = NULL;
 
-	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	__tpl_list_free(wl_egl_surface->buffers, NULL);
 	wl_egl_surface->buffers = NULL;
-	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
-	tpl_gmutex_clear(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_clear(&wl_egl_surface->buffers_mutex);
 
 	tpl_gmutex_lock(&wl_egl_surface->commit_sync.mutex);
 	tpl_gmutex_unlock(&wl_egl_surface->commit_sync.mutex);
@@ -2451,8 +2453,6 @@ __tpl_wl_egl_surface_fence_sync_is_available(tpl_surface_t *surface)
 	return !wl_egl_surface->frontbuffer_activated;
 }
 
-#define CAN_DEQUEUE_TIMEOUT_MS 10000
-
 tpl_result_t
 _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 {
@@ -2476,6 +2476,7 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 		return TPL_ERROR_INVALID_OPERATION;
 	}
 
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	while (!__tpl_list_is_empty(wl_egl_surface->buffers)) {
 		tpl_bool_t need_to_release = TPL_FALSE;
 		tpl_wl_egl_buffer_t *wl_egl_buffer = (tpl_wl_egl_buffer_t *)
@@ -2492,6 +2493,7 @@ _tbm_queue_force_flush(tpl_wl_egl_surface_t *wl_egl_surface)
 			tbm_surface_internal_unref(wl_egl_buffer->tbm_surface);
 		}
 	}
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 
 	TPL_INFO("[FORCE_FLUSH]",
 			 "wl_egl_surface(%p) tbm_queue(%p)",
@@ -2586,9 +2588,9 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
 	tpl_gmutex_init(&wl_egl_buffer->mutex);
 	tpl_gcond_init(&wl_egl_buffer->cond);
 
-	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	__tpl_list_push_back(wl_egl_surface->buffers, (void *)wl_egl_buffer);
-	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 
 	TPL_INFO("[WL_EGL_BUFFER_CREATE]",
 			 "wl_egl_surface(%p) wl_egl_buffer(%p) tbm_surface(%p) bo(%d)",
@@ -2601,6 +2603,8 @@ _wl_egl_buffer_create(tpl_wl_egl_surface_t *wl_egl_surface,
 	return wl_egl_buffer;
 }
 
+#define CAN_DEQUEUE_TIMEOUT_MS 10000
+
 static tbm_surface_h
 __tpl_wl_egl_surface_dequeue_buffer(tpl_surface_t *surface, uint64_t timeout_ns,
 									int32_t *release_fence)
@@ -3724,12 +3728,12 @@ __cb_wl_egl_buffer_free(tpl_wl_egl_buffer_t *wl_egl_buffer)
 	TPL_INFO("[BUFFER_FREE]", "tbm_surface(%p) bo(%d)",
 			 wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name);
 
-	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	if (wl_egl_surface->buffers) {
 		__tpl_list_remove_data(wl_egl_surface->buffers,
 							   (void *)wl_egl_buffer, TPL_FIRST, NULL);
 	}
-	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 
 	if (wl_egl_surface->vblank) {
 		tpl_gmutex_lock(&wl_egl_surface->vblank->mutex);
@@ -3805,7 +3809,7 @@ _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
 	int buffer_cnt = 0;
 	int idx = 0;
 
-	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	buffer_cnt = __tpl_list_get_count(wl_egl_surface->buffers);
 
 	node = __tpl_list_get_front_node(wl_egl_surface->buffers);
@@ -3818,7 +3822,7 @@ _print_buffer_lists(tpl_wl_egl_surface_t *wl_egl_surface)
 				 wl_egl_buffer->tbm_surface, wl_egl_buffer->bo_name,
 				 status_to_string[wl_egl_buffer->status]);
 	} while ((node = __tpl_list_node_next(node)));
-	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 }
 
 static tpl_bool_t
@@ -3831,7 +3835,7 @@ _check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_s
 	if (!wl_egl_surface || !tbm_surface)
 		return ret;
 
-	tpl_gmutex_lock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_lock(&wl_egl_surface->buffers_mutex);
 	node = __tpl_list_get_front_node(wl_egl_surface->buffers);
 	do {
 		if (!node) break;
@@ -3847,7 +3851,7 @@ _check_buffer_validate(tpl_wl_egl_surface_t *wl_egl_surface, tbm_surface_h tbm_s
 				  tbm_surface, wl_egl_surface);
 	}
 
-	tpl_gmutex_unlock(&wl_egl_surface->buffers_mutex);
+	tpl_gmutex_rec_unlock(&wl_egl_surface->buffers_mutex);
 
 	return ret;
 }
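
A minimal standalone sketch of the recursive-locking pattern described in the
commit message. It assumes tpl_gmutex_rec is a thin wrapper over GLib's
GRecMutex (the commit message names GRecMutex); the list, buffer type, and
function names below are simplified, hypothetical stand-ins for the libtpl-egl
internals, not its real API. The point it demonstrates: deletion from the list
(the analogue of wl_egl_buffer_free) takes buffers_mutex itself, so a traversal
that already holds the mutex on the same thread relocks it recursively. With a
non-recursive GMutex that relock is undefined behavior per the GLib docs, in
practice a self-deadlock.

    /* build: gcc sketch.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    static GRecMutex buffers_mutex; /* recursive, like the patched buffers_mutex */
    static GList *buffers;          /* stand-in for wl_egl_surface->buffers */

    /* Analogue of __cb_wl_egl_buffer_free(): removing a buffer from the
     * list is itself guarded by buffers_mutex. */
    static void
    buffer_free(gpointer buffer)
    {
        g_rec_mutex_lock(&buffers_mutex); /* may relock on the same thread */
        buffers = g_list_remove(buffers, buffer);
        g_rec_mutex_unlock(&buffers_mutex);
        g_free(buffer);
    }

    /* Analogue of _buffers_force_release(): traverse and release with the
     * mutex held; every buffer_free() call below relocks recursively. */
    static void
    buffers_force_release(void)
    {
        g_rec_mutex_lock(&buffers_mutex);
        while (buffers != NULL)
            buffer_free(buffers->data); /* deletes while traversal holds the lock */
        g_rec_mutex_unlock(&buffers_mutex);
    }

    int
    main(void)
    {
        g_rec_mutex_init(&buffers_mutex);
        for (int i = 0; i < 3; i++)
            buffers = g_list_prepend(buffers, g_strdup_printf("buf%d", i));

        buffers_force_release();
        printf("buffers list empty: %s\n", buffers == NULL ? "yes" : "no");

        g_rec_mutex_clear(&buffers_mutex);
        return 0;
    }

The init/lock/unlock/clear pairing above mirrors what the diff swaps in:
tpl_gmutex_rec_init() in __tpl_wl_egl_surface_init(), tpl_gmutex_rec_lock()/
tpl_gmutex_rec_unlock() around every access to the buffers list, and
tpl_gmutex_rec_clear() in __tpl_wl_egl_surface_fini().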