From: Joonbum Ko Date: Wed, 17 Mar 2021 09:47:54 +0000 (+0900) Subject: Modified structures to be used in the vulkan backend. X-Git-Tag: submit/tizen/20210504.031804~16 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f528e5c50e88d91703185a9040238f934eacf156;p=platform%2Fcore%2Fuifw%2Flibtpl-egl.git Modified structures to be used in the vulkan backend. Change-Id: Ia5c4c0843b150f988416c38cefed569b13909272 Signed-off-by: Joonbum Ko --- diff --git a/src/tpl_wl_vk_thread.c b/src/tpl_wl_vk_thread.c index cb4f549..74e7e0a 100644 --- a/src/tpl_wl_vk_thread.c +++ b/src/tpl_wl_vk_thread.c @@ -3,31 +3,171 @@ #include "tpl_internal.h" +#include +#include +#include +#include + +#include #include #include #include -#include +#include +#include +#include + +#include + +#include +#include + +#include "tpl_utils_gthread.h" + +#define BUFFER_ARRAY_SIZE 10 + +typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t; +typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t; +typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t; +typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t; + +struct _tpl_wl_vk_display { + tpl_gsource *disp_source; + tpl_gthread *thread; + tpl_gmutex wl_event_mutex; + + struct wl_display *wl_display; + struct wl_event_queue *ev_queue; + struct wayland_tbm_client *wl_tbm_client; + int last_error; /* errno of the last wl_display error*/ + + tpl_bool_t wl_initialized; + tpl_bool_t tdm_initialized; + + tdm_client *tdm_client; + tpl_gsource *tdm_source; + int tdm_display_fd; + + tpl_bool_t use_wait_vblank; + tpl_bool_t use_explicit_sync; + tpl_bool_t prepared; + + /* device surface capabilities */ + int min_buffer; + int max_buffer; + int present_modes; + + struct tizen_surface_shm *tss; /* used for surface buffer_flush */ + struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */ +}; + +struct _tpl_wl_vk_swapchain { + tpl_wl_vk_surface_t *wl_vk_surface; + + struct { + int width; + int height; + tbm_format format; + int 
buffer_count; + int present_mode; + } properties; + + tbm_surface_h *swapchain_buffers; + + tpl_util_atomic_uint ref_cnt; +}; + +struct _tpl_wl_vk_surface { + tpl_gsource *surf_source; + + tpl_wl_vk_swapchain_t *swapchain; + + tbm_surface_queue_h tbm_queue; + + struct wl_surface *wl_surface; + struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */ + struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */ + + tdm_client_vblank *vblank; -#include "tpl_wayland_egl_thread.h" + /* surface information */ + int render_done_cnt; -typedef struct _tpl_wayland_vk_wsi_display tpl_wayland_vk_wsi_display_t; -typedef struct _tpl_wayland_vk_wsi_surface tpl_wayland_vk_wsi_surface_t; -typedef struct _tpl_wayland_vk_wsi_buffer tpl_wayland_vk_wsi_buffer_t; + tpl_wl_vk_display_t *wl_vk_display; + tpl_surface_t *tpl_surface; + + /* wl_vk_buffer array for buffer tracing */ + tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE]; + int buffer_cnt; /* the number of using wl_vk_buffers */ + tpl_gmutex buffers_mutex; + + tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */ + + tpl_gmutex surf_mutex; + tpl_gcond surf_cond; + + /* for waiting draw done */ + tpl_bool_t is_activated; + tpl_bool_t reset; /* TRUE if queue reseted by external */ + tpl_bool_t vblank_done; +}; -struct _tpl_wayland_vk_wsi_display { - twe_thread *wl_thread; - twe_display_h twe_display; +typedef enum buffer_status { + RELEASED = 0, // 0 + DEQUEUED, // 1 + ENQUEUED, // 2 + ACQUIRED, // 3 + WAITING_SIGNALED, // 4 + WAITING_VBLANK, // 5 + COMMITTED, // 6 +} buffer_status_t; + +static const char *status_to_string[7] = { + "RELEASED", // 0 + "DEQUEUED", // 1 + "ENQUEUED", // 2 + "ACQUIRED", // 3 + "WAITING_SIGNALED", // 4 + "WAITING_VBLANK", // 5 + "COMMITTED", // 6 }; -struct _tpl_wayland_vk_wsi_surface { - twe_surface_h twe_surface; - tbm_surface_queue_h tbm_queue; - tbm_surface_h *swapchain_buffers; - int buffer_count; - tpl_bool_t is_activated; - 
tpl_bool_t reset; - tpl_util_atomic_uint swapchain_reference; +struct _tpl_wl_vk_buffer { + tbm_surface_h tbm_surface; + int bo_name; + + struct wl_proxy *wl_buffer; + int dx, dy; /* position to attach to wl_surface */ + int width, height; /* size to attach to wl_surface */ + + buffer_status_t status; /* for tracing buffer status */ + int idx; /* position index in buffers array of wl_vk_surface */ + + /* for damage region */ + int num_rects; + int *rects; + + /* for checking need_to_commit (frontbuffer mode) */ + tpl_bool_t need_to_commit; + + /* to get release event via zwp_linux_buffer_release_v1 */ + struct zwp_linux_buffer_release_v1 *buffer_release; + + /* each buffers own its release_fence_fd, until it passes ownership + * to it to EGL */ + int32_t release_fence_fd; + + /* each buffers own its acquire_fence_fd. + * If it use zwp_linux_buffer_release_v1 the ownership of this fd + * will be passed to display server + * Otherwise it will be used as a fence waiting for render done + * on tpl thread */ + int32_t acquire_fence_fd; + + tpl_gmutex mutex; + tpl_gcond cond; + + tpl_wl_vk_surface_t *wl_vk_surface; }; static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain( @@ -47,7 +187,7 @@ __tpl_wl_vk_wsi_display_is_wl_display(tpl_handle_t native_dpy) static tpl_result_t __tpl_wl_vk_wsi_display_init(tpl_display_t *display) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; TPL_ASSERT(display); @@ -57,10 +197,10 @@ __tpl_wl_vk_wsi_display_init(tpl_display_t *display) return TPL_ERROR_INVALID_PARAMETER; } - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) calloc(1, - sizeof(tpl_wayland_vk_wsi_display_t)); + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) calloc(1, + sizeof(tpl_wl_vk_display_t)); if (!wayland_vk_wsi_display) { - TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_display_t."); + TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t."); return 
TPL_ERROR_OUT_OF_MEMORY; } @@ -117,11 +257,11 @@ free_display: static void __tpl_wl_vk_wsi_display_fini(tpl_display_t *display) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display; + tpl_wl_vk_display_t *wayland_vk_wsi_display; TPL_ASSERT(display); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (wayland_vk_wsi_display) { TPL_LOG_T("WL_VK", @@ -195,13 +335,13 @@ __tpl_wl_vk_wsi_display_query_window_supported_buffer_count( tpl_display_t *display, tpl_handle_t window, int *min, int *max) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; @@ -221,13 +361,13 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes( tpl_display_t *display, tpl_handle_t window, int *modes) { - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(display); TPL_ASSERT(window); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data; if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION; @@ -247,23 +387,23 @@ __tpl_wl_vk_wsi_display_query_window_supported_present_modes( static tpl_result_t __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; twe_surface_h twe_surface = 
NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW); TPL_ASSERT(surface->native_handle); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) calloc(1, - sizeof(tpl_wayland_vk_wsi_surface_t)); + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) calloc(1, + sizeof(tpl_wl_vk_surface_t)); if (!wayland_vk_wsi_surface) { - TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_surface_t."); + TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t."); return TPL_ERROR_OUT_OF_MEMORY; } wayland_vk_wsi_display = - (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + (tpl_wl_vk_display_t *)surface->display->backend.data; if (!wayland_vk_wsi_display) { TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)", wayland_vk_wsi_display); @@ -291,7 +431,7 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) wayland_vk_wsi_surface->swapchain_buffers = NULL; TPL_LOG_T("WL_VK", - "[INIT]tpl_surface(%p) tpl_wayland_vk_wsi_surface(%p) twe_surface(%p)", + "[INIT]tpl_surface(%p) tpl_wl_vk_surface(%p) twe_surface(%p)", surface, wayland_vk_wsi_surface, twe_surface); return TPL_ERROR_NONE; @@ -300,16 +440,16 @@ __tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface) static void __tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; TPL_ASSERT(surface); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; if (wayland_vk_wsi_surface == NULL) return; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (wayland_vk_wsi_display == NULL) return; @@ -351,8 +491,8 @@ 
__tpl_wl_vk_wsi_surface_enqueue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display->native_handle); TPL_ASSERT(tbm_surface); - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t *) surface->backend.data; tbm_surface_queue_error_e tsq_err; if (!tbm_surface_internal_is_valid(tbm_surface)) { @@ -404,8 +544,8 @@ __tpl_wl_vk_wsi_surface_validate(tpl_surface_t *surface) TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; return !(wayland_vk_wsi_surface->reset); } @@ -414,10 +554,10 @@ static tpl_result_t __tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE; - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; if (!wayland_vk_wsi_surface) { TPL_ERR("Invalid backend surface. 
surface(%p) wayland_vk_wsi_surface(%p)", surface, wayland_vk_wsi_surface); @@ -454,10 +594,10 @@ __tpl_wl_vk_wsi_surface_dequeue_buffer(tpl_surface_t *surface, TPL_ASSERT(surface->display); tbm_surface_h tbm_surface = NULL; - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = - (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = - (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = + (tpl_wl_vk_surface_t *)surface->backend.data; + tpl_wl_vk_display_t *wayland_vk_wsi_display = + (tpl_wl_vk_display_t *)surface->display->backend.data; tbm_surface_queue_error_e tsq_err = 0; tpl_result_t lock_res = TPL_ERROR_NONE; tpl_result_t res = TPL_ERROR_NONE; @@ -527,8 +667,8 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, tbm_surface_h **buffers, int *buffer_count) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; int i; tpl_result_t ret = TPL_ERROR_NONE; @@ -539,8 +679,8 @@ __tpl_wl_vk_wsi_surface_get_swapchain_buffers(tpl_surface_t *surface, TPL_ASSERT(buffers); TPL_ASSERT(buffer_count); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *)surface->display->backend.data; if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface, @@ -596,13 +736,13 @@ __cb_tbm_queue_reset_callback(tbm_surface_queue_h surface_queue, void *data) { tpl_surface_t *surface = NULL; - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = 
NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; tpl_bool_t is_activated = TPL_FALSE; surface = (tpl_surface_t *)data; TPL_CHECK_ON_NULL_RETURN(surface); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data; TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface); /* When queue_reset_callback is called, if is_activated is different from @@ -634,18 +774,18 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, tbm_format format, int width, int height, int buffer_count, int present_mode) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; TPL_ASSERT(surface); TPL_ASSERT(surface->backend.data); TPL_ASSERT(surface->display); - wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; TPL_ASSERT(wayland_vk_wsi_surface); - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; TPL_ASSERT(wayland_vk_wsi_display); @@ -721,8 +861,8 @@ __tpl_wl_vk_wsi_surface_create_swapchain(tpl_surface_t *surface, static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) { - tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL; - tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL; + tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL; + tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL; tpl_result_t res = TPL_ERROR_NONE; unsigned int ref; @@ -731,8 +871,8 @@ __tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface) TPL_ASSERT(surface->display); TPL_ASSERT(surface->display->backend.data); - wayland_vk_wsi_surface = 
(tpl_wayland_vk_wsi_surface_t *) surface->backend.data; - wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) surface->display->backend.data; + wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data; + wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data; if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) { ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference);