#include "tpl_internal.h"
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/eventfd.h>
+
+#include <tbm_bufmgr.h>
#include <tbm_surface.h>
#include <tbm_surface_internal.h>
#include <tbm_surface_queue.h>
-#include <tbm_sync.h>
+#include <wayland-client.h>
+#include <wayland-tbm-server.h>
+#include <wayland-tbm-client.h>
+
+#include <tdm_client.h>
+
+#include <tizen-surface-client-protocol.h>
+#include <linux-explicit-synchronization-unstable-v1-client-protocol.h>
+
+#include "tpl_utils_gthread.h"
+
+#define BUFFER_ARRAY_SIZE 10
+
+/* Forward declarations of the wl_vk backend types.
+ * NOTE: tpl_wl_vk_display_t must alias struct _tpl_wl_vk_display (defined
+ * below), not the surface struct — otherwise every cast/alloc of
+ * tpl_wl_vk_display_t silently uses the wrong struct layout. */
+typedef struct _tpl_wl_vk_display tpl_wl_vk_display_t;
+typedef struct _tpl_wl_vk_surface tpl_wl_vk_surface_t;
+typedef struct _tpl_wl_vk_swapchain tpl_wl_vk_swapchain_t;
+typedef struct _tpl_wl_vk_buffer tpl_wl_vk_buffer_t;
+
+/* Per-display backend state for the Wayland-Vulkan path.
+ * Holds the dedicated tpl gthread and its gsources, the wayland event
+ * queue/tbm client used from that thread, and the tdm client used for
+ * vblank waits. wl_event_mutex guards dispatching on ev_queue.
+ * NOTE(review): ownership of wl_display itself (caller vs backend) is
+ * not visible in this chunk — confirm before freeing here. */
+struct _tpl_wl_vk_display {
+ tpl_gsource *disp_source;
+ tpl_gthread *thread;
+ tpl_gmutex wl_event_mutex;
+
+ struct wl_display *wl_display;
+ struct wl_event_queue *ev_queue;
+ struct wayland_tbm_client *wl_tbm_client;
+ int last_error; /* errno of the last wl_display error*/
+
+ tpl_bool_t wl_initialized;
+ tpl_bool_t tdm_initialized;
+
+ tdm_client *tdm_client;
+ tpl_gsource *tdm_source;
+ int tdm_display_fd;
+
+ tpl_bool_t use_wait_vblank;
+ tpl_bool_t use_explicit_sync;
+ tpl_bool_t prepared;
+
+ /* device surface capabilities */
+ int min_buffer;
+ int max_buffer;
+ int present_modes;
+
+ struct tizen_surface_shm *tss; /* used for surface buffer_flush */
+ struct zwp_linux_explicit_synchronization_v1 *explicit_sync; /* for explicit fence sync */
+};
+
+/* One Vulkan swapchain bound to a tpl_wl_vk_surface.
+ * Reference-counted via ref_cnt; swapchain_buffers is the tbm_surface
+ * array exported to the Vulkan WSI layer. */
+struct _tpl_wl_vk_swapchain {
+ tpl_wl_vk_surface_t *wl_vk_surface;
+
+ /* properties requested at swapchain creation time */
+ struct {
+ int width;
+ int height;
+ tbm_format format;
+ int buffer_count;
+ int present_mode;
+ } properties;
+
+ tbm_surface_h *swapchain_buffers;
+
+ tpl_util_atomic_uint ref_cnt;
+};
+
+/* Per-window backend state: ties the tpl_surface_t to its wl_surface,
+ * tbm_surface_queue, current swapchain and the buffer-tracking array.
+ * surf_mutex/surf_cond synchronize the caller with the tpl thread. */
+struct _tpl_wl_vk_surface {
+ tpl_gsource *surf_source;
+
+ tpl_wl_vk_swapchain_t *swapchain;
+
+ tbm_surface_queue_h tbm_queue;
+
+ struct wl_surface *wl_surface;
+ struct zwp_linux_surface_synchronization_v1 *surface_sync; /* for explicit fence sync */
+ struct tizen_surface_shm_flusher *tss_flusher; /* used for surface buffer_flush */
+
+ tdm_client_vblank *vblank;
-#include "tpl_wayland_egl_thread.h"
+ /* surface information */
+ int render_done_cnt;
-typedef struct _tpl_wayland_vk_wsi_display tpl_wayland_vk_wsi_display_t;
-typedef struct _tpl_wayland_vk_wsi_surface tpl_wayland_vk_wsi_surface_t;
-typedef struct _tpl_wayland_vk_wsi_buffer tpl_wayland_vk_wsi_buffer_t;
+ tpl_wl_vk_display_t *wl_vk_display;
+ tpl_surface_t *tpl_surface;
+
+ /* wl_vk_buffer array for buffer tracing */
+ tpl_wl_vk_buffer_t *buffers[BUFFER_ARRAY_SIZE];
+ int buffer_cnt; /* the number of wl_vk_buffers currently in use */
+ tpl_gmutex buffers_mutex; /* guards buffers[] and buffer_cnt */
+
+ tpl_list_t *vblank_waiting_buffers; /* for FIFO/FIFO_RELAXED modes */
+
+ tpl_gmutex surf_mutex;
+ tpl_gcond surf_cond;
+
+ /* for waiting draw done */
+ tpl_bool_t is_activated;
+ tpl_bool_t reset; /* TRUE if queue was reset externally */
+ tpl_bool_t vblank_done;
+};
-struct _tpl_wayland_vk_wsi_display {
- twe_thread *wl_thread;
- twe_display_h twe_display;
+/* Lifecycle states of a tpl_wl_vk_buffer, in pipeline order:
+ * RELEASED -> DEQUEUED -> ENQUEUED -> ACQUIRED -> (fence wait) ->
+ * (vblank wait) -> COMMITTED. Keep values dense from 0: they index
+ * the status_to_string[] table. */
+typedef enum buffer_status {
+ RELEASED = 0, // 0
+ DEQUEUED, // 1
+ ENQUEUED, // 2
+ ACQUIRED, // 3
+ WAITING_SIGNALED, // 4
+ WAITING_VBLANK, // 5
+ COMMITTED, // 6
+} buffer_status_t;
+
+/* Debug names for buffer_status_t, indexed by status value.
+ * Order and element count must stay in sync with the enum above. */
+static const char *status_to_string[7] = {
+ "RELEASED", // 0
+ "DEQUEUED", // 1
+ "ENQUEUED", // 2
+ "ACQUIRED", // 3
+ "WAITING_SIGNALED", // 4
+ "WAITING_VBLANK", // 5
+ "COMMITTED", // 6
};
-struct _tpl_wayland_vk_wsi_surface {
- twe_surface_h twe_surface;
- tbm_surface_queue_h tbm_queue;
- tbm_surface_h *swapchain_buffers;
- int buffer_count;
- tpl_bool_t is_activated;
- tpl_bool_t reset;
- tpl_util_atomic_uint swapchain_reference;
+/* Tracking record for one tbm_surface: its wl_buffer proxy, commit
+ * geometry, damage, fence fds and current buffer_status_t. One entry
+ * lives in wl_vk_surface->buffers[idx]. */
+struct _tpl_wl_vk_buffer {
+ tbm_surface_h tbm_surface;
+ int bo_name;
+
+ struct wl_proxy *wl_buffer;
+ int dx, dy; /* position to attach to wl_surface */
+ int width, height; /* size to attach to wl_surface */
+
+ buffer_status_t status; /* for tracing buffer status */
+ int idx; /* position index in buffers array of wl_vk_surface */
+
+ /* for damage region */
+ int num_rects;
+ int *rects;
+
+ /* for checking need_to_commit (frontbuffer mode) */
+ tpl_bool_t need_to_commit;
+
+ /* to get release event via zwp_linux_buffer_release_v1 */
+ struct zwp_linux_buffer_release_v1 *buffer_release;
+
+ /* each buffer owns its release_fence_fd until ownership is passed
+ * to the client (original comment said "EGL"; presumably the Vulkan
+ * WSI client in this backend — confirm) */
+ int32_t release_fence_fd;
+
+ /* each buffer owns its acquire_fence_fd.
+ * If zwp_linux_buffer_release_v1 is used, ownership of this fd
+ * is passed to the display server.
+ * Otherwise it is used as a fence waiting for render done
+ * on the tpl thread */
+ int32_t acquire_fence_fd;
+
+ tpl_gmutex mutex;
+ tpl_gcond cond;
+
+ tpl_wl_vk_surface_t *wl_vk_surface;
};
static tpl_result_t __tpl_wl_vk_wsi_surface_destroy_swapchain(
static tpl_result_t
__tpl_wl_vk_wsi_display_init(tpl_display_t *display)
{
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL;
TPL_ASSERT(display);
return TPL_ERROR_INVALID_PARAMETER;
}
- wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) calloc(1,
- sizeof(tpl_wayland_vk_wsi_display_t));
+ wayland_vk_wsi_display = (tpl_wl_vk_display_t *) calloc(1,
+ sizeof(tpl_wl_vk_display_t));
if (!wayland_vk_wsi_display) {
- TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_display_t.");
+ TPL_ERR("Failed to allocate memory for new tpl_wl_vk_display_t.");
return TPL_ERROR_OUT_OF_MEMORY;
}
static void
__tpl_wl_vk_wsi_display_fini(tpl_display_t *display)
{
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display;
TPL_ASSERT(display);
- wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data;
+ wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data;
if (wayland_vk_wsi_display) {
TPL_LOG_T("WL_VK",
tpl_display_t *display,
tpl_handle_t window, int *min, int *max)
{
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL;
tpl_result_t res = TPL_ERROR_NONE;
TPL_ASSERT(display);
TPL_ASSERT(window);
- wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data;
+ wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data;
if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION;
tpl_display_t *display,
tpl_handle_t window, int *modes)
{
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL;
tpl_result_t res = TPL_ERROR_NONE;
TPL_ASSERT(display);
TPL_ASSERT(window);
- wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)display->backend.data;
+ wayland_vk_wsi_display = (tpl_wl_vk_display_t *)display->backend.data;
if (!wayland_vk_wsi_display) return TPL_ERROR_INVALID_OPERATION;
static tpl_result_t
__tpl_wl_vk_wsi_surface_init(tpl_surface_t *surface)
{
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL;
twe_surface_h twe_surface = NULL;
TPL_ASSERT(surface);
TPL_ASSERT(surface->type == TPL_SURFACE_TYPE_WINDOW);
TPL_ASSERT(surface->native_handle);
- wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) calloc(1,
- sizeof(tpl_wayland_vk_wsi_surface_t));
+ wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) calloc(1,
+ sizeof(tpl_wl_vk_surface_t));
if (!wayland_vk_wsi_surface) {
- TPL_ERR("Failed to allocate memory for new tpl_wayland_vk_wsi_surface_t.");
+ TPL_ERR("Failed to allocate memory for new tpl_wl_vk_surface_t.");
return TPL_ERROR_OUT_OF_MEMORY;
}
wayland_vk_wsi_display =
- (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data;
+ (tpl_wl_vk_display_t *)surface->display->backend.data;
if (!wayland_vk_wsi_display) {
TPL_ERR("Invalid parameter. wayland_vk_wsi_display(%p)",
wayland_vk_wsi_display);
wayland_vk_wsi_surface->swapchain_buffers = NULL;
TPL_LOG_T("WL_VK",
- "[INIT]tpl_surface(%p) tpl_wayland_vk_wsi_surface(%p) twe_surface(%p)",
+ "[INIT]tpl_surface(%p) tpl_wl_vk_surface(%p) twe_surface(%p)",
surface, wayland_vk_wsi_surface, twe_surface);
return TPL_ERROR_NONE;
static void
__tpl_wl_vk_wsi_surface_fini(tpl_surface_t *surface)
{
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL;
TPL_ASSERT(surface);
TPL_ASSERT(surface->display);
- wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
+ wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
if (wayland_vk_wsi_surface == NULL) return;
- wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)
+ wayland_vk_wsi_display = (tpl_wl_vk_display_t *)
surface->display->backend.data;
if (wayland_vk_wsi_display == NULL) return;
TPL_ASSERT(surface->display->native_handle);
TPL_ASSERT(tbm_surface);
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface =
- (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface =
+ (tpl_wl_vk_surface_t *) surface->backend.data;
tbm_surface_queue_error_e tsq_err;
if (!tbm_surface_internal_is_valid(tbm_surface)) {
TPL_ASSERT(surface);
TPL_ASSERT(surface->backend.data);
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface =
- (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface =
+ (tpl_wl_vk_surface_t *)surface->backend.data;
return !(wayland_vk_wsi_surface->reset);
}
__tpl_wl_vk_wsi_surface_cancel_dequeued_buffer(tpl_surface_t *surface,
tbm_surface_h tbm_surface)
{
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
- wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
+ wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
if (!wayland_vk_wsi_surface) {
TPL_ERR("Invalid backend surface. surface(%p) wayland_vk_wsi_surface(%p)",
surface, wayland_vk_wsi_surface);
TPL_ASSERT(surface->display);
tbm_surface_h tbm_surface = NULL;
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface =
- (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display =
- (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface =
+ (tpl_wl_vk_surface_t *)surface->backend.data;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display =
+ (tpl_wl_vk_display_t *)surface->display->backend.data;
tbm_surface_queue_error_e tsq_err = 0;
tpl_result_t lock_res = TPL_ERROR_NONE;
tpl_result_t res = TPL_ERROR_NONE;
tbm_surface_h **buffers,
int *buffer_count)
{
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL;
int i;
tpl_result_t ret = TPL_ERROR_NONE;
TPL_ASSERT(buffers);
TPL_ASSERT(buffer_count);
- wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
- wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)surface->display->backend.data;
+ wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
+ wayland_vk_wsi_display = (tpl_wl_vk_display_t *)surface->display->backend.data;
if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) {
ret = twe_surface_get_swapchain_buffers(wayland_vk_wsi_surface->twe_surface,
void *data)
{
tpl_surface_t *surface = NULL;
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
tpl_bool_t is_activated = TPL_FALSE;
surface = (tpl_surface_t *)data;
TPL_CHECK_ON_NULL_RETURN(surface);
- wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *)surface->backend.data;
+ wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *)surface->backend.data;
TPL_CHECK_ON_NULL_RETURN(wayland_vk_wsi_surface);
/* When queue_reset_callback is called, if is_activated is different from
tbm_format format, int width,
int height, int buffer_count, int present_mode)
{
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL;
tpl_result_t res = TPL_ERROR_NONE;
TPL_ASSERT(surface);
TPL_ASSERT(surface->backend.data);
TPL_ASSERT(surface->display);
- wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
+ wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
TPL_ASSERT(wayland_vk_wsi_surface);
- wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *)
+ wayland_vk_wsi_display = (tpl_wl_vk_display_t *)
surface->display->backend.data;
TPL_ASSERT(wayland_vk_wsi_display);
static tpl_result_t
__tpl_wl_vk_wsi_surface_destroy_swapchain(tpl_surface_t *surface)
{
- tpl_wayland_vk_wsi_surface_t *wayland_vk_wsi_surface = NULL;
- tpl_wayland_vk_wsi_display_t *wayland_vk_wsi_display = NULL;
+ tpl_wl_vk_surface_t *wayland_vk_wsi_surface = NULL;
+ tpl_wl_vk_display_t *wayland_vk_wsi_display = NULL;
tpl_result_t res = TPL_ERROR_NONE;
unsigned int ref;
TPL_ASSERT(surface->display);
TPL_ASSERT(surface->display->backend.data);
- wayland_vk_wsi_surface = (tpl_wayland_vk_wsi_surface_t *) surface->backend.data;
- wayland_vk_wsi_display = (tpl_wayland_vk_wsi_display_t *) surface->display->backend.data;
+ wayland_vk_wsi_surface = (tpl_wl_vk_surface_t *) surface->backend.data;
+ wayland_vk_wsi_display = (tpl_wl_vk_display_t *) surface->display->backend.data;
if (twe_display_lock(wayland_vk_wsi_display->twe_display) == TPL_ERROR_NONE) {
ref = __tpl_util_atomic_dec(&wayland_vk_wsi_surface->swapchain_reference);