tbm: implement backend surface functions related to swapchain sandbox/jbko/angle
authorJoonbum Ko <joonbum.ko@samsung.com>
Mon, 16 Dec 2024 05:20:34 +0000 (14:20 +0900)
committerJoonbum Ko <joonbum.ko@samsung.com>
Mon, 16 Dec 2024 05:20:37 +0000 (14:20 +0900)
  Implemented the swapchain-related backend functions without affecting
 the existing TBM backend operation. Because in the TBM backend the
 tbm_surface_queue is itself the native window, the swapchain code is
 careful not to directly create or destroy the tbm_surface_queue when
 creating or deleting swapchains.

 - cancel_dequeued_buffer
 - get_swapchain_buffers
 - create_swapchain
 - destroy_swapchain

Change-Id: I6e4ce22f6e77fbdc5fe1b16f7186767aa9b9c6a6
Signed-off-by: Joonbum Ko <joonbum.ko@samsung.com>
src/tpl_tbm.c

index 886cf862438b5dfed01529a197b2471d54964d1b..b659742d95e868b46f05788be85f6e8193221a4f 100644 (file)
@@ -13,6 +13,7 @@
 
 typedef struct _tpl_tbm_display tpl_tbm_display_t;
 typedef struct _tpl_tbm_surface tpl_tbm_surface_t;
+typedef struct _tpl_tbm_swapchain tpl_tbm_swapchain_t;
 
 #define MIN_BUFFER 2
 #define MAX_BUFFER 4
@@ -27,11 +28,37 @@ struct _tpl_tbm_display {
        int supported_present_modes;
 };
 
+/* Swapchain state for the TBM backend.
+ * In this backend the native window IS a tbm_surface_queue, so the
+ * swapchain only borrows the queue owned by tpl_tbm_surface_t; it never
+ * creates or destroys the queue itself. */
+struct _tpl_tbm_swapchain {
+       tbm_surface_queue_h tbm_queue; /* borrowed from tpl_tbm_surface_t, not owned */
+
+       /* Properties requested at create_swapchain time. */
+       struct {
+               int width;
+               int height;
+               tbm_format format;
+               int buffer_count;
+               int present_mode;
+       } properties;
+
+       /* calloc'd array of buffers exported to the client; each valid
+        * entry holds a tbm_surface_internal_ref taken in
+        * get_swapchain_buffers. */
+       tbm_surface_h *swapchain_buffers;
+       /* Buffers kept from a previous swapchain incarnation, released
+        * in destroy_swapchain. */
+       tbm_surface_h *old_swapchain_buffers;
+
+       tpl_util_atomic_uint ref_cnt; /* freed when this drops to 0 */
+};
+
 struct _tpl_tbm_surface {
+       tpl_tbm_swapchain_t *swapchain; /* created lazily by create_swapchain; may be NULL */
        tbm_surface_queue_h tbm_queue;
        tpl_bool_t need_reset;
+
+       /* Cached surface->display->backend.data, set in surface_init. */
+       tpl_tbm_display_t *tpl_tbm_display;
 };
 
+/* Return the exported bo name of tbm_surface's first bo (used only for
+ * logging buffer identity). */
+static int
+_get_tbm_surface_bo_name(tbm_surface_h tbm_surface)
+{
+       return tbm_bo_export(tbm_surface_internal_get_bo(tbm_surface, 0));
+}
+
 static tpl_result_t
 __tpl_tbm_display_init(tpl_display_t *display)
 {
@@ -195,6 +222,8 @@ __tpl_tbm_display_query_supported_buffer_count(tpl_display_t *display,
 
        if (min) *min = MIN_BUFFER;
        if (max) *max = (queue_size > MAX_BUFFER ? queue_size : MAX_BUFFER);
+
+       return TPL_ERROR_NONE;
 }
 
 static tpl_result_t
@@ -235,7 +264,7 @@ __tpl_tbm_surface_init(tpl_surface_t *surface)
        tpl_tbm_surface_t *tpl_tbm_surface = NULL;
        TPL_ASSERT(surface);
 
-       tpl_tbm_surface = (tpl_tbm_surface_t *) calloc(1, sizeof(tpl_tbm_surface_t));
+       tpl_tbm_surface = (tpl_tbm_surface_t *)calloc(1, sizeof(tpl_tbm_surface_t));
        if (!tpl_tbm_surface) {
                TPL_ERR("Failed to allocate memory for new tpl_tbm_surface_t");
                return TPL_ERROR_OUT_OF_MEMORY;
@@ -246,6 +275,7 @@ __tpl_tbm_surface_init(tpl_surface_t *surface)
        tpl_tbm_surface->need_reset = TPL_FALSE;
 
        tpl_tbm_surface->tbm_queue = (tbm_surface_queue_h)surface->native_handle;
+       tpl_tbm_surface->tpl_tbm_display = (tpl_tbm_display_t *)surface->display->backend.data;
 
        /* Set reset_callback to tbm_queue */
        tbm_surface_queue_add_reset_cb(tpl_tbm_surface->tbm_queue,
@@ -280,6 +310,236 @@ __tpl_tbm_surface_fini(tpl_surface_t *surface)
        surface->backend.data = NULL;
 }
 
+/* Create (or re-arm) the swapchain wrapper for @surface.
+ *
+ * In the TBM backend the native window already IS a tbm_surface_queue,
+ * so no new queue is created here; the surface's existing queue is
+ * wrapped in a tpl_tbm_swapchain_t and the requested properties are
+ * recorded.
+ *
+ * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_PARAMETER on a
+ * NULL backend/display or unsupported present mode, and
+ * TPL_ERROR_OUT_OF_MEMORY if the swapchain allocation fails.
+ */
+static tpl_result_t
+__tpl_tbm_surface_create_swapchain(tpl_surface_t *surface,
+               tbm_format format, int width, int height,
+               int buffer_count, int present_mode)
+{
+       TPL_ASSERT(surface);
+       tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(tpl_tbm_surface, TPL_ERROR_INVALID_PARAMETER);
+
+       tpl_tbm_display_t *tpl_tbm_display = tpl_tbm_surface->tpl_tbm_display;
+       /* BUGFIX: the original re-checked tpl_tbm_surface here instead of
+        * the display pointer that is dereferenced just below. */
+       TPL_CHECK_ON_NULL_RETURN_VAL(tpl_tbm_display, TPL_ERROR_INVALID_PARAMETER);
+
+       if (!(present_mode & tpl_tbm_display->supported_present_modes)) {
+               TPL_ERR("Unsupported present mode(%d).", present_mode);
+               return TPL_ERROR_INVALID_PARAMETER;
+       }
+
+       tpl_tbm_swapchain_t *swapchain = tpl_tbm_surface->swapchain;
+
+       if (swapchain != NULL && (swapchain->properties.present_mode != present_mode)) {
+               /* TODO: clear in-flight buffers when the present mode
+                * changes on an existing swapchain (not yet implemented). */
+       }
+
+       if (swapchain == NULL) {
+               /* calloc zero-initializes tbm_queue and the buffer arrays. */
+               swapchain = (tpl_tbm_swapchain_t *)calloc(1, sizeof(tpl_tbm_swapchain_t));
+               TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_OUT_OF_MEMORY);
+
+               __tpl_util_atomic_set(&swapchain->ref_cnt, 1);
+       }
+
+       /* NOTE(review): if a reused swapchain still has a swapchain_buffers
+        * array here it is dropped without free — confirm callers always go
+        * through destroy_swapchain first. */
+       swapchain->tbm_queue = tpl_tbm_surface->tbm_queue;
+       swapchain->properties.buffer_count = buffer_count;
+       swapchain->properties.width = width;
+       swapchain->properties.height = height;
+       swapchain->properties.present_mode = present_mode;
+       swapchain->properties.format = format;
+       swapchain->swapchain_buffers = NULL;
+       swapchain->old_swapchain_buffers = NULL;
+
+       tpl_tbm_surface->swapchain = swapchain;
+
+       TPL_INFO("[CREATE_SWAPCHAIN]",
+                        "tpl_surface(%p) tpl_tbm_surface_t(%p) tbm_surface_queue(%p)",
+                        surface, tpl_tbm_surface, swapchain->tbm_queue);
+
+       return TPL_ERROR_NONE;
+}
+
+/* Drop the tbm refs taken on an exported swapchain buffer array and NULL
+ * out each entry.  @sc_buffers must have swapchain->properties.buffer_count
+ * slots; the array itself is NOT freed here (callers free it). */
+static void
+__untrack_swapchain_buffers(tpl_tbm_surface_t *tpl_tbm_surface, tbm_surface_h *sc_buffers)
+{
+       tpl_tbm_swapchain_t *swapchain = tpl_tbm_surface->swapchain;
+       for (int i = 0; i < swapchain->properties.buffer_count; i++) {
+               if (sc_buffers[i]) {
+                       TPL_INFO("[UNTRACK_BUFFERS]", "[%d] tpl_tbm_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
+                                        i, tpl_tbm_surface, swapchain, sc_buffers[i],
+                                        _get_tbm_surface_bo_name(sc_buffers[i]));
+                       tbm_surface_internal_unref(sc_buffers[i]);
+                       sc_buffers[i] = NULL;
+               }
+       }
+}
+
+/* Destroy @surface's swapchain wrapper.
+ *
+ * Drops one reference; teardown happens only when the last reference is
+ * gone.  Because the tbm_queue belongs to the native window it is merely
+ * detached here, never destroyed.
+ */
+static tpl_result_t
+__tpl_tbm_surface_destroy_swapchain(tpl_surface_t *surface)
+{
+       TPL_ASSERT(surface);
+
+       tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
+       TPL_CHECK_ON_NULL_RETURN_VAL(tpl_tbm_surface, TPL_ERROR_INVALID_PARAMETER);
+
+       tpl_tbm_swapchain_t *swapchain = tpl_tbm_surface->swapchain;
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain->tbm_queue, TPL_ERROR_INVALID_OPERATION);
+
+       if (__tpl_util_atomic_dec(&swapchain->ref_cnt) > 0) {
+               /* Other holders remain: only release buffers kept from a
+                * previous swapchain incarnation. */
+               TPL_INFO("[DESTROY_SWAPCHAIN]",
+                                "tpl_tbm_surface(%p) swapchain(%p) still valid",
+                                tpl_tbm_surface, swapchain);
+               if (swapchain->old_swapchain_buffers) {
+                       __untrack_swapchain_buffers(tpl_tbm_surface, swapchain->old_swapchain_buffers);
+                       free(swapchain->old_swapchain_buffers);
+                       swapchain->old_swapchain_buffers = NULL;
+               }
+
+               return TPL_ERROR_NONE;
+       }
+
+       TPL_INFO("[DESTROY_SWAPCHAIN]", "tpl_tbm_surface(%p) swapchain(%p)",
+                        tpl_tbm_surface, swapchain);
+
+       /* BUGFIX: the original leaked old_swapchain_buffers (array and its
+        * tbm refs) on the final destroy path — it was only released on the
+        * still-referenced path above. */
+       if (swapchain->old_swapchain_buffers) {
+               __untrack_swapchain_buffers(tpl_tbm_surface, swapchain->old_swapchain_buffers);
+               free(swapchain->old_swapchain_buffers);
+               swapchain->old_swapchain_buffers = NULL;
+       }
+
+       if (swapchain->swapchain_buffers) {
+               __untrack_swapchain_buffers(tpl_tbm_surface, swapchain->swapchain_buffers);
+               free(swapchain->swapchain_buffers);
+               swapchain->swapchain_buffers = NULL;
+       }
+
+       /* Detach only: the queue is owned by the native window. */
+       swapchain->tbm_queue = NULL;
+
+       free(swapchain);
+       /* BUGFIX: clear the back-pointer so later surface calls cannot
+        * touch freed memory through tpl_tbm_surface->swapchain. */
+       tpl_tbm_surface->swapchain = NULL;
+
+       return TPL_ERROR_NONE;
+}
+
+/* Report the swapchain's buffer set to the client.
+ *
+ * With @buffers == NULL only *buffer_count is filled (query mode).
+ * Otherwise every buffer is forced to be allocated by dequeuing (and
+ * immediately releasing) up to *buffer_count surfaces, then the full set
+ * is fetched with tbm_surface_queue_get_surfaces().  Each returned
+ * surface gets a tbm ref that destroy_swapchain later drops; the
+ * returned array is owned by the swapchain.
+ *
+ * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_PARAMETER on bad
+ * arguments, TPL_ERROR_OUT_OF_MEMORY on allocation/dequeue failure and
+ * TPL_ERROR_INVALID_OPERATION on queue errors.
+ */
+static tpl_result_t
+__tpl_tbm_surface_get_swapchain_buffers(tpl_surface_t *surface,
+                                                                tbm_surface_h **buffers, int *buffer_count)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
+       TPL_ASSERT(surface->display);
+       TPL_ASSERT(surface->display->backend.data);
+
+       tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
+       tpl_tbm_swapchain_t *swapchain = tpl_tbm_surface->swapchain;
+       tbm_surface_h *swapchain_buffers = NULL;
+       tbm_surface_h *dequeued_buffers = NULL;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+       int deq_cnt = 0, i = 0;
+       tpl_result_t ret = TPL_ERROR_NONE;
+
+       TPL_CHECK_ON_NULL_RETURN_VAL(swapchain, TPL_ERROR_INVALID_PARAMETER);
+       TPL_CHECK_ON_NULL_RETURN_VAL(buffer_count, TPL_ERROR_INVALID_PARAMETER);
+
+       if (!buffers) {
+               /* Query mode: only report how many buffers the queue holds. */
+               *buffer_count = tbm_surface_queue_get_size(swapchain->tbm_queue);
+               return TPL_ERROR_NONE;
+       }
+
+       TPL_CHECK_ON_FALSE_RETURN_VAL(*buffer_count == tbm_surface_queue_get_size(swapchain->tbm_queue),
+                                                                 TPL_ERROR_INVALID_PARAMETER);
+
+       swapchain_buffers = (tbm_surface_h *)calloc(*buffer_count, sizeof(tbm_surface_h));
+       dequeued_buffers = (tbm_surface_h *)calloc(*buffer_count, sizeof(tbm_surface_h));
+       if (!swapchain_buffers || !dequeued_buffers) {
+               TPL_ERR("Failed to allocate memory for buffers.");
+               /* BUGFIX: the original jumped here with ret still
+                * TPL_ERROR_NONE, returning success on OOM. */
+               ret = TPL_ERROR_OUT_OF_MEMORY;
+               goto alloc_failed;
+       }
+
+       /* Dequeue as many buffers as possible so the queue allocates them. */
+       while (deq_cnt < *buffer_count && tbm_surface_queue_can_dequeue(swapchain->tbm_queue, 0)) {
+               tsq_err = tbm_surface_queue_dequeue(swapchain->tbm_queue, &dequeued_buffers[deq_cnt]);
+               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+                       TPL_ERR("Failed to dequeue from tbm_queue(%p) |tsq_err(%d)",
+                                       swapchain->tbm_queue, tsq_err);
+                       ret = TPL_ERROR_OUT_OF_MEMORY;
+                       goto deq_failed;
+               }
+
+               deq_cnt++;
+       }
+
+       /* Hand every dequeued buffer straight back to the queue. */
+       for (i = 0; i < deq_cnt; i++) {
+               tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, dequeued_buffers[i]);
+               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE)
+                       TPL_ERR("Failed to release tbm_surface(%p) to tbm_queue(%p) |tsq_err(%d)",
+                                       dequeued_buffers[i], swapchain->tbm_queue, tsq_err);
+               dequeued_buffers[i] = NULL;
+       }
+
+       deq_cnt = 0;
+       free(dequeued_buffers);
+       dequeued_buffers = NULL;
+
+       tsq_err = tbm_surface_queue_get_surfaces(swapchain->tbm_queue, swapchain_buffers, buffer_count);
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to get surfaces from tbm_queue(%p) |tsq_err(%d)",
+                               swapchain->tbm_queue, tsq_err);
+               ret = TPL_ERROR_INVALID_OPERATION;
+               goto alloc_failed;
+       }
+
+       /* Ref every valid surface; destroy_swapchain drops these refs. */
+       for (i = 0; i < *buffer_count; i++) {
+               if (tbm_surface_internal_is_valid(swapchain_buffers[i])) {
+                       TPL_INFO("[SWAPCHAIN_BUFFERS]", "[%d] tpl_tbm_surface(%p) sc(%p) tbm_surface(%p) bo(%d)",
+                                        i, tpl_tbm_surface, swapchain, swapchain_buffers[i],
+                                        _get_tbm_surface_bo_name(swapchain_buffers[i]));
+                       tbm_surface_internal_ref(swapchain_buffers[i]);
+               }
+       }
+
+       swapchain->swapchain_buffers = swapchain_buffers;
+       *buffers = swapchain_buffers;
+
+       return TPL_ERROR_NONE;
+
+deq_failed:
+       /* Return the successfully dequeued buffers before bailing out. */
+       for (i = 0; i < deq_cnt; i++) {
+               tsq_err = tbm_surface_queue_release(swapchain->tbm_queue, dequeued_buffers[i]);
+               if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+                       /* BUGFIX: log the buffer being released; the original
+                        * printed swapchain_buffers[i], which is NULL here. */
+                       TPL_ERR("Failed to release tbm_surface(%p) to tbm_queue(%p) |tsq_err(%d)",
+                                       dequeued_buffers[i], swapchain->tbm_queue, tsq_err);
+                       ret = TPL_ERROR_INVALID_OPERATION;
+               }
+       }
+
+alloc_failed:
+       free(dequeued_buffers);
+       free(swapchain_buffers);
+
+       return ret;
+}
+
+/* Return a dequeued buffer to the queue without presenting it.
+ *
+ * Drops the tbm ref taken when the buffer was handed out, then cancels
+ * the dequeue on the surface's tbm_queue.
+ *
+ * Returns TPL_ERROR_NONE on success, TPL_ERROR_INVALID_PARAMETER for an
+ * invalid tbm_surface and TPL_ERROR_INVALID_OPERATION if the queue
+ * rejects the cancel.
+ */
+static tpl_result_t
+__tpl_tbm_surface_cancel_buffer(tpl_surface_t *surface, tbm_surface_h tbm_surface)
+{
+       TPL_ASSERT(surface);
+       TPL_ASSERT(surface->backend.data);
+
+       tpl_tbm_surface_t *tpl_tbm_surface = (tpl_tbm_surface_t *)surface->backend.data;
+       tbm_surface_queue_error_e tsq_err = TBM_SURFACE_QUEUE_ERROR_NONE;
+
+       TPL_CHECK_ON_FALSE_RETURN_VAL(tbm_surface_internal_is_valid(tbm_surface),
+                                                                 TPL_ERROR_INVALID_PARAMETER);
+
+       tbm_surface_internal_unref(tbm_surface);
+
+       TPL_INFO("[CANCEL_BUFFER]", "tpl_tbm_surface(%p) tbm_surface(%p) bo(%d)",
+                        tpl_tbm_surface, tbm_surface, _get_tbm_surface_bo_name(tbm_surface));
+
+       tsq_err = tbm_surface_queue_cancel_dequeue(tpl_tbm_surface->tbm_queue, tbm_surface);
+       /* BUGFIX: the original tested == ERROR_NONE, reporting failure on
+        * success and success on failure. */
+       if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
+               TPL_ERR("Failed to cancel tbm_surface(%p) to tbm_queue(%p)",
+                               tbm_surface, tpl_tbm_surface->tbm_queue);
+               return TPL_ERROR_INVALID_OPERATION;
+       }
+
+       return TPL_ERROR_NONE;
+}
+
 static tpl_result_t
 __tpl_tbm_surface_enqueue_buffer(tpl_surface_t *surface,
                                                                 tbm_surface_h tbm_surface, int num_rects,
@@ -429,7 +689,13 @@ __tpl_surface_init_backend_tbm(tpl_surface_backend_t *backend,
        backend->init = __tpl_tbm_surface_init;
        backend->fini = __tpl_tbm_surface_fini;
        backend->validate = __tpl_tbm_surface_validate;
+       backend->cancel_dequeued_buffer =
+               __tpl_tbm_surface_cancel_buffer;
        backend->dequeue_buffer = __tpl_tbm_surface_dequeue_buffer;
        backend->enqueue_buffer = __tpl_tbm_surface_enqueue_buffer;
+       backend->get_swapchain_buffers =
+               __tpl_tbm_surface_get_swapchain_buffers;
+       backend->create_swapchain = __tpl_tbm_surface_create_swapchain;
+       backend->destroy_swapchain = __tpl_tbm_surface_destroy_swapchain;
 }