client: make queue_info thread safe 73/191373/9
author     Changyeon Lee <cyeon.lee@samsung.com>
           Tue, 16 Oct 2018 09:45:33 +0000 (18:45 +0900)
committer  Changyeon Lee <cyeon.lee@samsung.com>
           Fri, 26 Oct 2018 00:12:24 +0000 (09:12 +0900)
wayland_tbm events and tbm_surface_queue callbacks can be called from
different threads, so access to queue_info must be thread safe. Guard
queue_info state with a per-queue pthread mutex and release the lock
before calling back into the tbm_surface_queue API.

Change-Id: I47221d77acd393d84312f730955a3594223deff8
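
The locking pattern applied throughout the handlers in this patch can be summarized in a short sketch. This is only an illustration with hypothetical names (queue_info, queue_set_size, handle_deactive), not the wayland-tbm code itself: every early return releases the mutex, and any state needed by a tbm_surface_queue_* call is copied out first so the call can be made after the lock is dropped, which keeps a re-entrant queue callback from deadlocking on the same lock.

/* minimal sketch, assuming a simplified queue_info and a stand-in
 * queue_set_size() in place of tbm_surface_queue_set_size() */
#include <pthread.h>
#include <stdio.h>

struct queue_info {
	pthread_mutex_t lock;
	int is_active;
	int active_flush;
	int queue_size;
};

/* stand-in for tbm_surface_queue_set_size(); it may trigger callbacks
 * that take queue_info->lock again, so it must run unlocked */
static void queue_set_size(int size, int flush)
{
	printf("set_size(size=%d, flush=%d)\n", size, flush);
}

static void handle_deactive(struct queue_info *info)
{
	int need_flush = 0;
	int queue_size = 0;

	pthread_mutex_lock(&info->lock);

	if (!info->is_active) {
		pthread_mutex_unlock(&info->lock); /* unlock on every early return */
		return;
	}

	info->is_active = 0;
	if (info->active_flush) {
		info->active_flush = 0;
		need_flush = 1;
	}
	queue_size = info->queue_size; /* copy what the unlocked call needs */

	pthread_mutex_unlock(&info->lock); /* drop the lock first ... */

	if (need_flush)
		queue_set_size(queue_size, 1); /* ... then call back into the queue */
}

int main(void)
{
	struct queue_info info = { .is_active = 1, .active_flush = 1, .queue_size = 3 };

	pthread_mutex_init(&info.lock, NULL);
	handle_deactive(&info);
	pthread_mutex_destroy(&info.lock);
	return 0;
}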

src/wayland-tbm-client.c

index 0408e0e63dba7e6dc2ff2bcaf23dd4fb4ba37bbf..115a995f5e74e5403b504cc56c2103e7c381e8e6 100644
@@ -40,6 +40,7 @@ DEALINGS IN THE SOFTWARE.
 #include <tbm_surface_queue.h>
 #include <tbm_surface_internal.h>
 #include <tbm_sync.h>
+#include <pthread.h>
 
 #include "wayland-tbm-client.h"
 #include "wayland-tbm-client-protocol.h"
@@ -89,6 +90,8 @@ struct wayland_tbm_surface_queue {
 
        struct wl_tbm *wl_tbm;
        struct wl_list link;
+
+       pthread_mutex_t lock;
 };
 
 static struct wl_tbm_monitor *tbm_monitor;
@@ -605,10 +608,14 @@ _wayland_tbm_client_find_tbm_buffer_wl_buffer(struct wayland_tbm_client *tbm_cli
        wl_list_for_each(queue_info, &tbm_client->queue_info_list, link) {
                struct wayland_tbm_buffer *buffer = NULL;
 
+               pthread_mutex_lock(&queue_info->lock);
                wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
-                       if (buffer->wl_buffer == wl_buffer)
+                       if (buffer->wl_buffer == wl_buffer) {
+                               pthread_mutex_unlock(&queue_info->lock);
                                return buffer;
+                       }
                }
+               pthread_mutex_unlock(&queue_info->lock);
        }
 
        return NULL;
@@ -625,10 +632,14 @@ _wayland_tbm_client_find_tbm_buffer_surface(struct wayland_tbm_client *tbm_clien
        wl_list_for_each(queue_info, &tbm_client->queue_info_list, link) {
                struct wayland_tbm_buffer *buffer = NULL;
 
+               pthread_mutex_lock(&queue_info->lock);
                wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
-                       if (buffer->tbm_surface == surface)
+                       if (buffer->tbm_surface == surface) {
+                               pthread_mutex_unlock(&queue_info->lock);
                                return buffer;
+                       }
                }
+               pthread_mutex_unlock(&queue_info->lock);
        }
 
        return NULL;
@@ -894,18 +905,6 @@ _wayland_tbm_client_queue_destroy_unused_attach_bufs(struct wayland_tbm_surface_
        }
 }
 
-static void
-_wayland_tbm_client_surface_queue_flush(struct wayland_tbm_surface_queue *queue_info)
-{
-       int flush = 0;
-
-#ifdef DEBUG_TRACE
-       WL_TBM_TRACE("pid:%d", getpid());
-#endif
-       _wayland_tbm_client_queue_destroy_unused_attach_bufs(queue_info, &flush);
-       tbm_surface_queue_set_size(queue_info->tbm_queue, queue_info->queue_size, flush);
-}
-
 static tbm_surface_h
 _wayland_tbm_client_create_surface_from_param(tbm_bufmgr bufmgr,
                                                         int is_fd,
@@ -1009,6 +1008,8 @@ __wayland_tbm_client_surface_alloc_cb(tbm_surface_queue_h surface_queue, void *d
        struct wayland_tbm_buffer *buffer;
        tbm_surface_h surface = NULL;
 
+       pthread_mutex_lock(&queue_info->lock);
+
        if (queue_info->is_active && queue_info->active_flush) {
                wl_list_for_each_reverse(buffer, &queue_info->attach_bufs, link) {
                        if (!buffer->allocated && buffer->usable) {
@@ -1045,6 +1046,8 @@ __wayland_tbm_client_surface_alloc_cb(tbm_surface_queue_h surface_queue, void *d
 #endif
        }
 
+       pthread_mutex_unlock(&queue_info->lock);
+
        return surface;
 }
 
@@ -1059,6 +1062,8 @@ __wayland_tbm_client_surface_free_cb(tbm_surface_queue_h surface_queue, void *da
                                (struct wayland_tbm_surface_queue *)data;
        struct wayland_tbm_buffer *buffer, *tmp;
 
+       pthread_mutex_lock(&queue_info->lock);
+
        wl_list_for_each_safe(buffer, tmp, &queue_info->attach_bufs, link) {
                if (buffer->tbm_surface != surface) continue;
                if (!buffer->allocated) continue;
@@ -1077,6 +1082,8 @@ __wayland_tbm_client_surface_free_cb(tbm_surface_queue_h surface_queue, void *da
 
        /* unref.. pair of __wayland_tbm_client_surface_alloc_cb */
        tbm_surface_internal_unref(surface);
+
+       pthread_mutex_unlock(&queue_info->lock);
 }
 
 static void
@@ -1091,6 +1098,8 @@ handle_tbm_queue_buffer_attached(void *data,
 
        WL_TBM_RETURN_IF_FAIL(wl_buffer != NULL);
 
+       pthread_mutex_lock(&queue_info->lock);
+
        buffer = calloc(1, sizeof(struct wayland_tbm_buffer));
        WL_TBM_GOTO_IF_FAIL(buffer != NULL, fail_alloc);
 
@@ -1105,6 +1114,7 @@ handle_tbm_queue_buffer_attached(void *data,
        buffer->flags = flags;
 
        wl_list_insert(&queue_info->attach_bufs, &buffer->link);
+       pthread_mutex_unlock(&queue_info->lock);
 
 #ifdef DEBUG_TRACE
        WL_TBM_TRACE("pid:%d wl_buffer:%p tbm_surface:%p",
@@ -1116,6 +1126,7 @@ handle_tbm_queue_buffer_attached(void *data,
 fail_get_data:
        free(buffer);
 fail_alloc:
+       pthread_mutex_unlock(&queue_info->lock);
        wl_buffer_destroy(wl_buffer);
 }
 
@@ -1128,11 +1139,15 @@ handle_tbm_queue_active(void *data,
 {
        struct wayland_tbm_surface_queue *queue_info =
                                (struct wayland_tbm_surface_queue *)data;
+       tbm_surface_queue_h tbm_queue = NULL;
 
        WL_TBM_LOG("active queue");
 
+       pthread_mutex_lock(&queue_info->lock);
+
        if (queue_info->is_active) {
                WL_TBM_C_LOG("warning: queue_info is already activated");
+               pthread_mutex_unlock(&queue_info->lock);
                return;
        }
 #ifdef DEBUG_TRACE
@@ -1142,11 +1157,16 @@ handle_tbm_queue_active(void *data,
        queue_info->is_active = 1;
        queue_info->usage = usage;
 
-       if (need_flush) {
-               /* flush the allocated surfaces at the client */
+       if (need_flush)
                queue_info->active_flush = need_flush;
-               tbm_surface_queue_set_size(queue_info->tbm_queue, queue_size, 1);
-       }
+
+       tbm_queue = queue_info->tbm_queue;
+
+       pthread_mutex_unlock(&queue_info->lock);
+
+       /* flush the allocated surfaces at the client */
+       if (need_flush)
+               tbm_surface_queue_set_size(tbm_queue, queue_size, 1);
 }
 
 static void
@@ -1155,6 +1175,10 @@ handle_tbm_queue_deactive(void *data,
 {
        struct wayland_tbm_surface_queue *queue_info =
                                (struct wayland_tbm_surface_queue *)data;
+       int flush = 0;
+       int need_flush = 0;
+       tbm_surface_queue_h tbm_queue = NULL;
+       int queue_size = 0;
 
 #ifdef DEBUG_TRACE
        WL_TBM_TRACE("                  pid:%d", getpid());
@@ -1162,8 +1186,11 @@ handle_tbm_queue_deactive(void *data,
 
        WL_TBM_LOG("deactive queue");
 
+       pthread_mutex_lock(&queue_info->lock);
+
        if (!queue_info->is_active) {
                WL_TBM_C_LOG("warning: queue_info is already deactivated");
+               pthread_mutex_unlock(&queue_info->lock);
                return;
        }
 
@@ -1172,8 +1199,17 @@ handle_tbm_queue_deactive(void *data,
        if (queue_info->active_flush) {
                queue_info->active_flush = 0;
                /* flush the attached surfaces */
-               _wayland_tbm_client_surface_queue_flush(queue_info);
+               _wayland_tbm_client_queue_destroy_unused_attach_bufs(queue_info, &flush);
+               need_flush = 1;
        }
+
+       tbm_queue = queue_info->tbm_queue;
+       queue_size = queue_info->queue_size;
+
+       pthread_mutex_unlock(&queue_info->lock);
+
+       if (need_flush)
+               tbm_surface_queue_set_size(tbm_queue, queue_size, flush);
 }
 
 static void
@@ -1182,19 +1218,27 @@ handle_tbm_queue_flush(void *data,
 {
        struct wayland_tbm_surface_queue *queue_info =
                                (struct wayland_tbm_surface_queue *)data;
+       tbm_surface_queue_h tbm_queue = NULL;
 
 #ifdef DEBUG_TRACE
        WL_TBM_TRACE("pid:%d", getpid());
 #endif
        WL_TBM_LOG("flush queue");
 
+       pthread_mutex_lock(&queue_info->lock);
+
        if (queue_info->is_active && queue_info->active_flush) {
                WL_TBM_C_LOG("warning: Cannot flush the tbm_surface_queueu. The queue is activate.");
+               pthread_mutex_unlock(&queue_info->lock);
                return;
        }
 
+       tbm_queue = queue_info->tbm_queue;
+
+       pthread_mutex_unlock(&queue_info->lock);
+
        /* flush the allocated surfaces at the client */
-       tbm_surface_queue_flush(queue_info->tbm_queue);
+       tbm_surface_queue_flush(tbm_queue);
 }
 
 static void
@@ -1205,22 +1249,27 @@ handle_tbm_queue_buffer_usable(void *data,
        struct wayland_tbm_surface_queue *queue_info =
                                (struct wayland_tbm_surface_queue *)data;
        struct wayland_tbm_buffer *buffer;
+       tbm_surface_queue_h tbm_queue = NULL;
 
        WL_TBM_RETURN_IF_FAIL(wl_buffer != NULL);
 
+       pthread_mutex_lock(&queue_info->lock);
+
        wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
                if (buffer->wl_buffer == wl_buffer)
                        buffer->usable = 1;
        }
 
+       tbm_queue = queue_info->tbm_queue;
+
 #ifdef DEBUG_TRACE
        WL_TBM_TRACE("pid:%d wl_buffer:%p tbm_surface:%p",
                        getpid(), buffer->wl_buffer, buffer->tbm_surface);
 #endif
 
-       tbm_surface_queue_notify_dequeuable(queue_info->tbm_queue);
+       pthread_mutex_unlock(&queue_info->lock);
 
-       return;
+       tbm_surface_queue_notify_dequeuable(tbm_queue);
 }
 
 const struct wl_tbm_queue_listener wl_tbm_queue_listener = {
@@ -1276,6 +1325,7 @@ _handle_tbm_surface_queue_destroy_notify(tbm_surface_queue_h surface_queue,
 #ifdef DEBUG_TRACE
        WL_TBM_TRACE(" pid:%d", getpid());
 #endif
+       pthread_mutex_lock(&queue_info->lock);
 
        /* remove the attach_bufs int the queue_info */
        _wayland_tbm_client_queue_destroy_attach_bufs(queue_info);
@@ -1284,6 +1334,8 @@ _handle_tbm_surface_queue_destroy_notify(tbm_surface_queue_h surface_queue,
                wl_tbm_queue_destroy(queue_info->wl_tbm_queue);
 
        wl_list_remove(&queue_info->link);
+       pthread_mutex_unlock(&queue_info->lock);
+       pthread_mutex_destroy(&queue_info->lock);
        free(queue_info);
 }
 
@@ -1300,6 +1352,8 @@ _handle_tbm_surface_queue_reset_notify(tbm_surface_queue_h surface_queue,
        WL_TBM_TRACE(" pid:%d", getpid());
 #endif
 
+       pthread_mutex_lock(&queue_info->lock);
+
        width = tbm_surface_queue_get_width(surface_queue);
        height = tbm_surface_queue_get_height(surface_queue);
        format = tbm_surface_queue_get_format(surface_queue);
@@ -1307,6 +1361,8 @@ _handle_tbm_surface_queue_reset_notify(tbm_surface_queue_h surface_queue,
        queue_info->width = width;
        queue_info->height = height;
        queue_info->format = format;
+
+       pthread_mutex_unlock(&queue_info->lock);
 }
 
 static void
@@ -1318,7 +1374,12 @@ _handle_tbm_surface_queue_can_dequeue_notify(tbm_surface_queue_h surface_queue,
 
        WL_TBM_RETURN_IF_FAIL(queue_info != NULL);
 
-       if (!queue_info->is_active || !queue_info->active_flush) return;
+       pthread_mutex_lock(&queue_info->lock);
+       if (!queue_info->is_active || !queue_info->active_flush) {
+               pthread_mutex_unlock(&queue_info->lock);
+               return;
+       }
+       pthread_mutex_unlock(&queue_info->lock);
 
        tbm_client = wl_tbm_get_user_data(queue_info->wl_tbm);
        WL_TBM_RETURN_IF_FAIL(tbm_client != NULL);
@@ -1352,16 +1413,23 @@ _handle_tbm_surface_queue_trace_notify(tbm_surface_queue_h surface_queue,
        WL_TBM_RETURN_IF_FAIL(queue_info != NULL);
 
        if (trace != TBM_SURFACE_QUEUE_TRACE_DEQUEUE) return;
-       if (!queue_info->is_active || !queue_info->active_flush) return;
+
+       pthread_mutex_lock(&queue_info->lock);
+
+       if (!queue_info->is_active || !queue_info->active_flush) {
+               pthread_mutex_unlock(&queue_info->lock);
+               return;
+       }
 
        wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
                if (buffer->tbm_surface == tbm_surface)
                        wl_buffer = buffer->wl_buffer;
        }
 
-       if (!wl_buffer) return;
+       if (wl_buffer)
+               wl_tbm_queue_dequeue_buffer(queue_info->wl_tbm_queue, wl_buffer);
 
-       wl_tbm_queue_dequeue_buffer(queue_info->wl_tbm_queue, wl_buffer);
+       pthread_mutex_unlock(&queue_info->lock);
 }
 
 tbm_surface_queue_h
@@ -1430,6 +1498,8 @@ wayland_tbm_client_create_surface_queue(struct wayland_tbm_client *tbm_client,
                width, height, format, queue_size);
 #endif
 
+       pthread_mutex_init(&queue_info->lock, NULL);
+
        /* add queue_info to the list */
        wl_list_insert(&tbm_client->queue_info_list, &queue_info->link);
 
@@ -1446,15 +1516,23 @@ struct wl_tbm_queue *
 wayland_tbm_client_get_wl_tbm_queue(struct wayland_tbm_client *tbm_client, struct wl_surface *surface)
 {
        struct wayland_tbm_surface_queue *queue_info = NULL;
+       struct wl_tbm_queue *wayland_tbm_queue = NULL;
 
        WL_TBM_RETURN_VAL_IF_FAIL(tbm_client != NULL, NULL);
        WL_TBM_RETURN_VAL_IF_FAIL(surface != NULL, NULL);
 
        queue_info = _wayland_tbm_client_find_queue_info_wl_surface(tbm_client, surface);
        WL_TBM_RETURN_VAL_IF_FAIL(queue_info != NULL, NULL);
-       WL_TBM_RETURN_VAL_IF_FAIL(queue_info->wl_tbm_queue != NULL, NULL);
 
-       return queue_info->wl_tbm_queue;
+       pthread_mutex_lock(&queue_info->lock);
+
+       wayland_tbm_queue = queue_info->wl_tbm_queue;
+       if (!wayland_tbm_queue)
+               WL_TBM_LOG_E("wl_tbm_queue is NULL");
+
+       pthread_mutex_unlock(&queue_info->lock);
+
+       return wayland_tbm_queue;
 }
 
 struct wl_tbm *
@@ -1476,7 +1554,14 @@ wayland_tbm_client_queue_check_activate(struct wayland_tbm_client *tbm_client, t
        queue_info = _wayland_tbm_client_find_queue_info_queue(tbm_client, queue);
        WL_TBM_RETURN_VAL_IF_FAIL(queue_info != NULL, 0);
 
-       if (queue_info->is_active) return 1;
+       pthread_mutex_lock(&queue_info->lock);
+
+       if (queue_info->is_active) {
+               pthread_mutex_unlock(&queue_info->lock);
+               return 1;
+       }
+
+       pthread_mutex_unlock(&queue_info->lock);
 
        return 0;
 }
@@ -1504,37 +1589,45 @@ wayland_tbm_client_queue_get_surfaces(struct wayland_tbm_client *tbm_client,
                return 0;
        }
 
-       if (num) {
-               if (queue_info->is_active && queue_info->active_flush)
+       pthread_mutex_lock(&queue_info->lock);
+
+       if (queue_info->is_active && queue_info->active_flush) {
+               if (num)
                        *num = wl_list_length(&queue_info->attach_bufs);
-               else
-                       *num = queue_info->queue_size;
-       }
 
-       if (surfaces) {
-               if (queue_info->is_active && queue_info->active_flush) {
+               if (surfaces) {
                        wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
                                surfaces[index] = buffer->tbm_surface;
                                index++;
                        }
-               } else {
+               }
+
+               pthread_mutex_unlock(&queue_info->lock);
+       } else {
+               if (num)
+                       *num = queue_info->queue_size;
+
+               if (surfaces) {
                        dequeued_surfaces = (tbm_surface_h *)calloc(queue_info->queue_size, sizeof(tbm_surface_h));
                        if (!dequeued_surfaces) {
                                WL_TBM_LOG_E("failed to alloc get_surfaces");
-                               goto fail;
+                               goto alloc_fail;
                        }
 
                        get_surfaces = (tbm_surface_h *)calloc(queue_info->queue_size, sizeof(tbm_surface_h));
                        if (!get_surfaces) {
                                WL_TBM_LOG_E("failed to alloc dequeued_surfaces");
-                               goto fail;
+                               goto alloc_fail;
                        }
 
+                       /* queue_info is no longer needed from here on */
+                       pthread_mutex_unlock(&queue_info->lock);
+
                        while (tbm_surface_queue_can_dequeue(queue, 0)) {
                                tsq_err  = tbm_surface_queue_dequeue(queue, &surface);
                                if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
                                        WL_TBM_LOG_E("failed to tbm_surface_queue_dequeue");
-                                       goto fail;
+                                       goto queue_fail;
                                }
 
                                dequeued_surfaces[dequeued_num] = surface;
@@ -1550,7 +1643,7 @@ wayland_tbm_client_queue_get_surfaces(struct wayland_tbm_client *tbm_client,
                        tsq_err = tbm_surface_queue_get_surfaces(queue, get_surfaces, &get_num);
                        if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
                                WL_TBM_LOG_E("failed to tbm_surface_queue_get_surfaces");
-                               goto fail;
+                               goto queue_fail;
                        }
 
                        for (i = 0; i < get_num; i++)
@@ -1558,12 +1651,16 @@ wayland_tbm_client_queue_get_surfaces(struct wayland_tbm_client *tbm_client,
 
                        free(dequeued_surfaces);
                        free(get_surfaces);
+               } else {
+                       pthread_mutex_unlock(&queue_info->lock);
                }
        }
 
        return 1;
 
-fail:
+alloc_fail:
+       pthread_mutex_unlock(&queue_info->lock);
+queue_fail:
        if (num) *num = 0;
 
        if (dequeued_surfaces)