#include <tbm_surface_queue.h>
#include <tbm_surface_internal.h>
#include <tbm_sync.h>
+#include <pthread.h>
#include "wayland-tbm-client.h"
#include "wayland-tbm-client-protocol.h"
struct wl_tbm *wl_tbm;
struct wl_list link;
+
+ pthread_mutex_t lock;
};
static struct wl_tbm_monitor *tbm_monitor;
wl_list_for_each(queue_info, &tbm_client->queue_info_list, link) {
struct wayland_tbm_buffer *buffer = NULL;
+ pthread_mutex_lock(&queue_info->lock);
wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
- if (buffer->wl_buffer == wl_buffer)
+ if (buffer->wl_buffer == wl_buffer) {
+ pthread_mutex_unlock(&queue_info->lock);
return buffer;
+ }
}
+ pthread_mutex_unlock(&queue_info->lock);
}
return NULL;
wl_list_for_each(queue_info, &tbm_client->queue_info_list, link) {
struct wayland_tbm_buffer *buffer = NULL;
+ pthread_mutex_lock(&queue_info->lock);
wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
- if (buffer->tbm_surface == surface)
+ if (buffer->tbm_surface == surface) {
+ pthread_mutex_unlock(&queue_info->lock);
return buffer;
+ }
}
+ pthread_mutex_unlock(&queue_info->lock);
}
return NULL;
}
}
-static void
-_wayland_tbm_client_surface_queue_flush(struct wayland_tbm_surface_queue *queue_info)
-{
- int flush = 0;
-
-#ifdef DEBUG_TRACE
- WL_TBM_TRACE("pid:%d", getpid());
-#endif
- _wayland_tbm_client_queue_destroy_unused_attach_bufs(queue_info, &flush);
- tbm_surface_queue_set_size(queue_info->tbm_queue, queue_info->queue_size, flush);
-}
-
static tbm_surface_h
_wayland_tbm_client_create_surface_from_param(tbm_bufmgr bufmgr,
int is_fd,
struct wayland_tbm_buffer *buffer;
tbm_surface_h surface = NULL;
+ pthread_mutex_lock(&queue_info->lock);
+
if (queue_info->is_active && queue_info->active_flush) {
wl_list_for_each_reverse(buffer, &queue_info->attach_bufs, link) {
if (!buffer->allocated && buffer->usable) {
#endif
}
+ pthread_mutex_unlock(&queue_info->lock);
+
return surface;
}
(struct wayland_tbm_surface_queue *)data;
struct wayland_tbm_buffer *buffer, *tmp;
+ pthread_mutex_lock(&queue_info->lock);
+
wl_list_for_each_safe(buffer, tmp, &queue_info->attach_bufs, link) {
if (buffer->tbm_surface != surface) continue;
if (!buffer->allocated) continue;
/* unref.. pair of __wayland_tbm_client_surface_alloc_cb */
tbm_surface_internal_unref(surface);
+
+ pthread_mutex_unlock(&queue_info->lock);
}
static void
WL_TBM_RETURN_IF_FAIL(wl_buffer != NULL);
+ pthread_mutex_lock(&queue_info->lock);
+
buffer = calloc(1, sizeof(struct wayland_tbm_buffer));
WL_TBM_GOTO_IF_FAIL(buffer != NULL, fail_alloc);
buffer->flags = flags;
wl_list_insert(&queue_info->attach_bufs, &buffer->link);
+ pthread_mutex_unlock(&queue_info->lock);
#ifdef DEBUG_TRACE
WL_TBM_TRACE("pid:%d wl_buffer:%p tbm_surface:%p",
fail_get_data:
free(buffer);
fail_alloc:
+ pthread_mutex_unlock(&queue_info->lock);
wl_buffer_destroy(wl_buffer);
}
{
struct wayland_tbm_surface_queue *queue_info =
(struct wayland_tbm_surface_queue *)data;
+ tbm_surface_queue_h tbm_queue = NULL;
WL_TBM_LOG("active queue");
+ pthread_mutex_lock(&queue_info->lock);
+
if (queue_info->is_active) {
WL_TBM_C_LOG("warning: queue_info is already activated");
+ pthread_mutex_unlock(&queue_info->lock);
return;
}
#ifdef DEBUG_TRACE
queue_info->is_active = 1;
queue_info->usage = usage;
- if (need_flush) {
- /* flush the allocated surfaces at the client */
+ if (need_flush)
queue_info->active_flush = need_flush;
- tbm_surface_queue_set_size(queue_info->tbm_queue, queue_size, 1);
- }
+
+ tbm_queue = queue_info->tbm_queue;
+
+ pthread_mutex_unlock(&queue_info->lock);
+
+ /* flush the allocated surfaces at the client */
+ if (need_flush)
+ tbm_surface_queue_set_size(tbm_queue, queue_size, 1);
}
static void
{
struct wayland_tbm_surface_queue *queue_info =
(struct wayland_tbm_surface_queue *)data;
+ int flush = 0;
+ int need_flush = 0;
+ tbm_surface_queue_h tbm_queue = NULL;
+ int queue_size = 0;
#ifdef DEBUG_TRACE
WL_TBM_TRACE(" pid:%d", getpid());
WL_TBM_LOG("deactive queue");
+ pthread_mutex_lock(&queue_info->lock);
+
if (!queue_info->is_active) {
WL_TBM_C_LOG("warning: queue_info is already deactivated");
+ pthread_mutex_unlock(&queue_info->lock);
return;
}
if (queue_info->active_flush) {
queue_info->active_flush = 0;
/* flush the attached surfaces */
- _wayland_tbm_client_surface_queue_flush(queue_info);
+ _wayland_tbm_client_queue_destroy_unused_attach_bufs(queue_info, &flush);
+ need_flush = 1;
}
+
+ tbm_queue = queue_info->tbm_queue;
+ queue_size = queue_info->queue_size;
+
+ pthread_mutex_unlock(&queue_info->lock);
+
+ if (need_flush)
+ tbm_surface_queue_set_size(tbm_queue, queue_size, flush);
}
static void
{
struct wayland_tbm_surface_queue *queue_info =
(struct wayland_tbm_surface_queue *)data;
+ tbm_surface_queue_h tbm_queue = NULL;
#ifdef DEBUG_TRACE
WL_TBM_TRACE("pid:%d", getpid());
#endif
WL_TBM_LOG("flush queue");
+ pthread_mutex_lock(&queue_info->lock);
+
if (queue_info->is_active && queue_info->active_flush) {
WL_TBM_C_LOG("warning: Cannot flush the tbm_surface_queueu. The queue is activate.");
+ pthread_mutex_unlock(&queue_info->lock);
return;
}
+ tbm_queue = queue_info->tbm_queue;
+
+ pthread_mutex_unlock(&queue_info->lock);
+
/* flush the allocated surfaces at the client */
- tbm_surface_queue_flush(queue_info->tbm_queue);
+ tbm_surface_queue_flush(tbm_queue);
}
static void
struct wayland_tbm_surface_queue *queue_info =
(struct wayland_tbm_surface_queue *)data;
struct wayland_tbm_buffer *buffer;
+ tbm_surface_queue_h tbm_queue = NULL;
WL_TBM_RETURN_IF_FAIL(wl_buffer != NULL);
+ pthread_mutex_lock(&queue_info->lock);
+
wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
if (buffer->wl_buffer == wl_buffer)
buffer->usable = 1;
}
+ tbm_queue = queue_info->tbm_queue;
+
#ifdef DEBUG_TRACE
WL_TBM_TRACE("pid:%d wl_buffer:%p tbm_surface:%p",
getpid(), buffer->wl_buffer, buffer->tbm_surface);
#endif
- tbm_surface_queue_notify_dequeuable(queue_info->tbm_queue);
+ pthread_mutex_unlock(&queue_info->lock);
- return;
+ tbm_surface_queue_notify_dequeuable(tbm_queue);
}
const struct wl_tbm_queue_listener wl_tbm_queue_listener = {
#ifdef DEBUG_TRACE
WL_TBM_TRACE(" pid:%d", getpid());
#endif
+ pthread_mutex_lock(&queue_info->lock);
/* remove the attach_bufs int the queue_info */
_wayland_tbm_client_queue_destroy_attach_bufs(queue_info);
wl_tbm_queue_destroy(queue_info->wl_tbm_queue);
wl_list_remove(&queue_info->link);
+ pthread_mutex_unlock(&queue_info->lock);
+ pthread_mutex_destroy(&queue_info->lock);
free(queue_info);
}
WL_TBM_TRACE(" pid:%d", getpid());
#endif
+ pthread_mutex_lock(&queue_info->lock);
+
width = tbm_surface_queue_get_width(surface_queue);
height = tbm_surface_queue_get_height(surface_queue);
format = tbm_surface_queue_get_format(surface_queue);
queue_info->width = width;
queue_info->height = height;
queue_info->format = format;
+
+ pthread_mutex_unlock(&queue_info->lock);
}
static void
WL_TBM_RETURN_IF_FAIL(queue_info != NULL);
- if (!queue_info->is_active || !queue_info->active_flush) return;
+ pthread_mutex_lock(&queue_info->lock);
+ if (!queue_info->is_active || !queue_info->active_flush) {
+ pthread_mutex_unlock(&queue_info->lock);
+ return;
+ }
+ pthread_mutex_unlock(&queue_info->lock);
tbm_client = wl_tbm_get_user_data(queue_info->wl_tbm);
WL_TBM_RETURN_IF_FAIL(tbm_client != NULL);
WL_TBM_RETURN_IF_FAIL(queue_info != NULL);
if (trace != TBM_SURFACE_QUEUE_TRACE_DEQUEUE) return;
- if (!queue_info->is_active || !queue_info->active_flush) return;
+
+ pthread_mutex_lock(&queue_info->lock);
+
+ if (!queue_info->is_active || !queue_info->active_flush) {
+ pthread_mutex_unlock(&queue_info->lock);
+ return;
+ }
wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
if (buffer->tbm_surface == tbm_surface)
wl_buffer = buffer->wl_buffer;
}
- if (!wl_buffer) return;
+ if (wl_buffer)
+ wl_tbm_queue_dequeue_buffer(queue_info->wl_tbm_queue, wl_buffer);
- wl_tbm_queue_dequeue_buffer(queue_info->wl_tbm_queue, wl_buffer);
+ pthread_mutex_unlock(&queue_info->lock);
}
tbm_surface_queue_h
width, height, format, queue_size);
#endif
+ pthread_mutex_init(&queue_info->lock, NULL);
+
/* add queue_info to the list */
wl_list_insert(&tbm_client->queue_info_list, &queue_info->link);
wayland_tbm_client_get_wl_tbm_queue(struct wayland_tbm_client *tbm_client, struct wl_surface *surface)
{
struct wayland_tbm_surface_queue *queue_info = NULL;
+ struct wl_tbm_queue *wayland_tbm_queue = NULL;
WL_TBM_RETURN_VAL_IF_FAIL(tbm_client != NULL, NULL);
WL_TBM_RETURN_VAL_IF_FAIL(surface != NULL, NULL);
queue_info = _wayland_tbm_client_find_queue_info_wl_surface(tbm_client, surface);
WL_TBM_RETURN_VAL_IF_FAIL(queue_info != NULL, NULL);
- WL_TBM_RETURN_VAL_IF_FAIL(queue_info->wl_tbm_queue != NULL, NULL);
- return queue_info->wl_tbm_queue;
+ pthread_mutex_lock(&queue_info->lock);
+
+ wayland_tbm_queue = queue_info->wl_tbm_queue;
+ if (!wayland_tbm_queue)
+ WL_TBM_LOG_E("wl_tbm_queue is NULL");
+
+ pthread_mutex_unlock(&queue_info->lock);
+
+ return wayland_tbm_queue;
}
struct wl_tbm *
queue_info = _wayland_tbm_client_find_queue_info_queue(tbm_client, queue);
WL_TBM_RETURN_VAL_IF_FAIL(queue_info != NULL, 0);
- if (queue_info->is_active) return 1;
+ pthread_mutex_lock(&queue_info->lock);
+
+ if (queue_info->is_active) {
+ pthread_mutex_unlock(&queue_info->lock);
+ return 1;
+ }
+
+ pthread_mutex_unlock(&queue_info->lock);
return 0;
}
return 0;
}
- if (num) {
- if (queue_info->is_active && queue_info->active_flush)
+ pthread_mutex_lock(&queue_info->lock);
+
+ if (queue_info->is_active && queue_info->active_flush) {
+ if (num)
*num = wl_list_length(&queue_info->attach_bufs);
- else
- *num = queue_info->queue_size;
- }
- if (surfaces) {
- if (queue_info->is_active && queue_info->active_flush) {
+ if (surfaces) {
wl_list_for_each(buffer, &queue_info->attach_bufs, link) {
surfaces[index] = buffer->tbm_surface;
index++;
}
- } else {
+ }
+
+ pthread_mutex_unlock(&queue_info->lock);
+ } else {
+ if (num)
+ *num = queue_info->queue_size;
+
+ if (surfaces) {
dequeued_surfaces = (tbm_surface_h *)calloc(queue_info->queue_size, sizeof(tbm_surface_h));
if (!dequeued_surfaces) {
WL_TBM_LOG_E("failed to alloc get_surfaces");
- goto fail;
+ goto alloc_fail;
}
get_surfaces = (tbm_surface_h *)calloc(queue_info->queue_size, sizeof(tbm_surface_h));
if (!get_surfaces) {
WL_TBM_LOG_E("failed to alloc dequeued_surfaces");
- goto fail;
+ goto alloc_fail;
}
+		/* queue_info is no longer needed; drop the lock before dequeuing */
+ pthread_mutex_unlock(&queue_info->lock);
+
while (tbm_surface_queue_can_dequeue(queue, 0)) {
tsq_err = tbm_surface_queue_dequeue(queue, &surface);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
WL_TBM_LOG_E("failed to tbm_surface_queue_dequeue");
- goto fail;
+ goto queue_fail;
}
dequeued_surfaces[dequeued_num] = surface;
tsq_err = tbm_surface_queue_get_surfaces(queue, get_surfaces, &get_num);
if (tsq_err != TBM_SURFACE_QUEUE_ERROR_NONE) {
WL_TBM_LOG_E("failed to tbm_surface_queue_get_surfaces");
- goto fail;
+ goto queue_fail;
}
for (i = 0; i < get_num; i++)
free(dequeued_surfaces);
free(get_surfaces);
+ } else {
+ pthread_mutex_unlock(&queue_info->lock);
}
}
return 1;
-fail:
+alloc_fail:
+ pthread_mutex_unlock(&queue_info->lock);
+queue_fail:
if (num) *num = 0;
if (dequeued_surfaces)