/*
 * Copyright (C) 2009 Edward Hervey <bilboed@bilboed.com>
 *               2011 Wim Taymans <wim.taymans@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "gst_private.h"

#include "gstatomicqueue.h"
#include "glib-compat-private.h"

/**
 * SECTION:gstatomicqueue
 * @title: GstAtomicQueue
 * @short_description: An atomic queue implementation
 *
 * The #GstAtomicQueue object implements a queue that can be used from multiple
 * threads without performing any blocking operations.
 */
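
/* A minimal usage sketch (illustrative only, not part of the original code):
 * producers call gst_atomic_queue_push() and consumers call
 * gst_atomic_queue_pop(); neither side ever takes a lock. The
 * GST_ATOMIC_QUEUE_EXAMPLE guard is hypothetical and only keeps the sketch
 * out of regular builds. */
#ifdef GST_ATOMIC_QUEUE_EXAMPLE
static void
gst_atomic_queue_example_basic (void)
{
  GstAtomicQueue *q = gst_atomic_queue_new (16);

  gst_atomic_queue_push (q, GINT_TO_POINTER (1));
  gst_atomic_queue_push (q, GINT_TO_POINTER (2));

  /* elements come back in FIFO order; pop returns NULL once the queue is empty */
  g_assert (gst_atomic_queue_pop (q) == GINT_TO_POINTER (1));
  g_assert (gst_atomic_queue_pop (q) == GINT_TO_POINTER (2));
  g_assert (gst_atomic_queue_pop (q) == NULL);

  gst_atomic_queue_unref (q);
}
#endif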

G_DEFINE_BOXED_TYPE (GstAtomicQueue, gst_atomic_queue,
    (GBoxedCopyFunc) gst_atomic_queue_ref,
    (GBoxedFreeFunc) gst_atomic_queue_unref);

/* By default the queue uses 2 * sizeof(gpointer) * clp2 (max_items) of
 * memory. clp2(x) is the next power of two >= x.
 *
 * The queue can operate in low memory mode, in which it consumes almost
 * half the memory at the expense of extra overhead in the readers. This
 * is disabled by default because even without LOW_MEM mode, the memory
 * consumption is still lower than a plain GList. */
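
/* The comment above and new_queue_mem() below rely on a clp2() helper whose
 * body is not shown in this excerpt; the loop below is a minimal
 * implementation matching that description (smallest power of two >= n). */
static guint
clp2 (guint n)
{
  guint res = 1;

  /* keep doubling until we reach or pass n */
  while (res < n)
    res <<= 1;

  return res;
}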

typedef struct _GstAQueueMem GstAQueueMem;

struct _GstAQueueMem
{
  gint size;
  gpointer *array;
  volatile gint head;
  volatile gint tail_write;
  volatile gint tail_read;
  GstAQueueMem *next;
  GstAQueueMem *free;
};

static GstAQueueMem *
new_queue_mem (guint size, gint pos)
{
  GstAQueueMem *mem;

  mem = g_new (GstAQueueMem, 1);

  /* we keep the size as a mask for performance */
  mem->size = clp2 (MAX (size, 16)) - 1;
  mem->array = g_new0 (gpointer, mem->size + 1);
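  /* Worked example (added for illustration): a requested size of 20 gives
   * clp2 (20) == 32, so mem->size becomes the mask 31 (0x1f) and slots are
   * addressed as array[pos & 31]; the positions themselves keep growing and
   * are never wrapped. */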
  mem->head = pos;
  mem->tail_write = pos;
  mem->tail_read = pos;
  mem->next = NULL;
  mem->free = NULL;

  return mem;
}

static void
free_queue_mem (GstAQueueMem * mem)
{
  g_free (mem->array);
  g_free (mem);
}

struct _GstAtomicQueue
{
  volatile gint refcount;
  /* number of readers currently inside pop()/length(); the free list is only
   * reclaimed when this drops to zero */
  gint num_readers;
  GstAQueueMem *head_mem;
  GstAQueueMem *tail_mem;
  GstAQueueMem *free_list;
};

static void
add_to_free_list (GstAtomicQueue * queue, GstAQueueMem * mem)
{
  do {
    mem->free = g_atomic_pointer_get (&queue->free_list);
  } while (!g_atomic_pointer_compare_and_exchange (&queue->free_list,
          mem->free, mem));
}
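
/* Note (added commentary): the loop above is the classic lock-free "push onto
 * a singly linked stack" pattern: sample the current list head, point
 * mem->free at it and publish mem with a compare-and-exchange, retrying if
 * another thread changed the head in the meantime. */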

static void
clear_free_list (GstAtomicQueue * queue)
{
  GstAQueueMem *free_list;

  /* take the free list and replace with NULL */
  do {
    free_list = g_atomic_pointer_get (&queue->free_list);
    if (free_list == NULL)
      return;
  } while (!g_atomic_pointer_compare_and_exchange (&queue->free_list, free_list,
          NULL));

  while (free_list) {
    GstAQueueMem *next = free_list->free;

    free_queue_mem (free_list);

    free_list = next;
  }
}

/**
 * gst_atomic_queue_new:
 * @initial_size: initial queue size
 *
 * Create a new atomic queue instance. @initial_size will be rounded up to the
 * nearest power of 2 and used as the initial size of the queue.
 *
 * Returns: a new #GstAtomicQueue
 */
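/* Worked example (added for illustration): an @initial_size of 10 results in
 * a backing array of 16 slots, because new_queue_mem() rounds up with
 * clp2 (MAX (size, 16)); values below 16 are therefore also raised to 16. */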
GstAtomicQueue *
gst_atomic_queue_new (guint initial_size)
{
  GstAtomicQueue *queue;

  queue = g_new (GstAtomicQueue, 1);

  queue->refcount = 1;
  queue->num_readers = 0;
  queue->head_mem = queue->tail_mem = new_queue_mem (initial_size, 0);
  queue->free_list = NULL;

  return queue;
}

/**
 * gst_atomic_queue_ref:
 * @queue: a #GstAtomicQueue
 *
 * Increase the refcount of @queue.
 */
void
gst_atomic_queue_ref (GstAtomicQueue * queue)
{
  g_return_if_fail (queue != NULL);

  g_atomic_int_inc (&queue->refcount);
}

static void
gst_atomic_queue_free (GstAtomicQueue * queue)
{
  free_queue_mem (queue->head_mem);
  if (queue->head_mem != queue->tail_mem)
    free_queue_mem (queue->tail_mem);
  clear_free_list (queue);
  g_free (queue);
}

/**
 * gst_atomic_queue_unref:
 * @queue: a #GstAtomicQueue
 *
 * Unref @queue and free the memory when the refcount reaches 0.
 */
void
gst_atomic_queue_unref (GstAtomicQueue * queue)
{
  g_return_if_fail (queue != NULL);

  if (g_atomic_int_dec_and_test (&queue->refcount))
    gst_atomic_queue_free (queue);
}

/**
 * gst_atomic_queue_peek:
 * @queue: a #GstAtomicQueue
 *
 * Peek the head element of the queue without removing it.
 *
 * Returns: (transfer none) (nullable): the head element of @queue or
 * %NULL when the queue is empty.
 */
gpointer
gst_atomic_queue_peek (GstAtomicQueue * queue)
{
  GstAQueueMem *head_mem;
  gint head, tail, size;

  g_return_val_if_fail (queue != NULL, NULL);

  while (TRUE) {
    GstAQueueMem *next;

    head_mem = g_atomic_pointer_get (&queue->head_mem);

    head = g_atomic_int_get (&head_mem->head);
    tail = g_atomic_int_get (&head_mem->tail_read);
    size = head_mem->size;

    /* when we are not empty, we can continue */
    if (G_LIKELY (head != tail))
      break;

    /* else array empty, try to take next */
    next = g_atomic_pointer_get (&head_mem->next);
    if (next == NULL)
      return NULL;

    /* now we try to move the next array as the head memory. If we fail to do
     * that, some other reader managed to do it first and we retry */
    if (!g_atomic_pointer_compare_and_exchange (&queue->head_mem, head_mem,
            next))
      continue;

    /* when we managed to swing the head pointer the old head is now
     * useless and we add it to the freelist. We can't free the memory yet
     * because we first need to make sure no reader is accessing it anymore. */
    add_to_free_list (queue, head_mem);
  }

  return head_mem->array[head & size];
}
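
/* Note (added commentary): since ownership is not transferred, repeated calls
 * to gst_atomic_queue_peek() without an intervening pop return the same
 * element; an item only leaves the queue through gst_atomic_queue_pop(). */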

/**
 * gst_atomic_queue_pop:
 * @queue: a #GstAtomicQueue
 *
 * Get the head element of the queue.
 *
 * Returns: (transfer full): the head element of @queue or %NULL when
 * the queue is empty.
 */
gpointer
gst_atomic_queue_pop (GstAtomicQueue * queue)
{
  gpointer ret;
  GstAQueueMem *head_mem;
  gint head, tail, size;

  g_return_val_if_fail (queue != NULL, NULL);

  g_atomic_int_inc (&queue->num_readers);

  do {
    while (TRUE) {
      GstAQueueMem *next;

      head_mem = g_atomic_pointer_get (&queue->head_mem);

      head = g_atomic_int_get (&head_mem->head);
      tail = g_atomic_int_get (&head_mem->tail_read);
      size = head_mem->size;

      /* when we are not empty, we can continue */
      if (G_LIKELY (head != tail))
        break;

      /* else array empty, try to take next */
      next = g_atomic_pointer_get (&head_mem->next);
      if (next == NULL) {
        /* nothing to pop: drop our reader count again before bailing out so
         * the free list can still be reclaimed by the last reader */
        if (g_atomic_int_dec_and_test (&queue->num_readers))
          clear_free_list (queue);
        return NULL;
      }

      /* now we try to move the next array as the head memory. If we fail to do
       * that, some other reader managed to do it first and we retry */
      if (!g_atomic_pointer_compare_and_exchange (&queue->head_mem, head_mem,
              next))
        continue;

      /* when we managed to swing the head pointer the old head is now
       * useless and we add it to the freelist. We can't free the memory yet
       * because we first need to make sure no reader is accessing it anymore. */
      add_to_free_list (queue, head_mem);
    }

    ret = head_mem->array[head & size];
  } while
      (!g_atomic_int_compare_and_exchange (&head_mem->head, head, head + 1));
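
  /* Note (added commentary): the compare-and-exchange above is what makes
   * concurrent pops safe: a reader only claims the slot if head still holds
   * the value it sampled, so exactly one reader advances head past a given
   * position and every element is handed out exactly once. */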

  /* decrement number of readers, when we reach 0 readers we can be sure that
   * none is accessing the memory in the free list and we can try to clean up */
  if (g_atomic_int_dec_and_test (&queue->num_readers))
    clear_free_list (queue);

  return ret;
}

/**
 * gst_atomic_queue_push:
 * @queue: a #GstAtomicQueue
 * @data: the data to append
 *
 * Append @data to the tail of the queue.
 */
void
gst_atomic_queue_push (GstAtomicQueue * queue, gpointer data)
{
  GstAQueueMem *tail_mem;
  gint head, tail, size;

  g_return_if_fail (queue != NULL);

  do {
    while (TRUE) {
      GstAQueueMem *mem;

      tail_mem = g_atomic_pointer_get (&queue->tail_mem);
      head = g_atomic_int_get (&tail_mem->head);
      tail = g_atomic_int_get (&tail_mem->tail_write);
      size = tail_mem->size;

      /* we're not full, continue */
      if (tail - head <= size)
        break;

      /* else we need to grow the array, we store a mask so we have to add 1 */
      mem = new_queue_mem ((size << 1) + 1, tail);
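      /* Worked example (added for illustration): with the default 16-slot
       * array, size is the mask 15, so (size << 1) + 1 == 31 and
       * new_queue_mem() rounds that back up to 32 slots, i.e. every grow
       * step doubles the capacity. */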

      /* try to make our new array visible to other writers */
      if (!g_atomic_pointer_compare_and_exchange (&queue->tail_mem, tail_mem,
              mem)) {
        /* we tried to swap the new writer array but something changed. This is
         * because some other writer beat us to it, we free our memory and try
         * again */
        free_queue_mem (mem);
        continue;
      }
      /* make sure that readers can find our new array as well. The one who
       * manages to swap the pointer is the only one who can set the next
       * pointer to the new array */
      g_atomic_pointer_set (&tail_mem->next, mem);
    }
  } while
      (!g_atomic_int_compare_and_exchange (&tail_mem->tail_write, tail, tail + 1));

  tail_mem->array[tail & size] = data;

  /* now wait until all writers have completed their write before we move the
   * tail_read to this new item. It is possible that other writers are still
   * updating the previous array slots and we don't want to reveal their changes
   * before they are done. FIXME, it would be nice if we didn't have to busy
   * wait here. */
  while
      (!g_atomic_int_compare_and_exchange (&tail_mem->tail_read, tail, tail + 1));
}
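
/* A concurrency sketch (illustrative only; the GST_ATOMIC_QUEUE_EXAMPLE guard
 * and the helper names are hypothetical): one thread pushes items while
 * another drains them. Because the queue never blocks, an empty queue simply
 * returns NULL and the consumer polls. */
#ifdef GST_ATOMIC_QUEUE_EXAMPLE
static gpointer
gst_atomic_queue_example_producer (gpointer user_data)
{
  GstAtomicQueue *q = user_data;
  gint i;

  for (i = 1; i <= 100; i++)
    gst_atomic_queue_push (q, GINT_TO_POINTER (i));

  return NULL;
}

static void
gst_atomic_queue_example_threads (void)
{
  GstAtomicQueue *q = gst_atomic_queue_new (16);
  GThread *thread =
      g_thread_new ("producer", gst_atomic_queue_example_producer, q);
  gint popped = 0;

  /* poll until all 100 items have been seen; pop returns NULL when empty */
  while (popped < 100) {
    if (gst_atomic_queue_pop (q) != NULL)
      popped++;
  }

  g_thread_join (thread);
  gst_atomic_queue_unref (q);
}
#endif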

/**
 * gst_atomic_queue_length:
 * @queue: a #GstAtomicQueue
 *
 * Get the number of items in the queue.
 *
 * Returns: the number of elements in the queue.
 */
guint
gst_atomic_queue_length (GstAtomicQueue * queue)
{
  GstAQueueMem *head_mem, *tail_mem;
  gint head, tail;

  g_return_val_if_fail (queue != NULL, 0);

  g_atomic_int_inc (&queue->num_readers);

  head_mem = g_atomic_pointer_get (&queue->head_mem);
  head = g_atomic_int_get (&head_mem->head);

  tail_mem = g_atomic_pointer_get (&queue->tail_mem);
  tail = g_atomic_int_get (&tail_mem->tail_read);

  if (g_atomic_int_dec_and_test (&queue->num_readers))
    clear_free_list (queue);

  return tail - head;
}
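
/* Note (added commentary): head and tail are global, monotonically increasing
 * positions (only array indexing applies the size mask), so tail - head is the
 * element count even when head_mem and tail_mem are different arrays. Under
 * concurrent pushes and pops the result is necessarily just a snapshot. */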