2 This file is part of PulseAudio.
4 Copyright 2006-2008 Lennart Poettering
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include <pulsecore/atomic.h>
30 #include <pulsecore/log.h>
31 #include <pulsecore/thread.h>
32 #include <pulsecore/macro.h>
33 #include <pulsecore/core-util.h>
34 #include <pulsecore/llist.h>
35 #include <pulsecore/flist.h>
36 #include <pulse/xmalloc.h>
/* Default queue capacity in cells. Presumably must be a power of two so
 * reduce() can mask instead of using modulo — pa_asyncq_new() asserts
 * pa_is_power_of_two(size); TODO confirm this constant is the default
 * passed by callers (not visible in this view). */
41 #define ASYNCQ_SIZE 256
43 /* For debugging purposes we can define _Y to put an extra thread
44 * yield between each operation. */
/* Debug variant: force a scheduler yield at every _Y point to shake out
 * race conditions in the lock-free paths. */
49 #define _Y pa_thread_yield()
/* Production variant: _Y expands to a no-op (do/while(0) so it is safe
 * as a single statement). NOTE(review): the #if/#else/#endif selecting
 * between these two definitions is elided in this view. */
51 #define _Y do { } while(0)
/* Linked-list hook for struct localq, the per-writer overflow node used
 * when the lock-free ring is full (see pa_asyncq_post()). NOTE(review):
 * the enclosing struct declarations are partially elided in this view. */
56 PA_LLIST_FIELDS(struct localq);
/* Eventfd-style semaphores: read_fdsem is posted by the reader after it
 * frees a cell; write_fdsem is posted by the writer after it fills one. */
63 pa_fdsem *read_fdsem, *write_fdsem;
/* Writer-local overflow list of items that did not fit into the ring,
 * flushed by flush_postq(). last_localq tracks the oldest entry so
 * items are re-pushed in FIFO order — TODO confirm against PREPEND use. */
65 PA_LLIST_HEAD(struct localq, localq);
66 struct localq *last_localq;
/* Set while the writer side is between before_poll/after_poll on
 * read_fdsem (see pa_asyncq_write_before_poll/_after_poll). */
67 pa_bool_t waiting_for_post;
/* Free-list recycling struct localq nodes across queues to avoid
 * malloc/free churn on the overflow path. */
70 PA_STATIC_FLIST_DECLARE(localq, 0, pa_xfree);
/* The cell array lives immediately after the (aligned) pa_asyncq header
 * in the same allocation (see pa_xmalloc0 in pa_asyncq_new()); this
 * macro computes its base address. */
72 #define PA_ASYNCQ_CELLS(x) ((pa_atomic_ptr_t*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_asyncq))))
/* Map a monotonically increasing index onto a ring slot. Valid only
 * because l->size is a power of two (asserted in pa_asyncq_new()), so
 * the mask is equivalent to value % l->size. */
74 static unsigned reduce(pa_asyncq *l, unsigned value) {
75 return value & (unsigned) (l->size - 1);
/* Allocate a queue with 'size' cells (must be a power of two). Header
 * and cell array are carved from one zeroed allocation, so all cells
 * start NULL (= empty). Returns NULL on failure — the fdsem error
 * cleanup and return statements are elided in this view. */
78 pa_asyncq *pa_asyncq_new(unsigned size) {
84 pa_assert(pa_is_power_of_two(size));
/* One allocation: aligned header followed by the atomic-pointer cells
 * (addressed via PA_ASYNCQ_CELLS). */
86 l = pa_xmalloc0(PA_ALIGN(sizeof(pa_asyncq)) + (sizeof(pa_atomic_ptr_t) * size));
90 PA_LLIST_HEAD_INIT(struct localq, l->localq);
91 l->last_localq = NULL;
92 l->waiting_for_post = FALSE;
/* Signalled by the reader when it empties a cell. */
94 if (!(l->read_fdsem = pa_fdsem_new())) {
/* Signalled by the writer when it fills a cell; on failure the
 * already-created read_fdsem is released. */
99 if (!(l->write_fdsem = pa_fdsem_new())) {
100 pa_fdsem_free(l->read_fdsem);
/* Destroy the queue. If free_cb is non-NULL it is presumably applied to
 * every still-queued item (the call sites using free_cb are elided in
 * this view — TODO confirm). Drains the ring first, then the writer's
 * local overflow list, recycling localq nodes into the static flist. */
108 void pa_asyncq_free(pa_asyncq *l, pa_free_cb_t free_cb) {
/* Drain remaining items from the ring (non-waiting pop). */
115 while ((p = pa_asyncq_pop(l, 0)))
/* Drain the writer-side overflow list. */
119 while ((q = l->localq)) {
123 PA_LLIST_REMOVE(struct localq, l->localq, q);
/* Recycle the node; if the flist is full it is freed instead
 * (the pa_xfree fallback line is elided in this view). */
125 if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
129 pa_fdsem_free(l->read_fdsem);
130 pa_fdsem_free(l->write_fdsem);
/* Try to store p into the next write cell via an atomic NULL->p
 * compare-and-swap. A cell still holding a pointer means the queue is
 * full: if wait_op, block on read_fdsem until the reader frees the cell
 * and retry; otherwise presumably return negative (the early-return and
 * the do-loop head are elided in this view — TODO confirm). On success
 * posts write_fdsem to wake a sleeping reader. Single-writer only. */
134 static int push(pa_asyncq*l, void *p, pa_bool_t wait_op) {
136 pa_atomic_ptr_t *cells;
141 cells = PA_ASYNCQ_CELLS(l);
/* Next slot; write_idx increments monotonically and is masked here. */
144 idx = reduce(l, l->write_idx);
/* Fails iff the cell is still occupied (reader has not consumed it). */
146 if (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {
151 /* pa_log("sleeping on push"); */
/* Sleep until the reader signals that it emptied a cell, then
 * retry the CAS on the same slot. */
154 pa_fdsem_wait(l->read_fdsem);
155 } while (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p));
/* Wake the reader (if it is blocked in pop). */
161 pa_fdsem_post(l->write_fdsem);
/* Re-push items queued locally by pa_asyncq_post() back into the ring,
 * oldest first (last_localq walks backwards via ->prev because the list
 * is PREPENDed). Returns FALSE as soon as a push fails without waiting
 * — presumably TRUE once the local list is empty (return statements are
 * elided in this view — TODO confirm). */
166 static pa_bool_t flush_postq(pa_asyncq *l, pa_bool_t wait_op) {
171 while ((q = l->last_localq)) {
/* Ring still full: give up, leaving q queued locally. */
173 if (push(l, q->data, wait_op) < 0)
/* Advance to the next-oldest entry before unlinking. */
176 l->last_localq = q->prev;
178 PA_LLIST_REMOVE(struct localq, l->localq, q);
/* Recycle the node into the static flist (pa_xfree fallback
 * elided in this view). */
180 if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
/* Public push: first flush any locally queued overflow items so FIFO
 * order is preserved, then push p itself. Returns negative if the
 * overflow list cannot be flushed (the early return is elided in this
 * view — TODO confirm), otherwise push()'s result. */
187 int pa_asyncq_push(pa_asyncq*l, void *p, pa_bool_t wait_op) {
190 if (!flush_postq(l, wait_op))
193 return push(l, p, wait_op);
/* Non-blocking, never-failing push: try the ring first; if it is full,
 * stash p on the writer-local overflow list to be flushed by a later
 * push/flush_postq. Never blocks and never drops data. */
196 void pa_asyncq_post(pa_asyncq*l, void *p) {
/* Fast path: overflow list flushed and the ring accepted p — done
 * (the intervening return is elided in this view). */
202 if (flush_postq(l, FALSE))
203 if (pa_asyncq_push(l, p, FALSE) >= 0)
206 /* OK, we couldn't push anything in the queue. So let's queue it
207 * locally and push it later */
/* Rate-limited warning: overruns signal the ring is undersized. */
209 if (pa_log_ratelimit())
210 pa_log_warn("q overrun, queuing locally");
/* Reuse a recycled node if available, else allocate a fresh one. */
212 if (!(q = pa_flist_pop(PA_STATIC_FLIST_GET(localq))))
213 q = pa_xnew(struct localq, 1);
/* Prepend: newest at head; flush_postq drains from last_localq so
 * FIFO order is kept (the q->data / last_localq assignments are
 * elided in this view). */
216 PA_LLIST_PREPEND(struct localq, l->localq, q);
/* Pop the next item. An empty read cell (NULL) means the queue is
 * empty: if wait_op, block on write_fdsem until the writer fills it;
 * otherwise presumably return NULL (the early return and do-loop head
 * are elided in this view — TODO confirm). Single-reader only. */
224 void* pa_asyncq_pop(pa_asyncq*l, pa_bool_t wait_op) {
227 pa_atomic_ptr_t *cells;
231 cells = PA_ASYNCQ_CELLS(l);
/* Next slot to consume; read_idx increments monotonically. */
234 idx = reduce(l, l->read_idx);
236 if (!(ret = pa_atomic_ptr_load(&cells[idx]))) {
241 /* pa_log("sleeping on pop"); */
/* Sleep until the writer signals it filled a cell, re-check. */
244 pa_fdsem_wait(l->write_fdsem);
245 } while (!(ret = pa_atomic_ptr_load(&cells[idx])));
250 /* Guaranteed to succeed if we only have a single reader */
251 pa_assert_se(pa_atomic_ptr_cmpxchg(&cells[idx], ret, NULL));
/* Wake a writer blocked in push() waiting for a free cell. */
256 pa_fdsem_post(l->read_fdsem);
/* FD the *reader* should poll for readability: it is signalled by the
 * writer via write_fdsem when new data arrives. */
261 int pa_asyncq_read_fd(pa_asyncq *q) {
264 return pa_fdsem_get(q->write_fdsem);
/* Reader-side poll preparation. Presumably returns negative when data
 * is already available (so the caller should skip poll()), 0 once the
 * fdsem is armed — the return statements and retry loop are elided in
 * this view; TODO confirm against pa_fdsem_before_poll() semantics. */
267 int pa_asyncq_read_before_poll(pa_asyncq *l) {
269 pa_atomic_ptr_t *cells;
273 cells = PA_ASYNCQ_CELLS(l);
276 idx = reduce(l, l->read_idx);
/* Data already waiting in the next read cell: no need to poll. */
279 if (pa_atomic_ptr_load(&cells[idx]))
/* Arm the semaphore; re-check the cell afterwards to close the race
 * with a concurrent post (the loop structure is elided here). */
282 if (pa_fdsem_before_poll(l->write_fdsem) >= 0)
/* Reader-side poll cleanup: acknowledge the write_fdsem event after
 * poll() returned. Pairs with pa_asyncq_read_before_poll(). */
287 void pa_asyncq_read_after_poll(pa_asyncq *l) {
290 pa_fdsem_after_poll(l->write_fdsem);
/* FD the *writer* should poll for readability: it is signalled by the
 * reader via read_fdsem when cells are freed. */
293 int pa_asyncq_write_fd(pa_asyncq *q) {
296 return pa_fdsem_get(q->read_fdsem);
/* Writer-side poll preparation: try to flush the local overflow list;
 * if anything remains (ring full), arm read_fdsem and remember that a
 * matching after_poll is owed via waiting_for_post. The surrounding
 * loop/return structure is elided in this view. */
299 void pa_asyncq_write_before_poll(pa_asyncq *l) {
/* Everything flushed: nothing to wait for (return elided). */
304 if (flush_postq(l, FALSE))
307 if (pa_fdsem_before_poll(l->read_fdsem) >= 0) {
308 l->waiting_for_post = TRUE;
/* Writer-side poll cleanup: acknowledge read_fdsem only if
 * write_before_poll actually armed it (tracked by waiting_for_post),
 * then clear the flag. Pairs with pa_asyncq_write_before_poll(). */
314 void pa_asyncq_write_after_poll(pa_asyncq *l) {
317 if (l->waiting_for_post) {
318 pa_fdsem_after_poll(l->read_fdsem);
319 l->waiting_for_post = FALSE;