4 This file is part of PulseAudio.
6 Copyright 2006 Lennart Poettering
8 PulseAudio is free software; you can redistribute it and/or modify
9 it under the terms of the GNU Lesser General Public License as
10 published by the Free Software Foundation; either version 2.1 of the
11 License, or (at your option) any later version.
13 PulseAudio is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public
19 License along with PulseAudio; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
30 #include <pulsecore/atomic.h>
31 #include <pulsecore/log.h>
32 #include <pulsecore/thread.h>
33 #include <pulse/xmalloc.h>
37 /* Algorithm is not perfect, in a few corner cases it will fail to pop
38 * from the flist although it isn't empty, and fail to push into the
39 * flist, although it isn't full.
41 * We keep a fixed size array of entries, each item is either marked
42 * UNUSED, USED or BUSY and contains a user data pointer. When pushing
43 * into the queue we look for an UNUSED cell and mark it BUSY with a
44 * CAS operation. If successful we use it and mark it USED, otherwise
45 * we go on and look for the next UNUSED cell. The algorithm for
46 * popping an item from the queue is practically inverse: look for a
47 * USED cell and mark it BUSY with a CAS operation, after reading
48 * from it mark it UNUSED again.
50 * To accelerate finding of used/unused cells we maintain a read and a
51 * write index which is used like a ring buffer. After each push we
52 * increase the write index and after each pop we increase the read
55 * The indexes are incremented atomically and are never truncated to
56 * the buffer size. Instead we assume that the buffer size is a power
57 * of two and that the truncation can thus be done by applying a
60 * To make sure that we do not look for empty cells indefinitely we
61 * maintain a length value which stores the number of used cells. From
62 * this value the number of unused cells is easily calculated. Please
63 * note that the length value is not updated atomically with the read
64 * and write index and might thus be a few cells off the real
65 * value. To deal with this we always look for N_EXTRA_SCAN extra
66 * cells when pushing/popping entries.
68 * It might make sense to replace this implementation with a link list
69 * stack or queue, which however requires DCAS to be simple. Patches
72 * Please note that this algorithm is home grown.*/
74 #define FLIST_SIZE 128
75 #define N_EXTRA_SCAN 2
77 /* For debugging purposes we can define _Y to put an extra thread
78 * yield between each operation. */
81 #define _Y pa_thread_yield()
83 #define _Y do { } while(0)
93 pa_atomic_int_t state;
100 pa_atomic_int_t length;
101 pa_atomic_int_t read_idx;
102 pa_atomic_int_t write_idx;
/* Return non-zero iff 'size' is a power of two. The classic
 * (size & (size - 1)) == 0 trick alone also accepts 0, which is NOT a
 * power of two and would break the reduce() mask; guard against it. */
static int is_power_of_two(unsigned size) {
    return size != 0 && !(size & (size - 1));
}
109 pa_flist *pa_flist_new(unsigned size) {
115 assert(is_power_of_two(size));
117 l = pa_xnew(pa_flist, 1);
120 l->cells = pa_xnew0(struct cell, size);
122 pa_atomic_store(&l->read_idx, 0);
123 pa_atomic_store(&l->write_idx, 0);
124 pa_atomic_store(&l->length, 0);
129 static int reduce(pa_flist *l, int value) {
130 return value & (unsigned) (l->size - 1);
133 void pa_flist_free(pa_flist *l, pa_free_cb_t free_cb) {
139 idx = reduce(l, pa_atomic_load(&l->read_idx));
140 len = pa_atomic_load(&l->length);
142 for (; len > 0; len--) {
144 if (pa_atomic_load(&l->cells[idx].state) == STATE_USED)
145 free_cb(l->cells[idx].data);
147 idx = reduce(l, idx + 1);
155 int pa_flist_push(pa_flist*l, void *p) {
161 n = len = (int) l->size - pa_atomic_load(&l->length) + N_EXTRA_SCAN;
163 idx = reduce(l, pa_atomic_load(&l->write_idx));
165 for (; n > 0 ; n--) {
168 if (pa_atomic_cmpxchg(&l->cells[idx].state, STATE_UNUSED, STATE_BUSY)) {
170 pa_atomic_inc(&l->write_idx);
172 l->cells[idx].data = p;
174 pa_atomic_store(&l->cells[idx].state, STATE_USED);
176 pa_atomic_inc(&l->length);
181 idx = reduce(l, idx + 1);
185 if (len > N_EXTRA_SCAN)
186 pa_log("WARNING: Didn't find free cell after %u iterations.", len);
192 void* pa_flist_pop(pa_flist*l) {
197 n = len = pa_atomic_load(&l->length) + N_EXTRA_SCAN;
199 idx = reduce(l, pa_atomic_load(&l->read_idx));
201 for (; n > 0 ; n--) {
204 if (pa_atomic_cmpxchg(&l->cells[idx].state, STATE_USED, STATE_BUSY)) {
207 pa_atomic_inc(&l->read_idx);
209 p = l->cells[idx].data;
211 pa_atomic_store(&l->cells[idx].state, STATE_UNUSED);
214 pa_atomic_dec(&l->length);
219 idx = reduce(l, idx+1);
223 if (len > N_EXTRA_SCAN)
224 pa_log("WARNING: Didn't find used cell after %u iterations.", len);