4 This file is part of PulseAudio.
6 Copyright 2004-2006 Lennart Poettering
8 PulseAudio is free software; you can redistribute it and/or modify
9 it under the terms of the GNU Lesser General Public License as published
10 by the Free Software Foundation; either version 2 of the License,
11 or (at your option) any later version.
13 PulseAudio is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with PulseAudio; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
35 #include <pulse/xmalloc.h>
37 #include <pulsecore/log.h>
38 #include <pulsecore/mcalign.h>
40 #include "memblockq.h"
/* One node of the queue: a doubly linked list entry holding queued data.
 * NOTE(review): interior lines are elided from this listing -- the node's
 * chunk/index fields and the `struct pa_memblockq {` header that encloses
 * the fields below are not visible here; confirm against the full file. */
42 struct memblock_list {
43 struct memblock_list *next, *prev;
/* Fields of struct pa_memblockq (header line elided):
 * head and tail of the doubly linked block list. */
49 struct memblock_list *blocks, *blocks_tail;
/* Queue tuning values, all in bytes and multiples of `base`
 * (see pa_memblockq_new, which rounds them). */
51 size_t maxlength, tlength, base, prebuf, minreq;
/* Absolute byte positions; int64_t so seeks may move write before read. */
52 int64_t read_index, write_index;
/* PREBUF: still accumulating up to `prebuf` bytes before playback;
 * RUNNING: normal streaming. */
53 enum { PREBUF, RUNNING } state;
/* Allocate a new memblockq and sanitize the caller-supplied limits:
 * every size is rounded to a multiple of `base` and clamped into a
 * consistent ordering. `silence` (may be NULL) is the block handed out
 * by peek() when no real data is available.
 * NOTE(review): several parameter lines and statements are elided from
 * this listing (e.g. the declarations of `bq` and `idx`). */
58 pa_memblockq* pa_memblockq_new(
65 pa_memblock *silence) {
70 assert(maxlength >= base);
72 bq = pa_xnew(pa_memblockq, 1);
73 bq->blocks = bq->blocks_tail = NULL;
/* Both indexes start at the caller-chosen absolute position. */
77 bq->read_index = bq->write_index = idx;
79 pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
80 (unsigned long)maxlength, (unsigned long)tlength, (unsigned long)base, (unsigned long)minreq, (unsigned long)prebuf);
/* Round maxlength UP to the next multiple of base. */
82 bq->maxlength = ((maxlength+base-1)/base)*base;
83 assert(bq->maxlength >= base);
/* Target length: rounded up; 0 or anything >= maxlength means "use maxlength". */
85 bq->tlength = ((tlength+base-1)/base)*base;
86 if (!bq->tlength || bq->tlength >= bq->maxlength)
87 bq->tlength = bq->maxlength;
/* (size_t) -1 requests the default pre-buffer of half the target length. */
89 bq->prebuf = (prebuf == (size_t) -1) ? bq->tlength/2 : prebuf;
90 bq->prebuf = ((bq->prebuf+base-1)/base)*base;
91 if (bq->prebuf > bq->maxlength)
92 bq->prebuf = bq->maxlength;
/* minreq is rounded DOWN, so it never exceeds what the caller asked for. */
94 bq->minreq = (minreq/base)*base;
/* NOTE(review): prebuf is clamped against maxlength above, not tlength,
 * so `bq->tlength - bq->prebuf` can wrap (size_t underflow) when
 * prebuf > tlength -- confirm whether callers can trigger this. */
96 if (bq->minreq > bq->tlength - bq->prebuf)
97 bq->minreq = bq->tlength - bq->prebuf;
102 pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
103 (unsigned long)bq->maxlength, (unsigned long)bq->tlength, (unsigned long)bq->base, (unsigned long)bq->prebuf, (unsigned long)bq->minreq);
/* A zero prebuf means we can start emitting data immediately. */
105 bq->state = bq->prebuf ? PREBUF : RUNNING;
/* Take our own reference on the silence block, if one was provided. */
106 bq->silence = silence ? pa_memblock_ref(silence) : NULL;
/* Release everything owned by the queue: all queued blocks (via flush),
 * the silence block reference, and the alignment helper.
 * NOTE(review): guard conditions and the final pa_xfree(bq) are elided
 * from this listing -- presumably the unref/free calls are conditional
 * on bq->silence / bq->mcalign being non-NULL; confirm in the full file. */
112 void pa_memblockq_free(pa_memblockq* bq) {
115 pa_memblockq_flush(bq);
118 pa_memblock_unref(bq->silence);
121 pa_mcalign_free(bq->mcalign);
/* Unlink one entry from the doubly linked block list, patching the
 * head/tail pointers when q is at either end, then drop the data
 * reference and free the node.
 * NOTE(review): the if/else lines pairing these branches, the
 * pa_xfree(q) and the n_blocks decrement are elided from this listing. */
126 static void drop_block(pa_memblockq *bq, struct memblock_list *q) {
130 assert(bq->n_blocks >= 1);
/* q has a predecessor: bypass q in the forward direction... */
133 q->prev->next = q->next;
/* ...otherwise q was the head. */
135 bq->blocks = q->next;
/* Same for the backward direction / tail. */
138 q->next->prev = q->prev;
140 bq->blocks_tail = q->prev;
/* Give up our reference on the underlying memory block. */
142 pa_memblock_unref(q->chunk.memblock);
/* Check whether `l` more bytes may be written without exceeding
 * maxlength. Returns non-zero when the push is allowed.
 * NOTE(review): the declaration of `end`, the early return inside the
 * underflow branch, and the final return statements are elided here. */
148 static int can_push(pa_memblockq *bq, size_t l) {
/* Underflow situation: the reader is ahead of the writer; part of the
 * incoming data only fills the gap and does not count against the limit. */
153 if (bq->read_index > bq->write_index) {
154 size_t d = bq->read_index - bq->write_index;
/* Absolute end position of currently stored data (0 when list is empty). */
162 end = bq->blocks_tail ? bq->blocks_tail->index + bq->blocks_tail->chunk.length : 0;
164 /* Make sure that the list doesn't get too long */
165 if (bq->write_index + (int64_t)l > end)
166 if (bq->write_index + l - bq->read_index > bq->maxlength)
/* Append (or, after a seek, overwrite-insert) one memchunk at the current
 * write_index. Only a reference is taken on the block; the data is not
 * copied. Returns 0 on success, negative on failure (unaligned length or
 * queue full). Because write_index may have been seeked backwards, the
 * function walks the list tail-to-front, dropping/truncating/splitting
 * any existing entries the new data overlaps.
 * NOTE(review): many interior statements are elided from this listing
 * (the local `chunk` copy, the backward `for` loop over `q`, several
 * list-splice assignments and the trailing return). */
172 int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
174 struct memblock_list *q, *n;
179 assert(uchunk->memblock);
180 assert(uchunk->length > 0);
181 assert(uchunk->index + uchunk->length <= uchunk->memblock->length);
/* Pushed data must be a whole number of frames (`base` bytes each). */
183 if (uchunk->length % bq->base)
186 if (!can_push(bq, uchunk->length))
191 if (bq->read_index > bq->write_index) {
193 /* We currently have a buffer underflow, we need to drop some
196 size_t d = bq->read_index - bq->write_index;
198 if (chunk.length > d) {
/* Part of the incoming chunk merely fills the underflow gap. */
201 bq->write_index = bq->read_index;
203 /* We drop the incoming data completely */
204 bq->write_index += chunk.length;
209 /* We go from back to front to look for the right place to add
210 * this new entry. Drop data we will overwrite on the way */
215 if (bq->write_index >= q->index + (int64_t)q->chunk.length)
216 /* We found the entry where we need to place the new entry immediately after */
218 else if (bq->write_index + (int64_t)chunk.length <= q->index) {
219 /* This entry isn't touched at all, let's skip it */
221 } else if (bq->write_index <= q->index &&
222 bq->write_index + chunk.length >= q->index + q->chunk.length) {
224 /* This entry is fully replaced by the new entry, so let's drop it */
226 struct memblock_list *p;
230 } else if (bq->write_index >= q->index) {
231 /* The write index points into this memblock, so let's
232 * truncate or split it */
234 if (bq->write_index + chunk.length < q->index + q->chunk.length) {
236 /* We need to save the end of this memchunk */
237 struct memblock_list *p;
240 /* Create a new list entry for the end of this memchunk */
241 p = pa_xnew(struct memblock_list, 1);
243 pa_memblock_ref(p->chunk.memblock);
245 /* Calculate offset */
246 d = bq->write_index + chunk.length - q->index;
249 /* Drop it from the new entry */
250 p->index = q->index + d;
251 p->chunk.length -= d;
253 /* Add it to the list */
255 if ((p->next = q->next))
264 /* Truncate the chunk */
265 if (!(q->chunk.length = bq->write_index - q->index)) {
266 struct memblock_list *p;
272 /* We had to truncate this block, hence we're now at the right position */
/* Remaining case: the new data overlaps only the head of q. */
277 assert(bq->write_index + (int64_t)chunk.length > q->index &&
278 bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
279 bq->write_index < q->index);
281 /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */
283 d = bq->write_index + chunk.length - q->index;
286 q->chunk.length -= d;
/* At this point q (if any) is the entry the new chunk goes right after. */
294 assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
295 assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));
297 /* Try to merge memory blocks */
/* Fast path: same block, contiguous both in the block and in the
 * queue's index space -- just extend q instead of allocating a node. */
299 if (q->chunk.memblock == chunk.memblock &&
300 q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
301 bq->write_index == q->index + (int64_t)q->chunk.length) {
303 q->chunk.length += chunk.length;
304 bq->write_index += chunk.length;
308 assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));
/* Generic path: allocate and splice in a fresh list node. */
311 n = pa_xnew(struct memblock_list, 1);
313 pa_memblock_ref(n->chunk.memblock);
314 n->index = bq->write_index;
315 bq->write_index += n->chunk.length;
/* Insert after q, or at the head when the list was searched past front. */
317 n->next = q ? q->next : bq->blocks;
/* Return (without consuming) the chunk at the current read position.
 * Yields real data when the head block starts at read_index, a reference
 * to the silence block when there is a gap (or nothing) to cover, and a
 * failure/empty result while still pre-buffering. The caller must unref
 * chunk->memblock when done and advance via pa_memblockq_drop().
 * NOTE(review): return statements, the `length` declaration and the
 * no-silence fallback branch are elided from this listing. */
334 int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
338 if (bq->state == PREBUF) {
340 /* We need to pre-buffer */
341 if (pa_memblockq_get_length(bq) < bq->prebuf)
346 } else if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
348 /* Buffer underflow protection */
353 /* Do we need to spit out silence? */
354 if (!bq->blocks || bq->blocks->index > bq->read_index) {
358 /* How much silence shall we return? */
/* 0 means "no upper bound": clamped to the silence block size below. */
359 length = bq->blocks ? bq->blocks->index - bq->read_index : 0;
361 /* We need to return silence, since no data is yet available */
363 chunk->memblock = pa_memblock_ref(bq->silence);
365 if (!length || length > chunk->memblock->length)
366 length = chunk->memblock->length;
368 chunk->length = length;
371 /* If the memblockq is empty, return -1, otherwise return
372 * the time to sleep */
/* No silence block configured: hand back an empty chunk instead. */
376 chunk->memblock = NULL;
377 chunk->length = length;
384 /* Ok, let's pass real data to the caller */
385 assert(bq->blocks->index == bq->read_index);
/* Shallow-copy the head chunk and take a reference for the caller. */
387 *chunk = bq->blocks->chunk;
388 pa_memblock_ref(chunk->memblock);
/* Advance the read pointer by `length` bytes, releasing fully consumed
 * blocks. When `chunk` is non-NULL it must match what peek() returned
 * (real head block, or the silence block with matching length) -- this
 * asserts the caller consumed exactly what it was shown.
 * NOTE(review): the surrounding loop, several assert/return lines and
 * variable declarations (`l`, `d`) are elided from this listing. */
393 void pa_memblockq_drop(pa_memblockq *bq, const pa_memchunk *chunk, size_t length) {
395 assert(length % bq->base == 0);
397 assert(!chunk || length <= chunk->length);
401 if (bq->blocks && bq->blocks->index == bq->read_index) {
402 /* The first item in queue is valid */
404 /* Does the chunk match with what the user supplied us? */
405 if (memcmp(chunk, &bq->blocks->chunk, sizeof(pa_memchunk)) != 0)
411 /* The first item in the queue is not yet relevant */
413 assert(!bq->blocks || bq->blocks->index > bq->read_index);
/* Size of the silence span peek() would have produced here. */
414 l = bq->blocks ? bq->blocks->index - bq->read_index : 0;
418 if (!l || l > bq->silence->length)
419 l = bq->silence->length;
423 /* Do the entries still match? */
424 if (chunk->index != 0 || chunk->length != l || chunk->memblock != bq->silence)
434 assert(bq->blocks->index >= bq->read_index);
436 d = (size_t) (bq->blocks->index - bq->read_index);
439 /* The first block is too far in the future */
/* The whole drop falls inside the gap before the first block. */
441 bq->read_index += length;
449 assert(bq->blocks->index == bq->read_index);
451 if (bq->blocks->chunk.length <= length) {
452 /* We need to drop the full block */
454 length -= bq->blocks->chunk.length;
455 bq->read_index += bq->blocks->chunk.length;
457 drop_block(bq, bq->blocks);
459 /* Only the start of this block needs to be dropped */
/* Trim the head block in place; its queue index moves forward too. */
461 bq->blocks->chunk.index += length;
462 bq->blocks->chunk.length -= length;
463 bq->blocks->index += length;
464 bq->read_index += length;
470 /* The list is empty, there's nothing we could drop */
471 bq->read_index += length;
/* Return non-zero when a peek() would produce data right now.
 * While still pre-buffering with less than `prebuf` bytes queued,
 * the queue is not readable.
 * NOTE(review): the return statements are elided from this listing. */
477 int pa_memblockq_is_readable(pa_memblockq *bq) {
480 if (bq->prebuf > 0) {
481 size_t l = pa_memblockq_get_length(bq);
483 if (bq->state == PREBUF && l < bq->prebuf)
/* Return non-zero when `length` more bytes would fit without exceeding
 * the target length. Unaligned lengths are rejected outright.
 * NOTE(review): the return in the alignment branch is elided here. */
493 int pa_memblockq_is_writable(pa_memblockq *bq, size_t length) {
496 if (length % bq->base)
499 return pa_memblockq_get_length(bq) + length <= bq->tlength;
/* Number of bytes currently between read and write index; 0 when the
 * writer is at or behind the reader (underflow / just seeked).
 * NOTE(review): the `return 0;` of the first branch is elided here. */
502 size_t pa_memblockq_get_length(pa_memblockq *bq) {
505 if (bq->write_index <= bq->read_index)
508 return (size_t) (bq->write_index - bq->read_index);
/* How many bytes the writer should supply to reach the target length.
 * Returns 0 when the queue is already at/above tlength, and suppresses
 * requests smaller than minreq to avoid tiny transfers.
 * NOTE(review): the declaration of `l`, an early return and the
 * reassignment `l = bq->tlength - l` are elided from this listing, so
 * the final expression reads the *missing* amount, not the fill level. */
511 size_t pa_memblockq_missing(pa_memblockq *bq) {
515 if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
519 return (l >= bq->minreq) ? l : 0;
522 size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
/* Reposition the write index without touching stored data. A later
 * push() resolves any overlap with existing blocks.
 * NOTE(review): the switch statement header, the `return;` after each
 * case and the default/assert arm are elided from this listing --
 * confirm each case does not fall through in the full file. */
528 void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
532 case PA_SEEK_RELATIVE:
/* Relative to the current write position. */
533 bq->write_index += offset;
535 case PA_SEEK_ABSOLUTE:
536 bq->write_index = offset;
538 case PA_SEEK_RELATIVE_ON_READ:
/* Relative to the read position. */
539 bq->write_index = bq->read_index + offset;
541 case PA_SEEK_RELATIVE_END:
/* Relative to the end of queued data; with an empty list the read
 * index serves as the end position. */
542 bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t)bq->blocks_tail->chunk.length : bq->read_index) + offset;
/* Drop every queued block, collapse the write index back onto the read
 * index, and re-arm pre-buffering (if configured).
 * NOTE(review): the `while (bq->blocks)` loop header around drop_block
 * is elided from this listing. */
549 void pa_memblockq_flush(pa_memblockq *bq) {
553 drop_block(bq, bq->blocks);
555 assert(bq->n_blocks == 0);
557 bq->write_index = bq->read_index;
/* After a flush the stream must pre-buffer again before playback. */
559 pa_memblockq_prebuf_force(bq);
562 size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
/* Accessor: absolute read position in bytes. */
568 int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
570 return bq->read_index;
/* Accessor: absolute write position in bytes. */
573 int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
575 return bq->write_index;
/* Like pa_memblockq_push(), but accepts chunks whose length is not a
 * multiple of `base`: data is funneled through a lazily created
 * pa_mcalign, which re-chunks it into aligned pieces before pushing.
 * NOTE(review): several lines are elided from this listing -- the
 * fast-path alignment test guarding the direct push at 585, the
 * `if (!bq->mcalign)` guard around the pa_mcalign_new() call, the
 * declarations of `rchunk`/`r`, and the error/return handling in the
 * pop loop. */
578 int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
582 assert(chunk && bq->base);
/* Already aligned: push straight through. */
585 return pa_memblockq_push(bq, chunk);
588 bq->mcalign = pa_mcalign_new(bq->base);
/* Check capacity against the aligned size before feeding the aligner,
 * since pieces popped below cannot be un-pushed. */
590 if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
593 pa_mcalign_push(bq->mcalign, chunk);
/* Drain every aligned piece the aligner can produce right now. */
595 while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
597 r = pa_memblockq_push(bq, &rchunk);
/* pa_mcalign_pop() gave us a reference; drop it after pushing. */
598 pa_memblock_unref(rchunk.memblock);
/* Trim the queue down to at most `length` bytes by dropping the oldest
 * data from the read side.
 * NOTE(review): the `if (l > length)` guard around the drop call and
 * the declaration of `l` are elided from this listing. */
607 void pa_memblockq_shorten(pa_memblockq *bq, size_t length) {
611 l = pa_memblockq_get_length(bq);
/* Dropping l - length bytes leaves exactly `length` queued. */
614 pa_memblockq_drop(bq, NULL, l - length);
/* Leave the pre-buffering state immediately so peek() starts returning
 * data/silence without waiting for `prebuf` bytes.
 * NOTE(review): the state assignment in the branch body is elided. */
617 void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
620 if (bq->state == PREBUF)
/* Re-enter the pre-buffering state (no-op when prebuf is 0 or the
 * queue is already pre-buffering).
 * NOTE(review): the state assignment in the branch body is elided. */
624 void pa_memblockq_prebuf_force(pa_memblockq *bq) {
627 if (bq->state == RUNNING && bq->prebuf > 0)
/* Accessor: sanitized maximum queue length in bytes. */
631 size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
634 return bq->maxlength;
637 size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {