/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
32 #include <pulse/xmalloc.h>
34 #include <pulsecore/log.h>
35 #include <pulsecore/mcalign.h>
36 #include <pulsecore/macro.h>
37 #include <pulsecore/flist.h>
39 #include "memblockq.h"
42 struct list_item *next, *prev;
47 PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
50 struct list_item *blocks, *blocks_tail;
51 struct list_item *current_read, *current_write;
53 size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
54 int64_t read_index, write_index;
/* Allocate and initialize a new memblockq. The requested buffer metrics
 * are sanitized through the pa_memblockq_set_*() calls below (which
 * enforce base alignment and mutual consistency), and a reference to the
 * caller's silence chunk is kept for filling read gaps.
 * NOTE(review): several lines are elided in this extraction (parameter
 * list, asserts, the final return) — do not treat this span as complete. */
62 pa_memblockq* pa_memblockq_new(
70         pa_memchunk *silence) {
76     bq = pa_xnew(pa_memblockq, 1);
77     bq->blocks = bq->blocks_tail = NULL;
78     bq->current_read = bq->current_write = NULL;
82     bq->read_index = bq->write_index = idx;
84     pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
85                  (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);
/* Zero everything first so each setter below sees a consistent state
 * when it cross-checks against the other attributes. */
88     bq->requested = bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
91     pa_memblockq_set_maxlength(bq, maxlength);
92     pa_memblockq_set_tlength(bq, tlength);
93     pa_memblockq_set_minreq(bq, minreq);
94     pa_memblockq_set_prebuf(bq, prebuf);
95     pa_memblockq_set_maxrewind(bq, maxrewind);
97     pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
98                  (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);
/* Take our own reference on the silence memblock (the guarding
 * "if (silence)" branch appears to be elided from this view). */
101         bq->silence = *silence;
102         pa_memblock_ref(bq->silence.memblock);
104         pa_memchunk_reset(&bq->silence);
106     bq->mcalign = pa_mcalign_new(bq->base);
/* Destroy a memblockq: drop all queued blocks, release the silence
 * chunk reference and the mcalign helper.
 * NOTE(review): the final pa_xfree(bq) is elided from this view. */
111 void pa_memblockq_free(pa_memblockq* bq) {
114     pa_memblockq_silence(bq);
116     if (bq->silence.memblock)
117         pa_memblock_unref(bq->silence.memblock);
120         pa_mcalign_free(bq->mcalign);
/* Reposition the cached read cursor so it points at (or left of) the
 * block that contains bq->read_index. May leave it NULL when the queue
 * is empty or everything was already consumed. */
125 static void fix_current_read(pa_memblockq *bq) {
128     if (PA_UNLIKELY(!bq->blocks)) {
129         bq->current_read = NULL;
/* No cursor yet: start the search from the head of the list. */
133     if (PA_UNLIKELY(!bq->current_read))
134         bq->current_read = bq->blocks;
/* Walk left while the cursor sits past read_index. */
137     while (PA_UNLIKELY(bq->current_read->index > bq->read_index))
139         if (bq->current_read->prev)
140             bq->current_read = bq->current_read->prev;
/* Walk right past blocks that end at or before read_index. */
145     while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
146         bq->current_read = bq->current_read->next;
148     /* At this point current_read will either point at or left of the
149        next block to play. It may be NULL in case everything in
150        the queue was already played */
/* Mirror of fix_current_read() for the write side: reposition the cached
 * write cursor so it points at (or right of) the block covering
 * bq->write_index. May leave it NULL. */
153 static void fix_current_write(pa_memblockq *bq) {
156     if (PA_UNLIKELY(!bq->blocks)) {
157         bq->current_write = NULL;
/* No cursor yet: start the search from the tail of the list. */
161     if (PA_UNLIKELY(!bq->current_write))
162         bq->current_write = bq->blocks_tail;
/* Walk right past blocks that end at or before write_index. */
165     while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))
167         if (bq->current_write->next)
168             bq->current_write = bq->current_write->next;
/* Walk left while the cursor sits past write_index. */
173     while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
174         bq->current_write = bq->current_write->prev;
176     /* At this point current_write will either point at or right of
177        the next block to write data to. It may be NULL in case
178        everything in the queue is still to be played */
/* Unlink list item q from the queue, patch head/tail and both cached
 * cursors if they pointed at it, release its memblock reference, and
 * recycle the item into the static free list (falling back to pa_xfree,
 * per the flist declaration above, when the free list is full). */
181 static void drop_block(pa_memblockq *bq, struct list_item *q) {
185     pa_assert(bq->n_blocks >= 1);
/* Unlink from the previous neighbour, or advance the list head. */
188         q->prev->next = q->next;
190         pa_assert(bq->blocks == q);
191         bq->blocks = q->next;
/* Unlink from the next neighbour, or retreat the list tail. */
195         q->next->prev = q->prev;
197         pa_assert(bq->blocks_tail == q);
198         bq->blocks_tail = q->prev;
/* Keep the cached cursors valid: step them off the removed item. */
201     if (bq->current_write == q)
202         bq->current_write = q->prev;
204     if (bq->current_read == q)
205         bq->current_read = q->next;
207     pa_memblock_unref(q->chunk.memblock);
209     if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
/* Free blocks that lie entirely before read_index - maxrewind, i.e.
 * history that can no longer be reached by a rewind. */
215 static void drop_backlog(pa_memblockq *bq) {
219     boundary = bq->read_index - (int64_t) bq->maxrewind;
221     while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
222         drop_block(bq, bq->blocks);
/* Return whether l more bytes may be written at write_index without the
 * filled span (write_index + l - read_index) exceeding maxlength.
 * NOTE(review): the early-return bodies are elided from this view. */
225 static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
/* If the write pointer was rewound behind the read pointer, part of the
 * new data only fills that gap and is not counted against maxlength. */
230     if (bq->read_index > bq->write_index) {
231         int64_t d = bq->read_index - bq->write_index;
/* "end" is the end of the last queued block, or write_index when the
 * queue is empty. */
239     end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;
241     /* Make sure that the list doesn't get too long */
242     if (bq->write_index + (int64_t) l > end)
243         if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
/* Append/insert the chunk at the current write_index. Data already in
 * the queue that the new chunk overlaps is dropped, truncated or split
 * as needed; adjacent chunks from the same memblock are merged.
 * Returns 0 on success, negative on misalignment or overflow (the
 * "return -1" lines are elided from this view).
 * NOTE(review): many connective lines (assignments to "chunk", loop
 * braces, list-link statements) are elided in this extraction. */
249 int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
250     struct list_item *q, *n;
256     pa_assert(uchunk->memblock);
257     pa_assert(uchunk->length > 0);
258     pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));
/* Pushed data must be a multiple of the frame size. */
260     if (uchunk->length % bq->base)
263     if (!can_push(bq, uchunk->length))
/* Remember the old write_index so the requested/missing accounting at
 * the bottom can be driven by the actual delta. */
266     old = bq->write_index;
269     fix_current_write(bq);
270     q = bq->current_write;
272     /* First we advance the q pointer right of where we want to
276     while (bq->write_index + (int64_t) chunk.length > q->index)
286     /* We go from back to front to look for the right place to add
287      * this new entry. Drop data we will overwrite on the way */
291         if (bq->write_index >= q->index + (int64_t) q->chunk.length)
292             /* We found the entry where we need to place the new entry immediately after */
294         else if (bq->write_index + (int64_t) chunk.length <= q->index) {
295             /* This entry isn't touched at all, let's skip it */
297         } else if (bq->write_index <= q->index &&
298                    bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {
300             /* This entry is fully replaced by the new entry, so let's drop it */
306         } else if (bq->write_index >= q->index) {
307             /* The write index points into this memblock, so let's
308              * truncate or split it */
310             if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {
312                 /* We need to save the end of this memchunk */
316                 /* Create a new list entry for the end of thie memchunk */
317                 if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
318                     p = pa_xnew(struct list_item, 1);
321                 pa_memblock_ref(p->chunk.memblock);
323                 /* Calculate offset */
324                 d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
327                 /* Drop it from the new entry */
328                 p->index = q->index + (int64_t) d;
329                 p->chunk.length -= d;
331                 /* Add it to the list */
333                 if ((p->next = q->next))
342             /* Truncate the chunk */
343             if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
350             /* We had to truncate this block, hence we're now at the right position */
355             pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
356                       bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
357                       bq->write_index < q->index);
359             /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */
361             d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
362             q->index += (int64_t) d;
364             q->chunk.length -= d;
/* q now (if non-NULL) ends at or before write_index and the new chunk
 * fits before q's successor. */
371     pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
372     pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));
374     /* Try to merge memory blocks */
376     if (q->chunk.memblock == chunk.memblock &&
377         q->chunk.index + q->chunk.length == chunk.index &&
378         bq->write_index == q->index + (int64_t) q->chunk.length) {
380         q->chunk.length += chunk.length;
381         bq->write_index += (int64_t) chunk.length;
385     pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));
/* No merge possible: allocate a fresh list item (recycled from the
 * static free list when available) and link it in after q. */
387     if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
388         n = pa_xnew(struct list_item, 1);
391     pa_memblock_ref(n->chunk.memblock);
392     n->index = bq->write_index;
393     bq->write_index += (int64_t) n->chunk.length;
395     n->next = q ? q->next : bq->blocks;
/* Accounting: bytes the client pushed beyond what we requested reduce
 * "missing"; the rest merely consume outstanding "requested" credit. */
412     delta = bq->write_index - old;
414     if (delta >= (int64_t) bq->requested) {
415         delta -= (int64_t) bq->requested;
418         bq->requested -= (size_t) delta;
422     bq->missing -= delta;
/* Report whether prebuffering currently gates reading: while in prebuf,
 * active until the fill level reaches the prebuf watermark; otherwise
 * it re-activates only when prebuf > 0 and the queue has run dry. */
427 pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
431         return pa_memblockq_get_length(bq) < bq->prebuf;
433         return bq->prebuf > 0 && bq->read_index >= bq->write_index;
/* Update bq->in_prebuf and return TRUE while reads must be held back.
 * Leaves prebuf mode once the fill level reaches the watermark, and
 * re-enters it on underrun (read_index caught up with write_index). */
436 static pa_bool_t update_prebuf(pa_memblockq *bq) {
441         if (pa_memblockq_get_length(bq) < bq->prebuf)
444         bq->in_prebuf = FALSE;
448         if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
449             bq->in_prebuf = TRUE;
/* Return (without consuming) the chunk at the current read position.
 * Yields a referenced copy of the configured silence chunk — or a
 * memblock-less chunk of the right length — when the next bytes are a
 * gap with no queued data. Returns 0 on success, negative while
 * prebuffering or when nothing is available (the "return -1" lines are
 * elided from this view). Caller unrefs chunk->memblock when non-NULL. */
457 int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
462     /* We need to pre-buffer */
463     if (update_prebuf(bq))
466     fix_current_read(bq);
468     /* Do we need to spit out silence? */
469     if (!bq->current_read || bq->current_read->index > bq->read_index) {
473         /* How much silence shall we return? */
474         if (bq->current_read)
475             length = (size_t) (bq->current_read->index - bq->read_index);
476         else if (bq->write_index > bq->read_index)
477             length = (size_t) (bq->write_index - bq->read_index);
481         /* We need to return silence, since no data is yet available */
482         if (bq->silence.memblock) {
483             *chunk = bq->silence;
484             pa_memblock_ref(chunk->memblock);
/* Clamp the silence chunk so it doesn't extend past the gap. */
486             if (length > 0 && length < chunk->length)
487                 chunk->length = length;
491         /* If the memblockq is empty, return -1, otherwise return
492          * the time to sleep */
/* No silence chunk configured: hand back only the gap length. */
496         chunk->memblock = NULL;
497         chunk->length = length;
504     /* Ok, let's pass real data to the caller */
505     *chunk = bq->current_read->chunk;
506     pa_memblock_ref(chunk->memblock);
/* Skip the part of the block that lies before read_index. */
508     pa_assert(bq->read_index >= bq->current_read->index);
509     d = bq->read_index - bq->current_read->index;
510     chunk->index += (size_t) d;
511     chunk->length -= (size_t) d;
/* Advance the read pointer by length bytes (base-aligned), dropping
 * consumed data and updating the "missing" accounting. Advances piece by
 * piece so prebuffering can kick back in mid-drop.
 * NOTE(review): the surrounding loop construct and the drop_backlog()
 * call appear to be elided from this view. */
516 void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
519     pa_assert(length % bq->base == 0);
521     old = bq->read_index;
525         /* Do not drop any data when we are in prebuffering mode */
526         if (update_prebuf(bq))
529         fix_current_read(bq);
531         if (bq->current_read) {
534             /* We go through this piece by piece to make sure we don't
535              * drop more than allowed by prebuf */
/* d = bytes left in the current block from read_index to its end,
 * clamped to what the caller still wants dropped. */
537             p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
538             pa_assert(p >= bq->read_index);
539             d = p - bq->read_index;
541             if (d > (int64_t) length)
542                 d = (int64_t) length;
545             length -= (size_t) d;
549             /* The list is empty, there's nothing we could drop */
550             bq->read_index += (int64_t) length;
/* Whatever was consumed is now missing from the target fill level. */
557     delta = bq->read_index - old;
558     bq->missing += delta;
/* Move the read pointer back by length bytes (base-aligned), undoing a
 * drop; the retained backlog (maxrewind) makes the old data re-readable. */
561 void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
563     pa_assert(length % bq->base == 0);
565     /* This is kind of the inverse of pa_memblockq_drop() */
567     bq->read_index -= (int64_t) length;
568     bq->missing -= (int64_t) length;
/* TRUE when a peek would yield data: not gated by prebuffering and the
 * queue is non-empty (the return statements are elided from this view). */
571 pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
574     if (pa_memblockq_prebuf_active(bq))
577     if (pa_memblockq_get_length(bq) <= 0)
/* Current fill level in bytes: write_index - read_index, clamped to 0
 * when the write pointer was rewound behind the read pointer. */
583 size_t pa_memblockq_get_length(pa_memblockq *bq) {
586     if (bq->write_index <= bq->read_index)
589     return (size_t) (bq->write_index - bq->read_index);
/* Bytes needed to reach the target length (tlength), but only once the
 * shortfall is at least minreq; returns 0 otherwise to avoid requesting
 * tiny amounts. NOTE(review): the "l = tlength - l" step is elided. */
592 size_t pa_memblockq_missing(pa_memblockq *bq) {
596     if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
601     return l >= bq->minreq ? l : 0;
/* Reposition the write pointer according to the seek mode, then (when
 * accounting — the "account" flag's branch structure is partly elided
 * here) update the requested/missing bookkeeping by the resulting delta,
 * mirroring the logic at the end of pa_memblockq_push(). */
604 void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, pa_bool_t account) {
608     old = bq->write_index;
611         case PA_SEEK_RELATIVE:
612             bq->write_index += offset;
614         case PA_SEEK_ABSOLUTE:
615             bq->write_index = offset;
617         case PA_SEEK_RELATIVE_ON_READ:
618             bq->write_index = bq->read_index + offset;
620         case PA_SEEK_RELATIVE_END:
/* Relative to the end of queued data, or to read_index when empty. */
621             bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
624             pa_assert_not_reached();
629     delta = bq->write_index - old;
632     if (delta >= (int64_t) bq->requested) {
633         delta -= (int64_t) bq->requested;
635     } else if (delta >= 0) {
636         bq->requested -= (size_t) delta;
641     bq->missing -= delta;
/* Discard all queued data and pull the write pointer back to the read
 * pointer, re-arming prebuffering and adjusting the requested/missing
 * accounting by the (negative) write-index delta. */
644 void pa_memblockq_flush_write(pa_memblockq *bq) {
648     pa_memblockq_silence(bq);
650     old = bq->write_index;
651     bq->write_index = bq->read_index;
653     pa_memblockq_prebuf_force(bq);
/* Same accounting pattern as pa_memblockq_seek()/push(). */
655     delta = bq->write_index - old;
657     if (delta >= (int64_t) bq->requested) {
658         delta -= (int64_t) bq->requested;
660     } else if (delta >= 0) {
661         bq->requested -= (size_t) delta;
665     bq->missing -= delta;
/* Discard all queued data and jump the read pointer forward to the
 * write pointer; the skipped span counts as consumed, so it is added to
 * "missing". Prebuffering is re-armed. */
668 void pa_memblockq_flush_read(pa_memblockq *bq) {
672     pa_memblockq_silence(bq);
674     old = bq->read_index;
675     bq->read_index = bq->write_index;
677     pa_memblockq_prebuf_force(bq);
679     delta = bq->read_index - old;
680     bq->missing += delta;
/* Trivial accessors. NOTE(review): the return statements of the first
 * two are elided from this extraction. */
683 size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
/* Accessor: the minimum-request watermark. */
689 size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
/* Accessor: how far pa_memblockq_rewind() may go back. */
695 size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
698     return bq->maxrewind;
/* Accessor: absolute read position in bytes. */
701 int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
704     return bq->read_index;
/* Accessor: absolute write position in bytes. */
707 int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
710     return bq->write_index;
/* Like pa_memblockq_push(), but accepts chunks that are not multiples
 * of the frame size: routes them through the mcalign helper, which
 * buffers partial frames and emits base-aligned chunks. Returns 0 on
 * success (fast path returns push() directly when base == 1; error
 * returns are elided from this view). */
713 int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
720         return pa_memblockq_push(bq, chunk);
/* Check capacity up front against the aligned size, so the pop loop
 * below cannot fail halfway through. */
722     if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
725     pa_mcalign_push(bq->mcalign, chunk);
727     while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
729         r = pa_memblockq_push(bq, &rchunk);
730         pa_memblock_unref(rchunk.memblock);
/* On failure, discard whatever the aligner still holds. */
733             pa_mcalign_flush(bq->mcalign);
/* Leave prebuffering mode immediately, letting reads proceed. */
741 void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
744     bq->in_prebuf = FALSE;
/* Force prebuffering back on (guarding condition elided from this view),
 * so reads are held until the prebuf watermark is reached again. */
747 void pa_memblockq_prebuf_force(pa_memblockq *bq) {
751         bq->in_prebuf = TRUE;
/* Accessor: hard capacity limit in bytes. */
754 size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
757     return bq->maxlength;
/* Accessor: prebuf watermark (return statement elided from this view). */
760 size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
/* Return the number of missing bytes to request from the client and
 * move that amount from "missing" into "requested" (the transfer
 * statements are elided from this view). Returns 0 when nothing is
 * missing. */
766 size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
771 /*     pa_log("pop: %lli", bq->missing); */
773     if (bq->missing <= 0)
776     l = (size_t) bq->missing;
/* Set the capacity limit: rounded UP to a multiple of base, at least
 * one base frame, and tlength is shrunk if it now exceeds the limit. */
783 void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
786     bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;
788     if (bq->maxlength < bq->base)
789         bq->maxlength = bq->base;
/* Cascade: keep the dependent attribute consistent. */
791     if (bq->tlength > bq->maxlength)
792         pa_memblockq_set_tlength(bq, bq->maxlength);
/* Set the target length: 0 or (size_t)-1 means "use maxlength". Rounded
 * UP to a multiple of base and clamped to maxlength; minreq and prebuf
 * are cascaded so they stay consistent. The change in target is folded
 * into "missing" so the request accounting tracks the new goal. */
795 void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
799     if (tlength <= 0 || tlength == (size_t) -1)
800         tlength = bq->maxlength;
802     old_tlength = bq->tlength;
803     bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;
805     if (bq->tlength > bq->maxlength)
806         bq->tlength = bq->maxlength;
/* Cascade: minreq may not exceed tlength... */
808     if (bq->minreq > bq->tlength)
809         pa_memblockq_set_minreq(bq, bq->tlength);
/* ...and prebuf may not exceed tlength + base - minreq. */
811     if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
812         pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
814     bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
/* Set the minimum-request size: rounded DOWN to a multiple of base,
 * clamped to [base, tlength]; prebuf is cascaded to stay within
 * tlength + base - minreq. */
817 void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
820     bq->minreq = (minreq/bq->base)*bq->base;
822     if (bq->minreq > bq->tlength)
823         bq->minreq = bq->tlength;
825     if (bq->minreq < bq->base)
826         bq->minreq = bq->base;
828     if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
829         pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
/* Set the prebuf watermark: (size_t)-1 selects the default
 * tlength + base - minreq. Rounded UP to a multiple of base, at least
 * one base frame when non-zero, and clamped to the same upper bound.
 * in_prebuf is cleared immediately if the queue already satisfies the
 * new watermark (or prebuffering is disabled). */
832 void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
835     if (prebuf == (size_t) -1)
836         prebuf = bq->tlength+bq->base-bq->minreq;
838     bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;
840     if (prebuf > 0 && bq->prebuf < bq->base)
841         bq->prebuf = bq->base;
843     if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
844         bq->prebuf = bq->tlength+bq->base-bq->minreq;
846     if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
847         bq->in_prebuf = FALSE;
/* Set the rewind backlog size, rounded DOWN to a multiple of base. */
850 void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
853     bq->maxrewind = (maxrewind/bq->base)*bq->base;
/* Apply a pa_buffer_attr wholesale. Order matters: maxlength first so
 * the later setters are clamped against the new capacity, and prebuf
 * before minreq (each setter cascades into the others). */
856 void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
860     pa_memblockq_set_maxlength(bq, a->maxlength);
861     pa_memblockq_set_tlength(bq, a->tlength);
862     pa_memblockq_set_prebuf(bq, a->prebuf);
863     pa_memblockq_set_minreq(bq, a->minreq);
/* Export the current (sanitized) metrics into a pa_buffer_attr.
 * Values are narrowed to the protocol's uint32_t fields. */
866 void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
870     a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
871     a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
872     a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
873     a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
/* Move data from "source" into "bq": peek each chunk, push real data
 * (aligned) or seek over silence/gap chunks, then drop it from the
 * source. Prebuffering on bq is disabled so the spliced data flows.
 * Returns 0 when the source is drained, negative on push failure
 * (return statements and the loop construct are elided from this view). */
876 int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {
881     pa_memblockq_prebuf_disable(bq);
886         if (pa_memblockq_peek(source, &chunk) < 0)
889         pa_assert(chunk.length > 0);
891         if (chunk.memblock) {
893             if (pa_memblockq_push_align(bq, &chunk) < 0) {
894                 pa_memblock_unref(chunk.memblock);
898             pa_memblock_unref(chunk.memblock);
/* Gap chunk (no memblock): just advance the write pointer. */
900             pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, TRUE);
902         pa_memblockq_drop(bq, chunk.length);
/* Hint that all data from the read position onwards will be needed
 * soon, so backing memory can be paged in ahead of time. */
906 void pa_memblockq_willneed(pa_memblockq *bq) {
911     fix_current_read(bq);
913     for (q = bq->current_read; q; q = q->next)
914         pa_memchunk_will_need(&q->chunk);
/* Replace the silence chunk used by pa_memblockq_peek() for gaps:
 * release the old reference, then either take a reference on the new
 * chunk or reset to "no silence" (the NULL-silence branch structure is
 * partly elided from this view). */
917 void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
920     if (bq->silence.memblock)
921         pa_memblock_unref(bq->silence.memblock);
924         bq->silence = *silence;
925         pa_memblock_ref(bq->silence.memblock);
927         pa_memchunk_reset(&bq->silence);
/* TRUE when no blocks are queued (return statement elided from view). */
930 pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
/* Drop every queued block; indices are left untouched, so subsequent
 * reads of the span yield silence. (Loop construct elided from view.) */
936 void pa_memblockq_silence(pa_memblockq *bq) {
940         drop_block(bq, bq->blocks);
942     pa_assert(bq->n_blocks == 0);
/* Accessor: number of queued list items (return elided from view). */
945 unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
/* Accessor: frame size all indices are aligned to (return elided). */
951 size_t pa_memblockq_get_base(pa_memblockq *bq) {