core: always allow volume setting with single-channel pa_cvolume
[platform/upstream/pulseaudio.git] / src / pulsecore / memblockq.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2006 Lennart Poettering
5
6   PulseAudio is free software; you can redistribute it and/or modify
7   it under the terms of the GNU Lesser General Public License as published
8   by the Free Software Foundation; either version 2.1 of the License,
9   or (at your option) any later version.
10
11   PulseAudio is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14   Lesser General Public License for more details.
15
16   You should have received a copy of the GNU Lesser General Public License
17   along with PulseAudio; if not, write to the Free Software
18   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
19   USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <sys/time.h>
27 #include <time.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31
32 #include <pulse/xmalloc.h>
33
34 #include <pulsecore/log.h>
35 #include <pulsecore/mcalign.h>
36 #include <pulsecore/macro.h>
37 #include <pulsecore/flist.h>
38
39 #include "memblockq.h"
40
41 struct list_item {
42     struct list_item *next, *prev;
43     int64_t index;
44     pa_memchunk chunk;
45 };
46
47 PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
48
49 struct pa_memblockq {
50     struct list_item *blocks, *blocks_tail;
51     struct list_item *current_read, *current_write;
52     unsigned n_blocks;
53     size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
54     int64_t read_index, write_index;
55     pa_bool_t in_prebuf;
56     pa_memchunk silence;
57     pa_mcalign *mcalign;
58     int64_t missing;
59     size_t requested;
60 };
61
/* Create a new memory block queue.
 *
 * idx       - initial value for both read and write index
 * maxlength - hard limit on queued bytes
 * tlength   - target fill level
 * base      - frame size; all metrics are rounded to multiples of this
 * prebuf    - bytes that must accumulate before reading may start
 * minreq    - minimum request granularity
 * maxrewind - how much already-read data to keep for rewinds
 * silence   - optional chunk used for gaps (referenced, not copied;
 *             may be NULL)
 *
 * The raw metrics are sanitized through the pa_memblockq_set_*()
 * setters, which enforce the invariants between them. */
pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(base > 0);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->current_read = bq->current_write = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->missing = 0;
    /* Zero all metrics first so the setters below start from a known state */
    bq->requested = bq->maxlength = bq->tlength = bq->prebuf = bq->minreq = bq->maxrewind = 0;
    bq->in_prebuf = TRUE;

    /* Order matters: each setter clamps against the values set before it */
    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);

    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
110
111 void pa_memblockq_free(pa_memblockq* bq) {
112     pa_assert(bq);
113
114     pa_memblockq_silence(bq);
115
116     if (bq->silence.memblock)
117         pa_memblock_unref(bq->silence.memblock);
118
119     if (bq->mcalign)
120         pa_mcalign_free(bq->mcalign);
121
122     pa_xfree(bq);
123 }
124
/* Reposition the cached current_read cursor so that it points at (or
 * left of) the block containing read_index. Caching the cursor avoids
 * walking the whole list on every access. */
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
152
/* Reposition the cached current_write cursor so that it points at (or
 * right of) the block containing write_index. Mirror image of
 * fix_current_read(), starting from the tail. */
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
180
/* Unlink q from the block list, repoint the cached cursors if they
 * referenced it, release its memblock, and recycle the node through
 * the lock-free free list (falling back to pa_xfree if it is full). */
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    /* Keep the cursors pointing at a live neighbour, not freed memory */
    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
214
215 static void drop_backlog(pa_memblockq *bq) {
216     int64_t boundary;
217     pa_assert(bq);
218
219     boundary = bq->read_index - (int64_t) bq->maxrewind;
220
221     while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
222         drop_block(bq, bq->blocks);
223 }
224
/* Return TRUE iff a write of l bytes at the current write index would
 * keep the queued amount within maxlength. */
static pa_bool_t can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    /* If the write pointer lags behind the read pointer, part of the
     * write only fills that gap and doesn't grow the queue */
    if (bq->read_index > bq->write_index) {
        int64_t d = bq->read_index - bq->write_index;

        if ((int64_t) l > d)
            l -= (size_t) d;
        else
            return TRUE; /* the whole write fits inside the gap */
    }

    /* End of the last stored block, or write_index if the list is empty */
    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return FALSE;

    return TRUE;
}
248
/* Queue uchunk at the current write index. The chunk is referenced,
 * not copied. Existing data that overlaps the written range is
 * overwritten: fully-covered blocks are dropped, partially-covered
 * ones are truncated or split. Returns 0 on success, -1 if the length
 * is not frame-aligned or the queue would exceed maxlength. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old, delta;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    /* Only frame-aligned writes are accepted */
    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                /* The truncated entry became empty, drop it entirely */
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                   bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                   bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >=  q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    /* Insert a fresh list entry after q (or at the list head if q is NULL) */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    /* Credit the advance against outstanding requests; only the
     * uncredited remainder reduces the missing counter */
    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= (int64_t) bq->requested;
        bq->requested = 0;
    } else {
        bq->requested -= (size_t) delta;
        delta = 0;
    }

    bq->missing -= delta;

    return 0;
}
426
427 pa_bool_t pa_memblockq_prebuf_active(pa_memblockq *bq) {
428     pa_assert(bq);
429
430     if (bq->in_prebuf)
431         return pa_memblockq_get_length(bq) < bq->prebuf;
432     else
433         return bq->prebuf > 0 && bq->read_index >= bq->write_index;
434 }
435
436 static pa_bool_t update_prebuf(pa_memblockq *bq) {
437     pa_assert(bq);
438
439     if (bq->in_prebuf) {
440
441         if (pa_memblockq_get_length(bq) < bq->prebuf)
442             return TRUE;
443
444         bq->in_prebuf = FALSE;
445         return FALSE;
446     } else {
447
448         if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
449             bq->in_prebuf = TRUE;
450             return TRUE;
451         }
452
453         return FALSE;
454     }
455 }
456
/* Return (in *chunk) the data at the read index without advancing it.
 * A reference is taken on any returned memblock. Returns -1 while
 * prebuffering, or when the queue is empty and no silence chunk is
 * set. For a gap before the next block, a slice of the silence chunk
 * is returned; without one, chunk->memblock is NULL and chunk->length
 * holds the gap size. */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            /* Clip the silence chunk to the gap size, when known */
            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    /* Skip the part of the block that lies before the read index */
    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
515
/* Advance the read index by length bytes (frame-aligned), then release
 * blocks that fell out of the rewind window. The advance stops early
 * if the queue (re-)enters prebuffering. Everything dropped is added
 * to the missing counter so the writer refills it. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old, delta;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);

    delta = bq->read_index - old;
    bq->missing += delta;
}
560
561 void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
562     pa_assert(bq);
563     pa_assert(length % bq->base == 0);
564
565     /* This is kind of the inverse of pa_memblockq_drop() */
566
567     bq->read_index -= (int64_t) length;
568     bq->missing -= (int64_t) length;
569 }
570
571 pa_bool_t pa_memblockq_is_readable(pa_memblockq *bq) {
572     pa_assert(bq);
573
574     if (pa_memblockq_prebuf_active(bq))
575         return FALSE;
576
577     if (pa_memblockq_get_length(bq) <= 0)
578         return FALSE;
579
580     return TRUE;
581 }
582
583 size_t pa_memblockq_get_length(pa_memblockq *bq) {
584     pa_assert(bq);
585
586     if (bq->write_index <= bq->read_index)
587         return 0;
588
589     return (size_t) (bq->write_index - bq->read_index);
590 }
591
592 size_t pa_memblockq_missing(pa_memblockq *bq) {
593     size_t l;
594     pa_assert(bq);
595
596     if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
597         return 0;
598
599     l = bq->tlength - l;
600
601     return l >= bq->minreq ? l : 0;
602 }
603
/* Move the write index according to seek mode and offset. When
 * account is TRUE a forward move is credited against outstanding
 * requests, exactly like written data in pa_memblockq_push(). */
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, pa_bool_t account) {
    int64_t old, delta;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            /* Relative to the end of the last stored block, or to the
             * read index when the queue is empty */
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);

    delta = bq->write_index - old;

    if (account) {
        /* Backward seeks (negative delta) are left uncredited so that
         * missing grows below */
        if (delta >= (int64_t) bq->requested) {
            delta -= (int64_t) bq->requested;
            bq->requested = 0;
        } else if (delta >= 0) {
            bq->requested -= (size_t) delta;
            delta = 0;
        }
    }

    bq->missing -= delta;
}
643
/* Drop all queued data, move the write index back to the read index
 * and re-enter prebuffering. The (typically negative) index jump is
 * accounted like a seek, so missing grows by the flushed amount. */
void pa_memblockq_flush_write(pa_memblockq *bq) {
    int64_t old, delta;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);

    delta = bq->write_index - old;

    if (delta >= (int64_t) bq->requested) {
        delta -= (int64_t) bq->requested;
        bq->requested = 0;
    } else if (delta >= 0) {
        bq->requested -= (size_t) delta;
        delta = 0;
    }

    bq->missing -= delta;
}
667
668 void pa_memblockq_flush_read(pa_memblockq *bq) {
669     int64_t old, delta;
670     pa_assert(bq);
671
672     pa_memblockq_silence(bq);
673
674     old = bq->read_index;
675     bq->read_index = bq->write_index;
676
677     pa_memblockq_prebuf_force(bq);
678
679     delta = bq->read_index - old;
680     bq->missing += delta;
681 }
682
/* Return the sanitized target length in bytes. */
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}
688
/* Return the sanitized minimum request size in bytes. */
size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}
694
/* Return the sanitized rewind window size in bytes. */
size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}
700
/* Return the absolute read index. */
int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}
706
/* Return the absolute write index. */
int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}
712
713 int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
714     pa_memchunk rchunk;
715
716     pa_assert(bq);
717     pa_assert(chunk);
718
719     if (bq->base == 1)
720         return pa_memblockq_push(bq, chunk);
721
722     if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
723         return -1;
724
725     pa_mcalign_push(bq->mcalign, chunk);
726
727     while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
728         int r;
729         r = pa_memblockq_push(bq, &rchunk);
730         pa_memblock_unref(rchunk.memblock);
731
732         if (r < 0) {
733             pa_mcalign_flush(bq->mcalign);
734             return -1;
735         }
736     }
737
738     return 0;
739 }
740
/* Leave prebuffering mode immediately, regardless of fill level. */
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = FALSE;
}
746
/* Re-enter prebuffering mode; a no-op when no prebuf value is set. */
void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = TRUE;
}
753
/* Return the sanitized maximum queue length in bytes. */
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}
759
/* Return the sanitized prebuffer threshold in bytes. */
size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
765
766 size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
767     size_t l;
768
769     pa_assert(bq);
770
771 /*     pa_log("pop: %lli", bq->missing); */
772
773     if (bq->missing <= 0)
774         return 0;
775
776     l = (size_t) bq->missing;
777     bq->missing = 0;
778     bq->requested += l;
779
780     return l;
781 }
782
783 void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
784     pa_assert(bq);
785
786     bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;
787
788     if (bq->maxlength < bq->base)
789         bq->maxlength = bq->base;
790
791     if (bq->tlength > bq->maxlength)
792         pa_memblockq_set_tlength(bq, bq->maxlength);
793 }
794
/* Set the target length: 0 or (size_t) -1 selects maxlength; the
 * value is rounded up to the frame size and clamped to maxlength.
 * minreq and prebuf are re-clamped to stay consistent, and missing is
 * adjusted by the change so request accounting stays correct. */
void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0 || tlength == (size_t) -1)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}
816
817 void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
818     pa_assert(bq);
819
820     bq->minreq = (minreq/bq->base)*bq->base;
821
822     if (bq->minreq > bq->tlength)
823         bq->minreq = bq->tlength;
824
825     if (bq->minreq < bq->base)
826         bq->minreq = bq->base;
827
828     if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
829         pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
830 }
831
/* Set the prebuffer threshold: (size_t) -1 selects the largest value
 * compatible with tlength and minreq; other values are rounded up to
 * the frame size and clamped. Prebuffering mode is left at once when
 * it is already satisfied or disabled. */
void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength+bq->base-bq->minreq;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    /* Tested against the caller's value, so an explicit request of 0
     * really disables prebuffering instead of being rounded up */
    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        bq->prebuf = bq->tlength+bq->base-bq->minreq;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = FALSE;
}
849
/* Set how much already-read data to keep for rewinding, rounded down
 * to the frame size. */
void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}
855
/* Apply a pa_buffer_attr to the queue. The setters run in an order
 * that lets each value be clamped against the ones set before it. */
void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_prebuf(bq, a->prebuf);
    pa_memblockq_set_minreq(bq, a->minreq);
}
865
/* Fill *a with the queue's current (sanitized) buffer metrics. */
void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
    a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
    a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
    a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
}
875
/* Transfer data from source into bq: real chunks are pushed (with
 * alignment), NULL chunks (gaps) become relative seeks, and after each
 * step the transferred length is dropped again. Returns 0 when
 * peeking from source fails (drained), -1 when a push fails.
 *
 * NOTE(review): both the seek and the drop operate on bq, while the
 * peek reads from source -- so source's read index never advances
 * here. That looks suspicious (possible endless duplication of
 * source's head until bq is full); verify the intended semantics
 * against the callers before relying on this function. */
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, TRUE);

        pa_memblockq_drop(bq, chunk.length);
    }
}
905
906 void pa_memblockq_willneed(pa_memblockq *bq) {
907     struct list_item *q;
908
909     pa_assert(bq);
910
911     fix_current_read(bq);
912
913     for (q = bq->current_read; q; q = q->next)
914         pa_memchunk_will_need(&q->chunk);
915 }
916
917 void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
918     pa_assert(bq);
919
920     if (bq->silence.memblock)
921         pa_memblock_unref(bq->silence.memblock);
922
923     if (silence) {
924         bq->silence = *silence;
925         pa_memblock_ref(bq->silence.memblock);
926     } else
927         pa_memchunk_reset(&bq->silence);
928 }
929
/* TRUE iff no blocks are currently stored in the queue. */
pa_bool_t pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}
935
936 void pa_memblockq_silence(pa_memblockq *bq) {
937     pa_assert(bq);
938
939     while (bq->blocks)
940         drop_block(bq, bq->blocks);
941
942     pa_assert(bq->n_blocks == 0);
943 }
944
/* Return how many chunks are currently stored in the queue. */
unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}
950
/* Return the frame size all queue metrics are multiples of. */
size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}