Revert r1404 and keep it on a development branch until it is fully tested.
[profile/ivi/pulseaudio-panda.git] / src / pulsecore / memblockq.c
/* $Id$ */

/***
  This file is part of PulseAudio.
 
  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2 of the License,
  or (at your option) any later version.
 
  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.
 
  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <pulse/xmalloc.h>

#include <pulsecore/log.h>
#include <pulsecore/mcalign.h>

#include "memblockq.h"

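/* The queue is kept as a doubly linked list of memchunks ordered by their
 * absolute write position. read_index and write_index are absolute byte
 * positions; a gap between blocks (or before the first block) is a "hole"
 * that the read side fills with the optional silence memblock. */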
struct memblock_list {
    struct memblock_list *next, *prev;
    int64_t index;
    pa_memchunk chunk;
};

struct pa_memblockq {
    struct memblock_list *blocks, *blocks_tail;
    unsigned n_blocks;
    size_t maxlength, tlength, base, prebuf, minreq;
    int64_t read_index, write_index;
    enum { PREBUF, RUNNING } state;
    pa_memblock *silence;
    pa_mcalign *mcalign;
};

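/* Allocate a new queue. idx is the initial read/write index, maxlength the
 * hard limit on queued data, tlength the target fill level (0 selects
 * maxlength), base the alignment all sizes are rounded to, prebuf the fill
 * level required before reading may start ((size_t) -1 selects tlength/2),
 * minreq the minimum request size, and silence an optional memblock used to
 * fill holes. All sizes are sanitized against each other below. */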
pa_memblockq* pa_memblockq_new(
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        size_t base,
        size_t prebuf,
        size_t minreq,
        pa_memblock *silence) {

    pa_memblockq* bq;

    assert(base > 0);
    assert(maxlength >= base);

    bq = pa_xnew(pa_memblockq, 1);
    bq->blocks = bq->blocks_tail = NULL;
    bq->n_blocks = 0;

    bq->base = base;
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
        (unsigned long)maxlength, (unsigned long)tlength, (unsigned long)base, (unsigned long)prebuf, (unsigned long)minreq);

    bq->maxlength = ((maxlength+base-1)/base)*base;
    assert(bq->maxlength >= base);

    bq->tlength = ((tlength+base-1)/base)*base;
    if (!bq->tlength || bq->tlength >= bq->maxlength)
        bq->tlength = bq->maxlength;

    bq->prebuf = (prebuf == (size_t) -1) ? bq->tlength/2 : prebuf;
    bq->prebuf = ((bq->prebuf+base-1)/base)*base;
    if (bq->prebuf > bq->maxlength)
        bq->prebuf = bq->maxlength;

    bq->minreq = (minreq/base)*base;

    if (bq->minreq > bq->tlength - bq->prebuf)
        bq->minreq = bq->tlength - bq->prebuf;

    if (!bq->minreq)
        bq->minreq = 1;

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu",
        (unsigned long)bq->maxlength, (unsigned long)bq->tlength, (unsigned long)bq->base, (unsigned long)bq->prebuf, (unsigned long)bq->minreq);

    bq->state = bq->prebuf ? PREBUF : RUNNING;
    bq->silence = silence ? pa_memblock_ref(silence) : NULL;
    bq->mcalign = NULL;

    return bq;
}

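/*
 * A minimal usage sketch of the pa_memblockq API (illustrative only; how the
 * pa_memchunk "in" is filled and released is assumed here, it is not part of
 * this file):
 *
 *     pa_memblockq *q = pa_memblockq_new(0, 1024*1024, 0, 4, 0, 0, NULL);
 *     pa_memchunk in, out;
 *
 *     ... fill in.memblock / in.index / in.length with base-aligned data ...
 *     pa_memblockq_push(q, &in);
 *
 *     if (pa_memblockq_peek(q, &out) >= 0 && out.memblock) {
 *         ... consume out.length bytes at out.index of out.memblock ...
 *         pa_memblockq_drop(q, &out, out.length);
 *         pa_memblock_unref(out.memblock);
 *     }
 *
 *     pa_memblockq_free(q);
 */
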
void pa_memblockq_free(pa_memblockq* bq) {
    assert(bq);

    pa_memblockq_flush(bq);

    if (bq->silence)
        pa_memblock_unref(bq->silence);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq);
}

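/* Unlink the given list entry from the queue and release its memory block */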
static void drop_block(pa_memblockq *bq, struct memblock_list *q) {
    assert(bq);
    assert(q);

    assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else
        bq->blocks = q->next;

    if (q->next)
        q->next->prev = q->prev;
    else
        bq->blocks_tail = q->prev;

    pa_memblock_unref(q->chunk.memblock);
    pa_xfree(q);

    bq->n_blocks--;
}

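/* Check whether l more bytes fit at the current write index without the
 * queued data exceeding maxlength. An existing underflow (read index ahead
 * of the write index) and already-allocated space at the tail are taken
 * into account. */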
static int can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    assert(bq);

    if (bq->read_index > bq->write_index) {
        size_t d = bq->read_index - bq->write_index;

        if (l > d)
            l -= d;
        else
            return 1;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + bq->blocks_tail->chunk.length : 0;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t)l > end)
        if (bq->write_index + l - bq->read_index > bq->maxlength)
            return 0;

    return 1;
}

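/* Add a chunk at the current write index. The block list is walked from the
 * tail to find the insert position; queued data that the new chunk overlaps
 * is truncated, split or dropped, and the chunk is merged with its
 * predecessor when both point into the same memory block. Returns 0 on
 * success, -1 if the chunk is not base-aligned or does not fit. */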
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {

    struct memblock_list *q, *n;
    pa_memchunk chunk;

    assert(bq);
    assert(uchunk);
    assert(uchunk->memblock);
    assert(uchunk->length > 0);
    assert(uchunk->index + uchunk->length <= uchunk->memblock->length);

    if (uchunk->length % bq->base)
        return -1;

    if (!can_push(bq, uchunk->length))
        return -1;

    chunk = *uchunk;

    if (bq->read_index > bq->write_index) {

        /* We currently have a buffer underflow; we need to drop some
         * of the incoming data */

        size_t d = bq->read_index - bq->write_index;

        if (chunk.length > d) {
            chunk.index += d;
            chunk.length -= d;
            bq->write_index = bq->read_index;
        } else {
            /* We drop the incoming data completely */
            bq->write_index += chunk.length;
            return 0;
        }
    }

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    q = bq->blocks_tail;
    while (q) {

        if (bq->write_index >= q->index + (int64_t)q->chunk.length)
            /* We found the entry immediately after which the new entry belongs */
            break;
        else if (bq->write_index + (int64_t)chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
            bq->write_index + chunk.length >= q->index + q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct memblock_list *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + chunk.length < q->index + q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct memblock_list *p;
                size_t d;

                /* Create a new list entry for the end of this memchunk */
                p = pa_xnew(struct memblock_list, 1);
                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = bq->write_index + chunk.length - q->index;
                assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = bq->write_index - q->index)) {
                struct memblock_list *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            assert(bq->write_index + (int64_t)chunk.length > q->index &&
                   bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                   bq->write_index < q->index);

            /* The new data overlaps the beginning of this entry, so let's drop the overwritten part */

            d = bq->write_index + chunk.length - q->index;
            q->index += d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }

    }

    if (q) {
        assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + (int64_t)q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t)q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += chunk.length;
            return 0;
        }
    } else
        assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    n = pa_xnew(struct memblock_list, 1);
    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;
    return 0;
}

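/* Return (without dequeuing) the next chunk at the read index. Returns -1
 * while pre-buffering or after an underrun. If there is a hole before the
 * next block, a chunk of silence covering (part of) the hole is returned
 * instead; without a silence memblock the returned chunk has a NULL
 * memblock and only carries the length of the hole. */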
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    assert(bq);
    assert(chunk);

    if (bq->state == PREBUF) {

        /* We need to pre-buffer */
        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return -1;

        bq->state = RUNNING;

    } else if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {

        /* Buffer underflow protection */
        bq->state = PREBUF;
        return -1;
    }

    /* Do we need to spit out silence? */
    if (!bq->blocks || bq->blocks->index > bq->read_index) {

        size_t length;

        /* How much silence shall we return? */
        length = bq->blocks ? bq->blocks->index - bq->read_index : 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence) {
            chunk->memblock = pa_memblock_ref(bq->silence);

            if (!length || length > chunk->memblock->length)
                length = chunk->memblock->length;

            chunk->length = length;
        } else {

            /* If the memblockq is empty, return -1; otherwise return a
             * chunk with a NULL memblock whose length is the size of the
             * hole before the next block */
            if (!bq->blocks)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    assert(bq->blocks->index == bq->read_index);

    *chunk = bq->blocks->chunk;
    pa_memblock_ref(chunk->memblock);

    return 0;
}

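/* Advance the read index by length bytes, dropping data from the head of
 * the queue. If chunk is non-NULL it is compared against what
 * pa_memblockq_peek() would currently return, and the call is silently
 * ignored on a mismatch. */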
void pa_memblockq_drop(pa_memblockq *bq, const pa_memchunk *chunk, size_t length) {
    assert(bq);
    assert(length % bq->base == 0);

    assert(!chunk || length <= chunk->length);

    if (chunk) {

        if (bq->blocks && bq->blocks->index == bq->read_index) {
            /* The first item in the queue is valid */

            /* Does the chunk the user supplied still match the head of the queue? */
            if (memcmp(chunk, &bq->blocks->chunk, sizeof(pa_memchunk)) != 0)
                return;

        } else {
            size_t l;

            /* The first item in the queue is not yet relevant */

            assert(!bq->blocks || bq->blocks->index > bq->read_index);
            l = bq->blocks ? bq->blocks->index - bq->read_index : 0;

            if (bq->silence) {

                if (!l || l > bq->silence->length)
                    l = bq->silence->length;

            }

            /* Do the entries still match? */
            if (chunk->index != 0 || chunk->length != l || chunk->memblock != bq->silence)
                return;
        }
    }

    while (length > 0) {

        if (bq->blocks) {
            size_t d;

            assert(bq->blocks->index >= bq->read_index);

            d = (size_t) (bq->blocks->index - bq->read_index);

            if (d >= length) {
                /* The first block is too far in the future */

                bq->read_index += length;
                break;
            } else {

                length -= d;
                bq->read_index += d;
            }

            assert(bq->blocks->index == bq->read_index);

            if (bq->blocks->chunk.length <= length) {
                /* We need to drop the full block */

                length -= bq->blocks->chunk.length;
                bq->read_index += bq->blocks->chunk.length;

                drop_block(bq, bq->blocks);
            } else {
                /* Only the start of this block needs to be dropped */

                bq->blocks->chunk.index += length;
                bq->blocks->chunk.length -= length;
                bq->blocks->index += length;
                bq->read_index += length;
                break;
            }

        } else {

            /* The list is empty; there's nothing we could drop */
            bq->read_index += length;
            break;
        }
    }
}

int pa_memblockq_is_readable(pa_memblockq *bq) {
    assert(bq);

    if (bq->prebuf > 0) {
        size_t l = pa_memblockq_get_length(bq);

        if (bq->state == PREBUF && l < bq->prebuf)
            return 0;

        if (l <= 0)
            return 0;
    }

    return 1;
}

int pa_memblockq_is_writable(pa_memblockq *bq, size_t length) {
    assert(bq);

    if (length % bq->base)
        return 0;

    return pa_memblockq_get_length(bq) + length <= bq->tlength;
}

size_t pa_memblockq_get_length(pa_memblockq *bq) {
    assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}

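/* Return how many bytes are missing up to the target length tlength, or 0
 * if fewer than minreq bytes are missing. */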
size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;
    return (l >= bq->minreq) ? l : 0;
}

size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    assert(bq);

    return bq->minreq;
}

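/* Move the write index without transferring any data. The offset is applied
 * relative to the current write index, as an absolute position, relative to
 * the read index, or relative to the end of the queued data, depending on
 * the seek mode. */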
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek) {
    assert(bq);

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            return;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            return;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            return;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t)bq->blocks_tail->chunk.length : bq->read_index) + offset;
            return;
    }

    assert(0);
}

void pa_memblockq_flush(pa_memblockq *bq) {
    assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    assert(bq->n_blocks == 0);

    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
}

size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    assert(bq);

    return bq->tlength;
}

int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    assert(bq);
    return bq->read_index;
}

int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    assert(bq);
    return bq->write_index;
}

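/* Like pa_memblockq_push(), but the chunk need not be a multiple of the base
 * size: data is passed through a pa_mcalign object first, and only
 * base-aligned chunks are forwarded to pa_memblockq_push(). */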
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    assert(bq);
    assert(chunk && bq->base);

    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    if (!bq->mcalign)
        bq->mcalign = pa_mcalign_new(bq->base);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0)
            return -1;
    }

    return 0;
}

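/* Drop data from the head of the queue until at most length bytes remain */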
void pa_memblockq_shorten(pa_memblockq *bq, size_t length) {
    size_t l;
    assert(bq);

    l = pa_memblockq_get_length(bq);

    if (l > length)
        pa_memblockq_drop(bq, NULL, l - length);
}

void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    assert(bq);

    if (bq->state == PREBUF)
        bq->state = RUNNING;
}

void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    assert(bq);

    if (bq->state == RUNNING && bq->prebuf > 0)
        bq->state = PREBUF;
}

size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    assert(bq);

    return bq->maxlength;
}

size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    assert(bq);

    return bq->prebuf;
}