sink,source: Add the ability to disable alternate sample rate switching
[platform/upstream/pulseaudio.git] / src/modules/alsa/alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
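/* With timer-based scheduling ("tsched") the hardware buffer is configured to
 * be large (2s by default) and we wake up from a timer early enough that
 * roughly one watermark of headroom is still left before the buffer (or the
 * requested latency) would overrun. The INC/DEC constants below control how
 * that watermark is adapted at runtime: it is raised after an overrun and
 * only lowered again after things have stayed comfortable for
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC (see increase_watermark() and
 * decrease_watermark()). */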
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
75 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
79
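/* Hardware volumes within 1% of the requested value are accepted as-is; only
 * larger deviations are corrected with an additional software volume (see
 * source_set_volume_cb() and source_write_volume_cb()). */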
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
82 struct userdata {
83     pa_core *core;
84     pa_module *module;
85     pa_source *source;
86
87     pa_thread *thread;
88     pa_thread_mq thread_mq;
89     pa_rtpoll *rtpoll;
90
91     snd_pcm_t *pcm_handle;
92
93     char *paths_dir;
94     pa_alsa_fdlist *mixer_fdl;
95     pa_alsa_mixer_pdata *mixer_pd;
96     snd_mixer_t *mixer_handle;
97     pa_alsa_path_set *mixer_path_set;
98     pa_alsa_path *mixer_path;
99
100     pa_cvolume hardware_volume;
101
102     size_t
103         frame_size,
104         fragment_size,
105         hwbuf_size,
106         tsched_watermark,
107         tsched_watermark_ref,
108         hwbuf_unused,
109         min_sleep,
110         min_wakeup,
111         watermark_inc_step,
112         watermark_dec_step,
113         watermark_inc_threshold,
114         watermark_dec_threshold;
115
116     pa_usec_t watermark_dec_not_before;
117     pa_usec_t min_latency_ref;
118
119     char *device_name;  /* name of the PCM device */
120     char *control_device; /* name of the control device */
121
122     pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;
123
124     pa_bool_t first;
125
126     pa_rtpoll_item *alsa_rtpoll_item;
127
128     snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
129
130     pa_smoother *smoother;
131     uint64_t read_count;
132     pa_usec_t smoother_interval;
133     pa_usec_t last_smoother_update;
134
135     pa_reserve_wrapper *reserve;
136     pa_hook_slot *reserve_slot;
137     pa_reserve_monitor_wrapper *monitor;
138     pa_hook_slot *monitor_slot;
139 };
140
141 static void userdata_free(struct userdata *u);
142
143 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
144     pa_assert(r);
145     pa_assert(u);
146
147     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
148         return PA_HOOK_CANCEL;
149
150     return PA_HOOK_OK;
151 }
152
153 static void reserve_done(struct userdata *u) {
154     pa_assert(u);
155
156     if (u->reserve_slot) {
157         pa_hook_slot_free(u->reserve_slot);
158         u->reserve_slot = NULL;
159     }
160
161     if (u->reserve) {
162         pa_reserve_wrapper_unref(u->reserve);
163         u->reserve = NULL;
164     }
165 }
166
167 static void reserve_update(struct userdata *u) {
168     const char *description;
169     pa_assert(u);
170
171     if (!u->source || !u->reserve)
172         return;
173
174     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
175         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
176 }
177
178 static int reserve_init(struct userdata *u, const char *dname) {
179     char *rname;
180
181     pa_assert(u);
182     pa_assert(dname);
183
184     if (u->reserve)
185         return 0;
186
187     if (pa_in_system_mode())
188         return 0;
189
190     if (!(rname = pa_alsa_get_reserve_name(dname)))
191         return 0;
192
193     /* We are resuming, try to lock the device */
194     u->reserve = pa_reserve_wrapper_get(u->core, rname);
195     pa_xfree(rname);
196
197     if (!(u->reserve))
198         return -1;
199
200     reserve_update(u);
201
202     pa_assert(!u->reserve_slot);
203     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
204
205     return 0;
206 }
207
208 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
209     pa_bool_t b;
210
211     pa_assert(w);
212     pa_assert(u);
213
214     b = PA_PTR_TO_UINT(busy) && !u->reserve;
215
216     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
217     return PA_HOOK_OK;
218 }
219
220 static void monitor_done(struct userdata *u) {
221     pa_assert(u);
222
223     if (u->monitor_slot) {
224         pa_hook_slot_free(u->monitor_slot);
225         u->monitor_slot = NULL;
226     }
227
228     if (u->monitor) {
229         pa_reserve_monitor_wrapper_unref(u->monitor);
230         u->monitor = NULL;
231     }
232 }
233
234 static int reserve_monitor_init(struct userdata *u, const char *dname) {
235     char *rname;
236
237     pa_assert(u);
238     pa_assert(dname);
239
240     if (pa_in_system_mode())
241         return 0;
242
243     if (!(rname = pa_alsa_get_reserve_name(dname)))
244         return 0;
245
246     /* Watch the device reservation so we can suspend when someone else grabs it */
247     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
248     pa_xfree(rname);
249
250     if (!(u->monitor))
251         return -1;
252
253     pa_assert(!u->monitor_slot);
254     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
255
256     return 0;
257 }
258
259 static void fix_min_sleep_wakeup(struct userdata *u) {
260     size_t max_use, max_use_2;
261
262     pa_assert(u);
263     pa_assert(u->use_tsched);
264
265     max_use = u->hwbuf_size - u->hwbuf_unused;
266     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
267
268     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
269     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
270
271     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
272     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
273 }
274
275 static void fix_tsched_watermark(struct userdata *u) {
276     size_t max_use;
277     pa_assert(u);
278     pa_assert(u->use_tsched);
279
280     max_use = u->hwbuf_size - u->hwbuf_unused;
281
282     if (u->tsched_watermark > max_use - u->min_sleep)
283         u->tsched_watermark = max_use - u->min_sleep;
284
285     if (u->tsched_watermark < u->min_wakeup)
286         u->tsched_watermark = u->min_wakeup;
287 }
288
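/* Called when we had (or nearly had) an overrun: first try to raise the
 * wakeup watermark; if it is already maxed out, raise the source's minimal
 * latency instead. */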
289 static void increase_watermark(struct userdata *u) {
290     size_t old_watermark;
291     pa_usec_t old_min_latency, new_min_latency;
292
293     pa_assert(u);
294     pa_assert(u->use_tsched);
295
296     /* First, just try to increase the watermark */
297     old_watermark = u->tsched_watermark;
298     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
299     fix_tsched_watermark(u);
300
301     if (old_watermark != u->tsched_watermark) {
302         pa_log_info("Increasing wakeup watermark to %0.2f ms",
303                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
304         return;
305     }
306
307     /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
308     old_min_latency = u->source->thread_info.min_latency;
309     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
310     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
311
312     if (old_min_latency != new_min_latency) {
313         pa_log_info("Increasing minimal latency to %0.2f ms",
314                     (double) new_min_latency / PA_USEC_PER_MSEC);
315
316         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
317     }
318
319     /* When we reach this we're officially fucked! */
320 }
321
322 static void decrease_watermark(struct userdata *u) {
323     size_t old_watermark;
324     pa_usec_t now;
325
326     pa_assert(u);
327     pa_assert(u->use_tsched);
328
329     now = pa_rtclock_now();
330
331     if (u->watermark_dec_not_before <= 0)
332         goto restart;
333
334     if (u->watermark_dec_not_before > now)
335         return;
336
337     old_watermark = u->tsched_watermark;
338
339     if (u->tsched_watermark < u->watermark_dec_step)
340         u->tsched_watermark = u->tsched_watermark / 2;
341     else
342         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
343
344     fix_tsched_watermark(u);
345
346     if (old_watermark != u->tsched_watermark)
347         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
348                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
349
350     /* We don't change the latency range */
351
352 restart:
353     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
354 }
355
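/* Split the currently requested latency into the time we may sleep until the
 * next wakeup and the margin reserved for actually processing the data,
 * based on the current watermark:
 *
 *    sleep_usec   = requested_latency - watermark
 *    process_usec = watermark
 */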
356 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
357     pa_usec_t wm, usec;
358
359     pa_assert(sleep_usec);
360     pa_assert(process_usec);
361
362     pa_assert(u);
363     pa_assert(u->use_tsched);
364
365     usec = pa_source_get_requested_latency_within_thread(u->source);
366
367     if (usec == (pa_usec_t) -1)
368         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
369
370     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
371
372     if (wm > usec)
373         wm = usec/2;
374
375     *sleep_usec = usec - wm;
376     *process_usec = wm;
377
378 #ifdef DEBUG_TIMING
379     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
380                  (unsigned long) (usec / PA_USEC_PER_MSEC),
381                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
382                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
383 #endif
384 }
385
386 static int try_recover(struct userdata *u, const char *call, int err) {
387     pa_assert(u);
388     pa_assert(call);
389     pa_assert(err < 0);
390
391     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
392
393     pa_assert(err != -EAGAIN);
394
395     if (err == -EPIPE)
396         pa_log_debug("%s: Buffer overrun!", call);
397
398     if (err == -ESTRPIPE)
399         pa_log_debug("%s: System suspended!", call);
400
401     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
402         pa_log("%s: %s", call, pa_alsa_strerror(err));
403         return -1;
404     }
405
406     u->first = TRUE;
407     return 0;
408 }
409
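/* Given how many bytes ALSA reports as available, work out how much room is
 * left in the usable part of the hardware buffer before an overrun, and let
 * the watermark adapt accordingly. */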
410 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
411     size_t left_to_record;
412     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
413     pa_bool_t overrun = FALSE;
414
415     /* We use <= instead of < for this check here because an overrun
416      * only happens after the last sample was processed, not as soon as
417      * it is removed from the buffer. This is particularly important
418      * when block transfer is used. */
419
420     if (n_bytes <= rec_space)
421         left_to_record = rec_space - n_bytes;
422     else {
423
424         /* We got a dropout. What a mess! */
425         left_to_record = 0;
426         overrun = TRUE;
427
428 #ifdef DEBUG_TIMING
429         PA_DEBUG_TRAP;
430 #endif
431
432         if (pa_log_ratelimit(PA_LOG_INFO))
433             pa_log_info("Overrun!");
434     }
435
436 #ifdef DEBUG_TIMING
437     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
438 #endif
439
440     if (u->use_tsched) {
441         pa_bool_t reset_not_before = TRUE;
442
443         if (overrun || left_to_record < u->watermark_inc_threshold)
444             increase_watermark(u);
445         else if (left_to_record > u->watermark_dec_threshold) {
446             reset_not_before = FALSE;
447
448             /* We decrease the watermark only if we have actually
449              * been woken up by a timeout. If something else woke
450              * us up it's too easy to fulfill the deadlines... */
451
452             if (on_timeout)
453                 decrease_watermark(u);
454         }
455
456         if (reset_not_before)
457             u->watermark_dec_not_before = 0;
458     }
459
460     return left_to_record;
461 }
462
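/* Capture path used with mmap access: the ALSA ring buffer is mapped directly
 * and every chunk is posted to the source as a fixed memblock pointing into
 * the mapped area, avoiding an extra copy. unix_read() below is the
 * snd_pcm_readi() based fallback. */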
463 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
464     pa_bool_t work_done = FALSE;
465     pa_usec_t max_sleep_usec = 0, process_usec = 0;
466     size_t left_to_record;
467     unsigned j = 0;
468
469     pa_assert(u);
470     pa_source_assert_ref(u->source);
471
472     if (u->use_tsched)
473         hw_sleep_time(u, &max_sleep_usec, &process_usec);
474
475     for (;;) {
476         snd_pcm_sframes_t n;
477         size_t n_bytes;
478         int r;
479         pa_bool_t after_avail = TRUE;
480
481         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
482
483             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
484                 continue;
485
486             return r;
487         }
488
489         n_bytes = (size_t) n * u->frame_size;
490
491 #ifdef DEBUG_TIMING
492         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
493 #endif
494
495         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
496         on_timeout = FALSE;
497
498         if (u->use_tsched)
499             if (!polled &&
500                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
501 #ifdef DEBUG_TIMING
502                 pa_log_debug("Not reading, because too early.");
503 #endif
504                 break;
505             }
506
507         if (PA_UNLIKELY(n_bytes <= 0)) {
508
509             if (polled)
510                 PA_ONCE_BEGIN {
511                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
512                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
513                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
514                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
515                            pa_strnull(dn));
516                     pa_xfree(dn);
517                 } PA_ONCE_END;
518
519 #ifdef DEBUG_TIMING
520             pa_log_debug("Not reading, because not necessary.");
521 #endif
522             break;
523         }
524
525
526         if (++j > 10) {
527 #ifdef DEBUG_TIMING
528             pa_log_debug("Not filling up, because already too many iterations.");
529 #endif
530
531             break;
532         }
533
534         polled = FALSE;
535
536 #ifdef DEBUG_TIMING
537         pa_log_debug("Reading");
538 #endif
539
540         for (;;) {
541             pa_memchunk chunk;
542             void *p;
543             int err;
544             const snd_pcm_channel_area_t *areas;
545             snd_pcm_uframes_t offset, frames;
546             snd_pcm_sframes_t sframes;
547
548             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
549 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
550
551             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
552
553                 if (!after_avail && err == -EAGAIN)
554                     break;
555
556                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
557                     continue;
558
559                 return r;
560             }
561
562             /* Make sure that if these memblocks need to be copied they will fit into one slot */
563             if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
564                 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
565
566             if (!after_avail && frames == 0)
567                 break;
568
569             pa_assert(frames > 0);
570             after_avail = FALSE;
571
572             /* Check these are multiples of 8 bit */
573             pa_assert((areas[0].first & 7) == 0);
574             pa_assert((areas[0].step & 7) == 0);
575
576             /* We assume a single interleaved memory buffer */
577             pa_assert((areas[0].first >> 3) == 0);
578             pa_assert((areas[0].step >> 3) == u->frame_size);
579
580             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
581
582             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
583             chunk.length = pa_memblock_get_length(chunk.memblock);
584             chunk.index = 0;
585
586             pa_source_post(u->source, &chunk);
587             pa_memblock_unref_fixed(chunk.memblock);
588
589             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
590
591                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
592                     continue;
593
594                 return r;
595             }
596
597             work_done = TRUE;
598
599             u->read_count += frames * u->frame_size;
600
601 #ifdef DEBUG_TIMING
602             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
603 #endif
604
605             if ((size_t) frames * u->frame_size >= n_bytes)
606                 break;
607
608             n_bytes -= (size_t) frames * u->frame_size;
609         }
610     }
611
612     if (u->use_tsched) {
613         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
614         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
615
616         if (*sleep_usec > process_usec)
617             *sleep_usec -= process_usec;
618         else
619             *sleep_usec = 0;
620     }
621
622     return work_done ? 1 : 0;
623 }
624
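/* Fallback capture path for devices without mmap support: the data is copied
 * from the device into a freshly allocated memblock with snd_pcm_readi() and
 * then posted to the source. */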
625 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
626     int work_done = FALSE;
627     pa_usec_t max_sleep_usec = 0, process_usec = 0;
628     size_t left_to_record;
629     unsigned j = 0;
630
631     pa_assert(u);
632     pa_source_assert_ref(u->source);
633
634     if (u->use_tsched)
635         hw_sleep_time(u, &max_sleep_usec, &process_usec);
636
637     for (;;) {
638         snd_pcm_sframes_t n;
639         size_t n_bytes;
640         int r;
641         pa_bool_t after_avail = TRUE;
642
643         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
644
645             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
646                 continue;
647
648             return r;
649         }
650
651         n_bytes = (size_t) n * u->frame_size;
652         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
653         on_timeout = FALSE;
654
655         if (u->use_tsched)
656             if (!polled &&
657                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
658                 break;
659
660         if (PA_UNLIKELY(n_bytes <= 0)) {
661
662             if (polled)
663                 PA_ONCE_BEGIN {
664                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
665                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
666                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
667                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
668                            pa_strnull(dn));
669                     pa_xfree(dn);
670                 } PA_ONCE_END;
671
672             break;
673         }
674
675         if (++j > 10) {
676 #ifdef DEBUG_TIMING
677             pa_log_debug("Not filling up, because already too many iterations.");
678 #endif
679
680             break;
681         }
682
683         polled = FALSE;
684
685         for (;;) {
686             void *p;
687             snd_pcm_sframes_t frames;
688             pa_memchunk chunk;
689
690             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
691
692             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
693
694             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
695                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
696
697 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
698
699             p = pa_memblock_acquire(chunk.memblock);
700             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
701             pa_memblock_release(chunk.memblock);
702
703             if (PA_UNLIKELY(frames < 0)) {
704                 pa_memblock_unref(chunk.memblock);
705
706                 if (!after_avail && (int) frames == -EAGAIN)
707                     break;
708
709                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
710                     continue;
711
712                 return r;
713             }
714
715             if (!after_avail && frames == 0) {
716                 pa_memblock_unref(chunk.memblock);
717                 break;
718             }
719
720             pa_assert(frames > 0);
721             after_avail = FALSE;
722
723             chunk.index = 0;
724             chunk.length = (size_t) frames * u->frame_size;
725
726             pa_source_post(u->source, &chunk);
727             pa_memblock_unref(chunk.memblock);
728
729             work_done = TRUE;
730
731             u->read_count += frames * u->frame_size;
732
733 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
734
735             if ((size_t) frames * u->frame_size >= n_bytes)
736                 break;
737
738             n_bytes -= (size_t) frames * u->frame_size;
739         }
740     }
741
742     if (u->use_tsched) {
743         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
744         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
745
746         if (*sleep_usec > process_usec)
747             *sleep_usec -= process_usec;
748         else
749             *sleep_usec = 0;
750     }
751
752     return work_done ? 1 : 0;
753 }
754
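/* Feed a new (system time, stream time) sample pair into the time smoother.
 * The stream time is derived from the total number of bytes read so far plus
 * the current device delay, so that source_get_latency() can interpolate the
 * capture latency in between updates. */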
755 static void update_smoother(struct userdata *u) {
756     snd_pcm_sframes_t delay = 0;
757     uint64_t position;
758     int err;
759     pa_usec_t now1 = 0, now2;
760     snd_pcm_status_t *status;
761
762     snd_pcm_status_alloca(&status);
763
764     pa_assert(u);
765     pa_assert(u->pcm_handle);
766
767     /* Let's update the time smoother */
768
769     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
770         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
771         return;
772     }
773
774     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
775         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
776     else {
777         snd_htimestamp_t htstamp = { 0, 0 };
778         snd_pcm_status_get_htstamp(status, &htstamp);
779         now1 = pa_timespec_load(&htstamp);
780     }
781
782     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
783     if (now1 <= 0)
784         now1 = pa_rtclock_now();
785
786     /* check if the time since the last update is bigger than the interval */
787     if (u->last_smoother_update > 0)
788         if (u->last_smoother_update + u->smoother_interval > now1)
789             return;
790
791     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
792     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
793
794     pa_smoother_put(u->smoother, now1, now2);
795
796     u->last_smoother_update = now1;
797     /* exponentially increase the update interval up to the MAX limit */
798     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
799 }
800
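/* The source latency is the audio the device has already captured but we
 * have not yet posted, i.e. the smoothed stream time minus the time
 * corresponding to read_count. */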
801 static pa_usec_t source_get_latency(struct userdata *u) {
802     int64_t delay;
803     pa_usec_t now1, now2;
804
805     pa_assert(u);
806
807     now1 = pa_rtclock_now();
808     now2 = pa_smoother_get(u->smoother, now1);
809
810     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
811
812     return delay >= 0 ? (pa_usec_t) delay : 0;
813 }
814
815 static int build_pollfd(struct userdata *u) {
816     pa_assert(u);
817     pa_assert(u->pcm_handle);
818
819     if (u->alsa_rtpoll_item)
820         pa_rtpoll_item_free(u->alsa_rtpoll_item);
821
822     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
823         return -1;
824
825     return 0;
826 }
827
828 /* Called from IO context */
829 static int suspend(struct userdata *u) {
830     pa_assert(u);
831     pa_assert(u->pcm_handle);
832
833     pa_smoother_pause(u->smoother, pa_rtclock_now());
834
835     /* Let's suspend */
836     snd_pcm_close(u->pcm_handle);
837     u->pcm_handle = NULL;
838
839     if (u->alsa_rtpoll_item) {
840         pa_rtpoll_item_free(u->alsa_rtpoll_item);
841         u->alsa_rtpoll_item = NULL;
842     }
843
844     pa_log_info("Device suspended...");
845
846     return 0;
847 }
848
849 /* Called from IO context */
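/* Derive hwbuf_unused and avail_min from the currently requested latency: if
 * less than the full hardware buffer was asked for, the remainder of the
 * buffer is declared unused so that the watermark logic only operates on the
 * part we actually intend to use. */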
850 static int update_sw_params(struct userdata *u) {
851     snd_pcm_uframes_t avail_min;
852     int err;
853
854     pa_assert(u);
855
856     /* Use the full buffer if no one asked us for anything specific */
857     u->hwbuf_unused = 0;
858
859     if (u->use_tsched) {
860         pa_usec_t latency;
861
862         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
863             size_t b;
864
865             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
866
867             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
868
869             /* We need at least one sample in our buffer */
870
871             if (PA_UNLIKELY(b < u->frame_size))
872                 b = u->frame_size;
873
874             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
875         }
876
877         fix_min_sleep_wakeup(u);
878         fix_tsched_watermark(u);
879     }
880
881     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
882
883     avail_min = 1;
884
885     if (u->use_tsched) {
886         pa_usec_t sleep_usec, process_usec;
887
888         hw_sleep_time(u, &sleep_usec, &process_usec);
889         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
890     }
891
892     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
893
894     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
895         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
896         return err;
897     }
898
899     return 0;
900 }
901
902 /* Called from IO Context on unsuspend or from main thread when creating source */
903 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
904                             pa_bool_t in_thread)
905 {
906     u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
907                                                     &u->source->sample_spec);
908
909     u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
910     u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
911
912     u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
913     u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
914
915     fix_min_sleep_wakeup(u);
916     fix_tsched_watermark(u);
917
918     if (in_thread)
919         pa_source_set_latency_range_within_thread(u->source,
920                                                   u->min_latency_ref,
921                                                   pa_bytes_to_usec(u->hwbuf_size, ss));
922     else {
923         pa_source_set_latency_range(u->source,
924                                     0,
925                                     pa_bytes_to_usec(u->hwbuf_size, ss));
926
927         /* Work around the assert in pa_source_set_latency_range_within_thread():
928            keep track of min_latency and reuse it when
929            this routine is called from the IO context */
930         u->min_latency_ref = u->source->thread_info.min_latency;
931     }
932
933     pa_log_info("Time scheduling watermark is %0.2fms",
934                 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
935 }
936
937 /* Called from IO context */
938 static int unsuspend(struct userdata *u) {
939     pa_sample_spec ss;
940     int err;
941     pa_bool_t b, d;
942     snd_pcm_uframes_t period_size, buffer_size;
943
944     pa_assert(u);
945     pa_assert(!u->pcm_handle);
946
947     pa_log_info("Trying resume...");
948
949     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
950                             SND_PCM_NONBLOCK|
951                             SND_PCM_NO_AUTO_RESAMPLE|
952                             SND_PCM_NO_AUTO_CHANNELS|
953                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
954         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
955         goto fail;
956     }
957
958     ss = u->source->sample_spec;
959     period_size = u->fragment_size / u->frame_size;
960     buffer_size = u->hwbuf_size / u->frame_size;
961     b = u->use_mmap;
962     d = u->use_tsched;
963
964     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
965         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
966         goto fail;
967     }
968
969     if (b != u->use_mmap || d != u->use_tsched) {
970         pa_log_warn("Resume failed, couldn't get original access mode.");
971         goto fail;
972     }
973
974     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
975         pa_log_warn("Resume failed, couldn't restore original sample settings.");
976         goto fail;
977     }
978
979     if (period_size*u->frame_size != u->fragment_size ||
980         buffer_size*u->frame_size != u->hwbuf_size) {
981         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
982                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
983                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
984         goto fail;
985     }
986
987     if (update_sw_params(u) < 0)
988         goto fail;
989
990     if (build_pollfd(u) < 0)
991         goto fail;
992
993     /* FIXME: We need to reload the volume somehow */
994
995     u->read_count = 0;
996     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
997     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
998     u->last_smoother_update = 0;
999
1000     u->first = TRUE;
1001
1002     /* reset the watermark to the value defined when source was created */
1003     if (u->use_tsched)
1004         reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);
1005
1006     pa_log_info("Resumed successfully...");
1007
1008     return 0;
1009
1010 fail:
1011     if (u->pcm_handle) {
1012         snd_pcm_close(u->pcm_handle);
1013         u->pcm_handle = NULL;
1014     }
1015
1016     return -PA_ERR_IO;
1017 }
1018
1019 /* Called from IO context */
1020 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1021     struct userdata *u = PA_SOURCE(o)->userdata;
1022
1023     switch (code) {
1024
1025         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1026             pa_usec_t r = 0;
1027
1028             if (u->pcm_handle)
1029                 r = source_get_latency(u);
1030
1031             *((pa_usec_t*) data) = r;
1032
1033             return 0;
1034         }
1035
1036         case PA_SOURCE_MESSAGE_SET_STATE:
1037
1038             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1039
1040                 case PA_SOURCE_SUSPENDED: {
1041                     int r;
1042
1043                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1044
1045                     if ((r = suspend(u)) < 0)
1046                         return r;
1047
1048                     break;
1049                 }
1050
1051                 case PA_SOURCE_IDLE:
1052                 case PA_SOURCE_RUNNING: {
1053                     int r;
1054
1055                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1056                         if (build_pollfd(u) < 0)
1057                             return -PA_ERR_IO;
1058                     }
1059
1060                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1061                         if ((r = unsuspend(u)) < 0)
1062                             return r;
1063                     }
1064
1065                     break;
1066                 }
1067
1068                 case PA_SOURCE_UNLINKED:
1069                 case PA_SOURCE_INIT:
1070                 case PA_SOURCE_INVALID_STATE:
1071                     ;
1072             }
1073
1074             break;
1075     }
1076
1077     return pa_source_process_msg(o, code, data, offset, chunk);
1078 }
1079
1080 /* Called from main context */
1081 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1082     pa_source_state_t old_state;
1083     struct userdata *u;
1084
1085     pa_source_assert_ref(s);
1086     pa_assert_se(u = s->userdata);
1087
1088     old_state = pa_source_get_state(u->source);
1089
1090     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1091         reserve_done(u);
1092     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1093         if (reserve_init(u, u->device_name) < 0)
1094             return -PA_ERR_BUSY;
1095
1096     return 0;
1097 }
1098
1099 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1100     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1101
1102     pa_assert(u);
1103     pa_assert(u->mixer_handle);
1104
1105     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1106         return 0;
1107
1108     if (!PA_SOURCE_IS_LINKED(u->source->state))
1109         return 0;
1110
1111     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1112         return 0;
1113
1114     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1115         pa_source_get_volume(u->source, TRUE);
1116         pa_source_get_mute(u->source, TRUE);
1117     }
1118
1119     return 0;
1120 }
1121
1122 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1123     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1124
1125     pa_assert(u);
1126     pa_assert(u->mixer_handle);
1127
1128     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1129         return 0;
1130
1131     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1132         return 0;
1133
1134     if (mask & SND_CTL_EVENT_MASK_VALUE)
1135         pa_source_update_volume_and_mute(u->source);
1136
1137     return 0;
1138 }
1139
1140 static void source_get_volume_cb(pa_source *s) {
1141     struct userdata *u = s->userdata;
1142     pa_cvolume r;
1143     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1144
1145     pa_assert(u);
1146     pa_assert(u->mixer_path);
1147     pa_assert(u->mixer_handle);
1148
1149     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1150         return;
1151
1152     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1153     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1154
1155     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1156
1157     if (u->mixer_path->has_dB) {
1158         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1159
1160         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1161     }
1162
1163     if (pa_cvolume_equal(&u->hardware_volume, &r))
1164         return;
1165
1166     s->real_volume = u->hardware_volume = r;
1167
1168     /* Hmm, so the hardware volume changed, let's reset our software volume */
1169     if (u->mixer_path->has_dB)
1170         pa_source_set_soft_volume(s, NULL);
1171 }
1172
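/* Apply the requested volume by setting the nearest value the hardware
 * provides and, if the mixer has a dB scale, making up the difference with a
 * software volume -- unless the residual is within VOLUME_ACCURACY, in which
 * case the software correction is skipped. */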
1173 static void source_set_volume_cb(pa_source *s) {
1174     struct userdata *u = s->userdata;
1175     pa_cvolume r;
1176     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1177     pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1178
1179     pa_assert(u);
1180     pa_assert(u->mixer_path);
1181     pa_assert(u->mixer_handle);
1182
1183     /* Shift up by the base volume */
1184     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1185
1186     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1187         return;
1188
1189     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1190     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1191
1192     u->hardware_volume = r;
1193
1194     if (u->mixer_path->has_dB) {
1195         pa_cvolume new_soft_volume;
1196         pa_bool_t accurate_enough;
1197         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1198
1199         /* Match exactly what the user requested by software */
1200         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1201
1202         /* If the adjustment to do in software is only minimal we
1203          * can skip it. That saves us CPU at the expense of a bit of
1204          * accuracy */
1205         accurate_enough =
1206             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1207             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1208
1209         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1210         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1211         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1212         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1213         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1214                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1215                      pa_yes_no(accurate_enough));
1216         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1217
1218         if (!accurate_enough)
1219             s->soft_volume = new_soft_volume;
1220
1221     } else {
1222         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1223
1224         /* We can't match exactly what the user requested, hence let's
1225          * at least tell the user about it */
1226
1227         s->real_volume = r;
1228     }
1229 }
1230
1231 static void source_write_volume_cb(pa_source *s) {
1232     struct userdata *u = s->userdata;
1233     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1234
1235     pa_assert(u);
1236     pa_assert(u->mixer_path);
1237     pa_assert(u->mixer_handle);
1238     pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1239
1240     /* Shift up by the base volume */
1241     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1242
1243     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1244         pa_log_error("Writing HW volume failed");
1245     else {
1246         pa_cvolume tmp_vol;
1247         pa_bool_t accurate_enough;
1248
1249         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1250         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1251
1252         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1253         accurate_enough =
1254             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1255             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1256
1257         if (!accurate_enough) {
1258             union {
1259                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1260                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1261             } vol;
1262
1263             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1264                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1265                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1266             pa_log_debug("                                           in dB: %s (request) != %s",
1267                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1268                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1269         }
1270     }
1271 }
1272
1273 static void source_get_mute_cb(pa_source *s) {
1274     struct userdata *u = s->userdata;
1275     pa_bool_t b;
1276
1277     pa_assert(u);
1278     pa_assert(u->mixer_path);
1279     pa_assert(u->mixer_handle);
1280
1281     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1282         return;
1283
1284     s->muted = b;
1285 }
1286
1287 static void source_set_mute_cb(pa_source *s) {
1288     struct userdata *u = s->userdata;
1289
1290     pa_assert(u);
1291     pa_assert(u->mixer_path);
1292     pa_assert(u->mixer_handle);
1293
1294     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1295 }
1296
1297 static void mixer_volume_init(struct userdata *u) {
1298     pa_assert(u);
1299
1300     if (!u->mixer_path->has_volume) {
1301         pa_source_set_write_volume_callback(u->source, NULL);
1302         pa_source_set_get_volume_callback(u->source, NULL);
1303         pa_source_set_set_volume_callback(u->source, NULL);
1304
1305         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1306     } else {
1307         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1308         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1309
1310         if (u->mixer_path->has_dB && u->deferred_volume) {
1311             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1312             pa_log_info("Successfully enabled synchronous volume.");
1313         } else
1314             pa_source_set_write_volume_callback(u->source, NULL);
1315
1316         if (u->mixer_path->has_dB) {
1317             pa_source_enable_decibel_volume(u->source, TRUE);
1318             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1319
1320             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1321             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1322
1323             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1324         } else {
1325             pa_source_enable_decibel_volume(u->source, FALSE);
1326             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1327
1328             u->source->base_volume = PA_VOLUME_NORM;
1329             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1330         }
1331
1332         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1333     }
1334
1335     if (!u->mixer_path->has_mute) {
1336         pa_source_set_get_mute_callback(u->source, NULL);
1337         pa_source_set_set_mute_callback(u->source, NULL);
1338         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1339     } else {
1340         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1341         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1342         pa_log_info("Using hardware mute control.");
1343     }
1344 }
1345
1346 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1347     struct userdata *u = s->userdata;
1348     pa_alsa_port_data *data;
1349
1350     pa_assert(u);
1351     pa_assert(p);
1352     pa_assert(u->mixer_handle);
1353
1354     data = PA_DEVICE_PORT_DATA(p);
1355
1356     pa_assert_se(u->mixer_path = data->path);
1357     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1358
1359     mixer_volume_init(u);
1360
1361     if (data->setting)
1362         pa_alsa_setting_select(data->setting, u->mixer_handle);
1363
1364     if (s->set_mute)
1365         s->set_mute(s);
1366     if (s->set_volume)
1367         s->set_volume(s);
1368
1369     return 0;
1370 }
1371
1372 static void source_update_requested_latency_cb(pa_source *s) {
1373     struct userdata *u = s->userdata;
1374     pa_assert(u);
1375     pa_assert(u->use_tsched); /* only when timer scheduling is used
1376                                * we can dynamically adjust the
1377                                * latency */
1378
1379     if (!u->pcm_handle)
1380         return;
1381
1382     update_sw_params(u);
1383 }
1384
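/* Changing the sample rate (e.g. switching to the alternate rate) is only
 * allowed while the source is not opened; the PCM is then reconfigured with
 * the new rate on the next unsuspend. */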
1385 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1386 {
1387     struct userdata *u = s->userdata;
1388     pa_assert(u);
1389
1390     if (!PA_SOURCE_IS_OPENED(s->state)) {
1391         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1392         u->source->sample_spec.rate = rate;
1393         return TRUE;
1394     }
1395     return FALSE;
1396 }
1397
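/* The IO thread: whenever we are woken up (by the poll fds or the tsched
 * timer) read what the device has captured, update the smoother, compute the
 * next wakeup and go back to sleep in pa_rtpoll_run(). */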
1398 static void thread_func(void *userdata) {
1399     struct userdata *u = userdata;
1400     unsigned short revents = 0;
1401
1402     pa_assert(u);
1403
1404     pa_log_debug("Thread starting up");
1405
1406     if (u->core->realtime_scheduling)
1407         pa_make_realtime(u->core->realtime_priority);
1408
1409     pa_thread_mq_install(&u->thread_mq);
1410
1411     for (;;) {
1412         int ret;
1413         pa_usec_t rtpoll_sleep = 0;
1414
1415 #ifdef DEBUG_TIMING
1416         pa_log_debug("Loop");
1417 #endif
1418
1419         /* Read some data and pass it to the sources */
1420         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1421             int work_done;
1422             pa_usec_t sleep_usec = 0;
1423             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1424
1425             if (u->first) {
1426                 pa_log_info("Starting capture.");
1427                 snd_pcm_start(u->pcm_handle);
1428
1429                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1430
1431                 u->first = FALSE;
1432             }
1433
1434             if (u->use_mmap)
1435                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1436             else
1437                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1438
1439             if (work_done < 0)
1440                 goto fail;
1441
1442 /*             pa_log_debug("work_done = %i", work_done); */
1443
1444             if (work_done)
1445                 update_smoother(u);
1446
1447             if (u->use_tsched) {
1448                 pa_usec_t cusec;
1449
1450                 /* OK, the capture buffer is now empty, let's
1451                  * calculate when to wake up next */
1452
1453 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1454
1455                 /* Convert from the sound card time domain to the
1456                  * system time domain */
1457                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1458
1459 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1460
1461                 /* We don't trust the conversion, so we wake up at whichever time comes first */
1462                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1463             }
1464         }
1465
1466         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1467             pa_usec_t volume_sleep;
1468             pa_source_volume_change_apply(u->source, &volume_sleep);
1469             if (volume_sleep > 0) {
1470                 if (rtpoll_sleep > 0)
1471                     rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1472                 else
1473                     rtpoll_sleep = volume_sleep;
1474             }
1475         }
1476
1477         if (rtpoll_sleep > 0)
1478             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1479         else
1480             pa_rtpoll_set_timer_disabled(u->rtpoll);
1481
1482         /* Hmm, nothing to do. Let's sleep */
1483         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1484             goto fail;
1485
1486         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1487             pa_source_volume_change_apply(u->source, NULL);
1488
1489         if (ret == 0)
1490             goto finish;
1491
1492         /* Tell ALSA about this and process its response */
1493         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1494             struct pollfd *pollfd;
1495             int err;
1496             unsigned n;
1497
1498             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1499
1500             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1501                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1502                 goto fail;
1503             }
1504
1505             if (revents & ~POLLIN) {
1506                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1507                     goto fail;
1508
1509                 u->first = TRUE;
1510                 revents = 0;
1511             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1512                 pa_log_debug("Wakeup from ALSA!");
1513
1514         } else
1515             revents = 0;
1516     }
1517
1518 fail:
1519     /* If this was not a regular exit from the loop we have to continue
1520      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1521     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1522     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1523
1524 finish:
1525     pa_log_debug("Thread shutting down");
1526 }
1527
1528 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1529     const char *n;
1530     char *t;
1531
1532     pa_assert(data);
1533     pa_assert(ma);
1534     pa_assert(device_name);
1535
1536     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1537         pa_source_new_data_set_name(data, n);
1538         data->namereg_fail = TRUE;
1539         return;
1540     }
1541
1542     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1543         data->namereg_fail = TRUE;
1544     else {
1545         n = device_id ? device_id : device_name;
1546         data->namereg_fail = FALSE;
1547     }
1548
1549     if (mapping)
1550         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1551     else
1552         t = pa_sprintf_malloc("alsa_input.%s", n);
1553
1554     pa_source_new_data_set_name(data, t);
1555     pa_xfree(t);
1556 }
1557
1558 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1559
1560     if (!mapping && !element)
1561         return;
1562
1563     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1564         pa_log_info("Failed to find a working mixer device.");
1565         return;
1566     }
1567
1568     if (element) {
1569
1570         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1571             goto fail;
1572
1573         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1574             goto fail;
1575
1576         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1577         pa_alsa_path_dump(u->mixer_path);
1578     } else {
1579
1580         if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT, u->paths_dir)))
1581             goto fail;
1582
1583         pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1584     }
1585
1586     return;
1587
1588 fail:
1589
1590     if (u->mixer_path_set) {
1591         pa_alsa_path_set_free(u->mixer_path_set);
1592         u->mixer_path_set = NULL;
1593     } else if (u->mixer_path) {
1594         pa_alsa_path_free(u->mixer_path);
1595         u->mixer_path = NULL;
1596     }
1597
1598     if (u->mixer_handle) {
1599         snd_mixer_close(u->mixer_handle);
1600         u->mixer_handle = NULL;
1601     }
1602 }
1603
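/* Activate the mixer path belonging to the active port (or the only path we
 * have) and hook up mixer event callbacks so that external volume/mute
 * changes are picked up -- via the IO thread rtpoll when deferred volume is
 * used, via the main loop otherwise. */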
1604 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1605     pa_bool_t need_mixer_callback = FALSE;
1606
1607     pa_assert(u);
1608
1609     if (!u->mixer_handle)
1610         return 0;
1611
1612     if (u->source->active_port) {
1613         pa_alsa_port_data *data;
1614
1615         /* We have a list of supported paths, so let's activate the
1616          * one that has been chosen as active */
1617
1618         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1619         u->mixer_path = data->path;
1620
1621         pa_alsa_path_select(data->path, u->mixer_handle);
1622
1623         if (data->setting)
1624             pa_alsa_setting_select(data->setting, u->mixer_handle);
1625
1626     } else {
1627
1628         if (!u->mixer_path && u->mixer_path_set)
1629             u->mixer_path = u->mixer_path_set->paths;
1630
1631         if (u->mixer_path) {
1632             /* We have only a single path, so let's activate it */
1633
1634             pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1635
1636             if (u->mixer_path->settings)
1637                 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1638         } else
1639             return 0;
1640     }
1641
1642     mixer_volume_init(u);
1643
1644     /* Will we need to register callbacks? */
1645     if (u->mixer_path_set && u->mixer_path_set->paths) {
1646         pa_alsa_path *p;
1647
1648         PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1649             if (p->has_volume || p->has_mute)
1650                 need_mixer_callback = TRUE;
1651         }
1652     }
1653     else if (u->mixer_path)
1654         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1655
1656     if (need_mixer_callback) {
1657         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1658         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1659             u->mixer_pd = pa_alsa_mixer_pdata_new();
1660             mixer_callback = io_mixer_callback;
1661
1662             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1663                 pa_log("Failed to initialize file descriptor monitoring");
1664                 return -1;
1665             }
1666         } else {
1667             u->mixer_fdl = pa_alsa_fdlist_new();
1668             mixer_callback = ctl_mixer_callback;
1669
1670             if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1671                 pa_log("Failed to initialize file descriptor monitoring");
1672                 return -1;
1673             }
1674         }
1675
1676         if (u->mixer_path_set)
1677             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1678         else
1679             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1680     }
1681
1682     return 0;
1683 }
1684
1685 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1686
1687     struct userdata *u = NULL;
1688     const char *dev_id = NULL;
1689     pa_sample_spec ss;
1690     uint32_t alternate_sample_rate;
1691     pa_channel_map map;
1692     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1693     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1694     size_t frame_size;
1695     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE;
1696     pa_source_new_data data;
1697     pa_alsa_profile_set *profile_set = NULL;
1698
1699     pa_assert(m);
1700     pa_assert(ma);
1701
1702     ss = m->core->default_sample_spec;
1703     map = m->core->default_channel_map;
1704     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1705         pa_log("Failed to parse sample specification and channel map");
1706         goto fail;
1707     }
1708
1709     alternate_sample_rate = m->core->alternate_sample_rate;
1710     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1711         pa_log("Failed to parse alternate sample rate");
1712         goto fail;
1713     }
1714
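    /* Start from the core's default buffer metrics; the fragment, buffer and
     * timer-scheduling sizes may be overridden by module arguments below and
     * are then converted from bytes to frames. */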
1715     frame_size = pa_frame_size(&ss);
1716
1717     nfrags = m->core->default_n_fragments;
1718     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1719     if (frag_size <= 0)
1720         frag_size = (uint32_t) frame_size;
1721     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1722     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1723
1724     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1725         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1726         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1727         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1728         pa_log("Failed to parse buffer metrics");
1729         goto fail;
1730     }
1731
1732     buffer_size = nfrags * frag_size;
1733
1734     period_frames = frag_size/frame_size;
1735     buffer_frames = buffer_size/frame_size;
1736     tsched_frames = tsched_size/frame_size;
1737
1738     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1739         pa_log("Failed to parse mmap argument.");
1740         goto fail;
1741     }
1742
1743     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1744         pa_log("Failed to parse tsched argument.");
1745         goto fail;
1746     }
1747
1748     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1749         pa_log("Failed to parse ignore_dB argument.");
1750         goto fail;
1751     }
1752
1753     deferred_volume = m->core->deferred_volume;
1754     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1755         pa_log("Failed to parse deferred_volume argument.");
1756         goto fail;
1757     }
1758
1759     use_tsched = pa_alsa_may_tsched(use_tsched);
1760
1761     u = pa_xnew0(struct userdata, 1);
1762     u->core = m->core;
1763     u->module = m;
1764     u->use_mmap = use_mmap;
1765     u->use_tsched = use_tsched;
1766     u->deferred_volume = deferred_volume;
1767     u->first = TRUE;
1768     u->rtpoll = pa_rtpoll_new();
1769     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1770
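    /* The smoother tracks the relation between system time and the sound
     * card clock so that latency can be interpolated between updates. */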
1771     u->smoother = pa_smoother_new(
1772             SMOOTHER_ADJUST_USEC,
1773             SMOOTHER_WINDOW_USEC,
1774             TRUE,
1775             TRUE,
1776             5,
1777             pa_rtclock_now(),
1778             TRUE);
1779     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1780
1781     dev_id = pa_modargs_get_value(
1782             ma, "device_id",
1783             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1784
1785     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1786
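    /* Set up device reservation and reservation monitoring before the PCM
     * device is actually opened. */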
1787     if (reserve_init(u, dev_id) < 0)
1788         goto fail;
1789
1790     if (reserve_monitor_init(u, dev_id) < 0)
1791         goto fail;
1792
1793     b = use_mmap;
1794     d = use_tsched;
1795
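    /* Open the PCM device. Three cases: an explicit mapping (from a card
     * profile) is opened via its device id, a bare device_id= is auto-probed
     * against a freshly built profile set, and otherwise the plain device
     * string is opened directly. */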
1796     if (mapping) {
1797
1798         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1799             pa_log("device_id= not set");
1800             goto fail;
1801         }
1802
1803         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1804                       dev_id,
1805                       &u->device_name,
1806                       &ss, &map,
1807                       SND_PCM_STREAM_CAPTURE,
1808                       &period_frames, &buffer_frames, tsched_frames,
1809                       &b, &d, mapping)))
1810             goto fail;
1811
1812     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1813
1814         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1815             goto fail;
1816
1817         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1818                       dev_id,
1819                       &u->device_name,
1820                       &ss, &map,
1821                       SND_PCM_STREAM_CAPTURE,
1822                       &period_frames, &buffer_frames, tsched_frames,
1823                       &b, &d, profile_set, &mapping)))
1824             goto fail;
1825
1826     } else {
1827
1828         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1829                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1830                       &u->device_name,
1831                       &ss, &map,
1832                       SND_PCM_STREAM_CAPTURE,
1833                       &period_frames, &buffer_frames, tsched_frames,
1834                       &b, &d, FALSE)))
1835             goto fail;
1836     }
1837
1838     pa_assert(u->device_name);
1839     pa_log_info("Successfully opened device %s.", u->device_name);
1840
1841     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1842         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1843         goto fail;
1844     }
1845
1846     if (mapping)
1847         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1848
1849     if (use_mmap && !b) {
1850         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1851         u->use_mmap = use_mmap = FALSE;
1852     }
1853
1854     if (use_tsched && (!b || !d)) {
1855         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1856         u->use_tsched = use_tsched = FALSE;
1857     }
1858
1859     if (u->use_mmap)
1860         pa_log_info("Successfully enabled mmap() mode.");
1861
1862     if (u->use_tsched)
1863         pa_log_info("Successfully enabled timer-based scheduling mode.");
1864
1865     /* ALSA might tweak the sample spec, so recalculate the frame size */
1866     frame_size = pa_frame_size(&ss);
1867
1868     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1869
1870     pa_source_new_data_init(&data);
1871     data.driver = driver;
1872     data.module = m;
1873     data.card = card;
1874     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1875
1876     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1877      * variable instead of using &data.namereg_fail directly, because
1878      * data.namereg_fail is a bitfield and taking the address of a bitfield
1879      * variable is impossible. */
1880     namereg_fail = data.namereg_fail;
1881     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1882         pa_log("Failed to parse namereg_fail argument.");
1883         pa_source_new_data_done(&data);
1884         goto fail;
1885     }
1886     data.namereg_fail = namereg_fail;
1887
1888     pa_source_new_data_set_sample_spec(&data, &ss);
1889     pa_source_new_data_set_channel_map(&data, &map);
1890     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1891
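    /* Export device information in the property list: ALSA PCM details,
     * the device string, buffering metrics and the access mode in use. */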
1892     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1893     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1894     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1895     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1896     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1897
1898     if (mapping) {
1899         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1900         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1901     }
1902
1903     pa_alsa_init_description(data.proplist);
1904
1905     if (u->control_device)
1906         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1907
1908     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1909         pa_log("Invalid properties");
1910         pa_source_new_data_done(&data);
1911         goto fail;
1912     }
1913
1914     if (u->mixer_path_set)
1915         pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1916
1917     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1918     pa_source_new_data_done(&data);
1919
1920     if (!u->source) {
1921         pa_log("Failed to create source object");
1922         goto fail;
1923     }
1924
1925     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1926                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1927         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1928         goto fail;
1929     }
1930
1931     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1932                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1933         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1934         goto fail;
1935     }
1936
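    /* Wire up the source callbacks. Rate switching is only offered when an
     * alternate sample rate has been configured (see the
     * alternate_sample_rate handling above). */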
1937     u->source->parent.process_msg = source_process_msg;
1938     if (u->use_tsched)
1939         u->source->update_requested_latency = source_update_requested_latency_cb;
1940     u->source->set_state = source_set_state_cb;
1941     u->source->set_port = source_set_port_cb;
1942     if (u->source->alternate_sample_rate)
1943         u->source->update_rate = source_update_rate_cb;
1944     u->source->userdata = u;
1945
1946     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1947     pa_source_set_rtpoll(u->source, u->rtpoll);
1948
1949     u->frame_size = frame_size;
1950     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1951     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1952     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1953
1954     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1955                 (double) u->hwbuf_size / (double) u->fragment_size,
1956                 (long unsigned) u->fragment_size,
1957                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1958                 (long unsigned) u->hwbuf_size,
1959                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1960
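    /* With timer-based scheduling the watermark is adaptive and starts from
     * the configured reference value; otherwise the latency is fixed to the
     * full hardware buffer. */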
1961     if (u->use_tsched) {
1962         u->tsched_watermark_ref = tsched_watermark;
1963         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
1964     }
1965     else
1966         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1967
1968     reserve_update(u);
1969
1970     if (update_sw_params(u) < 0)
1971         goto fail;
1972
1973     if (setup_mixer(u, ignore_dB) < 0)
1974         goto fail;
1975
1976     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1977
1978     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1979         pa_log("Failed to create thread.");
1980         goto fail;
1981     }
1982
1983     /* Get initial mixer settings */
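    /* If a volume/mute was already set on the new data, push it to the
     * hardware; otherwise read the current hardware state. */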
1984     if (data.volume_is_set) {
1985         if (u->source->set_volume)
1986             u->source->set_volume(u->source);
1987     } else {
1988         if (u->source->get_volume)
1989             u->source->get_volume(u->source);
1990     }
1991
1992     if (data.muted_is_set) {
1993         if (u->source->set_mute)
1994             u->source->set_mute(u->source);
1995     } else {
1996         if (u->source->get_mute)
1997             u->source->get_mute(u->source);
1998     }
1999
2000     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2001         u->source->write_volume(u->source);
2002
2003     pa_source_put(u->source);
2004
2005     if (profile_set)
2006         pa_alsa_profile_set_free(profile_set);
2007
2008     return u->source;
2009
2010 fail:
2011
2012     if (u)
2013         userdata_free(u);
2014
2015     if (profile_set)
2016         pa_alsa_profile_set_free(profile_set);
2017
2018     return NULL;
2019 }
2020
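/* Tear down everything pa_alsa_source_new() set up: unlink the source and
 * stop the I/O thread before releasing the PCM, mixer, reservation and
 * memory resources. */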
2021 static void userdata_free(struct userdata *u) {
2022     pa_assert(u);
2023
2024     if (u->source)
2025         pa_source_unlink(u->source);
2026
2027     if (u->thread) {
2028         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2029         pa_thread_free(u->thread);
2030     }
2031
2032     pa_thread_mq_done(&u->thread_mq);
2033
2034     if (u->source)
2035         pa_source_unref(u->source);
2036
2037     if (u->mixer_pd)
2038         pa_alsa_mixer_pdata_free(u->mixer_pd);
2039
2040     if (u->alsa_rtpoll_item)
2041         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2042
2043     if (u->rtpoll)
2044         pa_rtpoll_free(u->rtpoll);
2045
2046     if (u->pcm_handle) {
2047         snd_pcm_drop(u->pcm_handle);
2048         snd_pcm_close(u->pcm_handle);
2049     }
2050
2051     if (u->mixer_fdl)
2052         pa_alsa_fdlist_free(u->mixer_fdl);
2053
2054     if (u->mixer_path_set)
2055         pa_alsa_path_set_free(u->mixer_path_set);
2056     else if (u->mixer_path)
2057         pa_alsa_path_free(u->mixer_path);
2058
2059     if (u->mixer_handle)
2060         snd_mixer_close(u->mixer_handle);
2061
2062     if (u->smoother)
2063         pa_smoother_free(u->smoother);
2064
2065     reserve_done(u);
2066     monitor_done(u);
2067
2068     pa_xfree(u->device_name);
2069     pa_xfree(u->control_device);
2070     pa_xfree(u->paths_dir);
2071     pa_xfree(u);
2072 }
2073
2074 void pa_alsa_source_free(pa_source *s) {
2075     struct userdata *u;
2076
2077     pa_source_assert_ref(s);
2078     pa_assert_se(u = s->userdata);
2079
2080     userdata_free(u);
2081 }