alsa-sink/source: Make sure volumes are synchronised after fast user switching
src/modules/alsa/alsa-source.c (profile/ivi/pulseaudio-panda.git)
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   Lesser General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
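/* Tuning constants for timer-based scheduling (tsched): default hardware
 * buffer and watermark sizes, how quickly the wakeup watermark may grow or
 * shrink, and the minimum sleep/wakeup margins. The SMOOTHER_* values below
 * control the time smoother used for latency estimation. */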
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
75 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
79
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
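/* Per-instance state, shared between the main thread and the IO thread. */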
82 struct userdata {
83     pa_core *core;
84     pa_module *module;
85     pa_source *source;
86
87     pa_thread *thread;
88     pa_thread_mq thread_mq;
89     pa_rtpoll *rtpoll;
90
91     snd_pcm_t *pcm_handle;
92
93     char *paths_dir;
94     pa_alsa_fdlist *mixer_fdl;
95     pa_alsa_mixer_pdata *mixer_pd;
96     snd_mixer_t *mixer_handle;
97     pa_alsa_path_set *mixer_path_set;
98     pa_alsa_path *mixer_path;
99
100     pa_cvolume hardware_volume;
101
102     unsigned int *rates;
103
104     size_t
105         frame_size,
106         fragment_size,
107         hwbuf_size,
108         tsched_watermark,
109         tsched_watermark_ref,
110         hwbuf_unused,
111         min_sleep,
112         min_wakeup,
113         watermark_inc_step,
114         watermark_dec_step,
115         watermark_inc_threshold,
116         watermark_dec_threshold;
117
118     pa_usec_t watermark_dec_not_before;
119     pa_usec_t min_latency_ref;
120
121     char *device_name;  /* name of the PCM device */
122     char *control_device; /* name of the control device */
123
124     pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
125
126     pa_bool_t first;
127
128     pa_rtpoll_item *alsa_rtpoll_item;
129
130     pa_smoother *smoother;
131     uint64_t read_count;
132     pa_usec_t smoother_interval;
133     pa_usec_t last_smoother_update;
134
135     pa_reserve_wrapper *reserve;
136     pa_hook_slot *reserve_slot;
137     pa_reserve_monitor_wrapper *monitor;
138     pa_hook_slot *monitor_slot;
139 };
140
141 static void userdata_free(struct userdata *u);
142
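/* Hook called by the device reservation wrapper when another process claims
 * the audio device: suspend the source, or cancel the hook if we cannot let
 * go of the device. */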
143 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
144     pa_assert(r);
145     pa_assert(u);
146
147     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
148         return PA_HOOK_CANCEL;
149
150     return PA_HOOK_OK;
151 }
152
153 static void reserve_done(struct userdata *u) {
154     pa_assert(u);
155
156     if (u->reserve_slot) {
157         pa_hook_slot_free(u->reserve_slot);
158         u->reserve_slot = NULL;
159     }
160
161     if (u->reserve) {
162         pa_reserve_wrapper_unref(u->reserve);
163         u->reserve = NULL;
164     }
165 }
166
167 static void reserve_update(struct userdata *u) {
168     const char *description;
169     pa_assert(u);
170
171     if (!u->source || !u->reserve)
172         return;
173
174     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
175         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
176 }
177
178 static int reserve_init(struct userdata *u, const char *dname) {
179     char *rname;
180
181     pa_assert(u);
182     pa_assert(dname);
183
184     if (u->reserve)
185         return 0;
186
187     if (pa_in_system_mode())
188         return 0;
189
190     if (!(rname = pa_alsa_get_reserve_name(dname)))
191         return 0;
192
193     /* We are resuming, try to lock the device */
194     u->reserve = pa_reserve_wrapper_get(u->core, rname);
195     pa_xfree(rname);
196
197     if (!(u->reserve))
198         return -1;
199
200     reserve_update(u);
201
202     pa_assert(!u->reserve_slot);
203     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
204
205     return 0;
206 }
207
208 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
209     pa_bool_t b;
210
211     pa_assert(w);
212     pa_assert(u);
213
214     b = PA_PTR_TO_UINT(busy) && !u->reserve;
215
216     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
217     return PA_HOOK_OK;
218 }
219
220 static void monitor_done(struct userdata *u) {
221     pa_assert(u);
222
223     if (u->monitor_slot) {
224         pa_hook_slot_free(u->monitor_slot);
225         u->monitor_slot = NULL;
226     }
227
228     if (u->monitor) {
229         pa_reserve_monitor_wrapper_unref(u->monitor);
230         u->monitor = NULL;
231     }
232 }
233
234 static int reserve_monitor_init(struct userdata *u, const char *dname) {
235     char *rname;
236
237     pa_assert(u);
238     pa_assert(dname);
239
240     if (pa_in_system_mode())
241         return 0;
242
243     if (!(rname = pa_alsa_get_reserve_name(dname)))
244         return 0;
245
246     /* We are resuming, try to lock the device */
247     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
248     pa_xfree(rname);
249
250     if (!(u->monitor))
251         return -1;
252
253     pa_assert(!u->monitor_slot);
254     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
255
256     return 0;
257 }
258
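/* Recompute the minimum sleep and wakeup margins in bytes from the
 * TSCHED_MIN_* constants, clamped between one frame and half of the usable
 * hardware buffer. */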
259 static void fix_min_sleep_wakeup(struct userdata *u) {
260     size_t max_use, max_use_2;
261
262     pa_assert(u);
263     pa_assert(u->use_tsched);
264
265     max_use = u->hwbuf_size - u->hwbuf_unused;
266     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
267
268     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
269     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
270
271     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
272     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
273 }
274
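/* Keep the wakeup watermark within sane bounds: no larger than the usable
 * buffer minus the minimum sleep time, and no smaller than the minimum
 * wakeup margin. */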
275 static void fix_tsched_watermark(struct userdata *u) {
276     size_t max_use;
277     pa_assert(u);
278     pa_assert(u->use_tsched);
279
280     max_use = u->hwbuf_size - u->hwbuf_unused;
281
282     if (u->tsched_watermark > max_use - u->min_sleep)
283         u->tsched_watermark = max_use - u->min_sleep;
284
285     if (u->tsched_watermark < u->min_wakeup)
286         u->tsched_watermark = u->min_wakeup;
287 }
288
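/* We got too close to an overrun: first try to enlarge the wakeup watermark;
 * if that is already maxed out and the latency range is not fixed, raise the
 * minimum latency instead. */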
289 static void increase_watermark(struct userdata *u) {
290     size_t old_watermark;
291     pa_usec_t old_min_latency, new_min_latency;
292
293     pa_assert(u);
294     pa_assert(u->use_tsched);
295
296     /* First, just try to increase the watermark */
297     old_watermark = u->tsched_watermark;
298     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
299     fix_tsched_watermark(u);
300
301     if (old_watermark != u->tsched_watermark) {
302         pa_log_info("Increasing wakeup watermark to %0.2f ms",
303                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
304         return;
305     }
306
307     /* Hmm, we cannot increase the watermark any further, hence let's
308      raise the minimum latency instead, unless a fixed latency range
309      was requested in the configuration */
310     if (u->fixed_latency_range)
311         return;
312
313     old_min_latency = u->source->thread_info.min_latency;
314     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
315     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
316
317     if (old_min_latency != new_min_latency) {
318         pa_log_info("Increasing minimal latency to %0.2f ms",
319                     (double) new_min_latency / PA_USEC_PER_MSEC);
320
321         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
322     }
323
324     /* If we reach this point, we have officially run out of options. */
325 }
326
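/* We had comfortable headroom on a timeout wakeup: shrink the watermark
 * again, but no more often than once per TSCHED_WATERMARK_VERIFY_AFTER_USEC. */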
327 static void decrease_watermark(struct userdata *u) {
328     size_t old_watermark;
329     pa_usec_t now;
330
331     pa_assert(u);
332     pa_assert(u->use_tsched);
333
334     now = pa_rtclock_now();
335
336     if (u->watermark_dec_not_before <= 0)
337         goto restart;
338
339     if (u->watermark_dec_not_before > now)
340         return;
341
342     old_watermark = u->tsched_watermark;
343
344     if (u->tsched_watermark < u->watermark_dec_step)
345         u->tsched_watermark = u->tsched_watermark / 2;
346     else
347         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
348
349     fix_tsched_watermark(u);
350
351     if (old_watermark != u->tsched_watermark)
352         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
353                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
354
355     /* We don't change the latency range */
356
357 restart:
358     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
359 }
360
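/* Split the currently requested latency into the time we may sleep and the
 * time reserved for processing (the watermark). */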
361 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
362     pa_usec_t wm, usec;
363
364     pa_assert(sleep_usec);
365     pa_assert(process_usec);
366
367     pa_assert(u);
368     pa_assert(u->use_tsched);
369
370     usec = pa_source_get_requested_latency_within_thread(u->source);
371
372     if (usec == (pa_usec_t) -1)
373         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
374
375     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
376
377     if (wm > usec)
378         wm = usec/2;
379
380     *sleep_usec = usec - wm;
381     *process_usec = wm;
382
383 #ifdef DEBUG_TIMING
384     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
385                  (unsigned long) (usec / PA_USEC_PER_MSEC),
386                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
387                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
388 #endif
389 }
390
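/* Handle an ALSA error (typically -EPIPE on overrun or -ESTRPIPE after a
 * system suspend) by calling snd_pcm_recover(); on success the stream is
 * marked for restart via u->first. */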
391 static int try_recover(struct userdata *u, const char *call, int err) {
392     pa_assert(u);
393     pa_assert(call);
394     pa_assert(err < 0);
395
396     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
397
398     pa_assert(err != -EAGAIN);
399
400     if (err == -EPIPE)
401         pa_log_debug("%s: Buffer overrun!", call);
402
403     if (err == -ESTRPIPE)
404         pa_log_debug("%s: System suspended!", call);
405
406     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
407         pa_log("%s: %s", call, pa_alsa_strerror(err));
408         return -1;
409     }
410
411     u->first = TRUE;
412     return 0;
413 }
414
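/* Given how many bytes are currently available for reading, compute how much
 * space is left in the hardware buffer before an overrun and adjust the
 * tsched watermark accordingly. */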
415 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
416     size_t left_to_record;
417     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
418     pa_bool_t overrun = FALSE;
419
420     /* We use <= instead of < for this check here because an overrun
421      * only happens after the last sample was processed, not as soon as
422      * it is removed from the buffer. This is particularly important
423      * when block transfer is used. */
424
425     if (n_bytes <= rec_space)
426         left_to_record = rec_space - n_bytes;
427     else {
428
429         /* We got a dropout. What a mess! */
430         left_to_record = 0;
431         overrun = TRUE;
432
433 #ifdef DEBUG_TIMING
434         PA_DEBUG_TRAP;
435 #endif
436
437         if (pa_log_ratelimit(PA_LOG_INFO))
438             pa_log_info("Overrun!");
439     }
440
441 #ifdef DEBUG_TIMING
442     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
443 #endif
444
445     if (u->use_tsched) {
446         pa_bool_t reset_not_before = TRUE;
447
448         if (overrun || left_to_record < u->watermark_inc_threshold)
449             increase_watermark(u);
450         else if (left_to_record > u->watermark_dec_threshold) {
451             reset_not_before = FALSE;
452
453             /* We decrease the watermark only if we have actually
454              * been woken up by a timeout. If something else woke
455              * us up it's too easy to fulfill the deadlines... */
456
457             if (on_timeout)
458                 decrease_watermark(u);
459         }
460
461         if (reset_not_before)
462             u->watermark_dec_not_before = 0;
463     }
464
465     return left_to_record;
466 }
467
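/* Capture path for mmap access: map the ALSA ring buffer, post the mapped
 * data to the source as fixed memblocks and commit the consumed frames.
 * Returns 1 if data was moved, 0 otherwise, negative on fatal errors. */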
468 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
469     pa_bool_t work_done = FALSE;
470     pa_usec_t max_sleep_usec = 0, process_usec = 0;
471     size_t left_to_record;
472     unsigned j = 0;
473
474     pa_assert(u);
475     pa_source_assert_ref(u->source);
476
477     if (u->use_tsched)
478         hw_sleep_time(u, &max_sleep_usec, &process_usec);
479
480     for (;;) {
481         snd_pcm_sframes_t n;
482         size_t n_bytes;
483         int r;
484         pa_bool_t after_avail = TRUE;
485
486         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
487
488             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
489                 continue;
490
491             return r;
492         }
493
494         n_bytes = (size_t) n * u->frame_size;
495
496 #ifdef DEBUG_TIMING
497         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
498 #endif
499
500         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
501         on_timeout = FALSE;
502
503         if (u->use_tsched)
504             if (!polled &&
505                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
506 #ifdef DEBUG_TIMING
507                 pa_log_debug("Not reading, because too early.");
508 #endif
509                 break;
510             }
511
512         if (PA_UNLIKELY(n_bytes <= 0)) {
513
514             if (polled)
515                 PA_ONCE_BEGIN {
516                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
517                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
518                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
519                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
520                            pa_strnull(dn));
521                     pa_xfree(dn);
522                 } PA_ONCE_END;
523
524 #ifdef DEBUG_TIMING
525             pa_log_debug("Not reading, because not necessary.");
526 #endif
527             break;
528         }
529
530
531         if (++j > 10) {
532 #ifdef DEBUG_TIMING
533             pa_log_debug("Not filling up, because already too many iterations.");
534 #endif
535
536             break;
537         }
538
539         polled = FALSE;
540
541 #ifdef DEBUG_TIMING
542         pa_log_debug("Reading");
543 #endif
544
545         for (;;) {
546             pa_memchunk chunk;
547             void *p;
548             int err;
549             const snd_pcm_channel_area_t *areas;
550             snd_pcm_uframes_t offset, frames;
551             snd_pcm_sframes_t sframes;
552
553             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
554 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
555
556             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
557
558                 if (!after_avail && err == -EAGAIN)
559                     break;
560
561                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
562                     continue;
563
564                 return r;
565             }
566
567             /* Make sure that if these memblocks need to be copied they will fit into one slot */
568             if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
569                 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
570
571             if (!after_avail && frames == 0)
572                 break;
573
574             pa_assert(frames > 0);
575             after_avail = FALSE;
576
577             /* Check that these are multiples of 8 bits */
578             pa_assert((areas[0].first & 7) == 0);
579             pa_assert((areas[0].step & 7) == 0);
580
581             /* We assume a single interleaved memory buffer */
582             pa_assert((areas[0].first >> 3) == 0);
583             pa_assert((areas[0].step >> 3) == u->frame_size);
584
585             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
586
587             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
588             chunk.length = pa_memblock_get_length(chunk.memblock);
589             chunk.index = 0;
590
591             pa_source_post(u->source, &chunk);
592             pa_memblock_unref_fixed(chunk.memblock);
593
594             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
595
596                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
597                     continue;
598
599                 return r;
600             }
601
602             work_done = TRUE;
603
604             u->read_count += frames * u->frame_size;
605
606 #ifdef DEBUG_TIMING
607             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
608 #endif
609
610             if ((size_t) frames * u->frame_size >= n_bytes)
611                 break;
612
613             n_bytes -= (size_t) frames * u->frame_size;
614         }
615     }
616
617     if (u->use_tsched) {
618         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
619         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
620
621         if (*sleep_usec > process_usec)
622             *sleep_usec -= process_usec;
623         else
624             *sleep_usec = 0;
625     }
626
627     return work_done ? 1 : 0;
628 }
629
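/* Capture path for plain read access: fill freshly allocated memblocks via
 * snd_pcm_readi() and post them to the source. Same return convention as
 * mmap_read(). */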
630 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
631     int work_done = FALSE;
632     pa_usec_t max_sleep_usec = 0, process_usec = 0;
633     size_t left_to_record;
634     unsigned j = 0;
635
636     pa_assert(u);
637     pa_source_assert_ref(u->source);
638
639     if (u->use_tsched)
640         hw_sleep_time(u, &max_sleep_usec, &process_usec);
641
642     for (;;) {
643         snd_pcm_sframes_t n;
644         size_t n_bytes;
645         int r;
646         pa_bool_t after_avail = TRUE;
647
648         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
649
650             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
651                 continue;
652
653             return r;
654         }
655
656         n_bytes = (size_t) n * u->frame_size;
657         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
658         on_timeout = FALSE;
659
660         if (u->use_tsched)
661             if (!polled &&
662                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
663                 break;
664
665         if (PA_UNLIKELY(n_bytes <= 0)) {
666
667             if (polled)
668                 PA_ONCE_BEGIN {
669                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
670                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
671                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
672                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
673                            pa_strnull(dn));
674                     pa_xfree(dn);
675                 } PA_ONCE_END;
676
677             break;
678         }
679
680         if (++j > 10) {
681 #ifdef DEBUG_TIMING
682             pa_log_debug("Not filling up, because already too many iterations.");
683 #endif
684
685             break;
686         }
687
688         polled = FALSE;
689
690         for (;;) {
691             void *p;
692             snd_pcm_sframes_t frames;
693             pa_memchunk chunk;
694
695             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
696
697             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
698
699             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
700                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
701
702 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
703
704             p = pa_memblock_acquire(chunk.memblock);
705             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
706             pa_memblock_release(chunk.memblock);
707
708             if (PA_UNLIKELY(frames < 0)) {
709                 pa_memblock_unref(chunk.memblock);
710
711                 if (!after_avail && (int) frames == -EAGAIN)
712                     break;
713
714                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
715                     continue;
716
717                 return r;
718             }
719
720             if (!after_avail && frames == 0) {
721                 pa_memblock_unref(chunk.memblock);
722                 break;
723             }
724
725             pa_assert(frames > 0);
726             after_avail = FALSE;
727
728             chunk.index = 0;
729             chunk.length = (size_t) frames * u->frame_size;
730
731             pa_source_post(u->source, &chunk);
732             pa_memblock_unref(chunk.memblock);
733
734             work_done = TRUE;
735
736             u->read_count += frames * u->frame_size;
737
738 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
739
740             if ((size_t) frames * u->frame_size >= n_bytes)
741                 break;
742
743             n_bytes -= (size_t) frames * u->frame_size;
744         }
745     }
746
747     if (u->use_tsched) {
748         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
749         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
750
751         if (*sleep_usec > process_usec)
752             *sleep_usec -= process_usec;
753         else
754             *sleep_usec = 0;
755     }
756
757     return work_done ? 1 : 0;
758 }
759
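/* Feed a new (system time, stream time) pair into the time smoother based on
 * the current ALSA delay and the total bytes read, rate-limited by
 * u->smoother_interval. */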
760 static void update_smoother(struct userdata *u) {
761     snd_pcm_sframes_t delay = 0;
762     uint64_t position;
763     int err;
764     pa_usec_t now1 = 0, now2;
765     snd_pcm_status_t *status;
766
767     snd_pcm_status_alloca(&status);
768
769     pa_assert(u);
770     pa_assert(u->pcm_handle);
771
772     /* Let's update the time smoother */
773
774     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
775         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
776         return;
777     }
778
779     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
780         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
781     else {
782         snd_htimestamp_t htstamp = { 0, 0 };
783         snd_pcm_status_get_htstamp(status, &htstamp);
784         now1 = pa_timespec_load(&htstamp);
785     }
786
787     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
788     if (now1 <= 0)
789         now1 = pa_rtclock_now();
790
791     /* check if the time since the last update is bigger than the interval */
792     if (u->last_smoother_update > 0)
793         if (u->last_smoother_update + u->smoother_interval > now1)
794             return;
795
796     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
797     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
798
799     pa_smoother_put(u->smoother, now1, now2);
800
801     u->last_smoother_update = now1;
802     /* exponentially increase the update interval up to the MAX limit */
803     u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
804 }
805
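/* Estimate the capture latency as the difference between the smoothed stream
 * time and the amount of data actually read so far. */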
806 static pa_usec_t source_get_latency(struct userdata *u) {
807     int64_t delay;
808     pa_usec_t now1, now2;
809
810     pa_assert(u);
811
812     now1 = pa_rtclock_now();
813     now2 = pa_smoother_get(u->smoother, now1);
814
815     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
816
817     return delay >= 0 ? (pa_usec_t) delay : 0;
818 }
819
820 static int build_pollfd(struct userdata *u) {
821     pa_assert(u);
822     pa_assert(u->pcm_handle);
823
824     if (u->alsa_rtpoll_item)
825         pa_rtpoll_item_free(u->alsa_rtpoll_item);
826
827     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
828         return -1;
829
830     return 0;
831 }
832
833 /* Called from IO context */
834 static int suspend(struct userdata *u) {
835     pa_assert(u);
836     pa_assert(u->pcm_handle);
837
838     pa_smoother_pause(u->smoother, pa_rtclock_now());
839
840     /* Let's suspend */
841     snd_pcm_close(u->pcm_handle);
842     u->pcm_handle = NULL;
843
844     if (u->alsa_rtpoll_item) {
845         pa_rtpoll_item_free(u->alsa_rtpoll_item);
846         u->alsa_rtpoll_item = NULL;
847     }
848
849     pa_log_info("Device suspended...");
850
851     return 0;
852 }
853
854 /* Called from IO context */
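/* Recompute hwbuf_unused and avail_min from the currently requested latency
 * and push the new software parameters to ALSA. */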
855 static int update_sw_params(struct userdata *u) {
856     snd_pcm_uframes_t avail_min;
857     int err;
858
859     pa_assert(u);
860
861     /* Use the full buffer if no one asked us for anything specific */
862     u->hwbuf_unused = 0;
863
864     if (u->use_tsched) {
865         pa_usec_t latency;
866
867         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
868             size_t b;
869
870             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
871
872             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
873
874             /* We need at least one frame in our buffer */
875
876             if (PA_UNLIKELY(b < u->frame_size))
877                 b = u->frame_size;
878
879             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
880         }
881
882         fix_min_sleep_wakeup(u);
883         fix_tsched_watermark(u);
884     }
885
886     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
887
888     avail_min = 1;
889
890     if (u->use_tsched) {
891         pa_usec_t sleep_usec, process_usec;
892
893         hw_sleep_time(u, &sleep_usec, &process_usec);
894         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
895     }
896
897     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
898
899     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
900         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
901         return err;
902     }
903
904     return 0;
905 }
906
907 /* Called from IO Context on unsuspend or from main thread when creating source */
908 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
909                             pa_bool_t in_thread)
910 {
911     u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
912                                                     &u->source->sample_spec);
913
914     u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
915     u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
916
917     u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
918     u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
919
920     fix_min_sleep_wakeup(u);
921     fix_tsched_watermark(u);
922
923     if (in_thread)
924         pa_source_set_latency_range_within_thread(u->source,
925                                                   u->min_latency_ref,
926                                                   pa_bytes_to_usec(u->hwbuf_size, ss));
927     else {
928         pa_source_set_latency_range(u->source,
929                                     0,
930                                     pa_bytes_to_usec(u->hwbuf_size, ss));
931
932         /* Work around an assert in pa_source_set_latency_range_within_thread():
933            keep track of min_latency and reuse it when
934            this routine is called from IO context */
935         u->min_latency_ref = u->source->thread_info.min_latency;
936     }
937
938     pa_log_info("Time scheduling watermark is %0.2fms",
939                 (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
940 }
941
942 /* Called from IO context */
943 static int unsuspend(struct userdata *u) {
944     pa_sample_spec ss;
945     int err;
946     pa_bool_t b, d;
947     snd_pcm_uframes_t period_size, buffer_size;
948
949     pa_assert(u);
950     pa_assert(!u->pcm_handle);
951
952     pa_log_info("Trying resume...");
953
954     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
955                             SND_PCM_NONBLOCK|
956                             SND_PCM_NO_AUTO_RESAMPLE|
957                             SND_PCM_NO_AUTO_CHANNELS|
958                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
959         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
960         goto fail;
961     }
962
963     ss = u->source->sample_spec;
964     period_size = u->fragment_size / u->frame_size;
965     buffer_size = u->hwbuf_size / u->frame_size;
966     b = u->use_mmap;
967     d = u->use_tsched;
968
969     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
970         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
971         goto fail;
972     }
973
974     if (b != u->use_mmap || d != u->use_tsched) {
975         pa_log_warn("Resume failed, couldn't get original access mode.");
976         goto fail;
977     }
978
979     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
980         pa_log_warn("Resume failed, couldn't restore original sample settings.");
981         goto fail;
982     }
983
984     if (period_size*u->frame_size != u->fragment_size ||
985         buffer_size*u->frame_size != u->hwbuf_size) {
986         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
987                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
988                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
989         goto fail;
990     }
991
992     if (update_sw_params(u) < 0)
993         goto fail;
994
995     if (build_pollfd(u) < 0)
996         goto fail;
997
998     /* FIXME: We need to reload the volume somehow */
999
1000     u->read_count = 0;
1001     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
1002     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1003     u->last_smoother_update = 0;
1004
1005     u->first = TRUE;
1006
1007     /* Reset the watermark to the value defined when the source was created */
1008     if (u->use_tsched)
1009         reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);
1010
1011     pa_log_info("Resumed successfully...");
1012
1013     return 0;
1014
1015 fail:
1016     if (u->pcm_handle) {
1017         snd_pcm_close(u->pcm_handle);
1018         u->pcm_handle = NULL;
1019     }
1020
1021     return -PA_ERR_IO;
1022 }
1023
1024 /* Called from IO context */
1025 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1026     struct userdata *u = PA_SOURCE(o)->userdata;
1027
1028     switch (code) {
1029
1030         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1031             pa_usec_t r = 0;
1032
1033             if (u->pcm_handle)
1034                 r = source_get_latency(u);
1035
1036             *((pa_usec_t*) data) = r;
1037
1038             return 0;
1039         }
1040
1041         case PA_SOURCE_MESSAGE_SET_STATE:
1042
1043             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1044
1045                 case PA_SOURCE_SUSPENDED: {
1046                     int r;
1047
1048                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1049
1050                     if ((r = suspend(u)) < 0)
1051                         return r;
1052
1053                     break;
1054                 }
1055
1056                 case PA_SOURCE_IDLE:
1057                 case PA_SOURCE_RUNNING: {
1058                     int r;
1059
1060                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1061                         if (build_pollfd(u) < 0)
1062                             return -PA_ERR_IO;
1063                     }
1064
1065                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1066                         if ((r = unsuspend(u)) < 0)
1067                             return r;
1068                     }
1069
1070                     break;
1071                 }
1072
1073                 case PA_SOURCE_UNLINKED:
1074                 case PA_SOURCE_INIT:
1075                 case PA_SOURCE_INVALID_STATE:
1076                     ;
1077             }
1078
1079             break;
1080     }
1081
1082     return pa_source_process_msg(o, code, data, offset, chunk);
1083 }
1084
1085 /* Called from main context */
1086 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1087     pa_source_state_t old_state;
1088     struct userdata *u;
1089
1090     pa_source_assert_ref(s);
1091     pa_assert_se(u = s->userdata);
1092
1093     old_state = pa_source_get_state(u->source);
1094
1095     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1096         reserve_done(u);
1097     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1098         if (reserve_init(u, u->device_name) < 0)
1099             return -PA_ERR_BUSY;
1100
1101     return 0;
1102 }
1103
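/* Mixer element callback used from the main thread (non-deferred volume):
 * re-read volume and mute from the hardware when the element changes. */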
1104 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1105     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1106
1107     pa_assert(u);
1108     pa_assert(u->mixer_handle);
1109
1110     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1111         return 0;
1112
1113     if (!PA_SOURCE_IS_LINKED(u->source->state))
1114         return 0;
1115
1116     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1117         pa_source_set_mixer_dirty(u->source, TRUE);
1118         return 0;
1119     }
1120
1121     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1122         pa_source_get_volume(u->source, TRUE);
1123         pa_source_get_mute(u->source, TRUE);
1124     }
1125
1126     return 0;
1127 }
1128
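/* Mixer element callback used when deferred volume is enabled: trigger a
 * volume/mute re-sync on element changes. */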
1129 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1130     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1131
1132     pa_assert(u);
1133     pa_assert(u->mixer_handle);
1134
1135     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1136         return 0;
1137
1138     if (u->source->suspend_cause & PA_SUSPEND_SESSION) {
1139         pa_source_set_mixer_dirty(u->source, TRUE);
1140         return 0;
1141     }
1142
1143     if (mask & SND_CTL_EVENT_MASK_VALUE)
1144         pa_source_update_volume_and_mute(u->source);
1145
1146     return 0;
1147 }
1148
1149 static void source_get_volume_cb(pa_source *s) {
1150     struct userdata *u = s->userdata;
1151     pa_cvolume r;
1152     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1153
1154     pa_assert(u);
1155     pa_assert(u->mixer_path);
1156     pa_assert(u->mixer_handle);
1157
1158     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1159         return;
1160
1161     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1162     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1163
1164     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1165
1166     if (u->mixer_path->has_dB) {
1167         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1168
1169         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1170     }
1171
1172     if (pa_cvolume_equal(&u->hardware_volume, &r))
1173         return;
1174
1175     s->real_volume = u->hardware_volume = r;
1176
1177     /* Hmm, so the hardware volume changed, let's reset our software volume */
1178     if (u->mixer_path->has_dB)
1179         pa_source_set_soft_volume(s, NULL);
1180 }
1181
1182 static void source_set_volume_cb(pa_source *s) {
1183     struct userdata *u = s->userdata;
1184     pa_cvolume r;
1185     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1186     pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1187
1188     pa_assert(u);
1189     pa_assert(u->mixer_path);
1190     pa_assert(u->mixer_handle);
1191
1192     /* Shift up by the base volume */
1193     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1194
1195     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1196         return;
1197
1198     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1199     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1200
1201     u->hardware_volume = r;
1202
1203     if (u->mixer_path->has_dB) {
1204         pa_cvolume new_soft_volume;
1205         pa_bool_t accurate_enough;
1206         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1207
1208         /* Match exactly what the user requested by software */
1209         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1210
1211         /* If the adjustment to do in software is only minimal we
1212          * can skip it. That saves us CPU at the expense of a bit of
1213          * accuracy */
1214         accurate_enough =
1215             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1216             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1217
1218         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1219         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1220         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1221         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1222         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1223                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1224                      pa_yes_no(accurate_enough));
1225         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1226
1227         if (!accurate_enough)
1228             s->soft_volume = new_soft_volume;
1229
1230     } else {
1231         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1232
1233         /* We can't match exactly what the user requested, hence let's
1234          * at least tell the user about it */
1235
1236         s->real_volume = r;
1237     }
1238 }
1239
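/* Deferred-volume write callback: write thread_info.current_hw_volume to the
 * hardware and log if the value actually written deviates noticeably from
 * the request. */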
1240 static void source_write_volume_cb(pa_source *s) {
1241     struct userdata *u = s->userdata;
1242     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1243
1244     pa_assert(u);
1245     pa_assert(u->mixer_path);
1246     pa_assert(u->mixer_handle);
1247     pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1248
1249     /* Shift up by the base volume */
1250     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1251
1252     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1253         pa_log_error("Writing HW volume failed");
1254     else {
1255         pa_cvolume tmp_vol;
1256         pa_bool_t accurate_enough;
1257
1258         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1259         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1260
1261         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1262         accurate_enough =
1263             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1264             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1265
1266         if (!accurate_enough) {
1267             union {
1268                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1269                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1270             } vol;
1271
1272             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1273                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1274                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1275             pa_log_debug("                                           in dB: %s (request) != %s",
1276                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1277                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1278         }
1279     }
1280 }
1281
1282 static void source_get_mute_cb(pa_source *s) {
1283     struct userdata *u = s->userdata;
1284     pa_bool_t b;
1285
1286     pa_assert(u);
1287     pa_assert(u->mixer_path);
1288     pa_assert(u->mixer_handle);
1289
1290     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1291         return;
1292
1293     s->muted = b;
1294 }
1295
1296 static void source_set_mute_cb(pa_source *s) {
1297     struct userdata *u = s->userdata;
1298
1299     pa_assert(u);
1300     pa_assert(u->mixer_path);
1301     pa_assert(u->mixer_handle);
1302
1303     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1304 }
1305
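/* Wire up the volume and mute callbacks according to what the selected mixer
 * path supports (hardware volume, dB scale, deferred volume, mute), falling
 * back to software control where necessary. */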
1306 static void mixer_volume_init(struct userdata *u) {
1307     pa_assert(u);
1308
1309     if (!u->mixer_path->has_volume) {
1310         pa_source_set_write_volume_callback(u->source, NULL);
1311         pa_source_set_get_volume_callback(u->source, NULL);
1312         pa_source_set_set_volume_callback(u->source, NULL);
1313
1314         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1315     } else {
1316         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1317         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1318
1319         if (u->mixer_path->has_dB && u->deferred_volume) {
1320             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1321             pa_log_info("Successfully enabled deferred volume.");
1322         } else
1323             pa_source_set_write_volume_callback(u->source, NULL);
1324
1325         if (u->mixer_path->has_dB) {
1326             pa_source_enable_decibel_volume(u->source, TRUE);
1327             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1328
1329             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1330             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1331
1332             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1333         } else {
1334             pa_source_enable_decibel_volume(u->source, FALSE);
1335             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1336
1337             u->source->base_volume = PA_VOLUME_NORM;
1338             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1339         }
1340
1341         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1342     }
1343
1344     if (!u->mixer_path->has_mute) {
1345         pa_source_set_get_mute_callback(u->source, NULL);
1346         pa_source_set_set_mute_callback(u->source, NULL);
1347         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1348     } else {
1349         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1350         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1351         pa_log_info("Using hardware mute control.");
1352     }
1353 }
1354
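/* Switch to the mixer path belonging to the newly selected port and re-apply
 * the current mute and volume settings through it. */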
1355 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1356     struct userdata *u = s->userdata;
1357     pa_alsa_port_data *data;
1358
1359     pa_assert(u);
1360     pa_assert(p);
1361     pa_assert(u->mixer_handle);
1362
1363     data = PA_DEVICE_PORT_DATA(p);
1364
1365     pa_assert_se(u->mixer_path = data->path);
1366     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1367
1368     mixer_volume_init(u);
1369
1370     if (data->setting)
1371         pa_alsa_setting_select(data->setting, u->mixer_handle);
1372
1373     if (s->set_mute)
1374         s->set_mute(s);
1375     if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1376         if (s->write_volume)
1377             s->write_volume(s);
1378     } else {
1379         if (s->set_volume)
1380             s->set_volume(s);
1381     }
1382
1383     return 0;
1384 }
1385
1386 static void source_update_requested_latency_cb(pa_source *s) {
1387     struct userdata *u = s->userdata;
1388     pa_assert(u);
1389     pa_assert(u->use_tsched); /* only when timer scheduling is used
1390                                * we can dynamically adjust the
1391                                * latency */
1392
1393     if (!u->pcm_handle)
1394         return;
1395
1396     update_sw_params(u);
1397 }
1398
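/* Accept a new sample rate only if it is in the list of supported rates and
 * the source is not currently opened. */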
1399 static pa_bool_t source_update_rate_cb(pa_source *s, uint32_t rate)
1400 {
1401     struct userdata *u = s->userdata;
1402     int i;
1403     pa_bool_t supported = FALSE;
1404
1405     pa_assert(u);
1406
1407     for (i = 0; u->rates[i]; i++) {
1408         if (u->rates[i] == rate) {
1409             supported = TRUE;
1410             break;
1411         }
1412     }
1413
1414     if (!supported) {
1415         pa_log_info("Sink does not support sample rate of %d Hz", rate);
1416         return FALSE;
1417     }
1418
1419     if (!PA_SOURCE_IS_OPENED(s->state)) {
1420         pa_log_info("Updating rate for device %s, new rate is %d", u->device_name, rate);
1421         u->source->sample_spec.rate = rate;
1422         return TRUE;
1423     }
1424
1425     return FALSE;
1426 }
1427
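/* The IO thread: start capture, read data from the PCM device, update the
 * time smoother, program the next wakeup and apply deferred volume changes,
 * until asked to shut down. */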
1428 static void thread_func(void *userdata) {
1429     struct userdata *u = userdata;
1430     unsigned short revents = 0;
1431
1432     pa_assert(u);
1433
1434     pa_log_debug("Thread starting up");
1435
1436     if (u->core->realtime_scheduling)
1437         pa_make_realtime(u->core->realtime_priority);
1438
1439     pa_thread_mq_install(&u->thread_mq);
1440
1441     for (;;) {
1442         int ret;
1443         pa_usec_t rtpoll_sleep = 0;
1444
1445 #ifdef DEBUG_TIMING
1446         pa_log_debug("Loop");
1447 #endif
1448
1449         /* Read some data and pass it to the sources */
1450         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1451             int work_done;
1452             pa_usec_t sleep_usec = 0;
1453             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1454
1455             if (u->first) {
1456                 pa_log_info("Starting capture.");
1457                 snd_pcm_start(u->pcm_handle);
1458
1459                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1460
1461                 u->first = FALSE;
1462             }
1463
1464             if (u->use_mmap)
1465                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1466             else
1467                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1468
1469             if (work_done < 0)
1470                 goto fail;
1471
1472 /*             pa_log_debug("work_done = %i", work_done); */
1473
1474             if (work_done)
1475                 update_smoother(u);
1476
1477             if (u->use_tsched) {
1478                 pa_usec_t cusec;
1479
1480                 /* OK, the capture buffer is now empty, let's
1481                  * calculate when to wake up next */
1482
1483 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1484
1485                 /* Convert from the sound card time domain to the
1486                  * system time domain */
1487                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1488
1489 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1490
1491                 /* We don't trust the conversion, so we wake up at whichever time comes first */
1492                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1493             }
1494         }
1495
1496         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1497             pa_usec_t volume_sleep;
1498             pa_source_volume_change_apply(u->source, &volume_sleep);
1499             if (volume_sleep > 0) {
1500                 if (rtpoll_sleep > 0)
1501                     rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1502                 else
1503                     rtpoll_sleep = volume_sleep;
1504             }
1505         }
1506
1507         if (rtpoll_sleep > 0)
1508             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1509         else
1510             pa_rtpoll_set_timer_disabled(u->rtpoll);
1511
1512         /* Hmm, nothing to do. Let's sleep */
1513         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1514             goto fail;
1515
1516         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1517             pa_source_volume_change_apply(u->source, NULL);
1518
1519         if (ret == 0)
1520             goto finish;
1521
1522         /* Tell ALSA about this and process its response */
1523         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1524             struct pollfd *pollfd;
1525             int err;
1526             unsigned n;
1527
1528             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1529
1530             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1531                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1532                 goto fail;
1533             }
1534
1535             if (revents & ~POLLIN) {
1536                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1537                     goto fail;
1538
1539                 u->first = TRUE;
1540                 revents = 0;
1541             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1542                 pa_log_debug("Wakeup from ALSA!");
1543
1544         } else
1545             revents = 0;
1546     }
1547
1548 fail:
1549     /* If this was not a regular exit from the loop we have to continue
1550      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1551     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1552     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1553
1554 finish:
1555     pa_log_debug("Thread shutting down");
1556 }
1557
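/* Derive the source name from the "source_name" or "name" module arguments,
 * or fall back to "alsa_input.<device>[.<mapping>]". */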
1558 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1559     const char *n;
1560     char *t;
1561
1562     pa_assert(data);
1563     pa_assert(ma);
1564     pa_assert(device_name);
1565
1566     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1567         pa_source_new_data_set_name(data, n);
1568         data->namereg_fail = TRUE;
1569         return;
1570     }
1571
1572     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1573         data->namereg_fail = TRUE;
1574     else {
1575         n = device_id ? device_id : device_name;
1576         data->namereg_fail = FALSE;
1577     }
1578
1579     if (mapping)
1580         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1581     else
1582         t = pa_sprintf_malloc("alsa_input.%s", n);
1583
1584     pa_source_new_data_set_name(data, t);
1585     pa_xfree(t);
1586 }
1587
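/* Locate a usable mixer for the PCM device and either synthesize a mixer
 * path from the given element or take the path set from the mapping. */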
1588 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1589     snd_hctl_t *hctl;
1590
1591     if (!mapping && !element)
1592         return;
1593
1594     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device, &hctl))) {
1595         pa_log_info("Failed to find a working mixer device.");
1596         return;
1597     }
1598
1599     if (element) {
1600
1601         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1602             goto fail;
1603
1604         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, hctl, ignore_dB) < 0)
1605             goto fail;
1606
1607         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1608         pa_alsa_path_dump(u->mixer_path);
1609     } else if (!(u->mixer_path_set = mapping->input_path_set))
1610         goto fail;
1611
1612     return;
1613
1614 fail:
1615
1616     if (u->mixer_path) {
1617         pa_alsa_path_free(u->mixer_path);
1618         u->mixer_path = NULL;
1619     }
1620
1621     if (u->mixer_handle) {
1622         snd_mixer_close(u->mixer_handle);
1623         u->mixer_handle = NULL;
1624     }
1625 }
1626
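/* Activate the mixer path of the active port (or the single path we found),
 * initialise hardware volume/mute handling, and if any path supports volume
 * or mute, register for mixer change notifications: on the IO thread's
 * rtpoll when deferred volume is used, otherwise on the main loop. */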
1627 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1628     pa_bool_t need_mixer_callback = FALSE;
1629
1630     pa_assert(u);
1631
1632     if (!u->mixer_handle)
1633         return 0;
1634
1635     if (u->source->active_port) {
1636         pa_alsa_port_data *data;
1637
1638         /* We have a list of supported paths, so let's activate the
1639          * one that has been chosen as active */
1640
1641         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1642         u->mixer_path = data->path;
1643
1644         pa_alsa_path_select(data->path, u->mixer_handle);
1645
1646         if (data->setting)
1647             pa_alsa_setting_select(data->setting, u->mixer_handle);
1648
1649     } else {
1650
1651         if (!u->mixer_path && u->mixer_path_set)
1652             u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1653
1654         if (u->mixer_path) {
1655             /* We have only a single path, so let's activate it */
1656
1657             pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1658
1659             if (u->mixer_path->settings)
1660                 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1661         } else
1662             return 0;
1663     }
1664
1665     mixer_volume_init(u);
1666
1667     /* Will we need to register callbacks? */
1668     if (u->mixer_path_set && u->mixer_path_set->paths) {
1669         pa_alsa_path *p;
1670         void *state;
1671
1672         PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1673             if (p->has_volume || p->has_mute)
1674                 need_mixer_callback = TRUE;
1675         }
1676     }
1677     else if (u->mixer_path)
1678         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1679
1680     if (need_mixer_callback) {
1681         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1682         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1683             u->mixer_pd = pa_alsa_mixer_pdata_new();
1684             mixer_callback = io_mixer_callback;
1685
1686             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1687                 pa_log("Failed to initialize file descriptor monitoring");
1688                 return -1;
1689             }
1690         } else {
1691             u->mixer_fdl = pa_alsa_fdlist_new();
1692             mixer_callback = ctl_mixer_callback;
1693
1694             if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1695                 pa_log("Failed to initialize file descriptor monitoring");
1696                 return -1;
1697             }
1698         }
1699
1700         if (u->mixer_path_set)
1701             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1702         else
1703             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1704     }
1705
1706     return 0;
1707 }
1708
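/* Create and set up a new ALSA source: parse the module arguments, open the
 * PCM device (by mapping, by device id or by device string), configure
 * buffering and the mixer, and finally start the IO thread. */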
1709 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1710
1711     struct userdata *u = NULL;
1712     const char *dev_id = NULL;
1713     pa_sample_spec ss;
1714     uint32_t alternate_sample_rate;
1715     pa_channel_map map;
1716     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1717     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1718     size_t frame_size;
1719     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE, fixed_latency_range = FALSE;
1720     pa_source_new_data data;
1721     pa_alsa_profile_set *profile_set = NULL;
1722
1723     pa_assert(m);
1724     pa_assert(ma);
1725
1726     ss = m->core->default_sample_spec;
1727     map = m->core->default_channel_map;
1728     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1729         pa_log("Failed to parse sample specification and channel map");
1730         goto fail;
1731     }
1732
1733     alternate_sample_rate = m->core->alternate_sample_rate;
1734     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
1735         pa_log("Failed to parse alternate sample rate");
1736         goto fail;
1737     }
1738
1739     frame_size = pa_frame_size(&ss);
1740
1741     nfrags = m->core->default_n_fragments;
1742     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1743     if (frag_size <= 0)
1744         frag_size = (uint32_t) frame_size;
1745     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1746     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1747
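    /* The defaults above may be overridden through module arguments; the
     * sizes are in bytes and are converted to frame counts below. */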
1748     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1749         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1750         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1751         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1752         pa_log("Failed to parse buffer metrics");
1753         goto fail;
1754     }
1755
1756     buffer_size = nfrags * frag_size;
1757
1758     period_frames = frag_size/frame_size;
1759     buffer_frames = buffer_size/frame_size;
1760     tsched_frames = tsched_size/frame_size;
1761
1762     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1763         pa_log("Failed to parse mmap argument.");
1764         goto fail;
1765     }
1766
1767     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1768         pa_log("Failed to parse tsched argument.");
1769         goto fail;
1770     }
1771
1772     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1773         pa_log("Failed to parse ignore_dB argument.");
1774         goto fail;
1775     }
1776
1777     deferred_volume = m->core->deferred_volume;
1778     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1779         pa_log("Failed to parse deferred_volume argument.");
1780         goto fail;
1781     }
1782
1783     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
1784         pa_log("Failed to parse fixed_latency_range argument.");
1785         goto fail;
1786     }
1787
1788     use_tsched = pa_alsa_may_tsched(use_tsched);
1789
1790     u = pa_xnew0(struct userdata, 1);
1791     u->core = m->core;
1792     u->module = m;
1793     u->use_mmap = use_mmap;
1794     u->use_tsched = use_tsched;
1795     u->deferred_volume = deferred_volume;
1796     u->fixed_latency_range = fixed_latency_range;
1797     u->first = TRUE;
1798     u->rtpoll = pa_rtpoll_new();
1799     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1800
1801     u->smoother = pa_smoother_new(
1802             SMOOTHER_ADJUST_USEC,
1803             SMOOTHER_WINDOW_USEC,
1804             TRUE,
1805             TRUE,
1806             5,
1807             pa_rtclock_now(),
1808             TRUE);
1809     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1810
1811     dev_id = pa_modargs_get_value(
1812             ma, "device_id",
1813             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1814
1815     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
1816
1817     if (reserve_init(u, dev_id) < 0)
1818         goto fail;
1819
1820     if (reserve_monitor_init(u, dev_id) < 0)
1821         goto fail;
1822
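    /* b and d are passed to the open helpers by reference; on return they
     * tell us whether mmap and timer-based scheduling are actually usable
     * on this device. */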
1823     b = use_mmap;
1824     d = use_tsched;
1825
1826     if (mapping) {
1827
1828         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1829             pa_log("device_id= not set");
1830             goto fail;
1831         }
1832
1833         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1834                       dev_id,
1835                       &u->device_name,
1836                       &ss, &map,
1837                       SND_PCM_STREAM_CAPTURE,
1838                       &period_frames, &buffer_frames, tsched_frames,
1839                       &b, &d, mapping)))
1840             goto fail;
1841
1842     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1843
1844         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1845             goto fail;
1846
1847         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1848                       dev_id,
1849                       &u->device_name,
1850                       &ss, &map,
1851                       SND_PCM_STREAM_CAPTURE,
1852                       &period_frames, &buffer_frames, tsched_frames,
1853                       &b, &d, profile_set, &mapping)))
1854             goto fail;
1855
1856     } else {
1857
1858         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1859                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1860                       &u->device_name,
1861                       &ss, &map,
1862                       SND_PCM_STREAM_CAPTURE,
1863                       &period_frames, &buffer_frames, tsched_frames,
1864                       &b, &d, FALSE)))
1865             goto fail;
1866     }
1867
1868     pa_assert(u->device_name);
1869     pa_log_info("Successfully opened device %s.", u->device_name);
1870
1871     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1872         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1873         goto fail;
1874     }
1875
1876     if (mapping)
1877         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1878
1879     if (use_mmap && !b) {
1880         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1881         u->use_mmap = use_mmap = FALSE;
1882     }
1883
1884     if (use_tsched && (!b || !d)) {
1885         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1886         u->use_tsched = use_tsched = FALSE;
1887     }
1888
1889     if (u->use_mmap)
1890         pa_log_info("Successfully enabled mmap() mode.");
1891
1892     if (u->use_tsched) {
1893         pa_log_info("Successfully enabled timer-based scheduling mode.");
1894         if (u->fixed_latency_range)
1895             pa_log_info("Disabling latency range changes on overrun");
1896     }
1897
1898     u->rates = pa_alsa_get_supported_rates(u->pcm_handle);
1899     if (!u->rates) {
1900         pa_log_error("Failed to find any supported sample rates.");
1901         goto fail;
1902     }
1903
1904     /* ALSA might tweak the sample spec, so recalculate the frame size */
1905     frame_size = pa_frame_size(&ss);
1906
1907     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1908
1909     pa_source_new_data_init(&data);
1910     data.driver = driver;
1911     data.module = m;
1912     data.card = card;
1913     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1914
1915     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1916      * variable instead of using &data.namereg_fail directly, because
1917      * data.namereg_fail is a bitfield and taking the address of a bitfield
1918      * variable is impossible. */
1919     namereg_fail = data.namereg_fail;
1920     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1921         pa_log("Failed to parse namereg_fail argument.");
1922         pa_source_new_data_done(&data);
1923         goto fail;
1924     }
1925     data.namereg_fail = namereg_fail;
1926
1927     pa_source_new_data_set_sample_spec(&data, &ss);
1928     pa_source_new_data_set_channel_map(&data, &map);
1929     pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
1930
1931     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1932     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1933     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1934     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1935     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1936
1937     if (mapping) {
1938         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1939         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1940     }
1941
1942     pa_alsa_init_description(data.proplist);
1943
1944     if (u->control_device)
1945         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1946
1947     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1948         pa_log("Invalid properties");
1949         pa_source_new_data_done(&data);
1950         goto fail;
1951     }
1952
1953     if (u->mixer_path_set)
1954         pa_alsa_add_ports(&data.ports, u->mixer_path_set, card);
1955
1956     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1957     pa_source_new_data_done(&data);
1958
1959     if (!u->source) {
1960         pa_log("Failed to create source object");
1961         goto fail;
1962     }
1963
1964     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1965                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1966         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1967         goto fail;
1968     }
1969
1970     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1971                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1972         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1973         goto fail;
1974     }
1975
1976     u->source->parent.process_msg = source_process_msg;
1977     if (u->use_tsched)
1978         u->source->update_requested_latency = source_update_requested_latency_cb;
1979     u->source->set_state = source_set_state_cb;
1980     u->source->set_port = source_set_port_cb;
1981     if (u->source->alternate_sample_rate)
1982         u->source->update_rate = source_update_rate_cb;
1983     u->source->userdata = u;
1984
1985     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1986     pa_source_set_rtpoll(u->source, u->rtpoll);
1987
1988     u->frame_size = frame_size;
1989     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1990     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1991     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1992
1993     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1994                 (double) u->hwbuf_size / (double) u->fragment_size,
1995                 (long unsigned) u->fragment_size,
1996                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1997                 (long unsigned) u->hwbuf_size,
1998                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1999
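    /* With timer-based scheduling the wakeup watermark is adjusted
     * dynamically at runtime; without it the source latency is fixed to the
     * size of the hardware buffer. */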
2000     if (u->use_tsched) {
2001         u->tsched_watermark_ref = tsched_watermark;
2002         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
2003     }
2004     else
2005         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2006
2007     reserve_update(u);
2008
2009     if (update_sw_params(u) < 0)
2010         goto fail;
2011
2012     if (setup_mixer(u, ignore_dB) < 0)
2013         goto fail;
2014
2015     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2016
2017     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
2018         pa_log("Failed to create thread.");
2019         goto fail;
2020     }
2021
2022     /* Get initial mixer settings */
2023     if (data.volume_is_set) {
2024         if (u->source->set_volume)
2025             u->source->set_volume(u->source);
2026     } else {
2027         if (u->source->get_volume)
2028             u->source->get_volume(u->source);
2029     }
2030
2031     if (data.muted_is_set) {
2032         if (u->source->set_mute)
2033             u->source->set_mute(u->source);
2034     } else {
2035         if (u->source->get_mute)
2036             u->source->get_mute(u->source);
2037     }
2038
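    /* If deferred volume is in use, write_volume() is set; calling it here
     * writes the volume/mute we just applied out to the hardware mixer so
     * that mixer and source state agree right from the start. */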
2039     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
2040         u->source->write_volume(u->source);
2041
2042     pa_source_put(u->source);
2043
2044     if (profile_set)
2045         pa_alsa_profile_set_free(profile_set);
2046
2047     return u->source;
2048
2049 fail:
2050
2051     if (u)
2052         userdata_free(u);
2053
2054     if (profile_set)
2055         pa_alsa_profile_set_free(profile_set);
2056
2057     return NULL;
2058 }
2059
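/* Tear down in reverse order of construction: unlink the source so no new
 * data reaches it, shut down the IO thread, then release the ALSA handles
 * and the remaining resources. */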
2060 static void userdata_free(struct userdata *u) {
2061     pa_assert(u);
2062
2063     if (u->source)
2064         pa_source_unlink(u->source);
2065
2066     if (u->thread) {
2067         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2068         pa_thread_free(u->thread);
2069     }
2070
2071     pa_thread_mq_done(&u->thread_mq);
2072
2073     if (u->source)
2074         pa_source_unref(u->source);
2075
2076     if (u->mixer_pd)
2077         pa_alsa_mixer_pdata_free(u->mixer_pd);
2078
2079     if (u->alsa_rtpoll_item)
2080         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2081
2082     if (u->rtpoll)
2083         pa_rtpoll_free(u->rtpoll);
2084
2085     if (u->pcm_handle) {
2086         snd_pcm_drop(u->pcm_handle);
2087         snd_pcm_close(u->pcm_handle);
2088     }
2089
2090     if (u->mixer_fdl)
2091         pa_alsa_fdlist_free(u->mixer_fdl);
2092
2093     if (u->mixer_path && !u->mixer_path_set)
2094         pa_alsa_path_free(u->mixer_path);
2095
2096     if (u->mixer_handle)
2097         snd_mixer_close(u->mixer_handle);
2098
2099     if (u->smoother)
2100         pa_smoother_free(u->smoother);
2101
2102     if (u->rates)
2103         pa_xfree(u->rates);
2104
2105     reserve_done(u);
2106     monitor_done(u);
2107
2108     pa_xfree(u->device_name);
2109     pa_xfree(u->control_device);
2110     pa_xfree(u->paths_dir);
2111     pa_xfree(u);
2112 }
2113
2114 void pa_alsa_source_free(pa_source *s) {
2115     struct userdata *u;
2116
2117     pa_source_assert_ref(s);
2118     pa_assert_se(u = s->userdata);
2119
2120     userdata_free(u);
2121 }